Compare commits


18 Commits

Author           SHA1        Message                      Date
ayush chaurasia  db67b27a42  ruff                         2024-04-16 12:11:57 +05:30
ayush chaurasia  4b0820ef15  ruff                         2024-04-16 12:09:18 +05:30
ayush chaurasia  c7bb919561  update benchmark script      2024-04-16 11:13:52 +05:30
ayush chaurasia  df404b726e  ruff                         2024-04-16 10:00:47 +05:30
ayush chaurasia  ffbb104648  remove protected namespaces  2024-04-16 09:51:08 +05:30
ayush chaurasia  3ebd561fd9  ruff                         2024-04-16 09:26:47 +05:30
ayush chaurasia  6bc488f674  update                       2024-04-16 09:24:29 +05:30
ayush chaurasia  ea34c0b4c4  update                       2024-04-16 09:07:40 +05:30
ayush chaurasia  1a827925eb  update docs                  2024-04-16 08:59:36 +05:30
ayush chaurasia  fe5888d661  update usage                 2024-04-16 08:24:40 +05:30
ayush chaurasia  6074e6b7ee  update                       2024-04-15 17:19:04 +05:30
ayush chaurasia  fd8de238bb  ruff                         2024-04-15 17:11:59 +05:30
ayush chaurasia  d0c1113417  add test                     2024-04-15 17:07:29 +05:30
ayush chaurasia  3ca96a852f  remove test file             2024-04-15 17:02:58 +05:30
ayush chaurasia  9428c6b565  update                       2024-04-15 16:59:16 +05:30
ayush chaurasia  ff00a3242c  update                       2024-04-15 07:52:04 +05:30
ayush chaurasia  878deb73a0  update                       2024-04-15 07:51:05 +05:30
ayush chaurasia  c75bb65609  update                       2024-04-15 05:59:26 +05:30
114 changed files with 10511 additions and 12677 deletions

.bumpversion.cfg Normal file

@@ -0,0 +1,22 @@
[bumpversion]
current_version = 0.4.17
commit = True
message = Bump version: {current_version} → {new_version}
tag = True
tag_name = v{new_version}
[bumpversion:file:node/package.json]
[bumpversion:file:nodejs/package.json]
[bumpversion:file:nodejs/npm/darwin-x64/package.json]
[bumpversion:file:nodejs/npm/darwin-arm64/package.json]
[bumpversion:file:nodejs/npm/linux-x64-gnu/package.json]
[bumpversion:file:nodejs/npm/linux-arm64-gnu/package.json]
[bumpversion:file:rust/ffi/node/Cargo.toml]
[bumpversion:file:rust/lancedb/Cargo.toml]


@@ -1,57 +0,0 @@
[tool.bumpversion]
current_version = "0.5.0"
parse = """(?x)
(?P<major>0|[1-9]\\d*)\\.
(?P<minor>0|[1-9]\\d*)\\.
(?P<patch>0|[1-9]\\d*)
(?:-(?P<pre_l>[a-zA-Z-]+)\\.(?P<pre_n>0|[1-9]\\d*))?
"""
serialize = [
"{major}.{minor}.{patch}-{pre_l}.{pre_n}",
"{major}.{minor}.{patch}",
]
search = "{current_version}"
replace = "{new_version}"
regex = false
ignore_missing_version = false
ignore_missing_files = false
tag = true
sign_tags = false
tag_name = "v{new_version}"
tag_message = "Bump version: {current_version} → {new_version}"
allow_dirty = true
commit = true
message = "Bump version: {current_version} → {new_version}"
commit_args = ""
[tool.bumpversion.parts.pre_l]
values = ["beta", "final"]
optional_value = "final"
[[tool.bumpversion.files]]
filename = "node/package.json"
search = "\"version\": \"{current_version}\","
replace = "\"version\": \"{new_version}\","
[[tool.bumpversion.files]]
filename = "nodejs/package.json"
search = "\"version\": \"{current_version}\","
replace = "\"version\": \"{new_version}\","
# nodejs binary packages
[[tool.bumpversion.files]]
glob = "nodejs/npm/*/package.json"
search = "\"version\": \"{current_version}\","
replace = "\"version\": \"{new_version}\","
# Cargo files
# ------------
[[tool.bumpversion.files]]
filename = "rust/ffi/node/Cargo.toml"
search = "\nversion = \"{current_version}\""
replace = "\nversion = \"{new_version}\""
[[tool.bumpversion.files]]
filename = "rust/lancedb/Cargo.toml"
search = "\nversion = \"{current_version}\""
replace = "\nversion = \"{new_version}\""

.github/labeler.yml vendored

@@ -1,33 +0,0 @@
version: 1
appendOnly: true
# Labels are applied based on conventional commits standard
# https://www.conventionalcommits.org/en/v1.0.0/
# These labels are later used in release notes. See .github/release.yml
labels:
# If the PR title has an ! before the : it will be considered a breaking change
# For example, `feat!: add new feature` will be considered a breaking change
- label: breaking-change
title: "^[^:]+!:.*"
- label: breaking-change
body: "BREAKING CHANGE"
- label: enhancement
title: "^feat(\\(.+\\))?!?:.*"
- label: bug
title: "^fix(\\(.+\\))?!?:.*"
- label: documentation
title: "^docs(\\(.+\\))?!?:.*"
- label: performance
title: "^perf(\\(.+\\))?!?:.*"
- label: ci
title: "^ci(\\(.+\\))?!?:.*"
- label: chore
title: "^(chore|test|build|style)(\\(.+\\))?!?:.*"
- label: Python
files:
- "^python\\/.*"
- label: Rust
files:
- "^rust\\/.*"
- label: typescript
files:
- "^node\\/.*"


@@ -1,41 +0,0 @@
{
"ignore_labels": ["chore"],
"pr_template": "- ${{TITLE}} by @${{AUTHOR}} in ${{URL}}",
"categories": [
{
"title": "## 🏆 Highlights",
"labels": ["highlight"]
},
{
"title": "## 🛠 Breaking Changes",
"labels": ["breaking-change"]
},
{
"title": "## ⚠️ Deprecations ",
"labels": ["deprecation"]
},
{
"title": "## 🎉 New Features",
"labels": ["enhancement"]
},
{
"title": "## 🐛 Bug Fixes",
"labels": ["bug"]
},
{
"title": "## 📚 Documentation",
"labels": ["documentation"]
},
{
"title": "## 🚀 Performance Improvements",
"labels": ["performance"]
},
{
"title": "## Other Changes"
},
{
"title": "## 🔧 Build and CI",
"labels": ["ci"]
}
]
}


@@ -1,12 +1,8 @@
name: Cargo Publish
on:
push:
tags-ignore:
# We don't publish pre-releases for Rust. Crates.io is just a source
# distribution, so we don't need to publish pre-releases.
- 'v*-beta*'
- '*-v*' # for example, python-vX.Y.Z
release:
types: [ published ]
env:
# This env var is used by Swatinem/rust-cache@v2 for the cache


@@ -1,81 +0,0 @@
name: PR Checks
on:
pull_request_target:
types: [opened, edited, synchronize, reopened]
concurrency:
group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
cancel-in-progress: true
jobs:
labeler:
permissions:
pull-requests: write
name: Label PR
runs-on: ubuntu-latest
steps:
- uses: srvaroa/labeler@master
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
commitlint:
permissions:
pull-requests: write
name: Verify PR title / description conforms to semantic-release
runs-on: ubuntu-latest
steps:
- uses: actions/setup-node@v3
with:
node-version: "18"
# These rules are disabled because Github will always ensure there
# is a blank line between the title and the body and Github will
# word wrap the description field to ensure a reasonable max line
# length.
- run: npm install @commitlint/config-conventional
- run: >
echo 'module.exports = {
"rules": {
"body-max-line-length": [0, "always", Infinity],
"footer-max-line-length": [0, "always", Infinity],
"body-leading-blank": [0, "always"]
}
}' > .commitlintrc.js
- run: npx commitlint --extends @commitlint/config-conventional --verbose <<< $COMMIT_MSG
env:
COMMIT_MSG: >
${{ github.event.pull_request.title }}
${{ github.event.pull_request.body }}
- if: failure()
uses: actions/github-script@v6
with:
script: |
const message = `**ACTION NEEDED**
Lance follows the [Conventional Commits specification](https://www.conventionalcommits.org/en/v1.0.0/) for release automation.
The PR title and description are used as the merge commit message.\
Please update your PR title and description to match the specification.
For details on the error please inspect the "PR Title Check" action.
`
// Get list of current comments
const comments = await github.paginate(github.rest.issues.listComments, {
owner: context.repo.owner,
repo: context.repo.repo,
issue_number: context.issue.number
});
// Check if this job already commented
for (const comment of comments) {
if (comment.body === message) {
return // Already commented
}
}
// Post the comment about Conventional Commits
github.rest.issues.createComment({
owner: context.repo.owner,
repo: context.repo.repo,
issue_number: context.issue.number,
body: message
})
core.setFailed(message)


@@ -1,62 +1,37 @@
name: Create release commit
# This workflow increments versions, tags the version, and pushes it.
# When a tag is pushed, another workflow is triggered that creates a GH release
# and uploads the binaries. This workflow is only for creating the tag.
# This script will enforce that a minor version is incremented if there are any
# breaking changes since the last minor increment. However, it isn't able to
# differentiate between breaking changes in Node versus Python. If you wish to
# bypass this check, you can manually increment the version and push the tag.
on:
workflow_dispatch:
inputs:
dry_run:
description: 'Dry run (create the local commit/tags but do not push it)'
required: true
default: false
type: boolean
type:
description: 'What kind of release is this?'
required: true
default: 'preview'
default: "false"
type: choice
options:
- preview
- stable
python:
description: 'Make a Python release'
- "true"
- "false"
part:
description: 'What kind of release is this?'
required: true
default: true
type: boolean
other:
description: 'Make a Node/Rust release'
required: true
default: true
type: boolean
bump-minor:
description: 'Bump minor version'
required: true
default: false
type: boolean
default: 'patch'
type: choice
options:
- patch
- minor
- major
jobs:
make-release:
# Creates tag and GH release. The GH release will trigger the build and release jobs.
bump-version:
runs-on: ubuntu-latest
permissions:
contents: write
steps:
- name: Output Inputs
run: echo "${{ toJSON(github.event.inputs) }}"
- uses: actions/checkout@v4
- name: Check out main
uses: actions/checkout@v4
with:
ref: main
persist-credentials: false
fetch-depth: 0
lfs: true
# It's important we use our token here, as the default token will NOT
# trigger any workflows watching for new tags. See:
# https://docs.github.com/en/actions/using-workflows/triggering-a-workflow#triggering-a-workflow-from-a-workflow
token: ${{ secrets.LANCEDB_RELEASE_TOKEN }}
- name: Set git configs for bumpversion
shell: bash
run: |
@@ -66,34 +41,19 @@ jobs:
uses: actions/setup-python@v5
with:
python-version: "3.11"
- name: Bump Python version
if: ${{ inputs.python }}
working-directory: python
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
- name: Bump version, create tag and commit
run: |
# Need to get the commit before bumping the version, so we can
# determine if there are breaking changes in the next step as well.
echo "COMMIT_BEFORE_BUMP=$(git rev-parse HEAD)" >> $GITHUB_ENV
pip install bump-my-version PyGithub packaging
bash ../ci/bump_version.sh ${{ inputs.type }} ${{ inputs.bump-minor }} python-v
- name: Bump Node/Rust version
if: ${{ inputs.other }}
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
run: |
pip install bump-my-version PyGithub packaging
bash ci/bump_version.sh ${{ inputs.type }} ${{ inputs.bump-minor }} v $COMMIT_BEFORE_BUMP
- name: Push new version tag
if: ${{ !inputs.dry_run }}
pip install bump2version
bumpversion --verbose ${{ inputs.part }}
- name: Push new version and tag
if: ${{ inputs.dry_run }} == "false"
uses: ad-m/github-push-action@master
with:
# Need to use PAT here too to trigger next workflow. See comment above.
github_token: ${{ secrets.LANCEDB_RELEASE_TOKEN }}
branch: ${{ github.ref }}
branch: main
tags: true
- uses: ./.github/workflows/update_package_lock
if: ${{ inputs.dry_run }} == "false"
with:
github_token: ${{ secrets.GITHUB_TOKEN }}
github_token: ${{ secrets.LANCEDB_RELEASE_TOKEN }}


@@ -52,7 +52,8 @@ jobs:
cargo fmt --all -- --check
cargo clippy --all --all-features -- -D warnings
npm ci
npm run lint-ci
npm run lint
npm run chkformat
linux:
name: Linux (NodeJS ${{ matrix.node-version }})
timeout-minutes: 30


@@ -1,9 +1,8 @@
name: NPM Publish
on:
push:
tags:
- 'v*'
release:
types: [published]
jobs:
node:
@@ -275,15 +274,9 @@ jobs:
env:
NODE_AUTH_TOKEN: ${{ secrets.LANCEDB_NPM_REGISTRY_TOKEN }}
run: |
# Tag beta as "preview" instead of default "latest". See lancedb
# npm publish step for more info.
if [[ $GITHUB_REF =~ refs/tags/v(.*)-beta.* ]]; then
PUBLISH_ARGS="--tag preview"
fi
mv */*.tgz .
for filename in *.tgz; do
npm publish $PUBLISH_ARGS $filename
npm publish $filename
done
release-nodejs:
@@ -323,23 +316,11 @@ jobs:
- name: Publish to NPM
env:
NODE_AUTH_TOKEN: ${{ secrets.LANCEDB_NPM_REGISTRY_TOKEN }}
# By default, things are published to the latest tag. This is what is
# installed by default if the user does not specify a version. This is
# good for stable releases, but for pre-releases, we want to publish to
# the "preview" tag so they can install with `npm install lancedb@preview`.
# See: https://medium.com/@mbostock/prereleases-and-npm-e778fc5e2420
run: |
if [[ $GITHUB_REF =~ refs/tags/v(.*)-beta.* ]]; then
npm publish --access public --tag preview
else
npm publish --access public
fi
run: npm publish --access public
update-package-lock:
needs: [release]
runs-on: ubuntu-latest
permissions:
contents: write
steps:
- name: Checkout
uses: actions/checkout@v4
@@ -350,13 +331,11 @@ jobs:
lfs: true
- uses: ./.github/workflows/update_package_lock
with:
github_token: ${{ secrets.GITHUB_TOKEN }}
github_token: ${{ secrets.LANCEDB_RELEASE_TOKEN }}
update-package-lock-nodejs:
needs: [release-nodejs]
runs-on: ubuntu-latest
permissions:
contents: write
steps:
- name: Checkout
uses: actions/checkout@v4
@@ -367,70 +346,4 @@ jobs:
lfs: true
- uses: ./.github/workflows/update_package_lock_nodejs
with:
github_token: ${{ secrets.GITHUB_TOKEN }}
gh-release:
runs-on: ubuntu-latest
permissions:
contents: write
steps:
- uses: actions/checkout@v4
with:
fetch-depth: 0
lfs: true
- name: Extract version
id: extract_version
env:
GITHUB_REF: ${{ github.ref }}
run: |
set -e
echo "Extracting tag and version from $GITHUB_REF"
if [[ $GITHUB_REF =~ refs/tags/v(.*) ]]; then
VERSION=${BASH_REMATCH[1]}
TAG=v$VERSION
echo "tag=$TAG" >> $GITHUB_OUTPUT
echo "version=$VERSION" >> $GITHUB_OUTPUT
else
echo "Failed to extract version from $GITHUB_REF"
exit 1
fi
echo "Extracted version $VERSION from $GITHUB_REF"
if [[ $VERSION =~ beta ]]; then
echo "This is a beta release"
# Get last release (that is not this one)
FROM_TAG=$(git tag --sort='version:refname' \
| grep ^v \
| grep -vF "$TAG" \
| python ci/semver_sort.py v \
| tail -n 1)
else
echo "This is a stable release"
# Get last stable tag (ignore betas)
FROM_TAG=$(git tag --sort='version:refname' \
| grep ^v \
| grep -vF "$TAG" \
| grep -v beta \
| python ci/semver_sort.py v \
| tail -n 1)
fi
echo "Found from tag $FROM_TAG"
echo "from_tag=$FROM_TAG" >> $GITHUB_OUTPUT
- name: Create Release Notes
id: release_notes
uses: mikepenz/release-changelog-builder-action@v4
with:
configuration: .github/release_notes.json
toTag: ${{ steps.extract_version.outputs.tag }}
fromTag: ${{ steps.extract_version.outputs.from_tag }}
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
- name: Create GH release
uses: softprops/action-gh-release@v2
with:
prerelease: ${{ contains('beta', github.ref) }}
tag_name: ${{ steps.extract_version.outputs.tag }}
token: ${{ secrets.GITHUB_TOKEN }}
generate_release_notes: false
name: Node/Rust LanceDB v${{ steps.extract_version.outputs.version }}
body: ${{ steps.release_notes.outputs.changelog }}
github_token: ${{ secrets.LANCEDB_RELEASE_TOKEN }}


@@ -1,16 +1,18 @@
name: PyPI Publish
on:
push:
tags:
- 'python-v*'
release:
types: [published]
jobs:
linux:
# Only runs on tags that match the python-make-release action
if: startsWith(github.ref, 'refs/tags/python-v')
name: Python ${{ matrix.config.platform }} manylinux${{ matrix.config.manylinux }}
timeout-minutes: 60
strategy:
matrix:
python-minor-version: ["8"]
config:
- platform: x86_64
manylinux: "2_17"
@@ -32,22 +34,25 @@ jobs:
- name: Set up Python
uses: actions/setup-python@v4
with:
python-version: 3.8
python-version: 3.${{ matrix.python-minor-version }}
- uses: ./.github/workflows/build_linux_wheel
with:
python-minor-version: 8
python-minor-version: ${{ matrix.python-minor-version }}
args: "--release --strip ${{ matrix.config.extra_args }}"
arm-build: ${{ matrix.config.platform == 'aarch64' }}
manylinux: ${{ matrix.config.manylinux }}
- uses: ./.github/workflows/upload_wheel
with:
pypi_token: ${{ secrets.LANCEDB_PYPI_API_TOKEN }}
fury_token: ${{ secrets.FURY_TOKEN }}
token: ${{ secrets.LANCEDB_PYPI_API_TOKEN }}
repo: "pypi"
mac:
# Only runs on tags that match the python-make-release action
if: startsWith(github.ref, 'refs/tags/python-v')
timeout-minutes: 60
runs-on: ${{ matrix.config.runner }}
strategy:
matrix:
python-minor-version: ["8"]
config:
- target: x86_64-apple-darwin
runner: macos-13
@@ -58,6 +63,7 @@ jobs:
steps:
- uses: actions/checkout@v4
with:
ref: ${{ inputs.ref }}
fetch-depth: 0
lfs: true
- name: Set up Python
@@ -66,95 +72,38 @@ jobs:
python-version: 3.12
- uses: ./.github/workflows/build_mac_wheel
with:
python-minor-version: 8
python-minor-version: ${{ matrix.python-minor-version }}
args: "--release --strip --target ${{ matrix.config.target }} --features fp16kernels"
- uses: ./.github/workflows/upload_wheel
with:
pypi_token: ${{ secrets.LANCEDB_PYPI_API_TOKEN }}
fury_token: ${{ secrets.FURY_TOKEN }}
python-minor-version: ${{ matrix.python-minor-version }}
token: ${{ secrets.LANCEDB_PYPI_API_TOKEN }}
repo: "pypi"
windows:
# Only runs on tags that match the python-make-release action
if: startsWith(github.ref, 'refs/tags/python-v')
timeout-minutes: 60
runs-on: windows-latest
strategy:
matrix:
python-minor-version: ["8"]
steps:
- uses: actions/checkout@v4
with:
ref: ${{ inputs.ref }}
fetch-depth: 0
lfs: true
- name: Set up Python
uses: actions/setup-python@v4
with:
python-version: 3.8
python-version: 3.${{ matrix.python-minor-version }}
- uses: ./.github/workflows/build_windows_wheel
with:
python-minor-version: 8
python-minor-version: ${{ matrix.python-minor-version }}
args: "--release --strip"
vcpkg_token: ${{ secrets.VCPKG_GITHUB_PACKAGES }}
- uses: ./.github/workflows/upload_wheel
with:
pypi_token: ${{ secrets.LANCEDB_PYPI_API_TOKEN }}
fury_token: ${{ secrets.FURY_TOKEN }}
gh-release:
runs-on: ubuntu-latest
permissions:
contents: write
steps:
- uses: actions/checkout@v4
with:
fetch-depth: 0
lfs: true
- name: Extract version
id: extract_version
env:
GITHUB_REF: ${{ github.ref }}
run: |
set -e
echo "Extracting tag and version from $GITHUB_REF"
if [[ $GITHUB_REF =~ refs/tags/python-v(.*) ]]; then
VERSION=${BASH_REMATCH[1]}
TAG=python-v$VERSION
echo "tag=$TAG" >> $GITHUB_OUTPUT
echo "version=$VERSION" >> $GITHUB_OUTPUT
else
echo "Failed to extract version from $GITHUB_REF"
exit 1
fi
echo "Extracted version $VERSION from $GITHUB_REF"
if [[ $VERSION =~ beta ]]; then
echo "This is a beta release"
# Get last release (that is not this one)
FROM_TAG=$(git tag --sort='version:refname' \
| grep ^python-v \
| grep -vF "$TAG" \
| python ci/semver_sort.py python-v \
| tail -n 1)
else
echo "This is a stable release"
# Get last stable tag (ignore betas)
FROM_TAG=$(git tag --sort='version:refname' \
| grep ^python-v \
| grep -vF "$TAG" \
| grep -v beta \
| python ci/semver_sort.py python-v \
| tail -n 1)
fi
echo "Found from tag $FROM_TAG"
echo "from_tag=$FROM_TAG" >> $GITHUB_OUTPUT
- name: Create Python Release Notes
id: python_release_notes
uses: mikepenz/release-changelog-builder-action@v4
with:
configuration: .github/release_notes.json
toTag: ${{ steps.extract_version.outputs.tag }}
fromTag: ${{ steps.extract_version.outputs.from_tag }}
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
- name: Create Python GH release
uses: softprops/action-gh-release@v2
with:
prerelease: ${{ contains('beta', github.ref) }}
tag_name: ${{ steps.extract_version.outputs.tag }}
token: ${{ secrets.GITHUB_TOKEN }}
generate_release_notes: false
name: Python LanceDB v${{ steps.extract_version.outputs.version }}
body: ${{ steps.python_release_notes.outputs.changelog }}
python-minor-version: ${{ matrix.python-minor-version }}
token: ${{ secrets.LANCEDB_PYPI_API_TOKEN }}
repo: "pypi"


@@ -0,0 +1,56 @@
name: Python - Create release commit
on:
workflow_dispatch:
inputs:
dry_run:
description: 'Dry run (create the local commit/tags but do not push it)'
required: true
default: "false"
type: choice
options:
- "true"
- "false"
part:
description: 'What kind of release is this?'
required: true
default: 'patch'
type: choice
options:
- patch
- minor
- major
jobs:
bump-version:
runs-on: ubuntu-latest
steps:
- name: Check out main
uses: actions/checkout@v4
with:
ref: main
persist-credentials: false
fetch-depth: 0
lfs: true
- name: Set git configs for bumpversion
shell: bash
run: |
git config user.name 'Lance Release'
git config user.email 'lance-dev@lancedb.com'
- name: Set up Python
uses: actions/setup-python@v5
with:
python-version: "3.11"
- name: Bump version, create tag and commit
working-directory: python
run: |
pip install bump2version
bumpversion --verbose ${{ inputs.part }}
- name: Push new version and tag
if: ${{ inputs.dry_run }} == "false"
uses: ad-m/github-push-action@master
with:
github_token: ${{ secrets.LANCEDB_RELEASE_TOKEN }}
branch: main
tags: true


@@ -75,7 +75,7 @@ jobs:
timeout-minutes: 30
strategy:
matrix:
python-minor-version: ["9", "11"]
python-minor-version: ["8", "11"]
runs-on: "ubuntu-22.04"
defaults:
run:


@@ -74,11 +74,11 @@ jobs:
run: |
sudo apt update
sudo apt install -y protobuf-compiler libssl-dev
- name: Build
run: cargo build --all-features
- name: Start S3 integration test environment
working-directory: .
run: docker compose up --detach --wait
- name: Build
run: cargo build --all-features
- name: Run tests
run: cargo test --all-features
- name: Run examples


@@ -2,43 +2,28 @@ name: upload-wheel
description: "Upload wheels to Pypi"
inputs:
pypi_token:
os:
required: true
description: "ubuntu-22.04 or macos-13"
repo:
required: false
description: "pypi or testpypi"
default: "pypi"
token:
required: true
description: "release token for the repo"
fury_token:
required: true
description: "release token for the fury repo"
runs:
using: "composite"
steps:
- name: Install dependencies
shell: bash
run: |
python -m pip install --upgrade pip
pip install twine
- name: Choose repo
shell: bash
id: choose_repo
run: |
if [ ${{ github.ref }} == "*beta*" ]; then
echo "repo=fury" >> $GITHUB_OUTPUT
else
echo "repo=pypi" >> $GITHUB_OUTPUT
fi
- name: Publish to PyPI
shell: bash
env:
FURY_TOKEN: ${{ inputs.fury_token }}
PYPI_TOKEN: ${{ inputs.pypi_token }}
run: |
if [ ${{ steps.choose_repo.outputs.repo }} == "fury" ]; then
WHEEL=$(ls target/wheels/lancedb-*.whl 2> /dev/null | head -n 1)
echo "Uploading $WHEEL to Fury"
curl -f -F package=@$WHEEL https://$FURY_TOKEN@push.fury.io/lancedb/
else
twine upload --repository ${{ steps.choose_repo.outputs.repo }} \
--username __token__ \
--password $PYPI_TOKEN \
target/wheels/lancedb-*.whl
fi
- name: Install dependencies
shell: bash
run: |
python -m pip install --upgrade pip
pip install twine
- name: Publish wheel
env:
TWINE_USERNAME: __token__
TWINE_PASSWORD: ${{ inputs.token }}
shell: bash
run: twine upload --repository ${{ inputs.repo }} target/wheels/lancedb-*.whl

.gitignore vendored

@@ -6,7 +6,7 @@
venv
.vscode
.zed
rust/target
rust/Cargo.lock


@@ -10,12 +10,9 @@ repos:
rev: v0.2.2
hooks:
- id: ruff
- repo: local
- repo: https://github.com/pre-commit/mirrors-prettier
rev: v3.1.0
hooks:
- id: local-biome-check
name: biome check
entry: npx biome check
language: system
types: [text]
- id: prettier
files: "nodejs/.*"
exclude: nodejs/lancedb/native.d.ts|nodejs/dist/.*


@@ -14,22 +14,22 @@ keywords = ["lancedb", "lance", "database", "vector", "search"]
categories = ["database-implementations"]
[workspace.dependencies]
lance = { "version" = "=0.11.0", "features" = ["dynamodb"] }
lance-index = { "version" = "=0.11.0" }
lance-linalg = { "version" = "=0.11.0" }
lance-testing = { "version" = "=0.11.0" }
lance = { "version" = "=0.10.12", "features" = ["dynamodb"] }
lance-index = { "version" = "=0.10.12" }
lance-linalg = { "version" = "=0.10.12" }
lance-testing = { "version" = "=0.10.12" }
# Note that this one does not include pyarrow
arrow = { version = "51.0", optional = false }
arrow-array = "51.0"
arrow-data = "51.0"
arrow-ipc = "51.0"
arrow-ord = "51.0"
arrow-schema = "51.0"
arrow-arith = "51.0"
arrow-cast = "51.0"
arrow = { version = "50.0", optional = false }
arrow-array = "50.0"
arrow-data = "50.0"
arrow-ipc = "50.0"
arrow-ord = "50.0"
arrow-schema = "50.0"
arrow-arith = "50.0"
arrow-cast = "50.0"
async-trait = "0"
chrono = "0.4.35"
half = { "version" = "=2.4.1", default-features = false, features = [
half = { "version" = "=2.3.1", default-features = false, features = [
"num-traits",
] }
futures = "0"


@@ -20,7 +20,7 @@
<hr />
LanceDB is an open-source database for vector-search built with persistent storage, which greatly simplifies retrieval, filtering and management of embeddings.
LanceDB is an open-source database for vector-search built with persistent storage, which greatly simplifies retrevial, filtering and management of embeddings.
The key features of LanceDB include:
@@ -36,7 +36,7 @@ The key features of LanceDB include:
* GPU support in building vector index(*).
* Ecosystem integrations with [LangChain 🦜️🔗](https://python.langchain.com/docs/integrations/vectorstores/lancedb/), [LlamaIndex 🦙](https://gpt-index.readthedocs.io/en/latest/examples/vector_stores/LanceDBIndexDemo.html), Apache-Arrow, Pandas, Polars, DuckDB and more on the way.
* Ecosystem integrations with [LangChain 🦜️🔗](https://python.langchain.com/en/latest/modules/indexes/vectorstores/examples/lanecdb.html), [LlamaIndex 🦙](https://gpt-index.readthedocs.io/en/latest/examples/vector_stores/LanceDBIndexDemo.html), Apache-Arrow, Pandas, Polars, DuckDB and more on the way.
LanceDB's core is written in Rust 🦀 and is built using <a href="https://github.com/lancedb/lance">Lance</a>, an open-source columnar format designed for performant ML workloads.


@@ -1,51 +0,0 @@
set -e
RELEASE_TYPE=${1:-"stable"}
BUMP_MINOR=${2:-false}
TAG_PREFIX=${3:-"v"} # Such as "python-v"
HEAD_SHA=${4:-$(git rev-parse HEAD)}
readonly SELF_DIR=$(cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )
PREV_TAG=$(git tag --sort='version:refname' | grep ^$TAG_PREFIX | python $SELF_DIR/semver_sort.py $TAG_PREFIX | tail -n 1)
echo "Found previous tag $PREV_TAG"
# Initially, we don't want to tag if we are doing stable, because we will bump
# again later. See comment at end for why.
if [[ "$RELEASE_TYPE" == 'stable' ]]; then
BUMP_ARGS="--no-tag"
fi
# If last is stable and not bumping minor
if [[ $PREV_TAG != *beta* ]]; then
if [[ "$BUMP_MINOR" != "false" ]]; then
# X.Y.Z -> X.(Y+1).0-beta.0
bump-my-version bump -vv $BUMP_ARGS minor
else
# X.Y.Z -> X.Y.(Z+1)-beta.0
bump-my-version bump -vv $BUMP_ARGS patch
fi
else
if [[ "$BUMP_MINOR" != "false" ]]; then
# X.Y.Z-beta.N -> X.(Y+1).0-beta.0
bump-my-version bump -vv $BUMP_ARGS minor
else
# X.Y.Z-beta.N -> X.Y.Z-beta.(N+1)
bump-my-version bump -vv $BUMP_ARGS pre_n
fi
fi
# The above bump will always bump to a pre-release version. If we are releasing
# a stable version, bump the pre-release level ("pre_l") to make it stable.
if [[ $RELEASE_TYPE == 'stable' ]]; then
# X.Y.Z-beta.N -> X.Y.Z
bump-my-version bump -vv pre_l
fi
# Validate that we have incremented version appropriately for breaking changes
NEW_TAG=$(git describe --tags --exact-match HEAD)
NEW_VERSION=$(echo $NEW_TAG | sed "s/^$TAG_PREFIX//")
LAST_STABLE_RELEASE=$(git tag --sort='version:refname' | grep ^$TAG_PREFIX | grep -v beta | grep -vF "$NEW_TAG" | python $SELF_DIR/semver_sort.py $TAG_PREFIX | tail -n 1)
LAST_STABLE_VERSION=$(echo $LAST_STABLE_RELEASE | sed "s/^$TAG_PREFIX//")
python $SELF_DIR/check_breaking_changes.py $LAST_STABLE_RELEASE $HEAD_SHA $LAST_STABLE_VERSION $NEW_VERSION


@@ -1,35 +0,0 @@
"""
Check whether there are any breaking changes in the PRs between the base and head commits.
If there are, assert that we have incremented the minor version.
"""
import argparse
import os
from packaging.version import parse
from github import Github
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("base")
parser.add_argument("head")
parser.add_argument("last_stable_version")
parser.add_argument("current_version")
args = parser.parse_args()
repo = Github(os.environ["GITHUB_TOKEN"]).get_repo(os.environ["GITHUB_REPOSITORY"])
commits = repo.compare(args.base, args.head).commits
prs = (pr for commit in commits for pr in commit.get_pulls())
for pr in prs:
if any(label.name == "breaking-change" for label in pr.labels):
print(f"Breaking change in PR: {pr.html_url}")
break
else:
print("No breaking changes found.")
exit(0)
last_stable_version = parse(args.last_stable_version)
current_version = parse(args.current_version)
if current_version.minor <= last_stable_version.minor:
print("Minor version is not greater than the last stable version.")
exit(1)
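The heart of this check is a minor-version comparison. A minimal sketch of that logic with `packaging.version` (the version strings below are illustrative):

```python
from packaging.version import parse

# Illustrative: a breaking change requires the minor component to increase.
last_stable = parse("0.4.17")
candidate = parse("0.5.0-beta.0")  # normalizes to 0.5.0b0

assert candidate.minor > last_stable.minor  # 5 > 4, so the check passes
```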


@@ -1,35 +0,0 @@
"""
Takes a list of semver strings and sorts them in ascending order.
"""
import sys
from packaging.version import parse, InvalidVersion
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("prefix", default="v")
args = parser.parse_args()
# Read the input from stdin
lines = sys.stdin.readlines()
# Parse the versions
versions = []
for line in lines:
line = line.strip()
try:
version_str = line.removeprefix(args.prefix)
version = parse(version_str)
except InvalidVersion:
# There are old tags that don't follow the semver format
print(f"Invalid version: {line}", file=sys.stderr)
continue
versions.append((line, version))
# Sort the versions
versions.sort(key=lambda x: x[1])
# Print the sorted versions as original strings
for line, _ in versions:
print(line)
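For intuition, the script's behavior is equivalent to this in-memory sort (the tag names are made up); the CLI form exists so the shell pipelines in the release scripts can reuse the same ordering:

```python
from packaging.version import parse

tags = ["v0.4.17", "v0.4.20-beta.0", "v0.5.0"]  # illustrative tag names
tags.sort(key=lambda t: parse(t.removeprefix("v")))
print(tags[-1])  # prints "v0.5.0", the latest version
```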


@@ -0,0 +1,280 @@
import argparse
import os
from llama_index.core import SimpleDirectoryReader
from llama_index.core.llama_dataset import LabelledRagDataset
from llama_index.core.node_parser import SentenceSplitter
from lancedb.embeddings.fine_tuner.dataset import QADataset, TextChunk
from lancedb.embeddings.fine_tuner.llm import Openai
from lancedb.embeddings import get_registry
from llama_index.vector_stores.lancedb import LanceDBVectorStore
from llama_index.core import ServiceContext, VectorStoreIndex, StorageContext
from llama_index.embeddings.huggingface import HuggingFaceEmbedding
from llama_index.core.llama_pack import download_llama_pack
import time
import wandb
import pandas as pd
def get_paths_from_dataset(dataset: str, split=True):
"""
Returns paths of:
- downloaded dataset, lance train dataset, lance test dataset, finetuned model
"""
if split:
return (
f"./data/{dataset}",
f"./data/{dataset}_lance_train",
f"./data/{dataset}_lance_test",
f"./data/tuned_{dataset}",
)
return f"./data/{dataset}", f"./data/{dataset}_lance", f"./data/tuned_{dataset}"
def get_llama_dataset(dataset: str):
"""
returns:
- nodes, documents, rag_dataset
"""
if not os.path.exists(f"./data/{dataset}"):
os.system(
f"llamaindex-cli download-llamadataset {dataset} --download-dir ./data/{dataset}" # noqa
)
rag_dataset = LabelledRagDataset.from_json(f"./data/{dataset}/rag_dataset.json")
docs = SimpleDirectoryReader(input_dir=f"./data/{dataset}/source_files").load_data()
parser = SentenceSplitter()
nodes = parser.get_nodes_from_documents(docs)
return nodes, docs, rag_dataset
def lance_dataset_from_llama_nodes(nodes: list, name: str, split=True):
llm = Openai()
chunks = [TextChunk.from_llama_index_node(node) for node in nodes]
# train test split 65-35
if not split:
if os.path.exists(f"./data/{name}_lance"):
ds = QADataset.load(f"./data/{name}_lance")
return ds
ds = QADataset.from_llm(chunks, llm)
ds.save(f"./data/{name}_lance")
return ds
if os.path.exists(f"./data/{name}_lance_train") and os.path.exists(
f"./data/{name}_lance_test"
):
train_ds = QADataset.load(f"./data/{name}_lance_train")
test_ds = QADataset.load(f"./data/{name}_lance_test")
return train_ds, test_ds
# split chunks into train and test
train_size = int(len(chunks) * 0.65)
train_chunks = chunks[:train_size]
test_chunks = chunks[train_size:]
train_ds = QADataset.from_llm(train_chunks, llm)
test_ds = QADataset.from_llm(test_chunks, llm)
train_ds.save(f"./data/{name}_lance_train")
test_ds.save(f"./data/{name}_lance_test")
return train_ds, test_ds
def finetune(
trainset: str, model: str, epochs: int, path: str, valset: str = None, top_k=5
):
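"""
Fine-tune `model` on `trainset`, evaluate hit-rate@top_k on `valset` before
and after tuning, and return (base_hit_rate, tuned_hit_rate).
"""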
print(f"Finetuning {model} for {epochs} epochs")
print(f"trainset query instances: {len(trainset.queries)}")
print(f"valset query instances: {len(valset.queries)}")
valset = valset if valset is not None else trainset
model = get_registry().get("sentence-transformers").create(name=model)
base_result = model.evaluate(valset, path="./data/eval/", top_k=top_k)
base_hit_rate = pd.DataFrame(base_result)["is_hit"].mean()
model.finetune(trainset=trainset, valset=valset, path=path, epochs=epochs)
tuned = get_registry().get("sentence-transformers").create(name=path)
tuned_result = tuned.evaluate(
valset, path=f"./data/eval/{str(time.time())}", top_k=top_k
)
tuned_hit_rate = pd.DataFrame(tuned_result)["is_hit"].mean()
return base_hit_rate, tuned_hit_rate
def do_eval_rag(dataset: str, model: str):
# Requires - pip install llama-index-vector-stores-lancedb
# Requires - pip install llama-index-embeddings-huggingface
nodes, docs, rag_dataset = get_llama_dataset(dataset)
embed_model = HuggingFaceEmbedding(model)
vector_store = LanceDBVectorStore(uri="/tmp/lancedb")
storage_context = StorageContext.from_defaults(vector_store=vector_store)
service_context = ServiceContext.from_defaults(embed_model=embed_model)
index = VectorStoreIndex(
nodes,
service_context=service_context,
show_progress=True,
storage_context=storage_context,
)
# build basic RAG system
index = VectorStoreIndex.from_documents(documents=docs)
query_engine = index.as_query_engine()
# evaluate using the RagEvaluatorPack
RagEvaluatorPack = download_llama_pack("RagEvaluatorPack", "./rag_evaluator_pack")
rag_evaluator_pack = RagEvaluatorPack(
rag_dataset=rag_dataset, query_engine=query_engine
)
metrics = rag_evaluator_pack.run(
batch_size=20, # batches the number of openai api calls to make
sleep_time_in_seconds=1, # seconds to sleep before making an api call
)
return metrics
def main(
dataset,
model,
epochs,
top_k=5,
eval_rag=False,
split=True,
project: str = "lancedb_finetune",
):
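"""
Run the fine-tuning benchmark for one dataset/model pair and log hit rates
(plus optional RAG metrics) to wandb.
"""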
nodes, _, _ = get_llama_dataset(dataset)
trainset = None
valset = None
if split:
trainset, valset = lance_dataset_from_llama_nodes(nodes, dataset, split)
data_path, lance_train_path, lance_test_path, tuned_path = (
get_paths_from_dataset(dataset, split=split)
)
else:
trainset = lance_dataset_from_llama_nodes(nodes, dataset, split)
valset = trainset
data_path, lance_path, tuned_path = get_paths_from_dataset(dataset, split=split)
base_hit_rate, tuned_hit_rate = finetune(
trainset, model, epochs, tuned_path, valset, top_k=top_k
)
# Base model metrics
metrics = do_eval_rag(dataset, model) if eval_rag else {}
# Tuned model metrics
metrics_tuned = do_eval_rag(dataset, tuned_path) if eval_rag else {}
wandb.init(project=project, name=f"{dataset}_{model}_{epochs}")
wandb.log(
{
"hit_rate": tuned_hit_rate,
}
)
wandb.log(metrics_tuned)
wandb.finish()
wandb.init(project=project, name=f"{dataset}_{model}_base")
wandb.log(
{
"hit_rate": base_hit_rate,
}
)
wandb.log(metrics)
wandb.finish()
def benchmark_all():
datasets = [
"Uber10KDataset2021",
"MiniTruthfulQADataset",
"MiniSquadV2Dataset",
"MiniEsgBenchDataset",
"MiniCovidQaDataset",
"Llama2PaperDataset",
"HistoryOfAlexnetDataset",
"PatronusAIFinanceBenchDataset",
]
models = ["BAAI/bge-small-en-v1.5"]
top_ks = [5]
for top_k in top_ks:
for model in models:
for dataset in datasets:
main(dataset, model, 5, top_k=top_k)
if __name__ == "__main__":
"""
Benchmark the fine-tuning process for a given dataset and model.
Usage:
- For a single dataset
python lancedb/docs/benchmarks/llama-index-datasets.py --dataset Uber10KDataset2021 --model BAAI/bge-small-en-v1.5 --epochs 4 --top_k 5 --split 1 --project lancedb_finetune
- For all datasets and models across all top_ks
python lancedb/docs/benchmarks/llama-index-datasets.py --benchmark-all
"""
parser = argparse.ArgumentParser()
parser.add_argument(
"--dataset",
type=str,
default="BraintrustCodaHelpDeskDataset",
help="The dataset to use for fine-tuning",
)
parser.add_argument(
"--model",
type=str,
default="BAAI/bge-small-en-v1.5",
help="The model to use for fine-tuning",
)
parser.add_argument(
"--epochs",
type=int,
default=4,
help="The number of epochs to fine-tune the model",
)
parser.add_argument(
"--project",
type=str,
default="lancedb_finetune",
help="The wandb project to log the results",
)
parser.add_argument(
"--top_k", type=int, default=5, help="The number of top results to evaluate"
)
parser.add_argument(
"--split",
type=int,
default=1,
help="Whether to split the dataset into train and test(65-35 split), default is 1",
)
parser.add_argument(
"--eval-rag",
action="store_true",
default=False,
help="Whether to evaluate the model using RAG",
)
parser.add_argument(
"--benchmark-all",
action="store_true",
default=False,
help="Benchmark all datasets across all models and top_ks",
)
args = parser.parse_args()
if args.benchmark_all:
benchmark_all()
else:
main(
args.dataset,
args.model,
args.epochs,
args.top_k,
args.eval_rag,
args.split,
args.project,
)


@@ -119,7 +119,7 @@ nav:
- Polars: python/polars_arrow.md
- DuckDB: python/duckdb.md
- LangChain:
- LangChain 🔗: integrations/langchain.md
- LangChain 🔗: https://python.langchain.com/docs/integrations/vectorstores/lancedb/
- LangChain JS/TS 🔗: https://js.langchain.com/docs/integrations/vectorstores/lancedb
- LlamaIndex 🦙: https://docs.llamaindex.ai/en/stable/examples/vector_stores/LanceDBIndexDemo/
- Pydantic: python/pydantic.md


@@ -44,36 +44,6 @@
!!! info "Please also make sure you're using the same version of Arrow as in the [lancedb crate](https://github.com/lancedb/lancedb/blob/main/Cargo.toml)"
### Preview releases
Stable releases are created about every 2 weeks. For the latest features and bug
fixes, you can install the preview release. These releases receive the same
level of testing as stable releases, but are not guaranteed to be available for
more than 6 months after they are released. Once your application is stable, we
recommend switching to stable releases.
=== "Python"
```shell
pip install --pre --extra-index-url https://pypi.fury.io/lancedb/ lancedb
```
=== "Typescript"
```shell
npm install vectordb@preview
```
=== "Rust"
We don't push preview releases to crates.io, but you can reference the tag
in GitHub within your Cargo dependencies:
```toml
[dependencies]
lancedb = { git = "https://github.com/lancedb/lancedb.git", tag = "vX.Y.Z-beta.N" }
```
## Connect to a database
=== "Python"


@@ -159,7 +159,7 @@ Allows you to set parameters when registering a `sentence-transformers` object.
from lancedb.embeddings import get_registry
db = lancedb.connect("/tmp/db")
model = get_registry().get("sentence-transformers").create(name="BAAI/bge-small-en-v1.5", device="cpu")
model = get_registry.get("sentence-transformers").create(name="BAAI/bge-small-en-v1.5", device="cpu")
class Words(LanceModel):
text: str = model.SourceField()
@@ -206,44 +206,6 @@ print(actual.text)
```
### Ollama embeddings
Generate embeddings via the [ollama](https://github.com/ollama/ollama-python) python library. More details:
- [Ollama docs on embeddings](https://github.com/ollama/ollama/blob/main/docs/api.md#generate-embeddings)
- [Ollama blog on embeddings](https://ollama.com/blog/embedding-models)
| Parameter | Type | Default Value | Description |
|------------------------|----------------------------|--------------------------|------------------------------------------------------------------------------------------------------------------------------------------------|
| `name` | `str` | `nomic-embed-text` | The name of the model. |
| `host` | `str` | `http://localhost:11434` | The Ollama host to connect to. |
| `options` | `ollama.Options` or `dict` | `None` | Additional model parameters listed in the documentation for the [Modelfile](./modelfile.md#valid-parameters-and-values) such as `temperature`. |
| `keep_alive` | `float` or `str` | `"5m"` | Controls how long the model will stay loaded into memory following the request. |
| `ollama_client_kwargs` | `dict` | `{}` | kwargs that can be passed to the `ollama.Client`. |
```python
import lancedb
from lancedb.pydantic import LanceModel, Vector
from lancedb.embeddings import get_registry
db = lancedb.connect("/tmp/db")
func = get_registry().get("ollama").create(name="nomic-embed-text")
class Words(LanceModel):
text: str = func.SourceField()
vector: Vector(func.ndims()) = func.VectorField()
table = db.create_table("words", schema=Words, mode="overwrite")
table.add([
{"text": "hello world"},
{"text": "goodbye world"}
])
query = "greetings"
actual = table.search(query).limit(1).to_pydantic(Words)[0]
print(actual.text)
```
### OpenAI embeddings
LanceDB registers the OpenAI embeddings function in the registry by default, as `openai`. Below are the parameters that you can customize when creating the instances:
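Usage follows the same registry pattern as the Ollama example above (a minimal sketch; the model name and schema are illustrative):

```python
import lancedb
from lancedb.pydantic import LanceModel, Vector
from lancedb.embeddings import get_registry

db = lancedb.connect("/tmp/db")
func = get_registry().get("openai").create(name="text-embedding-ada-002")

class Words(LanceModel):
    text: str = func.SourceField()
    vector: Vector(func.ndims()) = func.VectorField()

table = db.create_table("words", schema=Words, mode="overwrite")
table.add([{"text": "hello world"}])
```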


@@ -46,7 +46,7 @@ For this purpose, LanceDB introduces an **embedding functions API**, that allow
```python
class Pets(LanceModel):
vector: Vector(clip.ndims()) = clip.VectorField()
vector: Vector(clip.ndims) = clip.VectorField()
image_uri: str = clip.SourceField()
```
@@ -149,7 +149,7 @@ You can also use the integration for adding utility operations in the schema. Fo
```python
class Pets(LanceModel):
vector: Vector(clip.ndims()) = clip.VectorField()
vector: Vector(clip.ndims) = clip.VectorField()
image_uri: str = clip.SourceField()
@property
@@ -166,4 +166,4 @@ rs[2].image
![](../assets/dog_clip_output.png)
Now that you have the basic idea about LanceDB embedding functions and the embedding function registry,
let's dive deeper into defining your own [custom functions](./custom_embedding_function.md).
let's dive deeper into defining your own [custom functions](./custom_embedding_function.md).


@@ -299,14 +299,6 @@ LanceDB can also connect to S3-compatible stores, such as MinIO. To do so, you m
This can also be done with the ``AWS_ENDPOINT`` and ``AWS_DEFAULT_REGION`` environment variables.
!!! tip "Local servers"
For local development, the server often has a `http` endpoint rather than a
secure `https` endpoint. In this case, you must also set the `ALLOW_HTTP`
environment variable to `true` to allow non-TLS connections, or pass the
storage option `allow_http` as `true`. If you do not do this, you will get
an error like `URL scheme is not allowed`.
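For example, a connection to a local MinIO server might look like the sketch below; the endpoint, bucket, and region values are placeholders, and passing `storage_options` to `connect` is an assumption based on the storage options described above:

```python
import lancedb

# Sketch: connect to a local MinIO bucket over plain HTTP (values are placeholders).
db = lancedb.connect(
    "s3://my-bucket/my-db",
    storage_options={
        "aws_endpoint": "http://localhost:9000",  # local, non-TLS endpoint
        "aws_default_region": "us-east-1",
        "allow_http": "true",  # required for http:// endpoints
    },
)
```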
#### S3 Express
LanceDB supports [S3 Express One Zone](https://aws.amazon.com/s3/storage-classes/express-one-zone/) endpoints, but requires additional configuration. Also, S3 Express endpoints only support connecting from an EC2 instance within the same region.


@@ -13,7 +13,7 @@ Get started using these examples and quick links.
| Integrations | |
|---|---:|
| <h3> LlamaIndex </h3>LlamaIndex is a simple, flexible data framework for connecting custom data sources to large language models. Llama index integrates with LanceDB as the serverless VectorDB. <h3>[Lean More](https://gpt-index.readthedocs.io/en/latest/examples/vector_stores/LanceDBIndexDemo.html) </h3> |<img src="../assets/llama-index.jpg" alt="image" width="150" height="auto">|
| <h3>Langchain</h3>Langchain allows building applications with LLMs through composability <h3>[Lean More](https://lancedb.github.io/lancedb/integrations/langchain/) | <img src="../assets/langchain.png" alt="image" width="150" height="auto">|
| <h3>Langchain</h3>Langchain allows building applications with LLMs through composability <h3>[Lean More](https://python.langchain.com/docs/integrations/vectorstores/lancedb) | <img src="../assets/langchain.png" alt="image" width="150" height="auto">|
| <h3>Langchain TS</h3> Javascript bindings for Langchain. It integrates with LanceDB's serverless vectordb allowing you to build powerful AI applications through composibility using only serverless functions. <h3>[Learn More]( https://js.langchain.com/docs/modules/data_connection/vectorstores/integrations/lancedb) | <img src="../assets/langchain.png" alt="image" width="150" height="auto">|
| <h3>Voxel51</h3> It is an open source toolkit that enables you to build better computer vision workflows by improving the quality of your datasets and delivering insights about your models.<h3>[Learn More](./voxel51.md) | <img src="../assets/voxel.gif" alt="image" width="150" height="auto">|
| <h3>PromptTools</h3> Offers a set of free, open-source tools for testing and experimenting with models, prompts, and configurations. The core idea is to enable developers to evaluate prompts using familiar interfaces like code and notebooks. You can use it to experiment with different configurations of LanceDB, and test how LanceDB integrates with the LLM of your choice.<h3>[Learn More](./prompttools.md) | <img src="../assets/prompttools.jpeg" alt="image" width="150" height="auto">|


@@ -1,92 +0,0 @@
# Langchain
![Illustration](../assets/langchain.png)
## Quick Start
You can load your document data using LangChain's loaders; this example uses `TextLoader` for the documents and `OpenAIEmbeddings` as the embedding model.
```python
import os
from langchain.document_loaders import TextLoader
from langchain.vectorstores import LanceDB
from langchain_openai import OpenAIEmbeddings
from langchain_text_splitters import CharacterTextSplitter
os.environ["OPENAI_API_KEY"] = "sk-..."
loader = TextLoader("../../modules/state_of_the_union.txt") # Replace with your data path
documents = loader.load()
documents = CharacterTextSplitter().split_documents(documents)
embeddings = OpenAIEmbeddings()
docsearch = LanceDB.from_documents(documents, embeddings)
query = "What did the president say about Ketanji Brown Jackson"
docs = docsearch.similarity_search(query)
print(docs[0].page_content)
```
## Documentation
In the example above, the `LanceDB` vector store object is created with the `from_documents()` classmethod, which returns an initialized instance.
You can also use the `LanceDB.from_texts(texts: List[str], embedding: Embeddings)` classmethod.
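For example (a minimal sketch; the sample texts are placeholders):

```python
from langchain.vectorstores import LanceDB
from langchain_openai import OpenAIEmbeddings

vector_store = LanceDB.from_texts(
    ["LanceDB is a vector database", "LangChain integrates with LanceDB"],
    OpenAIEmbeddings(),
)
```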
The full list of parameters for the `LanceDB` vector store is:
- `connection`: (Optional) `lancedb.db.LanceDBConnection` connection object to use. If not provided, a new connection will be created.
- `embedding`: Langchain embedding model.
- `vector_key`: (Optional) Column name to use for vectors in the table. Defaults to `'vector'`.
- `id_key`: (Optional) Column name to use for ids in the table. Defaults to `'id'`.
- `text_key`: (Optional) Column name to use for text in the table. Defaults to `'text'`.
- `table_name`: (Optional) Name of your table in the database. Defaults to `'vectorstore'`.
- `api_key`: (Optional) API key to use for LanceDB cloud database. Defaults to `None`.
- `region`: (Optional) Region to use for LanceDB cloud database. Only for LanceDB Cloud, defaults to `None`.
- `mode`: (Optional) Mode to use for adding data to the table. Defaults to `'overwrite'`.
```python
db_url = "db://lang_test" # url of db you created
api_key = "xxxxx" # your API key
region="us-east-1-dev" # your selected region
vector_store = LanceDB(
uri=db_url,
api_key=api_key, # (don't include for local API)
region=region, # (don't include for local API)
embedding=embeddings,
table_name='langchain_test' #Optional
)
```
### Methods
To add texts and store respective embeddings automatically:
##### add_texts()
- `texts`: `Iterable` of strings to add to the vectorstore.
- `metadatas`: Optional `list[dict()]` of metadatas associated with the texts.
- `ids`: Optional `list` of ids to associate with the texts.
```python
vector_store.add_texts(texts = ['test_123'], metadatas =[{'source' :'wiki'}])
# Additionally, to explore the table you can load it into a DataFrame or save it as a CSV file:
tbl = vector_store.get_table()
print("tbl:", tbl)
pd_df = tbl.to_pandas()
pd_df.to_csv("docsearch.csv", index=False)
# you can also create a new vector store object using an older connection object:
vector_store = LanceDB(connection=tbl, embedding=embeddings)
```
For index creation, make sure your table has enough data in it. An ANN index is usually not needed for datasets of ~100K vectors or fewer. For large-scale (>1M) or higher-dimension vectors, it is beneficial to create an ANN index.
##### create_index()
- `col_name`: `Optional[str] = None`
- `vector_col`: `Optional[str] = None`
- `num_partitions`: `Optional[int] = 256`
- `num_sub_vectors`: `Optional[int] = 96`
- `index_cache_size`: `Optional[int] = None`
```python
# for creating vector index
vector_store.create_index(vector_col='vector', metric = 'cosine')
# for creating a scalar index (for non-vector columns)
vector_store.create_index(col_name='text')
```


@@ -36,7 +36,7 @@
}
],
"source": [
"!pip install --quiet openai datasets\n",
"!pip install --quiet openai datasets \n",
"!pip install --quiet -U lancedb"
]
},
@@ -213,7 +213,7 @@
"if \"OPENAI_API_KEY\" not in os.environ:\n",
" # OR set the key here as a variable\n",
" os.environ[\"OPENAI_API_KEY\"] = \"sk-...\"\n",
"\n",
" \n",
"client = OpenAI()\n",
"assert len(client.models.list().data) > 0"
]
@@ -234,12 +234,9 @@
"metadata": {},
"outputs": [],
"source": [
"def embed_func(c):\n",
"def embed_func(c): \n",
" rs = client.embeddings.create(input=c, model=\"text-embedding-ada-002\")\n",
" return [\n",
" data.embedding\n",
" for data in rs.data\n",
" ]"
" return [rs.data[0].embedding]"
]
},
{
@@ -517,7 +514,7 @@
" prompt_start +\n",
" \"\\n\\n---\\n\\n\".join(context.text) +\n",
" prompt_end\n",
" )\n",
" ) \n",
" return prompt"
]
},


@@ -8,7 +8,6 @@ excluded_globs = [
"../src/embedding.md",
"../src/examples/*.md",
"../src/integrations/voxel51.md",
"../src/integrations/langchain.md",
"../src/guides/tables.md",
"../src/python/duckdb.md",
"../src/embeddings/*.md",

node/package-lock.json generated

@@ -1,12 +1,12 @@
{
"name": "vectordb",
"version": "0.5.0",
"version": "0.4.17",
"lockfileVersion": 3,
"requires": true,
"packages": {
"": {
"name": "vectordb",
"version": "0.5.0",
"version": "0.4.17",
"cpu": [
"x64",
"arm64"
@@ -52,11 +52,11 @@
"uuid": "^9.0.0"
},
"optionalDependencies": {
"@lancedb/vectordb-darwin-arm64": "0.4.20",
"@lancedb/vectordb-darwin-x64": "0.4.20",
"@lancedb/vectordb-linux-arm64-gnu": "0.4.20",
"@lancedb/vectordb-linux-x64-gnu": "0.4.20",
"@lancedb/vectordb-win32-x64-msvc": "0.4.20"
"@lancedb/vectordb-darwin-arm64": "0.4.17",
"@lancedb/vectordb-darwin-x64": "0.4.17",
"@lancedb/vectordb-linux-arm64-gnu": "0.4.17",
"@lancedb/vectordb-linux-x64-gnu": "0.4.17",
"@lancedb/vectordb-win32-x64-msvc": "0.4.17"
},
"peerDependencies": {
"@apache-arrow/ts": "^14.0.2",
@@ -333,66 +333,6 @@
"@jridgewell/sourcemap-codec": "^1.4.10"
}
},
"node_modules/@lancedb/vectordb-darwin-arm64": {
"version": "0.4.20",
"resolved": "https://registry.npmjs.org/@lancedb/vectordb-darwin-arm64/-/vectordb-darwin-arm64-0.4.20.tgz",
"integrity": "sha512-ffP2K4sA5mQTgePyARw1y8dPN996FmpvyAYoWO+TSItaXlhcXvc+KVa5udNMCZMDYeEnEv2Xpj6k4PwW3oBz+A==",
"cpu": [
"arm64"
],
"optional": true,
"os": [
"darwin"
]
},
"node_modules/@lancedb/vectordb-darwin-x64": {
"version": "0.4.20",
"resolved": "https://registry.npmjs.org/@lancedb/vectordb-darwin-x64/-/vectordb-darwin-x64-0.4.20.tgz",
"integrity": "sha512-GSYsXE20RIehDu30FjREhJdEzhnwOTV7ZsrSXagStzLY1gr7pyd7sfqxmmUtdD09di7LnQoiM71AOpPTa01YwQ==",
"cpu": [
"x64"
],
"optional": true,
"os": [
"darwin"
]
},
"node_modules/@lancedb/vectordb-linux-arm64-gnu": {
"version": "0.4.20",
"resolved": "https://registry.npmjs.org/@lancedb/vectordb-linux-arm64-gnu/-/vectordb-linux-arm64-gnu-0.4.20.tgz",
"integrity": "sha512-FpNOjOsz3nJVm6EBGyNgbOW2aFhsWZ/igeY45Z8hbZaaK2YBwrg/DASoNlUzgv6IR8cUaGJ2irNVJfsKR2cG6g==",
"cpu": [
"arm64"
],
"optional": true,
"os": [
"linux"
]
},
"node_modules/@lancedb/vectordb-linux-x64-gnu": {
"version": "0.4.20",
"resolved": "https://registry.npmjs.org/@lancedb/vectordb-linux-x64-gnu/-/vectordb-linux-x64-gnu-0.4.20.tgz",
"integrity": "sha512-pOqWjrRZQSrLTlQPkjidRii7NZDw8Xu9pN6ouVu2JAK8n81FXaPtFCyAI+Y3v9GpnYDN0rvD4eQ36aHAVPsa2g==",
"cpu": [
"x64"
],
"optional": true,
"os": [
"linux"
]
},
"node_modules/@lancedb/vectordb-win32-x64-msvc": {
"version": "0.4.20",
"resolved": "https://registry.npmjs.org/@lancedb/vectordb-win32-x64-msvc/-/vectordb-win32-x64-msvc-0.4.20.tgz",
"integrity": "sha512-5J5SsYSJ7jRCmU/sgwVHdrGz43B/7R2T9OEoFTKyVAtqTZdu75rkytXyn9SyEayXVhlUOaw76N0ASm0hAoDS/A==",
"cpu": [
"x64"
],
"optional": true,
"os": [
"win32"
]
},
"node_modules/@neon-rs/cli": {
"version": "0.0.160",
"resolved": "https://registry.npmjs.org/@neon-rs/cli/-/cli-0.0.160.tgz",


@@ -1,6 +1,6 @@
{
"name": "vectordb",
"version": "0.5.0",
"version": "0.4.17",
"description": " Serverless, low-latency vector database for AI applications",
"main": "dist/index.js",
"types": "dist/index.d.ts",
@@ -88,10 +88,10 @@
}
},
"optionalDependencies": {
"@lancedb/vectordb-darwin-arm64": "0.4.20",
"@lancedb/vectordb-darwin-x64": "0.4.20",
"@lancedb/vectordb-linux-arm64-gnu": "0.4.20",
"@lancedb/vectordb-linux-x64-gnu": "0.4.20",
"@lancedb/vectordb-win32-x64-msvc": "0.4.20"
"@lancedb/vectordb-darwin-arm64": "0.4.17",
"@lancedb/vectordb-darwin-x64": "0.4.17",
"@lancedb/vectordb-linux-arm64-gnu": "0.4.17",
"@lancedb/vectordb-linux-x64-gnu": "0.4.17",
"@lancedb/vectordb-win32-x64-msvc": "0.4.17"
}
}


@@ -27,23 +27,23 @@ import {
RecordBatch,
makeData,
Struct,
type Float,
Float,
DataType,
Binary,
Float32
} from "apache-arrow";
import { type EmbeddingFunction } from "./index";
import { sanitizeSchema } from "./sanitize";
} from 'apache-arrow'
import { type EmbeddingFunction } from './index'
import { sanitizeSchema } from './sanitize'
/*
* Options to control how a column should be converted to a vector array
*/
export class VectorColumnOptions {
/** Vector column type. */
type: Float = new Float32();
type: Float = new Float32()
constructor(values?: Partial<VectorColumnOptions>) {
Object.assign(this, values);
constructor (values?: Partial<VectorColumnOptions>) {
Object.assign(this, values)
}
}
@@ -60,7 +60,7 @@ export class MakeArrowTableOptions {
* The schema must be specified if there are no records (e.g. to make
* an empty table)
*/
schema?: Schema;
schema?: Schema
/*
* Mapping from vector column name to expected type
@@ -80,9 +80,7 @@ export class MakeArrowTableOptions {
*/
vectorColumns: Record<string, VectorColumnOptions> = {
vector: new VectorColumnOptions()
};
embeddings?: EmbeddingFunction<any>;
}
/**
* If true then string columns will be encoded with dictionary encoding
@@ -93,10 +91,10 @@ export class MakeArrowTableOptions {
*
* If `schema` is provided then this property is ignored.
*/
dictionaryEncodeStrings: boolean = false;
dictionaryEncodeStrings: boolean = false
constructor(values?: Partial<MakeArrowTableOptions>) {
Object.assign(this, values);
constructor (values?: Partial<MakeArrowTableOptions>) {
Object.assign(this, values)
}
}
@@ -195,68 +193,59 @@ export class MakeArrowTableOptions {
* assert.deepEqual(table.schema, schema)
* ```
*/
export function makeArrowTable(
export function makeArrowTable (
data: Array<Record<string, any>>,
options?: Partial<MakeArrowTableOptions>
): ArrowTable {
if (
data.length === 0 &&
(options?.schema === undefined || options?.schema === null)
) {
throw new Error("At least one record or a schema needs to be provided");
if (data.length === 0 && (options?.schema === undefined || options?.schema === null)) {
throw new Error('At least one record or a schema needs to be provided')
}
const opt = new MakeArrowTableOptions(options !== undefined ? options : {});
const opt = new MakeArrowTableOptions(options !== undefined ? options : {})
if (opt.schema !== undefined && opt.schema !== null) {
opt.schema = sanitizeSchema(opt.schema);
opt.schema = validateSchemaEmbeddings(opt.schema, data, opt.embeddings);
opt.schema = sanitizeSchema(opt.schema)
}
const columns: Record<string, Vector> = {};
const columns: Record<string, Vector> = {}
// TODO: sample dataset to find missing columns
// Prefer the field ordering of the schema, if present
const columnNames =
opt.schema != null ? (opt.schema.names as string[]) : Object.keys(data[0]);
const columnNames = ((opt.schema) != null) ? (opt.schema.names as string[]) : Object.keys(data[0])
for (const colName of columnNames) {
if (
data.length !== 0 &&
!Object.prototype.hasOwnProperty.call(data[0], colName)
) {
if (data.length !== 0 && !Object.prototype.hasOwnProperty.call(data[0], colName)) {
// The field is present in the schema, but not in the data, skip it
continue;
continue
}
// Extract a single column from the records (transpose from row-major to col-major)
let values = data.map((datum) => datum[colName]);
let values = data.map((datum) => datum[colName])
// By default (type === undefined) arrow will infer the type from the JS type
let type;
let type
if (opt.schema !== undefined) {
// If there is a schema provided, then use that for the type instead
type = opt.schema?.fields.filter((f) => f.name === colName)[0]?.type;
type = opt.schema?.fields.filter((f) => f.name === colName)[0]?.type
if (DataType.isInt(type) && type.bitWidth === 64) {
// wrap in BigInt to avoid bug: https://github.com/apache/arrow/issues/40051
values = values.map((v) => {
if (v === null) {
return v;
return v
}
return BigInt(v);
});
return BigInt(v)
})
}
} else {
// Otherwise, check to see if this column is one of the vector columns
// defined by opt.vectorColumns and, if so, use the fixed size list type
const vectorColumnOptions = opt.vectorColumns[colName];
const vectorColumnOptions = opt.vectorColumns[colName]
if (vectorColumnOptions !== undefined) {
type = newVectorType(values[0].length, vectorColumnOptions.type);
type = newVectorType(values[0].length, vectorColumnOptions.type)
}
}
try {
// Convert an Array of JS values to an arrow vector
columns[colName] = makeVector(values, type, opt.dictionaryEncodeStrings);
columns[colName] = makeVector(values, type, opt.dictionaryEncodeStrings)
} catch (error: unknown) {
// eslint-disable-next-line @typescript-eslint/restrict-template-expressions
throw Error(`Could not convert column "${colName}" to Arrow: ${error}`);
throw Error(`Could not convert column "${colName}" to Arrow: ${error}`)
}
}
@@ -271,116 +260,97 @@ export function makeArrowTable(
// To work around this we first create a table with the wrong schema and
// then patch the schema of the batches so we can use
// `new ArrowTable(schema, batches)` which does not do any schema inference
const firstTable = new ArrowTable(columns);
const batchesFixed = firstTable.batches.map(
// eslint-disable-next-line @typescript-eslint/no-non-null-assertion
(batch) => new RecordBatch(opt.schema!, batch.data)
);
return new ArrowTable(opt.schema, batchesFixed);
const firstTable = new ArrowTable(columns)
// eslint-disable-next-line @typescript-eslint/no-non-null-assertion
const batchesFixed = firstTable.batches.map(batch => new RecordBatch(opt.schema!, batch.data))
return new ArrowTable(opt.schema, batchesFixed)
} else {
return new ArrowTable(columns);
return new ArrowTable(columns)
}
}
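
For reference, a minimal sketch of calling the refactored `makeArrowTable`; the record shape, the `Float16` element type, and the `./arrow` import path are illustrative assumptions rather than part of this diff:

```ts
import { Float16 } from "apache-arrow";
import { makeArrowTable, VectorColumnOptions } from "./arrow"; // path assumed

// Two rows with a 2-dimensional vector column; vectorColumns maps a column
// name to the desired element type (Float32 if left unspecified).
const table = makeArrowTable(
  [
    { id: 1, vector: [0.1, 0.2] },
    { id: 2, vector: [0.3, 0.4] },
  ],
  { vectorColumns: { vector: new VectorColumnOptions({ type: new Float16() }) } }
);
console.log(table.numRows); // 2
```
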
/**
* Create an empty Arrow table with the provided schema
*/
export function makeEmptyTable(schema: Schema): ArrowTable {
return makeArrowTable([], { schema });
export function makeEmptyTable (schema: Schema): ArrowTable {
return makeArrowTable([], { schema })
}
// Helper function to convert Array<Array<any>> to a variable sized list array
function makeListVector(lists: any[][]): Vector<any> {
function makeListVector (lists: any[][]): Vector<any> {
if (lists.length === 0 || lists[0].length === 0) {
throw Error("Cannot infer list vector from empty array or empty list");
throw Error('Cannot infer list vector from empty array or empty list')
}
const sampleList = lists[0];
let inferredType;
const sampleList = lists[0]
let inferredType
try {
const sampleVector = makeVector(sampleList);
inferredType = sampleVector.type;
const sampleVector = makeVector(sampleList)
inferredType = sampleVector.type
} catch (error: unknown) {
// eslint-disable-next-line @typescript-eslint/restrict-template-expressions
throw Error(`Cannot infer list vector. Cannot infer inner type: ${error}`);
throw Error(`Cannot infer list vector. Cannot infer inner type: ${error}`)
}
const listBuilder = makeBuilder({
type: new List(new Field("item", inferredType, true))
});
type: new List(new Field('item', inferredType, true))
})
for (const list of lists) {
listBuilder.append(list);
listBuilder.append(list)
}
return listBuilder.finish().toVector();
return listBuilder.finish().toVector()
}
// Helper function to convert an Array of JS values to an Arrow Vector
function makeVector(
values: any[],
type?: DataType,
stringAsDictionary?: boolean
): Vector<any> {
function makeVector (values: any[], type?: DataType, stringAsDictionary?: boolean): Vector<any> {
if (type !== undefined) {
// No need for inference, let Arrow create it
return vectorFromArray(values, type);
return vectorFromArray(values, type)
}
if (values.length === 0) {
throw Error(
"makeVector requires at least one value or the type must be specfied"
);
throw Error('makeVector requires at least one value or the type must be specified')
}
const sampleValue = values.find((val) => val !== null && val !== undefined);
const sampleValue = values.find(val => val !== null && val !== undefined)
if (sampleValue === undefined) {
throw Error(
"makeVector cannot infer the type if all values are null or undefined"
);
throw Error('makeVector cannot infer the type if all values are null or undefined')
}
if (Array.isArray(sampleValue)) {
// Default Arrow inference doesn't handle list types
return makeListVector(values);
return makeListVector(values)
} else if (Buffer.isBuffer(sampleValue)) {
// Default Arrow inference doesn't handle Buffer
return vectorFromArray(values, new Binary());
} else if (
!(stringAsDictionary ?? false) &&
(typeof sampleValue === "string" || sampleValue instanceof String)
) {
return vectorFromArray(values, new Binary())
} else if (!(stringAsDictionary ?? false) && (typeof sampleValue === 'string' || sampleValue instanceof String)) {
// If the type is string then don't use Arrow's default inference unless dictionaries are requested
// because it will always use dictionary encoding for strings
return vectorFromArray(values, new Utf8());
return vectorFromArray(values, new Utf8())
} else {
// Convert a JS array of values to an arrow vector
return vectorFromArray(values);
return vectorFromArray(values)
}
}
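
`makeVector` is module-private, but its string-handling rule can be sketched directly against apache-arrow, which is where the behavior described in the comments comes from:

```ts
import { vectorFromArray, Utf8, Dictionary } from "apache-arrow";

// Arrow's default inference dictionary-encodes strings, which is why
// makeVector forces Utf8 unless stringAsDictionary is requested.
const forced = vectorFromArray(["a", "b", "a"], new Utf8());
const inferred = vectorFromArray(["a", "b", "a"]);
console.log(forced.type instanceof Utf8); // true
console.log(inferred.type instanceof Dictionary); // true
```
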
async function applyEmbeddings<T>(
table: ArrowTable,
embeddings?: EmbeddingFunction<T>,
schema?: Schema
): Promise<ArrowTable> {
async function applyEmbeddings<T> (table: ArrowTable, embeddings?: EmbeddingFunction<T>, schema?: Schema): Promise<ArrowTable> {
if (embeddings == null) {
return table;
return table
}
if (schema !== undefined && schema !== null) {
schema = sanitizeSchema(schema);
schema = sanitizeSchema(schema)
}
// Convert from ArrowTable to Record<String, Vector>
const colEntries = [...Array(table.numCols).keys()].map((_, idx) => {
const name = table.schema.fields[idx].name;
const name = table.schema.fields[idx].name
// eslint-disable-next-line @typescript-eslint/no-non-null-assertion
const vec = table.getChildAt(idx)!;
return [name, vec];
});
const newColumns = Object.fromEntries(colEntries);
const vec = table.getChildAt(idx)!
return [name, vec]
})
const newColumns = Object.fromEntries(colEntries)
const sourceColumn = newColumns[embeddings.sourceColumn];
const destColumn = embeddings.destColumn ?? "vector";
const innerDestType = embeddings.embeddingDataType ?? new Float32();
const sourceColumn = newColumns[embeddings.sourceColumn]
const destColumn = embeddings.destColumn ?? 'vector'
const innerDestType = embeddings.embeddingDataType ?? new Float32()
if (sourceColumn === undefined) {
throw new Error(
`Cannot apply embedding function because the source column '${embeddings.sourceColumn}' was not present in the data`
);
throw new Error(`Cannot apply embedding function because the source column '${embeddings.sourceColumn}' was not present in the data`)
}
if (table.numRows === 0) {
@@ -388,60 +358,45 @@ async function applyEmbeddings<T>(
// We have an empty table and it already has the embedding column so no work needs to be done
// Note: we don't return an error like we did below because this is a common occurrence. For example,
// if we call convertToTable with 0 records and a schema that includes the embedding
return table;
return table
}
if (embeddings.embeddingDimension !== undefined) {
const destType = newVectorType(
embeddings.embeddingDimension,
innerDestType
);
newColumns[destColumn] = makeVector([], destType);
const destType = newVectorType(embeddings.embeddingDimension, innerDestType)
newColumns[destColumn] = makeVector([], destType)
} else if (schema != null) {
const destField = schema.fields.find((f) => f.name === destColumn);
const destField = schema.fields.find(f => f.name === destColumn)
if (destField != null) {
newColumns[destColumn] = makeVector([], destField.type);
newColumns[destColumn] = makeVector([], destField.type)
} else {
throw new Error(
`Attempt to apply embeddings to an empty table failed because schema was missing embedding column '${destColumn}'`
);
throw new Error(`Attempt to apply embeddings to an empty table failed because schema was missing embedding column '${destColumn}'`)
}
} else {
throw new Error(
"Attempt to apply embeddings to an empty table when the embeddings function does not specify `embeddingDimension`"
);
throw new Error('Attempt to apply embeddings to an empty table when the embeddings function does not specify `embeddingDimension`')
}
} else {
if (Object.prototype.hasOwnProperty.call(newColumns, destColumn)) {
throw new Error(
`Attempt to apply embeddings to table failed because column ${destColumn} already existed`
);
throw new Error(`Attempt to apply embeddings to table failed because column ${destColumn} already existed`)
}
if (table.batches.length > 1) {
throw new Error(
"Internal error: `makeArrowTable` unexpectedly created a table with more than one batch"
);
throw new Error('Internal error: `makeArrowTable` unexpectedly created a table with more than one batch')
}
const values = sourceColumn.toArray();
const vectors = await embeddings.embed(values as T[]);
const values = sourceColumn.toArray()
const vectors = await embeddings.embed(values as T[])
if (vectors.length !== values.length) {
throw new Error(
"Embedding function did not return an embedding for each input element"
);
throw new Error('Embedding function did not return an embedding for each input element')
}
const destType = newVectorType(vectors[0].length, innerDestType);
newColumns[destColumn] = makeVector(vectors, destType);
const destType = newVectorType(vectors[0].length, innerDestType)
newColumns[destColumn] = makeVector(vectors, destType)
}
const newTable = new ArrowTable(newColumns);
const newTable = new ArrowTable(newColumns)
if (schema != null) {
if (schema.fields.find((f) => f.name === destColumn) === undefined) {
throw new Error(
`When using embedding functions and specifying a schema the schema should include the embedding column but the column ${destColumn} was missing`
);
if (schema.fields.find(f => f.name === destColumn) === undefined) {
throw new Error(`When using embedding functions and specifying a schema the schema should include the embedding column but the column ${destColumn} was missing`)
}
return alignTable(newTable, schema);
return alignTable(newTable, schema)
}
return newTable;
return newTable
}
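
A hedged sketch of an `EmbeddingFunction` that `applyEmbeddings` could consume; the constant vectors and the import path are assumptions, and a real implementation would call a model:

```ts
import { Float32 } from "apache-arrow";
import { type EmbeddingFunction } from "./embedding/embedding_function"; // path assumed

// Toy embedding returning a constant 2-dimensional vector per input.
// embeddingDimension lets applyEmbeddings handle empty tables, as above.
const constantEmbedding: EmbeddingFunction<string> = {
  sourceColumn: "text",
  destColumn: "vector", // optional; "vector" is also the default above
  embeddingDataType: new Float32(), // optional; Float32 is the default above
  embeddingDimension: 2,
  embed: async (texts: string[]) => texts.map(() => [0, 0]),
};
```
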
/*
@@ -462,24 +417,21 @@ async function applyEmbeddings<T>(
 * embedding columns. If no schema is provided then embedding columns will
* be placed at the end of the table, after all of the input columns.
*/
export async function convertToTable<T>(
export async function convertToTable<T> (
data: Array<Record<string, unknown>>,
embeddings?: EmbeddingFunction<T>,
makeTableOptions?: Partial<MakeArrowTableOptions>
): Promise<ArrowTable> {
const table = makeArrowTable(data, makeTableOptions);
return await applyEmbeddings(table, embeddings, makeTableOptions?.schema);
const table = makeArrowTable(data, makeTableOptions)
return await applyEmbeddings(table, embeddings, makeTableOptions?.schema)
}
// Creates the Arrow Type for a Vector column with dimension `dim`
function newVectorType<T extends Float>(
dim: number,
innerType: T
): FixedSizeList<T> {
function newVectorType <T extends Float> (dim: number, innerType: T): FixedSizeList<T> {
  // Elsewhere we always default to nullable elements, so we need to set nullable to true here;
  // otherwise we often get schema mismatches, because the stored data's schema always has nullable elements
const children = new Field<T>("item", innerType, true);
return new FixedSizeList(dim, children);
const children = new Field<T>('item', innerType, true)
return new FixedSizeList(dim, children)
}
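
Concretely, `newVectorType(128, new Float32())` is equivalent to building the fixed-size list by hand:

```ts
import { Field, FixedSizeList, Float32 } from "apache-arrow";

// A 128-wide vector column whose "item" child is nullable, matching the
// nullable-element schemas of stored data.
const vectorType = new FixedSizeList(128, new Field("item", new Float32(), true));
```
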
/**
@@ -489,17 +441,17 @@ function newVectorType<T extends Float>(
*
* `schema` is required if data is empty
*/
export async function fromRecordsToBuffer<T>(
export async function fromRecordsToBuffer<T> (
data: Array<Record<string, unknown>>,
embeddings?: EmbeddingFunction<T>,
schema?: Schema
): Promise<Buffer> {
if (schema !== undefined && schema !== null) {
schema = sanitizeSchema(schema);
schema = sanitizeSchema(schema)
}
const table = await convertToTable(data, embeddings, { schema, embeddings });
const writer = RecordBatchFileWriter.writeAll(table);
return Buffer.from(await writer.toUint8Array());
const table = await convertToTable(data, embeddings, { schema })
const writer = RecordBatchFileWriter.writeAll(table)
return Buffer.from(await writer.toUint8Array())
}
/**
@@ -509,17 +461,17 @@ export async function fromRecordsToBuffer<T>(
*
* `schema` is required if data is empty
*/
export async function fromRecordsToStreamBuffer<T>(
export async function fromRecordsToStreamBuffer<T> (
data: Array<Record<string, unknown>>,
embeddings?: EmbeddingFunction<T>,
schema?: Schema
): Promise<Buffer> {
if (schema !== null && schema !== undefined) {
schema = sanitizeSchema(schema);
schema = sanitizeSchema(schema)
}
const table = await convertToTable(data, embeddings, { schema });
const writer = RecordBatchStreamWriter.writeAll(table);
return Buffer.from(await writer.toUint8Array());
const table = await convertToTable(data, embeddings, { schema })
const writer = RecordBatchStreamWriter.writeAll(table)
return Buffer.from(await writer.toUint8Array())
}
/**
@@ -530,17 +482,17 @@ export async function fromRecordsToStreamBuffer<T>(
*
* `schema` is required if the table is empty
*/
export async function fromTableToBuffer<T>(
export async function fromTableToBuffer<T> (
table: ArrowTable,
embeddings?: EmbeddingFunction<T>,
schema?: Schema
): Promise<Buffer> {
if (schema !== null && schema !== undefined) {
schema = sanitizeSchema(schema);
schema = sanitizeSchema(schema)
}
const tableWithEmbeddings = await applyEmbeddings(table, embeddings, schema);
const writer = RecordBatchFileWriter.writeAll(tableWithEmbeddings);
return Buffer.from(await writer.toUint8Array());
const tableWithEmbeddings = await applyEmbeddings(table, embeddings, schema)
const writer = RecordBatchFileWriter.writeAll(tableWithEmbeddings)
return Buffer.from(await writer.toUint8Array())
}
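
A round-trip sketch for the buffer helpers; `tableFromIPC` is the apache-arrow reader used elsewhere in this diff, and the `./arrow` import path is assumed:

```ts
import { tableFromIPC } from "apache-arrow";
import { fromRecordsToBuffer } from "./arrow"; // path assumed

// Serialize two records to the Arrow file format and read them back.
async function roundTrip(): Promise<void> {
  const buffer = await fromRecordsToBuffer([{ id: 1 }, { id: 2 }]);
  const table = tableFromIPC(buffer);
  console.log(table.numRows); // 2
}
```
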
/**
@@ -551,85 +503,49 @@ export async function fromTableToBuffer<T>(
*
* `schema` is required if the table is empty
*/
export async function fromTableToStreamBuffer<T>(
export async function fromTableToStreamBuffer<T> (
table: ArrowTable,
embeddings?: EmbeddingFunction<T>,
schema?: Schema
): Promise<Buffer> {
if (schema !== null && schema !== undefined) {
schema = sanitizeSchema(schema);
schema = sanitizeSchema(schema)
}
const tableWithEmbeddings = await applyEmbeddings(table, embeddings, schema);
const writer = RecordBatchStreamWriter.writeAll(tableWithEmbeddings);
return Buffer.from(await writer.toUint8Array());
const tableWithEmbeddings = await applyEmbeddings(table, embeddings, schema)
const writer = RecordBatchStreamWriter.writeAll(tableWithEmbeddings)
return Buffer.from(await writer.toUint8Array())
}
function alignBatch(batch: RecordBatch, schema: Schema): RecordBatch {
const alignedChildren = [];
function alignBatch (batch: RecordBatch, schema: Schema): RecordBatch {
const alignedChildren = []
for (const field of schema.fields) {
const indexInBatch = batch.schema.fields?.findIndex(
(f) => f.name === field.name
);
)
if (indexInBatch < 0) {
throw new Error(
`The column ${field.name} was not found in the Arrow Table`
);
)
}
alignedChildren.push(batch.data.children[indexInBatch]);
alignedChildren.push(batch.data.children[indexInBatch])
}
const newData = makeData({
type: new Struct(schema.fields),
length: batch.numRows,
nullCount: batch.nullCount,
children: alignedChildren
});
return new RecordBatch(schema, newData);
})
return new RecordBatch(schema, newData)
}
function alignTable(table: ArrowTable, schema: Schema): ArrowTable {
function alignTable (table: ArrowTable, schema: Schema): ArrowTable {
const alignedBatches = table.batches.map((batch) =>
alignBatch(batch, schema)
);
return new ArrowTable(schema, alignedBatches);
)
return new ArrowTable(schema, alignedBatches)
}
// Creates an empty Arrow Table
export function createEmptyTable(schema: Schema): ArrowTable {
return new ArrowTable(sanitizeSchema(schema));
}
function validateSchemaEmbeddings(
schema: Schema<any>,
data: Array<Record<string, unknown>>,
embeddings: EmbeddingFunction<any> | undefined
) {
const fields = [];
const missingEmbeddingFields = [];
// First we check if the field is a `FixedSizeList`
// Then we check if the data contains the field
// if it does not, we add it to the list of missing embedding fields
// Finally, we check if those missing embedding fields are `this._embeddings`
// if they are not, we throw an error
for (const field of schema.fields) {
if (field.type instanceof FixedSizeList) {
if (data.length !== 0 && data?.[0]?.[field.name] === undefined) {
missingEmbeddingFields.push(field);
} else {
fields.push(field);
}
} else {
fields.push(field);
}
}
if (missingEmbeddingFields.length > 0 && embeddings === undefined) {
throw new Error(
`Table has embeddings: "${missingEmbeddingFields
.map((f) => f.name)
.join(",")}", but no embedding function was provided`
);
}
return new Schema(fields, schema.metadata);
export function createEmptyTable (schema: Schema): ArrowTable {
return new ArrowTable(sanitizeSchema(schema))
}
@@ -12,20 +12,19 @@
// See the License for the specific language governing permissions and
// limitations under the License.
import { type Schema, Table as ArrowTable, tableFromIPC } from "apache-arrow";
import { type Schema, Table as ArrowTable, tableFromIPC } from 'apache-arrow'
import {
createEmptyTable,
fromRecordsToBuffer,
fromTableToBuffer,
makeArrowTable
} from "./arrow";
import type { EmbeddingFunction } from "./embedding/embedding_function";
import { RemoteConnection } from "./remote";
import { Query } from "./query";
import { isEmbeddingFunction } from "./embedding/embedding_function";
import { type Literal, toSQL } from "./util";
import { type HttpMiddleware } from "./middleware";
} from './arrow'
import type { EmbeddingFunction } from './embedding/embedding_function'
import { RemoteConnection } from './remote'
import { Query } from './query'
import { isEmbeddingFunction } from './embedding/embedding_function'
import { type Literal, toSQL } from './util'
import { type HttpMiddleware } from './middleware'
const {
databaseNew,
@@ -49,18 +48,14 @@ const {
tableAlterColumns,
tableDropColumns
// eslint-disable-next-line @typescript-eslint/no-var-requires
} = require("../native.js");
} = require('../native.js')
export { Query };
export type { EmbeddingFunction };
export { OpenAIEmbeddingFunction } from "./embedding/openai";
export {
convertToTable,
makeArrowTable,
type MakeArrowTableOptions
} from "./arrow";
export { Query }
export type { EmbeddingFunction }
export { OpenAIEmbeddingFunction } from './embedding/openai'
export { convertToTable, makeArrowTable, type MakeArrowTableOptions } from './arrow'
const defaultAwsRegion = "us-west-2";
const defaultAwsRegion = 'us-west-2'
export interface AwsCredentials {
accessKeyId: string
@@ -133,19 +128,19 @@ export interface ConnectionOptions {
readConsistencyInterval?: number
}
function getAwsArgs(opts: ConnectionOptions): any[] {
const callArgs: any[] = [];
const awsCredentials = opts.awsCredentials;
function getAwsArgs (opts: ConnectionOptions): any[] {
const callArgs: any[] = []
const awsCredentials = opts.awsCredentials
if (awsCredentials !== undefined) {
callArgs.push(awsCredentials.accessKeyId);
callArgs.push(awsCredentials.secretKey);
callArgs.push(awsCredentials.sessionToken);
callArgs.push(awsCredentials.accessKeyId)
callArgs.push(awsCredentials.secretKey)
callArgs.push(awsCredentials.sessionToken)
} else {
callArgs.fill(undefined, 0, 3);
callArgs.fill(undefined, 0, 3)
}
callArgs.push(opts.awsRegion);
return callArgs;
callArgs.push(opts.awsRegion)
return callArgs
}
export interface CreateTableOptions<T> {
@@ -178,56 +173,56 @@ export interface CreateTableOptions<T> {
*
* @see {@link ConnectionOptions} for more details on the URI format.
*/
export async function connect(uri: string): Promise<Connection>;
export async function connect (uri: string): Promise<Connection>
/**
* Connect to a LanceDB instance with connection options.
*
* @param opts The {@link ConnectionOptions} to use when connecting to the database.
*/
export async function connect(
export async function connect (
opts: Partial<ConnectionOptions>
): Promise<Connection>;
export async function connect(
): Promise<Connection>
export async function connect (
arg: string | Partial<ConnectionOptions>
): Promise<Connection> {
let opts: ConnectionOptions;
if (typeof arg === "string") {
opts = { uri: arg };
let opts: ConnectionOptions
if (typeof arg === 'string') {
opts = { uri: arg }
} else {
const keys = Object.keys(arg);
if (keys.length === 1 && keys[0] === "uri" && typeof arg.uri === "string") {
opts = { uri: arg.uri };
const keys = Object.keys(arg)
if (keys.length === 1 && keys[0] === 'uri' && typeof arg.uri === 'string') {
opts = { uri: arg.uri }
} else {
opts = Object.assign(
{
uri: "",
uri: '',
awsCredentials: undefined,
awsRegion: defaultAwsRegion,
apiKey: undefined,
region: defaultAwsRegion
},
arg
);
)
}
}
if (opts.uri.startsWith("db://")) {
if (opts.uri.startsWith('db://')) {
// Remote connection
return new RemoteConnection(opts);
return new RemoteConnection(opts)
}
const storageOptions = opts.storageOptions ?? {};
if (opts.awsCredentials?.accessKeyId !== undefined) {
storageOptions.aws_access_key_id = opts.awsCredentials.accessKeyId;
storageOptions.aws_access_key_id = opts.awsCredentials.accessKeyId
}
if (opts.awsCredentials?.secretKey !== undefined) {
storageOptions.aws_secret_access_key = opts.awsCredentials.secretKey;
storageOptions.aws_secret_access_key = opts.awsCredentials.secretKey
}
if (opts.awsCredentials?.sessionToken !== undefined) {
storageOptions.aws_session_token = opts.awsCredentials.sessionToken;
storageOptions.aws_session_token = opts.awsCredentials.sessionToken
}
if (opts.awsRegion !== undefined) {
storageOptions.region = opts.awsRegion;
storageOptions.region = opts.awsRegion
}
// It's a pain to pass a record to Rust, so we convert it to an array of key-value pairs
const storageOptionsArr = Object.entries(storageOptions);
@@ -236,8 +231,8 @@ export async function connect(
opts.uri,
storageOptionsArr,
opts.readConsistencyInterval
);
return new LocalConnection(db, opts);
)
return new LocalConnection(db, opts)
}
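
Both `connect` overloads in use, as a sketch; the package import path and the example URIs are assumptions:

```ts
import { connect } from "vectordb"; // package name assumed

async function open(): Promise<void> {
  // Plain URI form
  const local = await connect("data/sample-lancedb");
  // Options form; awsRegion feeds storageOptions.region as shown above
  const remote = await connect({ uri: "s3://bucket/path", awsRegion: "us-west-2" });
  console.log(await local.tableNames(), await remote.tableNames());
}
```
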
/**
@@ -538,11 +533,7 @@ export interface Table<T = number[]> {
* @param data the new data to insert
* @param args parameters controlling how the operation should behave
*/
mergeInsert: (
on: string,
data: Array<Record<string, unknown>> | ArrowTable,
args: MergeInsertArgs
) => Promise<void>
mergeInsert: (on: string, data: Array<Record<string, unknown>> | ArrowTable, args: MergeInsertArgs) => Promise<void>
/**
 * List the indices on this table.
@@ -567,9 +558,7 @@ export interface Table<T = number[]> {
* expressions will be evaluated for each row in the
* table, and can reference existing columns in the table.
*/
addColumns(
newColumnTransforms: Array<{ name: string, valueSql: string }>
): Promise<void>
addColumns(newColumnTransforms: Array<{ name: string, valueSql: string }>): Promise<void>
/**
* Alter the name or nullability of columns.
@@ -710,23 +699,23 @@ export interface IndexStats {
* A connection to a LanceDB database.
*/
export class LocalConnection implements Connection {
private readonly _options: () => ConnectionOptions;
private readonly _db: any;
private readonly _options: () => ConnectionOptions
private readonly _db: any
constructor(db: any, options: ConnectionOptions) {
this._options = () => options;
this._db = db;
constructor (db: any, options: ConnectionOptions) {
this._options = () => options
this._db = db
}
get uri(): string {
return this._options().uri;
get uri (): string {
return this._options().uri
}
/**
* Get the names of all tables in the database.
*/
async tableNames(): Promise<string[]> {
return databaseTableNames.call(this._db);
async tableNames (): Promise<string[]> {
return databaseTableNames.call(this._db)
}
/**
@@ -734,7 +723,7 @@ export class LocalConnection implements Connection {
*
* @param name The name of the table.
*/
async openTable(name: string): Promise<Table>;
async openTable (name: string): Promise<Table>
/**
* Open a table in the database.
@@ -745,20 +734,23 @@ export class LocalConnection implements Connection {
async openTable<T>(
name: string,
embeddings: EmbeddingFunction<T>
): Promise<Table<T>>;
): Promise<Table<T>>
async openTable<T>(
name: string,
embeddings?: EmbeddingFunction<T>
): Promise<Table<T>>;
): Promise<Table<T>>
async openTable<T>(
name: string,
embeddings?: EmbeddingFunction<T>
): Promise<Table<T>> {
const tbl = await databaseOpenTable.call(this._db, name);
const tbl = await databaseOpenTable.call(
this._db,
name,
)
if (embeddings !== undefined) {
return new LocalTable(tbl, name, this._options(), embeddings);
return new LocalTable(tbl, name, this._options(), embeddings)
} else {
return new LocalTable(tbl, name, this._options());
return new LocalTable(tbl, name, this._options())
}
}
@@ -768,32 +760,32 @@ export class LocalConnection implements Connection {
optsOrEmbedding?: WriteOptions | EmbeddingFunction<T>,
opt?: WriteOptions
): Promise<Table<T>> {
if (typeof name === "string") {
let writeOptions: WriteOptions = new DefaultWriteOptions();
if (typeof name === 'string') {
let writeOptions: WriteOptions = new DefaultWriteOptions()
if (opt !== undefined && isWriteOptions(opt)) {
writeOptions = opt;
writeOptions = opt
} else if (
optsOrEmbedding !== undefined &&
isWriteOptions(optsOrEmbedding)
) {
writeOptions = optsOrEmbedding;
writeOptions = optsOrEmbedding
}
let embeddings: undefined | EmbeddingFunction<T>;
let embeddings: undefined | EmbeddingFunction<T>
if (
optsOrEmbedding !== undefined &&
isEmbeddingFunction(optsOrEmbedding)
) {
embeddings = optsOrEmbedding;
embeddings = optsOrEmbedding
}
return await this.createTableImpl({
name,
data,
embeddingFunction: embeddings,
writeOptions
});
})
}
return await this.createTableImpl(name);
return await this.createTableImpl(name)
}
private async createTableImpl<T>({
@@ -809,27 +801,27 @@ export class LocalConnection implements Connection {
embeddingFunction?: EmbeddingFunction<T> | undefined
writeOptions?: WriteOptions | undefined
}): Promise<Table<T>> {
let buffer: Buffer;
let buffer: Buffer
function isEmpty(
function isEmpty (
data: Array<Record<string, unknown>> | ArrowTable<any>
): boolean {
if (data instanceof ArrowTable) {
return data.data.length === 0;
return data.data.length === 0
}
return data.length === 0;
return data.length === 0
}
if (data === undefined || isEmpty(data)) {
if (schema === undefined) {
throw new Error("Either data or schema needs to defined");
throw new Error('Either data or schema needs to defined')
}
buffer = await fromTableToBuffer(createEmptyTable(schema));
buffer = await fromTableToBuffer(createEmptyTable(schema))
} else if (data instanceof ArrowTable) {
buffer = await fromTableToBuffer(data, embeddingFunction, schema);
buffer = await fromTableToBuffer(data, embeddingFunction, schema)
} else {
// data is Array<Record<...>>
buffer = await fromRecordsToBuffer(data, embeddingFunction, schema);
buffer = await fromRecordsToBuffer(data, embeddingFunction, schema)
}
const tbl = await tableCreate.call(
@@ -838,11 +830,11 @@ export class LocalConnection implements Connection {
buffer,
writeOptions?.writeMode?.toString(),
...getAwsArgs(this._options())
);
)
if (embeddingFunction !== undefined) {
return new LocalTable(tbl, name, this._options(), embeddingFunction);
return new LocalTable(tbl, name, this._options(), embeddingFunction)
} else {
return new LocalTable(tbl, name, this._options());
return new LocalTable(tbl, name, this._options())
}
}
@@ -850,69 +842,69 @@ export class LocalConnection implements Connection {
* Drop an existing table.
* @param name The name of the table to drop.
*/
async dropTable(name: string): Promise<void> {
await databaseDropTable.call(this._db, name);
async dropTable (name: string): Promise<void> {
await databaseDropTable.call(this._db, name)
}
withMiddleware(middleware: HttpMiddleware): Connection {
return this;
withMiddleware (middleware: HttpMiddleware): Connection {
return this
}
}
export class LocalTable<T = number[]> implements Table<T> {
private _tbl: any;
private readonly _name: string;
private readonly _isElectron: boolean;
private readonly _embeddings?: EmbeddingFunction<T>;
private readonly _options: () => ConnectionOptions;
private _tbl: any
private readonly _name: string
private readonly _isElectron: boolean
private readonly _embeddings?: EmbeddingFunction<T>
private readonly _options: () => ConnectionOptions
constructor(tbl: any, name: string, options: ConnectionOptions);
constructor (tbl: any, name: string, options: ConnectionOptions)
/**
* @param tbl
* @param name
* @param options
* @param embeddings An embedding function to use when interacting with this table
*/
constructor(
constructor (
tbl: any,
name: string,
options: ConnectionOptions,
embeddings: EmbeddingFunction<T>
);
constructor(
)
constructor (
tbl: any,
name: string,
options: ConnectionOptions,
embeddings?: EmbeddingFunction<T>
) {
this._tbl = tbl;
this._name = name;
this._embeddings = embeddings;
this._options = () => options;
this._isElectron = this.checkElectron();
this._tbl = tbl
this._name = name
this._embeddings = embeddings
this._options = () => options
this._isElectron = this.checkElectron()
}
get name(): string {
return this._name;
get name (): string {
return this._name
}
/**
* Creates a search query to find the nearest neighbors of the given search term
* @param query The query search term
*/
search(query: T): Query<T> {
return new Query(query, this._tbl, this._embeddings);
search (query: T): Query<T> {
return new Query(query, this._tbl, this._embeddings)
}
/**
* Creates a filter query to find all rows matching the specified criteria
* @param value The filter criteria (like SQL where clause syntax)
*/
filter(value: string): Query<T> {
return new Query(undefined, this._tbl, this._embeddings).filter(value);
filter (value: string): Query<T> {
return new Query(undefined, this._tbl, this._embeddings).filter(value)
}
where = this.filter;
where = this.filter
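
A chained query sketch assuming an opened table `tbl`; `limit` and `execute` are `Query` methods assumed from the wider API rather than shown in this hunk:

```ts
// Vector search narrowed by a SQL-style predicate.
const hits = await tbl
  .search([0.1, 0.2])
  .filter("id > 0")
  .limit(10) // assumed Query method
  .execute(); // assumed Query method
```
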
/**
* Insert records into this Table.
@@ -920,19 +912,16 @@ export class LocalTable<T = number[]> implements Table<T> {
* @param data Records to be inserted into the Table
* @return The number of rows added to the table
*/
async add(
async add (
data: Array<Record<string, unknown>> | ArrowTable
): Promise<number> {
const schema = await this.schema;
let tbl: ArrowTable;
const schema = await this.schema
let tbl: ArrowTable
if (data instanceof ArrowTable) {
tbl = data;
tbl = data
} else {
tbl = makeArrowTable(data, { schema, embeddings: this._embeddings });
tbl = makeArrowTable(data, { schema })
}
return tableAdd
.call(
this._tbl,
@@ -941,8 +930,8 @@ export class LocalTable<T = number[]> implements Table<T> {
...getAwsArgs(this._options())
)
.then((newTable: any) => {
this._tbl = newTable;
});
this._tbl = newTable
})
}
/**
@@ -951,14 +940,14 @@ export class LocalTable<T = number[]> implements Table<T> {
* @param data Records to be inserted into the Table
* @return The number of rows added to the table
*/
async overwrite(
async overwrite (
data: Array<Record<string, unknown>> | ArrowTable
): Promise<number> {
let buffer: Buffer;
let buffer: Buffer
if (data instanceof ArrowTable) {
buffer = await fromTableToBuffer(data, this._embeddings);
buffer = await fromTableToBuffer(data, this._embeddings)
} else {
buffer = await fromRecordsToBuffer(data, this._embeddings);
buffer = await fromRecordsToBuffer(data, this._embeddings)
}
return tableAdd
.call(
@@ -968,8 +957,8 @@ export class LocalTable<T = number[]> implements Table<T> {
...getAwsArgs(this._options())
)
.then((newTable: any) => {
this._tbl = newTable;
});
this._tbl = newTable
})
}
/**
@@ -977,26 +966,26 @@ export class LocalTable<T = number[]> implements Table<T> {
*
* @param indexParams The parameters of this Index, @see VectorIndexParams.
*/
async createIndex(indexParams: VectorIndexParams): Promise<any> {
async createIndex (indexParams: VectorIndexParams): Promise<any> {
return tableCreateVectorIndex
.call(this._tbl, indexParams)
.then((newTable: any) => {
this._tbl = newTable;
});
this._tbl = newTable
})
}
async createScalarIndex(column: string, replace?: boolean): Promise<void> {
async createScalarIndex (column: string, replace?: boolean): Promise<void> {
if (replace === undefined) {
replace = true;
replace = true
}
return tableCreateScalarIndex.call(this._tbl, column, replace);
return tableCreateScalarIndex.call(this._tbl, column, replace)
}
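
Usage sketch, assuming an opened table `tbl`; since `replace` defaults to true, re-running the call simply rebuilds the index:

```ts
// "id" is an illustrative column name.
await tbl.createScalarIndex("id");
```
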
/**
* Returns the number of rows in this table.
*/
async countRows(filter?: string): Promise<number> {
return tableCountRows.call(this._tbl, filter);
async countRows (filter?: string): Promise<number> {
return tableCountRows.call(this._tbl, filter)
}
/**
@@ -1004,10 +993,10 @@ export class LocalTable<T = number[]> implements Table<T> {
*
* @param filter A filter in the same format used by a sql WHERE clause.
*/
async delete(filter: string): Promise<void> {
async delete (filter: string): Promise<void> {
return tableDelete.call(this._tbl, filter).then((newTable: any) => {
this._tbl = newTable;
});
this._tbl = newTable
})
}
/**
@@ -1017,65 +1006,55 @@ export class LocalTable<T = number[]> implements Table<T> {
*
* @returns
*/
async update(args: UpdateArgs | UpdateSqlArgs): Promise<void> {
let filter: string | null;
let updates: Record<string, string>;
async update (args: UpdateArgs | UpdateSqlArgs): Promise<void> {
let filter: string | null
let updates: Record<string, string>
if ("valuesSql" in args) {
filter = args.where ?? null;
updates = args.valuesSql;
if ('valuesSql' in args) {
filter = args.where ?? null
updates = args.valuesSql
} else {
filter = args.where ?? null;
updates = {};
filter = args.where ?? null
updates = {}
for (const [key, value] of Object.entries(args.values)) {
updates[key] = toSQL(value);
updates[key] = toSQL(value)
}
}
return tableUpdate
.call(this._tbl, filter, updates)
.then((newTable: any) => {
this._tbl = newTable;
});
this._tbl = newTable
})
}
async mergeInsert(
on: string,
data: Array<Record<string, unknown>> | ArrowTable,
args: MergeInsertArgs
): Promise<void> {
let whenMatchedUpdateAll = false;
let whenMatchedUpdateAllFilt = null;
if (
args.whenMatchedUpdateAll !== undefined &&
args.whenMatchedUpdateAll !== null
) {
whenMatchedUpdateAll = true;
async mergeInsert (on: string, data: Array<Record<string, unknown>> | ArrowTable, args: MergeInsertArgs): Promise<void> {
let whenMatchedUpdateAll = false
let whenMatchedUpdateAllFilt = null
if (args.whenMatchedUpdateAll !== undefined && args.whenMatchedUpdateAll !== null) {
whenMatchedUpdateAll = true
if (args.whenMatchedUpdateAll !== true) {
whenMatchedUpdateAllFilt = args.whenMatchedUpdateAll;
whenMatchedUpdateAllFilt = args.whenMatchedUpdateAll
}
}
const whenNotMatchedInsertAll = args.whenNotMatchedInsertAll ?? false;
let whenNotMatchedBySourceDelete = false;
let whenNotMatchedBySourceDeleteFilt = null;
if (
args.whenNotMatchedBySourceDelete !== undefined &&
args.whenNotMatchedBySourceDelete !== null
) {
whenNotMatchedBySourceDelete = true;
const whenNotMatchedInsertAll = args.whenNotMatchedInsertAll ?? false
let whenNotMatchedBySourceDelete = false
let whenNotMatchedBySourceDeleteFilt = null
if (args.whenNotMatchedBySourceDelete !== undefined && args.whenNotMatchedBySourceDelete !== null) {
whenNotMatchedBySourceDelete = true
if (args.whenNotMatchedBySourceDelete !== true) {
whenNotMatchedBySourceDeleteFilt = args.whenNotMatchedBySourceDelete;
whenNotMatchedBySourceDeleteFilt = args.whenNotMatchedBySourceDelete
}
}
const schema = await this.schema;
let tbl: ArrowTable;
const schema = await this.schema
let tbl: ArrowTable
if (data instanceof ArrowTable) {
tbl = data;
tbl = data
} else {
tbl = makeArrowTable(data, { schema });
tbl = makeArrowTable(data, { schema })
}
const buffer = await fromTableToBuffer(tbl, this._embeddings, schema);
const buffer = await fromTableToBuffer(tbl, this._embeddings, schema)
this._tbl = await tableMergeInsert.call(
this._tbl,
@@ -1086,7 +1065,7 @@ export class LocalTable<T = number[]> implements Table<T> {
whenNotMatchedBySourceDelete,
whenNotMatchedBySourceDeleteFilt,
buffer
);
)
}
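
An upsert sketch for `mergeInsert`, assuming an opened table `tbl` keyed on `id`; note that a string value for `whenMatchedUpdateAll` would instead be treated as an update filter by the branching above:

```ts
// Update matching rows and insert the rest.
await tbl.mergeInsert("id", [{ id: 1, vector: [0.1, 0.2] }], {
  whenMatchedUpdateAll: true,
  whenNotMatchedInsertAll: true,
});
```
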
/**
@@ -1104,16 +1083,16 @@ export class LocalTable<T = number[]> implements Table<T> {
* uphold this promise can lead to corrupted tables.
* @returns
*/
async cleanupOldVersions(
async cleanupOldVersions (
olderThan?: number,
deleteUnverified?: boolean
): Promise<CleanupStats> {
return tableCleanupOldVersions
.call(this._tbl, olderThan, deleteUnverified)
.then((res: { newTable: any, metrics: CleanupStats }) => {
this._tbl = res.newTable;
return res.metrics;
});
this._tbl = res.newTable
return res.metrics
})
}
/**
@@ -1127,64 +1106,62 @@ export class LocalTable<T = number[]> implements Table<T> {
* for most tables.
* @returns Metrics about the compaction operation.
*/
async compactFiles(options?: CompactionOptions): Promise<CompactionMetrics> {
const optionsArg = options ?? {};
async compactFiles (options?: CompactionOptions): Promise<CompactionMetrics> {
const optionsArg = options ?? {}
return tableCompactFiles
.call(this._tbl, optionsArg)
.then((res: { newTable: any, metrics: CompactionMetrics }) => {
this._tbl = res.newTable;
return res.metrics;
});
this._tbl = res.newTable
return res.metrics
})
}
async listIndices(): Promise<VectorIndex[]> {
return tableListIndices.call(this._tbl);
async listIndices (): Promise<VectorIndex[]> {
return tableListIndices.call(this._tbl)
}
async indexStats(indexUuid: string): Promise<IndexStats> {
return tableIndexStats.call(this._tbl, indexUuid);
async indexStats (indexUuid: string): Promise<IndexStats> {
return tableIndexStats.call(this._tbl, indexUuid)
}
get schema(): Promise<Schema> {
get schema (): Promise<Schema> {
// empty table
return this.getSchema();
return this.getSchema()
}
private async getSchema(): Promise<Schema> {
const buffer = await tableSchema.call(this._tbl, this._isElectron);
const table = tableFromIPC(buffer);
return table.schema;
private async getSchema (): Promise<Schema> {
const buffer = await tableSchema.call(this._tbl, this._isElectron)
const table = tableFromIPC(buffer)
return table.schema
}
// See https://github.com/electron/electron/issues/2288
private checkElectron(): boolean {
private checkElectron (): boolean {
try {
// eslint-disable-next-line no-prototype-builtins
return (
Object.prototype.hasOwnProperty.call(process?.versions, "electron") ||
navigator?.userAgent?.toLowerCase()?.includes(" electron")
);
Object.prototype.hasOwnProperty.call(process?.versions, 'electron') ||
navigator?.userAgent?.toLowerCase()?.includes(' electron')
)
} catch (e) {
return false;
return false
}
}
async addColumns(
newColumnTransforms: Array<{ name: string, valueSql: string }>
): Promise<void> {
return tableAddColumns.call(this._tbl, newColumnTransforms);
async addColumns (newColumnTransforms: Array<{ name: string, valueSql: string }>): Promise<void> {
return tableAddColumns.call(this._tbl, newColumnTransforms)
}
async alterColumns(columnAlterations: ColumnAlteration[]): Promise<void> {
return tableAlterColumns.call(this._tbl, columnAlterations);
async alterColumns (columnAlterations: ColumnAlteration[]): Promise<void> {
return tableAlterColumns.call(this._tbl, columnAlterations)
}
async dropColumns(columnNames: string[]): Promise<void> {
return tableDropColumns.call(this._tbl, columnNames);
async dropColumns (columnNames: string[]): Promise<void> {
return tableDropColumns.call(this._tbl, columnNames)
}
withMiddleware(middleware: HttpMiddleware): Table<T> {
return this;
withMiddleware (middleware: HttpMiddleware): Table<T> {
return this
}
}
@@ -1207,7 +1184,7 @@ export interface CompactionOptions {
*/
targetRowsPerFragment?: number
/**
* The maximum number of T per group. Defaults to 1024.
* The maximum number of rows per group. Defaults to 1024.
*/
maxRowsPerGroup?: number
/**
@@ -1307,21 +1284,21 @@ export interface IvfPQIndexConfig {
*/
index_cache_size?: number
type: "ivf_pq"
type: 'ivf_pq'
}
export type VectorIndexParams = IvfPQIndexConfig;
export type VectorIndexParams = IvfPQIndexConfig
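
A sketch of building an IVF-PQ index with `createIndex`; only `type` and `index_cache_size` are visible in this hunk, so the remaining `IvfPQIndexConfig` fields are omitted here:

```ts
// Assumes an opened table `tbl` with a populated vector column.
await tbl.createIndex({ type: "ivf_pq", index_cache_size: 256 });
```
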
/**
* Write mode for writing a table.
*/
export enum WriteMode {
/** Create a new {@link Table}. */
Create = "create",
Create = 'create',
/** Overwrite the existing {@link Table} if presented. */
Overwrite = "overwrite",
Overwrite = 'overwrite',
/** Append new data to the table. */
Append = "append",
Append = 'append',
}
/**
@@ -1333,14 +1310,14 @@ export interface WriteOptions {
}
export class DefaultWriteOptions implements WriteOptions {
writeMode = WriteMode.Create;
writeMode = WriteMode.Create
}
export function isWriteOptions(value: any): value is WriteOptions {
export function isWriteOptions (value: any): value is WriteOptions {
return (
Object.keys(value).length === 1 &&
(value.writeMode === undefined || typeof value.writeMode === "string")
);
(value.writeMode === undefined || typeof value.writeMode === 'string')
)
}
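
Because `isWriteOptions` accepts any single-key object whose `writeMode` is a string or undefined, a bare literal is enough when creating a table; the connection `con` and the data are assumed:

```ts
await con.createTable("vectors", [{ id: 1 }], {
  writeMode: WriteMode.Overwrite,
});
```
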
/**
@@ -1350,15 +1327,15 @@ export enum MetricType {
/**
* Euclidean distance
*/
L2 = "l2",
L2 = 'l2',
/**
* Cosine distance
*/
Cosine = "cosine",
Cosine = 'cosine',
/**
* Dot product
*/
Dot = "dot",
Dot = 'dot',
}
@@ -51,7 +51,7 @@ describe('LanceDB Mirrored Store Integration test', function () {
const dir = tmpdir()
console.log(dir)
const conn = await lancedb.connect({ uri: `s3://lancedb-integtest?mirroredStore=${dir}`, storageOptions: { allowHttp: 'true' } })
const conn = await lancedb.connect(`s3://lancedb-integtest?mirroredStore=${dir}`)
const data = Array(200).fill({ vector: Array(128).fill(1.0), id: 0 })
data.push(...Array(200).fill({ vector: Array(128).fill(1.0), id: 1 }))
data.push(...Array(200).fill({ vector: Array(128).fill(1.0), id: 2 }))
@@ -140,9 +140,6 @@ export class RemoteConnection implements Connection {
schema = nameOrOpts.schema
embeddings = nameOrOpts.embeddingFunction
tableName = nameOrOpts.name
if (data === undefined) {
data = nameOrOpts.data
}
}
let buffer: Buffer
@@ -32,7 +32,7 @@ import {
Bool,
Date_,
Decimal,
type DataType,
DataType,
Dictionary,
Binary,
Float32,
@@ -74,12 +74,12 @@ import {
DurationNanosecond,
DurationMicrosecond,
DurationMillisecond,
DurationSecond
DurationSecond,
} from "apache-arrow";
import type { IntBitWidth, TimeBitWidth } from "apache-arrow/type";
function sanitizeMetadata(
metadataLike?: unknown
metadataLike?: unknown,
): Map<string, string> | undefined {
if (metadataLike === undefined || metadataLike === null) {
return undefined;
@@ -90,7 +90,7 @@ function sanitizeMetadata(
for (const item of metadataLike) {
if (typeof item[0] !== "string" || typeof item[1] !== "string") {
throw Error(
"Expected metadata, if present, to be a Map<string, string> but it had non-string keys or values"
"Expected metadata, if present, to be a Map<string, string> but it had non-string keys or values",
);
}
}
@@ -105,7 +105,7 @@ function sanitizeInt(typeLike: object) {
typeof typeLike.isSigned !== "boolean"
) {
throw Error(
"Expected an Int Type to have a `bitWidth` and `isSigned` property"
"Expected an Int Type to have a `bitWidth` and `isSigned` property",
);
}
return new Int(typeLike.isSigned, typeLike.bitWidth as IntBitWidth);
@@ -128,7 +128,7 @@ function sanitizeDecimal(typeLike: object) {
typeof typeLike.bitWidth !== "number"
) {
throw Error(
"Expected a Decimal Type to have `scale`, `precision`, and `bitWidth` properties"
"Expected a Decimal Type to have `scale`, `precision`, and `bitWidth` properties",
);
}
return new Decimal(typeLike.scale, typeLike.precision, typeLike.bitWidth);
@@ -149,7 +149,7 @@ function sanitizeTime(typeLike: object) {
typeof typeLike.bitWidth !== "number"
) {
throw Error(
"Expected a Time type to have `unit` and `bitWidth` properties"
"Expected a Time type to have `unit` and `bitWidth` properties",
);
}
return new Time(typeLike.unit, typeLike.bitWidth as TimeBitWidth);
@@ -172,7 +172,7 @@ function sanitizeTypedTimestamp(
| typeof TimestampNanosecond
| typeof TimestampMicrosecond
| typeof TimestampMillisecond
| typeof TimestampSecond
| typeof TimestampSecond,
) {
let timezone = null;
if ("timezone" in typeLike && typeof typeLike.timezone === "string") {
@@ -191,7 +191,7 @@ function sanitizeInterval(typeLike: object) {
function sanitizeList(typeLike: object) {
if (!("children" in typeLike) || !Array.isArray(typeLike.children)) {
throw Error(
"Expected a List type to have an array-like `children` property"
"Expected a List type to have an array-like `children` property",
);
}
if (typeLike.children.length !== 1) {
@@ -203,7 +203,7 @@ function sanitizeList(typeLike: object) {
function sanitizeStruct(typeLike: object) {
if (!("children" in typeLike) || !Array.isArray(typeLike.children)) {
throw Error(
"Expected a Struct type to have an array-like `children` property"
"Expected a Struct type to have an array-like `children` property",
);
}
return new Struct(typeLike.children.map((child) => sanitizeField(child)));
@@ -216,47 +216,47 @@ function sanitizeUnion(typeLike: object) {
typeof typeLike.mode !== "number"
) {
throw Error(
"Expected a Union type to have `typeIds` and `mode` properties"
"Expected a Union type to have `typeIds` and `mode` properties",
);
}
if (!("children" in typeLike) || !Array.isArray(typeLike.children)) {
throw Error(
"Expected a Union type to have an array-like `children` property"
"Expected a Union type to have an array-like `children` property",
);
}
return new Union(
typeLike.mode,
typeLike.typeIds as any,
typeLike.children.map((child) => sanitizeField(child))
typeLike.children.map((child) => sanitizeField(child)),
);
}
function sanitizeTypedUnion(
typeLike: object,
UnionType: typeof DenseUnion | typeof SparseUnion
UnionType: typeof DenseUnion | typeof SparseUnion,
) {
if (!("typeIds" in typeLike)) {
throw Error(
"Expected a DenseUnion/SparseUnion type to have a `typeIds` property"
"Expected a DenseUnion/SparseUnion type to have a `typeIds` property",
);
}
if (!("children" in typeLike) || !Array.isArray(typeLike.children)) {
throw Error(
"Expected a DenseUnion/SparseUnion type to have an array-like `children` property"
"Expected a DenseUnion/SparseUnion type to have an array-like `children` property",
);
}
return new UnionType(
typeLike.typeIds as any,
typeLike.children.map((child) => sanitizeField(child))
typeLike.children.map((child) => sanitizeField(child)),
);
}
function sanitizeFixedSizeBinary(typeLike: object) {
if (!("byteWidth" in typeLike) || typeof typeLike.byteWidth !== "number") {
throw Error(
"Expected a FixedSizeBinary type to have a `byteWidth` property"
"Expected a FixedSizeBinary type to have a `byteWidth` property",
);
}
return new FixedSizeBinary(typeLike.byteWidth);
@@ -268,7 +268,7 @@ function sanitizeFixedSizeList(typeLike: object) {
}
if (!("children" in typeLike) || !Array.isArray(typeLike.children)) {
throw Error(
"Expected a FixedSizeList type to have an array-like `children` property"
"Expected a FixedSizeList type to have an array-like `children` property",
);
}
if (typeLike.children.length !== 1) {
@@ -276,14 +276,14 @@ function sanitizeFixedSizeList(typeLike: object) {
}
return new FixedSizeList(
typeLike.listSize,
sanitizeField(typeLike.children[0])
sanitizeField(typeLike.children[0]),
);
}
function sanitizeMap(typeLike: object) {
if (!("children" in typeLike) || !Array.isArray(typeLike.children)) {
throw Error(
"Expected a Map type to have an array-like `children` property"
"Expected a Map type to have an array-like `children` property",
);
}
if (!("keysSorted" in typeLike) || typeof typeLike.keysSorted !== "boolean") {
@@ -291,7 +291,7 @@ function sanitizeMap(typeLike: object) {
}
return new Map_(
typeLike.children.map((field) => sanitizeField(field)) as any,
typeLike.keysSorted
typeLike.keysSorted,
);
}
@@ -319,7 +319,7 @@ function sanitizeDictionary(typeLike: object) {
sanitizeType(typeLike.dictionary),
sanitizeType(typeLike.indices) as any,
typeLike.id,
typeLike.isOrdered
typeLike.isOrdered,
);
}
@@ -454,7 +454,7 @@ function sanitizeField(fieldLike: unknown): Field {
!("nullable" in fieldLike)
) {
throw Error(
"The field passed in is missing a `type`/`name`/`nullable` property"
"The field passed in is missing a `type`/`name`/`nullable` property",
);
}
const type = sanitizeType(fieldLike.type);
@@ -489,7 +489,7 @@ export function sanitizeSchema(schemaLike: unknown): Schema {
}
if (!("fields" in schemaLike)) {
throw Error(
"The schema passed in does not appear to be a schema (no 'fields' property)"
"The schema passed in does not appear to be a schema (no 'fields' property)",
);
}
let metadata;
@@ -498,11 +498,11 @@ export function sanitizeSchema(schemaLike: unknown): Schema {
}
if (!Array.isArray(schemaLike.fields)) {
throw Error(
"The schema passed in had a 'fields' property but it was not an array"
"The schema passed in had a 'fields' property but it was not an array",
);
}
const sanitizedFields = schemaLike.fields.map((field) =>
sanitizeField(field)
sanitizeField(field),
);
return new Schema(sanitizedFields, metadata);
}
File diff suppressed because it is too large.

nodejs/.eslintignore Normal file
@@ -0,0 +1,3 @@
**/dist/**/*
**/native.js
**/native.d.ts
nodejs/.gitignore vendored
@@ -1 +0,0 @@
yarn.lock
nodejs/.prettierignore Symbolic link
@@ -0,0 +1 @@
.eslintignore
@@ -43,20 +43,29 @@ npm run test
### Running lint / format
LanceDB uses [biome](https://biomejs.dev/) for linting and formatting. If you are using VSCode you will need to install the official [Biome](https://marketplace.visualstudio.com/items?itemName=biomejs.biome) extension.
To manually lint your code you can run:
LanceDB uses eslint for linting. VSCode does not need any plugins to use eslint. However, it
may need some additional configuration. Make sure that `eslint.experimental.useFlatConfig` is
set to true. Also, if your VSCode root folder is the repo root then you will need to set
`eslint.workingDirectories` to `["nodejs"]`. To manually lint your code you can run:
```sh
npm run lint
```
to automatically fix all fixable issues:
LanceDB uses prettier for formatting. If you are using VSCode you will need to install the
"Prettier - Code formatter" extension. You should then configure it to be the default formatter
for TypeScript and enable format on save. To manually check your code's format you
can run:
```sh
npm run lint-fix
npm run chkformat
```
If you do not have your workspace root set to the `nodejs` directory, unfortunately the extension will not work. You can still run the linting and formatting commands manually.
If you need to manually format your code you can run:
```sh
npx prettier --write .
```
### Generating docs
@@ -13,26 +13,32 @@
// limitations under the License.
import {
Binary,
Bool,
DataType,
Dictionary,
convertToTable,
fromTableToBuffer,
makeArrowTable,
makeEmptyTable,
} from "../dist/arrow";
import {
Field,
FixedSizeList,
Float,
Float16,
Float32,
Float64,
Int32,
Int64,
List,
MetadataVersion,
Precision,
Schema,
Struct,
type Table,
Utf8,
tableFromIPC,
Schema,
Float64,
type Table,
Binary,
Bool,
Utf8,
Struct,
List,
DataType,
Dictionary,
Int64,
Float,
Precision,
MetadataVersion,
} from "apache-arrow";
import {
Dictionary as OldDictionary,
@@ -40,20 +46,14 @@ import {
FixedSizeList as OldFixedSizeList,
Float32 as OldFloat32,
Int32 as OldInt32,
Schema as OldSchema,
Struct as OldStruct,
Schema as OldSchema,
TimestampNanosecond as OldTimestampNanosecond,
Utf8 as OldUtf8,
} from "apache-arrow-old";
import {
convertToTable,
fromTableToBuffer,
makeArrowTable,
makeEmptyTable,
} from "../lancedb/arrow";
import { type EmbeddingFunction } from "../lancedb/embedding/embedding_function";
import { type EmbeddingFunction } from "../dist/embedding/embedding_function";
// biome-ignore lint/suspicious/noExplicitAny: skip
// eslint-disable-next-line @typescript-eslint/no-explicit-any
function sampleRecords(): Array<Record<string, any>> {
return [
{
@@ -438,7 +438,7 @@ describe("when using two versions of arrow", function () {
new OldField("ts_no_tz", new OldTimestampNanosecond(null)),
]),
),
// biome-ignore lint/suspicious/noExplicitAny: skip
// eslint-disable-next-line @typescript-eslint/no-explicit-any
]) as any;
schema.metadataVersion = MetadataVersion.V5;
const table = makeArrowTable([], { schema });
@@ -14,13 +14,11 @@
import * as tmp from "tmp";
import { Connection, connect } from "../lancedb";
import { Connection, connect } from "../dist/index.js";
describe("when connecting", () => {
let tmpDir: tmp.DirResult;
beforeEach(() => {
tmpDir = tmp.dirSync({ unsafeCleanup: true });
});
beforeEach(() => (tmpDir = tmp.dirSync({ unsafeCleanup: true })));
afterEach(() => tmpDir.removeCallback());
it("should connect", async () => {
@@ -14,11 +14,7 @@
/* eslint-disable @typescript-eslint/naming-convention */
import {
CreateKeyCommand,
KMSClient,
ScheduleKeyDeletionCommand,
} from "@aws-sdk/client-kms";
import { connect } from "../dist";
import {
CreateBucketCommand,
DeleteBucketCommand,
@@ -27,7 +23,11 @@ import {
ListObjectsV2Command,
S3Client,
} from "@aws-sdk/client-s3";
import { connect } from "../lancedb";
import {
CreateKeyCommand,
ScheduleKeyDeletionCommand,
KMSClient,
} from "@aws-sdk/client-kms";
// Skip these tests unless the S3_TEST environment variable is set
const maybeDescribe = process.env.S3_TEST ? describe : describe.skip;
@@ -63,10 +63,9 @@ class S3Bucket {
// Delete the bucket if it already exists
try {
await this.deleteBucket(client, name);
} catch {
} catch (e) {
// It's fine if the bucket doesn't exist
}
// biome-ignore lint/style/useNamingConvention: we dont control s3's api
await client.send(new CreateBucketCommand({ Bucket: name }));
return new S3Bucket(name);
}
@@ -79,32 +78,27 @@ class S3Bucket {
static async deleteBucket(client: S3Client, name: string) {
// Must delete all objects before we can delete the bucket
const objects = await client.send(
// biome-ignore lint/style/useNamingConvention: we dont control s3's api
new ListObjectsV2Command({ Bucket: name }),
);
if (objects.Contents) {
for (const object of objects.Contents) {
await client.send(
// biome-ignore lint/style/useNamingConvention: we dont control s3's api
new DeleteObjectCommand({ Bucket: name, Key: object.Key }),
);
}
}
// biome-ignore lint/style/useNamingConvention: we dont control s3's api
await client.send(new DeleteBucketCommand({ Bucket: name }));
}
public async assertAllEncrypted(path: string, keyId: string) {
const client = S3Bucket.s3Client();
const objects = await client.send(
// biome-ignore lint/style/useNamingConvention: we dont control s3's api
new ListObjectsV2Command({ Bucket: this.name, Prefix: path }),
);
if (objects.Contents) {
for (const object of objects.Contents) {
const metadata = await client.send(
// biome-ignore lint/style/useNamingConvention: we dont control s3's api
new HeadObjectCommand({ Bucket: this.name, Key: object.Key }),
);
expect(metadata.ServerSideEncryption).toBe("aws:kms");
@@ -143,7 +137,6 @@ class KmsKey {
public async delete() {
const client = KmsKey.kmsClient();
// biome-ignore lint/style/useNamingConvention: we dont control s3's api
await client.send(new ScheduleKeyDeletionCommand({ KeyId: this.keyId }));
}
}
@@ -16,18 +16,18 @@ import * as fs from "fs";
import * as path from "path";
import * as tmp from "tmp";
import { Table, connect } from "../dist";
import {
Field,
FixedSizeList,
Float32,
Float64,
Int32,
Int64,
Schema,
Field,
Float32,
Int32,
FixedSizeList,
Int64,
Float64,
} from "apache-arrow";
import { Table, connect } from "../lancedb";
import { makeArrowTable } from "../lancedb/arrow";
import { Index } from "../lancedb/indices";
import { makeArrowTable } from "../dist/arrow";
import { Index } from "../dist/indices";
describe("Given a table", () => {
let tmpDir: tmp.DirResult;
@@ -419,31 +419,3 @@ describe("when dealing with versioning", () => {
);
});
});
describe("when optimizing a dataset", () => {
let tmpDir: tmp.DirResult;
let table: Table;
beforeEach(async () => {
tmpDir = tmp.dirSync({ unsafeCleanup: true });
const con = await connect(tmpDir.name);
table = await con.createTable("vectors", [{ id: 1 }]);
await table.add([{ id: 2 }]);
});
afterEach(() => {
tmpDir.removeCallback();
});
it("compacts files", async () => {
const stats = await table.optimize();
expect(stats.compaction.filesAdded).toBe(1);
expect(stats.compaction.filesRemoved).toBe(2);
expect(stats.compaction.fragmentsAdded).toBe(1);
expect(stats.compaction.fragmentsRemoved).toBe(2);
});
it("cleanups old versions", async () => {
const stats = await table.optimize({ cleanupOlderThan: new Date() });
expect(stats.prune.bytesRemoved).toBeGreaterThan(0);
expect(stats.prune.oldVersionsRemoved).toBe(3);
});
});
@@ -1,136 +0,0 @@
{
"$schema": "https://biomejs.dev/schemas/1.7.3/schema.json",
"organizeImports": {
"enabled": true
},
"files": {
"ignore": [
"**/dist/**/*",
"**/native.js",
"**/native.d.ts",
"**/npm/**/*",
"**/.vscode/**"
]
},
"formatter": {
"indentStyle": "space"
},
"linter": {
"enabled": true,
"rules": {
"recommended": false,
"complexity": {
"noBannedTypes": "error",
"noExtraBooleanCast": "error",
"noMultipleSpacesInRegularExpressionLiterals": "error",
"noUselessCatch": "error",
"noUselessThisAlias": "error",
"noUselessTypeConstraint": "error",
"noWith": "error"
},
"correctness": {
"noConstAssign": "error",
"noConstantCondition": "error",
"noEmptyCharacterClassInRegex": "error",
"noEmptyPattern": "error",
"noGlobalObjectCalls": "error",
"noInnerDeclarations": "error",
"noInvalidConstructorSuper": "error",
"noNewSymbol": "error",
"noNonoctalDecimalEscape": "error",
"noPrecisionLoss": "error",
"noSelfAssign": "error",
"noSetterReturn": "error",
"noSwitchDeclarations": "error",
"noUndeclaredVariables": "error",
"noUnreachable": "error",
"noUnreachableSuper": "error",
"noUnsafeFinally": "error",
"noUnsafeOptionalChaining": "error",
"noUnusedLabels": "error",
"noUnusedVariables": "error",
"useIsNan": "error",
"useValidForDirection": "error",
"useYield": "error"
},
"style": {
"noNamespace": "error",
"useAsConstAssertion": "error",
"useBlockStatements": "off",
"useNamingConvention": {
"level": "error",
"options": {
"strictCase": false
}
}
},
"suspicious": {
"noAssignInExpressions": "error",
"noAsyncPromiseExecutor": "error",
"noCatchAssign": "error",
"noClassAssign": "error",
"noCompareNegZero": "error",
"noControlCharactersInRegex": "error",
"noDebugger": "error",
"noDuplicateCase": "error",
"noDuplicateClassMembers": "error",
"noDuplicateObjectKeys": "error",
"noDuplicateParameters": "error",
"noEmptyBlockStatements": "error",
"noExplicitAny": "error",
"noExtraNonNullAssertion": "error",
"noFallthroughSwitchClause": "error",
"noFunctionAssign": "error",
"noGlobalAssign": "error",
"noImportAssign": "error",
"noMisleadingCharacterClass": "error",
"noMisleadingInstantiator": "error",
"noPrototypeBuiltins": "error",
"noRedeclare": "error",
"noShadowRestrictedNames": "error",
"noUnsafeDeclarationMerging": "error",
"noUnsafeNegation": "error",
"useGetterReturn": "error",
"useValidTypeof": "error"
}
},
"ignore": ["**/dist/**/*", "**/native.js", "**/native.d.ts"]
},
"javascript": {
"globals": []
},
"overrides": [
{
"include": ["**/*.ts", "**/*.tsx", "**/*.mts", "**/*.cts"],
"linter": {
"rules": {
"correctness": {
"noConstAssign": "off",
"noGlobalObjectCalls": "off",
"noInvalidConstructorSuper": "off",
"noNewSymbol": "off",
"noSetterReturn": "off",
"noUndeclaredVariables": "off",
"noUnreachable": "off",
"noUnreachableSuper": "off"
},
"style": {
"noArguments": "error",
"noVar": "error",
"useConst": "error"
},
"suspicious": {
"noDuplicateClassMembers": "off",
"noDuplicateObjectKeys": "off",
"noDuplicateParameters": "off",
"noFunctionAssign": "off",
"noImportAssign": "off",
"noRedeclare": "off",
"noUnsafeNegation": "off",
"useGetterReturn": "off"
}
}
}
}
]
}

28
nodejs/eslint.config.js Normal file
View File

@@ -0,0 +1,28 @@
/* eslint-disable @typescript-eslint/naming-convention */
// @ts-check
const eslint = require("@eslint/js");
const tseslint = require("typescript-eslint");
const eslintConfigPrettier = require("eslint-config-prettier");
const jsdoc = require("eslint-plugin-jsdoc");
module.exports = tseslint.config(
eslint.configs.recommended,
jsdoc.configs["flat/recommended"],
eslintConfigPrettier,
...tseslint.configs.recommended,
{
rules: {
"@typescript-eslint/naming-convention": "error",
"jsdoc/require-returns": "off",
"jsdoc/require-param": "off",
"jsdoc/require-jsdoc": [
"error",
{
publicOnly: true,
},
],
},
plugins: jsdoc,
},
);

View File

@@ -13,25 +13,25 @@
// limitations under the License.
import {
Table as ArrowTable,
Binary,
DataType,
Field,
FixedSizeList,
type Float,
Float32,
List,
RecordBatch,
makeBuilder,
RecordBatchFileWriter,
RecordBatchStreamWriter,
Schema,
Struct,
Utf8,
type Vector,
makeBuilder,
makeData,
type makeTable,
FixedSizeList,
vectorFromArray,
type Schema,
Table as ArrowTable,
RecordBatchStreamWriter,
List,
RecordBatch,
makeData,
Struct,
type Float,
DataType,
Binary,
Float32,
type makeTable,
} from "apache-arrow";
import { type EmbeddingFunction } from "./embedding/embedding_function";
import { sanitizeSchema } from "./sanitize";
@@ -85,7 +85,6 @@ export class MakeArrowTableOptions {
vectorColumns: Record<string, VectorColumnOptions> = {
vector: new VectorColumnOptions(),
};
embeddings?: EmbeddingFunction<unknown>;
/**
* If true then string columns will be encoded with dictionary encoding
@@ -209,7 +208,6 @@ export function makeArrowTable(
const opt = new MakeArrowTableOptions(options !== undefined ? options : {});
if (opt.schema !== undefined && opt.schema !== null) {
opt.schema = sanitizeSchema(opt.schema);
opt.schema = validateSchemaEmbeddings(opt.schema, data, opt.embeddings);
}
const columns: Record<string, Vector> = {};
// TODO: sample dataset to find missing columns
@@ -289,8 +287,8 @@ export function makeArrowTable(
// then patch the schema of the batches so we can use
// `new ArrowTable(schema, batches)` which does not do any schema inference
const firstTable = new ArrowTable(columns);
// eslint-disable-next-line @typescript-eslint/no-non-null-assertion
const batchesFixed = firstTable.batches.map(
// eslint-disable-next-line @typescript-eslint/no-non-null-assertion
(batch) => new RecordBatch(opt.schema!, batch.data),
);
return new ArrowTable(opt.schema, batchesFixed);
@@ -315,7 +313,7 @@ function makeListVector(lists: unknown[][]): Vector<unknown> {
throw Error("Cannot infer list vector from empty array or empty list");
}
const sampleList = lists[0];
// biome-ignore lint/suspicious/noExplicitAny: skip
// eslint-disable-next-line @typescript-eslint/no-explicit-any
let inferredType: any;
try {
const sampleVector = makeVector(sampleList);
@@ -339,7 +337,7 @@ function makeVector(
values: unknown[],
type?: DataType,
stringAsDictionary?: boolean,
// biome-ignore lint/suspicious/noExplicitAny: skip
// eslint-disable-next-line @typescript-eslint/no-explicit-any
): Vector<any> {
if (type !== undefined) {
// No need for inference, let Arrow create it
@@ -650,39 +648,3 @@ function alignTable(table: ArrowTable, schema: Schema): ArrowTable {
export function createEmptyTable(schema: Schema): ArrowTable {
return new ArrowTable(sanitizeSchema(schema));
}
function validateSchemaEmbeddings(
schema: Schema,
data: Array<Record<string, unknown>>,
embeddings: EmbeddingFunction<unknown> | undefined,
) {
const fields = [];
const missingEmbeddingFields = [];
// First we check if the field is a `FixedSizeList`
// Then we check if the data contains the field
// if it does not, we add it to the list of missing embedding fields
// Finally, we check whether those missing embedding fields are covered
// by the provided embedding function; if not, we throw an error
for (const field of schema.fields) {
if (field.type instanceof FixedSizeList) {
if (data.length !== 0 && data?.[0]?.[field.name] === undefined) {
missingEmbeddingFields.push(field);
} else {
fields.push(field);
}
} else {
fields.push(field);
}
}
if (missingEmbeddingFields.length > 0 && embeddings === undefined) {
throw new Error(
`Table has embeddings: "${missingEmbeddingFields
.map((f) => f.name)
.join(",")}", but no embedding function was provided`,
);
}
return new Schema(fields, schema.metadata);
}

View File

@@ -12,10 +12,10 @@
// See the License for the specific language governing permissions and
// limitations under the License.
import { Table as ArrowTable, Schema } from "apache-arrow";
import { fromTableToBuffer, makeArrowTable, makeEmptyTable } from "./arrow";
import { ConnectionOptions, Connection as LanceDbConnection } from "./native";
import { Table } from "./table";
import { Table as ArrowTable, Schema } from "apache-arrow";
/**
* Connect to a LanceDB instance at the given URI.
@@ -77,18 +77,6 @@ export interface OpenTableOptions {
* The available options are described at https://lancedb.github.io/lancedb/guides/storage/
*/
storageOptions?: Record<string, string>;
/**
* Set the size of the index cache, specified as a number of entries
*
* The exact meaning of an "entry" will depend on the type of index:
* - IVF: there is one entry for each IVF partition
* - BTREE: there is one entry for the entire index
*
* This cache applies to the entire opened table, across all indices.
* Setting this value higher will increase performance on larger datasets
* at the expense of more RAM
*/
indexCacheSize?: number;
}
export interface TableNamesOptions {
@@ -172,7 +160,6 @@ export class Connection {
const innerTable = await this.inner.openTable(
name,
cleanseStorageOptions(options?.storageOptions),
options?.indexCacheSize,
);
return new Table(innerTable);
}

View File

@@ -12,8 +12,8 @@
// See the License for the specific language governing permissions and
// limitations under the License.
import type OpenAI from "openai";
import { type EmbeddingFunction } from "./embedding_function";
import type OpenAI from "openai";
export class OpenAIEmbeddingFunction implements EmbeddingFunction<string> {
private readonly _openai: OpenAI;

View File

@@ -12,14 +12,14 @@
// See the License for the specific language governing permissions and
// limitations under the License.
import { Table as ArrowTable, RecordBatch, tableFromIPC } from "apache-arrow";
import { type IvfPqOptions } from "./indices";
import { RecordBatch, tableFromIPC, Table as ArrowTable } from "apache-arrow";
import {
RecordBatchIterator as NativeBatchIterator,
Query as NativeQuery,
Table as NativeTable,
VectorQuery as NativeVectorQuery,
} from "./native";
import { type IvfPqOptions } from "./indices";
export class RecordBatchIterator implements AsyncIterator<RecordBatch> {
private promisedInner?: Promise<NativeBatchIterator>;
private inner?: NativeBatchIterator;
@@ -29,7 +29,7 @@ export class RecordBatchIterator implements AsyncIterator<RecordBatch> {
this.promisedInner = promise;
}
// biome-ignore lint/suspicious/noExplicitAny: skip
// eslint-disable-next-line @typescript-eslint/no-explicit-any
async next(): Promise<IteratorResult<RecordBatch<any>>> {
if (this.inner === undefined) {
this.inner = await this.promisedInner;
@@ -56,9 +56,7 @@ export class QueryBase<
QueryType,
> implements AsyncIterable<RecordBatch>
{
protected constructor(protected inner: NativeQueryType) {
// intentionally empty
}
protected constructor(protected inner: NativeQueryType) {}
/**
* A filter statement to be applied to this query.
@@ -152,7 +150,7 @@ export class QueryBase<
return new RecordBatchIterator(this.nativeExecute());
}
// biome-ignore lint/suspicious/noExplicitAny: skip
// eslint-disable-next-line @typescript-eslint/no-explicit-any
[Symbol.asyncIterator](): AsyncIterator<RecordBatch<any>> {
const promise = this.nativeExecute();
return new RecordBatchIterator(promise);
@@ -370,7 +368,7 @@ export class Query extends QueryBase<NativeQuery, Query> {
* a default `limit` of 10 will be used. @see {@link Query#limit}
*/
nearestTo(vector: unknown): VectorQuery {
// biome-ignore lint/suspicious/noExplicitAny: skip
// eslint-disable-next-line @typescript-eslint/no-explicit-any
const vectorQuery = this.inner.nearestTo(Float32Array.from(vector as any));
return new VectorQuery(vectorQuery);
}

View File

@@ -21,60 +21,60 @@
// and so we must sanitize the input to ensure that it is compatible.
import {
Binary,
Bool,
DataType,
DateDay,
DateMillisecond,
type DateUnit,
Date_,
Decimal,
DenseUnion,
Dictionary,
Duration,
DurationMicrosecond,
DurationMillisecond,
DurationNanosecond,
DurationSecond,
Field,
Utf8,
FixedSizeBinary,
FixedSizeList,
Schema,
List,
Struct,
Float,
Float16,
Bool,
Date_,
Decimal,
DataType,
Dictionary,
Binary,
Float32,
Float64,
Interval,
Map_,
Duration,
Union,
Time,
Timestamp,
Type,
Null,
Int,
type Precision,
type DateUnit,
Int8,
Int16,
Int32,
Int64,
Interval,
IntervalDayTime,
IntervalYearMonth,
List,
Map_,
Null,
type Precision,
Schema,
SparseUnion,
Struct,
Time,
TimeMicrosecond,
TimeMillisecond,
TimeNanosecond,
TimeSecond,
Timestamp,
TimestampMicrosecond,
TimestampMillisecond,
TimestampNanosecond,
TimestampSecond,
Type,
Uint8,
Uint16,
Uint32,
Uint64,
Union,
Utf8,
Float16,
Float64,
DateDay,
DateMillisecond,
DenseUnion,
SparseUnion,
TimeNanosecond,
TimeMicrosecond,
TimeMillisecond,
TimeSecond,
TimestampNanosecond,
TimestampMicrosecond,
TimestampMillisecond,
TimestampSecond,
IntervalDayTime,
IntervalYearMonth,
DurationNanosecond,
DurationMicrosecond,
DurationMillisecond,
DurationSecond,
} from "apache-arrow";
import type { IntBitWidth, TKeys, TimeBitWidth } from "apache-arrow/type";
@@ -228,7 +228,7 @@ function sanitizeUnion(typeLike: object) {
return new Union(
typeLike.mode,
// biome-ignore lint/suspicious/noExplicitAny: skip
// eslint-disable-next-line @typescript-eslint/no-explicit-any
typeLike.typeIds as any,
typeLike.children.map((child) => sanitizeField(child)),
);
@@ -294,7 +294,7 @@ function sanitizeMap(typeLike: object) {
}
return new Map_(
// biome-ignore lint/suspicious/noExplicitAny: skip
// eslint-disable-next-line @typescript-eslint/no-explicit-any
typeLike.children.map((field) => sanitizeField(field)) as any,
typeLike.keysSorted,
);
@@ -328,7 +328,7 @@ function sanitizeDictionary(typeLike: object) {
);
}
// biome-ignore lint/suspicious/noExplicitAny: skip
// eslint-disable-next-line @typescript-eslint/no-explicit-any
function sanitizeType(typeLike: unknown): DataType<any> {
if (typeof typeLike !== "object" || typeLike === null) {
throw Error("Expected a Type but object was null/undefined");

View File

@@ -13,16 +13,15 @@
// limitations under the License.
import { Schema, tableFromIPC } from "apache-arrow";
import { Data, fromDataToBuffer } from "./arrow";
import { IndexOptions } from "./indices";
import {
AddColumnsSql,
ColumnAlteration,
IndexConfig,
OptimizeStats,
Table as _NativeTable,
} from "./native";
import { Query, VectorQuery } from "./query";
import { IndexOptions } from "./indices";
import { Data, fromDataToBuffer } from "./arrow";
export { IndexConfig } from "./native";
/**
@@ -51,23 +50,6 @@ export interface UpdateOptions {
where: string;
}
export interface OptimizeOptions {
/**
* If set then all versions older than the given date will
* be removed. The current version will never be removed.
* The default is 7 days
* @example
* // Delete all versions older than 1 day
* const olderThan = new Date();
* olderThan.setDate(olderThan.getDate() - 1);
* tbl.cleanupOlderVersions(olderThan);
*
* // Delete all versions except the current version
* tbl.cleanupOlderVersions(new Date());
*/
cleanupOlderThan: Date;
}
/**
* A Table is a collection of Records in a LanceDB Database.
*
@@ -187,24 +169,21 @@ export class Table {
* // If the column has a vector (fixed size list) data type then
* // an IvfPq vector index will be created.
* const table = await conn.openTable("my_table");
* await table.createIndex("vector");
* await table.createIndex(["vector"]);
* @example
* // For advanced control over vector index creation you can specify
* // the index type and options.
* const table = await conn.openTable("my_table");
* await table.createIndex("vector", {
* config: lancedb.Index.ivfPq({
* numPartitions: 128,
* numSubVectors: 16,
* }),
* });
* await table.createIndex(["vector"], I)
* .ivf_pq({ num_partitions: 128, num_sub_vectors: 16 })
* .build();
* @example
* // Or create a Scalar index
* await table.createIndex("my_float_col");
* await table.createIndex("my_float_col").build();
*/
async createIndex(column: string, options?: Partial<IndexOptions>) {
// Bit of a hack to get around the fact that TS has no package-scope.
// biome-ignore lint/suspicious/noExplicitAny: skip
// eslint-disable-next-line @typescript-eslint/no-explicit-any
const nativeIndex = (options?.config as any)?.inner;
await this.inner.createIndex(nativeIndex, column, options?.replace);
}
@@ -218,7 +197,8 @@ export class Table {
* vector similarity, sorting, and more.
*
* Note: By default, all columns are returned. For best performance, you should
* only fetch the columns you need.
* only fetch the columns you need. See [`Query::select_with_projection`] for
* more details.
*
* When appropriate, various indices and statistics based pruning will be used to
* accelerate the query.
@@ -226,13 +206,10 @@ export class Table {
* // SQL-style filtering
* //
* // This query will return up to 1000 rows whose value in the `id` column
* // is greater than 5. LanceDb supports a broad set of filtering functions.
* for await (const batch of table
* .query()
* .where("id > 1")
* .select(["id"])
* .limit(20)) {
* console.log(batch);
* // is greater than 5. LanceDb supports a broad set of filtering functions.
* for await (const batch of table.query()
* .filter("id > 1").select(["id"]).limit(20)) {
* console.log(batch);
* }
* @example
* // Vector Similarity Search
@@ -241,14 +218,13 @@ export class Table {
* // closest to the query vector [1.0, 2.0, 3.0]. If an index has been created
* // on the "vector" column then this will perform an ANN search.
* //
* // The `refineFactor` and `nprobes` methods are used to control the recall /
* // The `refine_factor` and `nprobes` methods are used to control the recall /
* // latency tradeoff of the search.
* for await (const batch of table
* .query()
* .where("id > 1")
* .select(["id"])
* .limit(20)) {
* console.log(batch);
* for await (const batch of table.query()
* .nearestTo([1, 2, 3])
* .refineFactor(5).nprobe(10)
* .limit(10)) {
* console.log(batch);
* }
* @example
* // Scan the full dataset
@@ -310,45 +286,43 @@ export class Table {
await this.inner.dropColumns(columnNames);
}
/** Retrieve the version of the table */
/**
* Retrieve the version of the table
*
* LanceDb supports versioning. Every operation that modifies the table increases the
* version. As long as a version hasn't been deleted you can `[Self::checkout]` that
* version to view the data at that point. In addition, you can `[Self::restore]` the
* version to replace the current table with a previous version.
*/
async version(): Promise<number> {
return await this.inner.version();
}
/**
* Checks out a specific version of the table _This is an in-place operation._
* Checks out a specific version of the Table
*
* This allows viewing previous versions of the table. If you wish to
* keep writing to the dataset starting from an old version, then use
* the `restore` function.
* Any read operation on the table will now access the data at the checked out version.
* As a consequence, calling this method will disable any read consistency interval
* that was previously set.
*
* Calling this method will set the table into time-travel mode. If you
* wish to return to standard mode, call `checkoutLatest`.
* @param {number} version The version to checkout
* @example
* ```typescript
* import * as lancedb from "@lancedb/lancedb"
* const db = await lancedb.connect("./.lancedb");
* const table = await db.createTable("my_table", [
* { vector: [1.1, 0.9], type: "vector" },
* ]);
* This is a read-only operation that turns the table into a sort of "view"
* or "detached head". Other table instances will not be affected. To make the change
* permanent you can use the `[Self::restore]` method.
*
* console.log(await table.version()); // 1
* console.log(table.display());
* await table.add([{ vector: [0.5, 0.2], type: "vector" }]);
* await table.checkout(1);
* console.log(await table.version()); // 2
* ```
* Any operation that modifies the table will fail while the table is in a checked
* out state.
*
* To return the table to a normal state use `[Self::checkout_latest]`
*/
async checkout(version: number): Promise<void> {
await this.inner.checkout(version);
}
/**
* Checkout the latest version of the table. _This is an in-place operation._
* Ensures the table is pointing at the latest version
*
* The table will be set back into standard mode, and will track the latest
* version of the table.
* This can be used to manually update a table when the read_consistency_interval is None
* It can also be used to undo a `[Self::checkout]` operation
*/
async checkoutLatest(): Promise<void> {
await this.inner.checkoutLatest();
@@ -371,48 +345,8 @@ export class Table {
}
/**
* Optimize the on-disk data and indices for better performance.
*
* Modeled after ``VACUUM`` in PostgreSQL.
*
* Optimization covers three operations:
*
* - Compaction: Merges small files into larger ones
* - Prune: Removes old versions of the dataset
* - Index: Optimizes the indices, adding new data to existing indices
*
*
* Experimental API
* ----------------
*
* The optimization process is undergoing active development and may change.
* Our goal with these changes is to improve the performance of optimization and
* reduce the complexity.
*
* That being said, it is essential today to run optimize if you want the best
* performance. It should be stable and safe to use in production, but it is our
* hope that the API may be simplified (or not even need to be called) in the
* future.
*
* The frequency at which an application should call optimize depends on the frequency of
* data modifications. If data is frequently added, deleted, or updated then
* optimize should be run frequently. A good rule of thumb is to run optimize if
* you have added or modified 100,000 or more records or run more than 20 data
* modification operations.
* List all indices that have been created with Self::create_index
*/
async optimize(options?: Partial<OptimizeOptions>): Promise<OptimizeStats> {
let cleanupOlderThanMs;
if (
options?.cleanupOlderThan !== undefined &&
options?.cleanupOlderThan !== null
) {
cleanupOlderThanMs =
new Date().getTime() - options.cleanupOlderThan.getTime();
}
return await this.inner.optimize(cleanupOlderThanMs);
}
/** List all indices that have been created with {@link Table.createIndex} */
async listIndices(): Promise<IndexConfig[]> {
return await this.inner.listIndices();
}

View File

@@ -1,12 +1,18 @@
{
"name": "@lancedb/lancedb-darwin-arm64",
"version": "0.5.0",
"os": ["darwin"],
"cpu": ["arm64"],
"main": "lancedb.darwin-arm64.node",
"files": ["lancedb.darwin-arm64.node"],
"license": "Apache 2.0",
"engines": {
"node": ">= 18"
}
"name": "@lancedb/lancedb-darwin-arm64",
"version": "0.4.17",
"os": [
"darwin"
],
"cpu": [
"arm64"
],
"main": "lancedb.darwin-arm64.node",
"files": [
"lancedb.darwin-arm64.node"
],
"license": "Apache 2.0",
"engines": {
"node": ">= 18"
}
}

View File

@@ -1,12 +1,18 @@
{
"name": "@lancedb/lancedb-darwin-x64",
"version": "0.5.0",
"os": ["darwin"],
"cpu": ["x64"],
"main": "lancedb.darwin-x64.node",
"files": ["lancedb.darwin-x64.node"],
"license": "Apache 2.0",
"engines": {
"node": ">= 18"
}
"name": "@lancedb/lancedb-darwin-x64",
"version": "0.4.17",
"os": [
"darwin"
],
"cpu": [
"x64"
],
"main": "lancedb.darwin-x64.node",
"files": [
"lancedb.darwin-x64.node"
],
"license": "Apache 2.0",
"engines": {
"node": ">= 18"
}
}

View File

@@ -1,13 +1,21 @@
{
"name": "@lancedb/lancedb-linux-arm64-gnu",
"version": "0.5.0",
"os": ["linux"],
"cpu": ["arm64"],
"main": "lancedb.linux-arm64-gnu.node",
"files": ["lancedb.linux-arm64-gnu.node"],
"license": "Apache 2.0",
"engines": {
"node": ">= 18"
},
"libc": ["glibc"]
"name": "@lancedb/lancedb-linux-arm64-gnu",
"version": "0.4.17",
"os": [
"linux"
],
"cpu": [
"arm64"
],
"main": "lancedb.linux-arm64-gnu.node",
"files": [
"lancedb.linux-arm64-gnu.node"
],
"license": "Apache 2.0",
"engines": {
"node": ">= 18"
},
"libc": [
"glibc"
]
}

View File

@@ -1,13 +1,21 @@
{
"name": "@lancedb/lancedb-linux-x64-gnu",
"version": "0.5.0",
"os": ["linux"],
"cpu": ["x64"],
"main": "lancedb.linux-x64-gnu.node",
"files": ["lancedb.linux-x64-gnu.node"],
"license": "Apache 2.0",
"engines": {
"node": ">= 18"
},
"libc": ["glibc"]
"name": "@lancedb/lancedb-linux-x64-gnu",
"version": "0.4.17",
"os": [
"linux"
],
"cpu": [
"x64"
],
"main": "lancedb.linux-x64-gnu.node",
"files": [
"lancedb.linux-x64-gnu.node"
],
"license": "Apache 2.0",
"engines": {
"node": ">= 18"
},
"libc": [
"glibc"
]
}

View File

@@ -1,12 +1,18 @@
{
"name": "@lancedb/lancedb-win32-x64-msvc",
"version": "0.5.0",
"os": ["win32"],
"cpu": ["x64"],
"main": "lancedb.win32-x64-msvc.node",
"files": ["lancedb.win32-x64-msvc.node"],
"license": "Apache 2.0",
"engines": {
"node": ">= 18"
}
"name": "@lancedb/lancedb-win32-x64-msvc",
"version": "0.4.14",
"os": [
"win32"
],
"cpu": [
"x64"
],
"main": "lancedb.win32-x64-msvc.node",
"files": [
"lancedb.win32-x64-msvc.node"
],
"license": "Apache 2.0",
"engines": {
"node": ">= 18"
}
}

15661
nodejs/package-lock.json generated

File diff suppressed because it is too large

View File

@@ -1,6 +1,6 @@
{
"name": "@lancedb/lancedb",
"version": "0.5.0",
"version": "0.4.17",
"main": "./dist/index.js",
"types": "./dist/index.d.ts",
"napi": {
@@ -18,16 +18,19 @@
},
"license": "Apache 2.0",
"devDependencies": {
"@aws-sdk/client-kms": "^3.33.0",
"@aws-sdk/client-s3": "^3.33.0",
"@biomejs/biome": "^1.7.3",
"@jest/globals": "^29.7.0",
"@aws-sdk/client-kms": "^3.33.0",
"@napi-rs/cli": "^2.18.0",
"@types/jest": "^29.1.2",
"@types/tmp": "^0.2.6",
"@typescript-eslint/eslint-plugin": "^6.19.0",
"@typescript-eslint/parser": "^6.19.0",
"apache-arrow-old": "npm:apache-arrow@13.0.0",
"eslint": "^8.57.0",
"eslint-config-prettier": "^9.1.0",
"eslint-plugin-jsdoc": "^48.2.1",
"jest": "^29.7.0",
"prettier": "^3.1.0",
"shx": "^0.3.4",
"tmp": "^0.2.3",
"ts-jest": "^29.1.2",
@@ -42,26 +45,39 @@
"engines": {
"node": ">= 18"
},
"cpu": ["x64", "arm64"],
"os": ["darwin", "linux", "win32"],
"cpu": [
"x64",
"arm64"
],
"os": [
"darwin",
"linux",
"win32"
],
"scripts": {
"artifacts": "napi artifacts",
"build:debug": "napi build --platform --dts ../lancedb/native.d.ts --js ../lancedb/native.js lancedb",
"build:debug": "napi build --platform --dts ../lancedb/native.d.ts --js ../lancedb/native.js dist/",
"build:release": "napi build --platform --release --dts ../lancedb/native.d.ts --js ../lancedb/native.js dist/",
"build": "npm run build:debug && tsc -b && shx cp lancedb/native.d.ts dist/native.d.ts && shx cp lancedb/*.node dist/",
"build": "npm run build:debug && tsc -b && shx cp lancedb/native.d.ts dist/native.d.ts",
"build-release": "npm run build:release && tsc -b && shx cp lancedb/native.d.ts dist/native.d.ts",
"lint-ci": "biome ci .",
"chkformat": "prettier . --check",
"docs": "typedoc --plugin typedoc-plugin-markdown --out ../docs/src/js lancedb/index.ts",
"lint": "biome check . && biome format .",
"lint-fix": "biome check --apply-unsafe . && biome format --write .",
"lint": "eslint lancedb && eslint __test__",
"prepublishOnly": "napi prepublish -t npm",
"test": "jest --verbose",
"test": "npm run build && jest --verbose",
"integration": "S3_TEST=1 npm run test",
"universal": "napi universal",
"version": "napi version"
},
"optionalDependencies": {
"@lancedb/lancedb-darwin-arm64": "0.4.17",
"@lancedb/lancedb-darwin-x64": "0.4.17",
"@lancedb/lancedb-linux-arm64-gnu": "0.4.17",
"@lancedb/lancedb-linux-x64-gnu": "0.4.17",
"@lancedb/lancedb-win32-x64-msvc": "0.4.17"
},
"dependencies": {
"apache-arrow": "^15.0.0",
"openai": "^4.29.2"
"openai": "^4.29.2",
"apache-arrow": "^15.0.0"
}
}

View File

@@ -176,7 +176,6 @@ impl Connection {
&self,
name: String,
storage_options: Option<HashMap<String, String>>,
index_cache_size: Option<u32>,
) -> napi::Result<Table> {
let mut builder = self.get_inner()?.open_table(&name);
if let Some(storage_options) = storage_options {
@@ -184,9 +183,6 @@ impl Connection {
builder = builder.storage_option(key, value);
}
}
if let Some(index_cache_size) = index_cache_size {
builder = builder.index_cache_size(index_cache_size);
}
let tbl = builder
.execute()
.await

View File

@@ -15,8 +15,8 @@
use arrow_ipc::writer::FileWriter;
use lancedb::ipc::ipc_file_to_batches;
use lancedb::table::{
AddDataMode, ColumnAlteration as LanceColumnAlteration, Duration, NewColumnTransform,
OptimizeAction, OptimizeOptions, Table as LanceDbTable,
AddDataMode, ColumnAlteration as LanceColumnAlteration, NewColumnTransform,
Table as LanceDbTable,
};
use napi::bindgen_prelude::*;
use napi_derive::napi;
@@ -263,60 +263,6 @@ impl Table {
self.inner_ref()?.restore().await.default_error()
}
#[napi]
pub async fn optimize(&self, older_than_ms: Option<i64>) -> napi::Result<OptimizeStats> {
let inner = self.inner_ref()?;
let older_than = if let Some(ms) = older_than_ms {
if ms == i64::MIN {
return Err(napi::Error::from_reason(format!(
"older_than_ms can not be {}",
i32::MIN,
)));
}
Duration::try_milliseconds(ms)
} else {
None
};
let compaction_stats = inner
.optimize(OptimizeAction::Compact {
options: lancedb::table::CompactionOptions::default(),
remap_options: None,
})
.await
.default_error()?
.compaction
.unwrap();
let prune_stats = inner
.optimize(OptimizeAction::Prune {
older_than,
delete_unverified: None,
})
.await
.default_error()?
.prune
.unwrap();
inner
.optimize(lancedb::table::OptimizeAction::Index(
OptimizeOptions::default(),
))
.await
.default_error()?;
Ok(OptimizeStats {
compaction: CompactionStats {
files_added: compaction_stats.files_added as i64,
files_removed: compaction_stats.files_removed as i64,
fragments_added: compaction_stats.fragments_added as i64,
fragments_removed: compaction_stats.fragments_removed as i64,
},
prune: RemovalStats {
bytes_removed: prune_stats.bytes_removed as i64,
old_versions_removed: prune_stats.old_versions as i64,
},
})
}
#[napi]
pub async fn list_indices(&self) -> napi::Result<Vec<IndexConfig>> {
Ok(self
@@ -352,40 +298,6 @@ impl From<lancedb::index::IndexConfig> for IndexConfig {
}
}
/// Statistics about a compaction operation.
#[napi(object)]
#[derive(Clone, Debug)]
pub struct CompactionStats {
/// The number of fragments removed
pub fragments_removed: i64,
/// The number of new, compacted fragments added
pub fragments_added: i64,
/// The number of data files removed
pub files_removed: i64,
/// The number of new, compacted data files added
pub files_added: i64,
}
/// Statistics about a cleanup operation
#[napi(object)]
#[derive(Clone, Debug)]
pub struct RemovalStats {
/// The number of bytes removed
pub bytes_removed: i64,
/// The number of old versions removed
pub old_versions_removed: i64,
}
/// Statistics about an optimize operation
#[napi(object)]
#[derive(Clone, Debug)]
pub struct OptimizeStats {
/// Statistics about the compaction operation
pub compaction: CompactionStats,
/// Statistics about the removal operation
pub prune: RemovalStats,
}
/// A definition of a column alteration. The alteration changes the column at
/// `path` to have the new name `name`, to be nullable if `nullable` is true,
/// and to have the data type `data_type`. At least one of `rename` or `nullable`

8
python/.bumpversion.cfg Normal file
View File

@@ -0,0 +1,8 @@
[bumpversion]
current_version = 0.6.9
commit = True
message = [python] Bump version: {current_version} → {new_version}
tag = True
tag_name = python-v{new_version}
[bumpversion:file:pyproject.toml]

View File

@@ -1,34 +0,0 @@
[tool.bumpversion]
current_version = "0.8.0"
parse = """(?x)
(?P<major>0|[1-9]\\d*)\\.
(?P<minor>0|[1-9]\\d*)\\.
(?P<patch>0|[1-9]\\d*)
(?:-(?P<pre_l>[a-zA-Z-]+)\\.(?P<pre_n>0|[1-9]\\d*))?
"""
serialize = [
"{major}.{minor}.{patch}-{pre_l}.{pre_n}",
"{major}.{minor}.{patch}",
]
search = "{current_version}"
replace = "{new_version}"
regex = false
ignore_missing_version = false
ignore_missing_files = false
tag = true
sign_tags = false
tag_name = "python-v{new_version}"
tag_message = "Bump version: {current_version} → {new_version}"
allow_dirty = true
commit = true
message = "Bump version: {current_version} → {new_version}"
commit_args = ""
[tool.bumpversion.parts.pre_l]
values = ["beta", "final"]
optional_value = "final"
[[tool.bumpversion.files]]
filename = "Cargo.toml"
search = "\nversion = \"{current_version}\""
replace = "\nversion = \"{new_version}\""

View File

@@ -1,6 +1,6 @@
[package]
name = "lancedb-python"
version = "0.8.0"
version = "0.4.10"
edition.workspace = true
description = "Python bindings for LanceDB"
license.workspace = true
@@ -14,7 +14,7 @@ name = "_lancedb"
crate-type = ["cdylib"]
[dependencies]
arrow = { version = "51.0.0", features = ["pyarrow"] }
arrow = { version = "50.0.0", features = ["pyarrow"] }
lancedb = { path = "../rust/lancedb" }
env_logger = "0.10"
pyo3 = { version = "0.20", features = ["extension-module", "abi3-py38"] }

View File

@@ -1,16 +1,16 @@
[project]
name = "lancedb"
# version in Cargo.toml
version = "0.6.9"
dependencies = [
"deprecation",
"pylance==0.11.0",
"pylance==0.10.12",
"ratelimiter~=1.0",
"requests>=2.31.0",
"retry>=0.9.2",
"tqdm>=4.27.0",
"pydantic>=1.10",
"attrs>=21.3.0",
"semver",
"semver>=3.0",
"cachetools",
"overrides>=0.7",
]
@@ -80,7 +80,6 @@ embeddings = [
"boto3>=1.28.57",
"awscli>=1.29.57",
"botocore>=1.31.57",
"ollama",
]
azure = ["adlfs>=2024.2.0"]

View File

@@ -107,9 +107,6 @@ def connect(
request_thread_pool=request_thread_pool,
**kwargs,
)
if kwargs:
raise ValueError(f"Unknown keyword arguments: {kwargs}")
return LanceDBConnection(uri, read_consistency_interval=read_consistency_interval)

View File

@@ -86,17 +86,3 @@ class VectorQuery:
def refine_factor(self, refine_factor: int): ...
def nprobes(self, nprobes: int): ...
def bypass_vector_index(self): ...
class CompactionStats:
fragments_removed: int
fragments_added: int
files_removed: int
files_added: int
class RemovalStats:
bytes_removed: int
old_versions_removed: int
class OptimizeStats:
compaction: CompactionStats
prune: RemovalStats

View File

@@ -224,23 +224,13 @@ class DBConnection(EnforceOverrides):
def __getitem__(self, name: str) -> LanceTable:
return self.open_table(name)
def open_table(self, name: str, *, index_cache_size: Optional[int] = None) -> Table:
def open_table(self, name: str) -> Table:
"""Open a Lance Table in the database.
Parameters
----------
name: str
The name of the table.
index_cache_size: int, default 256
Set the size of the index cache, specified as a number of entries
The exact meaning of an "entry" will depend on the type of index:
* IVF - there is one entry for each IVF partition
* BTREE - there is one entry for the entire index
This cache applies to the entire opened table, across all indices.
Setting this value higher will increase performance on larger datasets
at the expense of more RAM
Returns
-------
@@ -258,18 +248,6 @@ class DBConnection(EnforceOverrides):
"""
raise NotImplementedError
def rename_table(self, cur_name: str, new_name: str):
"""Rename a table in the database.
Parameters
----------
cur_name: str
The current name of the table.
new_name: str
The new name of the table.
"""
raise NotImplementedError
def drop_database(self):
"""
Drop database
@@ -429,9 +407,7 @@ class LanceDBConnection(DBConnection):
return tbl
@override
def open_table(
self, name: str, *, index_cache_size: Optional[int] = None
) -> LanceTable:
def open_table(self, name: str) -> LanceTable:
"""Open a table in the database.
Parameters
@@ -443,7 +419,7 @@ class LanceDBConnection(DBConnection):
-------
A LanceTable object representing the table.
"""
return LanceTable.open(self, name, index_cache_size=index_cache_size)
return LanceTable.open(self, name)
@override
def drop_table(self, name: str, ignore_missing: bool = False):
@@ -775,10 +751,7 @@ class AsyncConnection(object):
return AsyncTable(new_table)
async def open_table(
self,
name: str,
storage_options: Optional[Dict[str, str]] = None,
index_cache_size: Optional[int] = None,
self, name: str, storage_options: Optional[Dict[str, str]] = None
) -> Table:
"""Open a Lance Table in the database.
@@ -791,22 +764,12 @@ class AsyncConnection(object):
connection will be inherited by the table, but can be overridden here.
See available options at
https://lancedb.github.io/lancedb/guides/storage/
index_cache_size: int, default 256
Set the size of the index cache, specified as a number of entries
The exact meaning of an "entry" will depend on the type of index:
* IVF - there is one entry for each IVF partition
* BTREE - there is one entry for the entire index
This cache applies to the entire opened table, across all indices.
Setting this value higher will increase performance on larger datasets
at the expense of more RAM
Returns
-------
A LanceTable object representing the table.
"""
table = await self._inner.open_table(name, storage_options, index_cache_size)
table = await self._inner.open_table(name, storage_options)
return AsyncTable(table)
async def drop_table(self, name: str):

View File

@@ -16,7 +16,6 @@ from .bedrock import BedRockText
from .cohere import CohereEmbeddingFunction
from .gemini_text import GeminiText
from .instructor import InstructorEmbeddingFunction
from .ollama import OllamaEmbeddings
from .open_clip import OpenClipEmbeddings
from .openai import OpenAIEmbeddings
from .registry import EmbeddingFunctionRegistry, get_registry

View File

@@ -10,13 +10,18 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from abc import ABC, abstractmethod
from typing import List, Union
import numpy as np
import pyarrow as pa
from pydantic import BaseModel, Field, PrivateAttr
from tqdm import tqdm
import lancedb
from .fine_tuner import QADataset
from .utils import TEXT, retry_with_exponential_backoff
@@ -126,6 +131,22 @@ class EmbeddingFunction(BaseModel, ABC):
def __hash__(self) -> int:
return hash(frozenset(vars(self).items()))
def finetune(self, dataset: QADataset, *args, **kwargs):
"""
Finetune the embedding function on a dataset
"""
raise NotImplementedError(
"Finetuning is not supported for this embedding function"
)
def evaluate(self, dataset: QADataset, top_k=5, path=None, *args, **kwargs):
"""
Evaluate the embedding function on a dataset
"""
raise NotImplementedError(
"Evaluation is not supported for this embedding function"
)
class EmbeddingFunctionConfig(BaseModel):
"""
@@ -159,3 +180,52 @@ class TextEmbeddingFunction(EmbeddingFunction):
Generate the embeddings for the given texts
"""
pass
def evaluate(self, dataset: QADataset, top_k=5, path=None, *args, **kwargs):
"""
Evaluate the embedding function on a dataset. This calculates the hit-rate for
the top-k retrieved documents for each query in the dataset. Assumes that the
first relevant document is the expected document.
Pro - Should work for any embedding model
Con - Returns only very simple metrics.
Parameters
----------
dataset: QADataset
The dataset to evaluate on
Returns
-------
dict
The evaluation results
"""
corpus = dataset.corpus
queries = dataset.queries
relevant_docs = dataset.relevant_docs
path = path or os.path.join(os.getcwd(), "eval")
db = lancedb.connect(path)
class Schema(lancedb.pydantic.LanceModel):
id: str
text: str = self.SourceField()
vector: lancedb.pydantic.Vector(self.ndims()) = self.VectorField()
retriever = db.create_table("eval", schema=Schema, mode="overwrite")
pylist = [{"id": str(k), "text": v} for k, v in corpus.items()]
retriever.add(pylist)
eval_results = []
for query_id, query in tqdm(queries.items()):
retrieved_nodes = retriever.search(query).limit(top_k).to_list()
retrieved_ids = [node["id"] for node in retrieved_nodes]
expected_id = relevant_docs[query_id][0]
is_hit = expected_id in retrieved_ids # assume 1 relevant doc
eval_result = {
"is_hit": is_hit,
"retrieved": retrieved_ids,
"expected": expected_id,
"query": query_id,
}
eval_results.append(eval_result)
return eval_results

View File

@@ -0,0 +1,147 @@
### Fine-tuning workflow
The fine-tuning workflow is as follows:
1. Create a `QADataset` object.
2. Initialize any embedding function using the LanceDB embedding API
3. Call `finetune` method on the embedding object with the `QADataset` object as an argument.
4. Evaluate the fine-tuned model using the `evaluate` method in the embedding API.
# End-to-End Examples
The following is an example of how to fine-tune an embedding model using the LanceDB embedding API.
## Example 1: Fine-tuning from a synthetic dataset
```python
import os
import pandas as pd
from lancedb.embeddings.fine_tuner.llm import Openai
from lancedb.embeddings.fine_tuner.dataset import QADataset, TextChunk
from lancedb.pydantic import LanceModel, Vector
from llama_index.core import SimpleDirectoryReader
from llama_index.core.node_parser import SentenceSplitter
from llama_index.core.schema import MetadataMode
from lancedb.embeddings import get_registry
dataset = "Uber10KDataset2021"
lance_dataset_dir = dataset + "_lance"
valset_dir = dataset + "_lance_val"
finetuned_model_path = "./model_finetuned"
# 1. Create a QADataset object. See all datasets on llama-index here: https://github.com/run-llama/llama_index/tree/main/llama-datasets
if not os.path.exists(f"./data/{dataset}"):
os.system(
f"llamaindex-cli download-llamadataset {dataset} --download-dir ./data/{dataset}"
)
docs = SimpleDirectoryReader(input_dir=f"./data/{dataset}/source_files").load_data()
parser = SentenceSplitter()
nodes = parser.get_nodes_from_documents(docs)
# convert Llama-index TextNode to TextChunk
chunks = [TextChunk.from_llama_index_node(node) for node in nodes]
llm = Openai()
if os.path.exists(lance_dataset_dir):
trainset = QADataset.load(lance_dataset_dir)
else:
trainset = QADataset.from_llm(chunks, llm, num_questions_per_chunk=2)
trainset.save(lance_dataset_dir)
# Ideally, we should have a standard dataset for validation, but here we're just generating a synthetic dataset.
if os.path.exists(valset_dir):
valset = QADataset.load(valset_dir)
else:
valset = QADataset.from_llm(chunks, llm, num_questions_per_chunk=4)
valset.save(valset_dir)
# 2. Initialize the embedding model
model = get_registry().get("sentence-transformers").create(name="sentence-transformers/multi-qa-MiniLM-L6-cos-v1")
# 3. Fine-tune the model
model.finetune(trainset=trainset, path=finetuned_model_path, epochs=4)
# 4. Evaluate the fine-tuned model
base = get_registry().get("sentence-transformers").create(name="sentence-transformers/multi-qa-MiniLM-L6-cos-v1")
base_results = base.evaluate(valset, top_k=5)
tuned = get_registry().get("sentence-transformers").create(name=finetuned_model_path)
tuned_results = tuned.evaluate(valset, top_k=5)
openai = get_registry().get("openai").create(name="text-embedding-3-small")
openai_results = openai.evaluate(valset, top_k=5)
print("openai-embedding-v3 hit-rate - ", pd.DataFrame(openai_results)["is_hit"].mean())
print("fine-tuned hit-rate - ", pd.DataFrame(tuned_results)["is_hit"].mean())
print("Base model hite-rate - ", pd.DataFrame(base_results)["is_hit"].mean())
```
The fine-tuning workflow for embeddings consists of the following parts:
### QADataset
This class is used for managing the data for fine-tuning. It contains the following builder methods:
```python
from_llm(
    nodes: "List[TextChunk]",
    llm: BaseLLM,
    qa_generate_prompt_tmpl: str = DEFAULT_PROMPT_TMPL,
    num_questions_per_chunk: int = 2,
) -> "QADataset"
```
Create synthetic data from a language model and text chunks of the original document on which the model is to be fine-tuned.
```python
from_responses(docs: List['TextChunk'], queries: Dict[str, str], relevant_docs: Dict[str, List[str]])-> "QADataset"
```
Create a dataset from queries and responses based on a real-world scenario. Designed to be used for knowledge distillation from a larger LLM to a smaller one.
It also contains the following data attributes:
```
queries (Dict[str, str]): Dict id -> query.
corpus (Dict[str, str]): Dict id -> string.
relevant_docs (Dict[str, List[str]]): Dict query id -> list of doc ids.
```
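Since `QADataset` is a plain Pydantic model, a toy dataset can also be assembled by hand, which is handy for tests. A minimal sketch (the document texts and ids below are made up for illustration):

```python
from lancedb.embeddings.fine_tuner.dataset import QADataset

# Two toy documents and two queries; ids are arbitrary strings.
corpus = {
    "d1": "LanceDB is an embedded vector database.",
    "d2": "Lance is a columnar data format.",
}
queries = {"q1": "What is LanceDB?", "q2": "What is the Lance format?"}
relevant_docs = {"q1": ["d1"], "q2": ["d2"]}

ds = QADataset(queries=queries, corpus=corpus, relevant_docs=relevant_docs)
ds.save("./toy_qa_lance")  # writes queries/corpus/relevant_docs .lance datasets
restored = QADataset.load("./toy_qa_lance")
assert restored.queries == ds.queries
```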
### TextChunk
This class wraps a unit of text used for fine-tuning. It is designed to standardize and interoperate with various text splitting/pre-processing tools like llama-index and langchain. It contains the following attributes:
```
text: str
id: str
metadata: Dict[str, Any] = {}
```
Builder Methods:
```python
from_llama_index_node(node) -> "TextChunk"
```
Create a text chunk from a llama index node.
```python
from_langchain_node(node) -> "TextChunk"
```
Create a text chunk from a langchain index node.
```python
from_chunk(cls, chunk: str, metadata: dict = {}) -> "TextChunk"
```
Create a text chunk from a string.
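For example (a short sketch; the text is arbitrary):

```python
from lancedb.embeddings.fine_tuner.dataset import TextChunk

chunk = TextChunk.from_chunk("LanceDB is an embedded vector database.")
print(chunk.id)        # auto-generated uuid4 string
print(chunk.metadata)  # {} by default
print(str(chunk))      # the raw text
```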
### FineTuner
This class is used for fine-tuning embeddings. It is exposed to the user via a high-level function in the base embedding API.
```python
class BaseEmbeddingTuner(ABC):
"""Base Embedding finetuning engine."""
@abstractmethod
def finetune(self) -> None:
"""Goes off and does stuff."""
def helper(self) -> None:
"""A helper method."""
pass
```
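A subclass only has to provide `finetune`. The hypothetical `MyTuner` below (not part of the codebase) sketches the shape, using the `query_docid_pairs` helper that `QADataset` exposes:

```python
from lancedb.embeddings.fine_tuner.basetuner import BaseEmbeddingTuner
from lancedb.embeddings.fine_tuner.dataset import QADataset


class MyTuner(BaseEmbeddingTuner):
    """Hypothetical tuner, shown only to illustrate the interface."""

    def __init__(self, model, trainset: QADataset):
        self.model = model
        self.trainset = trainset

    def finetune(self) -> None:
        # Pair each query with the text of its relevant documents and hand
        # the pairs to whatever training loop the wrapped model exposes.
        for query, doc_ids in self.trainset.query_docid_pairs:
            texts = [self.trainset.corpus[doc_id] for doc_id in doc_ids]
            ...  # train self.model on (query, text) pairs
```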
### Embedding API finetuning implementation
Each embedding API needs to implement the `finetune` method in order to support fine-tuning. A vanilla evaluation technique has been implemented in `TextEmbeddingFunction` that calculates hit-rate @ `top_k`.
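The hit-rate numbers printed in the example above are simply the mean of the boolean `is_hit` field over all queries. A small sketch, assuming the list-of-dicts shape that `TextEmbeddingFunction.evaluate` returns:

```python
import pandas as pd

# Each entry: {"is_hit": bool, "retrieved": [...], "expected": ..., "query": ...}
results = model.evaluate(valset, top_k=5)
hit_rate = pd.DataFrame(results)["is_hit"].mean()
print(f"hit-rate@5 = {hit_rate:.3f}")
```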

View File

@@ -0,0 +1,4 @@
from .dataset import QADataset, TextChunk
from .llm import Gemini, Openai
__all__ = ["QADataset", "TextChunk", "Openai", "Gemini"]

View File

@@ -0,0 +1,19 @@
from abc import ABC, abstractmethod
class BaseEmbeddingTuner(ABC):
"""Base Embedding finetuning engine."""
@abstractmethod
def finetune(self) -> None:
"""
Finetune the embedding model.
"""
pass
def helper(self) -> None:
"""
A helper method called after finetuning. This is meant to provide
usage instructions or other helpful information.
"""
pass

View File

@@ -0,0 +1,283 @@
import re
import uuid
from pathlib import Path
from typing import Any, Dict, List, Tuple
import lance
import pyarrow as pa
from pydantic import BaseModel
from tqdm import tqdm
from .llm import BaseLLM
DEFAULT_PROMPT_TMPL = """\
Context information is below.
---------------------
{context_str}
---------------------
Given the context information and no prior knowledge,
generate only questions based on the below query.
You are a Teacher/ Professor. Your task is to setup \
{num_questions_per_chunk} questions for an upcoming \
quiz/examination. The questions should be diverse in nature \
across the document. Restrict the questions to the \
context information provided."
"""
class QADataset(BaseModel):
"""Embedding QA Finetuning Dataset.
Args:
queries (Dict[str, str]): Dict id -> query.
corpus (Dict[str, str]): Dict id -> string.
relevant_docs (Dict[str, List[str]]): Dict query id -> list of doc ids.
"""
queries: Dict[str, str] # id -> query
corpus: Dict[str, str] # id -> text
relevant_docs: Dict[str, List[str]] # query id -> list of retrieved doc ids
mode: str = "text"
@property
def query_docid_pairs(self) -> List[Tuple[str, List[str]]]:
"""Get query, relevant doc ids."""
return [
(query, self.relevant_docs[query_id])
for query_id, query in self.queries.items()
]
def save(self, path: str, mode: str = "overwrite") -> None:
"""
Save the current dataset to a directory as .lance files.
Parameters
----------
path : str
The path to save the dataset.
mode : str, optional
The mode to save the dataset, by default "overwrite". Accepts
lance modes.
"""
save_dir = Path(path)
save_dir.mkdir(parents=True, exist_ok=True)
# convert to pydict {"id": []}
queries = {
"id": list(self.queries.keys()),
"query": list(self.queries.values()),
}
corpus = {
"id": list(self.corpus.keys()),
"text": [
val or " " for val in self.corpus.values()
], # lance saves empty strings as null
}
relevant_docs = {
"query_id": list(self.relevant_docs.keys()),
"doc_id": list(self.relevant_docs.values()),
}
# write to lance
lance.write_dataset(
pa.Table.from_pydict(queries), save_dir / "queries.lance", mode=mode
)
lance.write_dataset(
pa.Table.from_pydict(corpus), save_dir / "corpus.lance", mode=mode
)
lance.write_dataset(
pa.Table.from_pydict(relevant_docs),
save_dir / "relevant_docs.lance",
mode=mode,
)
@classmethod
def load(cls, path: str) -> "QADataset":
"""
Load QADataset from a directory.
Parameters
----------
path : str
The path to load the dataset from.
Returns
-------
QADataset
The loaded QADataset.
"""
load_dir = Path(path)
queries = lance.dataset(load_dir / "queries.lance").to_table().to_pydict()
corpus = lance.dataset(load_dir / "corpus.lance").to_table().to_pydict()
relevant_docs = (
lance.dataset(load_dir / "relevant_docs.lance").to_table().to_pydict()
)
return cls(
queries=dict(zip(queries["id"], queries["query"])),
corpus=dict(zip(corpus["id"], corpus["text"])),
relevant_docs=dict(zip(relevant_docs["query_id"], relevant_docs["doc_id"])),
)
# generate queries as a convenience function
@classmethod
def from_llm(
cls,
nodes: "List[TextChunk]",
llm: BaseLLM,
qa_generate_prompt_tmpl: str = DEFAULT_PROMPT_TMPL,
num_questions_per_chunk: int = 2,
) -> "QADataset":
"""
Generate a QADataset from a list of TextChunks.
Parameters
----------
nodes : List[TextChunk]
The list of text chunks.
llm : BaseLLM
The language model to generate questions.
qa_generate_prompt_tmpl : str, optional
The template for generating questions, by default DEFAULT_PROMPT_TMPL.
num_questions_per_chunk : int, optional
The number of questions to generate per chunk, by default 2.
Returns
-------
QADataset
The generated QADataset.
"""
node_dict = {node.id: node.text for node in nodes}
queries = {}
relevant_docs = {}
for node_id, text in tqdm(node_dict.items()):
query = qa_generate_prompt_tmpl.format(
context_str=text, num_questions_per_chunk=num_questions_per_chunk
)
response = llm.chat_completion(query)
result = str(response).strip().split("\n")
questions = [
re.sub(r"^\d+[\).\s]", "", question).strip() for question in result
]
questions = [question for question in questions if len(question) > 0]
for question in questions:
question_id = str(uuid.uuid4())
queries[question_id] = question
relevant_docs[question_id] = [node_id]
return cls(queries=queries, corpus=node_dict, relevant_docs=relevant_docs)
@classmethod
def from_responses(
cls,
nodes: List["TextChunk"],
queries: Dict[str, str],
relevant_docs: Dict[str, List[str]],
) -> "QADataset":
"""
Create a QADataset from a list of TextChunks and a list of
questions, queries, and relevant docs.
Parameters
----------
nodes : List[TextChunk]
The list of text chunks.
queries : Dict[str, str]
The queries. query id -> query.
relevant_docs : Dict[str, List[str]]
The relevant docs. Dict query id -> list of doc ids.
Returns
-------
QADataset
The QADataset.
"""
node_dict = {node.id: node.text for node in nodes}
return cls(queries=queries, corpus=node_dict, relevant_docs=relevant_docs)
class TextChunk(BaseModel):
"""
Simple text chunk for storing text nodes. Acts as a wrapper around text.
Allow interoperability between different text processing libraries.
Args:
text (str): The text of the chunk.
id (str): The id of the chunk.
metadata (Dict[str, Any], optional): The metadata of the chunk. Defaults to {}.
"""
text: str
id: str
metadata: Dict[str, Any] = {}
@classmethod
def from_chunk(cls, chunk: str, metadata: dict = {}) -> "TextChunk":
"""
Create a TextChunk from a string chunk.
Parameters
----------
chunk : str
The text chunk.
metadata : dict, optional
The metadata, by default {}.
Returns
-------
TextChunk
The text chunk.
"""
# generate a unique id
return cls(text=chunk, id=str(uuid.uuid4()), metadata=metadata)
@classmethod
def from_llama_index_node(cls, node):
"""
Generate a TextChunk from a llama index node.
Parameters
----------
node : llama_index.core.TextNode
The llama index node.
"""
return cls(text=node.text, id=node.node_id, metadata=node.metadata)
@classmethod
def from_langchain_node(cls, node):
"""
Generate a TextChunk from a langchain node.
Parameters
----------
node : langchain.core.TextNode
The langchain node.
"""
raise NotImplementedError("Not implemented yet.")
def to_dict(self) -> Dict[str, Any]:
"""
Convert to a dictionary.
Returns
-------
Dict[str, Any]
The dictionary.
"""
return self.dict()
def __str__(self) -> str:
return self.text
def __repr__(self) -> str:
return f"SimpleTextChunk(text={self.text}, id={self.id}, \
metadata={self.metadata})"

View File

@@ -0,0 +1,88 @@
import os
import re
from functools import cached_property
from typing import Optional
from pydantic import BaseModel
from ...util import attempt_import_or_raise
from ..utils import api_key_not_found_help
class BaseLLM(BaseModel):
"""
TODO:
Base class for Language Model based Embedding Functions. This class is
loosely designed right now, and will be updated as the usage gets clearer.
"""
class Config:
protected_namespaces = () # Disable protected namespace check
model_name: str
model_kwargs: dict = {}
@cached_property
def _client():
"""
Get the client for the language model
"""
raise NotImplementedError
def chat_completion(self, prompt: str, **kwargs):
"""
Get the chat completion for the given prompt
"""
raise NotImplementedError
class Openai(BaseLLM):
model_name: str = "gpt-3.5-turbo"
kwargs: dict = {}
api_key: Optional[str] = None
@cached_property
def _client(self):
"""
Get the client for the language model
"""
openai = attempt_import_or_raise("openai")
if not os.environ.get("OPENAI_API_KEY"):
api_key_not_found_help("openai")
return openai.OpenAI()
def chat_completion(self, prompt: str) -> str:
"""
Get the chat completion for the given prompt
"""
# TODO: this is the legacy OpenAI API; replace with completions
completion = self._client.chat.completions.create(
model=self.model_name,
messages=[
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": prompt},
],
**self.kwargs,
)
text = completion.choices[0].message.content
return text
def get_questions(self, prompt: str) -> str:
"""
Get the chat completion for the given prompt
"""
response = self.chat_completion(prompt)
result = str(response).strip().split("\n")
questions = [
re.sub(r"^\d+[\).\s]", "", question).strip() for question in result
]
questions = [question for question in questions if len(question) > 0]
return questions
class Gemini(BaseLLM):
pass

View File

@@ -1,69 +0,0 @@
# Copyright (c) 2023. LanceDB Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from functools import cached_property
from typing import TYPE_CHECKING, List, Optional, Union
from ..util import attempt_import_or_raise
from .base import TextEmbeddingFunction
from .registry import register
if TYPE_CHECKING:
import numpy as np
@register("ollama")
class OllamaEmbeddings(TextEmbeddingFunction):
"""
An embedding function that uses Ollama
https://github.com/ollama/ollama/blob/main/docs/api.md#generate-embeddings
https://ollama.com/blog/embedding-models
"""
name: str = "nomic-embed-text"
host: str = "http://localhost:11434"
options: Optional[dict] = None # type = ollama.Options
keep_alive: Optional[Union[float, str]] = None
ollama_client_kwargs: Optional[dict] = {}
def ndims(self):
return len(self.generate_embeddings(["foo"])[0])
def _compute_embedding(self, text):
return self._ollama_client.embeddings(
model=self.name,
prompt=text,
options=self.options,
keep_alive=self.keep_alive,
)["embedding"]
def generate_embeddings(
self, texts: Union[List[str], "np.ndarray"]
) -> List["np.array"]:
"""
Get the embeddings for the given texts
Parameters
----------
texts: list[str] or np.ndarray (of str)
The texts to embed
"""
# TODO retry, rate limit, token limit
embeddings = [self._compute_embedding(text) for text in texts]
return embeddings
@cached_property
def _ollama_client(self):
ollama = attempt_import_or_raise("ollama")
# ToDo explore ollama.AsyncClient
return ollama.Client(host=self.host, **self.ollama_client_kwargs)

View File

@@ -10,12 +10,15 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import List, Union
from typing import Any, List, Optional, Union
import numpy as np
import logging
from lancedb.embeddings.fine_tuner import QADataset
from ..util import attempt_import_or_raise
from .base import TextEmbeddingFunction
from .fine_tuner.basetuner import BaseEmbeddingTuner
from .registry import register
from .utils import weak_lru
@@ -80,3 +83,151 @@ class SentenceTransformerEmbeddings(TextEmbeddingFunction):
"sentence_transformers", "sentence-transformers"
)
return sentence_transformers.SentenceTransformer(self.name, device=self.device)
def finetune(self, trainset: QADataset, *args, **kwargs):
"""
Finetune the Sentence Transformers model
Parameters
----------
trainset: QADataset
The dataset to use for finetuning
"""
tuner = SentenceTransformersTuner(
model=self.embedding_model,
trainset=trainset,
**kwargs,
)
tuner.finetune()
class SentenceTransformersTuner(BaseEmbeddingTuner):
"""Sentence Transformers Embedding Finetuning Engine."""
def __init__(
self,
model: Any,
trainset: QADataset,
valset: Optional[QADataset] = None,
path: Optional[str] = "~/.lancedb/embeddings/models",
batch_size: int = 8,
epochs: int = 1,
show_progress: bool = True,
eval_steps: int = 50,
max_input_per_doc: int = -1,
loss: Optional[Any] = None,
evaluator: Optional[Any] = None,
run_name: Optional[str] = None,
log_wandb: bool = False,
) -> None:
"""
Parameters
----------
model: Any
The sentence-transformers model instance to finetune.
trainset: QADataset
The training dataset.
valset: Optional[QADataset]
The validation dataset.
path: Optional[str]
The path to save the model.
batch_size: int, default=8
The batch size.
epochs: int, default=1
The number of epochs.
show_progress: bool, default=True
Whether to show progress.
eval_steps: int, default=50
The number of steps to evaluate.
max_input_per_doc: int, default=-1
The maximum number of relevant documents to use per query.
If -1, use all relevant documents.
"""
from sentence_transformers import InputExample, losses
from sentence_transformers.evaluation import InformationRetrievalEvaluator
from torch.utils.data import DataLoader
self.model = model
self.trainset = trainset
self.valset = valset
self.path = path
self.batch_size = batch_size
self.epochs = epochs
self.show_progress = show_progress
self.eval_steps = eval_steps
self.max_input_per_doc = max_input_per_doc
self.evaluator = None
self.run_name = run_name
self.log_wandb = log_wandb
if self.max_input_per_doc < -1 or self.max_input_per_doc == 0:
    raise ValueError("max_input_per_doc must be -1 or greater than 0.")
examples: Any = []
for query_id, query in self.trainset.queries.items():
if max_input_per_doc == -1:
for node_id in self.trainset.relevant_docs[query_id]:
text = self.trainset.corpus[node_id]
example = InputExample(texts=[query, text])
examples.append(example)
else:
    for node_id in self.trainset.relevant_docs[query_id][:max_input_per_doc]:
        text = self.trainset.corpus[node_id]
        example = InputExample(texts=[query, text])
        examples.append(example)
self.examples = examples
self.loader: DataLoader = DataLoader(examples, batch_size=batch_size)
if self.valset is not None:
eval_engine = evaluator or InformationRetrievalEvaluator
self.evaluator = eval_engine(
valset.queries, valset.corpus, valset.relevant_docs
)
# define loss
self.loss = loss or losses.MultipleNegativesRankingLoss(self.model)
self.warmup_steps = int(len(self.loader) * epochs * 0.1)
def finetune(self) -> None:
"""Finetune the Sentence Transformers model."""
self.model.fit(
train_objectives=[(self.loader, self.loss)],
epochs=self.epochs,
warmup_steps=self.warmup_steps,
output_path=self.path,
show_progress_bar=self.show_progress,
evaluator=self.evaluator,
evaluation_steps=self.eval_steps,
callback=self._wandb_callback if self.log_wandb else None,
)
self.helper()
def helper(self) -> None:
    """Log completion info and next-step usage instructions."""
logging.info("Finetuning complete.")
logging.info(f"Model saved to {self.path}.") # noqa
logging.info("You can now use the model as follows:")
logging.info(
f"model = get_registry().get('sentence-transformers').create(name='./{self.path}')" # noqa
)
def _wandb_callback(self, score, epoch, steps):
try:
import wandb
except ImportError:
raise ImportError(
"wandb is not installed. Please install it using `pip install wandb`"
)
run = wandb.run or wandb.init(
project="sbert_lancedb_finetune", name=self.run_name
)
run.log({"epoch": epoch, "steps": steps, "score": score})
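A hedged end-to-end sketch of the finetuning flow this hunk adds, mirroring the test introduced later in this diff; the chunk texts and query are illustrative:

```python
# Sketch only: assumes the fine_tuner module added on this branch.
from lancedb.embeddings import get_registry
from lancedb.embeddings.fine_tuner import QADataset, TextChunk

chunks = [TextChunk.from_chunk(t) for t in ["legal clause text", "match report text"]]
queries = {"q1": "What is this chunk about?"}
relevant_docs = {"q1": [chunks[0].id]}  # map each query to its source chunk
ds = QADataset.from_responses(chunks, queries, relevant_docs)

model = get_registry().get("sentence-transformers").create()
model.finetune(trainset=ds, valset=ds, path="./tuned-model", epochs=1)
tuned = get_registry().get("sentence-transformers").create(name="./tuned-model")
```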

View File

@@ -255,13 +255,7 @@ def retry_with_exponential_backoff(
)
delay *= exponential_base * (1 + jitter * random.random())
logging.warning(
"Error occurred: %s \n Retrying in %s seconds (retry %s of %s) \n",
e,
delay,
num_retries,
max_retries,
)
logging.info("Retrying in %s seconds...", delay)
time.sleep(delay)
return wrapper
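To make the delay arithmetic in this hunk concrete, a standalone sketch; `exponential_base = 2` and `jitter = True` are assumed values that are not visible in the hunk:

```python
# Each retry multiplies the delay by exponential_base times a random
# factor in [1, 2), so waits grow roughly geometrically with jitter.
import random

exponential_base, jitter, delay = 2, True, 1.0
for num_retries in range(1, 4):
    delay *= exponential_base * (1 + jitter * random.random())
    print(f"retry {num_retries}: sleeping ~{delay:.1f}s")
```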

View File

@@ -37,7 +37,7 @@ import pyarrow as pa
import pydantic
import semver
PYDANTIC_VERSION = semver.parse_version_info(pydantic.__version__)
PYDANTIC_VERSION = semver.Version.parse(pydantic.__version__)
try:
from pydantic_core import CoreSchema, core_schema
except ImportError:
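The change above swaps the `semver.parse_version_info` helper, deprecated in semver 3.x, for the `Version` class; a quick sketch of the replacement API:

```python
# Both spellings yield a comparable version object; only Version.parse
# avoids the 3.x deprecation warning.
import pydantic
import semver

PYDANTIC_VERSION = semver.Version.parse(pydantic.__version__)
print(PYDANTIC_VERSION.major >= 2)  # e.g. gate pydantic-v2-only code paths
```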

View File

@@ -30,7 +30,6 @@ from typing import (
import deprecation
import numpy as np
import pyarrow as pa
import pyarrow.fs as pa_fs
import pydantic
from . import __version__
@@ -38,7 +37,7 @@ from .arrow import AsyncRecordBatchReader
from .common import VEC
from .rerankers.base import Reranker
from .rerankers.linear_combination import LinearCombinationReranker
from .util import fs_from_uri, safe_import_pandas
from .util import safe_import_pandas
if TYPE_CHECKING:
import PIL
@@ -666,14 +665,6 @@ class LanceFtsQueryBuilder(LanceQueryBuilder):
# get the index path
index_path = self._table._get_fts_index_path()
# Check that we are on local filesystem
fs, _path = fs_from_uri(index_path)
if not isinstance(fs, pa_fs.LocalFileSystem):
raise NotImplementedError(
"Full-text search is only supported on the local filesystem"
)
# check if the index exist
if not Path(index_path).exists():
raise FileNotFoundError(

View File

@@ -94,7 +94,7 @@ class RemoteDBConnection(DBConnection):
yield item
@override
def open_table(self, name: str, *, index_cache_size: Optional[int] = None) -> Table:
def open_table(self, name: str) -> Table:
"""Open a Lance Table in the database.
Parameters
@@ -110,12 +110,6 @@ class RemoteDBConnection(DBConnection):
self._client.mount_retry_adapter_for_table(name)
if index_cache_size is not None:
logging.info(
"index_cache_size is ignored in LanceDb Cloud"
" (there is no local cache to configure)"
)
# check if table exists
if self._table_cache.get(name) is None:
self._client.post(f"/v1/table/{name}/describe/")
@@ -285,25 +279,7 @@ class RemoteDBConnection(DBConnection):
self._client.post(
f"/v1/table/{name}/drop/",
)
self._table_cache.pop(name, default=None)
@override
def rename_table(self, cur_name: str, new_name: str):
"""Rename a table in the database.
Parameters
----------
cur_name: str
The current name of the table.
new_name: str
The new name of the table.
"""
self._client.post(
f"/v1/table/{cur_name}/rename/",
data={"new_table_name": new_name},
)
self._table_cache.pop(cur_name, default=None)
self._table_cache[new_name] = True
self._table_cache.pop(name)
async def close(self):
"""Close the connection to the database."""

View File

@@ -72,7 +72,7 @@ class RemoteTable(Table):
return resp
def index_stats(self, index_uuid: str):
"""List all the stats of a specified index"""
"""List all the indices on the table"""
resp = self._conn._client.post(
f"/v1/table/{self._name}/index/{index_uuid}/stats/"
)
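A hedged sketch of calling this method from a LanceDB Cloud connection; the URI, credentials, and index UUID below are placeholders:

```python
# Placeholder URI/credentials/UUID; index_stats posts to
# /v1/table/{name}/index/{uuid}/stats/ as shown above.
import lancedb

db = lancedb.connect("db://my-project", api_key="sk-placeholder", region="us-east-1")
table = db.open_table("my_table")
print(table.index_stats("00000000-0000-0000-0000-000000000000"))
```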

View File

@@ -58,7 +58,7 @@ if TYPE_CHECKING:
import PIL
from lance.dataset import CleanupStats, ReaderLike
from ._lancedb import Table as LanceDBTable, OptimizeStats
from ._lancedb import Table as LanceDBTable
from .db import LanceDBConnection
from .index import BTree, IndexConfig, IvfPq
@@ -806,7 +806,6 @@ class _LanceLatestDatasetRef(_LanceDatasetRef):
"""Reference to the latest version of a LanceDataset."""
uri: str
index_cache_size: Optional[int] = None
read_consistency_interval: Optional[timedelta] = None
last_consistency_check: Optional[float] = None
_dataset: Optional[LanceDataset] = None
@@ -814,9 +813,7 @@ class _LanceLatestDatasetRef(_LanceDatasetRef):
@property
def dataset(self) -> LanceDataset:
if not self._dataset:
self._dataset = lance.dataset(
self.uri, index_cache_size=self.index_cache_size
)
self._dataset = lance.dataset(self.uri)
self.last_consistency_check = time.monotonic()
elif self.read_consistency_interval is not None:
now = time.monotonic()
@@ -845,15 +842,12 @@ class _LanceLatestDatasetRef(_LanceDatasetRef):
class _LanceTimeTravelRef(_LanceDatasetRef):
uri: str
version: int
index_cache_size: Optional[int] = None
_dataset: Optional[LanceDataset] = None
@property
def dataset(self) -> LanceDataset:
if not self._dataset:
self._dataset = lance.dataset(
self.uri, version=self.version, index_cache_size=self.index_cache_size
)
self._dataset = lance.dataset(self.uri, version=self.version)
return self._dataset
@dataset.setter
@@ -890,8 +884,6 @@ class LanceTable(Table):
connection: "LanceDBConnection",
name: str,
version: Optional[int] = None,
*,
index_cache_size: Optional[int] = None,
):
self._conn = connection
self.name = name
@@ -900,13 +892,11 @@ class LanceTable(Table):
self._ref = _LanceTimeTravelRef(
uri=self._dataset_uri,
version=version,
index_cache_size=index_cache_size,
)
else:
self._ref = _LanceLatestDatasetRef(
uri=self._dataset_uri,
read_consistency_interval=connection.read_consistency_interval,
index_cache_size=index_cache_size,
)
@classmethod
@@ -1209,11 +1199,6 @@ class LanceTable(Table):
raise ValueError("Index already exists. Use replace=True to overwrite.")
fs.delete_dir(path)
if not isinstance(fs, pa_fs.LocalFileSystem):
raise NotImplementedError(
"Full-text search is only supported on the local filesystem"
)
index = create_index(
self._get_fts_index_path(),
field_names,
@@ -2377,49 +2362,6 @@ class AsyncTable:
"""
await self._inner.restore()
async def optimize(
self, *, cleanup_older_than: Optional[timedelta] = None
) -> OptimizeStats:
"""
Optimize the on-disk data and indices for better performance.
Modeled after ``VACUUM`` in PostgreSQL.
Optimization covers three operations:
* Compaction: Merges small files into larger ones
* Prune: Removes old versions of the dataset
* Index: Optimizes the indices, adding new data to existing indices
Parameters
----------
cleanup_older_than: timedelta, optional, default 7 days
All files belonging to versions older than this will be removed. Set
to 0 days to remove all versions except the latest. The latest version
is never removed.
Experimental API
----------------
The optimization process is undergoing active development and may change.
Our goal with these changes is to improve the performance of optimization and
reduce the complexity.
That being said, it is essential today to run optimize if you want the best
performance. It should be stable and safe to use in production, but it is our
hope that the API may be simplified (or may not even need to be called) in the
future.
How frequently an application should call optimize depends on the frequency of
data modifications. If data is frequently added, deleted, or updated then
optimize should be run frequently. A good rule of thumb is to run optimize if
you have added or modified 100,000 or more records or run more than 20 data
modification operations.
"""
if cleanup_older_than is not None:
cleanup_older_than = round(cleanup_older_than.total_seconds() * 1000)
return await self._inner.optimize(cleanup_older_than)
async def list_indices(self) -> IndexConfig:
"""
List all indices that have been created with Self::create_index
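Since this hunk removes both the docstring and the implementation of `optimize()`, here is a hedged sketch of how the async API was exercised; it mirrors the `test_optimize` case deleted further down in this diff:

```python
# Sketch based on the deleted test: optimize() compacts small files,
# prunes old versions, and refreshes indices.
import asyncio
from datetime import timedelta

import lancedb

async def main():
    db = await lancedb.connect_async("/tmp/lancedb-demo")
    table = await db.create_table("demo", data=[{"x": [1]}])
    await table.add(data=[{"x": [2]}])
    stats = await table.optimize(cleanup_older_than=timedelta(seconds=0))
    print(stats.compaction.files_removed, stats.prune.old_versions_removed)

asyncio.run(main())
```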

View File

@@ -368,15 +368,6 @@ async def test_create_exist_ok_async(tmp_path):
# await db.create_table("test", schema=bad_schema, exist_ok=True)
def test_open_table_sync(tmp_path):
db = lancedb.connect(tmp_path)
db.create_table("test", data=[{"id": 0}])
assert db.open_table("test").count_rows() == 1
assert db.open_table("test", index_cache_size=0).count_rows() == 1
with pytest.raises(FileNotFoundError, match="does not exist"):
db.open_table("does_not_exist")
@pytest.mark.asyncio
async def test_open_table(tmp_path):
db = await lancedb.connect_async(tmp_path)
@@ -406,10 +397,6 @@ async def test_open_table(tmp_path):
}
)
# No way to verify this yet, but at least make sure we
# can pass the parameter
await db.open_table("test", index_cache_size=0)
with pytest.raises(ValueError, match="was not found"):
await db.open_table("does_not_exist")

View File

@@ -0,0 +1,45 @@
import uuid
import pytest
from lancedb.embeddings import get_registry
from lancedb.embeddings.fine_tuner import QADataset, TextChunk
from tqdm import tqdm
@pytest.mark.slow
def test_finetuning_sentence_transformers(tmp_path):
queries = {}
relevant_docs = {}
chunks = [
"This is a chunk related to legal docs",
"This is another chunk related financial docs",
"This is a chunk related to sports docs",
"This is another chunk related to fashion docs",
]
text_chunks = [TextChunk.from_chunk(chunk) for chunk in chunks]
for chunk in tqdm(text_chunks):
questions = [
"What is this chunk about?",
"What is the main topic of this chunk?",
]
for question in questions:
question_id = str(uuid.uuid4())
queries[question_id] = question
relevant_docs[question_id] = [chunk.id]
ds = QADataset.from_responses(text_chunks, queries, relevant_docs)
assert len(ds.queries) == 8
assert len(ds.corpus) == 4
model = get_registry().get("sentence-transformers").create()
model.finetune(trainset=ds, valset=ds, path=str(tmp_path / "model"), epochs=1)
model = (
get_registry().get("sentence-transformers").create(name=str(tmp_path / "model"))
)
res = model.evaluate(ds)
assert res is not None
def test_text_chunk():
# TODO
pass

View File

@@ -45,9 +45,7 @@ except Exception:
@pytest.mark.slow
@pytest.mark.parametrize(
"alias", ["sentence-transformers", "openai", "huggingface", "ollama"]
)
@pytest.mark.parametrize("alias", ["sentence-transformers", "openai", "huggingface"])
def test_basic_text_embeddings(alias, tmp_path):
db = lancedb.connect(tmp_path)
registry = get_registry()

View File

@@ -213,7 +213,7 @@ def test_syntax(table):
# https://github.com/lancedb/lancedb/issues/769
table.create_fts_index("text")
with pytest.raises(ValueError, match="Syntax Error"):
table.search("they could have been dogs OR").limit(10).to_list()
table.search("they could have been dogs OR cats").limit(10).to_list()
# these should work
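For reference, a hedged sketch of the boolean query behavior this test pins down (paths are illustrative; a trailing operator is rejected by the FTS parser):

```python
# Illustrative only: "dogs OR" is an incomplete boolean expression and
# raises ValueError matching "Syntax Error", while "dogs OR cats" is valid.
import lancedb

db = lancedb.connect("/tmp/lancedb-demo")
table = db.open_table("my_table")  # assumes a table with a "text" column
table.create_fts_index("text", replace=True)
table.search("dogs OR cats").limit(10).to_list()  # valid query
table.search("dogs OR").limit(10).to_list()       # raises ValueError
```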

View File

@@ -1025,29 +1025,3 @@ async def test_time_travel(db_async: AsyncConnection):
# Can't use restore if not checked out
with pytest.raises(ValueError, match="checkout before running restore"):
await table.restore()
@pytest.mark.asyncio
async def test_optimize(db_async: AsyncConnection):
table = await db_async.create_table(
"test",
data=[{"x": [1]}],
)
await table.add(
data=[
{"x": [2]},
],
)
stats = await table.optimize()
assert stats.compaction.files_removed == 2
assert stats.compaction.files_added == 1
assert stats.compaction.fragments_added == 1
assert stats.compaction.fragments_removed == 2
assert stats.prune.bytes_removed == 0
assert stats.prune.old_versions_removed == 0
stats = await table.optimize(cleanup_older_than=timedelta(seconds=0))
assert stats.prune.bytes_removed > 0
assert stats.prune.old_versions_removed == 3
assert await table.query().to_arrow() == pa.table({"x": [[1], [2]]})

View File

@@ -134,21 +134,17 @@ impl Connection {
})
}
#[pyo3(signature = (name, storage_options = None, index_cache_size = None))]
#[pyo3(signature = (name, storage_options = None))]
pub fn open_table(
self_: PyRef<'_, Self>,
name: String,
storage_options: Option<HashMap<String, String>>,
index_cache_size: Option<u32>,
) -> PyResult<&PyAny> {
let inner = self_.get_inner()?.clone();
let mut builder = inner.open_table(name);
if let Some(storage_options) = storage_options {
builder = builder.storage_options(storage_options);
}
if let Some(index_cache_size) = index_cache_size {
builder = builder.index_cache_size(index_cache_size);
}
future_into_py(self_.py(), async move {
let table = builder.execute().await.infer_error()?;
Ok(Table::new(table))
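On the Python side, this binding is reached through `connect_async` and `open_table`; a hedged sketch with an illustrative URI and option value:

```python
# storage_options is forwarded to the table builder in the binding above;
# the key/value shown are illustrative object-store settings.
import asyncio

import lancedb

async def main():
    db = await lancedb.connect_async(
        "s3://my-bucket/db", storage_options={"timeout": "60s"}
    )
    table = await db.open_table("my_table")
    print(await table.count_rows())

asyncio.run(main())
```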

View File

@@ -35,16 +35,21 @@ impl<T> PythonErrorExt<T> for std::result::Result<T, LanceError> {
match &self {
Ok(_) => Ok(self.unwrap()),
Err(err) => match err {
LanceError::InvalidInput { .. }
| LanceError::InvalidTableName { .. }
| LanceError::TableNotFound { .. }
| LanceError::Schema { .. } => self.value_error(),
LanceError::InvalidInput { .. } => self.value_error(),
LanceError::InvalidTableName { .. } => self.value_error(),
LanceError::TableNotFound { .. } => self.value_error(),
LanceError::Schema { .. } => self.value_error(),
LanceError::CreateDir { .. } => self.os_error(),
LanceError::TableAlreadyExists { .. } => self.runtime_error(),
LanceError::ObjectStore { .. } => Err(PyIOError::new_err(err.to_string())),
LanceError::Lance { .. } => self.runtime_error(),
LanceError::Runtime { .. } => self.runtime_error(),
LanceError::Http { .. } => self.runtime_error(),
LanceError::Arrow { .. } => self.runtime_error(),
LanceError::NotSupported { .. } => {
Err(PyNotImplementedError::new_err(err.to_string()))
}
_ => self.runtime_error(),
LanceError::Other { .. } => self.runtime_error(),
},
}
}
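What this mapping means for Python callers, sketched with an assumed local path; `TableNotFound` surfaces as `ValueError`, matching the async `open_table` test earlier in this diff:

```python
# TableNotFound -> ValueError per the match arms above.
import asyncio

import lancedb

async def main():
    db = await lancedb.connect_async("/tmp/lancedb-demo")
    try:
        await db.open_table("does_not_exist")
    except ValueError as e:
        print("mapped from LanceError::TableNotFound:", e)

asyncio.run(main())
```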

View File

@@ -2,9 +2,7 @@ use arrow::{
ffi_stream::ArrowArrayStreamReader,
pyarrow::{FromPyArrow, ToPyArrow},
};
use lancedb::table::{
AddDataMode, Duration, OptimizeAction, OptimizeOptions, Table as LanceDbTable,
};
use lancedb::table::{AddDataMode, Table as LanceDbTable};
use pyo3::{
exceptions::{PyRuntimeError, PyValueError},
pyclass, pymethods,
@@ -19,40 +17,6 @@ use crate::{
query::Query,
};
/// Statistics about a compaction operation.
#[pyclass(get_all)]
#[derive(Clone, Debug)]
pub struct CompactionStats {
/// The number of fragments removed
pub fragments_removed: u64,
/// The number of new, compacted fragments added
pub fragments_added: u64,
/// The number of data files removed
pub files_removed: u64,
/// The number of new, compacted data files added
pub files_added: u64,
}
/// Statistics about a cleanup operation
#[pyclass(get_all)]
#[derive(Clone, Debug)]
pub struct RemovalStats {
/// The number of bytes removed
pub bytes_removed: u64,
/// The number of old versions removed
pub old_versions_removed: u64,
}
/// Statistics about an optimize operation
#[pyclass(get_all)]
#[derive(Clone, Debug)]
pub struct OptimizeStats {
/// Statistics about the compaction operation
pub compaction: CompactionStats,
/// Statistics about the removal operation
pub prune: RemovalStats,
}
#[pyclass]
pub struct Table {
// We keep a copy of the name to use if the inner table is dropped
@@ -227,58 +191,4 @@ impl Table {
pub fn query(&self) -> Query {
Query::new(self.inner_ref().unwrap().query())
}
pub fn optimize(self_: PyRef<'_, Self>, cleanup_since_ms: Option<u64>) -> PyResult<&PyAny> {
let inner = self_.inner_ref()?.clone();
let older_than = if let Some(ms) = cleanup_since_ms {
if ms > i64::MAX as u64 {
return Err(PyValueError::new_err(format!(
    "cleanup_since_ms must not exceed {}",
    i64::MAX
)));
}
Duration::try_milliseconds(ms as i64)
} else {
None
};
future_into_py(self_.py(), async move {
let compaction_stats = inner
.optimize(OptimizeAction::Compact {
options: lancedb::table::CompactionOptions::default(),
remap_options: None,
})
.await
.infer_error()?
.compaction
.unwrap();
let prune_stats = inner
.optimize(OptimizeAction::Prune {
older_than,
delete_unverified: None,
})
.await
.infer_error()?
.prune
.unwrap();
inner
.optimize(lancedb::table::OptimizeAction::Index(
OptimizeOptions::default(),
))
.await
.infer_error()?;
Ok(OptimizeStats {
compaction: CompactionStats {
files_added: compaction_stats.files_added as u64,
files_removed: compaction_stats.files_removed as u64,
fragments_added: compaction_stats.fragments_added as u64,
fragments_removed: compaction_stats.fragments_removed as u64,
},
prune: RemovalStats {
bytes_removed: prune_stats.bytes_removed,
old_versions_removed: prune_stats.old_versions,
},
})
})
}
}

View File

@@ -1,87 +0,0 @@
# Release process
There are five total packages we release. Three are the `lancedb` packages
for Python, Rust, and Node.js. The other two are the legacy `vectordb`
packages for Rust and Node.js.
The Python package is versioned and released separately from the Rust and Node.js
ones. For Rust and Node.js, the release process is shared between `lancedb` and
`vectordb` for now.
## Preview releases
LanceDB has full releases about every 2 weeks, but in between we make frequent
preview releases. These are released as `0.x.y.betaN` versions. They receive the
same level of testing as normal releases and let you get access to the latest
features. However, we do not guarantee that preview releases will remain
available for more than 6 months after release; we may delete them from the
package index after that. Once your application is stable, we
recommend switching to full releases, which will never be removed from package
indexes.
## Making releases
The release process uses a handful of GitHub actions to automate the process.
```text
┌─────────────────────┐
│Create Release Commit│
└─┬───────────────────┘
│ ┌────────────┐ ┌──►Python GH Release
├──►(tag) python-vX.Y.Z ───►│PyPI Publish├─┤
│ └────────────┘ └──►Python Wheels
│ ┌───────────┐
└──►(tag) vX.Y.Z ───┬──────►│NPM Publish├──┬──►Rust/Node GH Release
│ └───────────┘ │
│ └──►NPM Packages
│ ┌─────────────┐
└──────►│Cargo Publish├───►Cargo Release
└─────────────┘
```
To start a release, trigger a `Create Release Commit` action from
[the workflows page](https://github.com/lancedb/lancedb/actions/workflows/make-release-commit.yml)
(Click on "Run workflow").
* **For a preview release**, leave the default parameters.
* **For a stable release**, set the `release_type` input to `stable`.
> [!IMPORTANT]
> If there was a breaking change since the last stable release, and we haven't
> done so yet, we should increment the minor version. The CI will detect if this
> is needed and fail the `Create Release Commit` job. To fix, select the
> "bump minor version" option.
## Breaking changes
We try to avoid breaking changes, but sometimes they are necessary. When there
are breaking changes, we will increment the minor version. (This is valid
semantic versioning because we are still in `0.x` versions.)
When a PR makes a breaking change, the PR author should mark the PR using the
conventional commit markers: either exclamation mark after the type
(such as `feat!: change signature of func`) or have `BREAKING CHANGE` in the
body of the PR. A CI job will add a `breaking-change` label to the PR, which is
what CI will ultimately use to determine whether the minor version should be
incremented.
> [!IMPORTANT]
> Reviewers should check that PRs with breaking changes receive the `breaking-change`
> label. If a PR is missing the label, please add it, even if after it was merged.
> This label is used in the release process.
Some things that are considered breaking changes:
* Upgrading `lance` to a new minor version. Minor version bumps in Lance are
considered breaking changes during `0.x` releases. This can change behavior
in LanceDB.
* Upgrading a dependency pin that is in the Rust API. In particular, upgrading
`DataFusion` and `Arrow` are breaking changes. Changing dependencies that are
not exposed in our public API is not considered a breaking change.
* Changing the signature of a public function or method.
* Removing a public function or method.
We do make exceptions for APIs that are marked as experimental. These are APIs
that are under active development and not in major use. These changes should not
receive the `breaking-change` label.

View File

@@ -1,6 +1,6 @@
[package]
name = "lancedb-node"
version = "0.5.0"
version = "0.4.17"
description = "Serverless, low-latency vector database for AI applications"
license.workspace = true
edition.workspace = true

View File

@@ -19,12 +19,10 @@ use snafu::Snafu;
#[derive(Debug, Snafu)]
pub enum Error {
#[allow(dead_code)]
#[snafu(display("column '{name}' is missing"))]
MissingColumn { name: String },
#[snafu(display("{name}: {message}"))]
OutOfRange { name: String, message: String },
#[allow(dead_code)]
#[snafu(display("{index_type} is not a valid index type"))]
InvalidIndexType { index_type: String },

View File

@@ -59,7 +59,7 @@ fn database_new(mut cx: FunctionContext) -> JsResult<JsPromise> {
for handle in storage_options_js {
let obj = handle.downcast::<JsArray, _>(&mut cx).unwrap();
let key = obj.get::<JsString, _, _>(&mut cx, 0)?.value(&mut cx);
let value = obj.get::<JsString, _, _>(&mut cx, 1)?.value(&mut cx);
let value = obj.get::<JsString, _, _>(&mut cx, 0)?.value(&mut cx);
storage_options.push((key, value));
}

View File

@@ -19,7 +19,6 @@ use neon::prelude::*;
pub trait JsObjectExt {
fn get_opt_u32(&self, cx: &mut FunctionContext, key: &str) -> Result<Option<u32>>;
fn get_usize(&self, cx: &mut FunctionContext, key: &str) -> Result<usize>;
#[allow(dead_code)]
fn get_opt_usize(&self, cx: &mut FunctionContext, key: &str) -> Result<Option<usize>>;
}

View File

@@ -324,7 +324,7 @@ impl JsTable {
rt.spawn(async move {
let stats = table
.optimize(OptimizeAction::Prune {
older_than: Some(older_than),
older_than,
delete_unverified,
})
.await;

Some files were not shown because too many files have changed in this diff.