Compare commits


1 Commit

Author: zyy17
SHA1: 24b880f982
Message: ci: push image to dockerhub
Signed-off-by: zyy17 <zyylsxm@gmail.com>
Date: 2022-11-04 13:05:35 +08:00
665 changed files with 17157 additions and 42789 deletions

View File

@@ -1,2 +0,0 @@
[profile.default]
slow-timeout = { period = "60s", terminate-after = 3, grace-period = "30s" }
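
This looks like a cargo-nextest profile (conventionally stored at `.config/nextest.toml`; the path is an assumption, since the file name is not shown). A minimal sketch of exercising such a profile locally, assuming cargo-nextest is installed:

```
# install the test runner, then run the workspace tests with the default profile
cargo install cargo-nextest --locked
cargo nextest run --workspace
```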

View File

@@ -1,4 +0,0 @@
# Settings for s3 test
GT_S3_BUCKET=S3 bucket
GT_S3_ACCESS_KEY_ID=S3 access key id
GT_S3_ACCESS_KEY=S3 secret access key
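
The values above are placeholders; a hedged sketch of supplying real credentials before running the S3-backed tests locally (the bucket and key values below are hypothetical):

```
export GT_S3_BUCKET=my-test-bucket          # hypothetical bucket name
export GT_S3_ACCESS_KEY_ID=AKIAXXXXXXXX     # hypothetical access key id
export GT_S3_ACCESS_KEY=xxxxxxxxxxxxxxxx    # hypothetical secret access key
cargo test --workspace
```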

View File

@@ -1,86 +0,0 @@
---
name: Bug report
description: Is something not working? Help us fix it!
labels: [ "bug" ]
body:
- type: markdown
attributes:
value: |
Take some time to fill out this bug report. Thank you!
- type: dropdown
id: type
attributes:
label: What type of bug is this?
multiple: true
options:
- Configuration
- Crash
- Data corruption
- Incorrect result
- Locking issue
- Performance issue
- Unexpected error
- Other
validations:
required: true
- type: dropdown
id: subsystem
attributes:
label: What subsystems are affected?
description: You can pick multiple subsystems.
multiple: true
options:
- Standalone mode
- Frontend
- Datanode
- Meta
- Other
validations:
required: true
- type: textarea
id: what-happened
attributes:
label: What happened?
description: |
Tell us what happened and also what you would have expected to
happen instead.
placeholder: "Describe the bug"
validations:
required: true
- type: input
id: os
attributes:
label: What operating system did you use?
description: |
Please provide OS, version, and architecture. For example:
Windows 10 x64, Ubuntu 21.04 x64, Mac OS X 10.5 ARM, Raspberry
Pi i386, etc.
placeholder: "Ubuntu 21.04 x64"
validations:
required: true
- type: textarea
id: logs
attributes:
label: Relevant log output and stack trace
description: |
Please copy and paste any relevant log output or a stack
trace. This will be automatically formatted into code, so no
need for backticks.
render: bash
- type: textarea
id: reproduce
attributes:
label: How can we reproduce the bug?
description: |
Please walk us through and provide steps and details on how
to reproduce the issue. If possible, provide scripts that we
can run to trigger the bug.
render: bash
validations:
required: true

View File

@@ -1,8 +0,0 @@
blank_issues_enabled: false
contact_links:
- name: Greptime Community Slack
url: https://greptime.com/slack
about: Get free help from the Greptime community
- name: Greptime Community Discussion
url: https://github.com/greptimeTeam/greptimedb/discussions
about: Get free help from the Greptime community

View File

@@ -1,39 +0,0 @@
---
name: Enhancement
description: Suggest an enhancement to existing functionality
labels: [ "enhancement" ]
body:
- type: dropdown
id: type
attributes:
label: What type of enhancement is this?
multiple: true
options:
- API improvement
- Configuration
- Performance
- Refactor
- Tech debt reduction
- User experience
- Other
validations:
required: true
- type: textarea
id: what
attributes:
label: What does the enhancement do?
description: |
Give a high-level overview of how you
suggest improving an existing feature or functionality.
validations:
required: true
- type: textarea
id: implementation
attributes:
label: Implementation challenges
description: |
Share any ideas of how to implement the enhancement.
validations:
required: false

View File

@@ -1,42 +0,0 @@
---
name: Feature request
description: Suggest a new feature for GreptimeDB
labels: [ "feature request" ]
body:
- type: markdown
id: info
attributes:
value: |
Only use this template to suggest a new feature that doesn't already exist in GreptimeDB.
For enhancements to existing features, use the "Enhancement" issue template. For bugs,
use the bug report template.
- type: textarea
id: what
attributes:
label: What problem does the new feature solve?
description: |
Describe the problem and why it is important to solve. Did you consider alternative
solutions, perhaps outside the database? Why is it better to add the feature to
GreptimeDB?
validations:
required: true
- type: textarea
id: how
attributes:
label: What does the feature do?
description: |
Give a high-level overview of what the feature does and how it would work.
validations:
required: true
- type: textarea
id: implementation
attributes:
label: Implementation challenges
description: |
If you have ideas of how to implement the feature, and any particularly
challenging issues to overcome, then provide them here.
validations:
required: false

View File

@@ -1,10 +1,10 @@
# codecov config
coverage:
status:
patch: off # disable patch status
project:
default:
enable: yes
threshold: 1%
patch: off
ignore:
- "**/error*.rs" # ignore all error.rs files
- "tests/runner/*.rs" # ignore integration test runner

View File

@@ -1,19 +0,0 @@
I hereby agree to the terms of the [GreptimeDB CLA](https://gist.github.com/xtang/6378857777706e568c1949c7578592cc)
## What's changed and what's your intention?
_PLEASE DO NOT LEAVE THIS EMPTY !!!_
Please explain IN DETAIL what the changes are in this PR and why they are needed:
- Summarize your change (**mandatory**)
- How does this PR work? Need a brief introduction for the changed logic (optional)
- Describe clearly one logical change and avoid lazy messages (optional)
- Describe any limitations of the current code (optional)
## Checklist
- [ ] I have written the necessary rustdoc comments.
- [ ] I have added the necessary unit tests and integration tests.
## Refer to a related PR or issue link (optional)

View File

@@ -1,25 +1,10 @@
on:
pull_request:
types: [opened, synchronize, reopened, ready_for_review]
paths-ignore:
- 'docs/**'
- 'config/**'
- '**.md'
- '.dockerignore'
- 'docker/**'
- '.gitignore'
push:
branches:
- "main"
- "develop"
paths-ignore:
- 'docs/**'
- 'config/**'
- '**.md'
- '.dockerignore'
- 'docker/**'
- '.gitignore'
workflow_dispatch:
name: Code coverage
@@ -27,33 +12,41 @@ env:
RUST_TOOLCHAIN: nightly-2022-07-14
jobs:
coverage:
grcov:
if: github.event.pull_request.draft == false
runs-on: ubuntu-latest-8-cores
runs-on: ubuntu-latest
timeout-minutes: 60
steps:
- uses: actions/checkout@v3
- uses: actions/checkout@v2
- uses: arduino/setup-protoc@v1
- name: Install toolchain
uses: dtolnay/rust-toolchain@master
uses: actions-rs/toolchain@v1
with:
toolchain: ${{ env.RUST_TOOLCHAIN }}
components: llvm-tools-preview
override: true
profile: minimal
- name: Rust Cache
uses: Swatinem/rust-cache@v2
- name: Install latest nextest release
uses: taiki-e/install-action@nextest
- name: Install cargo-llvm-cov
uses: taiki-e/install-action@cargo-llvm-cov
- name: Collect coverage data
run: cargo llvm-cov nextest --workspace --lcov --output-path lcov.info
uses: Swatinem/rust-cache@v2.0.0
- name: Cleanup disk
uses: curoky/cleanup-disk-action@v2.0
with:
retain: 'rust'
- name: Execute tests
uses: actions-rs/cargo@v1
with:
command: test
args: --workspace
env:
RUST_BACKTRACE: 1
CARGO_INCREMENTAL: 0
RUSTFLAGS: "-Zprofile -Ccodegen-units=1 -Cinline-threshold=0 -Clink-dead-code -Coverflow-checks=off -Cpanic=unwind -Zpanic_abort_tests"
GT_S3_BUCKET: ${{ secrets.S3_BUCKET }}
GT_S3_ACCESS_KEY_ID: ${{ secrets.S3_ACCESS_KEY_ID }}
GT_S3_ACCESS_KEY: ${{ secrets.S3_ACCESS_KEY }}
UNITTEST_LOG_DIR: "__unittest_logs"
- name: Gather coverage data
id: coverage
uses: actions-rs/grcov@v0.1
- name: Codecov upload
uses: codecov/codecov-action@v2
with:

View File

@@ -1,12 +1,6 @@
on:
pull_request:
types: [opened, synchronize, reopened, ready_for_review]
paths-ignore:
- 'docs/**'
- 'config/**'
- '**.md'
- '.dockerignore'
- 'docker/**'
push:
branches:
- develop
@@ -14,97 +8,63 @@ on:
paths-ignore:
- 'docs/**'
- 'config/**'
- '.github/**'
- '**.md'
- '**.yml'
- '.dockerignore'
- 'docker/**'
- '.gitignore'
workflow_dispatch:
name: CI
name: Continuous integration for developing
env:
RUST_TOOLCHAIN: nightly-2022-07-14
jobs:
typos:
name: Spell Check with Typos
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- uses: crate-ci/typos@v1.0.4
check:
name: Check
if: github.event.pull_request.draft == false
runs-on: ubuntu-latest
timeout-minutes: 60
steps:
- uses: actions/checkout@v3
- uses: actions/checkout@v2
- uses: arduino/setup-protoc@v1
- uses: dtolnay/rust-toolchain@master
- uses: actions-rs/toolchain@v1
with:
profile: minimal
toolchain: ${{ env.RUST_TOOLCHAIN }}
override: true
- name: Rust Cache
uses: Swatinem/rust-cache@v2
- name: Run cargo check
run: cargo check --workspace --all-targets
uses: Swatinem/rust-cache@v2.0.0
- uses: actions-rs/cargo@v1
with:
command: check
args: --workspace --all-targets
toml:
name: Toml Check
test:
name: Test Suite
if: github.event.pull_request.draft == false
runs-on: ubuntu-latest
timeout-minutes: 60
steps:
- uses: actions/checkout@v3
- uses: dtolnay/rust-toolchain@master
- uses: actions/checkout@v2
- uses: arduino/setup-protoc@v1
- uses: actions-rs/toolchain@v1
with:
profile: minimal
toolchain: ${{ env.RUST_TOOLCHAIN }}
override: true
- name: Rust Cache
uses: Swatinem/rust-cache@v2
- name: Install taplo
run: cargo install taplo-cli --version ^0.8 --locked
- name: Run taplo
run: taplo format --check --option "indent_string= "
# Use coverage to run test.
# test:
# name: Test Suite
# if: github.event.pull_request.draft == false
# runs-on: ubuntu-latest
# timeout-minutes: 60
# steps:
# - uses: actions/checkout@v3
# - name: Cache LLVM and Clang
# id: cache-llvm
# uses: actions/cache@v3
# with:
# path: ./llvm
# key: llvm
# - uses: arduino/setup-protoc@v1
# - uses: KyleMayes/install-llvm-action@v1
# with:
# version: "14.0"
# cached: ${{ steps.cache-llvm.outputs.cache-hit }}
# - uses: dtolnay/rust-toolchain@master
# with:
# toolchain: ${{ env.RUST_TOOLCHAIN }}
# - name: Rust Cache
# uses: Swatinem/rust-cache@v2
# - name: Cleanup disk
# uses: curoky/cleanup-disk-action@v2.0
# with:
# retain: 'rust,llvm'
# - name: Install latest nextest release
# uses: taiki-e/install-action@nextest
# - name: Run tests
# run: cargo nextest run
# env:
# CARGO_BUILD_RUSTFLAGS: "-C link-arg=-fuse-ld=lld"
# RUST_BACKTRACE: 1
# GT_S3_BUCKET: ${{ secrets.S3_BUCKET }}
# GT_S3_ACCESS_KEY_ID: ${{ secrets.S3_ACCESS_KEY_ID }}
# GT_S3_ACCESS_KEY: ${{ secrets.S3_ACCESS_KEY }}
# UNITTEST_LOG_DIR: "__unittest_logs"
uses: Swatinem/rust-cache@v2.0.0
- uses: actions-rs/cargo@v1
with:
command: test
args: --workspace
env:
RUST_BACKTRACE: 1
GT_S3_BUCKET: ${{ secrets.S3_BUCKET }}
GT_S3_ACCESS_KEY_ID: ${{ secrets.S3_ACCESS_KEY_ID }}
GT_S3_ACCESS_KEY: ${{ secrets.S3_ACCESS_KEY }}
UNITTEST_LOG_DIR: "__unittest_logs"
fmt:
name: Rustfmt
@@ -112,16 +72,20 @@ jobs:
runs-on: ubuntu-latest
timeout-minutes: 60
steps:
- uses: actions/checkout@v3
- uses: actions/checkout@v2
- uses: arduino/setup-protoc@v1
- uses: dtolnay/rust-toolchain@master
- uses: actions-rs/toolchain@v1
with:
profile: minimal
toolchain: ${{ env.RUST_TOOLCHAIN }}
components: rustfmt
override: true
- name: Rust Cache
uses: Swatinem/rust-cache@v2
- name: Run cargo fmt
run: cargo fmt --all -- --check
uses: Swatinem/rust-cache@v2.0.0
- run: rustup component add rustfmt
- uses: actions-rs/cargo@v1
with:
command: fmt
args: --all -- --check
clippy:
name: Clippy
@@ -129,13 +93,17 @@ jobs:
runs-on: ubuntu-latest
timeout-minutes: 60
steps:
- uses: actions/checkout@v3
- uses: actions/checkout@v2
- uses: arduino/setup-protoc@v1
- uses: dtolnay/rust-toolchain@master
- uses: actions-rs/toolchain@v1
with:
profile: minimal
toolchain: ${{ env.RUST_TOOLCHAIN }}
components: clippy
override: true
- name: Rust Cache
uses: Swatinem/rust-cache@v2
- name: Run cargo clippy
run: cargo clippy --workspace --all-targets -- -D warnings -D clippy::print_stdout -D clippy::print_stderr
uses: Swatinem/rust-cache@v2.0.0
- run: rustup component add clippy
- uses: actions-rs/cargo@v1
with:
command: clippy
args: --workspace --all-targets -- -D warnings -D clippy::print_stdout -D clippy::print_stderr
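
The checks above can be reproduced locally before opening a pull request; a sketch reusing the same commands the workflow jobs run:

```
cargo fmt --all -- --check
cargo check --workspace --all-targets
cargo clippy --workspace --all-targets -- -D warnings -D clippy::print_stdout -D clippy::print_stderr
```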

View File

@@ -1,25 +0,0 @@
name: Create Issue in docs repo on doc related changes
on:
issues:
types:
- labeled
pull_request_target:
types:
- labeled
jobs:
doc_issue:
if: github.event.label.name == 'doc update required'
runs-on: ubuntu-latest
steps:
- name: create an issue in doc repo
uses: dacbd/create-issue-action@main
with:
owner: GreptimeTeam
repo: docs
token: ${{ secrets.DOCS_REPO_TOKEN }}
title: Update docs for ${{ github.event.issue.title || github.event.pull_request.title }}
body: |
A document change request is generated from
${{ github.event.issue.html_url || github.event.pull_request.html_url }}

View File

@@ -1,16 +0,0 @@
name: License checker
on:
push:
branches:
- develop
pull_request:
types: [opened, synchronize, reopened, ready_for_review]
jobs:
license-header-check:
runs-on: ubuntu-latest
name: license-header-check
steps:
- uses: actions/checkout@v2
- name: Check License Header
uses: apache/skywalking-eyes/header@main

View File

@@ -2,48 +2,37 @@ on:
push:
tags:
- "v*.*.*"
schedule:
# At 00:00 on Monday.
- cron: '0 0 * * 1'
workflow_dispatch:
name: Release
env:
RUST_TOOLCHAIN: nightly-2022-07-14
# FIXME(zyy17): Would be better to use `gh release list -L 1 | cut -f 3` to get the latest release version tag, but for a long time, we will stay at 'v0.1.0-alpha-*'.
SCHEDULED_BUILD_VERSION_PREFIX: v0.1.0-alpha
# In the future, we can change SCHEDULED_PERIOD to nightly.
SCHEDULED_PERIOD: weekly
jobs:
build:
name: Build binary
strategy:
matrix:
# The file format is greptime-<os>-<arch>
# The file format is greptime-<tag>.<os>-<arch>
include:
- arch: x86_64-unknown-linux-gnu
os: ubuntu-latest-16-cores
file: greptime-linux-amd64
os: ubuntu-latest
file: greptime-${{ github.ref_name }}.linux-amd64
- arch: aarch64-unknown-linux-gnu
os: ubuntu-latest-16-cores
file: greptime-linux-arm64
os: ubuntu-latest
file: greptime-${{ github.ref_name }}.linux-arm64
- arch: aarch64-apple-darwin
os: macos-latest
file: greptime-darwin-arm64
file: greptime-${{ github.ref_name }}.darwin-arm64
- arch: x86_64-apple-darwin
os: macos-latest
file: greptime-darwin-amd64
file: greptime-${{ github.ref_name }}.darwin-amd64
runs-on: ${{ matrix.os }}
steps:
- name: Checkout sources
uses: actions/checkout@v3
- name: Cache cargo assets
id: cache
uses: actions/cache@v3
with:
path: |
@@ -73,31 +62,35 @@ jobs:
sudo apt-get -y update
sudo apt-get -y install libssl-dev pkg-config g++-aarch64-linux-gnu gcc-aarch64-linux-gnu
- name: Install rust toolchain
uses: dtolnay/rust-toolchain@master
- name: Install stable toolchain
uses: actions-rs/toolchain@v1
with:
profile: minimal
toolchain: ${{ env.RUST_TOOLCHAIN }}
targets: ${{ matrix.arch }}
override: true
target: ${{ matrix.arch }}
- name: Output package versions
run: protoc --version ; cargo version ; rustc --version ; gcc --version ; g++ --version
- name: Run cargo build
run: cargo build ${{ matrix.opts }} --release --locked --target ${{ matrix.arch }}
uses: actions-rs/cargo@v1
with:
command: build
args: ${{ matrix.opts }} --release --locked --target ${{ matrix.arch }}
- name: Calculate checksum and rename binary
shell: bash
run: |
cd target/${{ matrix.arch }}/release
chmod +x greptime
tar -zcvf ${{ matrix.file }}.tgz greptime
echo $(shasum -a 256 ${{ matrix.file }}.tgz | cut -f1 -d' ') > ${{ matrix.file }}.sha256sum
cp greptime ${{ matrix.file }}
echo $(shasum -a 256 greptime | cut -f1 -d' ') > ${{ matrix.file }}.sha256sum
- name: Upload artifacts
uses: actions/upload-artifact@v3
with:
name: ${{ matrix.file }}
path: target/${{ matrix.arch }}/release/${{ matrix.file }}.tgz
path: target/${{ matrix.arch }}/release/${{ matrix.file }}
- name: Upload checksum of artifacts
uses: actions/upload-artifact@v3
@@ -115,36 +108,12 @@ jobs:
- name: Download artifacts
uses: actions/download-artifact@v3
- name: Configure scheduled build version # the version would be ${SCHEDULED_BUILD_VERSION_PREFIX}-YYYYMMDD-${SCHEDULED_PERIOD}, like v0.1.0-alpha-20221119-weekly.
shell: bash
if: github.event_name == 'schedule'
run: |
buildTime=`date "+%Y%m%d"`
SCHEDULED_BUILD_VERSION=${{ env.SCHEDULED_BUILD_VERSION_PREFIX }}-$buildTime-${{ env.SCHEDULED_PERIOD }}
echo "SCHEDULED_BUILD_VERSION=${SCHEDULED_BUILD_VERSION}" >> $GITHUB_ENV
- name: Create scheduled build git tag
if: github.event_name == 'schedule'
run: |
git tag ${{ env.SCHEDULED_BUILD_VERSION }}
- name: Publish scheduled release # configure the different release title and tags.
uses: softprops/action-gh-release@v1
if: github.event_name == 'schedule'
with:
name: "Release ${{ env.SCHEDULED_BUILD_VERSION }}"
tag_name: ${{ env.SCHEDULED_BUILD_VERSION }}
generate_release_notes: true
files: |
**/greptime-*
- name: Publish release
uses: softprops/action-gh-release@v1
if: github.event_name != 'schedule'
with:
name: "Release ${{ github.ref_name }}"
files: |
**/greptime-*
**/greptime-${{ github.ref_name }}.*
docker:
name: Build docker image
@@ -152,38 +121,39 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Checkout sources
uses: actions/checkout@v3
uses: actions/checkout@v2
- name: Download amd64 binary
uses: actions/download-artifact@v3
with:
name: greptime-linux-amd64
name: greptime-${{ github.ref_name }}.linux-amd64
path: amd64
- name: Unzip the amd64 artifacts
- name: Rename amd64 binary
run: |
cd amd64
tar xvf greptime-linux-amd64.tgz
rm greptime-linux-amd64.tgz
mv amd64/greptime-${{ github.ref_name }}.linux-amd64 amd64/greptime
- name: Download arm64 binary
uses: actions/download-artifact@v3
with:
name: greptime-linux-arm64
name: greptime-${{ github.ref_name }}.linux-arm64
path: arm64
- name: Unzip the arm64 artifacts
- name: Rename arm64 binary
run: |
cd arm64
tar xvf greptime-linux-arm64.tgz
rm greptime-linux-arm64.tgz
mv arm64/greptime-${{ github.ref_name }}.linux-arm64 arm64/greptime
- name: Login to UCloud Container Registry
- name: Set file permissions
shell: bash
run: |
chmod +x amd64/greptime arm64/greptime
- name: Login to GitHub Container Registry
uses: docker/login-action@v2
with:
registry: uhub.service.ucloud.cn
username: ${{ secrets.UCLOUD_USERNAME }}
password: ${{ secrets.UCLOUD_PASSWORD }}
registry: ghcr.io
username: ${{ github.actor }}
password: ${{ secrets.GITHUB_TOKEN }}
- name: Login to Dockerhub
uses: docker/login-action@v2
@@ -191,21 +161,6 @@ jobs:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: Configure scheduled build image tag # the tag would be ${SCHEDULED_BUILD_VERSION_PREFIX}-YYYYMMDD-${SCHEDULED_PERIOD}
shell: bash
if: github.event_name == 'schedule'
run: |
buildTime=`date "+%Y%m%d"`
SCHEDULED_BUILD_VERSION=${{ env.SCHEDULED_BUILD_VERSION_PREFIX }}-$buildTime-${{ env.SCHEDULED_PERIOD }}
echo "IMAGE_TAG=${SCHEDULED_BUILD_VERSION:1}" >> $GITHUB_ENV
- name: Configure tag # If the release tag is v0.1.0, then the image version tag will be 0.1.0.
shell: bash
if: github.event_name != 'schedule'
run: |
VERSION=${{ github.ref_name }}
echo "IMAGE_TAG=${VERSION:1}" >> $GITHUB_ENV
- name: Set up QEMU
uses: docker/setup-qemu-action@v2
@@ -220,7 +175,5 @@ jobs:
push: true
platforms: linux/amd64,linux/arm64
tags: |
greptime/greptimedb:latest
greptime/greptimedb:${{ env.IMAGE_TAG }}
uhub.service.ucloud.cn/greptime/greptimedb:latest
uhub.service.ucloud.cn/greptime/greptimedb:${{ env.IMAGE_TAG }}
ghcr.io/greptimeteam/greptimedb:${{ github.ref_name }}
greptime/greptimedb:${{ github.ref_name }}

7
.gitignore vendored
View File

@@ -18,7 +18,6 @@ debug/
# JetBrains IDE config directory
.idea/
*.iml
# VSCode IDE config directory
.vscode/
@@ -29,9 +28,3 @@ logs/
# cpython's generated python byte code
**/__pycache__/
# Benchmark dataset
benchmarks/data
# dotenv
.env

View File

@@ -1,14 +0,0 @@
header:
license:
spdx-id: Apache-2.0
copyright-owner: Greptime Team
paths:
- "**/*.rs"
- "**/*.py"
comment: on-failure
dependency:
files:
- Cargo.toml

View File

@@ -1,58 +1,16 @@
# Welcome!
# Contributing to GreptimeDB
Thanks a lot for considering contributing to GreptimeDB. We believe people like you would make GreptimeDB a great product. We intend to build a community where individuals can have open talks, show respect for one another, and speak with true ❤️. Meanwhile, we are to keep transparency and make your effort count here.
We much appreciate your interest in contributing to GreptimeDB! This document lists some guidelines for contributing to our code base.
Read the guidelines, and they can help you get started. Communicate with respect to developers maintaining and developing the project. In return, they should reciprocate that respect by addressing your issue, reviewing changes, as well as helping finalize and merge your pull requests.
To learn about the design of GreptimeDB, please refer to the [design docs](https://github.com/GrepTimeTeam/docs).
Follow our [README](https://github.com/GreptimeTeam/greptimedb#readme) to get the whole picture of the project. To learn about the design of GreptimeDB, please refer to the [design docs](https://github.com/GrepTimeTeam/docs).
## Your First Contribution
It can feel intimidating to contribute to a complex project, but it can also be exciting and fun. These general notes will help everyone participate in this communal activity.
- Follow the [Code of Conduct](https://github.com/GreptimeTeam/greptimedb/blob/develop/CODE_OF_CONDUCT.md)
- Small changes make huge differences. We will happily accept a PR making a single character change if it helps move forward. Don't wait to have everything working.
- Check the closed issues before opening your issue.
- Try to follow the existing style of the code.
- More importantly, when in doubt, ask away.
Pull requests are great, but we accept all kinds of other help if you like. Such as
- Write tutorials or blog posts. Blog, speak about, or create tutorials about one of GreptimeDB's many features. Mention [@greptime](https://twitter.com/greptime) on Twitter and email info@greptime.com so we can give pointers and tips and help you spread the word by promoting your content on Greptime communication channels.
- Improve the documentation. [Submit documentation](http://github.com/greptimeTeam/docs/) updates, enhancements, designs, or bug fixes, and fixing any spelling or grammar errors will be very much appreciated.
- Present at meetups and conferences about your GreptimeDB projects. Your unique challenges and successes in building things with GreptimeDB can provide great speaking material. We'd love to review your talk abstract, so get in touch with us if you'd like some help!
- Submit bug reports. To report a bug or a security issue, you can [open a new GitHub issue](https://github.com/GrepTimeTeam/greptimedb/issues/new).
- Speak up about feature requests. Sending feedback is a great way for us to understand your different use cases of GreptimeDB better. If you want to share your experience with GreptimeDB, or if you want to discuss any ideas, you can start a discussion on [GitHub discussions](https://github.com/GreptimeTeam/greptimedb/discussions), chat with the Greptime team on [Slack](https://greptime.com/slack), or you can tweet [@greptime](https://twitter.com/greptime) on Twitter.
## Code of Conduct
Also, there are things that we are not looking for because they don't match the goals of the product or benefit the community. Please read [Code of Conduct](https://github.com/GreptimeTeam/greptimedb/blob/develop/CODE_OF_CONDUCT.md); we hope everyone can keep good manners and become an honored member.
## License
GreptimeDB uses the [Apache 2.0 license](https://github.com/GreptimeTeam/greptimedb/blob/master/LICENSE) to strike a balance between open contributions and allowing you to use the software however you want.
## Getting Started
### Submitting Issues
- Check if an issue already exists. Before filing an issue report, see whether it's already covered. Use the search bar and check out existing issues.
- File an issue:
- To report a bug, a security issue, or anything that you think is a problem and that isn't under the radar, go ahead and [open a new GitHub issue](https://github.com/GrepTimeTeam/greptimedb/issues/new).
- In the given templates, look for the one that suits you.
- If you bump into anything, reach out to our [Slack](https://greptime.com/slack) for a wider audience and ask for help.
- What happens after:
- Once we spot a new issue, we identify and categorize it as soon as possible.
- Usually, it gets assigned to other developers. Follow up and see what folks are talking about and how they take care of it.
- Please be patient and offer as much information as you can to help reach a solution or a consensus. You are not alone and embrace team power.
## Pull Requests
### Before PR
- To ensure that community is free and confident in its ability to use your contributions, please sign the Contributor License Agreement (CLA) which will be incorporated in the pull request process.
- Make sure all your codes are formatted and follow the [coding style](https://pingcap.github.io/style-guide/rust/).
- Make sure all unit tests pass.
- Make sure all clippy warnings are fixed (you can check it locally by running `cargo clippy --workspace --all-targets -- -D warnings -D clippy::print_stdout -D clippy::print_stderr`).
#### `pre-commit` Hooks
You can set up the [`pre-commit`](https://pre-commit.com/#plugins) hooks to run these checks on every commit automatically.
@@ -79,14 +37,15 @@ pre-commit installed at .git/hooks/pre-pus
now `pre-commit` will run automatically on `git commit`.
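A minimal setup sketch, assuming `pre-commit` is installed from PyPI and the hook configuration ships with the repository:

```
pip install pre-commit
pre-commit install            # install the hook into .git/hooks
pre-commit run --all-files    # optionally run every hook once against the tree
```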
### Title
The titles of pull requests should be prefixed with category names listed in [Conventional Commits specification](https://www.conventionalcommits.org/en/v1.0.0)
like `feat`/`fix`/`docs`, with a concise summary of code change following. DO NOT use last commit message as pull request title.
The titles of pull requests should be prefixed with category name listed in [Conventional Commits specification](https://www.conventionalcommits.org/en/v1.0.0)
like `feat`/`fix`/`doc`, with a concise summary of code change follows. DO NOT use last commit message as pull request title.
### Description
- Feel free to go brief if your pull request is small, like a typo fix.
- If your pull request is small, like a typo fix, feel free to go brief.
- But if it contains large code change, make sure to state the motivation/design details of this PR so that reviewers can understand what you're trying to do.
- If the PR contains any breaking change or API change, make sure that is clearly listed in your description.
@@ -94,18 +53,11 @@ like `feat`/`fix`/`docs`, with a concise summary of code change following. DO NO
All commit messages SHOULD adhere to the [Conventional Commits specification](https://conventionalcommits.org/).
## Getting Help
## Getting help
There are many ways to get help when you're stuck. It is recommended to ask for help by opening an issue, with a detailed description
of what you were trying to do and what went wrong. You can also reach for help in our [Slack channel](https://greptime.com/slack).
of what you were trying to do and what went wrong. You can also reach for help in our Slack channel.
## Community
The core team will be thrilled if you participate in any way you like. When you are stuck, try to ask for help by filing an issue, with a detailed description of what you were trying to do and what went wrong. If you have any questions or if you would like to get involved in our community, please check out:
- [GreptimeDB Community Slack](https://greptime.com/slack)
- [GreptimeDB Github Discussions](https://github.com/GreptimeTeam/greptimedb/discussions)
Also, see some extra GreptimeDB content:
- [GreptimeDB Docs](https://greptime.com/docs)
- [Learn GreptimeDB](https://greptime.com/products/db)
- [Greptime Inc. Website](https://greptime.com)
## Bug report
To report a bug or a security issue, you can [open a new GitHub issue](https://github.com/GrepTimeTeam/greptimedb/issues/new).

3339
Cargo.lock generated

File diff suppressed because it is too large

View File

@@ -1,6 +1,5 @@
[workspace]
members = [
"benchmarks",
"src/api",
"src/catalog",
"src/client",
@@ -11,7 +10,6 @@ members = [
"src/common/function",
"src/common/function-macro",
"src/common/grpc",
"src/common/grpc-expr",
"src/common/query",
"src/common/recordbatch",
"src/common/runtime",
@@ -24,20 +22,13 @@ members = [
"src/log-store",
"src/meta-client",
"src/meta-srv",
"src/mito",
"src/object-store",
"src/promql",
"src/query",
"src/script",
"src/servers",
"src/session",
"src/sql",
"src/storage",
"src/store-api",
"src/table",
"tests-integration",
"tests/runner",
"src/table-engine",
]
[profile.release]
debug = true

201
LICENSE
View File

@@ -1,201 +0,0 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright 2022 Greptime Team
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

View File

@@ -1,67 +0,0 @@
IMAGE_REGISTRY ?= greptimedb
IMAGE_TAG ?= latest
##@ Build
.PHONY: build
build: ## Build debug version greptime.
cargo build
.PHONY: release
release: ## Build release version greptime.
cargo build --release
.PHONY: clean
clean: ## Clean the project.
cargo clean
.PHONY: fmt
fmt: ## Format all the Rust code.
cargo fmt --all
.PHONY: docker-image
docker-image: ## Build docker image.
docker build --network host -f docker/Dockerfile -t ${IMAGE_REGISTRY}:${IMAGE_TAG} .
##@ Test
.PHONY: unit-test
unit-test: ## Run unit test.
cargo test --workspace
.PHONY: integration-test
integration-test: ## Run integration test.
cargo test integration
.PHONY: sqlness-test
sqlness-test: ## Run sqlness test.
cargo run --bin sqlness-runner
.PHONY: check
check: ## Cargo check all the targets.
cargo check --workspace --all-targets
.PHONY: clippy
clippy: ## Check clippy rules.
cargo clippy --workspace --all-targets -- -D warnings -D clippy::print_stdout -D clippy::print_stderr
.PHONY: fmt-check
fmt-check: ## Check code format.
cargo fmt --all -- --check
##@ General
# The help target prints out all targets with their descriptions organized
# beneath their categories. The categories are represented by '##@' and the
# target descriptions by '##'. The awk command is responsible for reading the
# entire set of makefiles included in this invocation, looking for lines of the
# file as xyz: ## something, and then pretty-format the target and help. Then,
# if there's a line with ##@ something, that gets pretty-printed as a category.
# More info on the usage of ANSI control characters for terminal formatting:
# https://en.wikipedia.org/wiki/ANSI_escape_code#SGR_parameters
# More info on the awk command:
# https://linuxcommand.org/lc3_adv_awk.php
.PHONY: help
help: ## Display help messages.
@awk 'BEGIN {FS = ":.*##"; printf "\nUsage:\n make \033[36m<target>\033[0m\n"} /^[a-zA-Z_0-9-]+:.*?##/ { printf " \033[36m%-20s\033[0m %s\n", $$1, $$2 } /^##@/ { printf "\n\033[1m%s\033[0m\n", substr($$0, 5) } ' $(MAKEFILE_LIST)
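
A usage sketch for the targets above (the registry and tag values are illustrative):

```
make help                                    # list all targets with descriptions
make fmt-check clippy                        # run the same lint checks as CI
make docker-image IMAGE_REGISTRY=greptime/greptimedb IMAGE_TAG=latest
```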

233
README.md
View File

@@ -1,100 +1,101 @@
<p align="center">
<picture>
<source media="(prefers-color-scheme: light)" srcset="/docs/logo-text-padding.png">
<source media="(prefers-color-scheme: dark)" srcset="/docs/logo-text-padding-dark.png">
<img alt="GreptimeDB Logo" src="/docs/logo-text-padding.png" width="400px">
</picture>
</p>
# GreptimeDB
[![codecov](https://codecov.io/gh/GrepTimeTeam/greptimedb/branch/develop/graph/badge.svg?token=FITFDI3J3C)](https://codecov.io/gh/GrepTimeTeam/greptimedb)
<h3 align="center">
The next-generation hybrid timeseries/analytics processing database in the cloud
</h3>
GreptimeDB: the next-generation hybrid timeseries/analytics processing database in the cloud.
<p align="center">
<a href="https://codecov.io/gh/GrepTimeTeam/greptimedb"><img src="https://codecov.io/gh/GrepTimeTeam/greptimedb/branch/develop/graph/badge.svg?token=FITFDI3J3C"></img></a>
&nbsp;
<a href="https://github.com/GreptimeTeam/greptimedb/actions/workflows/develop.yml"><img src="https://github.com/GreptimeTeam/greptimedb/actions/workflows/develop.yml/badge.svg" alt="CI"></img></a>
&nbsp;
<a href="https://github.com/greptimeTeam/greptimedb/blob/develop/LICENSE"><img src="https://img.shields.io/github/license/greptimeTeam/greptimedb"></a>
</p>
## Getting Started
<p align="center">
<a href="https://twitter.com/greptime"><img src="https://img.shields.io/badge/twitter-follow_us-1d9bf0.svg"></a>
&nbsp;
<a href="https://www.linkedin.com/company/greptime/"><img src="https://img.shields.io/badge/linkedin-connect_with_us-0a66c2.svg"></a>
</p>
### Prerequisites
## What is GreptimeDB
To compile GreptimeDB from source, you'll need the following:
- Rust
- Protobuf
GreptimeDB is an open-source time-series database with a special focus on
scalability, analytical capabilities and efficiency. It's designed to work on
infrastructure of the cloud era, and users benefit from its elasticity and commodity
storage.
#### Rust
Our core developers have been building time-series data platforms
for years. Based on their best practices, GreptimeDB was born to give you:
The easiest way to install Rust is to use [`rustup`](https://rustup.rs/), which will check our `rust-toolchain` file and install the correct Rust version for you.
- A standalone binary that scales to highly-available distributed cluster, providing a transparent experience for cluster users
- Optimized columnar layout for handling time-series data; compacted, compressed, stored on various storage backends
- Flexible index options, tackling high cardinality issues down
- Distributed, parallel query execution, leveraging elastic computing resource
- Native SQL, and Python scripting for advanced analytical scenarios
- Widely adopted database protocols and APIs
- Extensible table engine architecture for extensive workloads
#### Protobuf
## Quick Start
`protoc` is required for compiling `.proto` files. `protobuf` is available from
major package managers on macOS and Linux distributions. You can find
installation instructions [here](https://grpc.io/docs/protoc-installation/).
### Build
#### Build from Source
To compile GreptimeDB from source, you'll need:
- C/C++ Toolchain: provides basic tools for compiling and linking. This is
available as `build-essential` on Ubuntu and under similar names on other platforms.
- Rust: the easiest way to install Rust is to use
[`rustup`](https://rustup.rs/), which will check our `rust-toolchain` file and
install the correct Rust version for you.
- Protobuf: `protoc` is required for compiling `.proto` files. `protobuf` is
available from major package managers on macOS and Linux distributions. You can
find installation instructions [here](https://grpc.io/docs/protoc-installation/).
**Note that `protoc` version needs to be >= 3.15** because we have used the `optional`
keyword. You can check it with `protoc --version`.
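A quick sanity check of these prerequisites before building (a sketch; exact package names vary by platform):

```
protoc --version        # should report libprotoc 3.15 or newer
rustup show             # rustup picks up the toolchain pinned in rust-toolchain
cargo build --release
```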
#### Build with Docker
A docker image with necessary dependencies is provided:
### Build the Docker Image
```
docker build --network host -f docker/Dockerfile -t greptimedb .
```
### Run
## Usage
Start GreptimeDB from source code, in standalone mode:
### Start Datanode
```
cargo run -- standalone start
// Start datanode with default options.
cargo run -- datanode start
OR
// Start datanode with `http-addr` option.
cargo run -- datanode start --http-addr=0.0.0.0:9999
OR
// Start datanode with `log-dir` and `log-level` options.
cargo run -- --log-dir=logs --log-level=debug datanode start
```
Or if you built from docker:
Start datanode with config file:
```
docker run -p 4002:4002 -v "$(pwd):/tmp/greptimedb" greptime/greptimedb standalone start
cargo run -- --log-dir=logs --log-level=debug datanode start -c ./config/datanode.example.toml
```
For more startup options, greptimedb's **distributed mode** and information
about Kubernetes deployment, check our [docs](https://docs.greptime.com/).
Start datanode by running a docker container:
### Connect
```
docker run -p 3000:3000 \
-p 3001:3001 \
-p 3306:3306 \
greptimedb
```
1. Connect to GreptimeDB via standard [MySQL
client](https://dev.mysql.com/downloads/mysql/):
### Start Frontend
Frontend should connect to Datanode, so **Datanode must be started first**!
```
// Connects to local Datanode at its default GRPC port: 3001
// Start Frontend with default options.
cargo run -- frontend start
OR
// Start Frontend with `mysql-addr` option.
cargo run -- frontend start --mysql-addr=0.0.0.0:9999
OR
// Start frontend with `log-dir` and `log-level` options.
cargo run -- --log-dir=logs --log-level=debug frontend start
```
Start frontend with config file:
```
cargo run -- --log-dir=logs --log-level=debug frontend start -c ./config/frontend.example.toml
```
### SQL Operations
1. Connecting DB by [mysql client](https://dev.mysql.com/downloads/mysql/):
```
# The standalone instance listens on port 4002 by default.
mysql -h 127.0.0.1 -P 4002
# The datanode listens on port 3306 by default.
mysql -h 127.0.0.1 -P 3306
```
2. Create table:
@@ -109,95 +110,29 @@ about Kubernetes deployment, check our [docs](https://docs.greptime.com/).
PRIMARY KEY(host)) ENGINE=mito WITH(regions=1);
```
3. Insert some data:
3. Insert data:
```SQL
INSERT INTO monitor(host, cpu, memory, ts) VALUES ('host1', 66.6, 1024, 1660897955000);
INSERT INTO monitor(host, cpu, memory, ts) VALUES ('host2', 77.7, 2048, 1660897956000);
INSERT INTO monitor(host, cpu, memory, ts) VALUES ('host3', 88.8, 4096, 1660897957000);
INSERT INTO monitor(host, cpu, memory, ts) VALUES ('host1', 66.6, 1024, 1660897955);
INSERT INTO monitor(host, cpu, memory, ts) VALUES ('host2', 77.7, 2048, 1660897956);
INSERT INTO monitor(host, cpu, memory, ts) VALUES ('host3', 88.8, 4096, 1660897957);
```
4. Query the data:
4. Query data:
```SQL
SELECT * FROM monitor;
```
```TEXT
+-------+---------------------+------+--------+
| host | ts | cpu | memory |
+-------+---------------------+------+--------+
| host1 | 2022-08-19 08:32:35 | 66.6 | 1024 |
| host2 | 2022-08-19 08:32:36 | 77.7 | 2048 |
| host3 | 2022-08-19 08:32:37 | 88.8 | 4096 |
+-------+---------------------+------+--------+
mysql> SELECT * FROM monitor;
+-------+------------+------+--------+
| host | ts | cpu | memory |
+-------+------------+------+--------+
| host1 | 1660897955 | 66.6 | 1024 |
| host2 | 1660897956 | 77.7 | 2048 |
| host3 | 1660897957 | 88.8 | 4096 |
+-------+------------+------+--------+
3 rows in set (0.01 sec)
```
You can always clean up the test database by removing `/tmp/greptimedb`.
## Resources
### Installation
- [Pre-built Binaries](https://github.com/GreptimeTeam/greptimedb/releases):
downloadable pre-built binaries for Linux and MacOS
- [Docker Images](https://hub.docker.com/r/greptime/greptimedb): pre-built
Docker images
- [`gtctl`](https://github.com/GreptimeTeam/gtctl): the command-line tool for
Kubernetes deployment
### Documentation
- GreptimeDB [User Guide](https://docs.greptime.com/user-guide/concepts.html)
- GreptimeDB [Developer
Guide](https://docs.greptime.com/developer-guide/overview.html)
### SDK
- [GreptimeDB Java
Client](https://github.com/GreptimeTeam/greptimedb-client-java)
## Project Status
This project is in its early stage and under heavy development. We move fast and
break things. Benchmarks on the development branch may not represent its potential
performance. We release pre-built binaries constantly for functional
evaluation. Do not use it in production at the moment.
For future plans, check out [GreptimeDB roadmap](https://github.com/GreptimeTeam/greptimedb/issues/669).
## Community
Our core team is thrilled to see you participate in any way you like. When you are stuck, try to
ask for help by filing an issue with a detailed description of what you were trying to do
and what went wrong. If you have any questions or if you would like to get involved in our
community, please check out:
- GreptimeDB Community on [Slack](https://greptime.com/slack)
- GreptimeDB GitHub [Discussions](https://github.com/GreptimeTeam/greptimedb/discussions)
- Greptime official [Website](https://greptime.com)
In addition, you may:
- View our official [Blog](https://greptime.com/blogs/index)
- Connect with us on [LinkedIn](https://www.linkedin.com/company/greptime/)
- Follow us on [Twitter](https://twitter.com/greptime)
## License
GreptimeDB uses the [Apache 2.0 license][1] to strike a balance between
open contributions and allowing you to use the software however you want.
[1]: <https://github.com/greptimeTeam/greptimedb/blob/develop/LICENSE>
You can delete your data by removing `/tmp/greptimedb`.
## Contributing
Please refer to [contribution guidelines](CONTRIBUTING.md) for more information.
## Acknowledgement
- GreptimeDB uses [Apache Arrow](https://arrow.apache.org/) as the memory model and [Apache Parquet](https://parquet.apache.org/) as the persistent file format.
- GreptimeDB's query engine is powered by [Apache Arrow DataFusion](https://github.com/apache/arrow-datafusion).
- [OpenDAL](https://github.com/datafuselabs/opendal) from [Datafuse Labs](https://github.com/datafuselabs) gives GreptimeDB a very general and elegant data access abstraction layer.
- GreptimeDB's meta service is based on [etcd](https://etcd.io/).
- GreptimeDB uses [RustPython](https://github.com/RustPython/RustPython) for experimental embedded python scripting.

View File

@@ -1,14 +0,0 @@
[package]
name = "benchmarks"
version = "0.1.0"
edition = "2021"
license = "Apache-2.0"
[dependencies]
arrow = "26.0.0"
clap = { version = "4.0", features = ["derive"] }
client = { path = "../src/client" }
indicatif = "0.17.1"
itertools = "0.10.5"
parquet = "26.0.0"
tokio = { version = "1.21", features = ["full"] }

View File

@@ -1,444 +0,0 @@
// Copyright 2022 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Use the taxi trip records from New York City dataset to bench. You can download the dataset from
//! [here](https://www1.nyc.gov/site/tlc/about/tlc-trip-record-data.page).
#![feature(once_cell)]
#![allow(clippy::print_stdout)]
use std::collections::HashMap;
use std::path::{Path, PathBuf};
use std::time::Instant;
use arrow::array::{ArrayRef, PrimitiveArray, StringArray, TimestampNanosecondArray};
use arrow::datatypes::{DataType, Float64Type, Int64Type};
use arrow::record_batch::RecordBatch;
use clap::Parser;
use client::admin::Admin;
use client::api::v1::column::Values;
use client::api::v1::{Column, ColumnDataType, ColumnDef, CreateExpr, InsertExpr};
use client::{Client, Database, Select};
use indicatif::{MultiProgress, ProgressBar, ProgressStyle};
use parquet::arrow::arrow_reader::ParquetRecordBatchReaderBuilder;
use tokio::task::JoinSet;
const DATABASE_NAME: &str = "greptime";
const CATALOG_NAME: &str = "greptime";
const SCHEMA_NAME: &str = "public";
const TABLE_NAME: &str = "nyc_taxi";
#[derive(Parser)]
#[command(name = "NYC benchmark runner")]
struct Args {
/// Path to the dataset
#[arg(short, long)]
path: Option<String>,
/// Batch size of insert request.
#[arg(short = 's', long = "batch-size", default_value_t = 4096)]
batch_size: usize,
/// Number of client threads on write (parallel on file level)
#[arg(short = 't', long = "thread-num", default_value_t = 4)]
thread_num: usize,
/// Number of query iteration
#[arg(short = 'i', long = "iter-num", default_value_t = 3)]
iter_num: usize,
#[arg(long = "skip-write")]
skip_write: bool,
#[arg(long = "skip-read")]
skip_read: bool,
#[arg(short, long, default_value_t = String::from("127.0.0.1:3001"))]
endpoint: String,
}
fn get_file_list<P: AsRef<Path>>(path: P) -> Vec<PathBuf> {
std::fs::read_dir(path)
.unwrap()
.map(|dir| dir.unwrap().path().canonicalize().unwrap())
.collect()
}
async fn write_data(
batch_size: usize,
db: &Database,
path: PathBuf,
mpb: MultiProgress,
pb_style: ProgressStyle,
) -> u128 {
let file = std::fs::File::open(&path).unwrap();
let record_batch_reader_builder = ParquetRecordBatchReaderBuilder::try_new(file).unwrap();
let row_num = record_batch_reader_builder
.metadata()
.file_metadata()
.num_rows();
let record_batch_reader = record_batch_reader_builder
.with_batch_size(batch_size)
.build()
.unwrap();
let progress_bar = mpb.add(ProgressBar::new(row_num as _));
progress_bar.set_style(pb_style);
progress_bar.set_message(format!("{:?}", path));
let mut total_rpc_elapsed_ms = 0;
for record_batch in record_batch_reader {
let record_batch = record_batch.unwrap();
let (columns, row_count) = convert_record_batch(record_batch);
let insert_expr = InsertExpr {
schema_name: "public".to_string(),
table_name: TABLE_NAME.to_string(),
region_number: 0,
columns,
row_count,
};
let now = Instant::now();
db.insert(insert_expr).await.unwrap();
let elapsed = now.elapsed();
total_rpc_elapsed_ms += elapsed.as_millis();
progress_bar.inc(row_count as _);
}
progress_bar.finish_with_message(format!(
"file {:?} done in {}ms",
path, total_rpc_elapsed_ms
));
total_rpc_elapsed_ms
}
fn convert_record_batch(record_batch: RecordBatch) -> (Vec<Column>, u32) {
let schema = record_batch.schema();
let fields = schema.fields();
let row_count = record_batch.num_rows();
let mut columns = vec![];
for (array, field) in record_batch.columns().iter().zip(fields.iter()) {
let values = build_values(array);
let column = Column {
column_name: field.name().to_owned(),
values: Some(values),
null_mask: vec![],
// datatype and semantic_type are set to default
..Default::default()
};
columns.push(column);
}
(columns, row_count as _)
}
fn build_values(column: &ArrayRef) -> Values {
match column.data_type() {
DataType::Int64 => {
let array = column
.as_any()
.downcast_ref::<PrimitiveArray<Int64Type>>()
.unwrap();
let values = array.values();
Values {
i64_values: values.to_vec(),
..Default::default()
}
}
DataType::Float64 => {
let array = column
.as_any()
.downcast_ref::<PrimitiveArray<Float64Type>>()
.unwrap();
let values = array.values();
Values {
f64_values: values.to_vec(),
..Default::default()
}
}
DataType::Timestamp(_, _) => {
let array = column
.as_any()
.downcast_ref::<TimestampNanosecondArray>()
.unwrap();
let values = array.values();
Values {
i64_values: values.to_vec(),
..Default::default()
}
}
DataType::Utf8 => {
let array = column.as_any().downcast_ref::<StringArray>().unwrap();
let values = array.iter().filter_map(|s| s.map(String::from)).collect();
Values {
string_values: values,
..Default::default()
}
}
DataType::Null
| DataType::Boolean
| DataType::Int8
| DataType::Int16
| DataType::Int32
| DataType::UInt8
| DataType::UInt16
| DataType::UInt32
| DataType::UInt64
| DataType::Float16
| DataType::Float32
| DataType::Date32
| DataType::Date64
| DataType::Time32(_)
| DataType::Time64(_)
| DataType::Duration(_)
| DataType::Interval(_)
| DataType::Binary
| DataType::FixedSizeBinary(_)
| DataType::LargeBinary
| DataType::LargeUtf8
| DataType::List(_)
| DataType::FixedSizeList(_, _)
| DataType::LargeList(_)
| DataType::Struct(_)
| DataType::Union(_, _, _)
| DataType::Dictionary(_, _)
| DataType::Decimal128(_, _)
| DataType::Decimal256(_, _)
| DataType::Map(_, _) => todo!(),
}
}
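// Builds the `CreateExpr` for the benchmark table. The columns appear to mirror the
// NYC TLC yellow-taxi trip record schema (an assumption based on the column names),
// with `tpep_pickup_datetime` as the time index and `VendorID` as the primary key.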
fn create_table_expr() -> CreateExpr {
CreateExpr {
catalog_name: Some(CATALOG_NAME.to_string()),
schema_name: Some(SCHEMA_NAME.to_string()),
table_name: TABLE_NAME.to_string(),
desc: None,
column_defs: vec![
ColumnDef {
name: "VendorID".to_string(),
datatype: ColumnDataType::Int64 as i32,
is_nullable: true,
default_constraint: None,
},
ColumnDef {
name: "tpep_pickup_datetime".to_string(),
datatype: ColumnDataType::Int64 as i32,
is_nullable: true,
default_constraint: None,
},
ColumnDef {
name: "tpep_dropoff_datetime".to_string(),
datatype: ColumnDataType::Int64 as i32,
is_nullable: true,
default_constraint: None,
},
ColumnDef {
name: "passenger_count".to_string(),
datatype: ColumnDataType::Float64 as i32,
is_nullable: true,
default_constraint: None,
},
ColumnDef {
name: "trip_distance".to_string(),
datatype: ColumnDataType::Float64 as i32,
is_nullable: true,
default_constraint: None,
},
ColumnDef {
name: "RatecodeID".to_string(),
datatype: ColumnDataType::Float64 as i32,
is_nullable: true,
default_constraint: None,
},
ColumnDef {
name: "store_and_fwd_flag".to_string(),
datatype: ColumnDataType::String as i32,
is_nullable: true,
default_constraint: None,
},
ColumnDef {
name: "PULocationID".to_string(),
datatype: ColumnDataType::Int64 as i32,
is_nullable: true,
default_constraint: None,
},
ColumnDef {
name: "DOLocationID".to_string(),
datatype: ColumnDataType::Int64 as i32,
is_nullable: true,
default_constraint: None,
},
ColumnDef {
name: "payment_type".to_string(),
datatype: ColumnDataType::Int64 as i32,
is_nullable: true,
default_constraint: None,
},
ColumnDef {
name: "fare_amount".to_string(),
datatype: ColumnDataType::Float64 as i32,
is_nullable: true,
default_constraint: None,
},
ColumnDef {
name: "extra".to_string(),
datatype: ColumnDataType::Float64 as i32,
is_nullable: true,
default_constraint: None,
},
ColumnDef {
name: "mta_tax".to_string(),
datatype: ColumnDataType::Float64 as i32,
is_nullable: true,
default_constraint: None,
},
ColumnDef {
name: "tip_amount".to_string(),
datatype: ColumnDataType::Float64 as i32,
is_nullable: true,
default_constraint: None,
},
ColumnDef {
name: "tolls_amount".to_string(),
datatype: ColumnDataType::Float64 as i32,
is_nullable: true,
default_constraint: None,
},
ColumnDef {
name: "improvement_surcharge".to_string(),
datatype: ColumnDataType::Float64 as i32,
is_nullable: true,
default_constraint: None,
},
ColumnDef {
name: "total_amount".to_string(),
datatype: ColumnDataType::Float64 as i32,
is_nullable: true,
default_constraint: None,
},
ColumnDef {
name: "congestion_surcharge".to_string(),
datatype: ColumnDataType::Float64 as i32,
is_nullable: true,
default_constraint: None,
},
ColumnDef {
name: "airport_fee".to_string(),
datatype: ColumnDataType::Float64 as i32,
is_nullable: true,
default_constraint: None,
},
],
time_index: "tpep_pickup_datetime".to_string(),
primary_keys: vec!["VendorID".to_string()],
create_if_not_exists: false,
table_options: Default::default(),
region_ids: vec![0],
table_id: Some(0),
}
}
fn query_set() -> HashMap<String, String> {
let mut ret = HashMap::new();
ret.insert(
"count_all".to_string(),
format!("SELECT COUNT(*) FROM {};", TABLE_NAME),
);
ret.insert(
"fare_amt_by_passenger".to_string(),
format!("SELECT passenger_count, MIN(fare_amount), MAX(fare_amount), SUM(fare_amount) FROM {} GROUP BY passenger_count",TABLE_NAME)
);
ret
}
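// Creates the benchmark table, then drains the file list with a simple work queue:
// up to `thread_num` write jobs run concurrently, and whenever one finishes the next
// pending file is spawned in its place.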
async fn do_write(args: &Args, client: &Client) {
let admin = Admin::new("admin", client.clone());
let mut file_list = get_file_list(args.path.clone().expect("Specify data path in argument"));
let mut write_jobs = JoinSet::new();
let create_table_result = admin.create(create_table_expr()).await;
println!("Create table result: {:?}", create_table_result);
let progress_bar_style = ProgressStyle::with_template(
"[{elapsed_precise}] {bar:60.cyan/blue} {pos:>7}/{len:7} {msg}",
)
.unwrap()
.progress_chars("##-");
let multi_progress_bar = MultiProgress::new();
let file_progress = multi_progress_bar.add(ProgressBar::new(file_list.len() as _));
file_progress.inc(0);
let batch_size = args.batch_size;
for _ in 0..args.thread_num {
if let Some(path) = file_list.pop() {
let db = Database::new(DATABASE_NAME, client.clone());
let mpb = multi_progress_bar.clone();
let pb_style = progress_bar_style.clone();
write_jobs.spawn(async move { write_data(batch_size, &db, path, mpb, pb_style).await });
}
}
while write_jobs.join_next().await.is_some() {
file_progress.inc(1);
if let Some(path) = file_list.pop() {
let db = Database::new(DATABASE_NAME, client.clone());
let mpb = multi_progress_bar.clone();
let pb_style = progress_bar_style.clone();
write_jobs.spawn(async move { write_data(batch_size, &db, path, mpb, pb_style).await });
}
}
}
async fn do_query(num_iter: usize, db: &Database) {
for (query_name, query) in query_set() {
println!("Running query: {}", query);
for i in 0..num_iter {
let now = Instant::now();
let _res = db.select(Select::Sql(query.clone())).await.unwrap();
let elapsed = now.elapsed();
println!(
"query {}, iteration {}: {}ms",
query_name,
i,
elapsed.as_millis()
);
}
}
}
fn main() {
let args = Args::parse();
tokio::runtime::Builder::new_multi_thread()
.worker_threads(args.thread_num)
.enable_all()
.build()
.unwrap()
.block_on(async {
let client = Client::with_urls(vec![&args.endpoint]);
if !args.skip_write {
do_write(&args, &client).await;
}
if !args.skip_read {
let db = Database::new(DATABASE_NAME, client.clone());
do_query(args.iter_num, &db).await;
}
})
}

@@ -0,0 +1,71 @@
import sys
# for annoying relative import beyond top-level package
sys.path.insert(0, "../")
from greptime import mock_tester, coprocessor, greptime as gt_builtin
from greptime.greptime import interval, vector, log, prev, sqrt, datetime
import greptime.greptime as greptime
import json
import numpy as np
def data_sample(k_lines, symbol, density=5 * 30 * 86400):
"""
Only return close data for simplicity for now
"""
k_lines = k_lines["result"] if k_lines["ret_msg"] == "OK" else None
if k_lines is None:
raise Exception("Expect a `OK`ed message")
close = [float(i["close"]) for i in k_lines]
return interval(close, density, "prev")
def as_table(kline: list):
col_len = len(kline)
ret = {
k: vector([fn(row[k]) for row in kline], str(ty))
for k, fn, ty in
[
("symbol", str, "str"),
("period", str, "str"),
("open_time", int, "int"),
("open", float, "float"),
("high", float, "float"),
("low", float, "float"),
("close", float, "float")
]
}
return ret
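# Roughly: computes realized volatility over several trailing windows (7d .. 180d) ending
# at the latest timestamp. Each window keeps only the samples inside it, downsamples close
# prices into 10-minute buckets (taking the last value per bucket), takes log returns, and
# returns the square root of the return variance normalized by the average sample spacing.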
@coprocessor(args=["open_time", "close"], returns=[
"rv_7d",
"rv_15d",
"rv_30d",
"rv_60d",
"rv_90d",
"rv_180d"
])
def calc_rvs(open_time, close):
from greptime import vector, log, prev, sqrt, datetime, pow, sum, last
import greptime as g
def calc_rv(close, open_time, time, interval):
mask = (open_time < time) & (open_time > time - interval)
close = close[mask]
open_time = open_time[mask]
close = g.interval(open_time, close, datetime("10m"), lambda x:last(x))
avg_time_interval = (open_time[-1] - open_time[0])/(len(open_time)-1)
ref = log(close/prev(close))
var = sum(pow(ref, 2)/(len(ref)-1))
return sqrt(var/avg_time_interval)
# how to get env var,
# maybe through accessing scope and serde then send to remote?
timepoint = open_time[-1]
rv_7d = vector([calc_rv(close, open_time, timepoint, datetime("7d"))])
rv_15d = vector([calc_rv(close, open_time, timepoint, datetime("15d"))])
rv_30d = vector([calc_rv(close, open_time, timepoint, datetime("30d"))])
rv_60d = vector([calc_rv(close, open_time, timepoint, datetime("60d"))])
rv_90d = vector([calc_rv(close, open_time, timepoint, datetime("90d"))])
rv_180d = vector([calc_rv(close, open_time, timepoint, datetime("180d"))])
return rv_7d, rv_15d, rv_30d, rv_60d, rv_90d, rv_180d

@@ -0,0 +1 @@
curl "https://api.bybit.com/v2/public/index-price-kline?symbol=BTCUSD&interval=1&limit=$1&from=1581231260" > kline.json

@@ -0,0 +1,108 @@
{
"ret_code": 0,
"ret_msg": "OK",
"ext_code": "",
"ext_info": "",
"result": [
{
"symbol": "BTCUSD",
"period": "1",
"open_time": 300,
"open": "10107",
"high": "10109.34",
"low": "10106.71",
"close": "10106.79"
},
{
"symbol": "BTCUSD",
"period": "1",
"open_time": 900,
"open": "10106.79",
"high": "10109.27",
"low": "10105.92",
"close": "10106.09"
},
{
"symbol": "BTCUSD",
"period": "1",
"open_time": 1200,
"open": "10106.09",
"high": "10108.75",
"low": "10104.66",
"close": "10108.73"
},
{
"symbol": "BTCUSD",
"period": "1",
"open_time": 1800,
"open": "10108.73",
"high": "10109.52",
"low": "10106.07",
"close": "10106.38"
},
{
"symbol": "BTCUSD",
"period": "1",
"open_time": 2400,
"open": "10106.38",
"high": "10109.48",
"low": "10104.81",
"close": "10106.95"
},
{
"symbol": "BTCUSD",
"period": "1",
"open_time": 3000,
"open": "10106.95",
"high": "10109.48",
"low": "10106.6",
"close": "10107.55"
},
{
"symbol": "BTCUSD",
"period": "1",
"open_time": 3600,
"open": "10107.55",
"high": "10109.28",
"low": "10104.68",
"close": "10104.68"
},
{
"symbol": "BTCUSD",
"period": "1",
"open_time": 4200,
"open": "10104.68",
"high": "10109.18",
"low": "10104.14",
"close": "10108.8"
},
{
"symbol": "BTCUSD",
"period": "1",
"open_time": 4800,
"open": "10108.8",
"high": "10117.36",
"low": "10108.8",
"close": "10115.96"
},
{
"symbol": "BTCUSD",
"period": "1",
"open_time": 5400,
"open": "10115.96",
"high": "10119.19",
"low": "10115.96",
"close": "10117.08"
},
{
"symbol": "BTCUSD",
"period": "1",
"open_time": 6000,
"open": "10117.08",
"high": "10120.73",
"low": "10116.96",
"close": "10120.43"
}
],
"time_now": "1661225351.158190"
}

@@ -0,0 +1,4 @@
from .greptime import coprocessor, copr
from .greptime import vector, log, prev, next, first, last, sqrt, pow, datetime, sum, interval
from .mock import mock_tester
from .cfg import set_conn_addr, get_conn_addr

@@ -0,0 +1,11 @@
GREPTIME_DB_CONN_ADDRESS = "localhost:3000"
"""The global variable for the address used to connect to the database"""
def set_conn_addr(addr: str):
"""set database address to given `addr`"""
global GREPTIME_DB_CONN_ADDRESS
GREPTIME_DB_CONN_ADDRESS = addr
def get_conn_addr()->str:
global GREPTIME_DB_CONN_ADDRESS
return GREPTIME_DB_CONN_ADDRESS

@@ -0,0 +1,207 @@
"""
Note that this is a mock library: if not connected to a database,
it can only run on mock data and mock functions backed by numpy
"""
import functools
import numpy as np
import json
from urllib import request
import inspect
import requests
from .cfg import set_conn_addr, get_conn_addr
log = np.log
sum = np.nansum
sqrt = np.sqrt
pow = np.power
nan = np.nan
class TimeStamp(str):
"""
TODO: impl date time
"""
pass
class i32(int):
"""
For Python Coprocessor Type Annotation ONLY
A signed 32-bit integer.
"""
def __repr__(self) -> str:
return "i32"
class i64(int):
"""
For Python Coprocessor Type Annotation ONLY
A signed 64-bit integer.
"""
def __repr__(self) -> str:
return "i64"
class f32(float):
"""
For Python Coprocessor Type Annotation ONLY
A 32-bit floating point number.
"""
def __repr__(self) -> str:
return "f32"
class f64(float):
"""
For Python Coprocessor Type Annotation ONLY
A 64-bit floating point number.
"""
def __repr__(self) -> str:
return "f64"
class vector(np.ndarray):
"""
A compact vector with all elements of the same data type.
"""
_datatype: str | None = None
def __new__(
cls,
lst,
dtype=None
) -> ...:
self = np.asarray(lst).view(cls)
self._datatype = dtype
return self
def __str__(self) -> str:
return "vector({}, \"{}\")".format(super().__str__(), self.datatype())
def datatype(self):
return self._datatype
def filter(self, lst_bool):
return self[lst_bool]
def last(lst):
return lst[-1]
def first(lst):
return lst[0]
def prev(lst):
ret = np.zeros(len(lst))
ret[1:] = lst[0:-1]
ret[0] = nan
return ret
def next(lst):
ret = np.zeros(len(lst))
ret[:-1] = lst[1:]
ret[-1] = nan
return ret
def interval(ts: vector, arr: vector, duration: int, func):
"""
Note that this is a mock function with the same functionality as the actual Python coprocessor.
`arr` is a vector of integral or temporal type.
"""
start = np.min(ts)
end = np.max(ts)
masks = [(ts >= i) & (ts <= (i+duration)) for i in range(start, end, duration)]
lst_res = [func(arr[mask]) for mask in masks]
return lst_res
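# Illustrative usage of the mock `interval` above (comment only, not part of the
# original module): bucket a 0..29 second series into 10-second windows and take the
# max of each bucket.
#   >>> ts = np.arange(30)
#   >>> vals = ts * 2
#   >>> interval(ts, vals, 10, np.max)
#   [20, 40, 58]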
def factor(unit: str) -> int:
if unit == "d":
return 24 * 60 * 60
elif unit == "h":
return 60 * 60
elif unit == "m":
return 60
elif unit == "s":
return 1
else:
raise Exception("Only d, h, m, s are supported, found {}".format(unit))
def datetime(input_time: str) -> int:
"""
supports `d` (day), `h` (hour), `m` (minute) and `s` (second)
supported formats:
`12s`, `7d`, `12d2h7m`
"""
prev = 0
cur = 0
state = "Num"
parse_res = []
for idx, ch in enumerate(input_time):
if ch.isdigit():
cur = idx
if state != "Num":
parse_res.append((state, input_time[prev:cur], (prev, cur)))
prev = idx
state = "Num"
else:
cur = idx
if state != "Symbol":
parse_res.append((state, input_time[prev:cur], (prev, cur)))
prev = idx
state = "Symbol"
parse_res.append((state, input_time[prev:cur+1], (prev, cur+1)))
cur_idx = 0
res_time = 0
while cur_idx < len(parse_res):
pair = parse_res[cur_idx]
if pair[0] == "Num":
val = int(pair[1])
nxt = parse_res[cur_idx+1]
res_time += val * factor(nxt[1])
cur_idx += 2
else:
raise Exception("Two symbol in a row is impossible")
return res_time
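# Worked example for the parser above (illustrative comment only):
#   datetime("12d2h7m") == 12*86400 + 2*3600 + 7*60 == 1044420 seconds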
def coprocessor(args=None, returns=None, sql=None):
"""
The actual coprocessor decorator: it connects to the database and submits
whatever function is decorated with `@coprocessor(args=[...], returns=[...], sql=...)`
"""
def decorator_copr(func):
@functools.wraps(func)
def wrapper_do_actual(*args, **kwargs):
if len(args)!=0 or len(kwargs)!=0:
raise Exception("Expect a call with no arguments (all args are provided by the coprocessor itself)")
source = inspect.getsource(func)
url = "http://{}/v1/scripts".format(get_conn_addr())
print("Posting to {}".format(url))
data = {
"script": source,
"engine": None,
}
res = requests.post(
url,
headers={"Content-Type": "application/json"},
json=data
)
return res
return wrapper_do_actual
return decorator_copr
# make a alias for short
copr = coprocessor
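# Example usage (illustrative sketch, mirroring the coprocessor in test.py): decorate a
# function and call it with no arguments to POST its source to the configured endpoint.
#   @copr(sql="select number from numbers limit 10", args=["number"], returns=["n"])
#   def double(number):
#       return number * 2
#   double()  # posts the script source to http://{conn_addr}/v1/scripts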

@@ -0,0 +1,82 @@
"""
Note this is a mock library: if not connected to a database,
it can only run on mock data, backed by numpy
"""
from typing import Any
import numpy as np
from .greptime import i32,i64,f32,f64, vector, interval, prev, datetime, log, sum, sqrt, pow, nan, copr, coprocessor
import inspect
import functools
import ast
def mock_tester(
func,
env:dict,
table=None
):
"""
Mock tester helper function.
It replaces `@coprocessor` with `@mock_copr` and adds a keyword `env=env`,
like `@mock_copr(args=..., returns=..., env=env)`
"""
code = inspect.getsource(func)
tree = ast.parse(code)
tree = HackyReplaceDecorator("env").visit(tree)
new_func = tree.body[0]
fn_name = new_func.name
code_obj = compile(tree, "<embedded>", "exec")
exec(code_obj)
ret = eval("{}()".format(fn_name))
return ret
def mock_copr(args, returns, sql=None, env:None|dict=None):
"""
This should not be used directly by users
"""
def decorator_copr(func):
@functools.wraps(func)
def wrapper_do_actual(*fn_args, **fn_kwargs):
real_args = [env[name] for name in args]
ret = func(*real_args)
return ret
return wrapper_do_actual
return decorator_copr
class HackyReplaceDecorator(ast.NodeTransformer):
"""
This class accepts an `env` dict (the environment to extract args from)
and puts the `env` dict in the parameter list of the `mock_copr` decorator, i.e.:
a `@copr(args=["a", "b"], returns=["c"])` with a call like mock_tester(abc, env={"a":2, "b":3})
will be transformed into `@mock_copr(args=["a", "b"], returns=["c"], env={"a":2, "b":3})`
"""
def __init__(self, env: str) -> None:
# just for add `env` keyword
self.env = env
def visit_FunctionDef(self, node: ast.FunctionDef) -> Any:
new_node = node
decorator_list = new_node.decorator_list
if len(decorator_list)!=1:
return node
deco = decorator_list[0]
if deco.func.id!="coprocessor" and deco.func.id !="copr":
raise Exception("Expect a @copr or @coprocessor, found {}.".format(deco.func.id))
deco.func = ast.Name(id="mock_copr", ctx=ast.Load())
new_kw = ast.keyword(arg="env", value=ast.Name(id=self.env, ctx=ast.Load()))
deco.keywords.append(new_kw)
# Tie up loose ends in the AST.
ast.copy_location(new_node, node)
ast.fix_missing_locations(new_node)
self.generic_visit(node)
return new_node

@@ -0,0 +1,60 @@
from example.calc_rv import as_table, calc_rvs
from greptime import coprocessor, set_conn_addr, get_conn_addr, mock_tester
import sys
import json
import requests
'''
To run this script, you need to first start an HTTP server of greptime, and run
`
python3 component/script/python/test.py ADDRESS:PORT
`
'''
@coprocessor(sql='select number from numbers limit 10', args=['number'], returns=['n'])
def test(n):
return n+2
def init_table(close, open_time):
req_init = "/v1/sql?sql=create table k_line (close double, open_time bigint, TIME INDEX (open_time))"
print(get_db(req_init).text)
for c1, c2 in zip(close, open_time):
req = "/v1/sql?sql=INSERT INTO k_line(close, open_time) VALUES ({}, {})".format(c1, c2)
print(get_db(req).text)
print(get_db("/v1/sql?sql=select * from k_line").text)
def get_db(req:str):
return requests.get("http://{}{}".format(get_conn_addr(), req))
if __name__ == "__main__":
with open("component/script/python/example/kline.json", "r") as kline_file:
kline = json.load(kline_file)
table = as_table(kline["result"])
close = table["close"]
open_time = table["open_time"]
env = {"close":close, "open_time": open_time}
res = mock_tester(calc_rvs, env=env)
print("Mock result:", [i[0] for i in res])
exit()
if len(sys.argv)!=2:
raise Exception("Expect only one address as cmd's args")
set_conn_addr(sys.argv[1])
res = test()
print(res.headers)
print(res.text)
with open("component/script/python/example/kline.json", "r") as kline_file:
kline = json.load(kline_file)
# vec = vector([1,2,3], int)
# print(vec, vec.datatype())
table = as_table(kline["result"])
# print(table)
close = table["close"]
open_time = table["open_time"]
init_table(close, open_time)
real = calc_rvs()
print(real)
try:
print(real.text["error"])
except:
print(real.text)

@@ -1,18 +1,15 @@
node_id = 42
mode = 'distributed'
rpc_addr = '127.0.0.1:3001'
http_addr = '0.0.0.0:3000'
rpc_addr = '0.0.0.0:3001'
wal_dir = '/tmp/greptimedb/wal'
rpc_runtime_size = 8
mysql_addr = '127.0.0.1:4406'
mysql_addr = '0.0.0.0:3306'
mysql_runtime_size = 4
enable_memory_catalog = false
# applied when postgres feature enabled
postgres_addr = '0.0.0.0:5432'
postgres_runtime_size = 4
[storage]
type = 'File'
data_dir = '/tmp/greptimedb/data/'
[meta_client_opts]
metasrv_addrs = ['127.0.0.1:3002']
timeout_millis = 3000
connect_timeout_millis = 5000
tcp_nodelay = false

@@ -1,12 +1,4 @@
mode = 'distributed'
datanode_rpc_addr = '127.0.0.1:3001'
[http_options]
addr = '127.0.0.1:4000'
timeout = "30s"
[meta_client_opts]
metasrv_addrs = ['127.0.0.1:3002']
timeout_millis = 3000
connect_timeout_millis = 5000
tcp_nodelay = false
http_addr = '0.0.0.0:4000'
grpc_addr = '0.0.0.0:4001'
mysql_addr = '0.0.0.0:4003'
mysql_runtime_size = 4

@@ -1,4 +1,4 @@
bind_addr = '127.0.0.1:3002'
server_addr = '127.0.0.1:3002'
store_addr = '127.0.0.1:2379'
datanode_lease_secs = 15
server_addr = '0.0.0.0:3002'
store_addr = '127.0.0.1:2380'
datanode_lease_secs = 30

@@ -1,36 +0,0 @@
node_id = 0
mode = 'standalone'
wal_dir = '/tmp/greptimedb/wal/'
enable_memory_catalog = false
[http_options]
addr = '127.0.0.1:4000'
timeout = "30s"
[storage]
type = 'File'
data_dir = '/tmp/greptimedb/data/'
[grpc_options]
addr = '127.0.0.1:4001'
runtime_size = 8
[mysql_options]
addr = '127.0.0.1:4002'
runtime_size = 2
[influxdb_options]
enable = true
[opentsdb_options]
addr = '127.0.0.1:4242'
enable = true
runtime_size = 2
[prometheus_options]
enable = true
[postgres_options]
addr = '127.0.0.1:4003'
runtime_size = 2
check_pwd = false

@@ -55,7 +55,7 @@ The DataFusion basically execute aggregate like this:
2. Call `update_batch` on each accumulator with partitioned data, to let you update your aggregate calculation.
3. Call `state` to get each accumulator's internal state, the medial calculation result.
4. Call `merge_batch` to merge all accumulator's internal state to one.
5. Execute `evaluate` on the chosen one to get the final calculation result.
5. Execute `evalute` on the chosen one to get the final calculation result.
Once you know the meaning of each method, you can easily write your accumulator. You can refer to `Median` accumulator or `SUM` accumulator defined in file `my_sum_udaf_example.rs` for more details.
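As a rough illustration of that lifecycle (a simplified sketch only, not DataFusion's actual `Accumulator` trait nor GreptimeDB's wrapper types), a sum-style accumulator walks through the same steps:
// Toy accumulator mirroring the steps above; the names and signatures are illustrative.
#[derive(Default)]
struct MySum {
    sum: i64,
}
impl MySum {
    // Step 2: fold one partition's batch into this accumulator's running state.
    fn update_batch(&mut self, values: &[i64]) {
        self.sum += values.iter().sum::<i64>();
    }
    // Step 3: expose the internal state, i.e. the partial calculation result.
    fn state(&self) -> Vec<i64> {
        vec![self.sum]
    }
    // Step 4: merge the partial states of all accumulators into one.
    fn merge_batch(&mut self, states: &[Vec<i64>]) {
        for s in states {
            self.sum += s[0];
        }
    }
    // Step 5: produce the final result on the chosen accumulator.
    fn evaluate(&self) -> i64 {
        self.sum
    }
}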
@@ -63,7 +63,7 @@ Once you know the meaning of each method, you can easily write your accumulator.
You can call the `register_aggregate_function` method in the query engine to register your aggregate function. To do that, you have to create an instance of the struct `AggregateFunctionMeta`. The struct has three fields; the first is your aggregate function's name. The function name is case-sensitive due to DataFusion's restriction, so we strongly recommend using a lowercase name. If you have to use an uppercase name, wrap your aggregate function in quotation marks. For example, if you define an aggregate function named "my_aggr", you can use "`SELECT MY_AGGR(x)`"; if you define "my_AGGR", you have to use "`SELECT "my_AGGR"(x)`".
The second field is arg_counts ,the count of the arguments. Like accumulator `percentile`, calculating the p_number of the column. We need to input the value of column and the value of p to cacalate, and so the count of the arguments is two.
The second field is arg_counts ,the count of the arguments. Like accumulator `percentile`, caculating the p_number of the column. We need to input the value of column and the value of p to cacalate, and so the count of the arguments is two.
The third field is a function that creates the accumulator creator you defined in step 1 above. "Creating a creator" sounds a bit intertwined, but it is how we make DataFusion use a newly created aggregate function each time it executes a SQL statement, preventing the stored input types from affecting each other. A good place to start digging into this detail is our `DfContextProviderAdapter` struct's `get_aggregate_meta` method.

@@ -1,2 +1,3 @@
group_imports = "StdExternalCrate"
imports_granularity = "Module"

@@ -1,63 +0,0 @@
#!/bin/sh
set -ue
OS_TYPE=
ARCH_TYPE=
VERSION=${1:-latest}
GITHUB_ORG=GreptimeTeam
GITHUB_REPO=greptimedb
BIN=greptime
get_os_type() {
os_type="$(uname -s)"
case "$os_type" in
Darwin)
OS_TYPE=darwin
;;
Linux)
OS_TYPE=linux
;;
*)
echo "Error: Unknown OS type: $os_type"
exit 1
esac
}
get_arch_type() {
arch_type="$(uname -m)"
case "$arch_type" in
arm64)
ARCH_TYPE=arm64
;;
aarch64)
ARCH_TYPE=arm64
;;
x86_64)
ARCH_TYPE=amd64
;;
amd64)
ARCH_TYPE=amd64
;;
*)
echo "Error: Unknown CPU type: $arch_type"
exit 1
esac
}
get_os_type
get_arch_type
if [ -n "${OS_TYPE}" ] && [ -n "${ARCH_TYPE}" ]; then
echo "Downloading ${BIN}, OS: ${OS_TYPE}, Arch: ${ARCH_TYPE}, Version: ${VERSION}"
if [ "${VERSION}" = "latest" ]; then
wget "https://github.com/${GITHUB_ORG}/${GITHUB_REPO}/releases/latest/download/${BIN}-${OS_TYPE}-${ARCH_TYPE}.tgz"
else
wget "https://github.com/${GITHUB_ORG}/${GITHUB_REPO}/releases/download/${VERSION}/${BIN}-${OS_TYPE}-${ARCH_TYPE}.tgz"
fi
tar xvf ${BIN}-${OS_TYPE}-${ARCH_TYPE}.tgz && rm ${BIN}-${OS_TYPE}-${ARCH_TYPE}.tgz && echo "Run '${BIN} --help' to get started"
fi

@@ -2,13 +2,9 @@
name = "api"
version = "0.1.0"
edition = "2021"
license = "Apache-2.0"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]
common-base = { path = "../common/base" }
common-error = { path = "../common/error" }
common-time = { path = "../common/time" }
datatypes = { path = "../datatypes" }
prost = "0.11"
snafu = { version = "0.7", features = ["backtraces"] }

@@ -1,26 +1,10 @@
// Copyright 2022 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::path::PathBuf;
fn main() {
let default_out_dir = PathBuf::from(std::env::var("OUT_DIR").unwrap());
tonic_build::configure()
.file_descriptor_set_path(default_out_dir.join("greptime_fd.bin"))
.compile(
&[
"greptime/v1/insert.proto",
"greptime/v1/select.proto",
"greptime/v1/physical_plan.proto",
"greptime/v1/greptime.proto",
"greptime/v1/meta/common.proto",
"greptime/v1/meta/heartbeat.proto",

@@ -19,8 +19,6 @@ message AdminExpr {
oneof expr {
CreateExpr create = 2;
AlterExpr alter = 3;
CreateDatabaseExpr create_database = 4;
DropTableExpr drop_table = 5;
}
}
@@ -31,7 +29,6 @@ message AdminResult {
}
}
// TODO(hl): rename to CreateTableExpr
message CreateExpr {
optional string catalog_name = 1;
optional string schema_name = 2;
@@ -42,8 +39,6 @@ message CreateExpr {
repeated string primary_keys = 7;
bool create_if_not_exists = 8;
map<string, string> table_options = 9;
optional uint32 table_id = 10;
repeated uint32 region_ids = 11;
}
message AlterExpr {
@@ -51,35 +46,10 @@ message AlterExpr {
optional string schema_name = 2;
string table_name = 3;
oneof kind {
AddColumns add_columns = 4;
DropColumns drop_columns = 5;
AddColumn add_column = 4;
}
}
message DropTableExpr {
string catalog_name = 1;
string schema_name = 2;
string table_name = 3;
}
message AddColumns {
repeated AddColumn add_columns = 1;
}
message DropColumns {
repeated DropColumn drop_columns = 1;
}
message AddColumn {
ColumnDef column_def = 1;
bool is_key = 2;
}
message DropColumn {
string name = 1;
}
message CreateDatabaseExpr {
//TODO(hl): maybe rename to schema_name?
string database_name = 1;
}

@@ -32,10 +32,7 @@ message Column {
repeated int32 date_values = 14;
repeated int64 datetime_values = 15;
repeated int64 ts_second_values = 16;
repeated int64 ts_millisecond_values = 17;
repeated int64 ts_microsecond_values = 18;
repeated int64 ts_nanosecond_values = 19;
repeated int64 ts_millis_values = 16;
}
// The array of non-null values in this column.
//
@@ -78,8 +75,5 @@ enum ColumnDataType {
STRING = 12;
DATE = 13;
DATETIME = 14;
TIMESTAMP_SECOND = 15;
TIMESTAMP_MILLISECOND = 16;
TIMESTAMP_MICROSECOND = 17;
TIMESTAMP_NANOSECOND = 18;
TIMESTAMP = 15;
}

@@ -2,7 +2,6 @@ syntax = "proto3";
package greptime.v1;
import "greptime/v1/column.proto";
import "greptime/v1/common.proto";
message DatabaseRequest {
@@ -29,23 +28,36 @@ message SelectExpr {
oneof expr {
string sql = 1;
bytes logical_plan = 2;
PhysicalPlan physical_plan = 15;
}
}
message PhysicalPlan {
bytes original_ql = 1;
bytes plan = 2;
}
message InsertExpr {
string schema_name = 1;
string table_name = 2;
string table_name = 1;
// Data is represented here.
repeated Column columns = 3;
message Values {
repeated bytes values = 1;
}
// The row_count of all columns, which include null and non-null values.
//
// Note: the row_count of all columns in an InsertExpr must be the same.
uint32 row_count = 4;
oneof expr {
Values values = 2;
// The region number of current insert request.
uint32 region_number = 5;
// TODO(LFC): Remove field "sql" in InsertExpr.
// When Frontend instance received an insertion SQL (`insert into ...`), it's anticipated to parse the SQL and
// assemble the values to insert to feed Datanode. In other words, inserting data through Datanode instance's GRPC
// interface shouldn't use SQL directly.
// Then why the "sql" field exists here? It's because the Frontend needs table schema to create the values to insert,
// which is currently not able to find anywhere. (Maybe the table schema is suppose to be fetched from Meta?)
// The "sql" field is meant to be removed in the future.
string sql = 3;
}
map<string, bytes> options = 4;
}
// TODO(jiachun)

@@ -0,0 +1,10 @@
syntax = "proto3";
package greptime.v1.codec;
import "greptime/v1/column.proto";
message InsertBatch {
repeated Column columns = 1;
uint32 row_count = 2;
}

@@ -39,7 +39,7 @@ message NodeStat {
uint64 wcus = 2;
// Table number in this node
uint64 table_num = 3;
// Region number in this node
// Regon number in this node
uint64 region_num = 4;
double cpu_usage = 5;

@@ -5,8 +5,6 @@ package greptime.v1.meta;
import "greptime/v1/meta/common.proto";
service Router {
rpc Create(CreateRequest) returns (RouteResponse) {}
// Fetch routing information for tables. The smallest unit is the complete
// routing information(all regions) of a table.
//
@@ -28,14 +26,7 @@ service Router {
//
rpc Route(RouteRequest) returns (RouteResponse) {}
rpc Delete(DeleteRequest) returns (RouteResponse) {}
}
message CreateRequest {
RequestHeader header = 1;
TableName table_name = 2;
repeated Partition partitions = 3;
rpc Create(CreateRequest) returns (RouteResponse) {}
}
message RouteRequest {
@@ -44,12 +35,6 @@ message RouteRequest {
repeated TableName table_names = 2;
}
message DeleteRequest {
RequestHeader header = 1;
TableName table_name = 2;
}
message RouteResponse {
ResponseHeader header = 1;
@@ -57,6 +42,13 @@ message RouteResponse {
repeated TableRoute table_routes = 3;
}
message CreateRequest {
RequestHeader header = 1;
TableName table_name = 2;
repeated Partition partitions = 3;
}
message TableRoute {
Table table = 1;
repeated RegionRoute region_routes = 2;
@@ -71,13 +63,11 @@ message RegionRoute {
}
message Table {
uint64 id = 1;
TableName table_name = 2;
bytes table_schema = 3;
TableName table_name = 1;
bytes table_schema = 2;
}
message Region {
// TODO(LFC): Maybe use message RegionNumber?
uint64 id = 1;
string name = 2;
Partition partition = 3;
@@ -90,9 +80,3 @@ message Partition {
repeated bytes column_list = 1;
repeated bytes value_list = 2;
}
// This message is only for saving into store.
message TableRouteValue {
repeated Peer peers = 1;
TableRoute table_route = 2;
}

View File

@@ -20,9 +20,6 @@ service Store {
// DeleteRange deletes the given range from the key-value store.
rpc DeleteRange(DeleteRangeRequest) returns (DeleteRangeResponse);
// MoveValue atomically renames the key to the given updated key.
rpc MoveValue(MoveValueRequest) returns (MoveValueResponse);
}
message RangeRequest {
@@ -139,21 +136,3 @@ message DeleteRangeResponse {
// returned.
repeated KeyValue prev_kvs = 3;
}
message MoveValueRequest {
RequestHeader header = 1;
// If from_key does not exist, return the value of to_key (if it exists).
// If from_key exists, move the value of from_key to to_key (i.e. rename),
// and return the value.
bytes from_key = 2;
bytes to_key = 3;
}
message MoveValueResponse {
ResponseHeader header = 1;
// If from_key does not exist, return the value of to_key (if it exists).
// If from_key exists, return the value of from_key.
KeyValue kv = 2;
}

@@ -0,0 +1,33 @@
syntax = "proto3";
package greptime.v1.codec;
message PhysicalPlanNode {
oneof PhysicalPlanType {
ProjectionExecNode projection = 1;
MockInputExecNode mock = 99;
// TODO(fys): impl other physical plan node
}
}
message ProjectionExecNode {
PhysicalPlanNode input = 1;
repeated PhysicalExprNode expr = 2;
repeated string expr_name = 3;
}
message PhysicalExprNode {
oneof ExprType {
PhysicalColumn column = 1;
// TODO(fys): impl other physical expr node
}
}
message PhysicalColumn {
string name = 1;
uint64 index = 2;
}
message MockInputExecNode {
string name = 1;
}

@@ -1,24 +1,6 @@
// Copyright 2022 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::any::Any;
use common_error::ext::ErrorExt;
use common_error::prelude::StatusCode;
use datatypes::prelude::ConcreteDataType;
use snafu::prelude::*;
use snafu::{Backtrace, ErrorCompat};
use snafu::Backtrace;
pub type Result<T> = std::result::Result<T, Error>;
@@ -33,44 +15,4 @@ pub enum Error {
from: ConcreteDataType,
backtrace: Backtrace,
},
#[snafu(display(
"Failed to convert column default constraint, column: {}, source: {}",
column,
source
))]
ConvertColumnDefaultConstraint {
column: String,
#[snafu(backtrace)]
source: datatypes::error::Error,
},
#[snafu(display(
"Invalid column default constraint, column: {}, source: {}",
column,
source
))]
InvalidColumnDefaultConstraint {
column: String,
#[snafu(backtrace)]
source: datatypes::error::Error,
},
}
impl ErrorExt for Error {
fn status_code(&self) -> StatusCode {
match self {
Error::UnknownColumnDataType { .. } => StatusCode::InvalidArguments,
Error::IntoColumnDataType { .. } => StatusCode::Unexpected,
Error::ConvertColumnDefaultConstraint { source, .. }
| Error::InvalidColumnDefaultConstraint { source, .. } => source.status_code(),
}
}
fn backtrace_opt(&self) -> Option<&Backtrace> {
ErrorCompat::backtrace(self)
}
fn as_any(&self) -> &dyn Any {
self
}
}

@@ -1,28 +1,9 @@
// Copyright 2022 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use common_base::BitVec;
use common_time::timestamp::TimeUnit;
use datatypes::prelude::ConcreteDataType;
use datatypes::types::TimestampType;
use datatypes::value::Value;
use datatypes::vectors::VectorRef;
use snafu::prelude::*;
use crate::error::{self, Result};
use crate::v1::column::Values;
use crate::v1::{Column, ColumnDataType};
use crate::v1::ColumnDataType;
#[derive(Debug, PartialEq, Eq)]
pub struct ColumnDataTypeWrapper(ColumnDataType);
@@ -57,16 +38,7 @@ impl From<ColumnDataTypeWrapper> for ConcreteDataType {
ColumnDataType::String => ConcreteDataType::string_datatype(),
ColumnDataType::Date => ConcreteDataType::date_datatype(),
ColumnDataType::Datetime => ConcreteDataType::datetime_datatype(),
ColumnDataType::TimestampSecond => ConcreteDataType::timestamp_second_datatype(),
ColumnDataType::TimestampMillisecond => {
ConcreteDataType::timestamp_millisecond_datatype()
}
ColumnDataType::TimestampMicrosecond => {
ConcreteDataType::timestamp_microsecond_datatype()
}
ColumnDataType::TimestampNanosecond => {
ConcreteDataType::timestamp_nanosecond_datatype()
}
ColumnDataType::Timestamp => ConcreteDataType::timestamp_millis_datatype(),
}
}
}
@@ -91,12 +63,7 @@ impl TryFrom<ConcreteDataType> for ColumnDataTypeWrapper {
ConcreteDataType::String(_) => ColumnDataType::String,
ConcreteDataType::Date(_) => ColumnDataType::Date,
ConcreteDataType::DateTime(_) => ColumnDataType::Datetime,
ConcreteDataType::Timestamp(unit) => match unit {
TimestampType::Second(_) => ColumnDataType::TimestampSecond,
TimestampType::Millisecond(_) => ColumnDataType::TimestampMillisecond,
TimestampType::Microsecond(_) => ColumnDataType::TimestampMicrosecond,
TimestampType::Nanosecond(_) => ColumnDataType::TimestampNanosecond,
},
ConcreteDataType::Timestamp(_) => ColumnDataType::Timestamp,
ConcreteDataType::Null(_) | ConcreteDataType::List(_) => {
return error::IntoColumnDataTypeSnafu { from: datatype }.fail()
}
@@ -168,73 +135,16 @@ impl Values {
datetime_values: Vec::with_capacity(capacity),
..Default::default()
},
ColumnDataType::TimestampSecond => Values {
ts_second_values: Vec::with_capacity(capacity),
..Default::default()
},
ColumnDataType::TimestampMillisecond => Values {
ts_millisecond_values: Vec::with_capacity(capacity),
..Default::default()
},
ColumnDataType::TimestampMicrosecond => Values {
ts_microsecond_values: Vec::with_capacity(capacity),
..Default::default()
},
ColumnDataType::TimestampNanosecond => Values {
ts_nanosecond_values: Vec::with_capacity(capacity),
ColumnDataType::Timestamp => Values {
ts_millis_values: Vec::with_capacity(capacity),
..Default::default()
},
}
}
}
impl Column {
// The type of vals must be same.
pub fn push_vals(&mut self, origin_count: usize, vector: VectorRef) {
let values = self.values.get_or_insert_with(Values::default);
let mut null_mask = BitVec::from_slice(&self.null_mask);
let len = vector.len();
null_mask.reserve_exact(origin_count + len);
null_mask.extend(BitVec::repeat(false, len));
(0..len).into_iter().for_each(|idx| match vector.get(idx) {
Value::Null => null_mask.set(idx + origin_count, true),
Value::Boolean(val) => values.bool_values.push(val),
Value::UInt8(val) => values.u8_values.push(val.into()),
Value::UInt16(val) => values.u16_values.push(val.into()),
Value::UInt32(val) => values.u32_values.push(val),
Value::UInt64(val) => values.u64_values.push(val),
Value::Int8(val) => values.i8_values.push(val.into()),
Value::Int16(val) => values.i16_values.push(val.into()),
Value::Int32(val) => values.i32_values.push(val),
Value::Int64(val) => values.i64_values.push(val),
Value::Float32(val) => values.f32_values.push(*val),
Value::Float64(val) => values.f64_values.push(*val),
Value::String(val) => values.string_values.push(val.as_utf8().to_string()),
Value::Binary(val) => values.binary_values.push(val.to_vec()),
Value::Date(val) => values.date_values.push(val.val()),
Value::DateTime(val) => values.datetime_values.push(val.val()),
Value::Timestamp(val) => match val.unit() {
TimeUnit::Second => values.ts_second_values.push(val.value()),
TimeUnit::Millisecond => values.ts_millisecond_values.push(val.value()),
TimeUnit::Microsecond => values.ts_microsecond_values.push(val.value()),
TimeUnit::Nanosecond => values.ts_nanosecond_values.push(val.value()),
},
Value::List(_) => unreachable!(),
});
self.null_mask = null_mask.into_vec();
}
}
#[cfg(test)]
mod tests {
use std::sync::Arc;
use datatypes::vectors::{
BooleanVector, TimestampMicrosecondVector, TimestampMillisecondVector,
TimestampNanosecondVector, TimestampSecondVector,
};
use super::*;
#[test]
@@ -291,8 +201,8 @@ mod tests {
let values = values.datetime_values;
assert_eq!(2, values.capacity());
let values = Values::with_capacity(ColumnDataType::TimestampMillisecond, 2);
let values = values.ts_millisecond_values;
let values = Values::with_capacity(ColumnDataType::Timestamp, 2);
let values = values.ts_millis_values;
assert_eq!(2, values.capacity());
}
@@ -359,8 +269,8 @@ mod tests {
ColumnDataTypeWrapper(ColumnDataType::Datetime).into()
);
assert_eq!(
ConcreteDataType::timestamp_millisecond_datatype(),
ColumnDataTypeWrapper(ColumnDataType::TimestampMillisecond).into()
ConcreteDataType::timestamp_millis_datatype(),
ColumnDataTypeWrapper(ColumnDataType::Timestamp).into()
);
}
@@ -427,8 +337,8 @@ mod tests {
ConcreteDataType::datetime_datatype().try_into().unwrap()
);
assert_eq!(
ColumnDataTypeWrapper(ColumnDataType::TimestampMillisecond),
ConcreteDataType::timestamp_millisecond_datatype()
ColumnDataTypeWrapper(ColumnDataType::Timestamp),
ConcreteDataType::timestamp_millis_datatype()
.try_into()
.unwrap()
);
@@ -445,73 +355,7 @@ mod tests {
assert!(result.is_err());
assert_eq!(
result.unwrap_err().to_string(),
"Failed to create column datatype from List(ListType { item_type: Boolean(BooleanType) })"
"Failed to create column datatype from List(ListType { inner: Boolean(BooleanType) })"
);
}
#[test]
fn test_column_put_timestamp_values() {
let mut column = Column {
column_name: "test".to_string(),
semantic_type: 0,
values: Some(Values {
..Default::default()
}),
null_mask: vec![],
datatype: 0,
};
let vector = Arc::new(TimestampNanosecondVector::from_vec(vec![1, 2, 3]));
column.push_vals(3, vector);
assert_eq!(
vec![1, 2, 3],
column.values.as_ref().unwrap().ts_nanosecond_values
);
let vector = Arc::new(TimestampMillisecondVector::from_vec(vec![4, 5, 6]));
column.push_vals(3, vector);
assert_eq!(
vec![4, 5, 6],
column.values.as_ref().unwrap().ts_millisecond_values
);
let vector = Arc::new(TimestampMicrosecondVector::from_vec(vec![7, 8, 9]));
column.push_vals(3, vector);
assert_eq!(
vec![7, 8, 9],
column.values.as_ref().unwrap().ts_microsecond_values
);
let vector = Arc::new(TimestampSecondVector::from_vec(vec![10, 11, 12]));
column.push_vals(3, vector);
assert_eq!(
vec![10, 11, 12],
column.values.as_ref().unwrap().ts_second_values
);
}
#[test]
fn test_column_put_vector() {
use crate::v1::column::SemanticType;
// Some(false), None, Some(true), Some(true)
let mut column = Column {
column_name: "test".to_string(),
semantic_type: SemanticType::Field as i32,
values: Some(Values {
bool_values: vec![false, true, true],
..Default::default()
}),
null_mask: vec![2],
datatype: ColumnDataType::Boolean as i32,
};
let row_count = 4;
let vector = Arc::new(BooleanVector::from(vec![Some(true), None, Some(false)]));
column.push_vals(row_count, vector);
// Some(false), None, Some(true), Some(true), Some(true), None, Some(false)
let bool_values = column.values.unwrap().bool_values;
assert_eq!(vec![false, true, true, true, false], bool_values);
let null_mask = column.null_mask;
assert_eq!(34, null_mask[0]);
}
}

@@ -1,21 +1,6 @@
// Copyright 2022 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
pub mod error;
pub mod helper;
pub mod prometheus;
pub mod result;
pub mod serde;
pub mod v1;

View File

@@ -1,17 +1,3 @@
// Copyright 2022 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#![allow(clippy::derive_partial_eq_without_eq)]
pub mod remote {

@@ -1,22 +1,7 @@
// Copyright 2022 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
pub use prost::DecodeError;
use prost::Message;
use crate::v1::codec::SelectResult;
use crate::v1::meta::TableRouteValue;
use crate::v1::codec::{InsertBatch, PhysicalPlanNode, SelectResult};
macro_rules! impl_convert_with_bytes {
($data_type: ty) => {
@@ -36,18 +21,66 @@ macro_rules! impl_convert_with_bytes {
};
}
impl_convert_with_bytes!(InsertBatch);
impl_convert_with_bytes!(SelectResult);
impl_convert_with_bytes!(TableRouteValue);
impl_convert_with_bytes!(PhysicalPlanNode);
#[cfg(test)]
mod tests {
use std::ops::Deref;
use crate::v1::codec::*;
use crate::v1::{column, Column};
use crate::v1::column;
use crate::v1::Column;
const SEMANTIC_TAG: i32 = 0;
#[test]
fn test_convert_insert_batch() {
let insert_batch = mock_insert_batch();
let bytes: Vec<u8> = insert_batch.into();
let insert: InsertBatch = bytes.deref().try_into().unwrap();
assert_eq!(8, insert.row_count);
assert_eq!(1, insert.columns.len());
let column = &insert.columns[0];
assert_eq!("foo", column.column_name);
assert_eq!(SEMANTIC_TAG, column.semantic_type);
assert_eq!(vec![1], column.null_mask);
assert_eq!(
vec![2, 3, 4, 5, 6, 7, 8],
column.values.as_ref().unwrap().i32_values
);
}
#[should_panic]
#[test]
fn test_convert_insert_batch_wrong() {
let insert_batch = mock_insert_batch();
let mut bytes: Vec<u8> = insert_batch.into();
// modify some bytes
bytes[0] = 0b1;
bytes[1] = 0b1;
let insert: InsertBatch = bytes.deref().try_into().unwrap();
assert_eq!(8, insert.row_count);
assert_eq!(1, insert.columns.len());
let column = &insert.columns[0];
assert_eq!("foo", column.column_name);
assert_eq!(SEMANTIC_TAG, column.semantic_type);
assert_eq!(vec![1], column.null_mask);
assert_eq!(
vec![2, 3, 4, 5, 6, 7, 8],
column.values.as_ref().unwrap().i32_values
);
}
#[test]
fn test_convert_select_result() {
let select_result = mock_select_result();
@@ -94,6 +127,25 @@ mod tests {
);
}
fn mock_insert_batch() -> InsertBatch {
let values = column::Values {
i32_values: vec![2, 3, 4, 5, 6, 7, 8],
..Default::default()
};
let null_mask = vec![1];
let column = Column {
column_name: "foo".to_string(),
semantic_type: SEMANTIC_TAG,
values: Some(values),
null_mask,
..Default::default()
};
InsertBatch {
columns: vec![column],
row_count: 8,
}
}
fn mock_select_result() -> SelectResult {
let values = column::Values {
i32_values: vec![2, 3, 4, 5, 6, 7, 8],

@@ -1,25 +1,8 @@
// Copyright 2022 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#![allow(clippy::derive_partial_eq_without_eq)]
tonic::include_proto!("greptime.v1");
pub const GREPTIME_FD_SET: &[u8] = tonic::include_file_descriptor_set!("greptime_fd");
pub mod codec {
tonic::include_proto!("greptime.v1.codec");
}
mod column_def;
pub mod meta;

View File

@@ -1,38 +0,0 @@
// Copyright 2022 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use datatypes::schema::{ColumnDefaultConstraint, ColumnSchema};
use snafu::ResultExt;
use crate::error::{self, Result};
use crate::helper::ColumnDataTypeWrapper;
use crate::v1::ColumnDef;
impl ColumnDef {
pub fn try_as_column_schema(&self) -> Result<ColumnSchema> {
let data_type = ColumnDataTypeWrapper::try_new(self.datatype)?;
let constraint = match &self.default_constraint {
None => None,
Some(v) => Some(
ColumnDefaultConstraint::try_from(&v[..])
.context(error::ConvertColumnDefaultConstraintSnafu { column: &self.name })?,
),
};
ColumnSchema::new(&self.name, data_type.into(), self.is_nullable)
.with_default_constraint(constraint)
.context(error::InvalidColumnDefaultConstraintSnafu { column: &self.name })
}
}

@@ -1,60 +1,7 @@
// Copyright 2022 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
tonic::include_proto!("greptime.v1.meta");
use std::collections::HashMap;
use std::hash::{Hash, Hasher};
pub const PROTOCOL_VERSION: u64 = 1;
#[derive(Default)]
pub struct PeerDict {
peers: HashMap<Peer, usize>,
index: usize,
}
impl PeerDict {
pub fn get_or_insert(&mut self, peer: Peer) -> usize {
let index = self.peers.entry(peer).or_insert_with(|| {
let v = self.index;
self.index += 1;
v
});
*index
}
pub fn into_peers(self) -> Vec<Peer> {
let mut array = vec![Peer::default(); self.index];
for (p, i) in self.peers {
array[i] = p;
}
array
}
}
#[allow(clippy::derive_hash_xor_eq)]
impl Hash for Peer {
fn hash<H: Hasher>(&self, state: &mut H) {
self.id.hash(state);
self.addr.hash(state);
}
}
impl Eq for Peer {}
impl RequestHeader {
#[inline]
pub fn new((cluster_id, member_id): (u64, u64)) -> Self {
@@ -84,50 +31,6 @@ impl ResponseHeader {
error: Some(error),
}
}
#[inline]
pub fn is_not_leader(&self) -> bool {
if let Some(error) = &self.error {
if error.code == ErrorCode::NotLeader as i32 {
return true;
}
}
false
}
}
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum ErrorCode {
NoActiveDatanodes = 1,
NotLeader = 2,
}
impl Error {
#[inline]
pub fn no_active_datanodes() -> Self {
Self {
code: ErrorCode::NoActiveDatanodes as i32,
err_msg: "No active datanodes".to_string(),
}
}
#[inline]
pub fn is_not_leader() -> Self {
Self {
code: ErrorCode::NotLeader as i32,
err_msg: "Current server is not leader".to_string(),
}
}
}
impl HeartbeatResponse {
#[inline]
pub fn is_not_leader(&self) -> bool {
if let Some(header) = &self.header {
return header.is_not_leader();
}
false
}
}
macro_rules! gen_set_header {
@@ -145,65 +48,7 @@ gen_set_header!(HeartbeatRequest);
gen_set_header!(RouteRequest);
gen_set_header!(CreateRequest);
gen_set_header!(RangeRequest);
gen_set_header!(DeleteRequest);
gen_set_header!(PutRequest);
gen_set_header!(BatchPutRequest);
gen_set_header!(CompareAndPutRequest);
gen_set_header!(DeleteRangeRequest);
gen_set_header!(MoveValueRequest);
#[cfg(test)]
mod tests {
use std::vec;
use super::*;
#[test]
fn test_peer_dict() {
let mut dict = PeerDict::default();
dict.get_or_insert(Peer {
id: 1,
addr: "111".to_string(),
});
dict.get_or_insert(Peer {
id: 2,
addr: "222".to_string(),
});
dict.get_or_insert(Peer {
id: 1,
addr: "111".to_string(),
});
dict.get_or_insert(Peer {
id: 1,
addr: "111".to_string(),
});
dict.get_or_insert(Peer {
id: 1,
addr: "111".to_string(),
});
dict.get_or_insert(Peer {
id: 1,
addr: "111".to_string(),
});
dict.get_or_insert(Peer {
id: 2,
addr: "222".to_string(),
});
assert_eq!(2, dict.index);
assert_eq!(
vec![
Peer {
id: 1,
addr: "111".to_string(),
},
Peer {
id: 2,
addr: "222".to_string(),
}
],
dict.into_peers()
);
}
}

@@ -2,7 +2,6 @@
name = "catalog"
version = "0.1.0"
edition = "2021"
license = "Apache-2.0"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]
@@ -10,7 +9,6 @@ api = { path = "../api" }
arc-swap = "1.0"
async-stream = "0.3"
async-trait = "0.1"
backoff = { version = "0.4", features = ["tokio"] }
common-catalog = { path = "../common/catalog" }
common-error = { path = "../common/error" }
common-grpc = { path = "../common/grpc" }
@@ -19,12 +17,15 @@ common-recordbatch = { path = "../common/recordbatch" }
common-runtime = { path = "../common/runtime" }
common-telemetry = { path = "../common/telemetry" }
common-time = { path = "../common/time" }
datafusion = "14.0.0"
datafusion = { git = "https://github.com/apache/arrow-datafusion.git", branch = "arrow2", features = [
"simd",
] }
datatypes = { path = "../datatypes" }
futures = "0.3"
futures-util = "0.3"
lazy_static = "1.4"
meta-client = { path = "../meta-client" }
opendal = "0.17"
regex = "1.6"
serde = "1.0"
serde_json = "1.0"
@@ -36,8 +37,9 @@ tokio = { version = "1.18", features = ["full"] }
[dev-dependencies]
chrono = "0.4"
log-store = { path = "../log-store" }
mito = { path = "../mito", features = ["test"] }
object-store = { path = "../object-store" }
opendal = "0.17"
storage = { path = "../storage" }
table-engine = { path = "../table-engine" }
tempdir = "0.3"
tokio = { version = "1.0", features = ["full"] }

View File

@@ -1,24 +1,9 @@
// Copyright 2022 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::any::Any;
use common_error::ext::{BoxedError, ErrorExt};
use common_error::prelude::{Snafu, StatusCode};
use datafusion::error::DataFusionError;
use datatypes::prelude::ConcreteDataType;
use datatypes::schema::RawSchema;
use datatypes::arrow;
use snafu::{Backtrace, ErrorCompat};
#[derive(Debug, Snafu)]
@@ -51,28 +36,24 @@ pub enum Error {
SystemCatalog { msg: String, backtrace: Backtrace },
#[snafu(display(
"System catalog table type mismatch, expected: binary, found: {:?}",
"System catalog table type mismatch, expected: binary, found: {:?} source: {}",
data_type,
source
))]
SystemCatalogTypeMismatch {
data_type: ConcreteDataType,
backtrace: Backtrace,
data_type: arrow::datatypes::DataType,
#[snafu(backtrace)]
source: datatypes::error::Error,
},
#[snafu(display("Invalid system catalog entry type: {:?}", entry_type))]
InvalidEntryType {
entry_type: Option<u8>,
backtrace: Backtrace,
},
InvalidEntryType { entry_type: Option<u8> },
#[snafu(display("Invalid system catalog key: {:?}", key))]
InvalidKey {
key: Option<String>,
backtrace: Backtrace,
},
InvalidKey { key: Option<String> },
#[snafu(display("Catalog value is not present"))]
EmptyValue { backtrace: Backtrace },
EmptyValue,
#[snafu(display("Failed to deserialize value, source: {}", source))]
ValueDeserialize {
@@ -81,38 +62,20 @@ pub enum Error {
},
#[snafu(display("Cannot find catalog by name: {}", catalog_name))]
CatalogNotFound {
catalog_name: String,
backtrace: Backtrace,
},
CatalogNotFound { catalog_name: String },
#[snafu(display("Cannot find schema, schema info: {}", schema_info))]
SchemaNotFound {
schema_info: String,
backtrace: Backtrace,
},
SchemaNotFound { schema_info: String },
#[snafu(display("Table `{}` already exists", table))]
#[snafu(display("Table {} already exists", table))]
TableExists { table: String, backtrace: Backtrace },
#[snafu(display("Schema {} already exists", schema))]
SchemaExists {
schema: String,
backtrace: Backtrace,
},
#[snafu(display("Failed to register table"))]
RegisterTable {
#[snafu(backtrace)]
source: BoxedError,
},
#[snafu(display("Operation {} not implemented yet", operation))]
Unimplemented {
operation: String,
backtrace: Backtrace,
},
#[snafu(display("Failed to open table, table info: {}, source: {}", table_info, source))]
OpenTable {
table_info: String,
@@ -121,10 +84,7 @@ pub enum Error {
},
#[snafu(display("Table not found while opening table, table info: {}", table_info))]
TableNotFound {
table_info: String,
backtrace: Backtrace,
},
TableNotFound { table_info: String },
#[snafu(display("Failed to read system catalog table records"))]
ReadSystemCatalog {
@@ -136,7 +96,7 @@ pub enum Error {
"Failed to insert table creation record to system catalog, source: {}",
source
))]
InsertCatalogRecord {
InsertTableRecord {
#[snafu(backtrace)]
source: table::error::Error,
},
@@ -150,19 +110,6 @@ pub enum Error {
source: table::error::Error,
},
#[snafu(display(
"Invalid table schema in catalog entry, table:{}, schema: {:?}, source: {}",
table_info,
schema,
source
))]
InvalidTableSchema {
table_info: String,
schema: RawSchema,
#[snafu(backtrace)]
source: datatypes::error::Error,
},
#[snafu(display("Failed to execute system catalog table scan, source: {}", source))]
SystemCatalogTableScanExec {
#[snafu(backtrace)]
@@ -188,18 +135,6 @@ pub enum Error {
#[snafu(backtrace)]
source: meta_client::error::Error,
},
#[snafu(display("Invalid table info in catalog, source: {}", source))]
InvalidTableInfoInCatalog {
#[snafu(backtrace)]
source: datatypes::error::Error,
},
#[snafu(display("Catalog internal error: {}", source))]
Internal {
#[snafu(backtrace)]
source: BoxedError,
},
}
pub type Result<T> = std::result::Result<T, Error>;
@@ -216,33 +151,25 @@ impl ErrorExt for Error {
| Error::CatalogStateInconsistent { .. } => StatusCode::Unexpected,
Error::SystemCatalog { .. }
| Error::EmptyValue { .. }
| Error::EmptyValue
| Error::ValueDeserialize { .. }
| Error::Io { .. } => StatusCode::StorageUnavailable,
Error::RegisterTable { .. } | Error::SystemCatalogTypeMismatch { .. } => {
StatusCode::Internal
}
Error::ReadSystemCatalog { source, .. } => source.status_code(),
Error::SystemCatalogTypeMismatch { source, .. } => source.status_code(),
Error::InvalidCatalogValue { source, .. } => source.status_code(),
Error::RegisterTable { .. } => StatusCode::Internal,
Error::TableExists { .. } => StatusCode::TableAlreadyExists,
Error::SchemaExists { .. } => StatusCode::InvalidArguments,
Error::OpenSystemCatalog { source, .. }
| Error::CreateSystemCatalog { source, .. }
| Error::InsertCatalogRecord { source, .. }
| Error::InsertTableRecord { source, .. }
| Error::OpenTable { source, .. }
| Error::CreateTable { source, .. } => source.status_code(),
Error::MetaSrv { source, .. } => source.status_code(),
Error::SystemCatalogTableScan { source } => source.status_code(),
Error::SystemCatalogTableScanExec { source } => source.status_code(),
Error::InvalidTableSchema { source, .. } => source.status_code(),
Error::InvalidTableInfoInCatalog { .. } => StatusCode::Unexpected,
Error::Internal { source, .. } => source.status_code(),
Error::Unimplemented { .. } => StatusCode::Unsupported,
}
}
@@ -264,6 +191,7 @@ impl From<Error> for DataFusionError {
#[cfg(test)]
mod tests {
use common_error::mock::MockError;
use datatypes::arrow::datatypes::DataType;
use snafu::GenerateImplicitData;
use super::*;
@@ -281,7 +209,7 @@ mod tests {
assert_eq!(
StatusCode::Unexpected,
InvalidKeySnafu { key: None }.build().status_code()
Error::InvalidKey { key: None }.status_code()
);
assert_eq!(
@@ -312,14 +240,17 @@ mod tests {
assert_eq!(
StatusCode::Internal,
Error::SystemCatalogTypeMismatch {
data_type: ConcreteDataType::binary_datatype(),
backtrace: Backtrace::generate(),
data_type: DataType::Boolean,
source: datatypes::error::Error::UnsupportedArrowType {
arrow_type: DataType::Boolean,
backtrace: Backtrace::generate()
}
}
.status_code()
);
assert_eq!(
StatusCode::StorageUnavailable,
EmptyValueSnafu {}.build().status_code()
Error::EmptyValue.status_code()
);
}

View File

@@ -1,384 +0,0 @@
// Copyright 2022 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::collections::HashMap;
use std::fmt::{Display, Formatter};
use common_catalog::error::{
DeserializeCatalogEntryValueSnafu, Error, InvalidCatalogSnafu, SerializeCatalogEntryValueSnafu,
};
use lazy_static::lazy_static;
use regex::Regex;
use serde::{Deserialize, Serialize, Serializer};
use snafu::{ensure, OptionExt, ResultExt};
use table::metadata::{RawTableInfo, TableId, TableVersion};
const CATALOG_KEY_PREFIX: &str = "__c";
const SCHEMA_KEY_PREFIX: &str = "__s";
const TABLE_GLOBAL_KEY_PREFIX: &str = "__tg";
const TABLE_REGIONAL_KEY_PREFIX: &str = "__tr";
const ALPHANUMERICS_NAME_PATTERN: &str = "[a-zA-Z_][a-zA-Z0-9_]*";
lazy_static! {
static ref CATALOG_KEY_PATTERN: Regex = Regex::new(&format!(
"^{}-({})$",
CATALOG_KEY_PREFIX, ALPHANUMERICS_NAME_PATTERN
))
.unwrap();
}
lazy_static! {
static ref SCHEMA_KEY_PATTERN: Regex = Regex::new(&format!(
"^{}-({})-({})$",
SCHEMA_KEY_PREFIX, ALPHANUMERICS_NAME_PATTERN, ALPHANUMERICS_NAME_PATTERN
))
.unwrap();
}
lazy_static! {
static ref TABLE_GLOBAL_KEY_PATTERN: Regex = Regex::new(&format!(
"^{}-({})-({})-({})$",
TABLE_GLOBAL_KEY_PREFIX,
ALPHANUMERICS_NAME_PATTERN,
ALPHANUMERICS_NAME_PATTERN,
ALPHANUMERICS_NAME_PATTERN
))
.unwrap();
}
lazy_static! {
static ref TABLE_REGIONAL_KEY_PATTERN: Regex = Regex::new(&format!(
"^{}-({})-({})-({})-([0-9]+)$",
TABLE_REGIONAL_KEY_PREFIX,
ALPHANUMERICS_NAME_PATTERN,
ALPHANUMERICS_NAME_PATTERN,
ALPHANUMERICS_NAME_PATTERN
))
.unwrap();
}
pub fn build_catalog_prefix() -> String {
format!("{}-", CATALOG_KEY_PREFIX)
}
pub fn build_schema_prefix(catalog_name: impl AsRef<str>) -> String {
format!("{}-{}-", SCHEMA_KEY_PREFIX, catalog_name.as_ref())
}
pub fn build_table_global_prefix(
catalog_name: impl AsRef<str>,
schema_name: impl AsRef<str>,
) -> String {
format!(
"{}-{}-{}-",
TABLE_GLOBAL_KEY_PREFIX,
catalog_name.as_ref(),
schema_name.as_ref()
)
}
pub fn build_table_regional_prefix(
catalog_name: impl AsRef<str>,
schema_name: impl AsRef<str>,
) -> String {
format!(
"{}-{}-{}-",
TABLE_REGIONAL_KEY_PREFIX,
catalog_name.as_ref(),
schema_name.as_ref()
)
}
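// For illustration: these builders produce only the key prefix (trailing '-' included),
// which the remote catalog manager later passes to `KvBackend::range` to scan every key
// under it. A minimal sketch, assuming this module's helpers are in scope; the
// "greptime"/"public" names are made up for the example.
fn prefix_examples() {
    assert_eq!("__c-", build_catalog_prefix());
    assert_eq!("__s-greptime-", build_schema_prefix("greptime"));
    assert_eq!("__tg-greptime-public-", build_table_global_prefix("greptime", "public"));
    assert_eq!("__tr-greptime-public-", build_table_regional_prefix("greptime", "public"));
}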
/// Table global info has only one key across all datanodes, so it does not have a `node_id` field.
pub struct TableGlobalKey {
pub catalog_name: String,
pub schema_name: String,
pub table_name: String,
}
impl Display for TableGlobalKey {
fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
f.write_str(TABLE_GLOBAL_KEY_PREFIX)?;
f.write_str("-")?;
f.write_str(&self.catalog_name)?;
f.write_str("-")?;
f.write_str(&self.schema_name)?;
f.write_str("-")?;
f.write_str(&self.table_name)
}
}
impl TableGlobalKey {
pub fn parse<S: AsRef<str>>(s: S) -> Result<Self, Error> {
let key = s.as_ref();
let captures = TABLE_GLOBAL_KEY_PATTERN
.captures(key)
.context(InvalidCatalogSnafu { key })?;
ensure!(captures.len() == 4, InvalidCatalogSnafu { key });
Ok(Self {
catalog_name: captures[1].to_string(),
schema_name: captures[2].to_string(),
table_name: captures[3].to_string(),
})
}
}
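// For illustration: a minimal round-trip sketch of the key layout, assuming this module's
// types are in scope; the catalog/schema/table names are made up for the example.
fn table_global_key_roundtrip() {
    let key = TableGlobalKey {
        catalog_name: "greptime".to_string(),
        schema_name: "public".to_string(),
        table_name: "metrics".to_string(),
    };
    // `Display` renders the key as "__tg-<catalog>-<schema>-<table>".
    let rendered = key.to_string();
    assert_eq!("__tg-greptime-public-metrics", rendered);
    // `parse` recovers the components from the rendered string.
    let parsed = TableGlobalKey::parse(&rendered).unwrap();
    assert_eq!("metrics", parsed.table_name);
}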
/// Table global info contains necessary info for a datanode to create table regions, including
/// table id, table meta (schema, ...), and region id allocation across datanodes.
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)]
pub struct TableGlobalValue {
/// Id of the datanode that created the global table info kv. Only for debugging.
pub node_id: u64,
// TODO(LFC): Maybe remove it?
/// Allocation of region ids across all datanodes.
pub regions_id_map: HashMap<u64, Vec<u32>>,
pub table_info: RawTableInfo,
}
impl TableGlobalValue {
pub fn table_id(&self) -> TableId {
self.table_info.ident.table_id
}
}
/// Table regional info that varies between datanodes, so it contains a `node_id` field.
pub struct TableRegionalKey {
pub catalog_name: String,
pub schema_name: String,
pub table_name: String,
pub node_id: u64,
}
impl Display for TableRegionalKey {
fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
f.write_str(TABLE_REGIONAL_KEY_PREFIX)?;
f.write_str("-")?;
f.write_str(&self.catalog_name)?;
f.write_str("-")?;
f.write_str(&self.schema_name)?;
f.write_str("-")?;
f.write_str(&self.table_name)?;
f.write_str("-")?;
f.serialize_u64(self.node_id)
}
}
impl TableRegionalKey {
pub fn parse<S: AsRef<str>>(s: S) -> Result<Self, Error> {
let key = s.as_ref();
let captures = TABLE_REGIONAL_KEY_PATTERN
.captures(key)
.context(InvalidCatalogSnafu { key })?;
ensure!(captures.len() == 5, InvalidCatalogSnafu { key });
let node_id = captures[4]
.to_string()
.parse()
.map_err(|_| InvalidCatalogSnafu { key }.build())?;
Ok(Self {
catalog_name: captures[1].to_string(),
schema_name: captures[2].to_string(),
table_name: captures[3].to_string(),
node_id,
})
}
}
/// Regional table info of a specific datanode, including the table version on that datanode and
/// the region ids allocated by metasrv.
#[derive(Debug, Serialize, Deserialize, Clone)]
pub struct TableRegionalValue {
pub version: TableVersion,
pub regions_ids: Vec<u32>,
}
pub struct CatalogKey {
pub catalog_name: String,
}
impl Display for CatalogKey {
fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
f.write_str(CATALOG_KEY_PREFIX)?;
f.write_str("-")?;
f.write_str(&self.catalog_name)
}
}
impl CatalogKey {
pub fn parse(s: impl AsRef<str>) -> Result<Self, Error> {
let key = s.as_ref();
let captures = CATALOG_KEY_PATTERN
.captures(key)
.context(InvalidCatalogSnafu { key })?;
ensure!(captures.len() == 2, InvalidCatalogSnafu { key });
Ok(Self {
catalog_name: captures[1].to_string(),
})
}
}
#[derive(Debug, Serialize, Deserialize)]
pub struct CatalogValue;
pub struct SchemaKey {
pub catalog_name: String,
pub schema_name: String,
}
impl Display for SchemaKey {
fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
f.write_str(SCHEMA_KEY_PREFIX)?;
f.write_str("-")?;
f.write_str(&self.catalog_name)?;
f.write_str("-")?;
f.write_str(&self.schema_name)
}
}
impl SchemaKey {
pub fn parse(s: impl AsRef<str>) -> Result<Self, Error> {
let key = s.as_ref();
let captures = SCHEMA_KEY_PATTERN
.captures(key)
.context(InvalidCatalogSnafu { key })?;
ensure!(captures.len() == 3, InvalidCatalogSnafu { key });
Ok(Self {
catalog_name: captures[1].to_string(),
schema_name: captures[2].to_string(),
})
}
}
#[derive(Debug, Serialize, Deserialize)]
pub struct SchemaValue;
macro_rules! define_catalog_value {
( $($val_ty: ty), *) => {
$(
impl $val_ty {
pub fn parse(s: impl AsRef<str>) -> Result<Self, Error> {
serde_json::from_str(s.as_ref())
.context(DeserializeCatalogEntryValueSnafu { raw: s.as_ref() })
}
pub fn from_bytes(bytes: impl AsRef<[u8]>) -> Result<Self, Error> {
Self::parse(&String::from_utf8_lossy(bytes.as_ref()))
}
pub fn as_bytes(&self) -> Result<Vec<u8>, Error> {
Ok(serde_json::to_string(self)
.context(SerializeCatalogEntryValueSnafu)?
.into_bytes())
}
}
)*
}
}
define_catalog_value!(
TableRegionalValue,
TableGlobalValue,
CatalogValue,
SchemaValue
);
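// For illustration: the macro above gives every value type a JSON round trip through
// `as_bytes`, `from_bytes` and `parse`. A minimal sketch with the unit `CatalogValue`,
// assuming this module's types are in scope.
fn catalog_value_roundtrip() {
    // `as_bytes` serializes the value to JSON bytes for storage in the backend.
    let bytes = CatalogValue {}.as_bytes().unwrap();
    // `from_bytes` reads the stored bytes back (lossy UTF-8 decode, then `parse`).
    let _value = CatalogValue::from_bytes(&bytes).unwrap();
}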
#[cfg(test)]
mod tests {
use datatypes::prelude::ConcreteDataType;
use datatypes::schema::{ColumnSchema, RawSchema, Schema};
use table::metadata::{RawTableMeta, TableIdent, TableType};
use super::*;
#[test]
fn test_parse_catalog_key() {
let key = "__c-C";
let catalog_key = CatalogKey::parse(key).unwrap();
assert_eq!("C", catalog_key.catalog_name);
assert_eq!(key, catalog_key.to_string());
}
#[test]
fn test_parse_schema_key() {
let key = "__s-C-S";
let schema_key = SchemaKey::parse(key).unwrap();
assert_eq!("C", schema_key.catalog_name);
assert_eq!("S", schema_key.schema_name);
assert_eq!(key, schema_key.to_string());
}
#[test]
fn test_parse_table_key() {
let key = "__tg-C-S-T";
let entry = TableGlobalKey::parse(key).unwrap();
assert_eq!("C", entry.catalog_name);
assert_eq!("S", entry.schema_name);
assert_eq!("T", entry.table_name);
assert_eq!(key, &entry.to_string());
}
#[test]
fn test_build_prefix() {
assert_eq!("__c-", build_catalog_prefix());
assert_eq!("__s-CATALOG-", build_schema_prefix("CATALOG"));
assert_eq!(
"__tg-CATALOG-SCHEMA-",
build_table_global_prefix("CATALOG", "SCHEMA")
);
}
#[test]
fn test_serialize_schema() {
let schema = Schema::new(vec![ColumnSchema::new(
"name",
ConcreteDataType::string_datatype(),
true,
)]);
let meta = RawTableMeta {
schema: RawSchema::from(&schema),
engine: "mito".to_string(),
created_on: chrono::DateTime::default(),
primary_key_indices: vec![0, 1],
next_column_id: 3,
engine_options: Default::default(),
value_indices: vec![2, 3],
options: Default::default(),
region_numbers: vec![1],
};
let table_info = RawTableInfo {
ident: TableIdent {
table_id: 42,
version: 1,
},
name: "table_1".to_string(),
desc: Some("blah".to_string()),
catalog_name: "catalog_1".to_string(),
schema_name: "schema_1".to_string(),
meta,
table_type: TableType::Base,
};
let value = TableGlobalValue {
node_id: 0,
regions_id_map: HashMap::from([(0, vec![1, 2, 3])]),
table_info,
};
let serialized = serde_json::to_string(&value).unwrap();
let deserialized = TableGlobalValue::parse(&serialized).unwrap();
assert_eq!(value, deserialized);
}
}

View File

@@ -1,21 +1,6 @@
// Copyright 2022 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#![feature(assert_matches)]
use std::any::Any;
use std::fmt::{Debug, Formatter};
use std::sync::Arc;
use common_telemetry::info;
@@ -29,7 +14,6 @@ use crate::error::{CreateTableSnafu, Result};
pub use crate::schema::{SchemaProvider, SchemaProviderRef};
pub mod error;
pub mod helper;
pub mod local;
pub mod remote;
pub mod schema;
@@ -83,34 +67,32 @@ pub type CatalogProviderRef = Arc<dyn CatalogProvider>;
#[async_trait::async_trait]
pub trait CatalogManager: CatalogList {
/// Starts a catalog manager.
async fn start(&self) -> Result<()>;
async fn start(&self) -> error::Result<()>;
/// Registers a table within the given catalog/schema to the catalog manager,
/// returns whether the table is registered.
async fn register_table(&self, request: RegisterTableRequest) -> Result<bool>;
/// Returns next available table id.
fn next_table_id(&self) -> TableId;
/// Deregisters a table within the given catalog/schema from the catalog manager,
/// returns whether the table is deregistered.
async fn deregister_table(&self, request: DeregisterTableRequest) -> Result<bool>;
/// Registers a schema with catalog name and schema name. Returns whether the
/// schema is registered.
async fn register_schema(&self, request: RegisterSchemaRequest) -> Result<bool>;
/// Registers a table within the given catalog/schema to the catalog manager,
/// returns the number of tables registered.
async fn register_table(&self, request: RegisterTableRequest) -> error::Result<usize>;
/// Register a system table; this should be called before starting the manager.
async fn register_system_table(&self, request: RegisterSystemTableRequest)
-> error::Result<()>;
fn schema(&self, catalog: &str, schema: &str) -> Result<Option<SchemaProviderRef>>;
/// Returns the table by catalog, schema and table name.
fn table(&self, catalog: &str, schema: &str, table_name: &str) -> Result<Option<TableRef>>;
fn table(
&self,
catalog: &str,
schema: &str,
table_name: &str,
) -> error::Result<Option<TableRef>>;
}
pub type CatalogManagerRef = Arc<dyn CatalogManager>;
/// Hook called after system table opening.
pub type OpenSystemTableHook = Arc<dyn Fn(TableRef) -> Result<()> + Send + Sync>;
pub type OpenSystemTableHook = Arc<dyn Fn(TableRef) -> error::Result<()> + Send + Sync>;
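// For illustration: a hook is just a shared, thread-safe closure over the opened table.
// A minimal sketch of constructing one; the no-op body is made up for the example, and
// `Arc`, `TableRef` and the crate's `error::Result` are assumed to be in scope as above.
fn noop_hook() -> OpenSystemTableHook {
    Arc::new(|_table: TableRef| -> error::Result<()> {
        // Runs once the system table has been opened or created.
        Ok(())
    })
}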
/// Register system table request:
/// - When system table is already created and registered, the hook will be called
@@ -130,31 +112,6 @@ pub struct RegisterTableRequest {
pub table: TableRef,
}
impl Debug for RegisterTableRequest {
fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
f.debug_struct("RegisterTableRequest")
.field("catalog", &self.catalog)
.field("schema", &self.schema)
.field("table_name", &self.table_name)
.field("table_id", &self.table_id)
.field("table", &self.table.table_info())
.finish()
}
}
#[derive(Clone)]
pub struct DeregisterTableRequest {
pub catalog: String,
pub schema: String,
pub table_name: String,
}
#[derive(Debug, Clone)]
pub struct RegisterSchemaRequest {
pub catalog: String,
pub schema: String,
}
/// Formats table fully-qualified name
pub fn format_full_table_name(catalog: &str, schema: &str, table: &str) -> String {
format!("{}.{}.{}", catalog, schema, table)

View File

@@ -1,21 +1,7 @@
// Copyright 2022 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
pub mod manager;
pub mod memory;
pub use manager::LocalCatalogManager;
pub use memory::{
new_memory_catalog_list, MemoryCatalogManager, MemoryCatalogProvider, MemorySchemaProvider,
new_memory_catalog_list, MemoryCatalogList, MemoryCatalogProvider, MemorySchemaProvider,
};

View File

@@ -1,17 +1,3 @@
// Copyright 2022 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::any::Any;
use std::sync::atomic::{AtomicU32, Ordering};
use std::sync::Arc;
@@ -20,25 +6,26 @@ use common_catalog::consts::{
DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, INFORMATION_SCHEMA_NAME, MIN_USER_TABLE_ID,
SYSTEM_CATALOG_NAME, SYSTEM_CATALOG_TABLE_NAME,
};
use common_recordbatch::{RecordBatch, SendableRecordBatchStream};
use common_telemetry::{error, info};
use common_recordbatch::RecordBatch;
use common_telemetry::{debug, info};
use datatypes::prelude::ScalarVector;
use datatypes::vectors::{BinaryVector, UInt8Vector};
use futures_util::lock::Mutex;
use futures_util::StreamExt;
use snafu::{ensure, OptionExt, ResultExt};
use table::engine::{EngineContext, TableEngineRef};
use table::metadata::TableId;
use table::requests::OpenTableRequest;
use table::table::numbers::NumbersTable;
use table::table::TableIdProvider;
use table::TableRef;
use crate::error::Result;
use crate::error::{
CatalogNotFoundSnafu, IllegalManagerStateSnafu, OpenTableSnafu, ReadSystemCatalogSnafu, Result,
SchemaExistsSnafu, SchemaNotFoundSnafu, SystemCatalogSnafu, SystemCatalogTypeMismatchSnafu,
TableExistsSnafu, TableNotFoundSnafu, UnimplementedSnafu,
CatalogNotFoundSnafu, IllegalManagerStateSnafu, OpenTableSnafu, ReadSystemCatalogSnafu,
SchemaNotFoundSnafu, SystemCatalogSnafu, SystemCatalogTypeMismatchSnafu, TableExistsSnafu,
TableNotFoundSnafu,
};
use crate::local::memory::{MemoryCatalogManager, MemoryCatalogProvider, MemorySchemaProvider};
use crate::local::memory::{MemoryCatalogList, MemoryCatalogProvider, MemorySchemaProvider};
use crate::system::{
decode_system_catalog, Entry, SystemCatalogTable, TableEntry, ENTRY_TYPE_INDEX, KEY_INDEX,
VALUE_INDEX,
@@ -46,18 +33,17 @@ use crate::system::{
use crate::tables::SystemCatalog;
use crate::{
format_full_table_name, handle_system_table_request, CatalogList, CatalogManager,
CatalogProvider, CatalogProviderRef, DeregisterTableRequest, RegisterSchemaRequest,
RegisterSystemTableRequest, RegisterTableRequest, SchemaProvider, SchemaProviderRef,
CatalogProvider, CatalogProviderRef, RegisterSystemTableRequest, RegisterTableRequest,
SchemaProvider,
};
/// A `CatalogManager` consists of a system catalog and a bunch of user catalogs.
pub struct LocalCatalogManager {
system: Arc<SystemCatalog>,
catalogs: Arc<MemoryCatalogManager>,
catalogs: Arc<MemoryCatalogList>,
engine: TableEngineRef,
next_table_id: AtomicU32,
init_lock: Mutex<bool>,
register_lock: Mutex<()>,
system_table_requests: Mutex<Vec<RegisterSystemTableRequest>>,
}
@@ -77,7 +63,6 @@ impl LocalCatalogManager {
engine,
next_table_id: AtomicU32::new(MIN_USER_TABLE_ID),
init_lock: Mutex::new(false),
register_lock: Mutex::new(()),
system_table_requests: Mutex::new(Vec::default()),
})
}
@@ -85,10 +70,17 @@ impl LocalCatalogManager {
/// Scan all entries from system catalog table
pub async fn init(&self) -> Result<()> {
self.init_system_catalog()?;
let system_records = self.system.information_schema.system.records().await?;
let entries = self.collect_system_catalog_entries(system_records).await?;
let max_table_id = self.handle_system_catalog_entries(entries).await?;
let mut system_records = self.system.information_schema.system.records().await?;
let mut max_table_id = 0;
while let Some(records) = system_records
.next()
.await
.transpose()
.context(ReadSystemCatalogSnafu)?
{
let table_id = self.handle_system_catalog_entries(records).await?;
max_table_id = max_table_id.max(table_id);
}
info!(
"All system catalog entries processed, max table id: {}",
max_table_id
@@ -118,6 +110,7 @@ impl LocalCatalogManager {
let default_schema = Arc::new(MemorySchemaProvider::new());
// Add numbers table for test
// TODO(hl): remove this registration
let table = Arc::new(NumbersTable::default());
default_schema.register_table("numbers".to_string(), table)?;
@@ -127,72 +120,47 @@ impl LocalCatalogManager {
Ok(())
}
/// Collect a stream of system catalog entries into a `Vec<Entry>`
async fn collect_system_catalog_entries(
&self,
stream: SendableRecordBatchStream,
) -> Result<Vec<Entry>> {
let record_batch = common_recordbatch::util::collect(stream)
.await
.context(ReadSystemCatalogSnafu)?;
let rbs = record_batch
.into_iter()
.map(Self::record_batch_to_entry)
.collect::<Result<Vec<_>>>()?;
Ok(rbs.into_iter().flat_map(Vec::into_iter).collect::<_>())
}
/// Convert `RecordBatch` to a vector of `Entry`.
fn record_batch_to_entry(rb: RecordBatch) -> Result<Vec<Entry>> {
/// Processes records from system catalog table and returns the max table id persisted
/// in system catalog table.
async fn handle_system_catalog_entries(&self, records: RecordBatch) -> Result<TableId> {
ensure!(
rb.num_columns() >= 6,
records.df_recordbatch.columns().len() >= 6,
SystemCatalogSnafu {
msg: format!("Length mismatch: {}", rb.num_columns())
msg: format!(
"Length mismatch: {}",
records.df_recordbatch.columns().len()
)
}
);
let entry_type = rb
.column(ENTRY_TYPE_INDEX)
.as_any()
.downcast_ref::<UInt8Vector>()
.with_context(|| SystemCatalogTypeMismatchSnafu {
data_type: rb.column(ENTRY_TYPE_INDEX).data_type(),
let entry_type = UInt8Vector::try_from_arrow_array(&records.df_recordbatch.columns()[0])
.with_context(|_| SystemCatalogTypeMismatchSnafu {
data_type: records.df_recordbatch.columns()[ENTRY_TYPE_INDEX]
.data_type()
.clone(),
})?;
let key = rb
.column(KEY_INDEX)
.as_any()
.downcast_ref::<BinaryVector>()
.with_context(|| SystemCatalogTypeMismatchSnafu {
data_type: rb.column(KEY_INDEX).data_type(),
let key = BinaryVector::try_from_arrow_array(&records.df_recordbatch.columns()[1])
.with_context(|_| SystemCatalogTypeMismatchSnafu {
data_type: records.df_recordbatch.columns()[KEY_INDEX]
.data_type()
.clone(),
})?;
let value = rb
.column(VALUE_INDEX)
.as_any()
.downcast_ref::<BinaryVector>()
.with_context(|| SystemCatalogTypeMismatchSnafu {
data_type: rb.column(VALUE_INDEX).data_type(),
let value = BinaryVector::try_from_arrow_array(&records.df_recordbatch.columns()[3])
.with_context(|_| SystemCatalogTypeMismatchSnafu {
data_type: records.df_recordbatch.columns()[VALUE_INDEX]
.data_type()
.clone(),
})?;
let mut res = Vec::with_capacity(rb.num_rows());
let mut max_table_id = 0;
for ((t, k), v) in entry_type
.iter_data()
.zip(key.iter_data())
.zip(value.iter_data())
{
let entry = decode_system_catalog(t, k, v)?;
res.push(entry);
}
Ok(res)
}
/// Processes records from system catalog table and returns the max table id persisted
/// in system catalog table.
async fn handle_system_catalog_entries(&self, entries: Vec<Entry>) -> Result<TableId> {
let entries = Self::sort_entries(entries);
let mut max_table_id = 0;
for entry in entries {
match entry {
Entry::Catalog(c) => {
self.catalogs.register_catalog_if_absent(
@@ -215,6 +183,7 @@ impl LocalCatalogManager {
info!("Registered schema: {:?}", s);
}
Entry::Table(t) => {
debug!("t: {:?}", t);
self.open_and_register_table(&t).await?;
info!("Registered table: {:?}", t);
max_table_id = max_table_id.max(t.table_id);
@@ -224,13 +193,6 @@ impl LocalCatalogManager {
Ok(max_table_id)
}
/// Sort catalog entries to ensure catalog entries come first, then schema entries,
/// and table entries come last.
fn sort_entries(mut entries: Vec<Entry>) -> Vec<Entry> {
entries.sort();
entries
}
async fn open_and_register_table(&self, t: &TableEntry) -> Result<()> {
let catalog = self
.catalogs
@@ -250,7 +212,6 @@ impl LocalCatalogManager {
schema_name: t.schema_name.clone(),
table_name: t.table_name.clone(),
table_id: t.table_id,
region_numbers: vec![0],
};
let option = self
@@ -303,13 +264,6 @@ impl CatalogList for LocalCatalogManager {
}
}
#[async_trait::async_trait]
impl TableIdProvider for LocalCatalogManager {
async fn next_table_id(&self) -> table::Result<TableId> {
Ok(self.next_table_id.fetch_add(1, Ordering::Relaxed))
}
}
#[async_trait::async_trait]
impl CatalogManager for LocalCatalogManager {
/// Start [LocalCatalogManager] to load all information from system catalog table.
@@ -318,7 +272,12 @@ impl CatalogManager for LocalCatalogManager {
self.init().await
}
async fn register_table(&self, request: RegisterTableRequest) -> Result<bool> {
#[inline]
fn next_table_id(&self) -> TableId {
self.next_table_id.fetch_add(1, Ordering::Relaxed)
}
async fn register_table(&self, request: RegisterTableRequest) -> Result<usize> {
let started = self.init_lock.lock().await;
ensure!(
@@ -341,79 +300,24 @@ impl CatalogManager for LocalCatalogManager {
schema_info: format!("{}.{}", catalog_name, schema_name),
})?;
{
let _lock = self.register_lock.lock().await;
if let Some(existing) = schema.table(&request.table_name)? {
if existing.table_info().ident.table_id != request.table_id {
error!(
"Unexpected table register request: {:?}, existing: {:?}",
request,
existing.table_info()
);
return TableExistsSnafu {
table: format_full_table_name(
catalog_name,
schema_name,
&request.table_name,
),
}
.fail();
}
// Try to register table with same table id, just ignore.
Ok(false)
} else {
// table does not exist
self.system
.register_table(
catalog_name.clone(),
schema_name.clone(),
request.table_name.clone(),
request.table_id,
)
.await?;
schema.register_table(request.table_name, request.table)?;
Ok(true)
if schema.table_exist(&request.table_name)? {
return TableExistsSnafu {
table: format_full_table_name(catalog_name, schema_name, &request.table_name),
}
.fail();
}
}
async fn deregister_table(&self, _request: DeregisterTableRequest) -> Result<bool> {
UnimplementedSnafu {
operation: "deregister table",
}
.fail()
}
self.system
.register_table(
catalog_name.clone(),
schema_name.clone(),
request.table_name.clone(),
request.table_id,
)
.await?;
async fn register_schema(&self, request: RegisterSchemaRequest) -> Result<bool> {
let started = self.init_lock.lock().await;
ensure!(
*started,
IllegalManagerStateSnafu {
msg: "Catalog manager not started",
}
);
let catalog_name = &request.catalog;
let schema_name = &request.schema;
let catalog = self
.catalogs
.catalog(catalog_name)?
.context(CatalogNotFoundSnafu { catalog_name })?;
{
let _lock = self.register_lock.lock().await;
ensure!(
catalog.schema(schema_name)?.is_none(),
SchemaExistsSnafu {
schema: schema_name,
}
);
self.system
.register_schema(request.catalog, schema_name.clone())
.await?;
catalog.register_schema(request.schema, Arc::new(MemorySchemaProvider::new()))?;
Ok(true)
}
schema.register_table(request.table_name, request.table)?;
Ok(1)
}
async fn register_system_table(&self, request: RegisterSystemTableRequest) -> Result<()> {
@@ -430,15 +334,6 @@ impl CatalogManager for LocalCatalogManager {
Ok(())
}
fn schema(&self, catalog: &str, schema: &str) -> Result<Option<SchemaProviderRef>> {
self.catalogs
.catalog(catalog)?
.context(CatalogNotFoundSnafu {
catalog_name: catalog,
})?
.schema(schema)
}
fn table(
&self,
catalog_name: &str,
@@ -457,50 +352,3 @@ impl CatalogManager for LocalCatalogManager {
schema.table(table_name)
}
}
#[cfg(test)]
mod tests {
use std::assert_matches::assert_matches;
use super::*;
use crate::system::{CatalogEntry, SchemaEntry};
#[test]
fn test_sort_entry() {
let vec = vec![
Entry::Table(TableEntry {
catalog_name: "C1".to_string(),
schema_name: "S1".to_string(),
table_name: "T1".to_string(),
table_id: 1,
}),
Entry::Catalog(CatalogEntry {
catalog_name: "C2".to_string(),
}),
Entry::Schema(SchemaEntry {
catalog_name: "C1".to_string(),
schema_name: "S1".to_string(),
}),
Entry::Schema(SchemaEntry {
catalog_name: "C2".to_string(),
schema_name: "S2".to_string(),
}),
Entry::Catalog(CatalogEntry {
catalog_name: "".to_string(),
}),
Entry::Table(TableEntry {
catalog_name: "C1".to_string(),
schema_name: "S1".to_string(),
table_name: "T2".to_string(),
table_id: 2,
}),
];
let res = LocalCatalogManager::sort_entries(vec);
assert_matches!(res[0], Entry::Catalog(..));
assert_matches!(res[1], Entry::Catalog(..));
assert_matches!(res[2], Entry::Schema(..));
assert_matches!(res[3], Entry::Schema(..));
assert_matches!(res[4], Entry::Table(..));
assert_matches!(res[5], Entry::Table(..));
}
}

View File

@@ -1,151 +1,23 @@
// Copyright 2022 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::any::Any;
use std::collections::hash_map::Entry;
use std::collections::HashMap;
use std::sync::atomic::{AtomicU32, Ordering};
use std::sync::{Arc, RwLock};
use std::sync::Arc;
use std::sync::RwLock;
use common_catalog::consts::MIN_USER_TABLE_ID;
use common_telemetry::error;
use snafu::OptionExt;
use table::metadata::TableId;
use table::table::TableIdProvider;
use table::TableRef;
use crate::error::{CatalogNotFoundSnafu, Result, SchemaNotFoundSnafu, TableExistsSnafu};
use crate::error::{Result, TableExistsSnafu};
use crate::schema::SchemaProvider;
use crate::{
CatalogList, CatalogManager, CatalogProvider, CatalogProviderRef, DeregisterTableRequest,
RegisterSchemaRequest, RegisterSystemTableRequest, RegisterTableRequest, SchemaProviderRef,
};
use crate::{CatalogList, CatalogProvider, CatalogProviderRef, SchemaProviderRef};
/// Simple in-memory list of catalogs
pub struct MemoryCatalogManager {
#[derive(Default)]
pub struct MemoryCatalogList {
/// Collection of catalogs containing schemas and ultimately Tables
pub catalogs: RwLock<HashMap<String, CatalogProviderRef>>,
pub table_id: AtomicU32,
}
impl Default for MemoryCatalogManager {
fn default() -> Self {
let manager = Self {
table_id: AtomicU32::new(MIN_USER_TABLE_ID),
catalogs: Default::default(),
};
let default_catalog = Arc::new(MemoryCatalogProvider::new());
manager
.register_catalog("greptime".to_string(), default_catalog.clone())
.unwrap();
default_catalog
.register_schema("public".to_string(), Arc::new(MemorySchemaProvider::new()))
.unwrap();
manager
}
}
#[async_trait::async_trait]
impl TableIdProvider for MemoryCatalogManager {
async fn next_table_id(&self) -> table::error::Result<TableId> {
Ok(self.table_id.fetch_add(1, Ordering::Relaxed))
}
}
#[async_trait::async_trait]
impl CatalogManager for MemoryCatalogManager {
async fn start(&self) -> Result<()> {
self.table_id.store(MIN_USER_TABLE_ID, Ordering::Relaxed);
Ok(())
}
async fn register_table(&self, request: RegisterTableRequest) -> Result<bool> {
let catalogs = self.catalogs.write().unwrap();
let catalog = catalogs
.get(&request.catalog)
.context(CatalogNotFoundSnafu {
catalog_name: &request.catalog,
})?
.clone();
let schema = catalog
.schema(&request.schema)?
.with_context(|| SchemaNotFoundSnafu {
schema_info: format!("{}.{}", &request.catalog, &request.schema),
})?;
schema
.register_table(request.table_name, request.table)
.map(|v| v.is_none())
}
async fn deregister_table(&self, request: DeregisterTableRequest) -> Result<bool> {
let catalogs = self.catalogs.write().unwrap();
let catalog = catalogs
.get(&request.catalog)
.context(CatalogNotFoundSnafu {
catalog_name: &request.catalog,
})?
.clone();
let schema = catalog
.schema(&request.schema)?
.with_context(|| SchemaNotFoundSnafu {
schema_info: format!("{}.{}", &request.catalog, &request.schema),
})?;
schema
.deregister_table(&request.table_name)
.map(|v| v.is_some())
}
async fn register_schema(&self, request: RegisterSchemaRequest) -> Result<bool> {
let catalogs = self.catalogs.write().unwrap();
let catalog = catalogs
.get(&request.catalog)
.context(CatalogNotFoundSnafu {
catalog_name: &request.catalog,
})?;
catalog.register_schema(request.schema, Arc::new(MemorySchemaProvider::new()))?;
Ok(true)
}
async fn register_system_table(&self, _request: RegisterSystemTableRequest) -> Result<()> {
// TODO(ruihang): support register system table request
Ok(())
}
fn schema(&self, catalog: &str, schema: &str) -> Result<Option<SchemaProviderRef>> {
let catalogs = self.catalogs.read().unwrap();
if let Some(c) = catalogs.get(catalog) {
c.schema(schema)
} else {
Ok(None)
}
}
fn table(&self, catalog: &str, schema: &str, table_name: &str) -> Result<Option<TableRef>> {
let c = self.catalogs.read().unwrap();
let catalog = if let Some(c) = c.get(catalog) {
c.clone()
} else {
return Ok(None);
};
match catalog.schema(schema)? {
None => Ok(None),
Some(s) => s.table(table_name),
}
}
}
impl MemoryCatalogManager {
impl MemoryCatalogList {
/// Registers a catalog and returns `None` if no catalog with the same name was already
/// registered, or `Some` with the previously registered catalog.
pub fn register_catalog_if_absent(
@@ -165,7 +37,7 @@ impl MemoryCatalogManager {
}
}
impl CatalogList for MemoryCatalogManager {
impl CatalogList for MemoryCatalogList {
fn as_any(&self) -> &dyn Any {
self
}
@@ -271,21 +143,11 @@ impl SchemaProvider for MemorySchemaProvider {
}
fn register_table(&self, name: String, table: TableRef) -> Result<Option<TableRef>> {
let mut tables = self.tables.write().unwrap();
if let Some(existing) = tables.get(name.as_str()) {
// if table with the same name but different table id exists, then it's a fatal bug
if existing.table_info().ident.table_id != table.table_info().ident.table_id {
error!(
"Unexpected table register: {:?}, existing: {:?}",
table.table_info(),
existing.table_info()
);
return TableExistsSnafu { table: name }.fail()?;
}
Ok(Some(existing.clone()))
} else {
Ok(tables.insert(name, table))
if self.table_exist(name.as_str())? {
return TableExistsSnafu { table: name }.fail()?;
}
let mut tables = self.tables.write().unwrap();
Ok(tables.insert(name, table))
}
fn deregister_table(&self, name: &str) -> Result<Option<TableRef>> {
@@ -300,8 +162,8 @@ impl SchemaProvider for MemorySchemaProvider {
}
/// Create a memory catalog list that contains a numbers table for tests
pub fn new_memory_catalog_list() -> Result<Arc<MemoryCatalogManager>> {
Ok(Arc::new(MemoryCatalogManager::default()))
pub fn new_memory_catalog_list() -> Result<Arc<MemoryCatalogList>> {
Ok(Arc::new(MemoryCatalogList::default()))
}
#[cfg(test)]
@@ -316,11 +178,23 @@ mod tests {
#[test]
fn test_new_memory_catalog_list() {
let catalog_list = new_memory_catalog_list().unwrap();
let default_catalog = catalog_list.catalog(DEFAULT_CATALOG_NAME).unwrap().unwrap();
let default_schema = default_catalog
assert!(catalog_list
.catalog(DEFAULT_CATALOG_NAME)
.unwrap()
.is_none());
let default_catalog = Arc::new(MemoryCatalogProvider::default());
catalog_list
.register_catalog(DEFAULT_CATALOG_NAME.to_string(), default_catalog.clone())
.unwrap();
assert!(default_catalog
.schema(DEFAULT_SCHEMA_NAME)
.unwrap()
.is_none());
let default_schema = Arc::new(MemorySchemaProvider::default());
default_catalog
.register_schema(DEFAULT_SCHEMA_NAME.to_string(), default_schema.clone())
.unwrap();
default_schema
@@ -329,6 +203,7 @@ mod tests {
let table = default_schema.table("numbers").unwrap();
assert!(table.is_some());
assert!(default_schema.table("not_exists").unwrap().is_none());
}
@@ -345,7 +220,7 @@ mod tests {
.unwrap()
.is_none());
assert!(provider.table_exist(table_name).unwrap());
let other_table = NumbersTable::new(12);
let other_table = NumbersTable::default();
let result = provider.register_table(table_name.to_string(), Arc::new(other_table));
let err = result.err().unwrap();
assert!(err.backtrace_opt().is_some());
@@ -354,7 +229,7 @@ mod tests {
#[test]
pub fn test_register_if_absent() {
let list = MemoryCatalogManager::default();
let list = MemoryCatalogList::default();
assert!(list
.register_catalog_if_absent(
"test_catalog".to_string(),
@@ -366,38 +241,6 @@ mod tests {
Arc::new(MemoryCatalogProvider::new()),
)
.unwrap();
list.as_any()
.downcast_ref::<MemoryCatalogManager>()
.unwrap();
}
#[tokio::test]
pub async fn test_catalog_deregister_table() {
let catalog = MemoryCatalogManager::default();
let schema = catalog
.schema(DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME)
.unwrap()
.unwrap();
let register_table_req = RegisterTableRequest {
catalog: DEFAULT_CATALOG_NAME.to_string(),
schema: DEFAULT_SCHEMA_NAME.to_string(),
table_name: "numbers".to_string(),
table_id: 2333,
table: Arc::new(NumbersTable::default()),
};
catalog.register_table(register_table_req).await.unwrap();
assert!(schema.table_exist("numbers").unwrap());
let deregister_table_req = DeregisterTableRequest {
catalog: DEFAULT_CATALOG_NAME.to_string(),
schema: DEFAULT_SCHEMA_NAME.to_string(),
table_name: "numbers".to_string(),
};
catalog
.deregister_table(deregister_table_req)
.await
.unwrap();
assert!(!schema.table_exist("numbers").unwrap());
list.as_any().downcast_ref::<MemoryCatalogList>().unwrap();
}
}

View File

@@ -1,17 +1,3 @@
// Copyright 2022 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::fmt::Debug;
use std::pin::Pin;
use std::sync::Arc;
@@ -33,27 +19,13 @@ pub type ValueIter<'a, E> = Pin<Box<dyn Stream<Item = Result<Kv, E>> + Send + 'a
#[async_trait::async_trait]
pub trait KvBackend: Send + Sync {
fn range<'a, 'b>(&'a self, key: &[u8]) -> ValueIter<'b, Error>
fn range<'a, 'b>(&'a self, key: &[u8]) -> ValueIter<'b, crate::error::Error>
where
'a: 'b;
async fn set(&self, key: &[u8], val: &[u8]) -> Result<(), Error>;
async fn set(&self, key: &[u8], val: &[u8]) -> Result<(), crate::error::Error>;
/// Compare and set the value of a key. `expect` is the expected value; if the backend's current value associated
/// with the key is the same as `expect`, the value will be updated to `val`.
///
/// - If the compare-and-set operation successfully updated the value, this method returns `Ok(Ok(()))`.
/// - If the associated value is not the same as `expect`, no value is updated and `Ok(Err(Option<Vec<u8>>))`
///   is returned; the inner `Option<Vec<u8>>` carries the value currently associated with the key.
/// - If any error happens during the operation, an `Err(Error)` is returned.
async fn compare_and_set(
&self,
key: &[u8],
expect: &[u8],
val: &[u8],
) -> Result<Result<(), Option<Vec<u8>>>, Error>;
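// For illustration: a hedged sketch of how a caller might interpret the nested result.
// The key and value literals are made up; any implementor of `KvBackend` works here.
async fn try_swap(backend: &dyn KvBackend, key: &[u8]) -> Result<(), Error> {
    match backend.compare_and_set(key, b"old", b"new").await? {
        // The stored value matched `expect`, so it was replaced by `val`.
        Ok(()) => Ok(()),
        // No update happened; the inner value is whatever is currently stored, if anything.
        Err(_current) => Ok(()),
    }
}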
async fn delete_range(&self, key: &[u8], end: &[u8]) -> Result<(), Error>;
async fn delete_range(&self, key: &[u8], end: &[u8]) -> Result<(), crate::error::Error>;
async fn delete(&self, key: &[u8]) -> Result<(), Error> {
self.delete_range(key, &[]).await
@@ -102,15 +74,6 @@ mod tests {
unimplemented!()
}
async fn compare_and_set(
&self,
_key: &[u8],
_expect: &[u8],
_val: &[u8],
) -> Result<Result<(), Option<Vec<u8>>>, Error> {
unimplemented!()
}
async fn delete_range(&self, _key: &[u8], _end: &[u8]) -> Result<(), Error> {
unimplemented!()
}

View File

@@ -1,31 +1,16 @@
// Copyright 2022 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::fmt::Debug;
use std::sync::Arc;
use async_stream::stream;
use common_telemetry::info;
use meta_client::client::MetaClient;
use meta_client::rpc::{CompareAndPutRequest, DeleteRangeRequest, PutRequest, RangeRequest};
use meta_client::rpc::{DeleteRangeRequest, PutRequest, RangeRequest};
use snafu::ResultExt;
use crate::error::{Error, MetaSrvSnafu};
use crate::remote::{Kv, KvBackend, ValueIter};
#[derive(Debug)]
pub struct MetaKvBackend {
pub client: Arc<MetaClient>,
pub client: MetaClient,
}
/// Implement `KvBackend` trait for `MetaKvBackend` instead of opendal's `Accessor` since
@@ -83,26 +68,4 @@ impl KvBackend for MetaKvBackend {
Ok(())
}
async fn compare_and_set(
&self,
key: &[u8],
expect: &[u8],
val: &[u8],
) -> Result<Result<(), Option<Vec<u8>>>, Error> {
let request = CompareAndPutRequest::new()
.with_key(key.to_vec())
.with_expect(expect.to_vec())
.with_value(val.to_vec());
let mut response = self
.client
.compare_and_put(request)
.await
.context(MetaSrvSnafu)?;
if response.is_success() {
Ok(Ok(()))
} else {
Ok(Err(response.take_prev_kv().map(|v| v.value().to_vec())))
}
}
}

View File

@@ -1,49 +1,36 @@
// Copyright 2022 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::any::Any;
use std::collections::HashMap;
use std::pin::Pin;
use std::sync::atomic::{AtomicU32, Ordering};
use std::sync::Arc;
use arc_swap::ArcSwap;
use async_stream::stream;
use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, MIN_USER_TABLE_ID};
use common_catalog::{
build_catalog_prefix, build_schema_prefix, build_table_prefix, CatalogKey, CatalogValue,
SchemaKey, SchemaValue, TableKey, TableValue,
};
use common_telemetry::{debug, info};
use datatypes::schema::Schema;
use futures::Stream;
use futures_util::StreamExt;
use snafu::{OptionExt, ResultExt};
use table::engine::{EngineContext, TableEngineRef};
use table::metadata::TableId;
use table::metadata::{TableId, TableVersion};
use table::requests::{CreateTableRequest, OpenTableRequest};
use table::table::numbers::NumbersTable;
use table::TableRef;
use tokio::sync::Mutex;
use crate::error::Result;
use crate::error::{
CatalogNotFoundSnafu, CreateTableSnafu, InvalidCatalogValueSnafu, InvalidTableSchemaSnafu,
OpenTableSnafu, Result, SchemaNotFoundSnafu, TableExistsSnafu, UnimplementedSnafu,
};
use crate::helper::{
build_catalog_prefix, build_schema_prefix, build_table_global_prefix, CatalogKey, CatalogValue,
SchemaKey, SchemaValue, TableGlobalKey, TableGlobalValue, TableRegionalKey, TableRegionalValue,
CatalogNotFoundSnafu, CreateTableSnafu, InvalidCatalogValueSnafu, OpenTableSnafu,
SchemaNotFoundSnafu, TableExistsSnafu,
};
use crate::remote::{Kv, KvBackendRef};
use crate::{
handle_system_table_request, CatalogList, CatalogManager, CatalogProvider, CatalogProviderRef,
DeregisterTableRequest, RegisterSchemaRequest, RegisterSystemTableRequest,
RegisterTableRequest, SchemaProvider, SchemaProviderRef,
RegisterSystemTableRequest, RegisterTableRequest, SchemaProvider, SchemaProviderRef,
};
/// Catalog manager based on metasrv.
@@ -51,6 +38,7 @@ pub struct RemoteCatalogManager {
node_id: u64,
backend: KvBackendRef,
catalogs: Arc<ArcSwap<HashMap<String, CatalogProviderRef>>>,
next_table_id: Arc<AtomicU32>,
engine: TableEngineRef,
system_table_requests: Mutex<Vec<RegisterSystemTableRequest>>,
mutex: Arc<Mutex<()>>,
@@ -63,6 +51,7 @@ impl RemoteCatalogManager {
node_id,
backend,
catalogs: Default::default(),
next_table_id: Default::default(),
system_table_requests: Default::default(),
mutex: Default::default(),
}
@@ -71,13 +60,14 @@ impl RemoteCatalogManager {
fn build_catalog_key(&self, catalog_name: impl AsRef<str>) -> CatalogKey {
CatalogKey {
catalog_name: catalog_name.as_ref().to_string(),
node_id: self.node_id,
}
}
fn new_catalog_provider(&self, catalog_name: &str) -> CatalogProviderRef {
Arc::new(RemoteCatalogProvider {
node_id: self.node_id,
catalog_name: catalog_name.to_string(),
node_id: self.node_id,
backend: self.backend.clone(),
schemas: Default::default(),
mutex: Default::default(),
@@ -110,7 +100,9 @@ impl RemoteCatalogManager {
}
let key = CatalogKey::parse(&String::from_utf8_lossy(&k))
.context(InvalidCatalogValueSnafu)?;
yield Ok(key)
if key.node_id == self.node_id {
yield Ok(key)
}
}
}))
}
@@ -132,18 +124,23 @@ impl RemoteCatalogManager {
let schema_key = SchemaKey::parse(&String::from_utf8_lossy(&k))
.context(InvalidCatalogValueSnafu)?;
yield Ok(schema_key)
if schema_key.node_id == self.node_id {
yield Ok(schema_key)
}
}
}))
}
/// Iterate over all table entries on metasrv.
/// TODO(hl): table entries with different versions are not currently considered.
/// Ideally deprecated table entries must be deleted when deregistering from the catalog.
async fn iter_remote_tables(
&self,
catalog_name: &str,
schema_name: &str,
) -> Pin<Box<dyn Stream<Item = Result<(TableGlobalKey, TableGlobalValue)>> + Send + '_>> {
let table_prefix = build_table_global_prefix(catalog_name, schema_name);
) -> Pin<Box<dyn Stream<Item = Result<(TableKey, TableValue)>> + Send + '_>> {
let table_prefix = build_table_prefix(catalog_name, schema_name);
let mut tables = self.backend.range(table_prefix.as_bytes());
Box::pin(stream!({
while let Some(r) = tables.next().await {
@@ -152,22 +149,12 @@ impl RemoteCatalogManager {
debug!("Ignoring non-table prefix: {}", String::from_utf8_lossy(&k));
continue;
}
let table_key = TableGlobalKey::parse(&String::from_utf8_lossy(&k))
let table_key = TableKey::parse(&String::from_utf8_lossy(&k))
.context(InvalidCatalogValueSnafu)?;
let table_value = TableValue::parse(&String::from_utf8_lossy(&v))
.context(InvalidCatalogValueSnafu)?;
let table_value =
TableGlobalValue::from_bytes(&v).context(InvalidCatalogValueSnafu)?;
info!(
"Found catalog table entry, key: {}, value: {:?}",
table_key, table_value
);
// metasrv has allocated region ids to current datanode
if table_value
.regions_id_map
.get(&self.node_id)
.map(|v| !v.is_empty())
.unwrap_or(false)
{
if table_value.node_id == self.node_id {
yield Ok((table_key, table_value))
}
}
@@ -177,7 +164,7 @@ impl RemoteCatalogManager {
/// Fetch catalogs/schemas/tables from remote catalog manager along with max table id allocated.
async fn initiate_catalogs(&self) -> Result<(HashMap<String, CatalogProviderRef>, TableId)> {
let mut res = HashMap::new();
let max_table_id = MIN_USER_TABLE_ID - 1;
let max_table_id = MIN_USER_TABLE_ID;
// initiate default catalog and schema
let default_catalog = self.initiate_default_catalog().await?;
@@ -242,38 +229,35 @@ impl RemoteCatalogManager {
schema: SchemaProviderRef,
mut max_table_id: TableId,
) -> Result<()> {
info!("initializing tables in {}.{}", catalog_name, schema_name);
let mut table_num = 0;
let mut tables = self.iter_remote_tables(catalog_name, schema_name).await;
while let Some(r) = tables.next().await {
let (table_key, table_value) = r?;
let table_ref = self.open_or_create_table(&table_key, &table_value).await?;
schema.register_table(table_key.table_name.to_string(), table_ref)?;
info!("Registered table {}", &table_key.table_name);
max_table_id = max_table_id.max(table_value.table_id());
table_num += 1;
if table_value.id > max_table_id {
info!("Max table id: {} -> {}", max_table_id, table_value.id);
max_table_id = table_value.id;
}
}
info!(
"initialized tables in {}.{}, total: {}",
catalog_name, schema_name, table_num
);
Ok(())
}
async fn initiate_default_catalog(&self) -> Result<CatalogProviderRef> {
let default_catalog = self.new_catalog_provider(DEFAULT_CATALOG_NAME);
let default_schema = self.new_schema_provider(DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME);
default_catalog.register_schema(DEFAULT_SCHEMA_NAME.to_string(), default_schema.clone())?;
default_catalog.register_schema(DEFAULT_SCHEMA_NAME.to_string(), default_schema)?;
let schema_key = SchemaKey {
schema_name: DEFAULT_SCHEMA_NAME.to_string(),
catalog_name: DEFAULT_CATALOG_NAME.to_string(),
node_id: self.node_id,
}
.to_string();
self.backend
.set(
schema_key.as_bytes(),
&SchemaValue {}
.as_bytes()
.to_bytes()
.context(InvalidCatalogValueSnafu)?,
)
.await?;
@@ -281,13 +265,14 @@ impl RemoteCatalogManager {
let catalog_key = CatalogKey {
catalog_name: DEFAULT_CATALOG_NAME.to_string(),
node_id: self.node_id,
}
.to_string();
self.backend
.set(
catalog_key.as_bytes(),
&CatalogValue {}
.as_bytes()
.to_bytes()
.context(InvalidCatalogValueSnafu)?,
)
.await?;
@@ -297,75 +282,41 @@ impl RemoteCatalogManager {
async fn open_or_create_table(
&self,
table_key: &TableGlobalKey,
table_value: &TableGlobalValue,
table_key: &TableKey,
table_value: &TableValue,
) -> Result<TableRef> {
let context = EngineContext {};
let TableGlobalKey {
let TableKey {
catalog_name,
schema_name,
table_name,
..
} = table_key;
let table_id = table_value.table_id();
let TableGlobalValue {
table_info,
regions_id_map,
..
} = table_value;
// unwrap safety: checked when yielding this table in `iter_remote_tables`
let region_numbers = regions_id_map.get(&self.node_id).unwrap();
let TableValue { id, meta, .. } = table_value;
let request = OpenTableRequest {
catalog_name: catalog_name.clone(),
schema_name: schema_name.clone(),
table_name: table_name.clone(),
table_id,
region_numbers: region_numbers.clone(),
table_id: *id,
};
match self
.engine
.open_table(&context, request)
.await
.with_context(|_| OpenTableSnafu {
table_info: format!(
"{}.{}.{}, id:{}",
catalog_name, schema_name, table_name, table_id
),
table_info: format!("{}.{}.{}, id:{}", catalog_name, schema_name, table_name, id,),
})? {
Some(table) => {
info!(
"Table opened: {}.{}.{}",
catalog_name, schema_name, table_name
);
Ok(table)
}
Some(table) => Ok(table),
None => {
info!(
"Try create table: {}.{}.{}",
catalog_name, schema_name, table_name
);
let meta = &table_info.meta;
let schema = meta
.schema
.clone()
.try_into()
.context(InvalidTableSchemaSnafu {
table_info: format!("{}.{}.{}", catalog_name, schema_name, table_name,),
schema: meta.schema.clone(),
})?;
let req = CreateTableRequest {
id: table_id,
id: *id,
catalog_name: catalog_name.clone(),
schema_name: schema_name.clone(),
table_name: table_name.clone(),
desc: None,
schema: Arc::new(schema),
region_numbers: region_numbers.clone(),
schema: Arc::new(Schema::new(meta.schema.column_schemas.clone())),
primary_key_indices: meta.primary_key_indices.clone(),
create_if_not_exists: true,
table_options: meta.options.clone(),
@@ -377,7 +328,7 @@ impl RemoteCatalogManager {
.context(CreateTableSnafu {
table_info: format!(
"{}.{}.{}, id:{}",
&catalog_name, &schema_name, &table_name, table_id
&catalog_name, &schema_name, &table_name, id
),
})
}
@@ -394,24 +345,21 @@ impl CatalogManager for RemoteCatalogManager {
catalogs.keys().cloned().collect::<Vec<_>>()
);
self.catalogs.store(Arc::new(catalogs));
self.next_table_id
.store(max_table_id + 1, Ordering::Relaxed);
info!("Max table id allocated: {}", max_table_id);
let mut system_table_requests = self.system_table_requests.lock().await;
handle_system_table_request(self, self.engine.clone(), &mut system_table_requests).await?;
info!("All system table opened");
self.catalog(DEFAULT_CATALOG_NAME)
.unwrap()
.unwrap()
.schema(DEFAULT_SCHEMA_NAME)
.unwrap()
.unwrap()
.register_table("numbers".to_string(), Arc::new(NumbersTable::default()))
.unwrap();
Ok(())
}
async fn register_table(&self, request: RegisterTableRequest) -> Result<bool> {
fn next_table_id(&self) -> TableId {
self.next_table_id.fetch_add(1, Ordering::Relaxed)
}
async fn register_table(&self, request: RegisterTableRequest) -> Result<usize> {
let catalog_name = request.catalog;
let schema_name = request.schema;
let catalog_provider = self.catalog(&catalog_name)?.context(CatalogNotFoundSnafu {
@@ -430,25 +378,7 @@ impl CatalogManager for RemoteCatalogManager {
.fail();
}
schema_provider.register_table(request.table_name, request.table)?;
Ok(true)
}
async fn deregister_table(&self, _request: DeregisterTableRequest) -> Result<bool> {
UnimplementedSnafu {
operation: "deregister table",
}
.fail()
}
async fn register_schema(&self, request: RegisterSchemaRequest) -> Result<bool> {
let catalog_name = request.catalog;
let schema_name = request.schema;
let catalog_provider = self.catalog(&catalog_name)?.context(CatalogNotFoundSnafu {
catalog_name: &catalog_name,
})?;
let schema_provider = self.new_schema_provider(&catalog_name, &schema_name);
catalog_provider.register_schema(schema_name, schema_provider)?;
Ok(true)
Ok(1)
}
async fn register_system_table(&self, request: RegisterSystemTableRequest) -> Result<()> {
@@ -457,14 +387,6 @@ impl CatalogManager for RemoteCatalogManager {
Ok(())
}
fn schema(&self, catalog: &str, schema: &str) -> Result<Option<SchemaProviderRef>> {
self.catalog(catalog)?
.context(CatalogNotFoundSnafu {
catalog_name: catalog,
})?
.schema(schema)
}
fn table(
&self,
catalog_name: &str,
@@ -505,7 +427,7 @@ impl CatalogList for RemoteCatalogManager {
.set(
key.as_bytes(),
&CatalogValue {}
.as_bytes()
.to_bytes()
.context(InvalidCatalogValueSnafu)?,
)
.await?;
@@ -533,70 +455,29 @@ impl CatalogList for RemoteCatalogManager {
}
pub struct RemoteCatalogProvider {
node_id: u64,
catalog_name: String,
node_id: u64,
backend: KvBackendRef,
schemas: Arc<ArcSwap<HashMap<String, SchemaProviderRef>>>,
mutex: Arc<Mutex<()>>,
}
impl RemoteCatalogProvider {
pub fn new(catalog_name: String, backend: KvBackendRef, node_id: u64) -> Self {
pub fn new(catalog_name: String, node_id: u64, backend: KvBackendRef) -> Self {
Self {
node_id,
catalog_name,
node_id,
backend,
schemas: Default::default(),
mutex: Default::default(),
}
}
pub fn refresh_schemas(&self) -> Result<()> {
let schemas = self.schemas.clone();
let schema_prefix = build_schema_prefix(&self.catalog_name);
let catalog_name = self.catalog_name.clone();
let mutex = self.mutex.clone();
let backend = self.backend.clone();
let node_id = self.node_id;
std::thread::spawn(move || {
common_runtime::block_on_write(async move {
let _guard = mutex.lock().await;
let prev_schemas = schemas.load();
let mut new_schemas = HashMap::with_capacity(prev_schemas.len() + 1);
new_schemas.clone_from(&prev_schemas);
let mut remote_schemas = backend.range(schema_prefix.as_bytes());
while let Some(r) = remote_schemas.next().await {
let Kv(k, _) = r?;
let schema_key = SchemaKey::parse(&String::from_utf8_lossy(&k))
.context(InvalidCatalogValueSnafu)?;
if !new_schemas.contains_key(&schema_key.schema_name) {
new_schemas.insert(
schema_key.schema_name.clone(),
Arc::new(RemoteSchemaProvider::new(
catalog_name.clone(),
schema_key.schema_name,
node_id,
backend.clone(),
)),
);
}
}
schemas.store(Arc::new(new_schemas));
Ok(())
})
})
.join()
.unwrap()?;
Ok(())
}
fn build_schema_key(&self, schema_name: impl AsRef<str>) -> SchemaKey {
SchemaKey {
catalog_name: self.catalog_name.clone(),
schema_name: schema_name.as_ref().to_string(),
node_id: self.node_id,
}
}
}
@@ -607,7 +488,6 @@ impl CatalogProvider for RemoteCatalogProvider {
}
fn schema_names(&self) -> Result<Vec<String>> {
self.refresh_schemas()?;
Ok(self.schemas.load().keys().cloned().collect::<Vec<_>>())
}
@@ -628,11 +508,10 @@ impl CatalogProvider for RemoteCatalogProvider {
.set(
key.as_bytes(),
&SchemaValue {}
.as_bytes()
.to_bytes()
.context(InvalidCatalogValueSnafu)?,
)
.await?;
let prev_schemas = schemas.load();
let mut new_schemas = HashMap::with_capacity(prev_schemas.len() + 1);
new_schemas.clone_from(&prev_schemas);
@@ -646,8 +525,6 @@ impl CatalogProvider for RemoteCatalogProvider {
}
fn schema(&self, name: &str) -> Result<Option<Arc<dyn SchemaProvider>>> {
// TODO(hl): We should refresh whole catalog before calling datafusion's query engine.
self.refresh_schemas()?;
Ok(self.schemas.load().get(name).cloned())
}
}
@@ -678,11 +555,16 @@ impl RemoteSchemaProvider {
}
}
fn build_regional_table_key(&self, table_name: impl AsRef<str>) -> TableRegionalKey {
TableRegionalKey {
fn build_table_key(
&self,
table_name: impl AsRef<str>,
table_version: TableVersion,
) -> TableKey {
TableKey {
catalog_name: self.catalog_name.clone(),
schema_name: self.schema_name.clone(),
table_name: table_name.as_ref().to_string(),
version: table_version,
node_id: self.node_id,
}
}
@@ -704,14 +586,19 @@ impl SchemaProvider for RemoteSchemaProvider {
fn register_table(&self, name: String, table: TableRef) -> Result<Option<TableRef>> {
let table_info = table.table_info();
let table_version = table_info.ident.version;
let table_value = TableRegionalValue {
version: table_version,
regions_ids: table.table_info().meta.region_numbers.clone(),
let table_value = TableValue {
meta: table_info.meta.clone().into(),
id: table_info.ident.table_id,
node_id: self.node_id,
regions_ids: vec![],
};
let backend = self.backend.clone();
let mutex = self.mutex.clone();
let tables = self.tables.clone();
let table_key = self.build_regional_table_key(&name).to_string();
let table_key = self
.build_table_key(name.clone(), table_version)
.to_string();
let prev = std::thread::spawn(move || {
common_runtime::block_on_read(async move {
@@ -741,11 +628,18 @@ impl SchemaProvider for RemoteSchemaProvider {
}
fn deregister_table(&self, name: &str) -> Result<Option<TableRef>> {
let table_version = match self.tables.load().get(name) {
None => return Ok(None),
Some(t) => t.table_info().ident.version,
};
let table_name = name.to_string();
let table_key = self.build_regional_table_key(&table_name).to_string();
let table_key = self.build_table_key(&table_name, table_version).to_string();
let backend = self.backend.clone();
let mutex = self.mutex.clone();
let tables = self.tables.clone();
let prev = std::thread::spawn(move || {
common_runtime::block_on_read(async move {
let _guard = mutex.lock().await;


@@ -1,17 +1,3 @@
// Copyright 2022 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::any::Any;
use std::sync::Arc;


@@ -1,33 +1,21 @@
// Copyright 2022 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::any::Any;
use std::collections::HashMap;
use std::sync::Arc;
use common_catalog::consts::{
DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, INFORMATION_SCHEMA_NAME, SYSTEM_CATALOG_NAME,
SYSTEM_CATALOG_TABLE_ID, SYSTEM_CATALOG_TABLE_NAME,
INFORMATION_SCHEMA_NAME, SYSTEM_CATALOG_NAME, SYSTEM_CATALOG_TABLE_ID,
SYSTEM_CATALOG_TABLE_NAME,
};
use common_query::logical_plan::Expr;
use common_query::physical_plan::{PhysicalPlanRef, SessionContext};
use common_query::physical_plan::PhysicalPlanRef;
use common_query::physical_plan::RuntimeEnv;
use common_recordbatch::SendableRecordBatchStream;
use common_telemetry::debug;
use common_time::timestamp::Timestamp;
use common_time::util;
use datatypes::prelude::{ConcreteDataType, ScalarVector};
use datatypes::schema::{ColumnSchema, Schema, SchemaBuilder, SchemaRef};
use datatypes::vectors::{BinaryVector, TimestampMillisecondVector, UInt8Vector};
use datatypes::vectors::{BinaryVector, TimestampVector, UInt8Vector};
use serde::{Deserialize, Serialize};
use snafu::{ensure, OptionExt, ResultExt};
use table::engine::{EngineContext, TableEngineRef};
@@ -42,6 +30,7 @@ use crate::error::{
pub const ENTRY_TYPE_INDEX: usize = 0;
pub const KEY_INDEX: usize = 1;
pub const TIMESTAMP_INDEX: usize = 2;
pub const VALUE_INDEX: usize = 3;
pub struct SystemCatalogTable {
@@ -85,7 +74,6 @@ impl SystemCatalogTable {
schema_name: INFORMATION_SCHEMA_NAME.to_string(),
table_name: SYSTEM_CATALOG_TABLE_NAME.to_string(),
table_id: SYSTEM_CATALOG_TABLE_ID,
region_numbers: vec![0],
};
let schema = Arc::new(build_system_catalog_schema());
let ctx = EngineContext::default();
@@ -108,8 +96,7 @@ impl SystemCatalogTable {
table_name: SYSTEM_CATALOG_TABLE_NAME.to_string(),
desc: Some("System catalog table".to_string()),
schema: schema.clone(),
region_numbers: vec![0],
primary_key_indices: vec![ENTRY_TYPE_INDEX, KEY_INDEX],
primary_key_indices: vec![ENTRY_TYPE_INDEX, KEY_INDEX, TIMESTAMP_INDEX],
create_if_not_exists: true,
table_options: HashMap::new(),
};
@@ -126,14 +113,14 @@ impl SystemCatalogTable {
/// Create a stream of all entries inside system catalog table
pub async fn records(&self) -> Result<SendableRecordBatchStream> {
let full_projection = None;
let ctx = SessionContext::new();
let scan = self
.table
.scan(&full_projection, &[], None)
.await
.context(error::SystemCatalogTableScanSnafu)?;
let stream = scan
.execute(0, ctx.task_ctx())
.execute(0, Arc::new(RuntimeEnv::default()))
.await
.context(error::SystemCatalogTableScanExecSnafu)?;
Ok(stream)
}
@@ -161,10 +148,9 @@ fn build_system_catalog_schema() -> Schema {
),
ColumnSchema::new(
"timestamp".to_string(),
ConcreteDataType::timestamp_millisecond_datatype(),
ConcreteDataType::timestamp_millis_datatype(),
false,
)
.with_time_index(true),
),
ColumnSchema::new(
"value".to_string(),
ConcreteDataType::binary_datatype(),
@@ -172,78 +158,66 @@ fn build_system_catalog_schema() -> Schema {
),
ColumnSchema::new(
"gmt_created".to_string(),
ConcreteDataType::timestamp_millisecond_datatype(),
ConcreteDataType::timestamp_millis_datatype(),
false,
),
ColumnSchema::new(
"gmt_modified".to_string(),
ConcreteDataType::timestamp_millisecond_datatype(),
ConcreteDataType::timestamp_millis_datatype(),
false,
),
];
// The schema of this table must be valid.
SchemaBuilder::try_from(cols).unwrap().build().unwrap()
SchemaBuilder::try_from(cols)
.unwrap()
.timestamp_index(Some(2))
.build()
.unwrap()
}
pub fn build_table_insert_request(full_table_name: String, table_id: TableId) -> InsertRequest {
build_insert_request(
EntryType::Table,
full_table_name.as_bytes(),
serde_json::to_string(&TableEntryValue { table_id })
.unwrap()
.as_bytes(),
)
}
pub fn build_schema_insert_request(catalog_name: String, schema_name: String) -> InsertRequest {
let full_schema_name = format!("{}.{}", catalog_name, schema_name);
build_insert_request(
EntryType::Schema,
full_schema_name.as_bytes(),
serde_json::to_string(&SchemaEntryValue {})
.unwrap()
.as_bytes(),
)
}
pub fn build_insert_request(entry_type: EntryType, key: &[u8], value: &[u8]) -> InsertRequest {
let mut columns_values = HashMap::with_capacity(6);
columns_values.insert(
"entry_type".to_string(),
Arc::new(UInt8Vector::from_slice(&[entry_type as u8])) as _,
Arc::new(UInt8Vector::from_slice(&[EntryType::Table as u8])) as _,
);
columns_values.insert(
"key".to_string(),
Arc::new(BinaryVector::from_slice(&[key])) as _,
Arc::new(BinaryVector::from_slice(&[full_table_name.as_bytes()])) as _,
);
// Timestamp in key part is intentionally left to 0
columns_values.insert(
"timestamp".to_string(),
Arc::new(TimestampMillisecondVector::from_slice(&[0])) as _,
Arc::new(TimestampVector::from_slice(&[Timestamp::from_millis(0)])) as _,
);
columns_values.insert(
"value".to_string(),
Arc::new(BinaryVector::from_slice(&[value])) as _,
Arc::new(BinaryVector::from_slice(&[serde_json::to_string(
&TableEntryValue { table_id },
)
.unwrap()
.as_bytes()])) as _,
);
let now = util::current_time_millis();
columns_values.insert(
"gmt_created".to_string(),
Arc::new(TimestampMillisecondVector::from_slice(&[now])) as _,
Arc::new(TimestampVector::from_slice(&[Timestamp::from_millis(
util::current_time_millis(),
)])) as _,
);
columns_values.insert(
"gmt_modified".to_string(),
Arc::new(TimestampMillisecondVector::from_slice(&[now])) as _,
Arc::new(TimestampVector::from_slice(&[Timestamp::from_millis(
util::current_time_millis(),
)])) as _,
);
InsertRequest {
catalog_name: DEFAULT_CATALOG_NAME.to_string(),
schema_name: DEFAULT_SCHEMA_NAME.to_string(),
table_name: SYSTEM_CATALOG_TABLE_NAME.to_string(),
columns_values,
}
@@ -331,28 +305,25 @@ impl TryFrom<u8> for EntryType {
}
}
#[derive(Debug, PartialEq, Eq, Ord, PartialOrd)]
#[derive(Debug, PartialEq, Eq)]
pub enum Entry {
Catalog(CatalogEntry),
Schema(SchemaEntry),
Table(TableEntry),
}
#[derive(Debug, PartialEq, Eq, Ord, PartialOrd)]
#[derive(Debug, PartialEq, Eq)]
pub struct CatalogEntry {
pub catalog_name: String,
}
#[derive(Debug, PartialEq, Eq, Ord, PartialOrd)]
#[derive(Debug, PartialEq, Eq)]
pub struct SchemaEntry {
pub catalog_name: String,
pub schema_name: String,
}
#[derive(Debug, Serialize, Deserialize, PartialEq, Eq)]
pub struct SchemaEntryValue;
#[derive(Debug, PartialEq, Eq, Ord, PartialOrd)]
#[derive(Debug, PartialEq, Eq)]
pub struct TableEntry {
pub catalog_name: String,
pub schema_name: String,
@@ -368,19 +339,19 @@ pub struct TableEntryValue {
#[cfg(test)]
mod tests {
use log_store::fs::noop::NoopLogStore;
use mito::config::EngineConfig;
use mito::engine::MitoEngine;
use object_store::ObjectStore;
use storage::config::EngineConfig as StorageEngineConfig;
use storage::EngineImpl;
use table::metadata::TableType;
use table::metadata::TableType::Base;
use table_engine::config::EngineConfig;
use table_engine::engine::MitoEngine;
use tempdir::TempDir;
use super::*;
#[test]
pub fn test_decode_catalog_entry() {
pub fn test_decode_catalog_enrty() {
let entry = decode_system_catalog(
Some(EntryType::Catalog as u8),
Some("some_catalog".as_bytes()),
@@ -452,7 +423,7 @@ mod tests {
pub async fn prepare_table_engine() -> (TempDir, TableEngineRef) {
let dir = TempDir::new("system-table-test").unwrap();
let store_dir = dir.path().to_string_lossy();
let accessor = object_store::backend::fs::Builder::default()
let accessor = opendal::services::fs::Builder::default()
.root(&store_dir)
.build()
.unwrap();


@@ -1,17 +1,3 @@
// Copyright 2022 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// The `tables` table in system catalog keeps a record of all tables created by user.
use std::any::Any;
@@ -26,9 +12,9 @@ use common_query::logical_plan::Expr;
use common_query::physical_plan::PhysicalPlanRef;
use common_recordbatch::error::Result as RecordBatchResult;
use common_recordbatch::{RecordBatch, RecordBatchStream};
use datatypes::prelude::{ConcreteDataType, DataType};
use datatypes::prelude::{ConcreteDataType, VectorBuilder};
use datatypes::schema::{ColumnSchema, Schema, SchemaRef};
use datatypes::value::ValueRef;
use datatypes::value::Value;
use datatypes::vectors::VectorRef;
use futures::Stream;
use snafu::ResultExt;
@@ -38,8 +24,8 @@ use table::metadata::{TableId, TableInfoRef};
use table::table::scan::SimpleTableScan;
use table::{Table, TableRef};
use crate::error::{Error, InsertCatalogRecordSnafu};
use crate::system::{build_schema_insert_request, build_table_insert_request, SystemCatalogTable};
use crate::error::{Error, InsertTableRecordSnafu};
use crate::system::{build_table_insert_request, SystemCatalogTable};
use crate::{
format_full_table_name, CatalogListRef, CatalogProvider, SchemaProvider, SchemaProviderRef,
};
@@ -149,33 +135,26 @@ fn tables_to_record_batch(
engine: &str,
) -> Vec<VectorRef> {
let mut catalog_vec =
ConcreteDataType::string_datatype().create_mutable_vector(table_names.len());
VectorBuilder::with_capacity(ConcreteDataType::string_datatype(), table_names.len());
let mut schema_vec =
ConcreteDataType::string_datatype().create_mutable_vector(table_names.len());
VectorBuilder::with_capacity(ConcreteDataType::string_datatype(), table_names.len());
let mut table_name_vec =
ConcreteDataType::string_datatype().create_mutable_vector(table_names.len());
VectorBuilder::with_capacity(ConcreteDataType::string_datatype(), table_names.len());
let mut engine_vec =
ConcreteDataType::string_datatype().create_mutable_vector(table_names.len());
VectorBuilder::with_capacity(ConcreteDataType::string_datatype(), table_names.len());
for table_name in table_names {
// Safety: All these vectors are string type.
catalog_vec
.push_value_ref(ValueRef::String(catalog_name))
.unwrap();
schema_vec
.push_value_ref(ValueRef::String(schema_name))
.unwrap();
table_name_vec
.push_value_ref(ValueRef::String(&table_name))
.unwrap();
engine_vec.push_value_ref(ValueRef::String(engine)).unwrap();
catalog_vec.push(&Value::String(catalog_name.into()));
schema_vec.push(&Value::String(schema_name.into()));
table_name_vec.push(&Value::String(table_name.into()));
engine_vec.push(&Value::String(engine.into()));
}
vec![
catalog_vec.to_vector(),
schema_vec.to_vector(),
table_name_vec.to_vector(),
engine_vec.to_vector(),
catalog_vec.finish(),
schema_vec.finish(),
table_name_vec.finish(),
engine_vec.finish(),
]
}
@@ -275,20 +254,7 @@ impl SystemCatalog {
.system
.insert(request)
.await
.context(InsertCatalogRecordSnafu)
}
pub async fn register_schema(
&self,
catalog: String,
schema: String,
) -> crate::error::Result<usize> {
let request = build_schema_insert_request(catalog, schema);
self.information_schema
.system
.insert(request)
.await
.context(InsertCatalogRecordSnafu)
.context(InsertTableRecordSnafu)
}
}
@@ -346,72 +312,85 @@ fn build_schema_for_tables() -> Schema {
#[cfg(test)]
mod tests {
use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
use common_query::physical_plan::SessionContext;
use common_query::physical_plan::RuntimeEnv;
use datatypes::arrow::array::Utf8Array;
use datatypes::arrow::datatypes::DataType;
use futures_util::StreamExt;
use table::table::numbers::NumbersTable;
use super::*;
use crate::local::memory::new_memory_catalog_list;
use crate::local::memory::{
new_memory_catalog_list, MemoryCatalogProvider, MemorySchemaProvider,
};
use crate::CatalogList;
#[tokio::test]
async fn test_tables() {
let catalog_list = new_memory_catalog_list().unwrap();
let schema = catalog_list
.catalog(DEFAULT_CATALOG_NAME)
.unwrap()
.unwrap()
.schema(DEFAULT_SCHEMA_NAME)
.unwrap()
.unwrap();
let catalog_provider = Arc::new(MemoryCatalogProvider::default());
let schema = Arc::new(MemorySchemaProvider::new());
schema
.register_table("test_table".to_string(), Arc::new(NumbersTable::default()))
.unwrap();
catalog_provider
.register_schema("test_schema".to_string(), schema)
.unwrap();
catalog_list
.register_catalog("test_catalog".to_string(), catalog_provider)
.unwrap();
let tables = Tables::new(catalog_list, "test_engine".to_string());
let tables_stream = tables.scan(&None, &[], None).await.unwrap();
let session_ctx = SessionContext::new();
let mut tables_stream = tables_stream.execute(0, session_ctx.task_ctx()).unwrap();
let tables_stream = tables.scan(&None, &[], None).await.unwrap();
let mut tables_stream = tables_stream
.execute(0, Arc::new(RuntimeEnv::default()))
.await
.unwrap();
if let Some(t) = tables_stream.next().await {
let batch = t.unwrap();
let batch = t.unwrap().df_recordbatch;
assert_eq!(1, batch.num_rows());
assert_eq!(4, batch.num_columns());
assert_eq!(&DataType::Utf8, batch.column(0).data_type());
assert_eq!(&DataType::Utf8, batch.column(1).data_type());
assert_eq!(&DataType::Utf8, batch.column(2).data_type());
assert_eq!(&DataType::Utf8, batch.column(3).data_type());
assert_eq!(
ConcreteDataType::string_datatype(),
batch.column(0).data_type()
);
assert_eq!(
ConcreteDataType::string_datatype(),
batch.column(1).data_type()
);
assert_eq!(
ConcreteDataType::string_datatype(),
batch.column(2).data_type()
);
assert_eq!(
ConcreteDataType::string_datatype(),
batch.column(3).data_type()
);
assert_eq!(
"greptime",
batch.column(0).get_ref(0).as_string().unwrap().unwrap()
"test_catalog",
batch
.column(0)
.as_any()
.downcast_ref::<Utf8Array<i32>>()
.unwrap()
.value(0)
);
assert_eq!(
"public",
batch.column(1).get_ref(0).as_string().unwrap().unwrap()
"test_schema",
batch
.column(1)
.as_any()
.downcast_ref::<Utf8Array<i32>>()
.unwrap()
.value(0)
);
assert_eq!(
"test_table",
batch.column(2).get_ref(0).as_string().unwrap().unwrap()
batch
.column(2)
.as_any()
.downcast_ref::<Utf8Array<i32>>()
.unwrap()
.value(0)
);
assert_eq!(
"test_engine",
batch.column(3).get_ref(0).as_string().unwrap().unwrap()
batch
.column(3)
.as_any()
.downcast_ref::<Utf8Array<i32>>()
.unwrap()
.value(0)
);
} else {
panic!("Record batch should not be empty!")


@@ -1,132 +0,0 @@
// Copyright 2022 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#[cfg(test)]
mod tests {
use std::sync::Arc;
use catalog::local::LocalCatalogManager;
use catalog::{CatalogManager, RegisterTableRequest};
use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
use common_telemetry::{error, info};
use mito::config::EngineConfig;
use table::table::numbers::NumbersTable;
use table::TableRef;
use tokio::sync::Mutex;
async fn create_local_catalog_manager() -> Result<LocalCatalogManager, catalog::error::Error> {
let (_dir, object_store) =
mito::table::test_util::new_test_object_store("setup_mock_engine_and_table").await;
let mock_engine = Arc::new(mito::table::test_util::MockMitoEngine::new(
EngineConfig::default(),
mito::table::test_util::MockEngine::default(),
object_store,
));
let catalog_manager = LocalCatalogManager::try_new(mock_engine).await.unwrap();
catalog_manager.start().await?;
Ok(catalog_manager)
}
#[tokio::test]
async fn test_duplicate_register() {
let catalog_manager = create_local_catalog_manager().await.unwrap();
let request = RegisterTableRequest {
catalog: DEFAULT_CATALOG_NAME.to_string(),
schema: DEFAULT_SCHEMA_NAME.to_string(),
table_name: "test_table".to_string(),
table_id: 42,
table: Arc::new(NumbersTable::new(42)),
};
assert!(catalog_manager
.register_table(request.clone())
.await
.unwrap());
// register table with same table id will succeed with 0 as return val.
assert!(!catalog_manager.register_table(request).await.unwrap());
let err = catalog_manager
.register_table(RegisterTableRequest {
catalog: DEFAULT_CATALOG_NAME.to_string(),
schema: DEFAULT_SCHEMA_NAME.to_string(),
table_name: "test_table".to_string(),
table_id: 43,
table: Arc::new(NumbersTable::new(43)),
})
.await
.unwrap_err();
assert!(
err.to_string()
.contains("Table `greptime.public.test_table` already exists"),
"Actual error message: {}",
err
);
}
#[test]
fn test_concurrent_register() {
common_telemetry::init_default_ut_logging();
let rt = Arc::new(tokio::runtime::Builder::new_multi_thread().build().unwrap());
let catalog_manager =
Arc::new(rt.block_on(async { create_local_catalog_manager().await.unwrap() }));
let succeed: Arc<Mutex<Option<TableRef>>> = Arc::new(Mutex::new(None));
let mut handles = Vec::with_capacity(8);
for i in 0..8 {
let catalog = catalog_manager.clone();
let succeed = succeed.clone();
let handle = rt.spawn(async move {
let table_id = 42 + i;
let table = Arc::new(NumbersTable::new(table_id));
let req = RegisterTableRequest {
catalog: DEFAULT_CATALOG_NAME.to_string(),
schema: DEFAULT_SCHEMA_NAME.to_string(),
table_name: "test_table".to_string(),
table_id,
table: table.clone(),
};
match catalog.register_table(req).await {
Ok(res) => {
if res {
let mut succeed = succeed.lock().await;
info!("Successfully registered table: {}", table_id);
*succeed = Some(table);
}
}
Err(_) => {
error!("Failed to register table {}", table_id);
}
}
});
handles.push(handle);
}
rt.block_on(async move {
for handle in handles {
handle.await.unwrap();
}
let guard = succeed.lock().await;
let table = guard.as_ref().unwrap();
let table_registered = catalog_manager
.table(DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, "test_table")
.unwrap()
.unwrap();
assert_eq!(
table_registered.table_info().ident.table_id,
table.table_info().ident.table_id
);
});
}
}


@@ -1,18 +1,3 @@
// Copyright 2022 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::collections::btree_map::Entry;
use std::collections::{BTreeMap, HashMap};
use std::fmt::{Display, Formatter};
use std::str::FromStr;
@@ -27,7 +12,7 @@ use datatypes::data_type::ConcreteDataType;
use datatypes::schema::{ColumnSchema, Schema};
use datatypes::vectors::StringVector;
use serde::Serializer;
use table::engine::{EngineContext, TableEngine, TableReference};
use table::engine::{EngineContext, TableEngine};
use table::metadata::TableId;
use table::requests::{AlterTableRequest, CreateTableRequest, DropTableRequest, OpenTableRequest};
use table::test_util::MemTable;
@@ -83,34 +68,6 @@ impl KvBackend for MockKvBackend {
Ok(())
}
async fn compare_and_set(
&self,
key: &[u8],
expect: &[u8],
val: &[u8],
) -> Result<Result<(), Option<Vec<u8>>>, Error> {
let mut map = self.map.write().await;
let existing = map.entry(key.to_vec());
match existing {
Entry::Vacant(e) => {
if expect.is_empty() {
e.insert(val.to_vec());
Ok(Ok(()))
} else {
Ok(Err(None))
}
}
Entry::Occupied(mut existing) => {
if existing.get() == expect {
existing.insert(val.to_vec());
Ok(Ok(()))
} else {
Ok(Err(Some(existing.get().clone())))
}
}
}
}
async fn delete_range(&self, key: &[u8], end: &[u8]) -> Result<(), Error> {
let start = key.to_vec();
let end = end.to_vec();
@@ -165,7 +122,6 @@ impl TableEngine for MockTableEngine {
table_id,
catalog_name,
schema_name,
vec![0],
)) as Arc<_>;
let mut tables = self.tables.write().await;
@@ -189,35 +145,19 @@ impl TableEngine for MockTableEngine {
unimplemented!()
}
fn get_table<'a>(
&self,
_ctx: &EngineContext,
table_ref: &'a TableReference,
) -> table::Result<Option<TableRef>> {
futures::executor::block_on(async {
Ok(self
.tables
.read()
.await
.get(&table_ref.to_string())
.cloned())
})
fn get_table(&self, _ctx: &EngineContext, name: &str) -> table::Result<Option<TableRef>> {
futures::executor::block_on(async { Ok(self.tables.read().await.get(name).cloned()) })
}
fn table_exists<'a>(&self, _ctx: &EngineContext, table_ref: &'a TableReference) -> bool {
futures::executor::block_on(async {
self.tables
.read()
.await
.contains_key(&table_ref.to_string())
})
fn table_exists(&self, _ctx: &EngineContext, name: &str) -> bool {
futures::executor::block_on(async { self.tables.read().await.contains_key(name) })
}
async fn drop_table(
&self,
_ctx: &EngineContext,
_request: DropTableRequest,
) -> table::Result<bool> {
) -> table::Result<()> {
unimplemented!()
}
}


@@ -1,17 +1,3 @@
// Copyright 2022 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#![feature(assert_matches)]
mod mock;
@@ -22,12 +8,12 @@ mod tests {
use std::collections::HashSet;
use std::sync::Arc;
use catalog::helper::{CatalogKey, CatalogValue, SchemaKey, SchemaValue};
use catalog::remote::{
KvBackend, KvBackendRef, RemoteCatalogManager, RemoteCatalogProvider, RemoteSchemaProvider,
};
use catalog::{CatalogList, CatalogManager, RegisterTableRequest};
use catalog::{CatalogManager, CatalogManagerRef, RegisterTableRequest};
use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
use common_catalog::{CatalogKey, CatalogValue, SchemaKey, SchemaValue};
use datatypes::schema::Schema;
use futures_util::StreamExt;
use table::engine::{EngineContext, TableEngineRef};
@@ -38,17 +24,19 @@ mod tests {
#[tokio::test]
async fn test_backend() {
common_telemetry::init_default_ut_logging();
let node_id = 42;
let backend = MockKvBackend::default();
let default_catalog_key = CatalogKey {
catalog_name: DEFAULT_CATALOG_NAME.to_string(),
node_id,
}
.to_string();
backend
.set(
default_catalog_key.as_bytes(),
&CatalogValue {}.as_bytes().unwrap(),
&CatalogValue {}.to_bytes().unwrap(),
)
.await
.unwrap();
@@ -56,10 +44,11 @@ mod tests {
let schema_key = SchemaKey {
catalog_name: DEFAULT_CATALOG_NAME.to_string(),
schema_name: DEFAULT_SCHEMA_NAME.to_string(),
node_id,
}
.to_string();
backend
.set(schema_key.as_bytes(), &SchemaValue {}.as_bytes().unwrap())
.set(schema_key.as_bytes(), &SchemaValue {}.to_bytes().unwrap())
.await
.unwrap();
@@ -70,14 +59,12 @@ mod tests {
res.insert(String::from_utf8_lossy(&kv.0).to_string());
}
assert_eq!(
vec!["__c-greptime".to_string()],
vec!["__c-greptime-42".to_string()],
res.into_iter().collect::<Vec<_>>()
);
}
async fn prepare_components(
node_id: u64,
) -> (KvBackendRef, TableEngineRef, Arc<RemoteCatalogManager>) {
async fn prepare_components(node_id: u64) -> (KvBackendRef, TableEngineRef, CatalogManagerRef) {
let backend = Arc::new(MockKvBackend::default()) as KvBackendRef;
let table_engine = Arc::new(MockTableEngine::default());
let catalog_manager =
@@ -127,7 +114,6 @@ mod tests {
table_name: table_name.clone(),
desc: None,
schema: table_schema.clone(),
region_numbers: vec![0],
primary_key_indices: vec![],
create_if_not_exists: false,
table_options: Default::default(),
@@ -168,7 +154,7 @@ mod tests {
.schema(DEFAULT_SCHEMA_NAME)
.unwrap()
.unwrap();
assert_eq!(vec!["numbers"], default_schema.table_names().unwrap());
assert_eq!(Vec::<String>::new(), default_schema.table_names().unwrap());
// register a new table with an nonexistent catalog
let catalog_name = DEFAULT_CATALOG_NAME.to_string();
@@ -187,7 +173,6 @@ mod tests {
table_name: table_name.clone(),
desc: None,
schema: table_schema.clone(),
region_numbers: vec![0],
primary_key_indices: vec![],
create_if_not_exists: false,
table_options: Default::default(),
@@ -202,15 +187,8 @@ mod tests {
table_id,
table,
};
assert!(catalog_manager.register_table(reg_req).await.unwrap());
assert_eq!(
HashSet::from([table_name, "numbers".to_string()]),
default_schema
.table_names()
.unwrap()
.into_iter()
.collect::<HashSet<_>>()
);
assert_eq!(1, catalog_manager.register_table(reg_req).await.unwrap());
assert_eq!(vec![table_name], default_schema.table_names().unwrap());
}
#[tokio::test]
@@ -222,8 +200,8 @@ mod tests {
let schema_name = "nonexistent_schema".to_string();
let catalog = Arc::new(RemoteCatalogProvider::new(
catalog_name.clone(),
backend.clone(),
node_id,
backend.clone(),
));
// register catalog to catalog manager
@@ -247,7 +225,6 @@ mod tests {
table_name: "".to_string(),
desc: None,
schema: Arc::new(Schema::new(vec![])),
region_numbers: vec![0],
primary_key_indices: vec![],
create_if_not_exists: false,
table_options: Default::default(),
@@ -287,7 +264,7 @@ mod tests {
.register_schema(schema_name.clone(), schema.clone())
.expect("Register schema should not fail");
assert!(prev.is_none());
assert!(catalog_manager.register_table(reg_req).await.unwrap());
assert_eq!(1, catalog_manager.register_table(reg_req).await.unwrap());
assert_eq!(
HashSet::from([schema_name.clone()]),


@@ -2,7 +2,6 @@
name = "client"
version = "0.1.0"
edition = "2021"
license = "Apache-2.0"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]
@@ -11,11 +10,12 @@ async-stream = "0.3"
common-base = { path = "../common/base" }
common-error = { path = "../common/error" }
common-grpc = { path = "../common/grpc" }
common-grpc-expr = { path = "../common/grpc-expr" }
common-query = { path = "../common/query" }
common-recordbatch = { path = "../common/recordbatch" }
common-time = { path = "../common/time" }
datafusion = "14.0.0"
datafusion = { git = "https://github.com/apache/arrow-datafusion.git", branch = "arrow2", features = [
"simd",
] }
datatypes = { path = "../datatypes" }
enum_dispatch = "0.3"
parking_lot = "0.12"


@@ -1,18 +1,6 @@
// Copyright 2022 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::collections::HashMap;
use api::v1::*;
use api::v1::{codec::InsertBatch, *};
use client::{Client, Database};
fn main() {
@@ -27,21 +15,19 @@ async fn run() {
let client = Client::with_urls(vec!["127.0.0.1:3001"]);
let db = Database::new("greptime", client);
let (columns, row_count) = insert_data();
let expr = InsertExpr {
schema_name: "public".to_string(),
table_name: "demo".to_string(),
region_number: 0,
columns,
row_count,
expr: Some(insert_expr::Expr::Values(insert_expr::Values {
values: insert_batches(),
})),
options: HashMap::default(),
};
db.insert(expr).await.unwrap();
}
fn insert_data() -> (Vec<Column>, u32) {
fn insert_batches() -> Vec<Vec<u8>> {
const SEMANTIC_TAG: i32 = 0;
const SEMANTIC_FIELD: i32 = 1;
const SEMANTIC_FEILD: i32 = 1;
const SEMANTIC_TS: i32 = 2;
let row_count = 4;
@@ -69,7 +55,7 @@ fn insert_data() -> (Vec<Column>, u32) {
};
let cpu_column = Column {
column_name: "cpu".to_string(),
semantic_type: SEMANTIC_FIELD,
semantic_type: SEMANTIC_FEILD,
values: Some(cpu_vals),
null_mask: vec![2],
..Default::default()
@@ -81,7 +67,7 @@ fn insert_data() -> (Vec<Column>, u32) {
};
let mem_column = Column {
column_name: "memory".to_string(),
semantic_type: SEMANTIC_FIELD,
semantic_type: SEMANTIC_FEILD,
values: Some(mem_vals),
null_mask: vec![4],
..Default::default()
@@ -99,8 +85,9 @@ fn insert_data() -> (Vec<Column>, u32) {
..Default::default()
};
(
vec![host_column, cpu_column, mem_column, ts_column],
let insert_batch = InsertBatch {
columns: vec![host_column, cpu_column, mem_column, ts_column],
row_count,
)
};
vec![insert_batch.into()]
}


@@ -1,25 +1,12 @@
// Copyright 2022 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use api::v1::{ColumnDataType, ColumnDef, CreateExpr};
use client::admin::Admin;
use client::{Client, Database};
use client::{admin::Admin, Client, Database};
use prost_09::Message;
use substrait_proto::protobuf::plan_rel::RelType as PlanRelType;
use substrait_proto::protobuf::read_rel::{NamedTable, ReadType};
use substrait_proto::protobuf::rel::RelType;
use substrait_proto::protobuf::{PlanRel, ReadRel, Rel};
use substrait_proto::protobuf::{
plan_rel::RelType as PlanRelType,
read_rel::{NamedTable, ReadType},
rel::RelType,
PlanRel, ReadRel, Rel,
};
use tracing::{event, Level};
fn main() {
@@ -41,7 +28,7 @@ async fn run() {
column_defs: vec![
ColumnDef {
name: "timestamp".to_string(),
datatype: ColumnDataType::TimestampMillisecond as i32,
datatype: ColumnDataType::Timestamp as i32,
is_nullable: false,
default_constraint: None,
},
@@ -62,8 +49,6 @@ async fn run() {
primary_keys: vec!["key".to_string()],
create_if_not_exists: false,
table_options: Default::default(),
table_id: Some(1024),
region_ids: vec![0],
};
let admin = Admin::new("create table", client.clone());


@@ -0,0 +1,37 @@
use std::sync::Arc;
use client::{Client, Database};
use common_grpc::MockExecution;
use datafusion::physical_plan::{
expressions::Column, projection::ProjectionExec, ExecutionPlan, PhysicalExpr,
};
use tracing::{event, Level};
fn main() {
tracing::subscriber::set_global_default(tracing_subscriber::FmtSubscriber::builder().finish())
.unwrap();
run();
}
#[tokio::main]
async fn run() {
let client = Client::with_urls(vec!["127.0.0.1:3001"]);
let db = Database::new("greptime", client);
let physical = mock_physical_plan();
let result = db.physical_plan(physical, None).await;
event!(Level::INFO, "result: {:#?}", result);
}
fn mock_physical_plan() -> Arc<dyn ExecutionPlan> {
let id_expr = Arc::new(Column::new("id", 0)) as Arc<dyn PhysicalExpr>;
let age_expr = Arc::new(Column::new("age", 2)) as Arc<dyn PhysicalExpr>;
let expr = vec![(id_expr, "id".to_string()), (age_expr, "age".to_string())];
let input =
Arc::new(MockExecution::new("mock_input_exec".to_string())) as Arc<dyn ExecutionPlan>;
let projection = ProjectionExec::try_new(expr, input).unwrap();
Arc::new(projection)
}


@@ -1,17 +1,3 @@
// Copyright 2022 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use client::{Client, Database, Select};
use tracing::{event, Level};


@@ -1,24 +1,12 @@
// Copyright 2022 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use api::v1::*;
use common_error::prelude::StatusCode;
use common_query::Output;
use snafu::prelude::*;
use crate::database::PROTOCOL_VERSION;
use crate::{error, Client, Result};
use crate::error;
use crate::Client;
use crate::Result;
#[derive(Clone, Debug)]
pub struct Admin {
@@ -58,19 +46,7 @@ impl Admin {
header: Some(header),
expr: Some(admin_expr::Expr::Alter(expr)),
};
self.do_request(expr).await
}
pub async fn drop_table(&self, expr: DropTableExpr) -> Result<AdminResult> {
let header = ExprHeader {
version: PROTOCOL_VERSION,
};
let expr = AdminExpr {
header: Some(header),
expr: Some(admin_expr::Expr::DropTable(expr)),
};
self.do_request(expr).await
Ok(self.do_requests(vec![expr]).await?.remove(0))
}
/// Invariants: the lengths of input vec (`Vec<AdminExpr>`) and output vec (`Vec<AdminResult>`) are equal.
@@ -94,17 +70,6 @@ impl Admin {
);
Ok(results)
}
pub async fn create_database(&self, expr: CreateDatabaseExpr) -> Result<AdminResult> {
let header = ExprHeader {
version: PROTOCOL_VERSION,
};
let expr = AdminExpr {
header: Some(header),
expr: Some(admin_expr::Expr::CreateDatabase(expr)),
};
Ok(self.do_requests(vec![expr]).await?.remove(0))
}
}
pub fn admin_result_to_output(admin_result: AdminResult) -> Result<Output> {


@@ -1,28 +1,17 @@
// Copyright 2022 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::sync::Arc;
use api::v1::greptime_client::GreptimeClient;
use api::v1::*;
use common_grpc::channel_manager::ChannelManager;
use parking_lot::RwLock;
use snafu::{OptionExt, ResultExt};
use snafu::OptionExt;
use snafu::ResultExt;
use tonic::transport::Channel;
use crate::load_balance::{LoadBalance, Loadbalancer};
use crate::{error, Result};
use crate::error;
use crate::load_balance::LoadBalance;
use crate::load_balance::Loadbalancer;
use crate::Result;
#[derive(Clone, Debug, Default)]
pub struct Client {
@@ -139,11 +128,8 @@ impl Client {
.context(error::IllegalGrpcClientStateSnafu {
err_msg: "No available peer found",
})?;
let mut client = self.make_client(&peer)?;
let result = client
.batch(req)
.await
.context(error::TonicStatusSnafu { addr: peer })?;
let mut client = self.make_client(peer)?;
let result = client.batch(req).await.context(error::TonicStatusSnafu)?;
Ok(result.into_inner())
}


@@ -1,35 +1,31 @@
// Copyright 2022 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::sync::Arc;
use api::helper::ColumnDataTypeWrapper;
use api::v1::codec::SelectResult as GrpcSelectResult;
use api::v1::column::SemanticType;
use api::v1::{
object_expr, object_result, select_expr, DatabaseRequest, ExprHeader, InsertExpr,
MutateResult as GrpcMutateResult, ObjectExpr, ObjectResult as GrpcObjectResult, SelectExpr,
column::Values, object_expr, object_result, select_expr, Column, ColumnDataType,
DatabaseRequest, ExprHeader, InsertExpr, MutateResult as GrpcMutateResult, ObjectExpr,
ObjectResult as GrpcObjectResult, PhysicalPlan, SelectExpr,
};
use common_base::BitVec;
use common_error::status_code::StatusCode;
use common_grpc_expr::column_to_vector;
use common_grpc::AsExcutionPlan;
use common_grpc::DefaultAsPlanImpl;
use common_query::Output;
use common_recordbatch::{RecordBatch, RecordBatches};
use common_time::date::Date;
use common_time::datetime::DateTime;
use common_time::timestamp::Timestamp;
use datafusion::physical_plan::ExecutionPlan;
use datatypes::prelude::*;
use datatypes::schema::{ColumnSchema, Schema};
use snafu::{ensure, OptionExt, ResultExt};
use crate::error::{ColumnToVectorSnafu, ConvertSchemaSnafu, DatanodeSnafu, DecodeSelectSnafu};
use crate::{error, Client, Result};
use crate::error;
use crate::{
error::{ConvertSchemaSnafu, DatanodeSnafu, DecodeSelectSnafu, EncodePhysicalSnafu},
Client, Result,
};
pub const PROTOCOL_VERSION: u32 = 1;
@@ -89,6 +85,24 @@ impl Database {
self.do_select(select_expr).await
}
pub async fn physical_plan(
&self,
physical: Arc<dyn ExecutionPlan>,
original_ql: Option<String>,
) -> Result<ObjectResult> {
let plan = DefaultAsPlanImpl::try_from_physical_plan(physical.clone())
.context(EncodePhysicalSnafu { physical })?
.bytes;
let original_ql = original_ql.unwrap_or_default();
let select_expr = SelectExpr {
expr: Some(select_expr::Expr::PhysicalPlan(PhysicalPlan {
original_ql: original_ql.into_bytes(),
plan,
})),
};
self.do_select(select_expr).await
}
pub async fn logical_plan(&self, logical_plan: Vec<u8>) -> Result<ObjectResult> {
let select_expr = SelectExpr {
expr: Some(select_expr::Expr::LogicalPlan(logical_plan)),
@@ -110,6 +124,8 @@ impl Database {
obj_result.try_into()
}
// TODO(jiachun) update/delete
pub async fn object(&self, expr: ObjectExpr) -> Result<GrpcObjectResult> {
let res = self.objects(vec![expr]).await?.pop().unwrap();
Ok(res)
@@ -185,9 +201,7 @@ impl TryFrom<ObjectResult> for Output {
let vectors = select
.columns
.iter()
.map(|column| {
column_to_vector(column, select.row_count).context(ColumnToVectorSnafu)
})
.map(|column| column_to_vector(column, select.row_count))
.collect::<Result<Vec<VectorRef>>>()?;
let column_schemas = select
@@ -197,12 +211,7 @@ impl TryFrom<ObjectResult> for Output {
.map(|(column, vector)| {
let datatype = vector.data_type();
// nullable or not, does not affect the output
let mut column_schema =
ColumnSchema::new(&column.column_name, datatype, true);
if column.semantic_type == SemanticType::Timestamp as i32 {
column_schema = column_schema.with_time_index(true);
}
column_schema
ColumnSchema::new(&column.column_name, datatype, true)
})
.collect::<Vec<ColumnSchema>>();
@@ -230,11 +239,100 @@ impl TryFrom<ObjectResult> for Output {
}
}
fn column_to_vector(column: &Column, rows: u32) -> Result<VectorRef> {
let wrapper =
ColumnDataTypeWrapper::try_new(column.datatype).context(error::ColumnDataTypeSnafu)?;
let column_datatype = wrapper.datatype();
let rows = rows as usize;
let mut vector = VectorBuilder::with_capacity(wrapper.into(), rows);
if let Some(values) = &column.values {
let values = collect_column_values(column_datatype, values);
let mut values_iter = values.into_iter();
let null_mask = BitVec::from_slice(&column.null_mask);
let mut nulls_iter = null_mask.iter().by_vals().fuse();
for i in 0..rows {
if let Some(true) = nulls_iter.next() {
vector.push_null();
} else {
let value_ref = values_iter.next().context(error::InvalidColumnProtoSnafu {
err_msg: format!(
"value not found at position {} of column {}",
i, &column.column_name
),
})?;
vector
.try_push_ref(value_ref)
.context(error::CreateVectorSnafu)?;
}
}
} else {
(0..rows).for_each(|_| vector.push_null());
}
Ok(vector.finish())
}
fn collect_column_values(column_datatype: ColumnDataType, values: &Values) -> Vec<ValueRef> {
macro_rules! collect_values {
($value: expr, $mapper: expr) => {
$value.iter().map($mapper).collect::<Vec<ValueRef>>()
};
}
match column_datatype {
ColumnDataType::Boolean => collect_values!(values.bool_values, |v| ValueRef::from(*v)),
ColumnDataType::Int8 => collect_values!(values.i8_values, |v| ValueRef::from(*v as i8)),
ColumnDataType::Int16 => {
collect_values!(values.i16_values, |v| ValueRef::from(*v as i16))
}
ColumnDataType::Int32 => {
collect_values!(values.i32_values, |v| ValueRef::from(*v))
}
ColumnDataType::Int64 => {
collect_values!(values.i64_values, |v| ValueRef::from(*v as i64))
}
ColumnDataType::Uint8 => {
collect_values!(values.u8_values, |v| ValueRef::from(*v as u8))
}
ColumnDataType::Uint16 => {
collect_values!(values.u16_values, |v| ValueRef::from(*v as u16))
}
ColumnDataType::Uint32 => {
collect_values!(values.u32_values, |v| ValueRef::from(*v))
}
ColumnDataType::Uint64 => {
collect_values!(values.u64_values, |v| ValueRef::from(*v as u64))
}
ColumnDataType::Float32 => collect_values!(values.f32_values, |v| ValueRef::from(*v)),
ColumnDataType::Float64 => collect_values!(values.f64_values, |v| ValueRef::from(*v)),
ColumnDataType::Binary => {
collect_values!(values.binary_values, |v| ValueRef::from(v.as_slice()))
}
ColumnDataType::String => {
collect_values!(values.string_values, |v| ValueRef::from(v.as_str()))
}
ColumnDataType::Date => {
collect_values!(values.date_values, |v| ValueRef::Date(Date::new(*v)))
}
ColumnDataType::Datetime => {
collect_values!(values.datetime_values, |v| ValueRef::DateTime(
DateTime::new(*v)
))
}
ColumnDataType::Timestamp => {
collect_values!(values.ts_millis_values, |v| ValueRef::Timestamp(
Timestamp::from_millis(*v)
))
}
}
}
#[cfg(test)]
mod tests {
use api::helper::ColumnDataTypeWrapper;
use api::v1::Column;
use common_grpc::select::{null_mask, values};
use datanode::server::grpc::select::{null_mask, values};
use datatypes::vectors::{
BinaryVector, BooleanVector, DateTimeVector, DateVector, Float32Vector, Float64Vector,
Int16Vector, Int32Vector, Int64Vector, Int8Vector, StringVector, UInt16Vector,
@@ -318,11 +416,12 @@ mod tests {
fn create_test_column(vector: VectorRef) -> Column {
let wrapper: ColumnDataTypeWrapper = vector.data_type().try_into().unwrap();
let array = vector.to_arrow_array();
Column {
column_name: "test".to_string(),
semantic_type: 1,
values: Some(values(&[vector.clone()]).unwrap()),
null_mask: null_mask(&[vector.clone()], vector.len()),
values: Some(values(&[array.clone()]).unwrap()),
null_mask: null_mask(&vec![array], vector.len()),
datatype: wrapper.datatype() as i32,
}
}


@@ -1,17 +1,3 @@
// Copyright 2022 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::any::Any;
use std::sync::Arc;
@@ -39,9 +25,8 @@ pub enum Error {
#[snafu(display("Missing result header"))]
MissingHeader,
#[snafu(display("Tonic internal error, addr: {}, source: {}", addr, source))]
#[snafu(display("Tonic internal error, source: {}", source))]
TonicStatus {
addr: String,
source: tonic::Status,
backtrace: Backtrace,
},
@@ -62,12 +47,24 @@ pub enum Error {
#[snafu(display("Mutate result has failure {}", failure))]
MutateFailure { failure: u32, backtrace: Backtrace },
#[snafu(display("Invalid column proto: {}", err_msg))]
InvalidColumnProto {
err_msg: String,
backtrace: Backtrace,
},
#[snafu(display("Column datatype error, source: {}", source))]
ColumnDataType {
#[snafu(backtrace)]
source: api::error::Error,
},
#[snafu(display("Failed to create vector, source: {}", source))]
CreateVector {
#[snafu(backtrace)]
source: datatypes::error::Error,
},
#[snafu(display("Failed to create RecordBatches, source: {}", source))]
CreateRecordBatches {
#[snafu(backtrace)]
@@ -99,12 +96,6 @@ pub enum Error {
#[snafu(backtrace)]
source: common_grpc::error::Error,
},
#[snafu(display("Failed to convert column to vector, source: {}", source))]
ColumnToVector {
#[snafu(backtrace)]
source: common_grpc_expr::error::Error,
},
}
pub type Result<T> = std::result::Result<T, Error>;
@@ -120,13 +111,15 @@ impl ErrorExt for Error {
| Error::Datanode { .. }
| Error::EncodePhysical { .. }
| Error::MutateFailure { .. }
| Error::InvalidColumnProto { .. }
| Error::ColumnDataType { .. }
| Error::MissingField { .. } => StatusCode::Internal,
Error::ConvertSchema { source } => source.status_code(),
Error::ConvertSchema { source } | Error::CreateVector { source } => {
source.status_code()
}
Error::CreateRecordBatches { source } => source.status_code(),
Error::CreateChannel { source, .. } => source.status_code(),
Error::IllegalGrpcClientState { .. } => StatusCode::Unexpected,
Error::ColumnToVector { source, .. } => source.status_code(),
}
}


@@ -1,25 +1,11 @@
// Copyright 2022 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
pub mod admin;
mod client;
mod database;
mod error;
pub mod load_balance;
pub use api;
pub use self::client::Client;
pub use self::database::{Database, ObjectResult, Select};
pub use self::error::{Error, Result};
pub use self::{
client::Client,
database::{Database, ObjectResult, Select},
error::{Error, Result},
};


@@ -1,17 +1,3 @@
// Copyright 2022 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use enum_dispatch::enum_dispatch;
use rand::seq::SliceRandom;


@@ -2,27 +2,19 @@
name = "cmd"
version = "0.1.0"
edition = "2021"
default-run = "greptime"
license = "Apache-2.0"
[[bin]]
name = "greptime"
path = "src/bin/greptime.rs"
[dependencies]
anymap = "1.0.0-beta.2"
clap = { version = "3.1", features = ["derive"] }
common-error = { path = "../common/error" }
common-telemetry = { path = "../common/telemetry", features = [
"deadlock_detection",
] }
common-telemetry = { path = "../common/telemetry", features = ["deadlock_detection"] }
datanode = { path = "../datanode" }
frontend = { path = "../frontend" }
futures = "0.3"
meta-client = { path = "../meta-client" }
meta-srv = { path = "../meta-srv" }
serde = "1.0"
servers = { path = "../servers" }
snafu = { version = "0.7", features = ["backtraces"] }
tokio = { version = "1.18", features = ["full"] }
toml = "0.5"
@@ -30,6 +22,3 @@ toml = "0.5"
[dev-dependencies]
serde = "1.0"
tempdir = "0.3"
[build-dependencies]
build-data = "0.1.3"


@@ -1,29 +0,0 @@
// Copyright 2022 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
const DEFAULT_VALUE: &str = "unknown";
fn main() {
println!(
"cargo:rustc-env=GIT_COMMIT={}",
build_data::get_git_commit().unwrap_or_else(|_| DEFAULT_VALUE.to_string())
);
println!(
"cargo:rustc-env=GIT_BRANCH={}",
build_data::get_git_branch().unwrap_or_else(|_| DEFAULT_VALUE.to_string())
);
println!(
"cargo:rustc-env=GIT_DIRTY={}",
build_data::get_git_dirty().map_or(DEFAULT_VALUE.to_string(), |v| v.to_string())
);
}


@@ -1,26 +1,15 @@
// Copyright 2022 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::fmt;
use clap::Parser;
use cmd::datanode;
use cmd::error::Result;
use cmd::{datanode, frontend, metasrv, standalone};
use common_telemetry::logging::{error, info};
use cmd::frontend;
use cmd::metasrv;
use common_telemetry::logging::error;
use common_telemetry::logging::info;
#[derive(Parser)]
#[clap(name = "greptimedb", version = print_version())]
#[clap(name = "greptimedb")]
struct Command {
#[clap(long, default_value = "/tmp/greptimedb/logs")]
log_dir: String,
@@ -44,8 +33,6 @@ enum SubCommand {
Frontend(frontend::Command),
#[clap(name = "metasrv")]
Metasrv(metasrv::Command),
#[clap(name = "standalone")]
Standalone(standalone::Command),
}
impl SubCommand {
@@ -54,7 +41,6 @@ impl SubCommand {
SubCommand::Datanode(cmd) => cmd.run().await,
SubCommand::Frontend(cmd) => cmd.run().await,
SubCommand::Metasrv(cmd) => cmd.run().await,
SubCommand::Standalone(cmd) => cmd.run().await,
}
}
}
@@ -65,24 +51,10 @@ impl fmt::Display for SubCommand {
SubCommand::Datanode(..) => write!(f, "greptime-datanode"),
SubCommand::Frontend(..) => write!(f, "greptime-frontend"),
SubCommand::Metasrv(..) => write!(f, "greptime-metasrv"),
SubCommand::Standalone(..) => write!(f, "greptime-standalone"),
}
}
}
fn print_version() -> &'static str {
concat!(
"\nbranch: ",
env!("GIT_BRANCH"),
"\ncommit: ",
env!("GIT_COMMIT"),
"\ndirty: ",
env!("GIT_DIRTY"),
"\nversion: ",
env!("CARGO_PKG_VERSION")
)
}
#[tokio::main]
async fn main() -> Result<()> {
let cmd = Command::parse();


@@ -1,25 +1,9 @@
// Copyright 2022 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use clap::Parser;
use common_telemetry::logging;
use datanode::datanode::{Datanode, DatanodeOptions, ObjectStoreConfig};
use meta_client::MetaClientOpts;
use servers::Mode;
use datanode::datanode::{Datanode, DatanodeOptions};
use snafu::ResultExt;
use crate::error::{Error, MissingConfigSnafu, Result, StartDatanodeSnafu};
use crate::error::{Error, Result, StartDatanodeSnafu};
use crate::toml_loader;
#[derive(Parser)]
@@ -47,22 +31,18 @@ impl SubCommand {
}
}
#[derive(Debug, Parser, Default)]
#[derive(Debug, Parser)]
struct StartCommand {
#[clap(long)]
node_id: Option<u64>,
http_addr: Option<String>,
#[clap(long)]
rpc_addr: Option<String>,
#[clap(long)]
mysql_addr: Option<String>,
#[clap(long)]
metasrv_addr: Option<String>,
postgres_addr: Option<String>,
#[clap(short, long)]
config_file: Option<String>,
#[clap(long)]
data_dir: Option<String>,
#[clap(long)]
wal_dir: Option<String>,
}
impl StartCommand {
@@ -91,141 +71,55 @@ impl TryFrom<StartCommand> for DatanodeOptions {
DatanodeOptions::default()
};
if let Some(addr) = cmd.http_addr {
opts.http_addr = addr;
}
if let Some(addr) = cmd.rpc_addr {
opts.rpc_addr = addr;
}
if let Some(addr) = cmd.mysql_addr {
opts.mysql_addr = addr;
}
if let Some(node_id) = cmd.node_id {
opts.node_id = Some(node_id);
if let Some(addr) = cmd.postgres_addr {
opts.postgres_addr = addr;
}
if let Some(meta_addr) = cmd.metasrv_addr {
opts.meta_client_opts
.get_or_insert_with(MetaClientOpts::default)
.metasrv_addrs = meta_addr
.split(',')
.map(&str::trim)
.map(&str::to_string)
.collect::<_>();
opts.mode = Mode::Distributed;
}
if let (Mode::Distributed, None) = (&opts.mode, &opts.node_id) {
return MissingConfigSnafu {
msg: "Missing node id option",
}
.fail();
}
if let Some(data_dir) = cmd.data_dir {
opts.storage = ObjectStoreConfig::File { data_dir };
}
if let Some(wal_dir) = cmd.wal_dir {
opts.wal_dir = wal_dir;
}
Ok(opts)
}
}
#[cfg(test)]
mod tests {
use std::assert_matches::assert_matches;
use datanode::datanode::ObjectStoreConfig;
use servers::Mode;
use super::*;
#[test]
fn test_read_from_config_file() {
let cmd = StartCommand {
http_addr: None,
rpc_addr: None,
mysql_addr: None,
postgres_addr: None,
config_file: Some(format!(
"{}/../../config/datanode.example.toml",
std::env::current_dir().unwrap().as_path().to_str().unwrap()
)),
..Default::default()
};
let options: DatanodeOptions = cmd.try_into().unwrap();
assert_eq!("127.0.0.1:3001".to_string(), options.rpc_addr);
assert_eq!("0.0.0.0:3000".to_string(), options.http_addr);
assert_eq!("0.0.0.0:3001".to_string(), options.rpc_addr);
assert_eq!("/tmp/greptimedb/wal".to_string(), options.wal_dir);
assert_eq!("127.0.0.1:4406".to_string(), options.mysql_addr);
assert_eq!("0.0.0.0:3306".to_string(), options.mysql_addr);
assert_eq!(4, options.mysql_runtime_size);
let MetaClientOpts {
metasrv_addrs: metasrv_addr,
timeout_millis,
connect_timeout_millis,
tcp_nodelay,
} = options.meta_client_opts.unwrap();
assert_eq!(vec!["127.0.0.1:3002".to_string()], metasrv_addr);
assert_eq!(5000, connect_timeout_millis);
assert_eq!(3000, timeout_millis);
assert!(!tcp_nodelay);
assert_eq!("0.0.0.0:5432".to_string(), options.postgres_addr);
assert_eq!(4, options.postgres_runtime_size);
match options.storage {
ObjectStoreConfig::File { data_dir } => {
assert_eq!("/tmp/greptimedb/data/".to_string(), data_dir)
}
ObjectStoreConfig::S3 { .. } => unreachable!(),
};
}
#[test]
fn test_try_from_cmd() {
assert_eq!(
Mode::Standalone,
DatanodeOptions::try_from(StartCommand::default())
.unwrap()
.mode
);
let mode = DatanodeOptions::try_from(StartCommand {
node_id: Some(42),
metasrv_addr: Some("127.0.0.1:3002".to_string()),
..Default::default()
})
.unwrap()
.mode;
assert_matches!(mode, Mode::Distributed);
assert!(DatanodeOptions::try_from(StartCommand {
metasrv_addr: Some("127.0.0.1:3002".to_string()),
..Default::default()
})
.is_err());
// Providing node_id but leaving metasrv_addr absent is ok, since metasrv_addr has a default value
DatanodeOptions::try_from(StartCommand {
node_id: Some(42),
..Default::default()
})
.unwrap();
}
#[test]
fn test_merge_config() {
let dn_opts = DatanodeOptions::try_from(StartCommand {
config_file: Some(format!(
"{}/../../config/datanode.example.toml",
std::env::current_dir().unwrap().as_path().to_str().unwrap()
)),
..Default::default()
})
.unwrap();
assert_eq!(Some(42), dn_opts.node_id);
let MetaClientOpts {
metasrv_addrs: metasrv_addr,
timeout_millis,
connect_timeout_millis,
tcp_nodelay,
} = dn_opts.meta_client_opts.unwrap();
assert_eq!(vec!["127.0.0.1:3002".to_string()], metasrv_addr);
assert_eq!(3000, timeout_millis);
assert_eq!(5000, connect_timeout_millis);
assert!(!tcp_nodelay);
}
}

View File

@@ -1,17 +1,3 @@
// Copyright 2022 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::any::Any;
use common_error::prelude::*;
@@ -49,18 +35,6 @@ pub enum Error {
source: toml::de::Error,
backtrace: Backtrace,
},
#[snafu(display("Missing config, msg: {}", msg))]
MissingConfig { msg: String, backtrace: Backtrace },
#[snafu(display("Illegal config: {}", msg))]
IllegalConfig { msg: String, backtrace: Backtrace },
#[snafu(display("Illegal auth config: {}", source))]
IllegalAuthConfig {
#[snafu(backtrace)]
source: servers::auth::Error,
},
}
pub type Result<T> = std::result::Result<T, Error>;
@@ -71,11 +45,7 @@ impl ErrorExt for Error {
Error::StartDatanode { source } => source.status_code(),
Error::StartFrontend { source } => source.status_code(),
Error::StartMetaServer { source } => source.status_code(),
Error::ReadConfig { .. } | Error::ParseConfig { .. } | Error::MissingConfig { .. } => {
StatusCode::InvalidArguments
}
Error::IllegalConfig { .. } => StatusCode::InvalidArguments,
Error::IllegalAuthConfig { .. } => StatusCode::InvalidArguments,
Error::ReadConfig { .. } | Error::ParseConfig { .. } => StatusCode::InvalidArguments,
}
}
@@ -97,7 +67,10 @@ mod tests {
#[test]
fn test_start_node_error() {
fn throw_datanode_error() -> StdResult<datanode::error::Error> {
datanode::error::MissingNodeIdSnafu {}.fail()
datanode::error::MissingFieldSnafu {
field: "test_field",
}
.fail()
}
let e = throw_datanode_error()

View File

@@ -1,18 +1,3 @@
// Copyright 2022 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use anymap::AnyMap;
use clap::Parser;
use frontend::frontend::{Frontend, FrontendOptions};
use frontend::grpc::GrpcOptions;
@@ -21,14 +6,9 @@ use frontend::instance::Instance;
use frontend::mysql::MysqlOptions;
use frontend::opentsdb::OpentsdbOptions;
use frontend::postgres::PostgresOptions;
use meta_client::MetaClientOpts;
use servers::auth::UserProviderRef;
use servers::http::HttpOptions;
use servers::tls::{TlsMode, TlsOption};
use servers::{auth, Mode};
use snafu::ResultExt;
use crate::error::{self, IllegalAuthConfigSnafu, Result};
use crate::error::{self, Result};
use crate::toml_loader;
#[derive(Parser)]
@@ -57,7 +37,7 @@ impl SubCommand {
}
#[derive(Debug, Parser)]
pub struct StartCommand {
struct StartCommand {
#[clap(long)]
http_addr: Option<String>,
#[clap(long)]
@@ -72,43 +52,16 @@ pub struct StartCommand {
config_file: Option<String>,
#[clap(short, long)]
influxdb_enable: Option<bool>,
#[clap(long)]
metasrv_addr: Option<String>,
#[clap(long)]
tls_mode: Option<TlsMode>,
#[clap(long)]
tls_cert_path: Option<String>,
#[clap(long)]
tls_key_path: Option<String>,
#[clap(long)]
user_provider: Option<String>,
}
impl StartCommand {
async fn run(self) -> Result<()> {
let plugins = load_frontend_plugins(&self.user_provider)?;
let opts: FrontendOptions = self.try_into()?;
let mut frontend = Frontend::new(
opts.clone(),
Instance::try_new_distributed(&opts)
.await
.context(error::StartFrontendSnafu)?,
plugins,
);
let opts = self.try_into()?;
let mut frontend = Frontend::new(opts, Instance::new());
frontend.start().await.context(error::StartFrontendSnafu)
}
}
pub fn load_frontend_plugins(user_provider: &Option<String>) -> Result<AnyMap> {
let mut plugins = AnyMap::new();
if let Some(provider) = user_provider {
let provider = auth::user_provider_from_option(provider).context(IllegalAuthConfigSnafu)?;
plugins.insert::<UserProviderRef>(provider);
}
Ok(plugins)
}
impl TryFrom<StartCommand> for FrontendOptions {
type Error = error::Error;
@@ -119,13 +72,8 @@ impl TryFrom<StartCommand> for FrontendOptions {
FrontendOptions::default()
};
let tls_option = TlsOption::new(cmd.tls_mode, cmd.tls_cert_path, cmd.tls_key_path);
if let Some(addr) = cmd.http_addr {
opts.http_options = Some(HttpOptions {
addr,
..Default::default()
});
opts.http_addr = Some(addr);
}
if let Some(addr) = cmd.grpc_addr {
opts.grpc_options = Some(GrpcOptions {
@@ -136,14 +84,12 @@ impl TryFrom<StartCommand> for FrontendOptions {
if let Some(addr) = cmd.mysql_addr {
opts.mysql_options = Some(MysqlOptions {
addr,
tls: tls_option.clone(),
..Default::default()
});
}
if let Some(addr) = cmd.postgres_addr {
opts.postgres_options = Some(PostgresOptions {
addr,
tls: tls_option,
..Default::default()
});
}
@@ -156,26 +102,12 @@ impl TryFrom<StartCommand> for FrontendOptions {
if let Some(enable) = cmd.influxdb_enable {
opts.influxdb_options = Some(InfluxdbOptions { enable });
}
if let Some(metasrv_addr) = cmd.metasrv_addr {
opts.meta_client_opts
.get_or_insert_with(MetaClientOpts::default)
.metasrv_addrs = metasrv_addr
.split(',')
.map(&str::trim)
.map(&str::to_string)
.collect::<Vec<_>>();
opts.mode = Mode::Distributed;
}
Ok(opts)
}
}
#[cfg(test)]
mod tests {
use std::time::Duration;
use servers::auth::{Identity, Password, UserProviderRef};
use super::*;
#[test]
@@ -188,15 +120,10 @@ mod tests {
opentsdb_addr: Some("127.0.0.1:4321".to_string()),
influxdb_enable: Some(false),
config_file: None,
metasrv_addr: None,
tls_mode: None,
tls_cert_path: None,
tls_key_path: None,
user_provider: None,
};
let opts: FrontendOptions = command.try_into().unwrap();
assert_eq!(opts.http_options.as_ref().unwrap().addr, "127.0.0.1:1234");
assert_eq!(opts.http_addr, Some("127.0.0.1:1234".to_string()));
assert_eq!(opts.mysql_options.as_ref().unwrap().addr, "127.0.0.1:5678");
assert_eq!(
opts.postgres_options.as_ref().unwrap().addr,
@@ -227,66 +154,4 @@ mod tests {
assert!(!opts.influxdb_options.unwrap().enable);
}
#[test]
fn test_read_from_config_file() {
let command = StartCommand {
http_addr: None,
grpc_addr: None,
mysql_addr: None,
postgres_addr: None,
opentsdb_addr: None,
influxdb_enable: None,
config_file: Some(format!(
"{}/../../config/frontend.example.toml",
std::env::current_dir().unwrap().as_path().to_str().unwrap()
)),
metasrv_addr: None,
tls_mode: None,
tls_cert_path: None,
tls_key_path: None,
user_provider: None,
};
let fe_opts = FrontendOptions::try_from(command).unwrap();
assert_eq!(Mode::Distributed, fe_opts.mode);
assert_eq!(
"127.0.0.1:4000".to_string(),
fe_opts.http_options.as_ref().unwrap().addr
);
assert_eq!(
Duration::from_secs(30),
fe_opts.http_options.as_ref().unwrap().timeout
);
}
#[tokio::test]
async fn test_try_from_start_command_to_anymap() {
let command = StartCommand {
http_addr: None,
grpc_addr: None,
mysql_addr: None,
postgres_addr: None,
opentsdb_addr: None,
influxdb_enable: None,
config_file: None,
metasrv_addr: None,
tls_mode: None,
tls_cert_path: None,
tls_key_path: None,
user_provider: Some("static_user_provider:cmd:test=test".to_string()),
};
let plugins = load_frontend_plugins(&command.user_provider);
assert!(plugins.is_ok());
let plugins = plugins.unwrap();
let provider = plugins.get::<UserProviderRef>();
assert!(provider.is_some());
let provider = provider.unwrap();
let result = provider
.auth(Identity::UserId("test", None), Password::PlainText("test"))
.await;
assert!(result.is_ok());
}
}

View File

@@ -1,22 +1,5 @@
// Copyright 2022 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#![feature(assert_matches)]
pub mod datanode;
pub mod error;
pub mod frontend;
pub mod metasrv;
pub mod standalone;
mod toml_loader;

View File

@@ -1,25 +1,13 @@
// Copyright 2022 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use clap::Parser;
use common_telemetry::logging;
use meta_srv::bootstrap;
use meta_srv::metasrv::MetaSrvOptions;
use snafu::ResultExt;
use crate::error::{Error, Result};
use crate::{error, toml_loader};
use crate::error;
use crate::error::Error;
use crate::error::Result;
use crate::toml_loader;
#[derive(Parser)]
pub struct Command {
@@ -104,13 +92,13 @@ mod tests {
fn test_read_from_cmd() {
let cmd = StartCommand {
bind_addr: Some("127.0.0.1:3002".to_string()),
server_addr: Some("127.0.0.1:3002".to_string()),
server_addr: Some("0.0.0.0:3002".to_string()),
store_addr: Some("127.0.0.1:2380".to_string()),
config_file: None,
};
let options: MetaSrvOptions = cmd.try_into().unwrap();
assert_eq!("127.0.0.1:3002".to_string(), options.bind_addr);
assert_eq!("127.0.0.1:3002".to_string(), options.server_addr);
assert_eq!("0.0.0.0:3002".to_string(), options.server_addr);
assert_eq!("127.0.0.1:2380".to_string(), options.store_addr);
}
@@ -127,8 +115,8 @@ mod tests {
};
let options: MetaSrvOptions = cmd.try_into().unwrap();
assert_eq!("127.0.0.1:3002".to_string(), options.bind_addr);
assert_eq!("127.0.0.1:3002".to_string(), options.server_addr);
assert_eq!("127.0.0.1:2379".to_string(), options.store_addr);
assert_eq!(15, options.datanode_lease_secs);
assert_eq!("0.0.0.0:3002".to_string(), options.server_addr);
assert_eq!("127.0.0.1:2380".to_string(), options.store_addr);
assert_eq!(30, options.datanode_lease_secs);
}
}

View File

@@ -1,354 +0,0 @@
// Copyright 2022 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use anymap::AnyMap;
use clap::Parser;
use common_telemetry::info;
use datanode::datanode::{Datanode, DatanodeOptions, ObjectStoreConfig};
use datanode::instance::InstanceRef;
use frontend::frontend::{Frontend, FrontendOptions};
use frontend::grpc::GrpcOptions;
use frontend::influxdb::InfluxdbOptions;
use frontend::instance::Instance as FeInstance;
use frontend::mysql::MysqlOptions;
use frontend::opentsdb::OpentsdbOptions;
use frontend::postgres::PostgresOptions;
use frontend::prometheus::PrometheusOptions;
use serde::{Deserialize, Serialize};
use servers::http::HttpOptions;
use servers::tls::{TlsMode, TlsOption};
use servers::Mode;
use snafu::ResultExt;
use crate::error::{Error, IllegalConfigSnafu, Result, StartDatanodeSnafu, StartFrontendSnafu};
use crate::frontend::load_frontend_plugins;
use crate::toml_loader;
#[derive(Parser)]
pub struct Command {
#[clap(subcommand)]
subcmd: SubCommand,
}
impl Command {
pub async fn run(self) -> Result<()> {
self.subcmd.run().await
}
}
#[derive(Parser)]
enum SubCommand {
Start(StartCommand),
}
impl SubCommand {
async fn run(self) -> Result<()> {
match self {
SubCommand::Start(cmd) => cmd.run().await,
}
}
}
#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct StandaloneOptions {
pub http_options: Option<HttpOptions>,
pub grpc_options: Option<GrpcOptions>,
pub mysql_options: Option<MysqlOptions>,
pub postgres_options: Option<PostgresOptions>,
pub opentsdb_options: Option<OpentsdbOptions>,
pub influxdb_options: Option<InfluxdbOptions>,
pub prometheus_options: Option<PrometheusOptions>,
pub mode: Mode,
pub wal_dir: String,
pub storage: ObjectStoreConfig,
pub enable_memory_catalog: bool,
}
impl Default for StandaloneOptions {
fn default() -> Self {
Self {
http_options: Some(HttpOptions::default()),
grpc_options: Some(GrpcOptions::default()),
mysql_options: Some(MysqlOptions::default()),
postgres_options: Some(PostgresOptions::default()),
opentsdb_options: Some(OpentsdbOptions::default()),
influxdb_options: Some(InfluxdbOptions::default()),
prometheus_options: Some(PrometheusOptions::default()),
mode: Mode::Standalone,
wal_dir: "/tmp/greptimedb/wal".to_string(),
storage: ObjectStoreConfig::default(),
enable_memory_catalog: false,
}
}
}
impl StandaloneOptions {
fn frontend_options(self) -> FrontendOptions {
FrontendOptions {
http_options: self.http_options,
grpc_options: self.grpc_options,
mysql_options: self.mysql_options,
postgres_options: self.postgres_options,
opentsdb_options: self.opentsdb_options,
influxdb_options: self.influxdb_options,
prometheus_options: self.prometheus_options,
mode: self.mode,
meta_client_opts: None,
}
}
fn datanode_options(self) -> DatanodeOptions {
DatanodeOptions {
wal_dir: self.wal_dir,
storage: self.storage,
enable_memory_catalog: self.enable_memory_catalog,
..Default::default()
}
}
}
#[derive(Debug, Parser)]
struct StartCommand {
#[clap(long)]
http_addr: Option<String>,
#[clap(long)]
rpc_addr: Option<String>,
#[clap(long)]
mysql_addr: Option<String>,
#[clap(long)]
postgres_addr: Option<String>,
#[clap(long)]
opentsdb_addr: Option<String>,
#[clap(short, long)]
influxdb_enable: bool,
#[clap(short, long)]
config_file: Option<String>,
#[clap(short = 'm', long = "memory-catalog")]
enable_memory_catalog: bool,
#[clap(long)]
tls_mode: Option<TlsMode>,
#[clap(long)]
tls_cert_path: Option<String>,
#[clap(long)]
tls_key_path: Option<String>,
#[clap(long)]
user_provider: Option<String>,
}
impl StartCommand {
async fn run(self) -> Result<()> {
let enable_memory_catalog = self.enable_memory_catalog;
let config_file = self.config_file.clone();
let plugins = load_frontend_plugins(&self.user_provider)?;
let fe_opts = FrontendOptions::try_from(self)?;
let dn_opts: DatanodeOptions = {
let mut opts: StandaloneOptions = if let Some(path) = config_file {
toml_loader::from_file!(&path)?
} else {
StandaloneOptions::default()
};
opts.enable_memory_catalog = enable_memory_catalog;
opts.datanode_options()
};
info!(
"Standalone frontend options: {:#?}, datanode options: {:#?}",
fe_opts, dn_opts
);
let mut datanode = Datanode::new(dn_opts.clone())
.await
.context(StartDatanodeSnafu)?;
let mut frontend = build_frontend(fe_opts, plugins, datanode.get_instance()).await?;
// Start the datanode instance before starting services, to avoid requests coming in before internal components are started.
datanode
.start_instance()
.await
.context(StartDatanodeSnafu)?;
info!("Datanode instance started");
frontend.start().await.context(StartFrontendSnafu)?;
Ok(())
}
}
/// Builds the frontend instance in standalone mode.
async fn build_frontend(
fe_opts: FrontendOptions,
plugins: AnyMap,
datanode_instance: InstanceRef,
) -> Result<Frontend<FeInstance>> {
let mut frontend_instance = FeInstance::new_standalone(datanode_instance.clone());
frontend_instance.set_script_handler(datanode_instance);
Ok(Frontend::new(fe_opts, frontend_instance, plugins))
}
impl TryFrom<StartCommand> for FrontendOptions {
type Error = Error;
fn try_from(cmd: StartCommand) -> std::result::Result<Self, Self::Error> {
let opts: StandaloneOptions = if let Some(path) = cmd.config_file {
toml_loader::from_file!(&path)?
} else {
StandaloneOptions::default()
};
let mut opts = opts.frontend_options();
opts.mode = Mode::Standalone;
if let Some(addr) = cmd.http_addr {
opts.http_options = Some(HttpOptions {
addr,
..Default::default()
});
}
if let Some(addr) = cmd.rpc_addr {
// The frontend gRPC addr conflicts with the datanode's default gRPC addr.
let datanode_grpc_addr = DatanodeOptions::default().rpc_addr;
if addr == datanode_grpc_addr {
return IllegalConfigSnafu {
msg: format!(
"gRPC listen address conflicts with datanode reserved gRPC addr: {}",
datanode_grpc_addr
),
}
.fail();
}
opts.grpc_options = Some(GrpcOptions {
addr,
..Default::default()
});
}
if let Some(addr) = cmd.mysql_addr {
opts.mysql_options = Some(MysqlOptions {
addr,
..Default::default()
})
}
if let Some(addr) = cmd.postgres_addr {
opts.postgres_options = Some(PostgresOptions {
addr,
..Default::default()
})
}
if let Some(addr) = cmd.opentsdb_addr {
opts.opentsdb_options = Some(OpentsdbOptions {
addr,
..Default::default()
});
}
if cmd.influxdb_enable {
opts.influxdb_options = Some(InfluxdbOptions { enable: true });
}
let tls_option = TlsOption::new(cmd.tls_mode, cmd.tls_cert_path, cmd.tls_key_path);
if let Some(mut mysql_options) = opts.mysql_options {
mysql_options.tls = tls_option.clone();
opts.mysql_options = Some(mysql_options);
}
if let Some(mut postgres_options) = opts.postgres_options {
postgres_options.tls = tls_option;
opts.postgres_options = Some(postgres_options);
}
Ok(opts)
}
}
#[cfg(test)]
mod tests {
use std::time::Duration;
use servers::auth::{Identity, Password, UserProviderRef};
use super::*;
#[test]
fn test_read_config_file() {
let cmd = StartCommand {
http_addr: None,
rpc_addr: None,
mysql_addr: None,
postgres_addr: None,
opentsdb_addr: None,
config_file: Some(format!(
"{}/../../config/standalone.example.toml",
std::env::current_dir().unwrap().as_path().to_str().unwrap()
)),
influxdb_enable: false,
enable_memory_catalog: false,
tls_mode: None,
tls_cert_path: None,
tls_key_path: None,
user_provider: None,
};
let fe_opts = FrontendOptions::try_from(cmd).unwrap();
assert_eq!(Mode::Standalone, fe_opts.mode);
assert_eq!(
"127.0.0.1:4000".to_string(),
fe_opts.http_options.as_ref().unwrap().addr
);
assert_eq!(
Duration::from_secs(30),
fe_opts.http_options.as_ref().unwrap().timeout
);
assert_eq!(
"127.0.0.1:4001".to_string(),
fe_opts.grpc_options.unwrap().addr
);
assert_eq!(
"127.0.0.1:4002",
fe_opts.mysql_options.as_ref().unwrap().addr
);
assert_eq!(2, fe_opts.mysql_options.as_ref().unwrap().runtime_size);
assert!(fe_opts.influxdb_options.as_ref().unwrap().enable);
}
#[tokio::test]
async fn test_try_from_start_command_to_anymap() {
let command = StartCommand {
http_addr: None,
rpc_addr: None,
mysql_addr: None,
postgres_addr: None,
opentsdb_addr: None,
config_file: None,
influxdb_enable: false,
enable_memory_catalog: false,
tls_mode: None,
tls_cert_path: None,
tls_key_path: None,
user_provider: Some("static_user_provider:cmd:test=test".to_string()),
};
let plugins = load_frontend_plugins(&command.user_provider);
assert!(plugins.is_ok());
let plugins = plugins.unwrap();
let provider = plugins.get::<UserProviderRef>();
assert!(provider.is_some());
let provider = provider.unwrap();
let result = provider
.auth(Identity::UserId("test", None), Password::PlainText("test"))
.await;
assert!(result.is_ok());
}
}

View File

@@ -1,17 +1,3 @@
// Copyright 2022 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
macro_rules! from_file {
($path: expr) => {
toml::from_str(

View File

@@ -2,7 +2,6 @@
name = "common-base"
version = "0.1.0"
edition = "2021"
license = "Apache-2.0"
[dependencies]
bitvec = "1.0"

View File

@@ -1,19 +0,0 @@
// Copyright 2022 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
pub use bitvec::prelude;
// `Lsb0` provides the best codegen for bit manipulation,
// see https://github.com/bitvecto-rs/bitvec/blob/main/doc/order/Lsb0.md
pub type BitVec = prelude::BitVec<u8>;

View File

@@ -0,0 +1,5 @@
use bitvec::prelude as bv;
// `Lsb0` provides the best codegen for bit manipulation,
// see https://github.com/bitvecto-rs/bitvec/blob/main/doc/order/Lsb0.md
pub type BitVec = bv::BitVec<u8, bv::Lsb0>;
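For context, a brief usage sketch of the BitVec alias defined above, assuming the bitvec crate's usual BitVec API (new, push, indexing, as_raw_slice); this snippet is an illustration, not part of the change.

// Illustration only: exercising the BitVec alias defined above.
use bitvec::prelude as bv;

pub type BitVec = bv::BitVec<u8, bv::Lsb0>;

fn main() {
    let mut bits = BitVec::new();
    bits.push(true);
    bits.push(false);
    assert_eq!(bits.len(), 2);
    assert!(bits[0]);
    assert!(!bits[1]);
    // With Lsb0 ordering, the first pushed bit lands in the least significant
    // bit of the first u8 element.
    assert_eq!(bits.as_raw_slice()[0], 0b0000_0001);
}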

Some files were not shown because too many files have changed in this diff.