Compare commits


1 Commit

Author: luofucong
SHA1: 1e37847f48
Message: x
Date: 2025-01-02 15:21:29 +08:00
415 changed files with 22228 additions and 17877 deletions

View File

@@ -48,11 +48,12 @@ runs:
path: /tmp/greptime-*.log
retention-days: 3
- name: Build greptime # Builds standard greptime binary
- name: Build greptime
if: ${{ inputs.dev-mode == 'false' }}
uses: ./.github/actions/build-greptime-binary
with:
base-image: ubuntu
features: servers/dashboard,pg_kvbackend
features: servers/dashboard
cargo-profile: ${{ inputs.cargo-profile }}
artifacts-dir: greptime-linux-${{ inputs.arch }}-${{ inputs.version }}
version: ${{ inputs.version }}
@@ -70,7 +71,7 @@ runs:
if: ${{ inputs.arch == 'amd64' && inputs.dev-mode == 'false' }} # Builds greptime for centos if the host machine is amd64.
with:
base-image: centos
features: servers/dashboard,pg_kvbackend
features: servers/dashboard
cargo-profile: ${{ inputs.cargo-profile }}
artifacts-dir: greptime-linux-${{ inputs.arch }}-centos-${{ inputs.version }}
version: ${{ inputs.version }}

View File

@@ -9,8 +9,8 @@ runs:
steps:
# Download artifacts from previous jobs, the artifacts will be downloaded to:
# ${WORKING_DIR}
# |- greptime-darwin-amd64-v0.5.0/greptime-darwin-amd64-v0.5.0.tar.gz
# |- greptime-darwin-amd64-v0.5.0.sha256sum/greptime-darwin-amd64-v0.5.0.sha256sum
# |- greptime-darwin-amd64-pyo3-v0.5.0/greptime-darwin-amd64-pyo3-v0.5.0.tar.gz
# |- greptime-darwin-amd64-pyo3-v0.5.0.sha256sum/greptime-darwin-amd64-pyo3-v0.5.0.sha256sum
# |- greptime-darwin-amd64-v0.5.0/greptime-darwin-amd64-v0.5.0.tar.gz
# |- greptime-darwin-amd64-v0.5.0.sha256sum/greptime-darwin-amd64-v0.5.0.sha256sum
# ...

View File

@@ -30,9 +30,9 @@ runs:
done
# The compressed artifacts will use the following layout:
# greptime-linux-amd64-v0.3.0.sha256sum
# greptime-linux-amd64-v0.3.0.tar.gz
# greptime-linux-amd64-v0.3.0
# greptime-linux-amd64-pyo3-v0.3.0.sha256sum
# greptime-linux-amd64-pyo3-v0.3.0.tar.gz
# greptime-linux-amd64-pyo3-v0.3.0
# └── greptime
- name: Compress artifacts and calculate checksum
working-directory: ${{ inputs.working-dir }}

View File

@@ -27,11 +27,11 @@ function upload_artifacts() {
# ├── latest-version.txt
# ├── latest-nightly-version.txt
# ├── v0.1.0
# │ ├── greptime-darwin-amd64-v0.1.0.sha256sum
# │ └── greptime-darwin-amd64-v0.1.0.tar.gz
# │ ├── greptime-darwin-amd64-pyo3-v0.1.0.sha256sum
# │ └── greptime-darwin-amd64-pyo3-v0.1.0.tar.gz
# └── v0.2.0
# ├── greptime-darwin-amd64-v0.2.0.sha256sum
# └── greptime-darwin-amd64-v0.2.0.tar.gz
# ├── greptime-darwin-amd64-pyo3-v0.2.0.sha256sum
# └── greptime-darwin-amd64-pyo3-v0.2.0.tar.gz
find "$ARTIFACTS_DIR" -type f \( -name "*.tar.gz" -o -name "*.sha256sum" \) | while IFS= read -r file; do
aws s3 cp \
"$file" "s3://$AWS_S3_BUCKET/$RELEASE_DIRS/$VERSION/$(basename "$file")"

View File

@@ -1,6 +1,9 @@
name: Check Dependencies
on:
push:
branches:
- main
pull_request:
branches:
- main

View File

@@ -1,6 +1,4 @@
on:
schedule:
- cron: "0 15 * * 1-5"
merge_group:
pull_request:
types: [ opened, synchronize, reopened, ready_for_review ]
@@ -45,7 +43,7 @@ jobs:
runs-on: ${{ matrix.os }}
strategy:
matrix:
os: [ ubuntu-20.04 ]
os: [ windows-2022, ubuntu-20.04 ]
timeout-minutes: 60
steps:
- uses: actions/checkout@v4
@@ -59,8 +57,6 @@ jobs:
# Shares across multiple jobs
# Shares with `Clippy` job
shared-key: "check-lint"
cache-all-crates: "true"
save-if: ${{ github.ref == 'refs/heads/main' }}
- name: Run cargo check
run: cargo check --locked --workspace --all-targets
@@ -71,6 +67,11 @@ jobs:
steps:
- uses: actions/checkout@v4
- uses: actions-rust-lang/setup-rust-toolchain@v1
- name: Rust Cache
uses: Swatinem/rust-cache@v2
with:
# Shares across multiple jobs
shared-key: "check-toml"
- name: Install taplo
run: cargo +stable install taplo-cli --version ^0.9 --locked --force
- name: Run taplo
@@ -93,15 +94,13 @@ jobs:
with:
# Shares across multiple jobs
shared-key: "build-binaries"
cache-all-crates: "true"
save-if: ${{ github.ref == 'refs/heads/main' }}
- name: Install cargo-gc-bin
shell: bash
run: cargo install cargo-gc-bin --force
- name: Build greptime binaries
shell: bash
# `cargo gc` will invoke `cargo build` with specified args
run: cargo gc -- --bin greptime --bin sqlness-runner --features pg_kvbackend
run: cargo gc -- --bin greptime --bin sqlness-runner
- name: Pack greptime binaries
shell: bash
run: |
@@ -143,6 +142,11 @@ jobs:
with:
repo-token: ${{ secrets.GITHUB_TOKEN }}
- uses: actions-rust-lang/setup-rust-toolchain@v1
- name: Rust Cache
uses: Swatinem/rust-cache@v2
with:
# Shares across multiple jobs
shared-key: "fuzz-test-targets"
- name: Set Rust Fuzz
shell: bash
run: |
@@ -196,6 +200,11 @@ jobs:
with:
repo-token: ${{ secrets.GITHUB_TOKEN }}
- uses: actions-rust-lang/setup-rust-toolchain@v1
- name: Rust Cache
uses: Swatinem/rust-cache@v2
with:
# Shares across multiple jobs
shared-key: "fuzz-test-targets"
- name: Set Rust Fuzz
shell: bash
run: |
@@ -246,15 +255,13 @@ jobs:
with:
# Shares across multiple jobs
shared-key: "build-greptime-ci"
cache-all-crates: "true"
save-if: ${{ github.ref == 'refs/heads/main' }}
- name: Install cargo-gc-bin
shell: bash
run: cargo install cargo-gc-bin --force
- name: Build greptime binary
shell: bash
# `cargo gc` will invoke `cargo build` with specified args
run: cargo gc --profile ci -- --bin greptime --features pg_kvbackend
run: cargo gc --profile ci -- --bin greptime
- name: Pack greptime binary
shell: bash
run: |
@@ -310,6 +317,11 @@ jobs:
with:
repo-token: ${{ secrets.GITHUB_TOKEN }}
- uses: actions-rust-lang/setup-rust-toolchain@v1
- name: Rust Cache
uses: Swatinem/rust-cache@v2
with:
# Shares across multiple jobs
shared-key: "fuzz-test-targets"
- name: Set Rust Fuzz
shell: bash
run: |
@@ -454,6 +466,11 @@ jobs:
with:
repo-token: ${{ secrets.GITHUB_TOKEN }}
- uses: actions-rust-lang/setup-rust-toolchain@v1
- name: Rust Cache
uses: Swatinem/rust-cache@v2
with:
# Shares across multiple jobs
shared-key: "fuzz-test-targets"
- name: Set Rust Fuzz
shell: bash
run: |
@@ -556,16 +573,13 @@ jobs:
- name: "Remote WAL"
opts: "-w kafka -k 127.0.0.1:9092"
kafka: true
- name: "Pg Kvbackend"
opts: "--setup-pg"
kafka: false
timeout-minutes: 60
steps:
- uses: actions/checkout@v4
- if: matrix.mode.kafka
name: Setup kafka server
working-directory: tests-integration/fixtures
run: docker compose up -d --wait kafka
working-directory: tests-integration/fixtures/kafka
run: docker compose -f docker-compose-standalone.yml up -d --wait
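
To reproduce this setup step locally, a minimal sketch assuming Docker Compose is installed and using the standalone fixture path referenced in the hunk above:

```bash
# Bring up the Kafka fixture the same way the sqlness job does.
cd tests-integration/fixtures/kafka
docker compose -f docker-compose-standalone.yml up -d --wait
```
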
- name: Download pre-built binaries
uses: actions/download-artifact@v4
with:
@@ -595,6 +609,11 @@ jobs:
- uses: actions-rust-lang/setup-rust-toolchain@v1
with:
components: rustfmt
- name: Rust Cache
uses: Swatinem/rust-cache@v2
with:
# Shares across multiple jobs
shared-key: "check-rust-fmt"
- name: Check format
run: make fmt-check
@@ -616,99 +635,55 @@ jobs:
# Shares across multiple jobs
# Shares with `Check` job
shared-key: "check-lint"
cache-all-crates: "true"
save-if: ${{ github.ref == 'refs/heads/main' }}
- name: Run cargo clippy
run: make clippy
conflict-check:
name: Check for conflict
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- name: Merge Conflict Finder
uses: olivernybroe/action-conflict-finder@v4.0
test:
if: github.event_name != 'merge_group'
runs-on: ubuntu-20.04-8-cores
timeout-minutes: 60
needs: [conflict-check, clippy, fmt]
steps:
- uses: actions/checkout@v4
- uses: arduino/setup-protoc@v3
with:
repo-token: ${{ secrets.GITHUB_TOKEN }}
- uses: rui314/setup-mold@v1
- name: Install toolchain
uses: actions-rust-lang/setup-rust-toolchain@v1
with:
cache: false
- name: Rust Cache
uses: Swatinem/rust-cache@v2
with:
# Shares across multiple jobs
shared-key: "coverage-test"
cache-all-crates: "true"
save-if: ${{ github.ref == 'refs/heads/main' }}
- name: Install latest nextest release
uses: taiki-e/install-action@nextest
- name: Setup external services
working-directory: tests-integration/fixtures
run: docker compose up -d --wait
- name: Run nextest cases
run: cargo nextest run --workspace -F dashboard -F pg_kvbackend
env:
CARGO_BUILD_RUSTFLAGS: "-C link-arg=-fuse-ld=mold"
RUST_BACKTRACE: 1
CARGO_INCREMENTAL: 0
GT_S3_BUCKET: ${{ vars.AWS_CI_TEST_BUCKET }}
GT_S3_ACCESS_KEY_ID: ${{ secrets.AWS_CI_TEST_ACCESS_KEY_ID }}
GT_S3_ACCESS_KEY: ${{ secrets.AWS_CI_TEST_SECRET_ACCESS_KEY }}
GT_S3_REGION: ${{ vars.AWS_CI_TEST_BUCKET_REGION }}
GT_MINIO_BUCKET: greptime
GT_MINIO_ACCESS_KEY_ID: superpower_ci_user
GT_MINIO_ACCESS_KEY: superpower_password
GT_MINIO_REGION: us-west-2
GT_MINIO_ENDPOINT_URL: http://127.0.0.1:9000
GT_ETCD_ENDPOINTS: http://127.0.0.1:2379
GT_POSTGRES_ENDPOINTS: postgres://greptimedb:admin@127.0.0.1:5432/postgres
GT_KAFKA_ENDPOINTS: 127.0.0.1:9092
GT_KAFKA_SASL_ENDPOINTS: 127.0.0.1:9093
UNITTEST_LOG_DIR: "__unittest_logs"
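
As a rough local equivalent of the test job above (a sketch only, assuming Docker Compose and cargo-nextest are installed; just a few of the environment variables are reproduced here):

```bash
# Start the external service fixtures used by the integration tests.
(cd tests-integration/fixtures && docker compose up -d --wait)

# Point the tests at the local fixtures (values copied from the workflow above).
export GT_ETCD_ENDPOINTS=http://127.0.0.1:2379
export GT_KAFKA_ENDPOINTS=127.0.0.1:9092
export GT_MINIO_ENDPOINT_URL=http://127.0.0.1:9000

# Run the same nextest invocation as the CI step.
cargo nextest run --workspace -F dashboard -F pg_kvbackend
```
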
coverage:
if: github.event_name == 'merge_group'
if: github.event.pull_request.draft == false
runs-on: ubuntu-20.04-8-cores
timeout-minutes: 60
needs: [clippy, fmt]
steps:
- uses: actions/checkout@v4
- uses: arduino/setup-protoc@v3
with:
repo-token: ${{ secrets.GITHUB_TOKEN }}
- uses: rui314/setup-mold@v1
- uses: KyleMayes/install-llvm-action@v1
with:
version: "14.0"
- name: Install toolchain
uses: actions-rust-lang/setup-rust-toolchain@v1
with:
components: llvm-tools
cache: false
components: llvm-tools-preview
- name: Rust Cache
uses: Swatinem/rust-cache@v2
with:
# Shares across multiple jobs
shared-key: "coverage-test"
save-if: ${{ github.ref == 'refs/heads/main' }}
- name: Docker Cache
uses: ScribeMD/docker-cache@0.3.7
with:
key: docker-${{ runner.os }}-coverage
- name: Install latest nextest release
uses: taiki-e/install-action@nextest
- name: Install cargo-llvm-cov
uses: taiki-e/install-action@cargo-llvm-cov
- name: Setup external services
working-directory: tests-integration/fixtures
run: docker compose up -d --wait
- name: Setup etcd server
working-directory: tests-integration/fixtures/etcd
run: docker compose -f docker-compose-standalone.yml up -d --wait
- name: Setup kafka server
working-directory: tests-integration/fixtures/kafka
run: docker compose -f docker-compose-standalone.yml up -d --wait
- name: Setup minio
working-directory: tests-integration/fixtures/minio
run: docker compose -f docker-compose-standalone.yml up -d --wait
- name: Setup postgres server
working-directory: tests-integration/fixtures/postgres
run: docker compose -f docker-compose-standalone.yml up -d --wait
- name: Run nextest cases
run: cargo llvm-cov nextest --workspace --lcov --output-path lcov.info -F dashboard -F pg_kvbackend
env:
CARGO_BUILD_RUSTFLAGS: "-C link-arg=-fuse-ld=mold"
CARGO_BUILD_RUSTFLAGS: "-C link-arg=-fuse-ld=lld"
RUST_BACKTRACE: 1
CARGO_INCREMENTAL: 0
GT_S3_BUCKET: ${{ vars.AWS_CI_TEST_BUCKET }}

View File

@@ -109,7 +109,6 @@ jobs:
UNITTEST_LOG_DIR: "__unittest_logs"
cleanbuild-linux-nix:
name: Run clean build on Linux
runs-on: ubuntu-latest-8-cores
timeout-minutes: 60
steps:

View File

@@ -436,22 +436,6 @@ jobs:
aws-region: ${{ vars.EC2_RUNNER_REGION }}
github-token: ${{ secrets.GH_PERSONAL_ACCESS_TOKEN }}
bump-doc-version:
name: Bump doc version
if: ${{ github.event_name == 'push' || github.event_name == 'schedule' }}
needs: [allocate-runners]
runs-on: ubuntu-20.04
steps:
- uses: actions/checkout@v4
- uses: ./.github/actions/setup-cyborg
- name: Bump doc version
working-directory: cyborg
run: pnpm tsx bin/bump-doc-version.ts
env:
VERSION: ${{ needs.allocate-runners.outputs.version }}
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
DOCS_REPO_TOKEN: ${{ secrets.DOCS_REPO_TOKEN }}
notification:
if: ${{ github.repository == 'GreptimeTeam/greptimedb' && (github.event_name == 'push' || github.event_name == 'schedule') && always() }}
name: Send notification to Greptime team

Cargo.lock (generated, 2264 lines): file diff suppressed because it is too large.

View File

@@ -55,6 +55,7 @@ members = [
"src/promql",
"src/puffin",
"src/query",
"src/script",
"src/servers",
"src/session",
"src/sql",
@@ -78,6 +79,8 @@ clippy.dbg_macro = "warn"
clippy.implicit_clone = "warn"
clippy.readonly_write_lock = "allow"
rust.unknown_lints = "deny"
# Remove this after https://github.com/PyO3/pyo3/issues/4094
rust.non_local_definitions = "allow"
rust.unexpected_cfgs = { level = "warn", check-cfg = ['cfg(tokio_unstable)'] }
[workspace.dependencies]
@@ -96,7 +99,6 @@ arrow-schema = { version = "51.0", features = ["serde"] }
async-stream = "0.3"
async-trait = "0.1"
axum = { version = "0.6", features = ["headers"] }
backon = "1"
base64 = "0.21"
bigdecimal = "0.4.2"
bitflags = "2.4.1"
@@ -116,15 +118,13 @@ datafusion-physical-expr = { git = "https://github.com/waynexia/arrow-datafusion
datafusion-physical-plan = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "7823ef2f63663907edab46af0d51359900f608d6" }
datafusion-sql = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "7823ef2f63663907edab46af0d51359900f608d6" }
datafusion-substrait = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "7823ef2f63663907edab46af0d51359900f608d6" }
deadpool = "0.10"
deadpool-postgres = "0.12"
derive_builder = "0.12"
dotenv = "0.15"
etcd-client = "0.13"
fst = "0.4.7"
futures = "0.3"
futures-util = "0.3"
greptime-proto = { git = "https://github.com/GreptimeTeam/greptime-proto.git", rev = "ec801a91aa22f9666063d02805f1f60f7c93458a" }
greptime-proto = { git = "https://github.com/GreptimeTeam/greptime-proto.git", rev = "a875e976441188028353f7274a46a7e6e065c5d4" }
hex = "0.4"
http = "0.2"
humantime = "2.1"
@@ -132,7 +132,6 @@ humantime-serde = "1.1"
itertools = "0.10"
jsonb = { git = "https://github.com/databendlabs/jsonb.git", rev = "8c8d2fc294a39f3ff08909d60f718639cfba3875", default-features = false }
lazy_static = "1.4"
local-ip-address = "0.6"
meter-core = { git = "https://github.com/GreptimeTeam/greptime-meter.git", rev = "a10facb353b41460eeb98578868ebf19c2084fac" }
mockall = "0.11.4"
moka = "0.12"
@@ -180,17 +179,15 @@ similar-asserts = "1.6.0"
smallvec = { version = "1", features = ["serde"] }
snafu = "0.8"
sysinfo = "0.30"
rustls = { version = "0.23.20", default-features = false } # override by patch, see [patch.crates-io]
# on branch v0.44.x
sqlparser = { git = "https://github.com/GreptimeTeam/sqlparser-rs.git", rev = "54a267ac89c09b11c0c88934690530807185d3e7", features = [
"visitor",
"serde",
] } # on branch v0.44.x
] }
strum = { version = "0.25", features = ["derive"] }
tempfile = "3"
tokio = { version = "1.40", features = ["full"] }
tokio-postgres = "0.7"
tokio-rustls = { version = "0.26.0", default-features = false } # override by patch, see [patch.crates-io]
tokio-stream = "0.1"
tokio-util = { version = "0.7", features = ["io-util", "compat"] }
toml = "0.8.8"
@@ -257,6 +254,7 @@ plugins = { path = "src/plugins" }
promql = { path = "src/promql" }
puffin = { path = "src/puffin" }
query = { path = "src/query" }
script = { path = "src/script" }
servers = { path = "src/servers" }
session = { path = "src/session" }
sql = { path = "src/sql" }
@@ -266,9 +264,9 @@ table = { path = "src/table" }
[patch.crates-io]
# change all rustls dependencies to use our fork to default to `ring` to make it "just work"
hyper-rustls = { git = "https://github.com/GreptimeTeam/hyper-rustls", rev = "a951e03" } # version = "0.27.5" with ring patch
rustls = { git = "https://github.com/GreptimeTeam/rustls", rev = "34fd0c6" } # version = "0.23.20" with ring patch
tokio-rustls = { git = "https://github.com/GreptimeTeam/tokio-rustls", rev = "4604ca6" } # version = "0.26.0" with ring patch
hyper-rustls = { git = "https://github.com/GreptimeTeam/hyper-rustls" }
rustls = { git = "https://github.com/GreptimeTeam/rustls" }
tokio-rustls = { git = "https://github.com/GreptimeTeam/tokio-rustls" }
# This is commented, since we are not using aws-lc-sys, if we need to use it, we need to uncomment this line or use a release after this commit, or it wouldn't compile with gcc < 8.1
# see https://github.com/aws/aws-lc-rs/pull/526
# aws-lc-sys = { git ="https://github.com/aws/aws-lc-rs", rev = "556558441e3494af4b156ae95ebc07ebc2fd38aa" }

View File

@@ -165,14 +165,15 @@ nextest: ## Install nextest tools.
sqlness-test: ## Run sqlness test.
cargo sqlness ${SQLNESS_OPTS}
# Run fuzz test ${FUZZ_TARGET}.
RUNS ?= 1
FUZZ_TARGET ?= fuzz_alter_table
.PHONY: fuzz
fuzz: ## Run fuzz test ${FUZZ_TARGET}.
fuzz:
cargo fuzz run ${FUZZ_TARGET} --fuzz-dir tests-fuzz -D -s none -- -runs=${RUNS}
.PHONY: fuzz-ls
fuzz-ls: ## List all fuzz targets.
fuzz-ls:
cargo fuzz list --fuzz-dir tests-fuzz
.PHONY: check
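
As a usage note for the fuzz targets above, a hedged example invocation through make, assuming cargo-fuzz is installed and using the default target name shown (`fuzz_alter_table`) with an arbitrary run count:

```bash
# Run the alter-table fuzz target for 10 iterations via the Makefile rule above.
make fuzz FUZZ_TARGET=fuzz_alter_table RUNS=10

# List all available fuzz targets.
make fuzz-ls
```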

View File

@@ -138,8 +138,7 @@ Check the prerequisite:
* [Rust toolchain](https://www.rust-lang.org/tools/install) (nightly)
* [Protobuf compiler](https://grpc.io/docs/protoc-installation/) (>= 3.15)
* C/C++ building essentials, including `gcc`/`g++`/`autoconf` and glibc library (eg. `libc6-dev` on Ubuntu and `glibc-devel` on Fedora)
* Python toolchain (optional): Required only if using some test scripts.
* Python toolchain (optional): Required only if built with PyO3 backend. More details for compiling with PyO3 can be found in its [documentation](https://pyo3.rs/v0.18.1/building_and_distribution#configuring-the-python-version).
Build GreptimeDB binary:
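
The actual build command lives outside this hunk; as a generic, hedged example (not necessarily the README's exact instruction), a release build of the server binary would look like:

```bash
# Build the greptime server binary in release mode with plain Cargo.
cargo build --release --bin greptime
```
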
@@ -229,3 +228,4 @@ Special thanks to all the contributors who have propelled GreptimeDB forward. Fo
- GreptimeDB's query engine is powered by [Apache Arrow DataFusion™](https://arrow.apache.org/datafusion/).
- [Apache OpenDAL™](https://opendal.apache.org) gives GreptimeDB a very general and elegant data access abstraction layer.
- GreptimeDB's meta service is based on [etcd](https://etcd.io/).
- GreptimeDB uses [RustPython](https://github.com/RustPython/RustPython) for experimental embedded python scripting.

View File

@@ -91,12 +91,10 @@
| `procedure` | -- | -- | Procedure storage options. |
| `procedure.max_retry_times` | Integer | `3` | Procedure max retry time. |
| `procedure.retry_delay` | String | `500ms` | Initial retry delay of procedures, increases exponentially |
| `flow` | -- | -- | flow engine options. |
| `flow.num_workers` | Integer | `0` | The number of flow worker in flownode.<br/>Not setting(or set to 0) this value will use the number of CPU cores divided by 2. |
| `storage` | -- | -- | The data storage options. |
| `storage.data_home` | String | `/tmp/greptimedb/` | The working home directory. |
| `storage.type` | String | `File` | The storage type used to store the data.<br/>- `File`: the data is stored in the local file system.<br/>- `S3`: the data is stored in the S3 object storage.<br/>- `Gcs`: the data is stored in the Google Cloud Storage.<br/>- `Azblob`: the data is stored in the Azure Blob Storage.<br/>- `Oss`: the data is stored in the Aliyun OSS. |
| `storage.cache_path` | String | Unset | Read cache configuration for object storage such as 'S3' etc, it's configured by default when using object storage. It is recommended to configure it when using object storage for better performance.<br/>A local file directory, defaults to `{data_home}`. An empty string means disabling. |
| `storage.cache_path` | String | Unset | Read cache configuration for object storage such as 'S3' etc, it's configured by default when using object storage. It is recommended to configure it when using object storage for better performance.<br/>A local file directory, defaults to `{data_home}/object_cache/read`. An empty string means disabling. |
| `storage.cache_capacity` | String | Unset | The local file cache capacity in bytes. If your disk space is sufficient, it is recommended to set it larger. |
| `storage.bucket` | String | Unset | The S3 bucket name.<br/>**It's only used when the storage type is `S3`, `Oss` and `Gcs`**. |
| `storage.root` | String | Unset | The S3 data will be stored in the specified prefix, for example, `s3://${bucket}/${root}`.<br/>**It's only used when the storage type is `S3`, `Oss` and `Azblob`**. |
@@ -134,10 +132,10 @@
| `region_engine.mito.vector_cache_size` | String | Auto | Cache size for vectors and arrow arrays. Setting it to 0 to disable the cache.<br/>If not set, it's default to 1/16 of OS memory with a max limitation of 512MB. |
| `region_engine.mito.page_cache_size` | String | Auto | Cache size for pages of SST row groups. Setting it to 0 to disable the cache.<br/>If not set, it's default to 1/8 of OS memory. |
| `region_engine.mito.selector_result_cache_size` | String | Auto | Cache size for time series selector (e.g. `last_value()`). Setting it to 0 to disable the cache.<br/>If not set, it's default to 1/16 of OS memory with a max limitation of 512MB. |
| `region_engine.mito.enable_write_cache` | Bool | `false` | Whether to enable the write cache, it's enabled by default when using object storage. It is recommended to enable it when using object storage for better performance. |
| `region_engine.mito.write_cache_path` | String | `""` | File system path for write cache, defaults to `{data_home}`. |
| `region_engine.mito.write_cache_size` | String | `5GiB` | Capacity for write cache. If your disk space is sufficient, it is recommended to set it larger. |
| `region_engine.mito.write_cache_ttl` | String | Unset | TTL for write cache. |
| `region_engine.mito.enable_experimental_write_cache` | Bool | `false` | Whether to enable the experimental write cache, it's enabled by default when using object storage. It is recommended to enable it when using object storage for better performance. |
| `region_engine.mito.experimental_write_cache_path` | String | `""` | File system path for write cache, defaults to `{data_home}/object_cache/write`. |
| `region_engine.mito.experimental_write_cache_size` | String | `5GiB` | Capacity for write cache. If your disk space is sufficient, it is recommended to set it larger. |
| `region_engine.mito.experimental_write_cache_ttl` | String | Unset | TTL for write cache. |
| `region_engine.mito.sst_write_buffer_size` | String | `8MB` | Buffer size for SST writing. |
| `region_engine.mito.parallel_scan_channel_size` | Integer | `32` | Capacity of the channel to send data from parallel scan tasks to the main task. |
| `region_engine.mito.allow_stale_entries` | Bool | `false` | Whether to allow stale WAL entries read during replay. |
@@ -145,15 +143,15 @@
| `region_engine.mito.index` | -- | -- | The options for index in Mito engine. |
| `region_engine.mito.index.aux_path` | String | `""` | Auxiliary directory path for the index in filesystem, used to store intermediate files for<br/>creating the index and staging files for searching the index, defaults to `{data_home}/index_intermediate`.<br/>The default name for this directory is `index_intermediate` for backward compatibility.<br/><br/>This path contains two subdirectories:<br/>- `__intm`: for storing intermediate files used during creating index.<br/>- `staging`: for storing staging files used during searching index. |
| `region_engine.mito.index.staging_size` | String | `2GB` | The max capacity of the staging directory. |
| `region_engine.mito.index.metadata_cache_size` | String | `64MiB` | Cache size for inverted index metadata. |
| `region_engine.mito.index.content_cache_size` | String | `128MiB` | Cache size for inverted index content. |
| `region_engine.mito.index.content_cache_page_size` | String | `64KiB` | Page size for inverted index content cache. |
| `region_engine.mito.inverted_index` | -- | -- | The options for inverted index in Mito engine. |
| `region_engine.mito.inverted_index.create_on_flush` | String | `auto` | Whether to create the index on flush.<br/>- `auto`: automatically (default)<br/>- `disable`: never |
| `region_engine.mito.inverted_index.create_on_compaction` | String | `auto` | Whether to create the index on compaction.<br/>- `auto`: automatically (default)<br/>- `disable`: never |
| `region_engine.mito.inverted_index.apply_on_query` | String | `auto` | Whether to apply the index on query<br/>- `auto`: automatically (default)<br/>- `disable`: never |
| `region_engine.mito.inverted_index.mem_threshold_on_create` | String | `auto` | Memory threshold for performing an external sort during index creation.<br/>- `auto`: automatically determine the threshold based on the system memory size (default)<br/>- `unlimited`: no memory limit<br/>- `[size]` e.g. `64MB`: fixed memory threshold |
| `region_engine.mito.inverted_index.intermediate_path` | String | `""` | Deprecated, use `region_engine.mito.index.aux_path` instead. |
| `region_engine.mito.inverted_index.metadata_cache_size` | String | `64MiB` | Cache size for inverted index metadata. |
| `region_engine.mito.inverted_index.content_cache_size` | String | `128MiB` | Cache size for inverted index content. |
| `region_engine.mito.inverted_index.content_cache_page_size` | String | `8MiB` | Page size for inverted index content cache. |
| `region_engine.mito.fulltext_index` | -- | -- | The options for full-text index in Mito engine. |
| `region_engine.mito.fulltext_index.create_on_flush` | String | `auto` | Whether to create the index on flush.<br/>- `auto`: automatically (default)<br/>- `disable`: never |
| `region_engine.mito.fulltext_index.create_on_compaction` | String | `auto` | Whether to create the index on compaction.<br/>- `auto`: automatically (default)<br/>- `disable`: never |
@@ -216,7 +214,7 @@
| `http.body_limit` | String | `64MB` | HTTP request body limit.<br/>The following units are supported: `B`, `KB`, `KiB`, `MB`, `MiB`, `GB`, `GiB`, `TB`, `TiB`, `PB`, `PiB`.<br/>Set to 0 to disable limit. |
| `grpc` | -- | -- | The gRPC server options. |
| `grpc.addr` | String | `127.0.0.1:4001` | The address to bind the gRPC server. |
| `grpc.hostname` | String | `127.0.0.1:4001` | The hostname advertised to the metasrv,<br/>and used for connections from outside the host |
| `grpc.hostname` | String | `127.0.0.1` | The hostname advertised to the metasrv,<br/>and used for connections from outside the host |
| `grpc.runtime_size` | Integer | `8` | The number of server worker threads. |
| `grpc.tls` | -- | -- | gRPC server TLS options, see `mysql.tls` section. |
| `grpc.tls.mode` | String | `disable` | TLS mode. |
@@ -295,11 +293,9 @@
| `data_home` | String | `/tmp/metasrv/` | The working home directory. |
| `bind_addr` | String | `127.0.0.1:3002` | The bind address of metasrv. |
| `server_addr` | String | `127.0.0.1:3002` | The communication server address for frontend and datanode to connect to metasrv, "127.0.0.1:3002" by default for localhost. |
| `store_addrs` | Array | -- | Store server address default to etcd store.<br/>For postgres store, the format is:<br/>"password=password dbname=postgres user=postgres host=localhost port=5432"<br/>For etcd store, the format is:<br/>"127.0.0.1:2379" |
| `store_addrs` | Array | -- | Store server address default to etcd store. |
| `store_key_prefix` | String | `""` | If it's not empty, the metasrv will store all data with this key prefix. |
| `backend` | String | `etcd_store` | The datastore for meta server.<br/>Available values:<br/>- `etcd_store` (default value)<br/>- `memory_store`<br/>- `postgres_store` |
| `meta_table_name` | String | `greptime_metakv` | Table name in RDS to store metadata. Effect when using a RDS kvbackend.<br/>**Only used when backend is `postgres_store`.** |
| `meta_election_lock_id` | Integer | `1` | Advisory lock id in PostgreSQL for election. Effect when using PostgreSQL as kvbackend<br/>Only used when backend is `postgres_store`. |
| `backend` | String | `EtcdStore` | The datastore for meta server. |
| `selector` | String | `round_robin` | Datanode selector type.<br/>- `round_robin` (default value)<br/>- `lease_based`<br/>- `load_based`<br/>For details, please see "https://docs.greptime.com/developer-guide/metasrv/selector". |
| `use_memory_store` | Bool | `false` | Store data in memory. |
| `enable_region_failover` | Bool | `false` | Whether to enable region failover.<br/>This feature is only available on GreptimeDB running on cluster mode and<br/>- Using Remote WAL<br/>- Using shared storage (e.g., s3). |
@@ -382,7 +378,7 @@
| `http.body_limit` | String | `64MB` | HTTP request body limit.<br/>The following units are supported: `B`, `KB`, `KiB`, `MB`, `MiB`, `GB`, `GiB`, `TB`, `TiB`, `PB`, `PiB`.<br/>Set to 0 to disable limit. |
| `grpc` | -- | -- | The gRPC server options. |
| `grpc.addr` | String | `127.0.0.1:3001` | The address to bind the gRPC server. |
| `grpc.hostname` | String | `127.0.0.1:3001` | The hostname advertised to the metasrv,<br/>and used for connections from outside the host |
| `grpc.hostname` | String | `127.0.0.1` | The hostname advertised to the metasrv,<br/>and used for connections from outside the host |
| `grpc.runtime_size` | Integer | `8` | The number of server worker threads. |
| `grpc.max_recv_message_size` | String | `512MB` | The maximum receive message size for gRPC server. |
| `grpc.max_send_message_size` | String | `512MB` | The maximum send message size for gRPC server. |
@@ -470,10 +466,10 @@
| `region_engine.mito.vector_cache_size` | String | Auto | Cache size for vectors and arrow arrays. Setting it to 0 to disable the cache.<br/>If not set, it's default to 1/16 of OS memory with a max limitation of 512MB. |
| `region_engine.mito.page_cache_size` | String | Auto | Cache size for pages of SST row groups. Setting it to 0 to disable the cache.<br/>If not set, it's default to 1/8 of OS memory. |
| `region_engine.mito.selector_result_cache_size` | String | Auto | Cache size for time series selector (e.g. `last_value()`). Setting it to 0 to disable the cache.<br/>If not set, it's default to 1/16 of OS memory with a max limitation of 512MB. |
| `region_engine.mito.enable_write_cache` | Bool | `false` | Whether to enable the write cache, it's enabled by default when using object storage. It is recommended to enable it when using object storage for better performance. |
| `region_engine.mito.write_cache_path` | String | `""` | File system path for write cache, defaults to `{data_home}`. |
| `region_engine.mito.write_cache_size` | String | `5GiB` | Capacity for write cache. If your disk space is sufficient, it is recommended to set it larger. |
| `region_engine.mito.write_cache_ttl` | String | Unset | TTL for write cache. |
| `region_engine.mito.enable_experimental_write_cache` | Bool | `false` | Whether to enable the experimental write cache, it's enabled by default when using object storage. It is recommended to enable it when using object storage for better performance. |
| `region_engine.mito.experimental_write_cache_path` | String | `""` | File system path for write cache, defaults to `{data_home}`. |
| `region_engine.mito.experimental_write_cache_size` | String | `5GiB` | Capacity for write cache. If your disk space is sufficient, it is recommended to set it larger. |
| `region_engine.mito.experimental_write_cache_ttl` | String | Unset | TTL for write cache. |
| `region_engine.mito.sst_write_buffer_size` | String | `8MB` | Buffer size for SST writing. |
| `region_engine.mito.parallel_scan_channel_size` | Integer | `32` | Capacity of the channel to send data from parallel scan tasks to the main task. |
| `region_engine.mito.allow_stale_entries` | Bool | `false` | Whether to allow stale WAL entries read during replay. |
@@ -481,15 +477,15 @@
| `region_engine.mito.index` | -- | -- | The options for index in Mito engine. |
| `region_engine.mito.index.aux_path` | String | `""` | Auxiliary directory path for the index in filesystem, used to store intermediate files for<br/>creating the index and staging files for searching the index, defaults to `{data_home}/index_intermediate`.<br/>The default name for this directory is `index_intermediate` for backward compatibility.<br/><br/>This path contains two subdirectories:<br/>- `__intm`: for storing intermediate files used during creating index.<br/>- `staging`: for storing staging files used during searching index. |
| `region_engine.mito.index.staging_size` | String | `2GB` | The max capacity of the staging directory. |
| `region_engine.mito.index.metadata_cache_size` | String | `64MiB` | Cache size for inverted index metadata. |
| `region_engine.mito.index.content_cache_size` | String | `128MiB` | Cache size for inverted index content. |
| `region_engine.mito.index.content_cache_page_size` | String | `64KiB` | Page size for inverted index content cache. |
| `region_engine.mito.inverted_index` | -- | -- | The options for inverted index in Mito engine. |
| `region_engine.mito.inverted_index.create_on_flush` | String | `auto` | Whether to create the index on flush.<br/>- `auto`: automatically (default)<br/>- `disable`: never |
| `region_engine.mito.inverted_index.create_on_compaction` | String | `auto` | Whether to create the index on compaction.<br/>- `auto`: automatically (default)<br/>- `disable`: never |
| `region_engine.mito.inverted_index.apply_on_query` | String | `auto` | Whether to apply the index on query<br/>- `auto`: automatically (default)<br/>- `disable`: never |
| `region_engine.mito.inverted_index.mem_threshold_on_create` | String | `auto` | Memory threshold for performing an external sort during index creation.<br/>- `auto`: automatically determine the threshold based on the system memory size (default)<br/>- `unlimited`: no memory limit<br/>- `[size]` e.g. `64MB`: fixed memory threshold |
| `region_engine.mito.inverted_index.intermediate_path` | String | `""` | Deprecated, use `region_engine.mito.index.aux_path` instead. |
| `region_engine.mito.inverted_index.metadata_cache_size` | String | `64MiB` | Cache size for inverted index metadata. |
| `region_engine.mito.inverted_index.content_cache_size` | String | `128MiB` | Cache size for inverted index content. |
| `region_engine.mito.inverted_index.content_cache_page_size` | String | `8MiB` | Page size for inverted index content cache. |
| `region_engine.mito.fulltext_index` | -- | -- | The options for full-text index in Mito engine. |
| `region_engine.mito.fulltext_index.create_on_flush` | String | `auto` | Whether to create the index on flush.<br/>- `auto`: automatically (default)<br/>- `disable`: never |
| `region_engine.mito.fulltext_index.create_on_compaction` | String | `auto` | Whether to create the index on compaction.<br/>- `auto`: automatically (default)<br/>- `disable`: never |
@@ -538,18 +534,12 @@
| --- | -----| ------- | ----------- |
| `mode` | String | `distributed` | The running mode of the flownode. It can be `standalone` or `distributed`. |
| `node_id` | Integer | Unset | The flownode identifier and should be unique in the cluster. |
| `flow` | -- | -- | flow engine options. |
| `flow.num_workers` | Integer | `0` | The number of flow worker in flownode.<br/>Not setting(or set to 0) this value will use the number of CPU cores divided by 2. |
| `grpc` | -- | -- | The gRPC server options. |
| `grpc.addr` | String | `127.0.0.1:6800` | The address to bind the gRPC server. |
| `grpc.hostname` | String | `127.0.0.1` | The hostname advertised to the metasrv,<br/>and used for connections from outside the host |
| `grpc.runtime_size` | Integer | `2` | The number of server worker threads. |
| `grpc.max_recv_message_size` | String | `512MB` | The maximum receive message size for gRPC server. |
| `grpc.max_send_message_size` | String | `512MB` | The maximum send message size for gRPC server. |
| `http` | -- | -- | The HTTP server options. |
| `http.addr` | String | `127.0.0.1:4000` | The address to bind the HTTP server. |
| `http.timeout` | String | `30s` | HTTP request timeout. Set to 0 to disable timeout. |
| `http.body_limit` | String | `64MB` | HTTP request body limit.<br/>The following units are supported: `B`, `KB`, `KiB`, `MB`, `MiB`, `GB`, `GiB`, `TB`, `TiB`, `PB`, `PiB`.<br/>Set to 0 to disable limit. |
| `meta_client` | -- | -- | The metasrv client options. |
| `meta_client.metasrv_addrs` | Array | -- | The addresses of the metasrv. |
| `meta_client.timeout` | String | `3s` | Operation timeout. |

View File

@@ -59,7 +59,7 @@ body_limit = "64MB"
addr = "127.0.0.1:3001"
## The hostname advertised to the metasrv,
## and used for connections from outside the host
hostname = "127.0.0.1:3001"
hostname = "127.0.0.1"
## The number of server worker threads.
runtime_size = 8
## The maximum receive message size for gRPC server.
@@ -475,18 +475,18 @@ auto_flush_interval = "1h"
## @toml2docs:none-default="Auto"
#+ selector_result_cache_size = "512MB"
## Whether to enable the write cache, it's enabled by default when using object storage. It is recommended to enable it when using object storage for better performance.
enable_write_cache = false
## Whether to enable the experimental write cache, it's enabled by default when using object storage. It is recommended to enable it when using object storage for better performance.
enable_experimental_write_cache = false
## File system path for write cache, defaults to `{data_home}`.
write_cache_path = ""
experimental_write_cache_path = ""
## Capacity for write cache. If your disk space is sufficient, it is recommended to set it larger.
write_cache_size = "5GiB"
experimental_write_cache_size = "5GiB"
## TTL for write cache.
## @toml2docs:none-default
write_cache_ttl = "8h"
experimental_write_cache_ttl = "8h"
## Buffer size for SST writing.
sst_write_buffer_size = "8MB"
@@ -516,15 +516,6 @@ aux_path = ""
## The max capacity of the staging directory.
staging_size = "2GB"
## Cache size for inverted index metadata.
metadata_cache_size = "64MiB"
## Cache size for inverted index content.
content_cache_size = "128MiB"
## Page size for inverted index content cache.
content_cache_page_size = "64KiB"
## The options for inverted index in Mito engine.
[region_engine.mito.inverted_index]
@@ -552,6 +543,15 @@ mem_threshold_on_create = "auto"
## Deprecated, use `region_engine.mito.index.aux_path` instead.
intermediate_path = ""
## Cache size for inverted index metadata.
metadata_cache_size = "64MiB"
## Cache size for inverted index content.
content_cache_size = "128MiB"
## Page size for inverted index content cache.
content_cache_page_size = "8MiB"
## The options for full-text index in Mito engine.
[region_engine.mito.fulltext_index]

View File

@@ -5,12 +5,6 @@ mode = "distributed"
## @toml2docs:none-default
node_id = 14
## flow engine options.
[flow]
## The number of flow worker in flownode.
## Not setting(or set to 0) this value will use the number of CPU cores divided by 2.
#+num_workers=0
## The gRPC server options.
[grpc]
## The address to bind the gRPC server.
@@ -25,16 +19,6 @@ max_recv_message_size = "512MB"
## The maximum send message size for gRPC server.
max_send_message_size = "512MB"
## The HTTP server options.
[http]
## The address to bind the HTTP server.
addr = "127.0.0.1:4000"
## HTTP request timeout. Set to 0 to disable timeout.
timeout = "30s"
## HTTP request body limit.
## The following units are supported: `B`, `KB`, `KiB`, `MB`, `MiB`, `GB`, `GiB`, `TB`, `TiB`, `PB`, `PiB`.
## Set to 0 to disable limit.
body_limit = "64MB"
## The metasrv client options.
[meta_client]

View File

@@ -38,7 +38,7 @@ body_limit = "64MB"
addr = "127.0.0.1:4001"
## The hostname advertised to the metasrv,
## and used for connections from outside the host
hostname = "127.0.0.1:4001"
hostname = "127.0.0.1"
## The number of server worker threads.
runtime_size = 8

View File

@@ -8,29 +8,13 @@ bind_addr = "127.0.0.1:3002"
server_addr = "127.0.0.1:3002"
## Store server address default to etcd store.
## For postgres store, the format is:
## "password=password dbname=postgres user=postgres host=localhost port=5432"
## For etcd store, the format is:
## "127.0.0.1:2379"
store_addrs = ["127.0.0.1:2379"]
## If it's not empty, the metasrv will store all data with this key prefix.
store_key_prefix = ""
## The datastore for meta server.
## Available values:
## - `etcd_store` (default value)
## - `memory_store`
## - `postgres_store`
backend = "etcd_store"
## Table name in RDS to store metadata. Effect when using a RDS kvbackend.
## **Only used when backend is `postgres_store`.**
meta_table_name = "greptime_metakv"
## Advisory lock id in PostgreSQL for election. Effect when using PostgreSQL as kvbackend
## Only used when backend is `postgres_store`.
meta_election_lock_id = 1
backend = "EtcdStore"
## Datanode selector type.
## - `round_robin` (default value)

View File

@@ -284,12 +284,6 @@ max_retry_times = 3
## Initial retry delay of procedures, increases exponentially
retry_delay = "500ms"
## flow engine options.
[flow]
## The number of flow worker in flownode.
## Not setting(or set to 0) this value will use the number of CPU cores divided by 2.
#+num_workers=0
# Example of using S3 as the storage.
# [storage]
# type = "S3"
@@ -343,7 +337,7 @@ data_home = "/tmp/greptimedb/"
type = "File"
## Read cache configuration for object storage such as 'S3' etc, it's configured by default when using object storage. It is recommended to configure it when using object storage for better performance.
## A local file directory, defaults to `{data_home}`. An empty string means disabling.
## A local file directory, defaults to `{data_home}/object_cache/read`. An empty string means disabling.
## @toml2docs:none-default
#+ cache_path = ""
@@ -524,18 +518,18 @@ auto_flush_interval = "1h"
## @toml2docs:none-default="Auto"
#+ selector_result_cache_size = "512MB"
## Whether to enable the write cache, it's enabled by default when using object storage. It is recommended to enable it when using object storage for better performance.
enable_write_cache = false
## Whether to enable the experimental write cache, it's enabled by default when using object storage. It is recommended to enable it when using object storage for better performance.
enable_experimental_write_cache = false
## File system path for write cache, defaults to `{data_home}`.
write_cache_path = ""
## File system path for write cache, defaults to `{data_home}/object_cache/write`.
experimental_write_cache_path = ""
## Capacity for write cache. If your disk space is sufficient, it is recommended to set it larger.
write_cache_size = "5GiB"
experimental_write_cache_size = "5GiB"
## TTL for write cache.
## @toml2docs:none-default
write_cache_ttl = "8h"
experimental_write_cache_ttl = "8h"
## Buffer size for SST writing.
sst_write_buffer_size = "8MB"
@@ -565,15 +559,6 @@ aux_path = ""
## The max capacity of the staging directory.
staging_size = "2GB"
## Cache size for inverted index metadata.
metadata_cache_size = "64MiB"
## Cache size for inverted index content.
content_cache_size = "128MiB"
## Page size for inverted index content cache.
content_cache_page_size = "64KiB"
## The options for inverted index in Mito engine.
[region_engine.mito.inverted_index]
@@ -601,6 +586,15 @@ mem_threshold_on_create = "auto"
## Deprecated, use `region_engine.mito.index.aux_path` instead.
intermediate_path = ""
## Cache size for inverted index metadata.
metadata_cache_size = "64MiB"
## Cache size for inverted index content.
content_cache_size = "128MiB"
## Page size for inverted index content cache.
content_cache_page_size = "8MiB"
## The options for full-text index in Mito engine.
[region_engine.mito.fulltext_index]

View File

@@ -1,75 +0,0 @@
/*
* Copyright 2023 Greptime Team
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import * as core from "@actions/core";
import {obtainClient} from "@/common";
async function triggerWorkflow(workflowId: string, version: string) {
const docsClient = obtainClient("DOCS_REPO_TOKEN")
try {
await docsClient.rest.actions.createWorkflowDispatch({
owner: "GreptimeTeam",
repo: "docs",
workflow_id: workflowId,
ref: "main",
inputs: {
version,
},
});
console.log(`Successfully triggered ${workflowId} workflow with version ${version}`);
} catch (error) {
core.setFailed(`Failed to trigger workflow: ${error.message}`);
}
}
function determineWorkflow(version: string): [string, string] {
// Check if it's a nightly version
if (version.includes('nightly')) {
return ['bump-nightly-version.yml', version];
}
const parts = version.split('.');
if (parts.length !== 3) {
throw new Error('Invalid version format');
}
// If patch version (last number) is 0, it's a major version
// Return only major.minor version
if (parts[2] === '0') {
return ['bump-version.yml', `${parts[0]}.${parts[1]}`];
}
// Otherwise it's a patch version, use full version
return ['bump-patch-version.yml', version];
}
const version = process.env.VERSION;
if (!version) {
core.setFailed("VERSION environment variable is required");
process.exit(1);
}
// Remove 'v' prefix if exists
const cleanVersion = version.startsWith('v') ? version.slice(1) : version;
try {
const [workflowId, apiVersion] = determineWorkflow(cleanVersion);
triggerWorkflow(workflowId, apiVersion);
} catch (error) {
core.setFailed(`Error processing version: ${error.message}`);
process.exit(1);
}

View File

@@ -22,7 +22,7 @@ RUN unzip protoc-3.15.8-linux-x86_64.zip -d /usr/local/
# Install Rust
SHELL ["/bin/bash", "-c"]
RUN curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- --no-modify-path --default-toolchain none -y
ENV PATH /usr/local/bin:/root/.cargo/bin/:$PATH
ENV PATH /opt/rh/rh-python38/root/usr/bin:/usr/local/bin:/root/.cargo/bin/:$PATH
# Build the project in release mode.
RUN --mount=target=.,rw \

View File

@@ -7,8 +7,10 @@ ARG OUTPUT_DIR
ENV LANG en_US.utf8
WORKDIR /greptimedb
# Add PPA for Python 3.10.
RUN apt-get update && \
DEBIAN_FRONTEND=noninteractive apt-get install -y software-properties-common
DEBIAN_FRONTEND=noninteractive apt-get install -y software-properties-common && \
add-apt-repository ppa:deadsnakes/ppa -y
# Install dependencies.
RUN --mount=type=cache,target=/var/cache/apt \

View File

@@ -13,7 +13,12 @@ RUN apt-get update && apt-get install -y \
curl \
git \
build-essential \
pkg-config
pkg-config \
python3 \
python3-dev \
python3-pip \
&& pip3 install --upgrade pip \
&& pip3 install pyarrow
# Trust workdir
RUN git config --global --add safe.directory /greptimedb

View File

@@ -12,6 +12,8 @@ RUN yum install -y epel-release \
openssl \
openssl-devel \
centos-release-scl \
rh-python38 \
rh-python38-python-devel \
which
# Install protoc
@@ -21,7 +23,7 @@ RUN unzip protoc-3.15.8-linux-x86_64.zip -d /usr/local/
# Install Rust
SHELL ["/bin/bash", "-c"]
RUN curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- --no-modify-path --default-toolchain none -y
ENV PATH /usr/local/bin:/root/.cargo/bin/:$PATH
ENV PATH /opt/rh/rh-python38/root/usr/bin:/usr/local/bin:/root/.cargo/bin/:$PATH
# Install Rust toolchains.
ARG RUST_TOOLCHAIN

View File

@@ -6,8 +6,11 @@ ARG DOCKER_BUILD_ROOT=.
ENV LANG en_US.utf8
WORKDIR /greptimedb
# Add PPA for Python 3.10.
RUN apt-get update && \
DEBIAN_FRONTEND=noninteractive apt-get install -y software-properties-common
DEBIAN_FRONTEND=noninteractive apt-get install -y software-properties-common && \
add-apt-repository ppa:deadsnakes/ppa -y
# Install dependencies.
RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y \
libssl-dev \
@@ -17,7 +20,9 @@ RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y \
ca-certificates \
git \
build-essential \
pkg-config
pkg-config \
python3.10 \
python3.10-dev
ARG TARGETPLATFORM
RUN echo "target platform: $TARGETPLATFORM"
@@ -33,6 +38,21 @@ fi
RUN mv protoc3/bin/* /usr/local/bin/
RUN mv protoc3/include/* /usr/local/include/
# https://github.com/GreptimeTeam/greptimedb/actions/runs/10935485852/job/30357457188#step:3:7106
# `aws-lc-sys` requires gcc >= 10.3.0 to work, hence alias to use gcc-10
RUN apt-get remove -y gcc-9 g++-9 cpp-9 && \
apt-get install -y gcc-10 g++-10 cpp-10 make cmake && \
ln -sf /usr/bin/gcc-10 /usr/bin/gcc && ln -sf /usr/bin/g++-10 /usr/bin/g++ && \
ln -sf /usr/bin/gcc-10 /usr/bin/cc && \
ln -sf /usr/bin/g++-10 /usr/bin/cpp && ln -sf /usr/bin/g++-10 /usr/bin/c++ && \
cc --version && gcc --version && g++ --version && cpp --version && c++ --version
# Remove Python 3.8 and install pip.
RUN apt-get -y purge python3.8 && \
apt-get -y autoremove && \
ln -s /usr/bin/python3.10 /usr/bin/python3 && \
curl -sS https://bootstrap.pypa.io/get-pip.py | python3.10
# Silence all `safe.directory` warnings, to avoid the "detect dubious repository" error when building with submodules.
# Disabling the safe directory check here won't pose extra security issues, because in our usage for this dev build
# image, we use it solely on our own environment (that github action's VM, or ECS created dynamically by ourselves),
@@ -45,6 +65,10 @@ RUN mv protoc3/include/* /usr/local/include/
# it can be a different user that have prepared the submodules.
RUN git config --global --add safe.directory '*'
# Install Python dependencies.
COPY $DOCKER_BUILD_ROOT/docker/python/requirements.txt /etc/greptime/requirements.txt
RUN python3 -m pip install -r /etc/greptime/requirements.txt
# Install Rust.
SHELL ["/bin/bash", "-c"]
RUN curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- --no-modify-path --default-toolchain none -y

View File

@@ -0,0 +1,5 @@
numpy>=1.24.2
pandas>=1.5.3
pyarrow>=11.0.0
requests>=2.28.2
scipy>=1.10.1
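
These are the Python packages baked into the dev images; a hedged example of installing them locally from the repository root, using the path referenced by the Dockerfile `COPY` line earlier in this diff:

```bash
# Install the Python dependencies listed above.
python3 -m pip install -r docker/python/requirements.txt
```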

View File

@@ -20,3 +20,31 @@ Sample at 49 Hertz, for 10 seconds, output report in text format.
```bash
curl -X POST -s '0:4000/debug/prof/cpu?seconds=10&frequency=49&output=text' > /tmp/pprof.txt
```
## Using `perf`
First find the pid of GreptimeDB:
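
The command for locating the pid is not included in this hunk; a minimal sketch, assuming the server runs under the process name `greptime`:

```bash
# Find the pid of the running GreptimeDB process.
pgrep greptime
```
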
Using `perf record` to profile GreptimeDB, at the sampling frequency of 99 hertz, and a duration of 60 seconds:
```bash
perf record -p <pid> --call-graph dwarf -F 99 -- sleep 60
```
The result will be saved to file `perf.data`.
Then
```bash
perf script --no-inline > perf.out
```
Produce a flame graph out of it:
```bash
git clone https://github.com/brendangregg/FlameGraph
FlameGraph/stackcollapse-perf.pl perf.out > perf.folded
FlameGraph/flamegraph.pl perf.folded > perf.svg
```

View File

@@ -5296,7 +5296,7 @@
"uid": "${metrics}"
},
"editorMode": "code",
"expr": "sum by(pod, scheme, operation) (rate(opendal_operation_duration_seconds_count{pod=~\"$datanode\"}[$__rate_interval]))",
"expr": "sum by(pod, scheme, operation) (rate(opendal_requests_total{pod=~\"$datanode\"}[$__rate_interval]))",
"instant": false,
"legendFormat": "[{{pod}}]-[{{scheme}}]-[{{operation}}]-qps",
"range": true,
@@ -5392,7 +5392,7 @@
"uid": "${metrics}"
},
"editorMode": "code",
"expr": "sum by(pod, scheme) (rate(opendal_operation_duration_seconds_count{pod=~\"$datanode\", operation=\"read\"}[$__rate_interval]))",
"expr": "sum by(pod, scheme) (rate(opendal_requests_duration_seconds_count{pod=~\"$datanode\", operation=\"read\"}[$__rate_interval]))",
"instant": false,
"legendFormat": "[{{pod}}]-[{{scheme}}]-qps",
"range": true,
@@ -5488,7 +5488,7 @@
"uid": "${metrics}"
},
"editorMode": "code",
"expr": "histogram_quantile(0.99, sum by(pod, le, scheme) (rate(opendal_operation_duration_seconds_bucket{pod=~\"$datanode\",operation=\"read\"}[$__rate_interval])))",
"expr": "histogram_quantile(0.99, sum by(pod, le, scheme) (rate(opendal_requests_duration_seconds_bucket{pod=~\"$datanode\",operation=\"read\"}[$__rate_interval])))",
"instant": false,
"legendFormat": "[{{pod}}]-{{scheme}}-p99",
"range": true,
@@ -5584,7 +5584,7 @@
"uid": "${metrics}"
},
"editorMode": "code",
"expr": "sum by(pod, scheme) (rate(opendal_operation_duration_seconds_count{pod=~\"$datanode\", operation=\"write\"}[$__rate_interval]))",
"expr": "sum by(pod, scheme) (rate(opendal_requests_duration_seconds_count{pod=~\"$datanode\", operation=\"write\"}[$__rate_interval]))",
"instant": false,
"legendFormat": "[{{pod}}]-[{{scheme}}]-qps",
"range": true,
@@ -5680,7 +5680,7 @@
"uid": "${metrics}"
},
"editorMode": "code",
"expr": "histogram_quantile(0.99, sum by(pod, le, scheme) (rate(opendal_operation_duration_seconds_bucket{pod=~\"$datanode\", operation=\"write\"}[$__rate_interval])))",
"expr": "histogram_quantile(0.99, sum by(pod, le, scheme) (rate(opendal_requests_duration_seconds_bucket{pod=~\"$datanode\", operation=\"write\"}[$__rate_interval])))",
"instant": false,
"legendFormat": "[{{pod}}]-[{{scheme}}]-p99",
"range": true,
@@ -5776,7 +5776,7 @@
"uid": "${metrics}"
},
"editorMode": "code",
"expr": "sum by(pod, scheme) (rate(opendal_operation_duration_seconds_count{pod=~\"$datanode\", operation=\"list\"}[$__rate_interval]))",
"expr": "sum by(pod, scheme) (rate(opendal_requests_duration_seconds_count{pod=~\"$datanode\", operation=\"list\"}[$__rate_interval]))",
"instant": false,
"interval": "",
"legendFormat": "[{{pod}}]-[{{scheme}}]-qps",
@@ -5873,7 +5873,7 @@
"uid": "${metrics}"
},
"editorMode": "code",
"expr": "histogram_quantile(0.99, sum by(pod, le, scheme) (rate(opendal_operation_duration_seconds_bucket{pod=~\"$datanode\", operation=\"list\"}[$__rate_interval])))",
"expr": "histogram_quantile(0.99, sum by(pod, le, scheme) (rate(opendal_requests_duration_seconds_bucket{pod=~\"$datanode\", operation=\"list\"}[$__rate_interval])))",
"instant": false,
"interval": "",
"legendFormat": "[{{pod}}]-[{{scheme}}]-p99",
@@ -5970,7 +5970,7 @@
"uid": "${metrics}"
},
"editorMode": "code",
"expr": "sum by(pod, scheme, operation) (rate(opendal_operation_duration_seconds_count{pod=~\"$datanode\",operation!~\"read|write|list|stat\"}[$__rate_interval]))",
"expr": "sum by(pod, scheme, operation) (rate(opendal_requests_duration_seconds_count{pod=~\"$datanode\",operation!~\"read|write|list|stat\"}[$__rate_interval]))",
"instant": false,
"legendFormat": "[{{pod}}]-[{{scheme}}]-[{{operation}}]-qps",
"range": true,
@@ -6066,7 +6066,7 @@
"uid": "${metrics}"
},
"editorMode": "code",
"expr": "histogram_quantile(0.99, sum by(pod, le, scheme, operation) (rate(opendal_operation_duration_seconds_bucket{pod=~\"$datanode\", operation!~\"read|write|list\"}[$__rate_interval])))",
"expr": "histogram_quantile(0.99, sum by(pod, le, scheme, operation) (rate(opendal_requests_duration_seconds_bucket{pod=~\"$datanode\", operation!~\"read|write|list\"}[$__rate_interval])))",
"instant": false,
"legendFormat": "[{{pod}}]-[{{scheme}}]-[{{operation}}]-p99",
"range": true,
@@ -6298,6 +6298,6 @@
"timezone": "",
"title": "GreptimeDB Cluster Metrics",
"uid": "ce3q6xwn3xa0wa",
"version": 2,
"version": 1,
"weekStart": ""
}
}

File diff suppressed because it is too large.

View File

@@ -1,3 +1,3 @@
[toolchain]
channel = "nightly-2024-10-19"
components = ["rust-analyzer", "llvm-tools"]
components = ["rust-analyzer"]

View File

@@ -14,7 +14,6 @@
import os
import re
from multiprocessing import Pool
def find_rust_files(directory):
@@ -34,11 +33,13 @@ def extract_branch_names(file_content):
return pattern.findall(file_content)
def check_snafu_in_files(branch_name, rust_files_content):
def check_snafu_in_files(branch_name, rust_files):
branch_name_snafu = f"{branch_name}Snafu"
for content in rust_files_content.values():
if branch_name_snafu in content:
return True
for rust_file in rust_files:
with open(rust_file, "r") as file:
content = file.read()
if branch_name_snafu in content:
return True
return False
@@ -48,24 +49,21 @@ def main():
for error_file in error_files:
with open(error_file, "r") as file:
branch_names.extend(extract_branch_names(file.read()))
content = file.read()
branch_names.extend(extract_branch_names(content))
# Read all rust files into memory once
rust_files_content = {}
for rust_file in other_rust_files:
with open(rust_file, "r") as file:
rust_files_content[rust_file] = file.read()
with Pool() as pool:
results = pool.starmap(
check_snafu_in_files, [(bn, rust_files_content) for bn in branch_names]
)
unused_snafu = [bn for bn, found in zip(branch_names, results) if not found]
unused_snafu = [
branch_name
for branch_name in branch_names
if not check_snafu_in_files(branch_name, other_rust_files)
]
if unused_snafu:
print("Unused error variants:")
for name in unused_snafu:
print(name)
if unused_snafu:
raise SystemExit(1)

View File

@@ -1,5 +1,5 @@
let
nixpkgs = fetchTarball "https://github.com/NixOS/nixpkgs/tarball/nixos-24.11";
nixpkgs = fetchTarball "https://github.com/NixOS/nixpkgs/tarball/nixos-unstable";
fenix = import (fetchTarball "https://github.com/nix-community/fenix/archive/main.tar.gz") {};
pkgs = import nixpkgs { config = {}; overlays = []; };
in
@@ -11,20 +11,16 @@ pkgs.mkShell rec {
clang
gcc
protobuf
gnumake
mold
(fenix.fromToolchainFile {
dir = ./.;
})
cargo-nextest
cargo-llvm-cov
taplo
curl
];
buildInputs = with pkgs; [
libgit2
libz
];
LD_LIBRARY_PATH = pkgs.lib.makeLibraryPath buildInputs;
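
To use this development shell, a minimal sketch assuming Nix is installed and run from the repository root where shell.nix lives:

```bash
# Enter the development environment defined by the shell.nix above.
nix-shell
```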

View File

@@ -57,13 +57,13 @@ pub fn try_as_column_schema(column_def: &ColumnDef) -> Result<ColumnSchema> {
}
if let Some(options) = column_def.options.as_ref() {
if let Some(fulltext) = options.options.get(FULLTEXT_GRPC_KEY) {
metadata.insert(FULLTEXT_KEY.to_string(), fulltext.to_owned());
metadata.insert(FULLTEXT_KEY.to_string(), fulltext.clone());
}
if let Some(inverted_index) = options.options.get(INVERTED_INDEX_GRPC_KEY) {
metadata.insert(INVERTED_INDEX_KEY.to_string(), inverted_index.to_owned());
metadata.insert(INVERTED_INDEX_KEY.to_string(), inverted_index.clone());
}
if let Some(skipping_index) = options.options.get(SKIPPING_INDEX_GRPC_KEY) {
metadata.insert(SKIPPING_INDEX_KEY.to_string(), skipping_index.to_owned());
metadata.insert(SKIPPING_INDEX_KEY.to_string(), skipping_index.clone());
}
}
@@ -82,7 +82,7 @@ pub fn options_from_column_schema(column_schema: &ColumnSchema) -> Option<Column
if let Some(fulltext) = column_schema.metadata().get(FULLTEXT_KEY) {
options
.options
.insert(FULLTEXT_GRPC_KEY.to_string(), fulltext.to_owned());
.insert(FULLTEXT_GRPC_KEY.to_string(), fulltext.clone());
}
if let Some(inverted_index) = column_schema.metadata().get(INVERTED_INDEX_KEY) {
options
@@ -181,14 +181,14 @@ mod tests {
let options = options_from_column_schema(&schema);
assert!(options.is_none());
let mut schema = ColumnSchema::new("test", ConcreteDataType::string_datatype(), true)
let schema = ColumnSchema::new("test", ConcreteDataType::string_datatype(), true)
.with_fulltext_options(FulltextOptions {
enable: true,
analyzer: FulltextAnalyzer::English,
case_sensitive: false,
})
.unwrap();
schema.with_inverted_index(true);
.unwrap()
.set_inverted_index(true);
let options = options_from_column_schema(&schema).unwrap();
assert_eq!(
options.options.get(FULLTEXT_GRPC_KEY).unwrap(),

View File

@@ -122,6 +122,13 @@ pub enum Error {
source: BoxedError,
},
#[snafu(display("Failed to re-compile script due to internal error"))]
CompileScriptInternal {
#[snafu(implicit)]
location: Location,
source: BoxedError,
},
#[snafu(display("Failed to create table, table info: {}", table_info))]
CreateTable {
table_info: String,
@@ -336,7 +343,9 @@ impl ErrorExt for Error {
Error::DecodePlan { source, .. } => source.status_code(),
Error::InvalidTableInfoInCatalog { source, .. } => source.status_code(),
Error::Internal { source, .. } => source.status_code(),
Error::CompileScriptInternal { source, .. } | Error::Internal { source, .. } => {
source.status_code()
}
Error::QueryAccessDenied { .. } => StatusCode::AccessDenied,
Error::Datafusion { error, .. } => datafusion_status_code::<Self>(error, None),

View File

@@ -18,7 +18,7 @@ use std::sync::atomic::{AtomicUsize, Ordering};
use std::sync::{Arc, Mutex};
use std::time::Duration;
use common_error::ext::BoxedError;
use common_error::ext::{BoxedError, ErrorExt};
use common_meta::cache_invalidator::KvCacheInvalidator;
use common_meta::error::Error::CacheNotGet;
use common_meta::error::{CacheNotGetSnafu, Error, ExternalSnafu, GetKvCacheSnafu, Result};
@@ -37,6 +37,7 @@ use snafu::{OptionExt, ResultExt};
use crate::metrics::{
METRIC_CATALOG_KV_BATCH_GET, METRIC_CATALOG_KV_GET, METRIC_CATALOG_KV_REMOTE_GET,
METRIC_META_CLIENT_GET,
};
const DEFAULT_CACHE_MAX_CAPACITY: u64 = 10000;
@@ -292,7 +293,7 @@ impl KvBackend for CachedKvBackend {
}
.map_err(|e| {
GetKvCacheSnafu {
err_msg: e.to_string(),
err_msg: e.output_msg(),
}
.build()
});
@@ -445,6 +446,8 @@ impl KvBackend for MetaKvBackend {
}
async fn get(&self, key: &[u8]) -> Result<Option<KeyValue>> {
let _timer = METRIC_META_CLIENT_GET.start_timer();
let mut response = self
.client
.range(RangeRequest::new().with_key(key))

View File

@@ -34,4 +34,6 @@ lazy_static! {
register_histogram!("greptime_catalog_kv_get", "catalog kv get").unwrap();
pub static ref METRIC_CATALOG_KV_BATCH_GET: Histogram =
register_histogram!("greptime_catalog_kv_batch_get", "catalog kv batch get").unwrap();
pub static ref METRIC_META_CLIENT_GET: Histogram =
register_histogram!("greptime_meta_client_get", "meta client get").unwrap();
}
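A minimal sketch of the register-then-time pattern that the new `METRIC_META_CLIENT_GET` histogram follows, assuming the `prometheus` and `lazy_static` crates; the metric name below is illustrative:
use lazy_static::lazy_static;
use prometheus::{register_histogram, Histogram};
lazy_static! {
    // Registered once in the default prometheus registry.
    static ref EXAMPLE_CLIENT_GET: Histogram =
        register_histogram!("example_client_get", "example client get").unwrap();
}
fn lookup() {
    // The timer observes the elapsed seconds into the histogram when it is dropped.
    let _timer = EXAMPLE_CLIENT_GET.start_timer();
    // ... perform the lookup ...
}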

View File

@@ -58,8 +58,6 @@ pub(crate) const TIME_INDEX_CONSTRAINT_NAME: &str = "TIME INDEX";
pub(crate) const INVERTED_INDEX_CONSTRAINT_NAME: &str = "INVERTED INDEX";
/// Fulltext index constraint name
pub(crate) const FULLTEXT_INDEX_CONSTRAINT_NAME: &str = "FULLTEXT INDEX";
/// Skipping index constraint name
pub(crate) const SKIPPING_INDEX_CONSTRAINT_NAME: &str = "SKIPPING INDEX";
/// The virtual table implementation for `information_schema.KEY_COLUMN_USAGE`.
pub(super) struct InformationSchemaKeyColumnUsage {
@@ -227,12 +225,6 @@ impl InformationSchemaKeyColumnUsageBuilder {
let keys = &table_info.meta.primary_key_indices;
let schema = table.schema();
// For compatibility, use primary key columns as inverted index columns.
let pk_as_inverted_index = !schema
.column_schemas()
.iter()
.any(|c| c.has_inverted_index_key());
for (idx, column) in schema.column_schemas().iter().enumerate() {
let mut constraints = vec![];
if column.is_time_index() {
@@ -250,20 +242,14 @@ impl InformationSchemaKeyColumnUsageBuilder {
// TODO(dimbtp): foreign key constraint not supported yet
if keys.contains(&idx) {
constraints.push(PRI_CONSTRAINT_NAME);
if pk_as_inverted_index {
constraints.push(INVERTED_INDEX_CONSTRAINT_NAME);
}
}
if column.is_inverted_indexed() {
constraints.push(INVERTED_INDEX_CONSTRAINT_NAME);
}
if column.is_fulltext_indexed() {
if column.has_fulltext_index_key() {
constraints.push(FULLTEXT_INDEX_CONSTRAINT_NAME);
}
if column.is_skipping_indexed() {
constraints.push(SKIPPING_INDEX_CONSTRAINT_NAME);
}
if !constraints.is_empty() {
let aggregated_constraints = constraints.join(", ");

View File

@@ -14,7 +14,6 @@
mod pg_catalog_memory_table;
mod pg_class;
mod pg_database;
mod pg_namespace;
mod table_names;
@@ -27,7 +26,6 @@ use lazy_static::lazy_static;
use paste::paste;
use pg_catalog_memory_table::get_schema_columns;
use pg_class::PGClass;
use pg_database::PGDatabase;
use pg_namespace::PGNamespace;
use session::context::{Channel, QueryContext};
use table::TableRef;
@@ -115,10 +113,6 @@ impl PGCatalogProvider {
PG_CLASS.to_string(),
self.build_table(PG_CLASS).expect(PG_NAMESPACE),
);
tables.insert(
PG_DATABASE.to_string(),
self.build_table(PG_DATABASE).expect(PG_DATABASE),
);
self.tables = tables;
}
}
@@ -141,11 +135,6 @@ impl SystemSchemaProviderInner for PGCatalogProvider {
self.catalog_manager.clone(),
self.namespace_oid_map.clone(),
))),
table_names::PG_DATABASE => Some(Arc::new(PGDatabase::new(
self.catalog_name.clone(),
self.catalog_manager.clone(),
self.namespace_oid_map.clone(),
))),
_ => None,
}
}

View File

@@ -1,214 +0,0 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::sync::{Arc, Weak};
use arrow_schema::SchemaRef as ArrowSchemaRef;
use common_catalog::consts::PG_CATALOG_PG_DATABASE_TABLE_ID;
use common_error::ext::BoxedError;
use common_recordbatch::adapter::RecordBatchStreamAdapter;
use common_recordbatch::{DfSendableRecordBatchStream, RecordBatch};
use datafusion::execution::TaskContext;
use datafusion::physical_plan::stream::RecordBatchStreamAdapter as DfRecordBatchStreamAdapter;
use datafusion::physical_plan::streaming::PartitionStream as DfPartitionStream;
use datatypes::scalars::ScalarVectorBuilder;
use datatypes::schema::{Schema, SchemaRef};
use datatypes::value::Value;
use datatypes::vectors::{StringVectorBuilder, UInt32VectorBuilder, VectorRef};
use snafu::{OptionExt, ResultExt};
use store_api::storage::ScanRequest;
use super::pg_namespace::oid_map::PGNamespaceOidMapRef;
use super::{query_ctx, OID_COLUMN_NAME, PG_DATABASE};
use crate::error::{
CreateRecordBatchSnafu, InternalSnafu, Result, UpgradeWeakCatalogManagerRefSnafu,
};
use crate::information_schema::Predicates;
use crate::system_schema::utils::tables::{string_column, u32_column};
use crate::system_schema::SystemTable;
use crate::CatalogManager;
// === column name ===
pub const DATNAME: &str = "datname";
/// The initial capacity of the vector builders.
const INIT_CAPACITY: usize = 42;
/// The `pg_catalog.pg_database` table implementation.
pub(super) struct PGDatabase {
schema: SchemaRef,
catalog_name: String,
catalog_manager: Weak<dyn CatalogManager>,
// Workaround to convert schema_name to a numeric id
namespace_oid_map: PGNamespaceOidMapRef,
}
impl PGDatabase {
pub(super) fn new(
catalog_name: String,
catalog_manager: Weak<dyn CatalogManager>,
namespace_oid_map: PGNamespaceOidMapRef,
) -> Self {
Self {
schema: Self::schema(),
catalog_name,
catalog_manager,
namespace_oid_map,
}
}
fn schema() -> SchemaRef {
Arc::new(Schema::new(vec![
u32_column(OID_COLUMN_NAME),
string_column(DATNAME),
]))
}
fn builder(&self) -> PGCDatabaseBuilder {
PGCDatabaseBuilder::new(
self.schema.clone(),
self.catalog_name.clone(),
self.catalog_manager.clone(),
self.namespace_oid_map.clone(),
)
}
}
impl DfPartitionStream for PGDatabase {
fn schema(&self) -> &ArrowSchemaRef {
self.schema.arrow_schema()
}
fn execute(&self, _: Arc<TaskContext>) -> DfSendableRecordBatchStream {
let schema = self.schema.arrow_schema().clone();
let mut builder = self.builder();
Box::pin(DfRecordBatchStreamAdapter::new(
schema,
futures::stream::once(async move {
builder
.make_database(None)
.await
.map(|x| x.into_df_record_batch())
.map_err(Into::into)
}),
))
}
}
impl SystemTable for PGDatabase {
fn table_id(&self) -> table::metadata::TableId {
PG_CATALOG_PG_DATABASE_TABLE_ID
}
fn table_name(&self) -> &'static str {
PG_DATABASE
}
fn schema(&self) -> SchemaRef {
self.schema.clone()
}
fn to_stream(
&self,
request: ScanRequest,
) -> Result<common_recordbatch::SendableRecordBatchStream> {
let schema = self.schema.arrow_schema().clone();
let mut builder = self.builder();
let stream = Box::pin(DfRecordBatchStreamAdapter::new(
schema,
futures::stream::once(async move {
builder
.make_database(Some(request))
.await
.map(|x| x.into_df_record_batch())
.map_err(Into::into)
}),
));
Ok(Box::pin(
RecordBatchStreamAdapter::try_new(stream)
.map_err(BoxedError::new)
.context(InternalSnafu)?,
))
}
}
/// Builds the `pg_catalog.pg_database` table row by row.
/// `oid` uses the schema name as a workaround since we don't have a numeric schema id.
/// `datname` is the schema name.
struct PGCDatabaseBuilder {
schema: SchemaRef,
catalog_name: String,
catalog_manager: Weak<dyn CatalogManager>,
namespace_oid_map: PGNamespaceOidMapRef,
oid: UInt32VectorBuilder,
datname: StringVectorBuilder,
}
impl PGCDatabaseBuilder {
fn new(
schema: SchemaRef,
catalog_name: String,
catalog_manager: Weak<dyn CatalogManager>,
namespace_oid_map: PGNamespaceOidMapRef,
) -> Self {
Self {
schema,
catalog_name,
catalog_manager,
namespace_oid_map,
oid: UInt32VectorBuilder::with_capacity(INIT_CAPACITY),
datname: StringVectorBuilder::with_capacity(INIT_CAPACITY),
}
}
async fn make_database(&mut self, request: Option<ScanRequest>) -> Result<RecordBatch> {
let catalog_name = self.catalog_name.clone();
let catalog_manager = self
.catalog_manager
.upgrade()
.context(UpgradeWeakCatalogManagerRefSnafu)?;
let predicates = Predicates::from_scan_request(&request);
for schema_name in catalog_manager
.schema_names(&catalog_name, query_ctx())
.await?
{
self.add_database(&predicates, &schema_name);
}
self.finish()
}
fn add_database(&mut self, predicates: &Predicates, schema_name: &str) {
let oid = self.namespace_oid_map.get_oid(schema_name);
let row: [(&str, &Value); 2] = [
(OID_COLUMN_NAME, &Value::from(oid)),
(DATNAME, &Value::from(schema_name)),
];
if !predicates.eval(&row) {
return;
}
self.oid.push(Some(oid));
self.datname.push(Some(schema_name));
}
fn finish(&mut self) -> Result<RecordBatch> {
let columns: Vec<VectorRef> =
vec![Arc::new(self.oid.finish()), Arc::new(self.datname.finish())];
RecordBatch::new(self.schema.clone(), columns).context(CreateRecordBatchSnafu)
}
}

View File

@@ -12,11 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
// https://www.postgresql.org/docs/current/catalog-pg-database.html
pub const PG_DATABASE: &str = "pg_database";
// https://www.postgresql.org/docs/current/catalog-pg-namespace.html
pub const PG_DATABASE: &str = "pg_databases";
pub const PG_NAMESPACE: &str = "pg_namespace";
// https://www.postgresql.org/docs/current/catalog-pg-class.html
pub const PG_CLASS: &str = "pg_class";
// https://www.postgresql.org/docs/current/catalog-pg-type.html
pub const PG_TYPE: &str = "pg_type";

View File

@@ -4,9 +4,6 @@ version.workspace = true
edition.workspace = true
license.workspace = true
[features]
pg_kvbackend = ["common-meta/pg_kvbackend"]
[lints]
workspace = true
@@ -59,6 +56,7 @@ tokio.workspace = true
tracing-appender.workspace = true
[dev-dependencies]
common-test-util.workspace = true
common-version.workspace = true
serde.workspace = true
tempfile.workspace = true

View File

@@ -22,9 +22,6 @@ use clap::Parser;
use common_error::ext::BoxedError;
use common_meta::key::{TableMetadataManager, TableMetadataManagerRef};
use common_meta::kv_backend::etcd::EtcdStore;
use common_meta::kv_backend::memory::MemoryKvBackend;
#[cfg(feature = "pg_kvbackend")]
use common_meta::kv_backend::postgres::PgStore;
use common_meta::peer::Peer;
use common_meta::rpc::router::{Region, RegionRoute};
use common_telemetry::info;
@@ -58,34 +55,18 @@ where
#[derive(Debug, Default, Parser)]
pub struct BenchTableMetadataCommand {
#[clap(long)]
etcd_addr: Option<String>,
#[cfg(feature = "pg_kvbackend")]
#[clap(long)]
postgres_addr: Option<String>,
etcd_addr: String,
#[clap(long)]
count: u32,
}
impl BenchTableMetadataCommand {
pub async fn build(&self) -> std::result::Result<Box<dyn Tool>, BoxedError> {
let kv_backend = if let Some(etcd_addr) = &self.etcd_addr {
info!("Using etcd as kv backend");
EtcdStore::with_endpoints([etcd_addr], 128).await.unwrap()
} else {
Arc::new(MemoryKvBackend::new())
};
let etcd_store = EtcdStore::with_endpoints([&self.etcd_addr], 128)
.await
.unwrap();
#[cfg(feature = "pg_kvbackend")]
let kv_backend = if let Some(postgres_addr) = &self.postgres_addr {
info!("Using postgres as kv backend");
PgStore::with_url(postgres_addr, "greptime_metakv", 128)
.await
.unwrap()
} else {
kv_backend
};
let table_metadata_manager = Arc::new(TableMetadataManager::new(kv_backend));
let table_metadata_manager = Arc::new(TableMetadataManager::new(etcd_store));
let tool = BenchTableMetadata {
table_metadata_manager,

View File

@@ -10,8 +10,9 @@ name = "greptime"
path = "src/bin/greptime.rs"
[features]
default = ["servers/pprof", "servers/mem-prof"]
default = ["python", "servers/pprof", "servers/mem-prof"]
tokio-console = ["common-telemetry/tokio-console"]
python = ["frontend/python"]
[lints]
workspace = true

View File

@@ -63,7 +63,9 @@ impl Instance {
&self.datanode
}
/// allow customizing datanode for downstream projects
/// Gets a mutable Datanode instance for changing some internal state before starting it.
// Useful for wrapping the Datanode instance. Please do not remove this method even if it
// appears to be unused.
pub fn datanode_mut(&mut self) -> &mut Datanode {
&mut self.datanode
}
@@ -276,8 +278,7 @@ impl StartCommand {
info!("Datanode options: {:#?}", opts);
let plugin_opts = opts.plugins;
let mut opts = opts.component;
opts.grpc.detect_hostname();
let opts = opts.component;
let mut plugins = Plugins::new();
plugins::setup_datanode_plugins(&mut plugins, &plugin_opts, &opts)
.await

View File

@@ -13,7 +13,6 @@
// limitations under the License.
use std::sync::Arc;
use std::time::Duration;
use cache::{build_fundamental_cache_registry, with_default_composite_cache_registry};
use catalog::information_extension::DistributedInformationExtension;
@@ -67,11 +66,6 @@ impl Instance {
pub fn flownode(&self) -> &FlownodeInstance {
&self.flownode
}
/// allow customizing flownode for downstream projects
pub fn flownode_mut(&mut self) -> &mut FlownodeInstance {
&mut self.flownode
}
}
#[async_trait::async_trait]
@@ -143,11 +137,6 @@ struct StartCommand {
/// The prefix of environment variables, default is `GREPTIMEDB_FLOWNODE`;
#[clap(long, default_value = "GREPTIMEDB_FLOWNODE")]
env_prefix: String,
#[clap(long)]
http_addr: Option<String>,
/// HTTP request timeout in seconds.
#[clap(long)]
http_timeout: Option<u64>,
}
impl StartCommand {
@@ -204,14 +193,6 @@ impl StartCommand {
opts.mode = Mode::Distributed;
}
if let Some(http_addr) = &self.http_addr {
opts.http.addr.clone_from(http_addr);
}
if let Some(http_timeout) = self.http_timeout {
opts.http.timeout = Duration::from_secs(http_timeout);
}
if let (Mode::Distributed, None) = (&opts.mode, &opts.node_id) {
return MissingConfigSnafu {
msg: "Missing node id option",
@@ -236,8 +217,7 @@ impl StartCommand {
info!("Flownode start command: {:#?}", self);
info!("Flownode options: {:#?}", opts);
let mut opts = opts.component;
opts.grpc.detect_hostname();
let opts = opts.component;
// TODO(discord9): make it not optional after cluster id is required
let cluster_id = opts.cluster_id.unwrap_or(0);

View File

@@ -268,8 +268,7 @@ impl StartCommand {
info!("Frontend options: {:#?}", opts);
let plugin_opts = opts.plugins;
let mut opts = opts.component;
opts.grpc.detect_hostname();
let opts = opts.component;
let mut plugins = Plugins::new();
plugins::setup_frontend_plugins(&mut plugins, &plugin_opts, &opts)
.await

View File

@@ -249,6 +249,8 @@ impl StartCommand {
if let Some(backend) = &self.backend {
opts.backend.clone_from(backend);
} else {
opts.backend = BackendImpl::default()
}
// Disable dashboard in metasrv.
@@ -272,8 +274,7 @@ impl StartCommand {
info!("Metasrv options: {:#?}", opts);
let plugin_opts = opts.plugins;
let mut opts = opts.component;
opts.detect_server_addr();
let opts = opts.component;
let mut plugins = Plugins::new();
plugins::setup_metasrv_plugins(&mut plugins, &plugin_opts, &opts)
.await

View File

@@ -54,7 +54,7 @@ use datanode::config::{DatanodeOptions, ProcedureConfig, RegionEngineConfig, Sto
use datanode::datanode::{Datanode, DatanodeBuilder};
use datanode::region_server::RegionServer;
use file_engine::config::EngineConfig as FileEngineConfig;
use flow::{FlowConfig, FlowWorkerManager, FlownodeBuilder, FlownodeOptions, FrontendInvoker};
use flow::{FlowWorkerManager, FlownodeBuilder, FrontendInvoker};
use frontend::frontend::FrontendOptions;
use frontend::instance::builder::FrontendBuilder;
use frontend::instance::{FrontendInstance, Instance as FeInstance, StandaloneDatanodeManager};
@@ -145,7 +145,6 @@ pub struct StandaloneOptions {
pub storage: StorageConfig,
pub metadata_store: KvBackendConfig,
pub procedure: ProcedureConfig,
pub flow: FlowConfig,
pub logging: LoggingOptions,
pub user_provider: Option<String>,
/// Options for different store engines.
@@ -174,7 +173,6 @@ impl Default for StandaloneOptions {
storage: StorageConfig::default(),
metadata_store: KvBackendConfig::default(),
procedure: ProcedureConfig::default(),
flow: FlowConfig::default(),
logging: LoggingOptions::default(),
export_metrics: ExportMetricsOption::default(),
user_provider: None,
@@ -463,8 +461,7 @@ impl StartCommand {
let mut plugins = Plugins::new();
let plugin_opts = opts.plugins;
let mut opts = opts.component;
opts.grpc.detect_hostname();
let opts = opts.component;
let fe_opts = opts.frontend_options();
let dn_opts = opts.datanode_options();
@@ -525,12 +522,8 @@ impl StartCommand {
Self::create_table_metadata_manager(kv_backend.clone()).await?;
let flow_metadata_manager = Arc::new(FlowMetadataManager::new(kv_backend.clone()));
let flownode_options = FlownodeOptions {
flow: opts.flow.clone(),
..Default::default()
};
let flow_builder = FlownodeBuilder::new(
flownode_options,
Default::default(),
plugins.clone(),
table_metadata_manager.clone(),
catalog_manager.clone(),

View File

@@ -69,7 +69,7 @@ fn test_load_datanode_example_config() {
region_engine: vec![
RegionEngineConfig::Mito(MitoConfig {
auto_flush_interval: Duration::from_secs(3600),
write_cache_ttl: Some(Duration::from_secs(60 * 60 * 8)),
experimental_write_cache_ttl: Some(Duration::from_secs(60 * 60 * 8)),
..Default::default()
}),
RegionEngineConfig::File(EngineConfig {}),
@@ -85,9 +85,7 @@ fn test_load_datanode_example_config() {
remote_write: Some(Default::default()),
..Default::default()
},
grpc: GrpcOptions::default()
.with_addr("127.0.0.1:3001")
.with_hostname("127.0.0.1:3001"),
grpc: GrpcOptions::default().with_addr("127.0.0.1:3001"),
rpc_addr: Some("127.0.0.1:3001".to_string()),
rpc_hostname: Some("127.0.0.1".to_string()),
rpc_runtime_size: Some(8),
@@ -139,7 +137,6 @@ fn test_load_frontend_example_config() {
remote_write: Some(Default::default()),
..Default::default()
},
grpc: GrpcOptions::default().with_hostname("127.0.0.1:4001"),
..Default::default()
},
..Default::default()
@@ -157,7 +154,6 @@ fn test_load_metasrv_example_config() {
component: MetasrvOptions {
selector: SelectorType::default(),
data_home: "/tmp/metasrv/".to_string(),
server_addr: "127.0.0.1:3002".to_string(),
logging: LoggingOptions {
dir: "/tmp/greptimedb/logs".to_string(),
level: Some("info".to_string()),
@@ -207,7 +203,7 @@ fn test_load_standalone_example_config() {
region_engine: vec![
RegionEngineConfig::Mito(MitoConfig {
auto_flush_interval: Duration::from_secs(3600),
write_cache_ttl: Some(Duration::from_secs(60 * 60 * 8)),
experimental_write_cache_ttl: Some(Duration::from_secs(60 * 60 * 8)),
..Default::default()
}),
RegionEngineConfig::File(EngineConfig {}),

View File

@@ -4,9 +4,6 @@ version.workspace = true
edition.workspace = true
license.workspace = true
[features]
testing = []
[lints]
workspace = true

View File

@@ -17,7 +17,6 @@ use std::io;
use std::ops::Range;
use std::path::Path;
use std::pin::Pin;
use std::sync::atomic::{AtomicU64, Ordering};
use std::sync::Arc;
use std::task::{Context, Poll};
@@ -34,22 +33,19 @@ pub struct Metadata {
pub content_length: u64,
}
/// `SizeAwareRangeReader` is a `RangeReader` that supports setting a file size hint.
pub trait SizeAwareRangeReader: RangeReader {
/// `RangeReader` reads a range of bytes from a source.
#[async_trait]
pub trait RangeReader: Send + Unpin {
/// Sets the file size hint for the reader.
///
/// It's used to optimize the reading process by reducing the number of remote requests.
fn with_file_size_hint(&mut self, file_size_hint: u64);
}
/// `RangeReader` reads a range of bytes from a source.
#[async_trait]
pub trait RangeReader: Sync + Send + Unpin {
/// Returns the metadata of the source.
async fn metadata(&self) -> io::Result<Metadata>;
async fn metadata(&mut self) -> io::Result<Metadata>;
/// Reads the bytes in the given range.
async fn read(&self, range: Range<u64>) -> io::Result<Bytes>;
async fn read(&mut self, range: Range<u64>) -> io::Result<Bytes>;
/// Reads the bytes in the given range into the buffer.
///
@@ -57,14 +53,18 @@ pub trait RangeReader: Sync + Send + Unpin {
/// - If the buffer is insufficient to hold the bytes, it will either:
/// - Allocate additional space (e.g., for `Vec<u8>`)
/// - Panic (e.g., for `&mut [u8]`)
async fn read_into(&self, range: Range<u64>, buf: &mut (impl BufMut + Send)) -> io::Result<()> {
async fn read_into(
&mut self,
range: Range<u64>,
buf: &mut (impl BufMut + Send),
) -> io::Result<()> {
let bytes = self.read(range).await?;
buf.put_slice(&bytes);
Ok(())
}
/// Reads the bytes in the given ranges.
async fn read_vec(&self, ranges: &[Range<u64>]) -> io::Result<Vec<Bytes>> {
async fn read_vec(&mut self, ranges: &[Range<u64>]) -> io::Result<Vec<Bytes>> {
let mut result = Vec::with_capacity(ranges.len());
for range in ranges {
result.push(self.read(range.clone()).await?);
@@ -74,20 +74,25 @@ pub trait RangeReader: Sync + Send + Unpin {
}
#[async_trait]
impl<R: ?Sized + RangeReader> RangeReader for &R {
async fn metadata(&self) -> io::Result<Metadata> {
impl<R: ?Sized + RangeReader> RangeReader for &mut R {
fn with_file_size_hint(&mut self, file_size_hint: u64) {
(*self).with_file_size_hint(file_size_hint)
}
async fn metadata(&mut self) -> io::Result<Metadata> {
(*self).metadata().await
}
async fn read(&self, range: Range<u64>) -> io::Result<Bytes> {
async fn read(&mut self, range: Range<u64>) -> io::Result<Bytes> {
(*self).read(range).await
}
async fn read_into(&self, range: Range<u64>, buf: &mut (impl BufMut + Send)) -> io::Result<()> {
async fn read_into(
&mut self,
range: Range<u64>,
buf: &mut (impl BufMut + Send),
) -> io::Result<()> {
(*self).read_into(range, buf).await
}
async fn read_vec(&self, ranges: &[Range<u64>]) -> io::Result<Vec<Bytes>> {
async fn read_vec(&mut self, ranges: &[Range<u64>]) -> io::Result<Vec<Bytes>> {
(*self).read_vec(ranges).await
}
}
@@ -115,7 +120,7 @@ pub struct AsyncReadAdapter<R> {
impl<R: RangeReader + 'static> AsyncReadAdapter<R> {
pub async fn new(inner: R) -> io::Result<Self> {
let inner = inner;
let mut inner = inner;
let metadata = inner.metadata().await?;
Ok(AsyncReadAdapter {
inner: Arc::new(Mutex::new(inner)),
@@ -155,7 +160,7 @@ impl<R: RangeReader + 'static> AsyncRead for AsyncReadAdapter<R> {
let range = *this.position..(*this.position + size);
let inner = this.inner.clone();
let fut = async move {
let inner = inner.lock().await;
let mut inner = inner.lock().await;
inner.read(range).await
};
@@ -190,24 +195,27 @@ impl<R: RangeReader + 'static> AsyncRead for AsyncReadAdapter<R> {
#[async_trait]
impl RangeReader for Vec<u8> {
async fn metadata(&self) -> io::Result<Metadata> {
fn with_file_size_hint(&mut self, _file_size_hint: u64) {
// do nothing
}
async fn metadata(&mut self) -> io::Result<Metadata> {
Ok(Metadata {
content_length: self.len() as u64,
})
}
async fn read(&self, range: Range<u64>) -> io::Result<Bytes> {
async fn read(&mut self, range: Range<u64>) -> io::Result<Bytes> {
let bytes = Bytes::copy_from_slice(&self[range.start as usize..range.end as usize]);
Ok(bytes)
}
}
// TODO(weny): consider replacing `tokio::fs::File` with opendal reader.
/// `FileReader` is a `RangeReader` for reading a file.
pub struct FileReader {
content_length: u64,
position: AtomicU64,
file: Mutex<tokio::fs::File>,
position: u64,
file: tokio::fs::File,
}
impl FileReader {
@@ -217,37 +225,32 @@ impl FileReader {
let metadata = file.metadata().await?;
Ok(FileReader {
content_length: metadata.len(),
position: AtomicU64::new(0),
file: Mutex::new(file),
position: 0,
file,
})
}
}
#[cfg(any(test, feature = "testing"))]
impl SizeAwareRangeReader for FileReader {
fn with_file_size_hint(&mut self, _file_size_hint: u64) {
// do nothing
}
}
#[async_trait]
impl RangeReader for FileReader {
async fn metadata(&self) -> io::Result<Metadata> {
fn with_file_size_hint(&mut self, _file_size_hint: u64) {
// do nothing
}
async fn metadata(&mut self) -> io::Result<Metadata> {
Ok(Metadata {
content_length: self.content_length,
})
}
async fn read(&self, mut range: Range<u64>) -> io::Result<Bytes> {
let mut file = self.file.lock().await;
if range.start != self.position.load(Ordering::Relaxed) {
file.seek(io::SeekFrom::Start(range.start)).await?;
self.position.store(range.start, Ordering::Relaxed);
async fn read(&mut self, mut range: Range<u64>) -> io::Result<Bytes> {
if range.start != self.position {
self.file.seek(io::SeekFrom::Start(range.start)).await?;
self.position = range.start;
}
range.end = range.end.min(self.content_length);
if range.end <= self.position.load(Ordering::Relaxed) {
if range.end <= self.position {
return Err(io::Error::new(
io::ErrorKind::UnexpectedEof,
"Start of range is out of bounds",
@@ -256,8 +259,8 @@ impl RangeReader for FileReader {
let mut buf = vec![0; (range.end - range.start) as usize];
file.read_exact(&mut buf).await?;
self.position.store(range.end, Ordering::Relaxed);
self.file.read_exact(&mut buf).await?;
self.position = range.end;
Ok(Bytes::from(buf))
}
@@ -298,7 +301,7 @@ mod tests {
let data = b"hello world";
tokio::fs::write(path, data).await.unwrap();
let reader = FileReader::new(path).await.unwrap();
let mut reader = FileReader::new(path).await.unwrap();
let metadata = reader.metadata().await.unwrap();
assert_eq!(metadata.content_length, data.len() as u64);

View File

@@ -109,7 +109,6 @@ pub const INFORMATION_SCHEMA_REGION_STATISTICS_TABLE_ID: u32 = 35;
pub const PG_CATALOG_PG_CLASS_TABLE_ID: u32 = 256;
pub const PG_CATALOG_PG_TYPE_TABLE_ID: u32 = 257;
pub const PG_CATALOG_PG_NAMESPACE_TABLE_ID: u32 = 258;
pub const PG_CATALOG_PG_DATABASE_TABLE_ID: u32 = 259;
// ----- End of pg_catalog tables -----

View File

@@ -73,21 +73,14 @@ pub trait Configurable: Serialize + DeserializeOwned + Default + Sized {
layered_config = layered_config.add_source(File::new(config_file, FileFormat::Toml));
}
let mut opts: Self = layered_config
let opts = layered_config
.build()
.and_then(|x| x.try_deserialize())
.context(LoadLayeredConfigSnafu)?;
opts.validate_sanitize()?;
Ok(opts)
}
/// Validate(and possibly sanitize) the configuration.
fn validate_sanitize(&mut self) -> Result<()> {
Ok(())
}
/// List of toml keys that should be parsed as a list.
fn env_list_keys() -> Option<&'static [&'static str]> {
None

View File

@@ -180,7 +180,7 @@ pub enum Error {
#[snafu(display("Failed to parse format {} with value: {}", key, value))]
ParseFormat {
key: String,
key: &'static str,
value: String,
#[snafu(implicit)]
location: Location,

View File

@@ -35,23 +35,10 @@ data = {
"bigint_other": [5, -5, 1, 5, 5],
"utf8_increase": ["a", "bb", "ccc", "dddd", "eeeee"],
"utf8_decrease": ["eeeee", "dddd", "ccc", "bb", "a"],
"timestamp_simple": [
datetime.datetime(2023, 4, 1, 20, 15, 30, 2000),
datetime.datetime.fromtimestamp(int("1629617204525777000") / 1000000000),
datetime.datetime(2023, 1, 1),
datetime.datetime(2023, 2, 1),
datetime.datetime(2023, 3, 1),
],
"date_simple": [
datetime.date(2023, 4, 1),
datetime.date(2023, 3, 1),
datetime.date(2023, 1, 1),
datetime.date(2023, 2, 1),
datetime.date(2023, 3, 1),
],
"timestamp_simple": [datetime.datetime(2023, 4, 1, 20, 15, 30, 2000), datetime.datetime.fromtimestamp(int('1629617204525777000')/1000000000), datetime.datetime(2023, 1, 1), datetime.datetime(2023, 2, 1), datetime.datetime(2023, 3, 1)],
"date_simple": [datetime.date(2023, 4, 1), datetime.date(2023, 3, 1), datetime.date(2023, 1, 1), datetime.date(2023, 2, 1), datetime.date(2023, 3, 1)]
}
def infer_schema(data):
schema = "struct<"
for key, value in data.items():
@@ -69,7 +56,7 @@ def infer_schema(data):
elif key.startswith("date"):
dt = "date"
else:
print(key, value, dt)
print(key,value,dt)
raise NotImplementedError
if key.startswith("double"):
dt = "double"
@@ -81,6 +68,7 @@ def infer_schema(data):
return schema
def _write(
schema: str,
data,

View File

@@ -32,7 +32,6 @@ pub use scipy_stats_norm_cdf::ScipyStatsNormCdfAccumulatorCreator;
pub use scipy_stats_norm_pdf::ScipyStatsNormPdfAccumulatorCreator;
use crate::function_registry::FunctionRegistry;
use crate::scalars::vector::product::VectorProductCreator;
use crate::scalars::vector::sum::VectorSumCreator;
/// A function creates `AggregateFunctionCreator`.
@@ -94,7 +93,6 @@ impl AggregateFunctions {
register_aggr_func!("scipystatsnormcdf", 2, ScipyStatsNormCdfAccumulatorCreator);
register_aggr_func!("scipystatsnormpdf", 2, ScipyStatsNormPdfAccumulatorCreator);
register_aggr_func!("vec_sum", 1, VectorSumCreator);
register_aggr_func!("vec_product", 1, VectorProductCreator);
#[cfg(feature = "geo")]
register_aggr_func!(

View File

@@ -14,17 +14,14 @@
mod convert;
mod distance;
mod elem_product;
mod elem_sum;
pub mod impl_conv;
pub(crate) mod product;
mod scalar_add;
mod scalar_mul;
mod sub;
pub(crate) mod sum;
mod vector_div;
mod vector_mul;
mod vector_norm;
use std::sync::Arc;
@@ -49,10 +46,8 @@ impl VectorFunction {
// vector calculation
registry.register(Arc::new(vector_mul::VectorMulFunction));
registry.register(Arc::new(vector_norm::VectorNormFunction));
registry.register(Arc::new(vector_div::VectorDivFunction));
registry.register(Arc::new(sub::SubFunction));
registry.register(Arc::new(elem_sum::ElemSumFunction));
registry.register(Arc::new(elem_product::ElemProductFunction));
}
}

View File

@@ -1,142 +0,0 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::borrow::Cow;
use std::fmt::Display;
use common_query::error::InvalidFuncArgsSnafu;
use common_query::prelude::{Signature, TypeSignature, Volatility};
use datatypes::prelude::ConcreteDataType;
use datatypes::scalars::ScalarVectorBuilder;
use datatypes::vectors::{Float32VectorBuilder, MutableVector, VectorRef};
use nalgebra::DVectorView;
use snafu::ensure;
use crate::function::{Function, FunctionContext};
use crate::scalars::vector::impl_conv::{as_veclit, as_veclit_if_const};
const NAME: &str = "vec_elem_product";
/// Multiplies all elements of the vector, returns a scalar.
///
/// # Example
///
/// ```sql
/// SELECT vec_elem_product(parse_vec('[1.0, 2.0, 3.0, 4.0]'));
///
// +-----------------------------------------------------------+
// | vec_elem_product(parse_vec(Utf8("[1.0, 2.0, 3.0, 4.0]"))) |
// +-----------------------------------------------------------+
// | 24.0 |
// +-----------------------------------------------------------+
/// ```
#[derive(Debug, Clone, Default)]
pub struct ElemProductFunction;
impl Function for ElemProductFunction {
fn name(&self) -> &str {
NAME
}
fn return_type(
&self,
_input_types: &[ConcreteDataType],
) -> common_query::error::Result<ConcreteDataType> {
Ok(ConcreteDataType::float32_datatype())
}
fn signature(&self) -> Signature {
Signature::one_of(
vec![
TypeSignature::Exact(vec![ConcreteDataType::string_datatype()]),
TypeSignature::Exact(vec![ConcreteDataType::binary_datatype()]),
],
Volatility::Immutable,
)
}
fn eval(
&self,
_func_ctx: FunctionContext,
columns: &[VectorRef],
) -> common_query::error::Result<VectorRef> {
ensure!(
columns.len() == 1,
InvalidFuncArgsSnafu {
err_msg: format!(
"The length of the args is not correct, expect exactly one, have: {}",
columns.len()
)
}
);
let arg0 = &columns[0];
let len = arg0.len();
let mut result = Float32VectorBuilder::with_capacity(len);
if len == 0 {
return Ok(result.to_vector());
}
let arg0_const = as_veclit_if_const(arg0)?;
for i in 0..len {
let arg0 = match arg0_const.as_ref() {
Some(arg0) => Some(Cow::Borrowed(arg0.as_ref())),
None => as_veclit(arg0.get_ref(i))?,
};
let Some(arg0) = arg0 else {
result.push_null();
continue;
};
result.push(Some(DVectorView::from_slice(&arg0, arg0.len()).product()));
}
Ok(result.to_vector())
}
}
impl Display for ElemProductFunction {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
write!(f, "{}", NAME.to_ascii_uppercase())
}
}
#[cfg(test)]
mod tests {
use std::sync::Arc;
use datatypes::vectors::StringVector;
use super::*;
use crate::function::FunctionContext;
#[test]
fn test_elem_product() {
let func = ElemProductFunction;
let input0 = Arc::new(StringVector::from(vec![
Some("[1.0,2.0,3.0]".to_string()),
Some("[4.0,5.0,6.0]".to_string()),
None,
]));
let result = func.eval(FunctionContext::default(), &[input0]).unwrap();
let result = result.as_ref();
assert_eq!(result.len(), 3);
assert_eq!(result.get_ref(0).as_f32().unwrap(), Some(6.0));
assert_eq!(result.get_ref(1).as_f32().unwrap(), Some(120.0));
assert_eq!(result.get_ref(2).as_f32().unwrap(), None);
}
}

View File

@@ -1,211 +0,0 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::sync::Arc;
use common_macro::{as_aggr_func_creator, AggrFuncTypeStore};
use common_query::error::{CreateAccumulatorSnafu, Error, InvalidFuncArgsSnafu};
use common_query::logical_plan::{Accumulator, AggregateFunctionCreator};
use common_query::prelude::AccumulatorCreatorFunction;
use datatypes::prelude::{ConcreteDataType, Value, *};
use datatypes::vectors::VectorRef;
use nalgebra::{Const, DVectorView, Dyn, OVector};
use snafu::ensure;
use crate::scalars::vector::impl_conv::{as_veclit, as_veclit_if_const, veclit_to_binlit};
/// Aggregates by multiplying elements across the same dimension, returns a vector.
#[derive(Debug, Default)]
pub struct VectorProduct {
product: Option<OVector<f32, Dyn>>,
has_null: bool,
}
#[as_aggr_func_creator]
#[derive(Debug, Default, AggrFuncTypeStore)]
pub struct VectorProductCreator {}
impl AggregateFunctionCreator for VectorProductCreator {
fn creator(&self) -> AccumulatorCreatorFunction {
let creator: AccumulatorCreatorFunction = Arc::new(move |types: &[ConcreteDataType]| {
ensure!(
types.len() == 1,
InvalidFuncArgsSnafu {
err_msg: format!(
"The length of the args is not correct, expect exactly one, have: {}",
types.len()
)
}
);
let input_type = &types[0];
match input_type {
ConcreteDataType::String(_) | ConcreteDataType::Binary(_) => {
Ok(Box::new(VectorProduct::default()))
}
_ => {
let err_msg = format!(
"\"VEC_PRODUCT\" aggregate function not support data type {:?}",
input_type.logical_type_id(),
);
CreateAccumulatorSnafu { err_msg }.fail()?
}
}
});
creator
}
fn output_type(&self) -> common_query::error::Result<ConcreteDataType> {
Ok(ConcreteDataType::binary_datatype())
}
fn state_types(&self) -> common_query::error::Result<Vec<ConcreteDataType>> {
Ok(vec![self.output_type()?])
}
}
impl VectorProduct {
fn inner(&mut self, len: usize) -> &mut OVector<f32, Dyn> {
self.product.get_or_insert_with(|| {
OVector::from_iterator_generic(Dyn(len), Const::<1>, (0..len).map(|_| 1.0))
})
}
fn update(&mut self, values: &[VectorRef], is_update: bool) -> Result<(), Error> {
if values.is_empty() || self.has_null {
return Ok(());
};
let column = &values[0];
let len = column.len();
match as_veclit_if_const(column)? {
Some(column) => {
let vec_column = DVectorView::from_slice(&column, column.len()).scale(len as f32);
*self.inner(vec_column.len()) =
(*self.inner(vec_column.len())).component_mul(&vec_column);
}
None => {
for i in 0..len {
let Some(arg0) = as_veclit(column.get_ref(i))? else {
if is_update {
self.has_null = true;
self.product = None;
}
return Ok(());
};
let vec_column = DVectorView::from_slice(&arg0, arg0.len());
*self.inner(vec_column.len()) =
(*self.inner(vec_column.len())).component_mul(&vec_column);
}
}
}
Ok(())
}
}
impl Accumulator for VectorProduct {
fn state(&self) -> common_query::error::Result<Vec<Value>> {
self.evaluate().map(|v| vec![v])
}
fn update_batch(&mut self, values: &[VectorRef]) -> common_query::error::Result<()> {
self.update(values, true)
}
fn merge_batch(&mut self, states: &[VectorRef]) -> common_query::error::Result<()> {
self.update(states, false)
}
fn evaluate(&self) -> common_query::error::Result<Value> {
match &self.product {
None => Ok(Value::Null),
Some(vector) => {
let v = vector.as_slice();
Ok(Value::from(veclit_to_binlit(v)))
}
}
}
}
#[cfg(test)]
mod tests {
use std::sync::Arc;
use datatypes::vectors::{ConstantVector, StringVector};
use super::*;
#[test]
fn test_update_batch() {
// test update empty batch, expect not updating anything
let mut vec_product = VectorProduct::default();
vec_product.update_batch(&[]).unwrap();
assert!(vec_product.product.is_none());
assert!(!vec_product.has_null);
assert_eq!(Value::Null, vec_product.evaluate().unwrap());
// test update one not-null value
let mut vec_product = VectorProduct::default();
let v: Vec<VectorRef> = vec![Arc::new(StringVector::from(vec![Some(
"[1.0,2.0,3.0]".to_string(),
)]))];
vec_product.update_batch(&v).unwrap();
assert_eq!(
Value::from(veclit_to_binlit(&[1.0, 2.0, 3.0])),
vec_product.evaluate().unwrap()
);
// test update one null value
let mut vec_product = VectorProduct::default();
let v: Vec<VectorRef> = vec![Arc::new(StringVector::from(vec![Option::<String>::None]))];
vec_product.update_batch(&v).unwrap();
assert_eq!(Value::Null, vec_product.evaluate().unwrap());
// test update no null-value batch
let mut vec_product = VectorProduct::default();
let v: Vec<VectorRef> = vec![Arc::new(StringVector::from(vec![
Some("[1.0,2.0,3.0]".to_string()),
Some("[4.0,5.0,6.0]".to_string()),
Some("[7.0,8.0,9.0]".to_string()),
]))];
vec_product.update_batch(&v).unwrap();
assert_eq!(
Value::from(veclit_to_binlit(&[28.0, 80.0, 162.0])),
vec_product.evaluate().unwrap()
);
// test update null-value batch
let mut vec_product = VectorProduct::default();
let v: Vec<VectorRef> = vec![Arc::new(StringVector::from(vec![
Some("[1.0,2.0,3.0]".to_string()),
None,
Some("[7.0,8.0,9.0]".to_string()),
]))];
vec_product.update_batch(&v).unwrap();
assert_eq!(Value::Null, vec_product.evaluate().unwrap());
// test update with constant vector
let mut vec_product = VectorProduct::default();
let v: Vec<VectorRef> = vec![Arc::new(ConstantVector::new(
Arc::new(StringVector::from_vec(vec!["[1.0,2.0,3.0]".to_string()])),
4,
))];
vec_product.update_batch(&v).unwrap();
assert_eq!(
Value::from(veclit_to_binlit(&[4.0, 8.0, 12.0])),
vec_product.evaluate().unwrap()
);
}
}

View File

@@ -1,168 +0,0 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::borrow::Cow;
use std::fmt::Display;
use common_query::error::{InvalidFuncArgsSnafu, Result};
use common_query::prelude::{Signature, TypeSignature, Volatility};
use datatypes::prelude::ConcreteDataType;
use datatypes::scalars::ScalarVectorBuilder;
use datatypes::vectors::{BinaryVectorBuilder, MutableVector, VectorRef};
use nalgebra::DVectorView;
use snafu::ensure;
use crate::function::{Function, FunctionContext};
use crate::scalars::vector::impl_conv::{as_veclit, as_veclit_if_const, veclit_to_binlit};
const NAME: &str = "vec_norm";
/// Normalizes the vector to length 1, returns a vector.
/// This is equivalent to `VECTOR_SCALAR_MUL(1/SQRT(VECTOR_ELEM_SUM(VECTOR_MUL(v, v))), v)`.
///
/// # Example
///
/// ```sql
/// SELECT vec_to_string(vec_norm('[7.0, 8.0, 9.0]'));
///
/// +--------------------------------------------------+
/// | vec_to_string(vec_norm(Utf8("[7.0, 8.0, 9.0]"))) |
/// +--------------------------------------------------+
/// | [0.5025707,0.5743665,0.64616233] |
/// +--------------------------------------------------+
///
/// ```
#[derive(Debug, Clone, Default)]
pub struct VectorNormFunction;
impl Function for VectorNormFunction {
fn name(&self) -> &str {
NAME
}
fn return_type(&self, _input_types: &[ConcreteDataType]) -> Result<ConcreteDataType> {
Ok(ConcreteDataType::binary_datatype())
}
fn signature(&self) -> Signature {
Signature::one_of(
vec![
TypeSignature::Exact(vec![ConcreteDataType::string_datatype()]),
TypeSignature::Exact(vec![ConcreteDataType::binary_datatype()]),
],
Volatility::Immutable,
)
}
fn eval(
&self,
_func_ctx: FunctionContext,
columns: &[VectorRef],
) -> common_query::error::Result<VectorRef> {
ensure!(
columns.len() == 1,
InvalidFuncArgsSnafu {
err_msg: format!(
"The length of the args is not correct, expect exactly one, have: {}",
columns.len()
)
}
);
let arg0 = &columns[0];
let len = arg0.len();
let mut result = BinaryVectorBuilder::with_capacity(len);
if len == 0 {
return Ok(result.to_vector());
}
let arg0_const = as_veclit_if_const(arg0)?;
for i in 0..len {
let arg0 = match arg0_const.as_ref() {
Some(arg0) => Some(Cow::Borrowed(arg0.as_ref())),
None => as_veclit(arg0.get_ref(i))?,
};
let Some(arg0) = arg0 else {
result.push_null();
continue;
};
let vec0 = DVectorView::from_slice(&arg0, arg0.len());
let vec1 = DVectorView::from_slice(&arg0, arg0.len());
let vec2scalar = vec1.component_mul(&vec0);
let scalar_var = vec2scalar.sum().sqrt();
let vec = DVectorView::from_slice(&arg0, arg0.len());
// Use unscale to avoid division by zero and keep as much precision as possible
let vec_res = vec.unscale(scalar_var);
let veclit = vec_res.as_slice();
let binlit = veclit_to_binlit(veclit);
result.push(Some(&binlit));
}
Ok(result.to_vector())
}
}
impl Display for VectorNormFunction {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
write!(f, "{}", NAME.to_ascii_uppercase())
}
}
#[cfg(test)]
mod tests {
use std::sync::Arc;
use datatypes::vectors::StringVector;
use super::*;
#[test]
fn test_vec_norm() {
let func = VectorNormFunction;
let input0 = Arc::new(StringVector::from(vec![
Some("[0.0,2.0,3.0]".to_string()),
Some("[1.0,2.0,3.0]".to_string()),
Some("[7.0,8.0,9.0]".to_string()),
Some("[7.0,-8.0,9.0]".to_string()),
None,
]));
let result = func.eval(FunctionContext::default(), &[input0]).unwrap();
let result = result.as_ref();
assert_eq!(result.len(), 5);
assert_eq!(
result.get_ref(0).as_binary().unwrap(),
Some(veclit_to_binlit(&[0.0, 0.5547002, 0.8320503]).as_slice())
);
assert_eq!(
result.get_ref(1).as_binary().unwrap(),
Some(veclit_to_binlit(&[0.26726124, 0.5345225, 0.8017837]).as_slice())
);
assert_eq!(
result.get_ref(2).as_binary().unwrap(),
Some(veclit_to_binlit(&[0.5025707, 0.5743665, 0.64616233]).as_slice())
);
assert_eq!(
result.get_ref(3).as_binary().unwrap(),
Some(veclit_to_binlit(&[0.5025707, -0.5743665, 0.64616233]).as_slice())
);
assert!(result.get_ref(4).is_null());
}
}

View File

@@ -22,7 +22,7 @@ mod version;
use std::sync::Arc;
use build::BuildFunction;
use database::{CurrentSchemaFunction, DatabaseFunction, SessionUserFunction};
use database::{CurrentSchemaFunction, DatabaseFunction};
use pg_catalog::PGCatalogFunction;
use procedure_state::ProcedureStateFunction;
use timezone::TimezoneFunction;
@@ -36,9 +36,8 @@ impl SystemFunction {
pub fn register(registry: &FunctionRegistry) {
registry.register(Arc::new(BuildFunction));
registry.register(Arc::new(VersionFunction));
registry.register(Arc::new(CurrentSchemaFunction));
registry.register(Arc::new(DatabaseFunction));
registry.register(Arc::new(SessionUserFunction));
registry.register(Arc::new(CurrentSchemaFunction));
registry.register(Arc::new(TimezoneFunction));
registry.register_async(Arc::new(ProcedureStateFunction));
PGCatalogFunction::register(registry);

View File

@@ -28,11 +28,9 @@ pub struct DatabaseFunction;
#[derive(Clone, Debug, Default)]
pub struct CurrentSchemaFunction;
pub struct SessionUserFunction;
const DATABASE_FUNCTION_NAME: &str = "database";
const CURRENT_SCHEMA_FUNCTION_NAME: &str = "current_schema";
const SESSION_USER_FUNCTION_NAME: &str = "session_user";
impl Function for DatabaseFunction {
fn name(&self) -> &str {
@@ -74,26 +72,6 @@ impl Function for CurrentSchemaFunction {
}
}
impl Function for SessionUserFunction {
fn name(&self) -> &str {
SESSION_USER_FUNCTION_NAME
}
fn return_type(&self, _input_types: &[ConcreteDataType]) -> Result<ConcreteDataType> {
Ok(ConcreteDataType::string_datatype())
}
fn signature(&self) -> Signature {
Signature::uniform(0, vec![], Volatility::Immutable)
}
fn eval(&self, func_ctx: FunctionContext, _columns: &[VectorRef]) -> Result<VectorRef> {
let user = func_ctx.query_ctx.current_user();
Ok(Arc::new(StringVector::from_slice(&[user.username()])) as _)
}
}
impl fmt::Display for DatabaseFunction {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "DATABASE")
@@ -106,12 +84,6 @@ impl fmt::Display for CurrentSchemaFunction {
}
}
impl fmt::Display for SessionUserFunction {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "SESSION_USER")
}
}
#[cfg(test)]
mod tests {
use std::sync::Arc;

View File

@@ -25,15 +25,12 @@ use datatypes::schema::{ColumnSchema, FulltextOptions, RawSchema};
use snafu::{ensure, OptionExt, ResultExt};
use store_api::region_request::{SetRegionOption, UnsetRegionOption};
use table::metadata::TableId;
use table::requests::{
AddColumnRequest, AlterKind, AlterTableRequest, ModifyColumnTypeRequest, SetIndexOptions,
UnsetIndexOptions,
};
use table::requests::{AddColumnRequest, AlterKind, AlterTableRequest, ModifyColumnTypeRequest};
use crate::error::{
InvalidColumnDefSnafu, InvalidSetFulltextOptionRequestSnafu, InvalidSetTableOptionRequestSnafu,
InvalidUnsetTableOptionRequestSnafu, MissingAlterIndexOptionSnafu, MissingFieldSnafu,
MissingTimestampColumnSnafu, Result, UnknownLocationTypeSnafu,
InvalidUnsetTableOptionRequestSnafu, MissingFieldSnafu, MissingTimestampColumnSnafu, Result,
UnknownLocationTypeSnafu,
};
const LOCATION_TYPE_FIRST: i32 = LocationType::First as i32;
@@ -63,7 +60,6 @@ pub fn alter_expr_to_request(table_id: TableId, expr: AlterTableExpr) -> Result<
column_schema: schema,
is_key: column_def.semantic_type == SemanticType::Tag as i32,
location: parse_location(ac.location)?,
add_if_not_exists: ac.add_if_not_exists,
})
})
.collect::<Result<Vec<_>>>()?;
@@ -117,43 +113,18 @@ pub fn alter_expr_to_request(table_id: TableId, expr: AlterTableExpr) -> Result<
.context(InvalidUnsetTableOptionRequestSnafu)?,
}
}
Kind::SetIndex(o) => match o.options {
Some(opt) => match opt {
api::v1::set_index::Options::Fulltext(f) => AlterKind::SetIndex {
options: SetIndexOptions::Fulltext {
column_name: f.column_name.clone(),
options: FulltextOptions {
enable: f.enable,
analyzer: as_fulltext_option(
Analyzer::try_from(f.analyzer)
.context(InvalidSetFulltextOptionRequestSnafu)?,
),
case_sensitive: f.case_sensitive,
},
},
},
api::v1::set_index::Options::Inverted(i) => AlterKind::SetIndex {
options: SetIndexOptions::Inverted {
column_name: i.column_name,
},
},
Kind::SetColumnFulltext(c) => AlterKind::SetColumnFulltext {
column_name: c.column_name,
options: FulltextOptions {
enable: c.enable,
analyzer: as_fulltext_option(
Analyzer::try_from(c.analyzer).context(InvalidSetFulltextOptionRequestSnafu)?,
),
case_sensitive: c.case_sensitive,
},
None => return MissingAlterIndexOptionSnafu.fail(),
},
Kind::UnsetIndex(o) => match o.options {
Some(opt) => match opt {
api::v1::unset_index::Options::Fulltext(f) => AlterKind::UnsetIndex {
options: UnsetIndexOptions::Fulltext {
column_name: f.column_name,
},
},
api::v1::unset_index::Options::Inverted(i) => AlterKind::UnsetIndex {
options: UnsetIndexOptions::Inverted {
column_name: i.column_name,
},
},
},
None => return MissingAlterIndexOptionSnafu.fail(),
Kind::UnsetColumnFulltext(c) => AlterKind::UnsetColumnFulltext {
column_name: c.column_name,
},
};
@@ -249,7 +220,6 @@ mod tests {
..Default::default()
}),
location: None,
add_if_not_exists: true,
}],
})),
};
@@ -270,7 +240,6 @@ mod tests {
add_column.column_schema.data_type
);
assert_eq!(None, add_column.location);
assert!(add_column.add_if_not_exists);
}
#[test]
@@ -296,7 +265,6 @@ mod tests {
location_type: LocationType::First.into(),
after_column_name: String::default(),
}),
add_if_not_exists: false,
},
AddColumn {
column_def: Some(ColumnDef {
@@ -312,7 +280,6 @@ mod tests {
location_type: LocationType::After.into(),
after_column_name: "ts".to_string(),
}),
add_if_not_exists: true,
},
],
})),
@@ -341,7 +308,6 @@ mod tests {
}),
add_column.location
);
assert!(add_column.add_if_not_exists);
let add_column = add_columns.pop().unwrap();
assert!(!add_column.is_key);
@@ -351,7 +317,6 @@ mod tests {
add_column.column_schema.data_type
);
assert_eq!(Some(AddColumnLocation::First), add_column.location);
assert!(!add_column.add_if_not_exists);
}
#[test]

View File

@@ -139,12 +139,6 @@ pub enum Error {
#[snafu(source)]
error: prost::DecodeError,
},
#[snafu(display("Missing alter index options"))]
MissingAlterIndexOption {
#[snafu(implicit)]
location: Location,
},
}
pub type Result<T> = std::result::Result<T, Error>;
@@ -170,8 +164,7 @@ impl ErrorExt for Error {
}
Error::InvalidSetTableOptionRequest { .. }
| Error::InvalidUnsetTableOptionRequest { .. }
| Error::InvalidSetFulltextOptionRequest { .. }
| Error::MissingAlterIndexOption { .. } => StatusCode::InvalidArguments,
| Error::InvalidSetFulltextOptionRequest { .. } => StatusCode::InvalidArguments,
}
}

View File

@@ -299,7 +299,6 @@ mod tests {
.unwrap()
)
);
assert!(host_column.add_if_not_exists);
let memory_column = &add_columns.add_columns[1];
assert_eq!(
@@ -312,7 +311,6 @@ mod tests {
.unwrap()
)
);
assert!(host_column.add_if_not_exists);
let time_column = &add_columns.add_columns[2];
assert_eq!(
@@ -325,7 +323,6 @@ mod tests {
.unwrap()
)
);
assert!(host_column.add_if_not_exists);
let interval_column = &add_columns.add_columns[3];
assert_eq!(
@@ -338,7 +335,6 @@ mod tests {
.unwrap()
)
);
assert!(host_column.add_if_not_exists);
let decimal_column = &add_columns.add_columns[4];
assert_eq!(
@@ -356,7 +352,6 @@ mod tests {
.unwrap()
)
);
assert!(host_column.add_if_not_exists);
}
#[test]

View File

@@ -119,30 +119,29 @@ pub fn build_create_table_expr(
}
let mut column_defs = Vec::with_capacity(column_exprs.len());
let mut primary_keys = Vec::with_capacity(column_exprs.len());
let mut primary_keys = Vec::default();
let mut time_index = None;
for expr in column_exprs {
let ColumnExpr {
column_name,
datatype,
semantic_type,
datatype_extension,
options,
} = expr;
for ColumnExpr {
column_name,
datatype,
semantic_type,
datatype_extension,
options,
} in column_exprs
{
let mut is_nullable = true;
match semantic_type {
v if v == SemanticType::Tag as i32 => primary_keys.push(column_name.to_owned()),
v if v == SemanticType::Tag as i32 => primary_keys.push(column_name.to_string()),
v if v == SemanticType::Timestamp as i32 => {
ensure!(
time_index.is_none(),
DuplicatedTimestampColumnSnafu {
exists: time_index.as_ref().unwrap(),
exists: time_index.unwrap(),
duplicated: column_name,
}
);
time_index = Some(column_name.to_owned());
time_index = Some(column_name.to_string());
// Timestamp column must not be null.
is_nullable = false;
}
@@ -159,8 +158,8 @@ pub fn build_create_table_expr(
}
);
column_defs.push(ColumnDef {
name: column_name.to_owned(),
let column_def = ColumnDef {
name: column_name.to_string(),
data_type: datatype,
is_nullable,
default_constraint: vec![],
@@ -168,14 +167,15 @@ pub fn build_create_table_expr(
comment: String::new(),
datatype_extension: datatype_extension.clone(),
options: options.clone(),
});
};
column_defs.push(column_def);
}
let time_index = time_index.context(MissingTimestampColumnSnafu {
msg: format!("table is {}", table_name.table),
})?;
Ok(CreateTableExpr {
let expr = CreateTableExpr {
catalog_name: table_name.catalog.to_string(),
schema_name: table_name.schema.to_string(),
table_name: table_name.table.to_string(),
@@ -187,12 +187,11 @@ pub fn build_create_table_expr(
table_options: Default::default(),
table_id: table_id.map(|id| api::v1::TableId { id }),
engine: engine.to_string(),
})
};
Ok(expr)
}
/// Find columns that are not present in the schema and return them as `AddColumns`
/// for adding columns automatically.
/// It always sets `add_if_not_exists` to `true` for now.
pub fn extract_new_columns(
schema: &Schema,
column_exprs: Vec<ColumnExpr>,
@@ -214,7 +213,6 @@ pub fn extract_new_columns(
AddColumn {
column_def,
location: None,
add_if_not_exists: true,
}
})
.collect::<Vec<_>>();

View File

@@ -6,7 +6,7 @@ license.workspace = true
[features]
testing = []
pg_kvbackend = ["dep:tokio-postgres", "dep:backon"]
pg_kvbackend = ["dep:tokio-postgres"]
[lints]
workspace = true
@@ -17,7 +17,6 @@ api.workspace = true
async-recursion = "1.0"
async-stream = "0.3"
async-trait.workspace = true
backon = { workspace = true, optional = true }
base64.workspace = true
bytes.workspace = true
chrono.workspace = true
@@ -36,8 +35,6 @@ common-wal.workspace = true
datafusion-common.workspace = true
datafusion-expr.workspace = true
datatypes.workspace = true
deadpool.workspace = true
deadpool-postgres.workspace = true
derive_builder.workspace = true
etcd-client.workspace = true
futures.workspace = true

View File

@@ -105,7 +105,7 @@ impl AlterLogicalTablesProcedure {
.context(ConvertAlterTableRequestSnafu)?;
let new_meta = table_info
.meta
.builder_with_alter_kind(table_ref.table, &request.alter_kind)
.builder_with_alter_kind(table_ref.table, &request.alter_kind, true)
.context(error::TableSnafu)?
.build()
.with_context(|_| error::BuildTableMetaSnafu {

View File

@@ -28,13 +28,13 @@ use common_procedure::error::{FromJsonSnafu, Result as ProcedureResult, ToJsonSn
use common_procedure::{
Context as ProcedureContext, Error as ProcedureError, LockKey, Procedure, Status, StringKey,
};
use common_telemetry::{debug, error, info};
use common_telemetry::{debug, info};
use futures::future;
use serde::{Deserialize, Serialize};
use snafu::ResultExt;
use store_api::storage::RegionId;
use strum::AsRefStr;
use table::metadata::{RawTableInfo, TableId, TableInfo};
use table::metadata::{RawTableInfo, TableId};
use table::table_reference::TableReference;
use crate::cache_invalidator::Context;
@@ -51,14 +51,10 @@ use crate::{metrics, ClusterId};
/// The alter table procedure
pub struct AlterTableProcedure {
/// The runtime context.
// The runtime context.
context: DdlContext,
/// The serialized data.
// The serialized data.
data: AlterTableData,
/// Cached new table metadata in the prepare step.
/// If we recover the procedure from json, then the table info value is not cached.
/// But we already validated it in the prepare step.
new_table_info: Option<TableInfo>,
}
impl AlterTableProcedure {
@@ -74,31 +70,18 @@ impl AlterTableProcedure {
Ok(Self {
context,
data: AlterTableData::new(task, table_id, cluster_id),
new_table_info: None,
})
}
pub fn from_json(json: &str, context: DdlContext) -> ProcedureResult<Self> {
let data: AlterTableData = serde_json::from_str(json).context(FromJsonSnafu)?;
Ok(AlterTableProcedure {
context,
data,
new_table_info: None,
})
Ok(AlterTableProcedure { context, data })
}
// Checks whether the table exists.
pub(crate) async fn on_prepare(&mut self) -> Result<Status> {
self.check_alter().await?;
self.fill_table_info().await?;
// Validates the request and builds the new table info.
// We need to build the new table info here because we should ensure the alteration
// is valid in `UpdateMeta` state as we already altered the region.
// Safety: `fill_table_info()` already set it.
let table_info_value = self.data.table_info_value.as_ref().unwrap();
self.new_table_info = Some(self.build_new_table_info(&table_info_value.table_info)?);
// Safety: Checked in `AlterTableProcedure::new`.
let alter_kind = self.data.task.alter_table.kind.as_ref().unwrap();
if matches!(alter_kind, Kind::RenameTable { .. }) {
@@ -123,14 +106,6 @@ impl AlterTableProcedure {
let leaders = find_leaders(&physical_table_route.region_routes);
let mut alter_region_tasks = Vec::with_capacity(leaders.len());
let alter_kind = self.make_region_alter_kind()?;
info!(
"Submitting alter region requests for table {}, table_id: {}, alter_kind: {:?}",
self.data.table_ref(),
table_id,
alter_kind,
);
for datanode in leaders {
let requester = self.context.node_manager.datanode(&datanode).await;
@@ -138,7 +113,7 @@ impl AlterTableProcedure {
for region in regions {
let region_id = RegionId::new(table_id, region);
let request = self.make_alter_region_request(region_id, alter_kind.clone())?;
let request = self.make_alter_region_request(region_id)?;
debug!("Submitting {request:?} to {datanode}");
let datanode = datanode.clone();
@@ -175,15 +150,7 @@ impl AlterTableProcedure {
let table_ref = self.data.table_ref();
// Safety: checked before.
let table_info_value = self.data.table_info_value.as_ref().unwrap();
// Gets the table info from the cache or builds it.
let new_info = match &self.new_table_info {
Some(cached) => cached.clone(),
None => self.build_new_table_info(&table_info_value.table_info)
.inspect_err(|e| {
// We already check the table info in the prepare step so this should not happen.
error!(e; "Unable to build info for table {} in update metadata step, table_id: {}", table_ref, table_id);
})?,
};
let new_info = self.build_new_table_info(&table_info_value.table_info)?;
debug!(
"Starting update table: {} metadata, new table info {:?}",
@@ -207,7 +174,7 @@ impl AlterTableProcedure {
.await?;
}
info!("Updated table metadata for table {table_ref}, table_id: {table_id}, kind: {alter_kind:?}");
info!("Updated table metadata for table {table_ref}, table_id: {table_id}");
self.data.state = AlterTableState::InvalidateTableCache;
Ok(Status::executing(true))
}
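
The comments in this hunk describe a "validate and cache in prepare, rebuild after recovery" approach for the new table info: the value is built once in the prepare step, but if the procedure is recovered from JSON the cache is empty and the value must be rebuilt in the update-metadata step. Stripped of the real procedure machinery, the pattern is just an `Option` cache with a rebuild fallback; the `Procedure`, `TableInfo`, and `build_info` names below are illustrative, not the actual types.

// Minimal sketch of the "cache in prepare, rebuild after recovery" pattern.
#[derive(Clone, Debug, PartialEq)]
struct TableInfo {
    version: u64,
}

struct Procedure {
    // Serialized with the procedure state.
    base_version: u64,
    // Not serialized: empty again after the procedure is recovered from JSON.
    cached_info: Option<TableInfo>,
}

impl Procedure {
    fn build_info(&self) -> TableInfo {
        // The real code validates the alteration here and may fail;
        // this sketch keeps it infallible for brevity.
        TableInfo { version: self.base_version + 1 }
    }

    fn on_prepare(&mut self) {
        // Validate early and keep the result for later steps.
        self.cached_info = Some(self.build_info());
    }

    fn on_update_metadata(&self) -> TableInfo {
        // Use the cached value if present, otherwise rebuild it,
        // which is what happens after recovering from JSON.
        self.cached_info.clone().unwrap_or_else(|| self.build_info())
    }
}

fn main() {
    let mut p = Procedure { base_version: 1, cached_info: None };
    p.on_prepare();
    assert_eq!(p.on_update_metadata(), TableInfo { version: 2 });

    // Simulate recovery from JSON: the cache is gone, the value is rebuilt.
    let recovered = Procedure { base_version: 1, cached_info: None };
    assert_eq!(recovered.on_update_metadata(), TableInfo { version: 2 });
}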

View File

@@ -12,8 +12,6 @@
// See the License for the specific language governing permissions and
// limitations under the License.
use std::collections::HashSet;
use api::v1::alter_table_expr::Kind;
use api::v1::region::region_request::Body;
use api::v1::region::{
@@ -29,15 +27,13 @@ use crate::ddl::alter_table::AlterTableProcedure;
use crate::error::{InvalidProtoMsgSnafu, Result};
impl AlterTableProcedure {
    /// Makes an alter region request from an existing alter kind.
    /// The region alter request always adds columns if they do not exist.
pub(crate) fn make_alter_region_request(
&self,
region_id: RegionId,
kind: Option<alter_request::Kind>,
) -> Result<RegionRequest> {
/// Makes alter region request.
pub(crate) fn make_alter_region_request(&self, region_id: RegionId) -> Result<RegionRequest> {
// Safety: Checked in `AlterTableProcedure::new`.
let alter_kind = self.data.task.alter_table.kind.as_ref().unwrap();
// Safety: checked
let table_info = self.data.table_info().unwrap();
let kind = create_proto_alter_kind(table_info, alter_kind)?;
Ok(RegionRequest {
header: Some(RegionRequestHeader {
@@ -51,66 +47,45 @@ impl AlterTableProcedure {
})),
})
}
/// Makes alter kind proto that all regions can reuse.
    /// The region alter request always adds columns if they do not exist.
pub(crate) fn make_region_alter_kind(&self) -> Result<Option<alter_request::Kind>> {
// Safety: Checked in `AlterTableProcedure::new`.
let alter_kind = self.data.task.alter_table.kind.as_ref().unwrap();
// Safety: checked
let table_info = self.data.table_info().unwrap();
let kind = create_proto_alter_kind(table_info, alter_kind)?;
Ok(kind)
}
}
/// Creates region proto alter kind from `table_info` and `alter_kind`.
///
/// It always adds columns if they do not exist and drops columns if they exist.
/// It skips the column if it already exists in the table.
/// Returns the kind and next column id if it adds new columns.
fn create_proto_alter_kind(
table_info: &RawTableInfo,
alter_kind: &Kind,
) -> Result<Option<alter_request::Kind>> {
match alter_kind {
Kind::AddColumns(x) => {
// Construct a set of existing columns in the table.
let existing_columns: HashSet<_> = table_info
.meta
.schema
.column_schemas
.iter()
.map(|col| &col.name)
.collect();
let mut next_column_id = table_info.meta.next_column_id;
let mut add_columns = Vec::with_capacity(x.add_columns.len());
for add_column in &x.add_columns {
let column_def = add_column
.column_def
.as_ref()
.context(InvalidProtoMsgSnafu {
err_msg: "'column_def' is absent",
})?;
let add_columns = x
.add_columns
.iter()
.map(|add_column| {
let column_def =
add_column
.column_def
.as_ref()
.context(InvalidProtoMsgSnafu {
err_msg: "'column_def' is absent",
})?;
// Skips existing columns.
if existing_columns.contains(&column_def.name) {
continue;
}
let column_id = next_column_id;
next_column_id += 1;
let column_id = next_column_id;
next_column_id += 1;
let column_def = RegionColumnDef {
column_def: Some(column_def.clone()),
column_id,
};
let column_def = RegionColumnDef {
column_def: Some(column_def.clone()),
column_id,
};
add_columns.push(AddColumn {
column_def: Some(column_def),
location: add_column.location.clone(),
});
}
Ok(AddColumn {
column_def: Some(column_def),
location: add_column.location.clone(),
})
})
.collect::<Result<Vec<_>>>()?;
Ok(Some(alter_request::Kind::AddColumns(AddColumns {
add_columns,
@@ -133,8 +108,10 @@ fn create_proto_alter_kind(
Kind::RenameTable(_) => Ok(None),
Kind::SetTableOptions(v) => Ok(Some(alter_request::Kind::SetTableOptions(v.clone()))),
Kind::UnsetTableOptions(v) => Ok(Some(alter_request::Kind::UnsetTableOptions(v.clone()))),
Kind::SetIndex(v) => Ok(Some(alter_request::Kind::SetIndex(v.clone()))),
Kind::UnsetIndex(v) => Ok(Some(alter_request::Kind::UnsetIndex(v.clone()))),
Kind::SetColumnFulltext(v) => Ok(Some(alter_request::Kind::SetColumnFulltext(v.clone()))),
Kind::UnsetColumnFulltext(v) => {
Ok(Some(alter_request::Kind::UnsetColumnFulltext(v.clone())))
}
}
}
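
The add-columns branch above does two things worth calling out: it skips columns that the table already has, and it assigns monotonically increasing column ids starting from `next_column_id`. A stripped-down, self-contained version of that loop, with plain strings standing in for the proto column definitions, looks like this:

use std::collections::HashSet;

// Simplified stand-in for the region column definition proto.
#[derive(Debug)]
struct NewColumn {
    name: String,
    column_id: u32,
}

fn plan_add_columns(existing: &[&str], requested: &[&str], mut next_column_id: u32) -> Vec<NewColumn> {
    let existing: HashSet<&str> = existing.iter().copied().collect();
    let mut out = Vec::new();
    for name in requested {
        // Skip columns the table already has, just like the loop above.
        if existing.contains(name) {
            continue;
        }
        let column_id = next_column_id;
        next_column_id += 1;
        out.push(NewColumn { name: name.to_string(), column_id });
    }
    out
}

fn main() {
    let planned = plan_add_columns(&["ts", "host", "cpu"], &["cpu", "mem", "disk"], 3);
    // "cpu" is skipped; "mem" and "disk" receive ids 3 and 4.
    assert_eq!(planned.len(), 2);
    for c in &planned {
        println!("{} -> column_id {}", c.name, c.column_id);
    }
}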
@@ -166,7 +143,6 @@ mod tests {
use crate::rpc::router::{Region, RegionRoute};
use crate::test_util::{new_ddl_context, MockDatanodeManager};
/// Prepares a region with schema `[ts: Timestamp, host: Tag, cpu: Field]`.
async fn prepare_ddl_context() -> (DdlContext, u64, TableId, RegionId, String) {
let datanode_manager = Arc::new(MockDatanodeManager::new(()));
let ddl_context = new_ddl_context(datanode_manager);
@@ -195,7 +171,6 @@ mod tests {
.name("cpu")
.data_type(ColumnDataType::Float64)
.semantic_type(SemanticType::Field)
.is_nullable(true)
.build()
.unwrap()
.into(),
@@ -250,16 +225,15 @@ mod tests {
name: "my_tag3".to_string(),
data_type: ColumnDataType::String as i32,
is_nullable: true,
default_constraint: Vec::new(),
default_constraint: b"hello".to_vec(),
semantic_type: SemanticType::Tag as i32,
comment: String::new(),
..Default::default()
}),
location: Some(AddColumnLocation {
location_type: LocationType::After as i32,
after_column_name: "host".to_string(),
after_column_name: "my_tag2".to_string(),
}),
add_if_not_exists: false,
}],
})),
},
@@ -268,11 +242,8 @@ mod tests {
let mut procedure =
AlterTableProcedure::new(cluster_id, table_id, task, ddl_context).unwrap();
procedure.on_prepare().await.unwrap();
let alter_kind = procedure.make_region_alter_kind().unwrap();
let Some(Body::Alter(alter_region_request)) = procedure
.make_alter_region_request(region_id, alter_kind)
.unwrap()
.body
let Some(Body::Alter(alter_region_request)) =
procedure.make_alter_region_request(region_id).unwrap().body
else {
unreachable!()
};
@@ -288,7 +259,7 @@ mod tests {
name: "my_tag3".to_string(),
data_type: ColumnDataType::String as i32,
is_nullable: true,
default_constraint: Vec::new(),
default_constraint: b"hello".to_vec(),
semantic_type: SemanticType::Tag as i32,
comment: String::new(),
..Default::default()
@@ -297,7 +268,7 @@ mod tests {
}),
location: Some(AddColumnLocation {
location_type: LocationType::After as i32,
after_column_name: "host".to_string(),
after_column_name: "my_tag2".to_string(),
}),
}]
}
@@ -328,11 +299,8 @@ mod tests {
let mut procedure =
AlterTableProcedure::new(cluster_id, table_id, task, ddl_context).unwrap();
procedure.on_prepare().await.unwrap();
let alter_kind = procedure.make_region_alter_kind().unwrap();
let Some(Body::Alter(alter_region_request)) = procedure
.make_alter_region_request(region_id, alter_kind)
.unwrap()
.body
let Some(Body::Alter(alter_region_request)) =
procedure.make_alter_region_request(region_id).unwrap().body
else {
unreachable!()
};

View File

@@ -23,9 +23,7 @@ use crate::key::table_info::TableInfoValue;
use crate::key::{DeserializedValueWithBytes, RegionDistribution};
impl AlterTableProcedure {
/// Builds new table info after alteration.
    /// It bumps the column id of the table by the number of add column requests.
/// So there may be holes in the column id sequence.
/// Builds new_meta
pub(crate) fn build_new_table_info(&self, table_info: &RawTableInfo) -> Result<TableInfo> {
let table_info =
TableInfo::try_from(table_info.clone()).context(error::ConvertRawTableInfoSnafu)?;
@@ -36,7 +34,7 @@ impl AlterTableProcedure {
let new_meta = table_info
.meta
.builder_with_alter_kind(table_ref.table, &request.alter_kind)
.builder_with_alter_kind(table_ref.table, &request.alter_kind, false)
.context(error::TableSnafu)?
.build()
.with_context(|_| error::BuildTableMetaSnafu {
@@ -48,9 +46,6 @@ impl AlterTableProcedure {
new_info.ident.version = table_info.ident.version + 1;
match request.alter_kind {
AlterKind::AddColumns { columns } => {
// Bumps the column id for the new columns.
// It may bump more than the actual number of columns added if there are
// existing columns, but it's fine.
new_info.meta.next_column_id += columns.len() as u32;
}
AlterKind::RenameTable { new_table_name } => {
@@ -60,8 +55,8 @@ impl AlterTableProcedure {
| AlterKind::ModifyColumnTypes { .. }
| AlterKind::SetTableOptions { .. }
| AlterKind::UnsetTableOptions { .. }
| AlterKind::SetIndex { .. }
| AlterKind::UnsetIndex { .. } => {}
| AlterKind::SetColumnFulltext { .. }
| AlterKind::UnsetColumnFulltext { .. } => {}
}
Ok(new_info)
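
The comments in this hunk note that the table metadata advances `next_column_id` by the number of *requested* columns, even when some of those columns already exist and are skipped on the region side, so the id sequence can contain holes. A tiny sketch of that bookkeeping, under the same simplification as above:

fn bump_next_column_id(next_column_id: u32, requested: usize, actually_added: usize) -> u32 {
    // Only the request count matters for the table metadata; ids just need to be
    // unique and increasing, not dense, so the resulting holes are harmless.
    let _ = actually_added; // intentionally unused in this sketch
    next_column_id + requested as u32
}

fn main() {
    // Three columns requested, only two were new: the next id still jumps by 3.
    assert_eq!(bump_next_column_id(5, 3, 2), 8);
}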

View File

@@ -21,7 +21,7 @@ use api::v1::CreateTableExpr;
use async_trait::async_trait;
use common_procedure::error::{FromJsonSnafu, Result as ProcedureResult, ToJsonSnafu};
use common_procedure::{Context as ProcedureContext, LockKey, Procedure, Status};
use common_telemetry::{debug, warn};
use common_telemetry::warn;
use futures_util::future::join_all;
use serde::{Deserialize, Serialize};
use snafu::{ensure, ResultExt};
@@ -143,12 +143,7 @@ impl CreateLogicalTablesProcedure {
for peer in leaders {
let requester = self.context.node_manager.datanode(&peer).await;
let Some(request) = self.make_request(&peer, region_routes)? else {
debug!("no region request to send to datanode {}", peer);
                // We can skip the rest of the datanodes,
                // since they should all return the same result.
break;
};
let request = self.make_request(&peer, region_routes)?;
create_region_tasks.push(async move {
requester

View File

@@ -25,7 +25,7 @@ impl CreateLogicalTablesProcedure {
Ok(())
}
pub async fn check_tables_already_exist(&mut self) -> Result<()> {
pub(crate) async fn check_tables_already_exist(&mut self) -> Result<()> {
let table_name_keys = self
.data
.all_create_table_exprs()

View File

@@ -15,7 +15,6 @@
use std::collections::HashMap;
use api::v1::region::{region_request, CreateRequests, RegionRequest, RegionRequestHeader};
use common_telemetry::debug;
use common_telemetry::tracing_context::TracingContext;
use store_api::storage::RegionId;
@@ -32,15 +31,11 @@ impl CreateLogicalTablesProcedure {
&self,
peer: &Peer,
region_routes: &[RegionRoute],
) -> Result<Option<RegionRequest>> {
) -> Result<RegionRequest> {
let tasks = &self.data.tasks;
let table_ids_already_exists = &self.data.table_ids_already_exists;
let regions_on_this_peer = find_leader_regions(region_routes, peer);
let mut requests = Vec::with_capacity(tasks.len() * regions_on_this_peer.len());
for (task, table_id_already_exists) in tasks.iter().zip(table_ids_already_exists) {
if table_id_already_exists.is_some() {
continue;
}
for task in tasks {
let create_table_expr = &task.create_table;
let catalog = &create_table_expr.catalog_name;
let schema = &create_table_expr.schema_name;
@@ -56,18 +51,13 @@ impl CreateLogicalTablesProcedure {
}
}
if requests.is_empty() {
debug!("no region request to send to datanodes");
return Ok(None);
}
Ok(Some(RegionRequest {
Ok(RegionRequest {
header: Some(RegionRequestHeader {
tracing_context: TracingContext::from_current_span().to_w3c(),
..Default::default()
}),
body: Some(region_request::Body::Creates(CreateRequests { requests })),
}))
})
}
fn create_region_request_builder(

View File

@@ -30,8 +30,6 @@ pub struct TestAlterTableExpr {
add_columns: Vec<ColumnDef>,
#[builder(setter(into, strip_option))]
new_table_name: Option<String>,
#[builder(setter)]
add_if_not_exists: bool,
}
impl From<TestAlterTableExpr> for AlterTableExpr {
@@ -55,7 +53,6 @@ impl From<TestAlterTableExpr> for AlterTableExpr {
.map(|col| AddColumn {
column_def: Some(col),
location: None,
add_if_not_exists: value.add_if_not_exists,
})
.collect(),
})),

View File

@@ -56,7 +56,6 @@ fn make_alter_logical_table_add_column_task(
let alter_table = alter_table
.table_name(table.to_string())
.add_columns(add_columns)
.add_if_not_exists(true)
.build()
.unwrap();

View File

@@ -139,7 +139,7 @@ async fn test_on_submit_alter_request() {
table_name: table_name.to_string(),
kind: Some(Kind::DropColumns(DropColumns {
drop_columns: vec![DropColumn {
name: "cpu".to_string(),
name: "my_field_column".to_string(),
}],
})),
},
@@ -225,7 +225,7 @@ async fn test_on_submit_alter_request_with_outdated_request() {
table_name: table_name.to_string(),
kind: Some(Kind::DropColumns(DropColumns {
drop_columns: vec![DropColumn {
name: "cpu".to_string(),
name: "my_field_column".to_string(),
}],
})),
},
@@ -330,7 +330,6 @@ async fn test_on_update_metadata_add_columns() {
..Default::default()
}),
location: None,
add_if_not_exists: false,
}],
})),
},

View File

@@ -639,6 +639,15 @@ pub enum Error {
location: Location,
},
#[snafu(display("Failed to parse {} from str to utf8", name))]
StrFromUtf8 {
name: String,
#[snafu(source)]
error: std::str::Utf8Error,
#[snafu(implicit)]
location: Location,
},
#[snafu(display("Value not exists"))]
ValueNotExist {
#[snafu(implicit)]
@@ -649,9 +658,8 @@ pub enum Error {
GetCache { source: Arc<Error> },
#[cfg(feature = "pg_kvbackend")]
#[snafu(display("Failed to execute via Postgres, sql: {}", sql))]
#[snafu(display("Failed to execute via Postgres"))]
PostgresExecution {
sql: String,
#[snafu(source)]
error: tokio_postgres::Error,
#[snafu(implicit)]
@@ -659,37 +667,12 @@ pub enum Error {
},
#[cfg(feature = "pg_kvbackend")]
#[snafu(display("Failed to create connection pool for Postgres"))]
CreatePostgresPool {
#[snafu(source)]
error: deadpool_postgres::CreatePoolError,
#[snafu(implicit)]
location: Location,
},
#[cfg(feature = "pg_kvbackend")]
#[snafu(display("Failed to get Postgres connection from pool: {}", reason))]
GetPostgresConnection {
reason: String,
#[snafu(implicit)]
location: Location,
},
#[cfg(feature = "pg_kvbackend")]
#[snafu(display("Failed to {} Postgres transaction", operation))]
PostgresTransaction {
#[snafu(display("Failed to connect to Postgres"))]
ConnectPostgres {
#[snafu(source)]
error: tokio_postgres::Error,
#[snafu(implicit)]
location: Location,
operation: String,
},
#[cfg(feature = "pg_kvbackend")]
#[snafu(display("Postgres transaction retry failed"))]
PostgresTransactionRetryFailed {
#[snafu(implicit)]
location: Location,
},
#[snafu(display(
@@ -755,7 +738,8 @@ impl ErrorExt for Error {
| UnexpectedLogicalRouteTable { .. }
| ProcedureOutput { .. }
| FromUtf8 { .. }
| MetadataCorruption { .. } => StatusCode::Unexpected,
| MetadataCorruption { .. }
| StrFromUtf8 { .. } => StatusCode::Unexpected,
SendMessage { .. } | GetKvCache { .. } | CacheNotGet { .. } => StatusCode::Internal,
@@ -802,11 +786,9 @@ impl ErrorExt for Error {
| EmptyDdlTasks { .. } => StatusCode::InvalidArguments,
#[cfg(feature = "pg_kvbackend")]
PostgresExecution { .. }
| CreatePostgresPool { .. }
| GetPostgresConnection { .. }
| PostgresTransaction { .. }
| PostgresTransactionRetryFailed { .. } => StatusCode::Internal,
PostgresExecution { .. } => StatusCode::Internal,
#[cfg(feature = "pg_kvbackend")]
ConnectPostgres { .. } => StatusCode::Internal,
Error::DatanodeTableInfoNotFound { .. } => StatusCode::Internal,
}
}
@@ -817,20 +799,6 @@ impl ErrorExt for Error {
}
impl Error {
#[cfg(feature = "pg_kvbackend")]
/// Check if the error is a serialization error.
pub fn is_serialization_error(&self) -> bool {
match self {
Error::PostgresTransaction { error, .. } => {
error.code() == Some(&tokio_postgres::error::SqlState::T_R_SERIALIZATION_FAILURE)
}
Error::PostgresExecution { error, .. } => {
error.code() == Some(&tokio_postgres::error::SqlState::T_R_SERIALIZATION_FAILURE)
}
_ => false,
}
}
/// Creates a new [Error::RetryLater] error from source `err`.
pub fn retry_later<E: ErrorExt + Send + Sync + 'static>(err: E) -> Error {
Error::RetryLater {

View File

@@ -29,6 +29,7 @@ use crate::error::{self, Error, InvalidMetadataSnafu, ParseOptionSnafu, Result};
use crate::key::{MetadataKey, SCHEMA_NAME_KEY_PATTERN, SCHEMA_NAME_KEY_PREFIX};
use crate::kv_backend::txn::Txn;
use crate::kv_backend::KvBackendRef;
use crate::metrics::METRIC_META_SCHEMA_INFO_GET;
use crate::range_stream::{PaginationStream, DEFAULT_PAGE_SIZE};
use crate::rpc::store::RangeRequest;
use crate::rpc::KeyValue;
@@ -209,6 +210,8 @@ impl SchemaManager {
&self,
schema: SchemaNameKey<'_>,
) -> Result<Option<DeserializedValueWithBytes<SchemaNameValue>>> {
let _timer = METRIC_META_SCHEMA_INFO_GET.start_timer();
let raw_key = schema.to_bytes();
self.kv_backend
.get(&raw_key)

View File

@@ -29,6 +29,7 @@ use crate::key::txn_helper::TxnOpGetResponseSet;
use crate::key::{DeserializedValueWithBytes, MetadataKey, MetadataValue, TABLE_INFO_KEY_PREFIX};
use crate::kv_backend::txn::Txn;
use crate::kv_backend::KvBackendRef;
use crate::metrics::METRIC_META_TABLE_INFO_GET;
use crate::rpc::store::BatchGetRequest;
/// The key stores the metadata of the table.
@@ -190,17 +191,12 @@ impl TableInfoManager {
))
}
/// Checks if the table exists.
pub async fn exists(&self, table_id: TableId) -> Result<bool> {
let key = TableInfoKey::new(table_id);
let raw_key = key.to_bytes();
self.kv_backend.exists(&raw_key).await
}
pub async fn get(
&self,
table_id: TableId,
) -> Result<Option<DeserializedValueWithBytes<TableInfoValue>>> {
let _timer = METRIC_META_TABLE_INFO_GET.start_timer();
let key = TableInfoKey::new(table_id);
let raw_key = key.to_bytes();
self.kv_backend

View File

@@ -542,8 +542,6 @@ mod tests {
prepare_kv_with_prefix, test_kv_batch_delete_with_prefix, test_kv_batch_get_with_prefix,
test_kv_compare_and_put_with_prefix, test_kv_delete_range_with_prefix,
test_kv_put_with_prefix, test_kv_range_2_with_prefix, test_kv_range_with_prefix,
test_txn_compare_equal, test_txn_compare_greater, test_txn_compare_less,
test_txn_compare_not_equal, test_txn_one_compare_op, text_txn_multi_compare_op,
unprepare_kv,
};
@@ -591,7 +589,7 @@ mod tests {
#[tokio::test]
async fn test_range_2() {
if let Some(kv_backend) = build_kv_backend().await {
test_kv_range_2_with_prefix(&kv_backend, b"range2/".to_vec()).await;
test_kv_range_2_with_prefix(kv_backend, b"range2/".to_vec()).await;
}
}
@@ -618,8 +616,7 @@ mod tests {
if let Some(kv_backend) = build_kv_backend().await {
let prefix = b"deleteRange/";
prepare_kv_with_prefix(&kv_backend, prefix.to_vec()).await;
test_kv_delete_range_with_prefix(&kv_backend, prefix.to_vec()).await;
unprepare_kv(&kv_backend, prefix).await;
test_kv_delete_range_with_prefix(kv_backend, prefix.to_vec()).await;
}
}
@@ -628,20 +625,7 @@ mod tests {
if let Some(kv_backend) = build_kv_backend().await {
let prefix = b"batchDelete/";
prepare_kv_with_prefix(&kv_backend, prefix.to_vec()).await;
test_kv_batch_delete_with_prefix(&kv_backend, prefix.to_vec()).await;
unprepare_kv(&kv_backend, prefix).await;
}
}
#[tokio::test]
async fn test_etcd_txn() {
if let Some(kv_backend) = build_kv_backend().await {
test_txn_one_compare_op(&kv_backend).await;
text_txn_multi_compare_op(&kv_backend).await;
test_txn_compare_equal(&kv_backend).await;
test_txn_compare_greater(&kv_backend).await;
test_txn_compare_less(&kv_backend).await;
test_txn_compare_not_equal(&kv_backend).await;
test_kv_batch_delete_with_prefix(kv_backend, prefix.to_vec()).await;
}
}
}

View File

@@ -325,9 +325,7 @@ mod tests {
use crate::error::Error;
use crate::kv_backend::test::{
prepare_kv, test_kv_batch_delete, test_kv_batch_get, test_kv_compare_and_put,
test_kv_delete_range, test_kv_put, test_kv_range, test_kv_range_2, test_txn_compare_equal,
test_txn_compare_greater, test_txn_compare_less, test_txn_compare_not_equal,
test_txn_one_compare_op, text_txn_multi_compare_op,
test_kv_delete_range, test_kv_put, test_kv_range, test_kv_range_2,
};
async fn mock_mem_store_with_data() -> MemoryKvBackend<Error> {
@@ -355,7 +353,7 @@ mod tests {
async fn test_range_2() {
let kv = MemoryKvBackend::<Error>::new();
test_kv_range_2(&kv).await;
test_kv_range_2(kv).await;
}
#[tokio::test]
@@ -376,24 +374,13 @@ mod tests {
async fn test_delete_range() {
let kv_backend = mock_mem_store_with_data().await;
test_kv_delete_range(&kv_backend).await;
test_kv_delete_range(kv_backend).await;
}
#[tokio::test]
async fn test_batch_delete() {
let kv_backend = mock_mem_store_with_data().await;
test_kv_batch_delete(&kv_backend).await;
}
#[tokio::test]
async fn test_memory_txn() {
let kv_backend = MemoryKvBackend::<Error>::new();
test_txn_one_compare_op(&kv_backend).await;
text_txn_multi_compare_op(&kv_backend).await;
test_txn_compare_equal(&kv_backend).await;
test_txn_compare_greater(&kv_backend).await;
test_txn_compare_less(&kv_backend).await;
test_txn_compare_not_equal(&kv_backend).await;
test_kv_batch_delete(kv_backend).await;
}
}

File diff suppressed because it is too large

View File

@@ -15,8 +15,6 @@
use std::sync::atomic::{AtomicU8, Ordering};
use std::sync::Arc;
use txn::{Compare, CompareOp, TxnOp};
use super::{KvBackend, *};
use crate::error::Error;
use crate::rpc::store::{BatchGetRequest, PutRequest};
@@ -61,18 +59,14 @@ pub async fn prepare_kv_with_prefix(kv_backend: &impl KvBackend, prefix: Vec<u8>
pub async fn unprepare_kv(kv_backend: &impl KvBackend, prefix: &[u8]) {
let range_end = util::get_prefix_end_key(prefix);
assert!(
kv_backend
.delete_range(DeleteRangeRequest {
key: prefix.to_vec(),
range_end,
..Default::default()
})
.await
.is_ok(),
"prefix: {:?}",
std::str::from_utf8(prefix).unwrap()
);
assert!(kv_backend
.delete_range(DeleteRangeRequest {
key: prefix.to_vec(),
range_end,
..Default::default()
})
.await
.is_ok());
}
pub async fn test_kv_put(kv_backend: &impl KvBackend) {
@@ -174,11 +168,11 @@ pub async fn test_kv_range_with_prefix(kv_backend: &impl KvBackend, prefix: Vec<
assert_eq!(b"val1", resp.kvs[0].value());
}
pub async fn test_kv_range_2(kv_backend: &impl KvBackend) {
pub async fn test_kv_range_2(kv_backend: impl KvBackend) {
test_kv_range_2_with_prefix(kv_backend, vec![]).await;
}
pub async fn test_kv_range_2_with_prefix(kv_backend: &impl KvBackend, prefix: Vec<u8>) {
pub async fn test_kv_range_2_with_prefix(kv_backend: impl KvBackend, prefix: Vec<u8>) {
let atest = [prefix.clone(), b"atest".to_vec()].concat();
let test = [prefix.clone(), b"test".to_vec()].concat();
@@ -352,11 +346,11 @@ pub async fn test_kv_compare_and_put_with_prefix(
assert!(resp.is_none());
}
pub async fn test_kv_delete_range(kv_backend: &impl KvBackend) {
pub async fn test_kv_delete_range(kv_backend: impl KvBackend) {
test_kv_delete_range_with_prefix(kv_backend, vec![]).await;
}
pub async fn test_kv_delete_range_with_prefix(kv_backend: &impl KvBackend, prefix: Vec<u8>) {
pub async fn test_kv_delete_range_with_prefix(kv_backend: impl KvBackend, prefix: Vec<u8>) {
let key3 = [prefix.clone(), b"key3".to_vec()].concat();
let req = DeleteRangeRequest {
key: key3.clone(),
@@ -407,11 +401,11 @@ pub async fn test_kv_delete_range_with_prefix(kv_backend: &impl KvBackend, prefi
assert!(resp.kvs.is_empty());
}
pub async fn test_kv_batch_delete(kv_backend: &impl KvBackend) {
pub async fn test_kv_batch_delete(kv_backend: impl KvBackend) {
test_kv_batch_delete_with_prefix(kv_backend, vec![]).await;
}
pub async fn test_kv_batch_delete_with_prefix(kv_backend: &impl KvBackend, prefix: Vec<u8>) {
pub async fn test_kv_batch_delete_with_prefix(kv_backend: impl KvBackend, prefix: Vec<u8>) {
let key1 = [prefix.clone(), b"key1".to_vec()].concat();
let key100 = [prefix.clone(), b"key100".to_vec()].concat();
assert!(kv_backend.get(&key1).await.unwrap().is_some());
@@ -450,207 +444,3 @@ pub async fn test_kv_batch_delete_with_prefix(kv_backend: &impl KvBackend, prefi
assert!(kv_backend.get(&key3).await.unwrap().is_none());
assert!(kv_backend.get(&key11).await.unwrap().is_none());
}
pub async fn test_txn_one_compare_op(kv_backend: &impl KvBackend) {
let _ = kv_backend
.put(PutRequest {
key: vec![11],
value: vec![3],
..Default::default()
})
.await
.unwrap();
let txn = Txn::new()
.when(vec![Compare::with_value(
vec![11],
CompareOp::Greater,
vec![1],
)])
.and_then(vec![TxnOp::Put(vec![11], vec![1])])
.or_else(vec![TxnOp::Put(vec![11], vec![2])]);
let txn_response = kv_backend.txn(txn).await.unwrap();
assert!(txn_response.succeeded);
assert_eq!(txn_response.responses.len(), 1);
}
pub async fn text_txn_multi_compare_op(kv_backend: &impl KvBackend) {
for i in 1..3 {
let _ = kv_backend
.put(PutRequest {
key: vec![i],
value: vec![i],
..Default::default()
})
.await
.unwrap();
}
let when: Vec<_> = (1..3u8)
.map(|i| Compare::with_value(vec![i], CompareOp::Equal, vec![i]))
.collect();
let txn = Txn::new()
.when(when)
.and_then(vec![
TxnOp::Put(vec![1], vec![10]),
TxnOp::Put(vec![2], vec![20]),
])
.or_else(vec![TxnOp::Put(vec![1], vec![11])]);
let txn_response = kv_backend.txn(txn).await.unwrap();
assert!(txn_response.succeeded);
assert_eq!(txn_response.responses.len(), 2);
}
pub async fn test_txn_compare_equal(kv_backend: &impl KvBackend) {
let key = vec![101u8];
kv_backend.delete(&key, false).await.unwrap();
let txn = Txn::new()
.when(vec![Compare::with_value_not_exists(
key.clone(),
CompareOp::Equal,
)])
.and_then(vec![TxnOp::Put(key.clone(), vec![1])])
.or_else(vec![TxnOp::Put(key.clone(), vec![2])]);
let txn_response = kv_backend.txn(txn.clone()).await.unwrap();
assert!(txn_response.succeeded);
let txn_response = kv_backend.txn(txn).await.unwrap();
assert!(!txn_response.succeeded);
let txn = Txn::new()
.when(vec![Compare::with_value(
key.clone(),
CompareOp::Equal,
vec![2],
)])
.and_then(vec![TxnOp::Put(key.clone(), vec![3])])
.or_else(vec![TxnOp::Put(key, vec![4])]);
let txn_response = kv_backend.txn(txn).await.unwrap();
assert!(txn_response.succeeded);
}
pub async fn test_txn_compare_greater(kv_backend: &impl KvBackend) {
let key = vec![102u8];
kv_backend.delete(&key, false).await.unwrap();
let txn = Txn::new()
.when(vec![Compare::with_value_not_exists(
key.clone(),
CompareOp::Greater,
)])
.and_then(vec![TxnOp::Put(key.clone(), vec![1])])
.or_else(vec![TxnOp::Put(key.clone(), vec![2])]);
let txn_response = kv_backend.txn(txn.clone()).await.unwrap();
assert!(!txn_response.succeeded);
let txn_response = kv_backend.txn(txn).await.unwrap();
assert!(txn_response.succeeded);
let txn = Txn::new()
.when(vec![Compare::with_value(
key.clone(),
CompareOp::Greater,
vec![1],
)])
.and_then(vec![TxnOp::Put(key.clone(), vec![3])])
.or_else(vec![TxnOp::Get(key.clone())]);
let mut txn_response = kv_backend.txn(txn).await.unwrap();
assert!(!txn_response.succeeded);
let res = txn_response.responses.pop().unwrap();
assert_eq!(
res,
TxnOpResponse::ResponseGet(RangeResponse {
kvs: vec![KeyValue {
key,
value: vec![1]
}],
more: false,
})
);
}
pub async fn test_txn_compare_less(kv_backend: &impl KvBackend) {
let key = vec![103u8];
kv_backend.delete(&[3], false).await.unwrap();
let txn = Txn::new()
.when(vec![Compare::with_value_not_exists(
key.clone(),
CompareOp::Less,
)])
.and_then(vec![TxnOp::Put(key.clone(), vec![1])])
.or_else(vec![TxnOp::Put(key.clone(), vec![2])]);
let txn_response = kv_backend.txn(txn.clone()).await.unwrap();
assert!(!txn_response.succeeded);
let txn_response = kv_backend.txn(txn).await.unwrap();
assert!(!txn_response.succeeded);
let txn = Txn::new()
.when(vec![Compare::with_value(
key.clone(),
CompareOp::Less,
vec![2],
)])
.and_then(vec![TxnOp::Put(key.clone(), vec![3])])
.or_else(vec![TxnOp::Get(key.clone())]);
let mut txn_response = kv_backend.txn(txn).await.unwrap();
assert!(!txn_response.succeeded);
let res = txn_response.responses.pop().unwrap();
assert_eq!(
res,
TxnOpResponse::ResponseGet(RangeResponse {
kvs: vec![KeyValue {
key,
value: vec![2]
}],
more: false,
})
);
}
pub async fn test_txn_compare_not_equal(kv_backend: &impl KvBackend) {
let key = vec![104u8];
kv_backend.delete(&key, false).await.unwrap();
let txn = Txn::new()
.when(vec![Compare::with_value_not_exists(
key.clone(),
CompareOp::NotEqual,
)])
.and_then(vec![TxnOp::Put(key.clone(), vec![1])])
.or_else(vec![TxnOp::Put(key.clone(), vec![2])]);
let txn_response = kv_backend.txn(txn.clone()).await.unwrap();
assert!(!txn_response.succeeded);
let txn_response = kv_backend.txn(txn).await.unwrap();
assert!(txn_response.succeeded);
let txn = Txn::new()
.when(vec![Compare::with_value(
key.clone(),
CompareOp::Equal,
vec![2],
)])
.and_then(vec![TxnOp::Put(key.clone(), vec![3])])
.or_else(vec![TxnOp::Get(key.clone())]);
let mut txn_response = kv_backend.txn(txn).await.unwrap();
assert!(!txn_response.succeeded);
let res = txn_response.responses.pop().unwrap();
assert_eq!(
res,
TxnOpResponse::ResponseGet(RangeResponse {
kvs: vec![KeyValue {
key,
value: vec![1]
}],
more: false,
})
);
}

View File

@@ -131,9 +131,9 @@ pub struct TxnResponse {
pub struct Txn {
// HACK - chroot would modify this field
pub(super) req: TxnRequest,
pub(super) c_when: bool,
pub(super) c_then: bool,
pub(super) c_else: bool,
c_when: bool,
c_then: bool,
c_else: bool,
}
#[cfg(any(test, feature = "testing"))]
@@ -241,7 +241,14 @@ impl From<Txn> for TxnRequest {
#[cfg(test)]
mod tests {
use std::sync::Arc;
use super::*;
use crate::error::Error;
use crate::kv_backend::memory::MemoryKvBackend;
use crate::kv_backend::KvBackendRef;
use crate::rpc::store::PutRequest;
use crate::rpc::KeyValue;
#[test]
fn test_compare() {
@@ -303,4 +310,232 @@ mod tests {
}
);
}
#[tokio::test]
async fn test_txn_one_compare_op() {
let kv_backend = create_kv_backend().await;
let _ = kv_backend
.put(PutRequest {
key: vec![11],
value: vec![3],
..Default::default()
})
.await
.unwrap();
let txn = Txn::new()
.when(vec![Compare::with_value(
vec![11],
CompareOp::Greater,
vec![1],
)])
.and_then(vec![TxnOp::Put(vec![11], vec![1])])
.or_else(vec![TxnOp::Put(vec![11], vec![2])]);
let txn_response = kv_backend.txn(txn).await.unwrap();
assert!(txn_response.succeeded);
assert_eq!(txn_response.responses.len(), 1);
}
#[tokio::test]
async fn test_txn_multi_compare_op() {
let kv_backend = create_kv_backend().await;
for i in 1..3 {
let _ = kv_backend
.put(PutRequest {
key: vec![i],
value: vec![i],
..Default::default()
})
.await
.unwrap();
}
let when: Vec<_> = (1..3u8)
.map(|i| Compare::with_value(vec![i], CompareOp::Equal, vec![i]))
.collect();
let txn = Txn::new()
.when(when)
.and_then(vec![
TxnOp::Put(vec![1], vec![10]),
TxnOp::Put(vec![2], vec![20]),
])
.or_else(vec![TxnOp::Put(vec![1], vec![11])]);
let txn_response = kv_backend.txn(txn).await.unwrap();
assert!(txn_response.succeeded);
assert_eq!(txn_response.responses.len(), 2);
}
#[tokio::test]
async fn test_txn_compare_equal() {
let kv_backend = create_kv_backend().await;
let key = vec![101u8];
kv_backend.delete(&key, false).await.unwrap();
let txn = Txn::new()
.when(vec![Compare::with_value_not_exists(
key.clone(),
CompareOp::Equal,
)])
.and_then(vec![TxnOp::Put(key.clone(), vec![1])])
.or_else(vec![TxnOp::Put(key.clone(), vec![2])]);
let txn_response = kv_backend.txn(txn.clone()).await.unwrap();
assert!(txn_response.succeeded);
let txn_response = kv_backend.txn(txn).await.unwrap();
assert!(!txn_response.succeeded);
let txn = Txn::new()
.when(vec![Compare::with_value(
key.clone(),
CompareOp::Equal,
vec![2],
)])
.and_then(vec![TxnOp::Put(key.clone(), vec![3])])
.or_else(vec![TxnOp::Put(key, vec![4])]);
let txn_response = kv_backend.txn(txn).await.unwrap();
assert!(txn_response.succeeded);
}
#[tokio::test]
async fn test_txn_compare_greater() {
let kv_backend = create_kv_backend().await;
let key = vec![102u8];
kv_backend.delete(&key, false).await.unwrap();
let txn = Txn::new()
.when(vec![Compare::with_value_not_exists(
key.clone(),
CompareOp::Greater,
)])
.and_then(vec![TxnOp::Put(key.clone(), vec![1])])
.or_else(vec![TxnOp::Put(key.clone(), vec![2])]);
let txn_response = kv_backend.txn(txn.clone()).await.unwrap();
assert!(!txn_response.succeeded);
let txn_response = kv_backend.txn(txn).await.unwrap();
assert!(txn_response.succeeded);
let txn = Txn::new()
.when(vec![Compare::with_value(
key.clone(),
CompareOp::Greater,
vec![1],
)])
.and_then(vec![TxnOp::Put(key.clone(), vec![3])])
.or_else(vec![TxnOp::Get(key.clone())]);
let mut txn_response = kv_backend.txn(txn).await.unwrap();
assert!(!txn_response.succeeded);
let res = txn_response.responses.pop().unwrap();
assert_eq!(
res,
TxnOpResponse::ResponseGet(RangeResponse {
kvs: vec![KeyValue {
key,
value: vec![1]
}],
more: false,
})
);
}
#[tokio::test]
async fn test_txn_compare_less() {
let kv_backend = create_kv_backend().await;
let key = vec![103u8];
kv_backend.delete(&[3], false).await.unwrap();
let txn = Txn::new()
.when(vec![Compare::with_value_not_exists(
key.clone(),
CompareOp::Less,
)])
.and_then(vec![TxnOp::Put(key.clone(), vec![1])])
.or_else(vec![TxnOp::Put(key.clone(), vec![2])]);
let txn_response = kv_backend.txn(txn.clone()).await.unwrap();
assert!(!txn_response.succeeded);
let txn_response = kv_backend.txn(txn).await.unwrap();
assert!(!txn_response.succeeded);
let txn = Txn::new()
.when(vec![Compare::with_value(
key.clone(),
CompareOp::Less,
vec![2],
)])
.and_then(vec![TxnOp::Put(key.clone(), vec![3])])
.or_else(vec![TxnOp::Get(key.clone())]);
let mut txn_response = kv_backend.txn(txn).await.unwrap();
assert!(!txn_response.succeeded);
let res = txn_response.responses.pop().unwrap();
assert_eq!(
res,
TxnOpResponse::ResponseGet(RangeResponse {
kvs: vec![KeyValue {
key,
value: vec![2]
}],
more: false,
})
);
}
#[tokio::test]
async fn test_txn_compare_not_equal() {
let kv_backend = create_kv_backend().await;
let key = vec![104u8];
kv_backend.delete(&key, false).await.unwrap();
let txn = Txn::new()
.when(vec![Compare::with_value_not_exists(
key.clone(),
CompareOp::NotEqual,
)])
.and_then(vec![TxnOp::Put(key.clone(), vec![1])])
.or_else(vec![TxnOp::Put(key.clone(), vec![2])]);
let txn_response = kv_backend.txn(txn.clone()).await.unwrap();
assert!(!txn_response.succeeded);
let txn_response = kv_backend.txn(txn).await.unwrap();
assert!(txn_response.succeeded);
let txn = Txn::new()
.when(vec![Compare::with_value(
key.clone(),
CompareOp::Equal,
vec![2],
)])
.and_then(vec![TxnOp::Put(key.clone(), vec![3])])
.or_else(vec![TxnOp::Get(key.clone())]);
let mut txn_response = kv_backend.txn(txn).await.unwrap();
assert!(!txn_response.succeeded);
let res = txn_response.responses.pop().unwrap();
assert_eq!(
res,
TxnOpResponse::ResponseGet(RangeResponse {
kvs: vec![KeyValue {
key,
value: vec![1]
}],
more: false,
})
);
}
async fn create_kv_backend() -> KvBackendRef {
Arc::new(MemoryKvBackend::<Error>::new())
// TODO(jiachun): Add a feature to test against etcd in github CI
//
// The same test can be run against etcd by uncommenting the following line
// crate::service::store::etcd::EtcdStore::with_endpoints(["127.0.0.1:2379"])
// .await
// .unwrap()
}
}
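
For reference, the `when`/`and_then`/`or_else` builder used throughout these tests composes a single atomic transaction: the `when` comparisons are evaluated together, the `and_then` operations run if they all hold, and the `or_else` operations run otherwise. A compare-and-put built from the same pieces could look like the sketch below; it only reuses helpers already shown in this module (`create_kv_backend`, `Compare`, `CompareOp`, `TxnOp`) and is not part of the actual test suite.

    // Sketch: compare-and-put expressed with the same Txn builder.
    #[tokio::test]
    async fn test_txn_compare_and_put_sketch() {
        let kv_backend = create_kv_backend().await;
        let key = vec![105u8];

        // Put vec![1] only if the key does not exist yet.
        let txn = Txn::new()
            .when(vec![Compare::with_value_not_exists(
                key.clone(),
                CompareOp::Equal,
            )])
            .and_then(vec![TxnOp::Put(key.clone(), vec![1])])
            .or_else(vec![TxnOp::Get(key.clone())]);

        let first = kv_backend.txn(txn.clone()).await.unwrap();
        assert!(first.succeeded);

        // The second attempt fails the comparison and takes the `or_else` branch.
        let second = kv_backend.txn(txn).await.unwrap();
        assert!(!second.succeeded);
        assert_eq!(second.responses.len(), 1);
    }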

View File

@@ -108,4 +108,9 @@ lazy_static! {
&["name"]
)
.unwrap();
pub static ref METRIC_META_TABLE_INFO_GET: Histogram =
register_histogram!("greptime_meta_table_info_get", "get table info from kvbackend").unwrap();
pub static ref METRIC_META_SCHEMA_INFO_GET: Histogram =
register_histogram!("greptime_meta_schema_info_get", "get schema info from kvbackend").unwrap();
}
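
The two histograms registered above follow the usual prometheus-rs pattern: register once inside `lazy_static!`, then time a code path by holding the guard returned by `start_timer()`, as the `METRIC_META_TABLE_INFO_GET.start_timer()` call in the table-info getter does. A minimal standalone illustration with the `prometheus` and `lazy_static` crates (metric name and help text are made up for the example):

use lazy_static::lazy_static;
use prometheus::{register_histogram, Histogram};

lazy_static! {
    // Same shape as the metrics above: a histogram registered once, globally.
    static ref DEMO_GET_DURATION: Histogram =
        register_histogram!("demo_get_duration", "time spent in the demo getter").unwrap();
}

fn get_something() -> u32 {
    // The timer observes the elapsed duration when the guard is dropped at the
    // end of the scope, which is exactly how the kvbackend getters use it.
    let _timer = DEMO_GET_DURATION.start_timer();
    42
}

fn main() {
    let _ = get_something();
    println!("observations: {}", DEMO_GET_DURATION.get_sample_count());
}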

View File

@@ -266,7 +266,7 @@ impl PutRequest {
}
}
#[derive(Debug, Clone, PartialEq, Default)]
#[derive(Debug, Clone, PartialEq)]
pub struct PutResponse {
pub prev_kv: Option<KeyValue>,
}
@@ -425,7 +425,7 @@ impl BatchPutRequest {
}
}
#[derive(Debug, Clone, Default)]
#[derive(Debug, Clone)]
pub struct BatchPutResponse {
pub prev_kvs: Vec<KeyValue>,
}
@@ -509,7 +509,7 @@ impl BatchDeleteRequest {
}
}
#[derive(Debug, Clone, Default)]
#[derive(Debug, Clone)]
pub struct BatchDeleteResponse {
pub prev_kvs: Vec<KeyValue>,
}
@@ -754,19 +754,6 @@ impl TryFrom<PbDeleteRangeResponse> for DeleteRangeResponse {
}
impl DeleteRangeResponse {
/// Creates a new [`DeleteRangeResponse`] with the given deleted count.
pub fn new(deleted: i64) -> Self {
Self {
deleted,
prev_kvs: vec![],
}
}
/// Creates a new [`DeleteRangeResponse`] with the given deleted count and previous key-value pairs.
pub fn with_prev_kvs(&mut self, prev_kvs: Vec<KeyValue>) {
self.prev_kvs = prev_kvs;
}
pub fn to_proto_resp(self, header: PbResponseHeader) -> PbDeleteRangeResponse {
PbDeleteRangeResponse {
header: Some(header),

View File

@@ -12,7 +12,7 @@ snafu.workspace = true
tokio.workspace = true
[target.'cfg(unix)'.dependencies]
pprof = { version = "0.14", features = [
pprof = { version = "0.13", features = [
"flamegraph",
"prost-codec",
"protobuf",

View File

@@ -13,7 +13,7 @@ workspace = true
[dependencies]
async-stream.workspace = true
async-trait.workspace = true
backon.workspace = true
backon = "1"
common-base.workspace = true
common-error.workspace = true
common-macro.workspace = true

View File

@@ -189,7 +189,7 @@ impl StateStore for ObjectStateStore {
async fn batch_delete(&self, keys: &[String]) -> Result<()> {
self.store
.delete_iter(keys.iter().map(String::as_str))
.remove(keys.to_vec())
.await
.with_context(|_| DeleteStateSnafu {
key: format!("{:?}", keys),

View File

@@ -18,6 +18,7 @@ use arrow::error::ArrowError;
use common_error::ext::{BoxedError, ErrorExt};
use common_error::status_code::StatusCode;
use common_macro::stack_trace_debug;
use common_recordbatch::error::Error as RecordbatchError;
use datafusion_common::DataFusionError;
use datatypes::arrow;
use datatypes::arrow::datatypes::DataType as ArrowDatatype;
@@ -30,6 +31,21 @@ use statrs::StatsError;
#[snafu(visibility(pub))]
#[stack_trace_debug]
pub enum Error {
#[snafu(display("Failed to execute Python UDF: {}", msg))]
PyUdf {
        // TODO(discord9): find a way to prevent the circular dependency (query <- script <- query) so we can use the script's error type
msg: String,
#[snafu(implicit)]
location: Location,
},
#[snafu(display("Failed to create temporary recordbatch when eval Python UDF"))]
UdfTempRecordBatch {
#[snafu(implicit)]
location: Location,
source: RecordbatchError,
},
#[snafu(display("Failed to execute function"))]
ExecuteFunction {
#[snafu(source)]
@@ -244,7 +260,9 @@ pub type Result<T> = std::result::Result<T, Error>;
impl ErrorExt for Error {
fn status_code(&self) -> StatusCode {
match self {
Error::CreateAccumulator { .. }
Error::UdfTempRecordBatch { .. }
| Error::PyUdf { .. }
| Error::CreateAccumulator { .. }
| Error::DowncastVector { .. }
| Error::InvalidInputState { .. }
| Error::InvalidInputCol { .. }

View File

@@ -28,13 +28,14 @@ pub fn build_same_type_ts_filter(
ts_schema: &ColumnSchema,
time_range: Option<TimestampRange>,
) -> Option<Expr> {
let ts_type = ts_schema.data_type.clone();
let time_range = time_range?;
let start = time_range
.start()
.and_then(|start| ts_schema.data_type.try_cast(Value::Timestamp(start)));
.and_then(|start| ts_type.try_cast(Value::Timestamp(start)));
let end = time_range
.end()
.and_then(|end| ts_schema.data_type.try_cast(Value::Timestamp(end)));
.and_then(|end| ts_type.try_cast(Value::Timestamp(end)));
let time_range = match (start, end) {
(Some(Value::Timestamp(start)), Some(Value::Timestamp(end))) => {

View File

@@ -35,7 +35,7 @@ use crate::DfRecordBatch;
#[derive(Clone, Debug, PartialEq)]
pub struct RecordBatch {
pub schema: SchemaRef,
pub columns: Vec<VectorRef>,
columns: Vec<VectorRef>,
df_record_batch: DfRecordBatch,
}

View File

@@ -108,6 +108,11 @@ impl Time {
self.as_formatted_string("%H:%M:%S%.f%z", None)
}
    /// Format Time for the system timezone.
pub fn to_system_tz_string(&self) -> String {
self.as_formatted_string("%H:%M:%S%.f", None)
}
    /// Format Time for the given timezone.
    /// When the timezone is None, the system timezone is used by default.
pub fn to_timezone_aware_string(&self, tz: Option<&Timezone>) -> String {

View File

@@ -19,7 +19,7 @@ futures-util.workspace = true
humantime-serde.workspace = true
num_cpus.workspace = true
rskafka.workspace = true
rustls = { workspace = true, default-features = false, features = ["ring", "logging", "std", "tls12"] }
rustls = { version = "0.23", default-features = false, features = ["ring", "logging", "std", "tls12"] }
rustls-native-certs = "0.7"
rustls-pemfile = "2.1"
serde.workspace = true

View File

@@ -433,8 +433,8 @@ impl DatanodeBuilder {
) -> Result<MitoEngine> {
if opts.storage.is_object_storage() {
// Enable the write cache when setting object storage
config.enable_write_cache = true;
info!("Configured 'enable_write_cache=true' for mito engine.");
config.enable_experimental_write_cache = true;
info!("Configured 'enable_experimental_write_cache=true' for mito engine.");
}
let mito_engine = match &opts.wal {

View File

@@ -123,14 +123,6 @@ impl ColumnSchema {
self.default_constraint.as_ref()
}
    /// Check if the default constraint is an impure function.
pub fn is_default_impure(&self) -> bool {
self.default_constraint
.as_ref()
.map(|c| c.is_function())
.unwrap_or(false)
}
#[inline]
pub fn metadata(&self) -> &Metadata {
&self.metadata
@@ -158,22 +150,11 @@ impl ColumnSchema {
self
}
pub fn with_inverted_index(&mut self, value: bool) {
match value {
true => {
self.metadata
.insert(INVERTED_INDEX_KEY.to_string(), value.to_string());
}
false => {
self.metadata.remove(INVERTED_INDEX_KEY);
}
}
}
// Put a placeholder to invalidate schemas.all(!has_inverted_index_key).
pub fn insert_inverted_index_placeholder(&mut self) {
self.metadata
.insert(INVERTED_INDEX_KEY.to_string(), "".to_string());
pub fn set_inverted_index(mut self, value: bool) -> Self {
let _ = self
.metadata
.insert(INVERTED_INDEX_KEY.to_string(), value.to_string());
self
}
pub fn is_inverted_indexed(&self) -> bool {
@@ -183,15 +164,8 @@ impl ColumnSchema {
.unwrap_or(false)
}
pub fn is_fulltext_indexed(&self) -> bool {
self.fulltext_options()
.unwrap_or_default()
.map(|option| option.enable)
.unwrap_or_default()
}
pub fn is_skipping_indexed(&self) -> bool {
self.skipping_index_options().unwrap_or_default().is_some()
pub fn has_fulltext_index_key(&self) -> bool {
self.metadata.contains_key(FULLTEXT_KEY)
}
pub fn has_inverted_index_key(&self) -> bool {
@@ -309,15 +283,6 @@ impl ColumnSchema {
}
}
    /// Creates an impure default value for this column, but only if it has an impure default constraint.
/// Otherwise, returns `Ok(None)`.
pub fn create_impure_default(&self) -> Result<Option<Value>> {
match &self.default_constraint {
Some(c) => c.create_impure_default(&self.data_type),
None => Ok(None),
}
}
/// Retrieves the fulltext options for the column.
pub fn fulltext_options(&self) -> Result<Option<FulltextOptions>> {
match self.metadata.get(FULLTEXT_KEY) {
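
Both variants of the inverted-index API in this hunk store the flag as a plain string entry in the column's metadata map keyed by `INVERTED_INDEX_KEY`, and querying it is just a lookup plus parse. Reduced to a self-contained sketch, with a `HashMap<String, String>` standing in for the metadata and an illustrative (not real) key name:

use std::collections::HashMap;

const INVERTED_INDEX_KEY: &str = "inverted_index"; // illustrative key name

#[derive(Default)]
struct ColumnMeta {
    metadata: HashMap<String, String>,
}

impl ColumnMeta {
    // Follows the variant above that removes the key when disabling,
    // rather than storing an explicit "false".
    fn with_inverted_index(&mut self, value: bool) {
        if value {
            self.metadata
                .insert(INVERTED_INDEX_KEY.to_string(), value.to_string());
        } else {
            self.metadata.remove(INVERTED_INDEX_KEY);
        }
    }

    fn is_inverted_indexed(&self) -> bool {
        self.metadata
            .get(INVERTED_INDEX_KEY)
            .and_then(|v| v.parse::<bool>().ok())
            .unwrap_or(false)
    }
}

fn main() {
    let mut col = ColumnMeta::default();
    assert!(!col.is_inverted_indexed());
    col.with_inverted_index(true);
    assert!(col.is_inverted_indexed());
    col.with_inverted_index(false);
    assert!(!col.is_inverted_indexed());
}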

View File

@@ -178,63 +178,12 @@ impl ColumnDefaultConstraint {
}
}
    /// Only creates a default vector if the constraint is impure, i.e., it is a function.
    ///
    /// This helps delay creating constant default values to the mito engine, while also keeping impure defaults consistent
pub fn create_impure_default_vector(
&self,
data_type: &ConcreteDataType,
num_rows: usize,
) -> Result<Option<VectorRef>> {
assert!(num_rows > 0);
match self {
ColumnDefaultConstraint::Function(expr) => {
// Functions should also ensure its return value is not null when
// is_nullable is true.
match &expr[..] {
// TODO(dennis): we only supports current_timestamp right now,
// it's better to use a expression framework in future.
CURRENT_TIMESTAMP | CURRENT_TIMESTAMP_FN | NOW_FN => {
create_current_timestamp_vector(data_type, num_rows).map(Some)
}
_ => error::UnsupportedDefaultExprSnafu { expr }.fail(),
}
}
ColumnDefaultConstraint::Value(_) => Ok(None),
}
}
    /// Only creates a default value if the constraint is impure, i.e., it is a function.
    ///
    /// This helps delay creating constant default values to the mito engine, while also keeping impure defaults consistent
pub fn create_impure_default(&self, data_type: &ConcreteDataType) -> Result<Option<Value>> {
match self {
ColumnDefaultConstraint::Function(expr) => {
// Functions should also ensure its return value is not null when
// is_nullable is true.
match &expr[..] {
CURRENT_TIMESTAMP | CURRENT_TIMESTAMP_FN | NOW_FN => {
create_current_timestamp(data_type).map(Some)
}
_ => error::UnsupportedDefaultExprSnafu { expr }.fail(),
}
}
ColumnDefaultConstraint::Value(_) => Ok(None),
}
}
/// Returns true if this constraint might creates NULL.
fn maybe_null(&self) -> bool {
// Once we support more functions, we may return true if given function
// could return null.
matches!(self, ColumnDefaultConstraint::Value(Value::Null))
}
/// Returns true if this constraint is a function.
pub fn is_function(&self) -> bool {
matches!(self, ColumnDefaultConstraint::Function(_))
}
}
fn create_current_timestamp(data_type: &ConcreteDataType) -> Result<Value> {
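
Concretely, the split these methods describe is: a `Function` constraint such as `now()` is "impure" and has to be materialized once, centrally, so every region sees the same value, while a `Value` constraint is pure and can be filled in later by the engine. A compact standalone sketch of that dispatch; the enum and the i64-millisecond value are simplifications, and unsupported function expressions are simply ignored here instead of returning an error:

use std::time::{SystemTime, UNIX_EPOCH};

// Simplified stand-ins for the constraint and value types.
enum DefaultConstraint {
    // e.g. "now()" / "current_timestamp()"
    Function(String),
    Value(i64),
}

impl DefaultConstraint {
    fn is_function(&self) -> bool {
        matches!(self, DefaultConstraint::Function(_))
    }

    // Returns Some(value) only for impure (function) defaults; pure constant
    // defaults return None so the engine can fill them in later.
    fn create_impure_default(&self) -> Option<i64> {
        match self {
            DefaultConstraint::Function(expr) if expr == "now()" => {
                let millis = SystemTime::now()
                    .duration_since(UNIX_EPOCH)
                    .expect("clock before epoch")
                    .as_millis() as i64;
                Some(millis)
            }
            _ => None,
        }
    }
}

fn main() {
    let impure = DefaultConstraint::Function("now()".to_string());
    let pure = DefaultConstraint::Value(0);
    assert!(impure.is_function() && impure.create_impure_default().is_some());
    assert!(!pure.is_function() && pure.create_impure_default().is_none());
}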

View File

@@ -32,7 +32,6 @@ common-runtime.workspace = true
common-telemetry.workspace = true
common-time.workspace = true
common-version.workspace = true
config.workspace = true
datafusion.workspace = true
datafusion-common.workspace = true
datafusion-expr.workspace = true
@@ -41,6 +40,7 @@ datatypes.workspace = true
enum-as-inner = "0.6.0"
enum_dispatch = "0.3"
futures = "0.3"
get-size-derive2 = "0.1.2"
get-size2 = "0.1.2"
greptime-proto.workspace = true
# This fork of hydroflow is simply for keeping our dependency in our org, and pin the version

View File

@@ -36,7 +36,6 @@ use query::QueryEngine;
use serde::{Deserialize, Serialize};
use servers::grpc::GrpcOptions;
use servers::heartbeat_options::HeartbeatOptions;
use servers::http::HttpOptions;
use servers::Mode;
use session::context::QueryContext;
use snafu::{ensure, OptionExt, ResultExt};
@@ -46,20 +45,23 @@ use tokio::sync::broadcast::error::TryRecvError;
use tokio::sync::{broadcast, watch, Mutex, RwLock};
pub(crate) use crate::adapter::node_context::FlownodeContext;
use crate::adapter::refill::RefillTask;
use crate::adapter::table_source::ManagedTableSource;
use crate::adapter::util::relation_desc_to_column_schemas_with_fallback;
pub(crate) use crate::adapter::worker::{create_worker, Worker, WorkerHandle};
use crate::adapter::table_source::TableSource;
use crate::adapter::util::{
relation_desc_to_column_schemas_with_fallback, table_info_value_to_relation_desc,
};
use crate::adapter::worker::{create_worker, Worker, WorkerHandle};
use crate::compute::ErrCollector;
use crate::df_optimizer::sql_to_flow_plan;
use crate::error::{EvalSnafu, ExternalSnafu, InternalSnafu, InvalidQuerySnafu, UnexpectedSnafu};
use crate::error::{
EvalSnafu, ExternalSnafu, FlowAlreadyExistSnafu, InternalSnafu, InvalidQuerySnafu,
UnexpectedSnafu,
};
use crate::expr::Batch;
use crate::metrics::{METRIC_FLOW_INSERT_ELAPSED, METRIC_FLOW_ROWS, METRIC_FLOW_RUN_INTERVAL_MS};
use crate::repr::{self, DiffRow, RelationDesc, Row, BATCH_SIZE};
mod flownode_impl;
mod parse_expr;
pub(crate) mod refill;
mod stat;
#[cfg(test)]
mod tests;
@@ -67,7 +69,7 @@ mod util;
mod worker;
pub(crate) mod node_context;
pub(crate) mod table_source;
mod table_source;
use crate::error::Error;
use crate::utils::StateReportHandler;
@@ -83,21 +85,6 @@ pub const UPDATE_AT_TS_COL: &str = "update_at";
pub type FlowId = u64;
pub type TableName = [String; 3];
/// Flow config that exists both in standalone&distributed mode
#[derive(Clone, Debug, Serialize, Deserialize, PartialEq)]
#[serde(default)]
pub struct FlowConfig {
pub num_workers: usize,
}
impl Default for FlowConfig {
fn default() -> Self {
Self {
num_workers: (common_config::utils::get_cpus() / 2).max(1),
}
}
}
/// Options for flow node
#[derive(Clone, Debug, Serialize, Deserialize)]
#[serde(default)]
@@ -105,9 +92,7 @@ pub struct FlownodeOptions {
pub mode: Mode,
pub cluster_id: Option<u64>,
pub node_id: Option<u64>,
pub flow: FlowConfig,
pub grpc: GrpcOptions,
pub http: HttpOptions,
pub meta_client: Option<MetaClientOptions>,
pub logging: LoggingOptions,
pub tracing: TracingOptions,
@@ -120,9 +105,7 @@ impl Default for FlownodeOptions {
mode: servers::Mode::Standalone,
cluster_id: None,
node_id: None,
flow: FlowConfig::default(),
grpc: GrpcOptions::default().with_addr("127.0.0.1:3004"),
http: HttpOptions::default(),
meta_client: None,
logging: LoggingOptions::default(),
tracing: TracingOptions::default(),
@@ -131,14 +114,7 @@ impl Default for FlownodeOptions {
}
}
impl Configurable for FlownodeOptions {
fn validate_sanitize(&mut self) -> common_config::error::Result<()> {
if self.flow.num_workers == 0 {
self.flow.num_workers = (common_config::utils::get_cpus() / 2).max(1);
}
Ok(())
}
}
impl Configurable for FlownodeOptions {}
/// Arc-ed FlowNodeManager, cheaper to clone
pub type FlowWorkerManagerRef = Arc<FlowWorkerManager>;
@@ -149,18 +125,14 @@ pub type FlowWorkerManagerRef = Arc<FlowWorkerManager>;
pub struct FlowWorkerManager {
/// The handler to the worker that will run the dataflow
/// which is `!Send` so a handle is used
pub worker_handles: Vec<WorkerHandle>,
/// The selector to select a worker to run the dataflow
worker_selector: Mutex<usize>,
pub worker_handles: Vec<Mutex<WorkerHandle>>,
/// The query engine that will be used to parse the query and convert it to a dataflow plan
pub query_engine: Arc<dyn QueryEngine>,
/// Getting table name and table schema from table info manager
table_info_source: ManagedTableSource,
table_info_source: TableSource,
frontend_invoker: RwLock<Option<FrontendInvoker>>,
/// contains mapping from table name to global id, and table schema
node_context: RwLock<FlownodeContext>,
/// Contains all refill tasks
refill_tasks: RwLock<BTreeMap<FlowId, RefillTask>>,
flow_err_collectors: RwLock<BTreeMap<FlowId, ErrCollector>>,
src_send_buf_lens: RwLock<BTreeMap<TableId, watch::Receiver<usize>>>,
tick_manager: FlowTickManager,
@@ -186,21 +158,19 @@ impl FlowWorkerManager {
query_engine: Arc<dyn QueryEngine>,
table_meta: TableMetadataManagerRef,
) -> Self {
let srv_map = ManagedTableSource::new(
let srv_map = TableSource::new(
table_meta.table_info_manager().clone(),
table_meta.table_name_manager().clone(),
);
let node_context = FlownodeContext::new(Box::new(srv_map.clone()) as _);
let node_context = FlownodeContext::default();
let tick_manager = FlowTickManager::new();
let worker_handles = Vec::new();
FlowWorkerManager {
worker_handles,
worker_selector: Mutex::new(0),
query_engine,
table_info_source: srv_map,
frontend_invoker: RwLock::new(None),
node_context: RwLock::new(node_context),
refill_tasks: Default::default(),
flow_err_collectors: Default::default(),
src_send_buf_lens: Default::default(),
tick_manager,
@@ -216,27 +186,20 @@ impl FlowWorkerManager {
}
/// Create a flownode manager with one worker
pub fn new_with_workers<'s>(
pub fn new_with_worker<'s>(
node_id: Option<u32>,
query_engine: Arc<dyn QueryEngine>,
table_meta: TableMetadataManagerRef,
num_workers: usize,
) -> (Self, Vec<Worker<'s>>) {
) -> (Self, Worker<'s>) {
let mut zelf = Self::new(node_id, query_engine, table_meta);
let workers: Vec<_> = (0..num_workers)
.map(|_| {
let (handle, worker) = create_worker();
zelf.add_worker_handle(handle);
worker
})
.collect();
(zelf, workers)
let (handle, worker) = create_worker();
zelf.add_worker_handle(handle);
(zelf, worker)
}
    /// Adds a worker handle to the manager, meaning the corresponding worker is under its management
pub fn add_worker_handle(&mut self, handle: WorkerHandle) {
self.worker_handles.push(handle);
self.worker_handles.push(Mutex::new(handle));
}
}
@@ -284,29 +247,12 @@ impl FlowWorkerManager {
let (catalog, schema) = (table_name[0].clone(), table_name[1].clone());
let ctx = Arc::new(QueryContext::with(&catalog, &schema));
let (is_ts_placeholder, proto_schema) = match self
let (is_ts_placeholder, proto_schema) = self
.try_fetch_existing_table(&table_name)
.await?
.context(UnexpectedSnafu {
reason: format!("Table not found: {}", table_name.join(".")),
}) {
Ok(r) => r,
Err(e) => {
if self
.table_info_source
.get_opt_table_id_from_name(&table_name)
.await?
.is_none()
{
// deal with both flow&sink table no longer exists
// but some output is still in output buf
common_telemetry::warn!(e; "Table `{}` no longer exists, skip writeback", table_name.join("."));
continue;
} else {
return Err(e);
}
}
};
})?;
let schema_len = proto_schema.len();
let total_rows = reqs.iter().map(|r| r.len()).sum::<usize>();
@@ -463,7 +409,7 @@ impl FlowWorkerManager {
) -> Result<Option<(Vec<String>, Option<usize>, Vec<ColumnSchema>)>, Error> {
if let Some(table_id) = self
.table_info_source
.get_opt_table_id_from_name(table_name)
.get_table_id_from_name(table_name)
.await?
{
let table_info = self
@@ -594,16 +540,13 @@ impl FlowWorkerManager {
pub async fn run(&self, mut shutdown: Option<broadcast::Receiver<()>>) {
debug!("Starting to run");
let default_interval = Duration::from_secs(1);
let mut tick_interval = tokio::time::interval(default_interval);
        // Burst mode, so that if we miss a tick, we run immediately to fully utilize the CPU
tick_interval.set_missed_tick_behavior(tokio::time::MissedTickBehavior::Burst);
let mut avg_spd = 0; // rows/sec
let mut since_last_run = tokio::time::Instant::now();
let run_per_trace = 10;
let mut run_cnt = 0;
loop {
// TODO(discord9): only run when new inputs arrive or scheduled to
let row_cnt = self.run_available(false).await.unwrap_or_else(|err| {
let row_cnt = self.run_available(true).await.unwrap_or_else(|err| {
common_telemetry::error!(err;"Run available errors");
0
});
@@ -633,9 +576,9 @@ impl FlowWorkerManager {
            // For now we want to batch rows until there are around `BATCH_SIZE` rows in the send buf
            // before triggering a run of the flow's worker
            // (plus one to prevent division by zero)
let wait_for = since_last_run.elapsed();
// last runs insert speed
let cur_spd = row_cnt * 1000 / wait_for.as_millis().max(1) as usize;
// rapid increase, slow decay
avg_spd = if cur_spd > avg_spd {
@@ -658,10 +601,7 @@ impl FlowWorkerManager {
METRIC_FLOW_RUN_INTERVAL_MS.set(new_wait.as_millis() as i64);
since_last_run = tokio::time::Instant::now();
tokio::select! {
_ = tick_interval.tick() => (),
_ = tokio::time::sleep(new_wait) => ()
}
tokio::time::sleep(new_wait).await;
}
// flow is now shutdown, drop frontend_invoker early so a ref cycle(in standalone mode) can be prevent:
// FlowWorkerManager.frontend_invoker -> FrontendInvoker.inserter
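
The scheduling loop above adapts how long it waits between runs from an estimated ingest speed: it measures rows per second since the last run, smooths it with a "rapid increase, slow decay" rule, and then waits roughly as long as it should take for about `BATCH_SIZE` rows to accumulate. The exact constants are not visible in this hunk, so the sketch below only illustrates the shape of the computation; `BATCH_SIZE`, the decay weights, and the 1-second clamp are assumptions.

use std::time::Duration;

const BATCH_SIZE: usize = 32 * 1024; // illustrative batch size, not the real constant

// "Rapid increase, slow decay": jump straight up to a higher observed speed,
// but only move part of the way down toward a lower one.
fn smooth_speed(avg_spd: usize, cur_spd: usize) -> usize {
    if cur_spd > avg_spd {
        cur_spd
    } else {
        (avg_spd * 3 + cur_spd) / 4
    }
}

// Wait roughly until ~BATCH_SIZE rows should have arrived, clamped to at most 1s.
fn next_wait(avg_spd: usize) -> Duration {
    let millis = (BATCH_SIZE * 1000) / (avg_spd + 1); // +1 prevents division by zero
    Duration::from_millis(millis.min(1000) as u64)
}

fn main() {
    let mut avg = 0usize;
    for cur in [50_000, 10_000, 200_000] {
        avg = smooth_speed(avg, cur);
        println!("avg={avg} rows/s -> wait {:?}", next_wait(avg));
    }
}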
@@ -672,9 +612,9 @@ impl FlowWorkerManager {
/// Run all available subgraphs in the flow node.
/// This will try to run all dataflows in this node.
///
/// Set `blocking` to true to wait until the worker finishes running,
/// or false to just trigger a run and return immediately.
/// Returns the number of rows sent to the worker (inaccurate).
/// Set `blocking` to true to wait until the lock is acquired,
/// and false to return immediately if the lock is not acquired.
/// Returns the number of rows sent to the worker.
/// TODO(discord9): add a flag for subgraphs that have received input since the last run
pub async fn run_available(&self, blocking: bool) -> Result<usize, Error> {
let mut row_cnt = 0;
@@ -682,7 +622,13 @@ impl FlowWorkerManager {
let now = self.tick_manager.tick();
for worker in self.worker_handles.iter() {
// TODO(discord9): consider how to handle error in individual worker
worker.run_available(now, blocking).await?;
if blocking {
worker.lock().await.run_available(now, blocking).await?;
} else if let Ok(worker) = worker.try_lock() {
worker.run_available(now, blocking).await?;
} else {
return Ok(row_cnt);
}
}
// check row send and rows remain in send buf
let flush_res = if blocking {
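// Illustrative sketch (not part of this diff): the per-worker locking policy in
// `run_available` above, demonstrated with tokio mutexes and a u32 standing in
// for the real `WorkerHandle`. With `blocking = true` each worker's lock is
// awaited; with `blocking = false`, `try_lock` is used and the loop bails out as
// soon as a worker is busy.
use tokio::sync::Mutex;

async fn run_available(workers: &[Mutex<u32>], blocking: bool) -> usize {
    let mut ran = 0;
    for worker in workers {
        if blocking {
            let guard = worker.lock().await;
            ran += *guard as usize; // stand-in for `worker.run_available(now, blocking)`
        } else if let Ok(guard) = worker.try_lock() {
            ran += *guard as usize;
        } else {
            // another caller holds this worker; return early instead of waiting
            return ran;
        }
    }
    ran
}

#[tokio::main]
async fn main() {
    let workers = vec![Mutex::new(1), Mutex::new(2)];
    println!("ran = {}", run_available(&workers, false).await);
}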
@@ -753,6 +699,7 @@ impl FlowWorkerManager {
/// Remove a flow by its id
pub async fn remove_flow(&self, flow_id: FlowId) -> Result<(), Error> {
for handle in self.worker_handles.iter() {
let handle = handle.lock().await;
if handle.contains_flow(flow_id).await? {
handle.remove_flow(flow_id).await?;
break;
@@ -782,6 +729,43 @@ impl FlowWorkerManager {
query_ctx,
} = args;
let already_exist = {
let mut flag = false;
// check if the task already exists
for handle in self.worker_handles.iter() {
if handle.lock().await.contains_flow(flow_id).await? {
flag = true;
break;
}
}
flag
};
match (create_if_not_exists, or_replace, already_exist) {
// do replace
(_, true, true) => {
info!("Replacing flow with id={}", flow_id);
self.remove_flow(flow_id).await?;
}
(false, false, true) => FlowAlreadyExistSnafu { id: flow_id }.fail()?,
// do nothing if exists
(true, false, true) => {
info!("Flow with id={} already exists, do nothing", flow_id);
return Ok(None);
}
// create if not exists
(_, _, false) => (),
}
if create_if_not_exists {
// check if the task already exists
for handle in self.worker_handles.iter() {
if handle.lock().await.contains_flow(flow_id).await? {
return Ok(None);
}
}
}
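// Illustrative sketch (not part of this diff): the decision table implied by the
// `match (create_if_not_exists, or_replace, already_exist)` above, as a small
// pure function. `Action` and its variants are hypothetical names used only to
// make the branches explicit.
#[derive(Debug, PartialEq)]
enum Action {
    Replace,   // drop the existing flow, then create the new one
    Error,     // flow exists and neither CREATE IF NOT EXISTS nor OR REPLACE was given
    DoNothing, // CREATE IF NOT EXISTS and the flow already exists
    Create,    // flow does not exist yet
}

fn decide(create_if_not_exists: bool, or_replace: bool, already_exist: bool) -> Action {
    match (create_if_not_exists, or_replace, already_exist) {
        (_, true, true) => Action::Replace,
        (false, false, true) => Action::Error,
        (true, false, true) => Action::DoNothing,
        (_, _, false) => Action::Create,
    }
}

fn main() {
    assert_eq!(decide(true, false, true), Action::DoNothing);
    assert_eq!(decide(false, true, true), Action::Replace);
    assert_eq!(decide(false, false, false), Action::Create);
    println!("decision table ok");
}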
let mut node_ctx = self.node_context.write().await;
// assign global id to source and sink table
for source in &source_table_ids {
@@ -844,9 +828,27 @@ impl FlowWorkerManager {
.fail()?,
}
}
let table_id = self
.table_info_source
.get_table_id_from_name(&sink_table_name)
.await?
.context(UnexpectedSnafu {
reason: format!("Can't get table id for table name {:?}", sink_table_name),
})?;
let table_info_value = self
.table_info_source
.get_table_info_value(&table_id)
.await?
.context(UnexpectedSnafu {
reason: format!("Can't get table info value for table id {:?}", table_id),
})?;
let real_schema = table_info_value_to_relation_desc(table_info_value)?;
node_ctx.assign_table_schema(&sink_table_name, real_schema.clone())?;
} else {
// assign inferred schema to sink table
// create sink table
node_ctx.assign_table_schema(&sink_table_name, flow_plan.schema.clone())?;
let did_create = self
.create_table_from_relation(
&format!("flow-id={flow_id}"),
@@ -862,8 +864,6 @@ impl FlowWorkerManager {
}
}
node_ctx.add_flow_plan(flow_id, flow_plan.clone());
let _ = comment;
let _ = flow_options;
@@ -888,8 +888,7 @@ impl FlowWorkerManager {
.write()
.await
.insert(flow_id, err_collector.clone());
// TODO(discord9): load balance?
let handle = self.get_worker_handle_for_create_flow().await;
let handle = &self.worker_handles[0].lock().await;
let create_request = worker::Request::Create {
flow_id,
plan: flow_plan,
@@ -898,11 +897,9 @@ impl FlowWorkerManager {
source_ids,
src_recvs: source_receivers,
expire_after,
or_replace,
create_if_not_exists,
err_collector,
};
handle.create_flow(create_request).await?;
info!("Successfully create flow with id={}", flow_id);
Ok(Some(flow_id))

View File

@@ -24,26 +24,21 @@ use common_error::ext::BoxedError;
use common_meta::error::{ExternalSnafu, Result, UnexpectedSnafu};
use common_meta::node_manager::Flownode;
use common_telemetry::{debug, trace};
use datatypes::value::Value;
use itertools::Itertools;
use snafu::{IntoError, OptionExt, ResultExt};
use snafu::{OptionExt, ResultExt};
use store_api::storage::RegionId;
use super::util::from_proto_to_data_type;
use crate::adapter::{CreateFlowArgs, FlowWorkerManager};
use crate::error::{CreateFlowSnafu, InsertIntoFlowSnafu, InternalSnafu};
use crate::error::InternalSnafu;
use crate::metrics::METRIC_FLOW_TASK_COUNT;
use crate::repr::{self, DiffRow};
/// Return a function that converts `crate::error::Error` into `common_meta::error::Error`
fn to_meta_err(
location: snafu::Location,
) -> impl FnOnce(crate::error::Error) -> common_meta::error::Error {
move |err: crate::error::Error| -> common_meta::error::Error {
common_meta::error::Error::External {
location,
source: BoxedError::new(err),
}
}
fn to_meta_err(err: crate::error::Error) -> common_meta::error::Error {
// TODO(discord9): refactor this
Err::<(), _>(BoxedError::new(err))
.with_context(|_| ExternalSnafu)
.unwrap_err()
}
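// Illustrative sketch (not part of this diff): the difference between the two
// `to_meta_err` shapes in the hunk above, using simplified error types. Only
// `snafu::Location` and `snafu::location!()` are real (and assume the `snafu`
// crate, as in the actual code); `FlowError` and `MetaError` are stand-ins.
// The closure-returning form captures a `Location` at the call site, so the
// converted error records where the conversion happened; a plain function cannot.
#[derive(Debug)]
struct FlowError(String);

#[derive(Debug)]
struct MetaError {
    source: FlowError,
    location: snafu::Location,
}

/// Returns a converter that remembers the location it was created at.
fn to_meta_err(location: snafu::Location) -> impl FnOnce(FlowError) -> MetaError {
    move |err| MetaError { source: err, location }
}

fn main() {
    let res: Result<(), FlowError> = Err(FlowError("boom".into()));
    // Call-site usage mirrors `.map_err(to_meta_err(snafu::location!()))?` above.
    let converted = res.map_err(to_meta_err(snafu::location!()));
    println!("{converted:?}");
}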
#[async_trait::async_trait]
@@ -80,16 +75,11 @@ impl Flownode for FlowWorkerManager {
or_replace,
expire_after,
comment: Some(comment),
sql: sql.clone(),
sql,
flow_options,
query_ctx,
};
let ret = self
.create_flow(args)
.await
.map_err(BoxedError::new)
.with_context(|_| CreateFlowSnafu { sql: sql.clone() })
.map_err(to_meta_err(snafu::location!()))?;
let ret = self.create_flow(args).await.map_err(to_meta_err)?;
METRIC_FLOW_TASK_COUNT.inc();
Ok(FlowResponse {
affected_flows: ret
@@ -104,7 +94,7 @@ impl Flownode for FlowWorkerManager {
})) => {
self.remove_flow(flow_id.id as u64)
.await
.map_err(to_meta_err(snafu::location!()))?;
.map_err(to_meta_err)?;
METRIC_FLOW_TASK_COUNT.dec();
Ok(Default::default())
}
@@ -122,15 +112,9 @@ impl Flownode for FlowWorkerManager {
.await
.flush_all_sender()
.await
.map_err(to_meta_err(snafu::location!()))?;
let rows_send = self
.run_available(true)
.await
.map_err(to_meta_err(snafu::location!()))?;
let row = self
.send_writeback_requests()
.await
.map_err(to_meta_err(snafu::location!()))?;
.map_err(to_meta_err)?;
let rows_send = self.run_available(true).await.map_err(to_meta_err)?;
let row = self.send_writeback_requests().await.map_err(to_meta_err)?;
debug!(
"Done to flush flow_id={:?} with {} input rows flushed, {} rows sended and {} output rows flushed",
@@ -170,41 +154,17 @@ impl Flownode for FlowWorkerManager {
// TODO(discord9): reconsider time assignment mechanism
let now = self.tick_manager.tick();
let (table_types, fetch_order) = {
let fetch_order = {
let ctx = self.node_context.read().await;
// TODO(discord9): also check schema version so that altered table can be reported
let table_schema = ctx
.table_source
.table_from_id(&table_id)
.await
.map_err(to_meta_err(snafu::location!()))?;
let default_vals = table_schema
.default_values
.iter()
.zip(table_schema.relation_desc.typ().column_types.iter())
.map(|(v, ty)| {
v.as_ref().and_then(|v| {
match v.create_default(ty.scalar_type(), ty.nullable()) {
Ok(v) => Some(v),
Err(err) => {
common_telemetry::error!(err; "Failed to create default value");
None
}
}
})
})
.collect_vec();
let table_types = table_schema
.relation_desc
.typ()
.column_types
.clone()
.into_iter()
.map(|t| t.scalar_type)
.collect_vec();
let table_col_names = table_schema.relation_desc.names;
let table_col_names = ctx
.table_repr
.get_by_table_id(&table_id)
.map(|r| r.1)
.and_then(|id| ctx.schema.get(&id))
.map(|desc| &desc.names)
.context(UnexpectedSnafu {
err_msg: format!("Table not found: {}", table_id),
})?;
let table_col_names = table_col_names
.iter().enumerate()
.map(|(idx,name)| match name {
@@ -221,80 +181,44 @@ impl Flownode for FlowWorkerManager {
.enumerate()
.map(|(i, name)| (&name.column_name, i)),
);
let fetch_order: Vec<FetchFromRow> = table_col_names
let fetch_order: Vec<usize> = table_col_names
.iter()
.zip(default_vals.into_iter())
.map(|(col_name, col_default_val)| {
name_to_col
.get(col_name)
.copied()
.map(FetchFromRow::Idx)
.or_else(|| col_default_val.clone().map(FetchFromRow::Default))
.with_context(|| UnexpectedSnafu {
err_msg: format!(
"Column not found: {}, default_value: {:?}",
col_name, col_default_val
),
})
.map(|names| {
name_to_col.get(names).copied().context(UnexpectedSnafu {
err_msg: format!("Column not found: {}", names),
})
})
.try_collect()?;
trace!("Reordering columns: {:?}", fetch_order);
(table_types, fetch_order)
if !fetch_order.iter().enumerate().all(|(i, &v)| i == v) {
trace!("Reordering columns: {:?}", fetch_order)
}
fetch_order
};
// TODO(discord9): use column instead of row
let rows: Vec<DiffRow> = rows_proto
.into_iter()
.map(|r| {
let r = repr::Row::from(r);
let reordered = fetch_order.iter().map(|i| i.fetch(&r)).collect_vec();
let reordered = fetch_order
.iter()
.map(|&i| r.inner[i].clone())
.collect_vec();
repr::Row::new(reordered)
})
.map(|r| (r, now, 1))
.collect_vec();
if let Err(err) = self
.handle_write_request(region_id.into(), rows, &table_types)
let batch_datatypes = insert_schema
.iter()
.map(from_proto_to_data_type)
.collect::<std::result::Result<Vec<_>, _>>()
.map_err(to_meta_err)?;
self.handle_write_request(region_id.into(), rows, &batch_datatypes)
.await
{
let err = BoxedError::new(err);
let flow_ids = self
.node_context
.read()
.await
.get_flow_ids(table_id)
.into_iter()
.flatten()
.cloned()
.collect_vec();
let err = InsertIntoFlowSnafu {
region_id,
flow_ids,
}
.into_error(err);
common_telemetry::error!(err; "Failed to handle write request");
let err = to_meta_err(snafu::location!())(err);
return Err(err);
}
.map_err(|err| {
common_telemetry::error!(err;"Failed to handle write request");
to_meta_err(err)
})?;
}
Ok(Default::default())
}
}
/// Simple helper enum for fetching a value from a row, with a default value as fallback
#[derive(Debug, Clone)]
enum FetchFromRow {
Idx(usize),
Default(Value),
}
impl FetchFromRow {
/// Panics if `idx` is out of bounds
fn fetch(&self, row: &repr::Row) -> Value {
match self {
FetchFromRow::Idx(idx) => row.get(*idx).unwrap().clone(),
FetchFromRow::Default(v) => v.clone(),
}
}
}
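// Illustrative sketch (not part of this diff): how a `fetch_order` made of
// `FetchFromRow` values like the ones above reorders an incoming row. `Value`
// and `Row` here are simplified stand-ins for the real `datatypes::value::Value`
// and `repr::Row`.
#[derive(Debug, Clone, PartialEq)]
enum Value {
    Int(i64),
    Null,
}

struct Row {
    inner: Vec<Value>,
}

#[derive(Debug, Clone)]
enum FetchFromRow {
    Idx(usize),     // take the value at this position of the incoming row
    Default(Value), // column missing from the insert: use its default value
}

impl FetchFromRow {
    fn fetch(&self, row: &Row) -> Value {
        match self {
            FetchFromRow::Idx(idx) => row.inner[*idx].clone(),
            FetchFromRow::Default(v) => v.clone(),
        }
    }
}

fn main() {
    // The incoming insert provides columns (b, a); the table is declared as
    // (a, b, c) where c is absent from the insert and defaults to NULL.
    let incoming = Row { inner: vec![Value::Int(2), Value::Int(1)] };
    let fetch_order = vec![
        FetchFromRow::Idx(1),               // a
        FetchFromRow::Idx(0),               // b
        FetchFromRow::Default(Value::Null), // c (not in the insert)
    ];
    let reordered: Vec<Value> = fetch_order.iter().map(|f| f.fetch(&incoming)).collect();
    assert_eq!(reordered, vec![Value::Int(1), Value::Int(2), Value::Null]);
    println!("{reordered:?}");
}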

Some files were not shown because too many files have changed in this diff.