Mirror of https://github.com/GreptimeTeam/greptimedb.git
Synced 2025-12-25 23:49:58 +00:00

Compare commits: release/v0 ... v0.14.0-ni (22 commits)
Commits in this comparison (SHA1 only):

4eb0771afe, a0739a96e4, 77ccf1eac8, 1dc4a196bf, 2431cd3bdf, cd730e0486,
a19441bed8, 162e3b8620, 83642dab87, 46070958c9, eea8b1c730, 1ab4ddab8d,
9e63018198, 594bec8c36, 1586732d20, 16fddd97a7, 2260782c12, 09dacc8e9b,
dec439db2b, dc76571166, 3e17f8c426, a5df3954f3
@@ -8,7 +8,7 @@ inputs:
     default: 2
     description: "Number of Datanode replicas"
   meta-replicas:
-    default: 1
+    default: 2
     description: "Number of Metasrv replicas"
   image-registry:
     default: "docker.io"
.github/workflows/develop.yml (vendored, 5 changed lines)

@@ -576,9 +576,12 @@ jobs:
          - name: "Remote WAL"
            opts: "-w kafka -k 127.0.0.1:9092"
            kafka: true
-          - name: "Pg Kvbackend"
+          - name: "PostgreSQL KvBackend"
            opts: "--setup-pg"
            kafka: false
+          - name: "MySQL Kvbackend"
+            opts: "--setup-mysql"
+            kafka: false
     timeout-minutes: 60
     steps:
       - uses: actions/checkout@v4
.github/workflows/release.yml (vendored, 2 changed lines)

@@ -91,7 +91,7 @@ env:
   # The scheduled version is '${{ env.NEXT_RELEASE_VERSION }}-nightly-YYYYMMDD', like v0.2.0-nigthly-20230313;
   NIGHTLY_RELEASE_PREFIX: nightly
   # Note: The NEXT_RELEASE_VERSION should be modified manually by every formal release.
-  NEXT_RELEASE_VERSION: v0.13.0
+  NEXT_RELEASE_VERSION: v0.14.0

 jobs:
   allocate-runners:
.gitignore (vendored, 3 changed lines)

@@ -54,3 +54,6 @@ tests-fuzz/corpus/
 # Nix
 .direnv
 .envrc
+
+## default data home
+greptimedb_data
Cargo.lock (generated, 146 changed lines)

Every workspace crate is bumped from version 0.13.0 to 0.14.0: api, auth, cache, catalog,
cli, client, cmd, common-base, common-catalog, common-config, common-datasource,
common-decimal, common-error, common-frontend, common-function, common-greptimedb-telemetry,
common-grpc, common-grpc-expr, common-macro, common-mem-prof, common-meta, common-options,
common-plugins, common-pprof, common-procedure, common-procedure-test, common-query,
common-recordbatch, common-runtime, common-telemetry, common-test-util, common-time,
common-version, common-wal, datanode, datatypes, file-engine, flow, frontend, index,
log-query, log-store, meta-client, meta-srv, metric-engine, mito2, object-store, operator,
partition, pipeline, plugins, promql, puffin, query, servers, session, sql, sqlness-runner,
store-api, substrait, table, tests-fuzz, and tests-integration.

Beyond the version bumps, the lockfile diff also records:

- greptime-proto (version 0.1.0) now pins rev a7274ddce299f33d23dbe8af5bbe6219f07c559a instead of
  c5419bbd20cb42e568ec325a4d71a3c94cc327e1.
- Internal references to the workspace "substrait 0.13.0" crate become "substrait 0.14.0" in the
  dependency lists of cli, client, cmd, datanode, flow, operator, query, and tests-integration
  (the external "substrait 0.37.3" dependency is unchanged).
- meta-srv gains a "sqlx" dependency.
- tests-integration gains a "pipeline" dependency.
@@ -67,7 +67,7 @@ members = [
 resolver = "2"

 [workspace.package]
-version = "0.13.0"
+version = "0.14.0"
 edition = "2021"
 license = "Apache-2.0"

@@ -129,7 +129,7 @@ etcd-client = "0.14"
 fst = "0.4.7"
 futures = "0.3"
 futures-util = "0.3"
-greptime-proto = { git = "https://github.com/GreptimeTeam/greptime-proto.git", rev = "c5419bbd20cb42e568ec325a4d71a3c94cc327e1" }
+greptime-proto = { git = "https://github.com/GreptimeTeam/greptime-proto.git", rev = "a7274ddce299f33d23dbe8af5bbe6219f07c559a" }
 hex = "0.4"
 http = "1"
 humantime = "2.1"

@@ -191,6 +191,8 @@ snafu = "0.8"
 sqlx = { version = "0.8", features = [
     "runtime-tokio-rustls",
+    "mysql",
     "postgres",
     "chrono",
 ] }
 sysinfo = "0.30"
 # on branch v0.52.x
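The single bump under [workspace.package] above is what drives the per-crate version changes recorded throughout Cargo.lock. A minimal sketch of how that inheritance works in Cargo, assuming the member crates opt in via `version.workspace = true` (the member path and file layout below are illustrative, not copied from the repository):

```toml
# Workspace root Cargo.toml: one place to bump the release version.
[workspace.package]
version = "0.14.0"

# Any member crate's Cargo.toml (illustrative path, e.g. src/common/base/Cargo.toml):
[package]
name = "common-base"
version.workspace = true   # inherits 0.14.0 from the workspace, which is why Cargo.lock shows the bump for every crate
```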
README.md (14 changed lines)

@@ -6,7 +6,7 @@
 </picture>
 </p>

-<h2 align="center">Unified & Cost-Effective Time Series Database for Metrics, Logs, and Events</h2>
+<h2 align="center">Unified & Cost-Effective Observerability Database for Metrics, Logs, and Events</h2>

 <div align="center">
 <h3 align="center">

@@ -62,15 +62,19 @@
 ## Introduction

-**GreptimeDB** is an open-source unified & cost-effective time-series database for **Metrics**, **Logs**, and **Events** (also **Traces** in plan). You can gain real-time insights from Edge to Cloud at Any Scale.
+**GreptimeDB** is an open-source unified & cost-effective observerability database for **Metrics**, **Logs**, and **Events** (also **Traces** in plan). You can gain real-time insights from Edge to Cloud at Any Scale.

+## News
+
+**[GreptimeDB archives 1 billion cold run #1 in JSONBench!](https://greptime.com/blogs/2025-03-18-jsonbench-greptimedb-performance)**
+
 ## Why GreptimeDB

-Our core developers have been building time-series data platforms for years. Based on our best practices, GreptimeDB was born to give you:
+Our core developers have been building observerability data platforms for years. Based on our best practices, GreptimeDB was born to give you:

 * **Unified Processing of Metrics, Logs, and Events**

-  GreptimeDB unifies time series data processing by treating all data - whether metrics, logs, or events - as timestamped events with context. Users can analyze this data using either [SQL](https://docs.greptime.com/user-guide/query-data/sql) or [PromQL](https://docs.greptime.com/user-guide/query-data/promql) and leverage stream processing ([Flow](https://docs.greptime.com/user-guide/flow-computation/overview)) to enable continuous aggregation. [Read more](https://docs.greptime.com/user-guide/concepts/data-model).
+  GreptimeDB unifies observerability data processing by treating all data - whether metrics, logs, or events - as timestamped events with context. Users can analyze this data using either [SQL](https://docs.greptime.com/user-guide/query-data/sql) or [PromQL](https://docs.greptime.com/user-guide/query-data/promql) and leverage stream processing ([Flow](https://docs.greptime.com/user-guide/flow-computation/overview)) to enable continuous aggregation. [Read more](https://docs.greptime.com/user-guide/concepts/data-model).

 * **Cloud-native Distributed Database**

@@ -112,7 +116,7 @@ Start a GreptimeDB container with:

 ```shell
 docker run -p 127.0.0.1:4000-4003:4000-4003 \
-  -v "$(pwd)/greptimedb:/tmp/greptimedb" \
+  -v "$(pwd)/greptimedb:./greptimedb_data" \
   --name greptime --rm \
   greptime/greptimedb:latest standalone start \
   --http-addr 0.0.0.0:4000 \
@@ -24,7 +24,7 @@
-| `http.timeout` | String | `30s` | HTTP request timeout. Set to 0 to disable timeout. |
+| `http.timeout` | String | `0s` | HTTP request timeout. Set to 0 to disable timeout. |

@@ -101,7 +101,7 @@
-| `storage.data_home` | String | `/tmp/greptimedb/` | The working home directory. |
+| `storage.data_home` | String | `./greptimedb_data/` | The working home directory. |

@@ -181,7 +181,7 @@
-| `logging.dir` | String | `/tmp/greptimedb/logs` | The directory to store the log files. If set to empty, logs will not be written to files. |
+| `logging.dir` | String | `./greptimedb_data/logs` | The directory to store the log files. If set to empty, logs will not be written to files. |

@@ -222,7 +222,7 @@
-| `http.timeout` | String | `30s` | HTTP request timeout. Set to 0 to disable timeout. |
+| `http.timeout` | String | `0s` | HTTP request timeout. Set to 0 to disable timeout. |

@@ -279,7 +279,7 @@
-| `logging.dir` | String | `/tmp/greptimedb/logs` | The directory to store the log files. If set to empty, logs will not be written to files. |
+| `logging.dir` | String | `./greptimedb_data/logs` | The directory to store the log files. If set to empty, logs will not be written to files. |

@@ -308,7 +308,7 @@
-| `data_home` | String | `/tmp/metasrv/` | The working home directory. |
+| `data_home` | String | `./greptimedb_data/metasrv/` | The working home directory. |

@@ -352,7 +352,7 @@
-| `logging.dir` | String | `/tmp/greptimedb/logs` | The directory to store the log files. If set to empty, logs will not be written to files. |
+| `logging.dir` | String | `./greptimedb_data/logs` | The directory to store the log files. If set to empty, logs will not be written to files. |

@@ -390,7 +390,7 @@
-| `http.timeout` | String | `30s` | HTTP request timeout. Set to 0 to disable timeout. |
+| `http.timeout` | String | `0s` | HTTP request timeout. Set to 0 to disable timeout. |

@@ -442,7 +442,7 @@
-| `storage.data_home` | String | `/tmp/greptimedb/` | The working home directory. |
+| `storage.data_home` | String | `./greptimedb_data/` | The working home directory. |

@@ -522,7 +522,7 @@
-| `logging.dir` | String | `/tmp/greptimedb/logs` | The directory to store the log files. If set to empty, logs will not be written to files. |
+| `logging.dir` | String | `./greptimedb_data/logs` | The directory to store the log files. If set to empty, logs will not be written to files. |

@@ -563,7 +563,7 @@
-| `http.timeout` | String | `30s` | HTTP request timeout. Set to 0 to disable timeout. |
+| `http.timeout` | String | `0s` | HTTP request timeout. Set to 0 to disable timeout. |

@@ -579,7 +579,7 @@
-| `logging.dir` | String | `/tmp/greptimedb/logs` | The directory to store the log files. If set to empty, logs will not be written to files. |
+| `logging.dir` | String | `./greptimedb_data/logs` | The directory to store the log files. If set to empty, logs will not be written to files. |
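With the documented `http.timeout` default changing from `30s` to `0s`, HTTP requests are no longer bounded by default (0 disables the timeout). A minimal sketch, using the `[http]` keys from the option table above, of opting back into a bounded timeout:

```toml
# Sketch: restore a finite HTTP server timeout (the new default "0s" disables it).
[http]
addr = "127.0.0.1:4000"
timeout = "30s"        # any non-zero duration re-enables the request timeout
body_limit = "64MB"
```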
@@ -27,7 +27,7 @@ max_concurrent_queries = 0
 ## The address to bind the HTTP server.
 addr = "127.0.0.1:4000"
 ## HTTP request timeout. Set to 0 to disable timeout.
-timeout = "30s"
+timeout = "0s"
 ## HTTP request body limit.
 ## The following units are supported: `B`, `KB`, `KiB`, `MB`, `MiB`, `GB`, `GiB`, `TB`, `TiB`, `PB`, `PiB`.
 ## Set to 0 to disable limit.

@@ -119,7 +119,7 @@ provider = "raft_engine"
 ## The directory to store the WAL files.
 ## **It's only used when the provider is `raft_engine`**.
 ## @toml2docs:none-default
-dir = "/tmp/greptimedb/wal"
+dir = "./greptimedb_data/wal"

 ## The size of the WAL segment file.
 ## **It's only used when the provider is `raft_engine`**.

@@ -265,7 +265,7 @@ overwrite_entry_start_id = false
 ## The data storage options.
 [storage]
 ## The working home directory.
-data_home = "/tmp/greptimedb/"
+data_home = "./greptimedb_data/"

 ## The storage type used to store the data.
 ## - `File`: the data is stored in the local file system.

@@ -618,7 +618,7 @@ experimental_sparse_primary_key_encoding = false
 ## The logging options.
 [logging]
 ## The directory to store the log files. If set to empty, logs will not be written to files.
-dir = "/tmp/greptimedb/logs"
+dir = "./greptimedb_data/logs"

 ## The log level. Can be `info`/`debug`/`warn`/`error`.
 ## @toml2docs:none-default
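Taken together, the path changes above move every default on-disk location out of /tmp and under a ./greptimedb_data directory relative to the working directory. A consolidated sketch of the new defaults (values mirror the hunks above; the grouping into one file is only illustrative):

```toml
# New default data layout: everything under ./greptimedb_data instead of /tmp.
[wal]
provider = "raft_engine"
dir = "./greptimedb_data/wal"

[storage]
type = "File"
data_home = "./greptimedb_data/"

[logging]
dir = "./greptimedb_data/logs"
```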
@@ -30,7 +30,7 @@ max_send_message_size = "512MB"
 ## The address to bind the HTTP server.
 addr = "127.0.0.1:4000"
 ## HTTP request timeout. Set to 0 to disable timeout.
-timeout = "30s"
+timeout = "0s"
 ## HTTP request body limit.
 ## The following units are supported: `B`, `KB`, `KiB`, `MB`, `MiB`, `GB`, `GiB`, `TB`, `TiB`, `PB`, `PiB`.
 ## Set to 0 to disable limit.

@@ -76,7 +76,7 @@ retry_interval = "3s"
 ## The logging options.
 [logging]
 ## The directory to store the log files. If set to empty, logs will not be written to files.
-dir = "/tmp/greptimedb/logs"
+dir = "./greptimedb_data/logs"

 ## The log level. Can be `info`/`debug`/`warn`/`error`.
 ## @toml2docs:none-default

@@ -121,4 +121,3 @@ sample_ratio = 1.0
 ## The tokio console address.
 ## @toml2docs:none-default
 #+ tokio_console_addr = "127.0.0.1"

@@ -26,7 +26,7 @@ retry_interval = "3s"
 ## The address to bind the HTTP server.
 addr = "127.0.0.1:4000"
 ## HTTP request timeout. Set to 0 to disable timeout.
-timeout = "30s"
+timeout = "0s"
 ## HTTP request body limit.
 ## The following units are supported: `B`, `KB`, `KiB`, `MB`, `MiB`, `GB`, `GiB`, `TB`, `TiB`, `PB`, `PiB`.
 ## Set to 0 to disable limit.

@@ -189,7 +189,7 @@ tcp_nodelay = true
 ## The logging options.
 [logging]
 ## The directory to store the log files. If set to empty, logs will not be written to files.
-dir = "/tmp/greptimedb/logs"
+dir = "./greptimedb_data/logs"

 ## The log level. Can be `info`/`debug`/`warn`/`error`.
 ## @toml2docs:none-default

@@ -1,5 +1,5 @@
 ## The working home directory.
-data_home = "/tmp/metasrv/"
+data_home = "./greptimedb_data/metasrv/"

 ## The bind address of metasrv.
 bind_addr = "127.0.0.1:3002"

@@ -177,7 +177,7 @@ backoff_deadline = "5mins"
 ## The logging options.
 [logging]
 ## The directory to store the log files. If set to empty, logs will not be written to files.
-dir = "/tmp/greptimedb/logs"
+dir = "./greptimedb_data/logs"

 ## The log level. Can be `info`/`debug`/`warn`/`error`.
 ## @toml2docs:none-default
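For metasrv specifically, the working directory moves from /tmp/metasrv/ to ./greptimedb_data/metasrv/. A short sketch that combines this new default with the `store_addrs` formats listed in the option table earlier; the exact values are examples, and the array form of `store_addrs` is an assumption based on its documented Array type:

```toml
# Metasrv working directory under the new default layout.
data_home = "./greptimedb_data/metasrv/"
bind_addr = "127.0.0.1:3002"

# Backend store addresses, per the documented formats:
store_addrs = ["127.0.0.1:2379"]   # etcd (default)
# store_addrs = ["password=password dbname=postgres user=postgres host=localhost port=5432"]  # PostgreSQL
```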
@@ -34,7 +34,7 @@ max_concurrent_queries = 0
 ## The address to bind the HTTP server.
 addr = "127.0.0.1:4000"
 ## HTTP request timeout. Set to 0 to disable timeout.
-timeout = "30s"
+timeout = "0s"
 ## HTTP request body limit.
 ## The following units are supported: `B`, `KB`, `KiB`, `MB`, `MiB`, `GB`, `GiB`, `TB`, `TiB`, `PB`, `PiB`.
 ## Set to 0 to disable limit.

@@ -164,7 +164,7 @@ provider = "raft_engine"
 ## The directory to store the WAL files.
 ## **It's only used when the provider is `raft_engine`**.
 ## @toml2docs:none-default
-dir = "/tmp/greptimedb/wal"
+dir = "./greptimedb_data/wal"

 ## The size of the WAL segment file.
 ## **It's only used when the provider is `raft_engine`**.

@@ -352,7 +352,7 @@ retry_delay = "500ms"
 ## The data storage options.
 [storage]
 ## The working home directory.
-data_home = "/tmp/greptimedb/"
+data_home = "./greptimedb_data/"

 ## The storage type used to store the data.
 ## - `File`: the data is stored in the local file system.

@@ -705,7 +705,7 @@ experimental_sparse_primary_key_encoding = false
 ## The logging options.
 [logging]
 ## The directory to store the log files. If set to empty, logs will not be written to files.
-dir = "/tmp/greptimedb/logs"
+dir = "./greptimedb_data/logs"

 ## The log level. Can be `info`/`debug`/`warn`/`error`.
 ## @toml2docs:none-default
@@ -25,7 +25,7 @@ services:
       - --initial-cluster-state=new
       - *etcd_initial_cluster_token
     volumes:
-      - /tmp/greptimedb-cluster-docker-compose/etcd0:/var/lib/etcd
+      - ./greptimedb-cluster-docker-compose/etcd0:/var/lib/etcd
     healthcheck:
       test: [ "CMD", "etcdctl", "--endpoints=http://etcd0:2379", "endpoint", "health" ]
       interval: 5s

@@ -68,12 +68,13 @@ services:
       - datanode
       - start
       - --node-id=0
+      - --data-home=/greptimedb_data
       - --rpc-bind-addr=0.0.0.0:3001
      - --rpc-server-addr=datanode0:3001
       - --metasrv-addrs=metasrv:3002
       - --http-addr=0.0.0.0:5000
     volumes:
-      - /tmp/greptimedb-cluster-docker-compose/datanode0:/tmp/greptimedb
+      - ./greptimedb-cluster-docker-compose/datanode0:/greptimedb_data
     healthcheck:
       test: [ "CMD", "curl", "-fv", "http://datanode0:5000/health" ]
       interval: 5s
Grafana dashboard JSON (hunks @@ -4782 through @@ -6188):

A new "Write Rows per Instance" timeseries panel (id 277) is inserted into the datanode row:
description "Ingestion size by row counts.", query
rate(greptime_mito_write_rows_total{pod=~"$datanode"}[$__rate_interval]), legend "{{pod}}".

Because the panel is spliced into an existing slot, each following panel in the JSON takes over
the content of its former neighbour, so the diff shows the same panels shifted down by one slot:
"Write Stall per Instance" (id 221, sum by(pod) (greptime_mito_write_stall_total{pod=~"$datanode"}),
unit none), "Cached Bytes per Instance" (id 229, greptime_mito_cache_bytes{pod=~"$datanode"},
unit decbytes, legend "{{pod}}-{{type}}"), "Read Stage P99 per Instance" (id 228,
histogram_quantile(0.99, ...) over greptime_mito_read_stage_elapsed_bucket, legend
"{{pod}}-{{stage}}-p99"), "Compaction P99 per Instance" (id 230, histogram_quantile over
greptime_mito_compaction_total_elapsed_bucket, unit s, legend "[{{pod}}]-compaction-p99"),
a compaction-by-stage panel (id 232, greptime_mito_compaction_stage_elapsed_bucket), and
"Log Store op duration seconds" (id 269, greptime_logstore_op_elapsed_bucket, legend
"{{pod}}-{{logstore}}-{{optype}}-p99"), with descriptions, units, queries, legends, and gridPos
y-coordinates moving accordingly.

A complete "Inflight Compaction" panel (id 271, query greptime_mito_inflight_compaction_count,
legend "{{pod}}", gridPos y=194) is re-added after "Inflight Flush" at the end of the block.
Minor cosmetic edits ride along: green threshold steps gain an explicit "value": null, and some
legend options (calcs, displayMode, sortBy/sortDesc) are adjusted on the shifted panels.
@@ -406,7 +406,7 @@ mod tests {
            sync_write = false

            [storage]
-            data_home = "/tmp/greptimedb/"
+            data_home = "./greptimedb_data/"
            type = "File"

            [[storage.providers]]

@@ -420,7 +420,7 @@ mod tests {

            [logging]
            level = "debug"
-            dir = "/tmp/greptimedb/test/logs"
+            dir = "./greptimedb_data/test/logs"
        "#;
        write!(file, "{}", toml_str).unwrap();

@@ -467,7 +467,7 @@ mod tests {
        assert_eq!(10000, ddl_timeout.as_millis());
        assert_eq!(3000, timeout.as_millis());
        assert!(tcp_nodelay);
-        assert_eq!("/tmp/greptimedb/", options.storage.data_home);
+        assert_eq!("./greptimedb_data/", options.storage.data_home);
        assert!(matches!(
            &options.storage.store,
            ObjectStoreConfig::File(FileConfig { .. })

@@ -483,7 +483,10 @@ mod tests {
        ));

        assert_eq!("debug", options.logging.level.unwrap());
-        assert_eq!("/tmp/greptimedb/test/logs".to_string(), options.logging.dir);
+        assert_eq!(
+            "./greptimedb_data/test/logs".to_string(),
+            options.logging.dir
+        );
    }

    #[test]

@@ -526,7 +529,7 @@ mod tests {

        let options = cmd
            .load_options(&GlobalOptions {
-                log_dir: Some("/tmp/greptimedb/test/logs".to_string()),
+                log_dir: Some("./greptimedb_data/test/logs".to_string()),
                log_level: Some("debug".to_string()),

                #[cfg(feature = "tokio-console")]

@@ -536,7 +539,7 @@ mod tests {
            .component;

        let logging_opt = options.logging;
-        assert_eq!("/tmp/greptimedb/test/logs", logging_opt.dir);
+        assert_eq!("./greptimedb_data/test/logs", logging_opt.dir);
        assert_eq!("debug", logging_opt.level.as_ref().unwrap());
    }

@@ -565,11 +568,11 @@ mod tests {

            [storage]
            type = "File"
-            data_home = "/tmp/greptimedb/"
+            data_home = "./greptimedb_data/"

            [logging]
            level = "debug"
-            dir = "/tmp/greptimedb/test/logs"
+            dir = "./greptimedb_data/test/logs"
        "#;
        write!(file, "{}", toml_str).unwrap();
@@ -440,7 +440,7 @@ mod tests {

            [http]
            addr = "127.0.0.1:4000"
-            timeout = "30s"
+            timeout = "0s"
            body_limit = "2GB"

            [opentsdb]

@@ -448,7 +448,7 @@ mod tests {

            [logging]
            level = "debug"
-            dir = "/tmp/greptimedb/test/logs"
+            dir = "./greptimedb_data/test/logs"
        "#;
        write!(file, "{}", toml_str).unwrap();

@@ -461,12 +461,15 @@ mod tests {
        let fe_opts = command.load_options(&Default::default()).unwrap().component;

        assert_eq!("127.0.0.1:4000".to_string(), fe_opts.http.addr);
-        assert_eq!(Duration::from_secs(30), fe_opts.http.timeout);
+        assert_eq!(Duration::from_secs(0), fe_opts.http.timeout);

        assert_eq!(ReadableSize::gb(2), fe_opts.http.body_limit);

        assert_eq!("debug", fe_opts.logging.level.as_ref().unwrap());
-        assert_eq!("/tmp/greptimedb/test/logs".to_string(), fe_opts.logging.dir);
+        assert_eq!(
+            "./greptimedb_data/test/logs".to_string(),
+            fe_opts.logging.dir
+        );
        assert!(!fe_opts.opentsdb.enable);
    }

@@ -505,7 +508,7 @@ mod tests {

        let options = cmd
            .load_options(&GlobalOptions {
-                log_dir: Some("/tmp/greptimedb/test/logs".to_string()),
+                log_dir: Some("./greptimedb_data/test/logs".to_string()),
                log_level: Some("debug".to_string()),

                #[cfg(feature = "tokio-console")]

@@ -515,7 +518,7 @@ mod tests {
            .component;

        let logging_opt = options.logging;
-        assert_eq!("/tmp/greptimedb/test/logs", logging_opt.dir);
+        assert_eq!("./greptimedb_data/test/logs", logging_opt.dir);
        assert_eq!("debug", logging_opt.level.as_ref().unwrap());
    }
@@ -337,7 +337,7 @@ mod tests {
|
||||
|
||||
[logging]
|
||||
level = "debug"
|
||||
dir = "/tmp/greptimedb/test/logs"
|
||||
dir = "./greptimedb_data/test/logs"
|
||||
|
||||
[failure_detector]
|
||||
threshold = 8.0
|
||||
@@ -358,7 +358,10 @@ mod tests {
|
||||
assert_eq!(vec!["127.0.0.1:2379".to_string()], options.store_addrs);
|
||||
assert_eq!(SelectorType::LeaseBased, options.selector);
|
||||
assert_eq!("debug", options.logging.level.as_ref().unwrap());
|
||||
assert_eq!("/tmp/greptimedb/test/logs".to_string(), options.logging.dir);
|
||||
assert_eq!(
|
||||
"./greptimedb_data/test/logs".to_string(),
|
||||
options.logging.dir
|
||||
);
|
||||
assert_eq!(8.0, options.failure_detector.threshold);
|
||||
assert_eq!(
|
||||
100.0,
|
||||
@@ -396,7 +399,7 @@ mod tests {
|
||||
|
||||
let options = cmd
|
||||
.load_options(&GlobalOptions {
|
||||
log_dir: Some("/tmp/greptimedb/test/logs".to_string()),
|
||||
log_dir: Some("./greptimedb_data/test/logs".to_string()),
|
||||
log_level: Some("debug".to_string()),
|
||||
|
||||
#[cfg(feature = "tokio-console")]
|
||||
@@ -406,7 +409,7 @@ mod tests {
|
||||
.component;
|
||||
|
||||
let logging_opt = options.logging;
|
||||
assert_eq!("/tmp/greptimedb/test/logs", logging_opt.dir);
|
||||
assert_eq!("./greptimedb_data/test/logs", logging_opt.dir);
|
||||
assert_eq!("debug", logging_opt.level.as_ref().unwrap());
|
||||
}
|
||||
|
||||
@@ -424,7 +427,7 @@ mod tests {
|
||||
|
||||
[logging]
|
||||
level = "debug"
|
||||
dir = "/tmp/greptimedb/test/logs"
|
||||
dir = "./greptimedb_data/test/logs"
|
||||
"#;
|
||||
write!(file, "{}", toml_str).unwrap();
|
||||
|
||||
|
||||
@@ -852,7 +852,7 @@ mod tests {
|
||||
|
||||
[wal]
|
||||
provider = "raft_engine"
|
||||
dir = "/tmp/greptimedb/test/wal"
|
||||
dir = "./greptimedb_data/test/wal"
|
||||
file_size = "1GB"
|
||||
purge_threshold = "50GB"
|
||||
purge_interval = "10m"
|
||||
@@ -860,7 +860,7 @@ mod tests {
|
||||
sync_write = false
|
||||
|
||||
[storage]
|
||||
data_home = "/tmp/greptimedb/"
|
||||
data_home = "./greptimedb_data/"
|
||||
type = "File"
|
||||
|
||||
[[storage.providers]]
|
||||
@@ -892,7 +892,7 @@ mod tests {
|
||||
|
||||
[logging]
|
||||
level = "debug"
|
||||
dir = "/tmp/greptimedb/test/logs"
|
||||
dir = "./greptimedb_data/test/logs"
|
||||
"#;
|
||||
write!(file, "{}", toml_str).unwrap();
|
||||
let cmd = StartCommand {
|
||||
@@ -922,7 +922,10 @@ mod tests {
|
||||
let DatanodeWalConfig::RaftEngine(raft_engine_config) = dn_opts.wal else {
|
||||
unreachable!()
|
||||
};
|
||||
assert_eq!("/tmp/greptimedb/test/wal", raft_engine_config.dir.unwrap());
|
||||
assert_eq!(
|
||||
"./greptimedb_data/test/wal",
|
||||
raft_engine_config.dir.unwrap()
|
||||
);
|
||||
|
||||
assert!(matches!(
|
||||
&dn_opts.storage.store,
|
||||
@@ -946,7 +949,7 @@ mod tests {
|
||||
}
|
||||
|
||||
assert_eq!("debug", logging_opts.level.as_ref().unwrap());
|
||||
assert_eq!("/tmp/greptimedb/test/logs".to_string(), logging_opts.dir);
|
||||
assert_eq!("./greptimedb_data/test/logs".to_string(), logging_opts.dir);
|
||||
}
|
||||
|
||||
#[test]
|
||||
@@ -958,7 +961,7 @@ mod tests {
|
||||
|
||||
let opts = cmd
|
||||
.load_options(&GlobalOptions {
|
||||
log_dir: Some("/tmp/greptimedb/test/logs".to_string()),
|
||||
log_dir: Some("./greptimedb_data/test/logs".to_string()),
|
||||
log_level: Some("debug".to_string()),
|
||||
|
||||
#[cfg(feature = "tokio-console")]
|
||||
@@ -967,7 +970,7 @@ mod tests {
|
||||
.unwrap()
|
||||
.component;
|
||||
|
||||
assert_eq!("/tmp/greptimedb/test/logs", opts.logging.dir);
|
||||
assert_eq!("./greptimedb_data/test/logs", opts.logging.dir);
|
||||
assert_eq!("debug", opts.logging.level.unwrap());
|
||||
}
|
||||
|
||||
|
||||
@@ -56,13 +56,13 @@ fn test_load_datanode_example_config() {
|
||||
metadata_cache_tti: Duration::from_secs(300),
|
||||
}),
|
||||
wal: DatanodeWalConfig::RaftEngine(RaftEngineConfig {
|
||||
dir: Some("/tmp/greptimedb/wal".to_string()),
|
||||
dir: Some("./greptimedb_data/wal".to_string()),
|
||||
sync_period: Some(Duration::from_secs(10)),
|
||||
recovery_parallelism: 2,
|
||||
..Default::default()
|
||||
}),
|
||||
storage: StorageConfig {
|
||||
data_home: "/tmp/greptimedb/".to_string(),
|
||||
data_home: "./greptimedb_data/".to_string(),
|
||||
..Default::default()
|
||||
},
|
||||
region_engine: vec![
|
||||
@@ -159,10 +159,10 @@ fn test_load_metasrv_example_config() {
|
||||
let expected = GreptimeOptions::<MetasrvOptions> {
|
||||
component: MetasrvOptions {
|
||||
selector: SelectorType::default(),
|
||||
data_home: "/tmp/metasrv/".to_string(),
|
||||
data_home: "./greptimedb_data/metasrv/".to_string(),
|
||||
server_addr: "127.0.0.1:3002".to_string(),
|
||||
logging: LoggingOptions {
|
||||
dir: "/tmp/greptimedb/logs".to_string(),
|
||||
dir: "./greptimedb_data/logs".to_string(),
|
||||
level: Some("info".to_string()),
|
||||
otlp_endpoint: Some(DEFAULT_OTLP_ENDPOINT.to_string()),
|
||||
tracing_sample_ratio: Some(Default::default()),
|
||||
@@ -202,7 +202,7 @@ fn test_load_standalone_example_config() {
|
||||
component: StandaloneOptions {
|
||||
default_timezone: Some("UTC".to_string()),
|
||||
wal: DatanodeWalConfig::RaftEngine(RaftEngineConfig {
|
||||
dir: Some("/tmp/greptimedb/wal".to_string()),
|
||||
dir: Some("./greptimedb_data/wal".to_string()),
|
||||
sync_period: Some(Duration::from_secs(10)),
|
||||
recovery_parallelism: 2,
|
||||
..Default::default()
|
||||
@@ -219,7 +219,7 @@ fn test_load_standalone_example_config() {
|
||||
}),
|
||||
],
|
||||
storage: StorageConfig {
|
||||
data_home: "/tmp/greptimedb/".to_string(),
|
||||
data_home: "./greptimedb_data/".to_string(),
|
||||
..Default::default()
|
||||
},
|
||||
logging: LoggingOptions {
|
||||
|
||||
@@ -135,5 +135,6 @@ pub fn is_readonly_schema(schema: &str) -> bool {
|
||||
pub const TRACE_ID_COLUMN: &str = "trace_id";
|
||||
pub const SPAN_ID_COLUMN: &str = "span_id";
|
||||
pub const SPAN_NAME_COLUMN: &str = "span_name";
|
||||
pub const SERVICE_NAME_COLUMN: &str = "service_name";
|
||||
pub const PARENT_SPAN_ID_COLUMN: &str = "parent_span_id";
|
||||
// ---- End of special table and fields ----
|
||||
|
||||
@@ -161,7 +161,7 @@ mod tests {
|
||||
|
||||
[wal]
|
||||
provider = "raft_engine"
|
||||
dir = "/tmp/greptimedb/wal"
|
||||
dir = "./greptimedb_data/wal"
|
||||
file_size = "1GB"
|
||||
purge_threshold = "50GB"
|
||||
purge_interval = "10m"
|
||||
@@ -170,7 +170,7 @@ mod tests {
|
||||
|
||||
[logging]
|
||||
level = "debug"
|
||||
dir = "/tmp/greptimedb/test/logs"
|
||||
dir = "./greptimedb_data/test/logs"
|
||||
"#;
|
||||
write!(file, "{}", toml_str).unwrap();
|
||||
|
||||
@@ -246,7 +246,7 @@ mod tests {
|
||||
let DatanodeWalConfig::RaftEngine(raft_engine_config) = opts.wal else {
|
||||
unreachable!()
|
||||
};
|
||||
assert_eq!(raft_engine_config.dir.unwrap(), "/tmp/greptimedb/wal");
|
||||
assert_eq!(raft_engine_config.dir.unwrap(), "./greptimedb_data/wal");
|
||||
|
||||
// Should be default values.
|
||||
assert_eq!(opts.node_id, None);
|
||||
|
||||
@@ -24,9 +24,11 @@ pub(crate) mod sum;
|
||||
mod vector_add;
|
||||
mod vector_dim;
|
||||
mod vector_div;
|
||||
mod vector_kth_elem;
|
||||
mod vector_mul;
|
||||
mod vector_norm;
|
||||
mod vector_sub;
|
||||
mod vector_subvector;
|
||||
|
||||
use std::sync::Arc;
|
||||
|
||||
@@ -56,6 +58,8 @@ impl VectorFunction {
|
||||
registry.register(Arc::new(vector_div::VectorDivFunction));
|
||||
registry.register(Arc::new(vector_norm::VectorNormFunction));
|
||||
registry.register(Arc::new(vector_dim::VectorDimFunction));
|
||||
registry.register(Arc::new(vector_kth_elem::VectorKthElemFunction));
|
||||
registry.register(Arc::new(vector_subvector::VectorSubvectorFunction));
|
||||
registry.register(Arc::new(elem_sum::ElemSumFunction));
|
||||
registry.register(Arc::new(elem_product::ElemProductFunction));
|
||||
}
src/common/function/src/scalars/vector/vector_kth_elem.rs (new file, 211 lines)
@@ -0,0 +1,211 @@
// Copyright 2023 Greptime Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use std::borrow::Cow;
|
||||
use std::fmt::Display;
|
||||
|
||||
use common_query::error::{InvalidFuncArgsSnafu, Result};
|
||||
use common_query::prelude::Signature;
|
||||
use datatypes::prelude::ConcreteDataType;
|
||||
use datatypes::scalars::ScalarVectorBuilder;
|
||||
use datatypes::vectors::{Float32VectorBuilder, MutableVector, VectorRef};
|
||||
use snafu::ensure;
|
||||
|
||||
use crate::function::{Function, FunctionContext};
|
||||
use crate::helper;
|
||||
use crate::scalars::vector::impl_conv::{as_veclit, as_veclit_if_const};
|
||||
|
||||
const NAME: &str = "vec_kth_elem";
|
||||
|
||||
/// Returns the k-th (0-based index) element of the vector.
|
||||
///
|
||||
/// # Example
|
||||
///
|
||||
/// ```sql
|
||||
/// SELECT vec_kth_elem("[2, 4, 6]", 1) as result;
|
||||
///
|
||||
/// +---------+
|
||||
/// | result |
|
||||
/// +---------+
|
||||
/// | 4 |
|
||||
/// +---------+
|
||||
///
|
||||
/// ```
|
||||
///
|
||||
|
||||
#[derive(Debug, Clone, Default)]
|
||||
pub struct VectorKthElemFunction;
|
||||
|
||||
impl Function for VectorKthElemFunction {
|
||||
fn name(&self) -> &str {
|
||||
NAME
|
||||
}
|
||||
|
||||
fn return_type(
|
||||
&self,
|
||||
_input_types: &[ConcreteDataType],
|
||||
) -> common_query::error::Result<ConcreteDataType> {
|
||||
Ok(ConcreteDataType::float32_datatype())
|
||||
}
|
||||
|
||||
fn signature(&self) -> Signature {
|
||||
helper::one_of_sigs2(
|
||||
vec![
|
||||
ConcreteDataType::string_datatype(),
|
||||
ConcreteDataType::binary_datatype(),
|
||||
],
|
||||
vec![ConcreteDataType::int64_datatype()],
|
||||
)
|
||||
}
|
||||
|
||||
fn eval(&self, _func_ctx: &FunctionContext, columns: &[VectorRef]) -> Result<VectorRef> {
|
||||
ensure!(
|
||||
columns.len() == 2,
|
||||
InvalidFuncArgsSnafu {
|
||||
err_msg: format!(
|
||||
"The length of the args is not correct, expect exactly two, have: {}",
|
||||
columns.len()
|
||||
),
|
||||
}
|
||||
);
|
||||
|
||||
let arg0 = &columns[0];
|
||||
let arg1 = &columns[1];
|
||||
|
||||
let len = arg0.len();
|
||||
let mut result = Float32VectorBuilder::with_capacity(len);
|
||||
if len == 0 {
|
||||
return Ok(result.to_vector());
|
||||
};
|
||||
|
||||
let arg0_const = as_veclit_if_const(arg0)?;
|
||||
|
||||
for i in 0..len {
|
||||
let arg0 = match arg0_const.as_ref() {
|
||||
Some(arg0) => Some(Cow::Borrowed(arg0.as_ref())),
|
||||
None => as_veclit(arg0.get_ref(i))?,
|
||||
};
|
||||
let Some(arg0) = arg0 else {
|
||||
result.push_null();
|
||||
continue;
|
||||
};
|
||||
|
||||
let arg1 = arg1.get(i).as_f64_lossy();
|
||||
let Some(arg1) = arg1 else {
|
||||
result.push_null();
|
||||
continue;
|
||||
};
|
||||
|
||||
ensure!(
|
||||
arg1 >= 0.0 && arg1.fract() == 0.0,
|
||||
InvalidFuncArgsSnafu {
|
||||
err_msg: format!(
|
||||
"Invalid argument: k must be a non-negative integer, but got k = {}.",
|
||||
arg1
|
||||
),
|
||||
}
|
||||
);
|
||||
|
||||
let k = arg1 as usize;
|
||||
|
||||
ensure!(
|
||||
k < arg0.len(),
|
||||
InvalidFuncArgsSnafu {
|
||||
err_msg: format!(
|
||||
"Out of range: k must be in the range [0, {}], but got k = {}.",
|
||||
arg0.len() - 1,
|
||||
k
|
||||
),
|
||||
}
|
||||
);
|
||||
|
||||
let value = arg0[k];
|
||||
|
||||
result.push(Some(value));
|
||||
}
|
||||
Ok(result.to_vector())
|
||||
}
|
||||
}
|
||||
|
||||
impl Display for VectorKthElemFunction {
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||
write!(f, "{}", NAME.to_ascii_uppercase())
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use std::sync::Arc;
|
||||
|
||||
use common_query::error;
|
||||
use datatypes::vectors::{Int64Vector, StringVector};
|
||||
|
||||
use super::*;
|
||||
|
||||
#[test]
|
||||
fn test_vec_kth_elem() {
|
||||
let func = VectorKthElemFunction;
|
||||
|
||||
let input0 = Arc::new(StringVector::from(vec![
|
||||
Some("[1.0,2.0,3.0]".to_string()),
|
||||
Some("[4.0,5.0,6.0]".to_string()),
|
||||
Some("[7.0,8.0,9.0]".to_string()),
|
||||
None,
|
||||
]));
|
||||
let input1 = Arc::new(Int64Vector::from(vec![Some(0), Some(2), None, Some(1)]));
|
||||
|
||||
let result = func
|
||||
.eval(&FunctionContext::default(), &[input0, input1])
|
||||
.unwrap();
|
||||
|
||||
let result = result.as_ref();
|
||||
assert_eq!(result.len(), 4);
|
||||
assert_eq!(result.get_ref(0).as_f32().unwrap(), Some(1.0));
|
||||
assert_eq!(result.get_ref(1).as_f32().unwrap(), Some(6.0));
|
||||
assert!(result.get_ref(2).is_null());
|
||||
assert!(result.get_ref(3).is_null());
|
||||
|
||||
let input0 = Arc::new(StringVector::from(vec![Some("[1.0,2.0,3.0]".to_string())]));
|
||||
let input1 = Arc::new(Int64Vector::from(vec![Some(3)]));
|
||||
|
||||
let err = func
|
||||
.eval(&FunctionContext::default(), &[input0, input1])
|
||||
.unwrap_err();
|
||||
match err {
|
||||
error::Error::InvalidFuncArgs { err_msg, .. } => {
|
||||
assert_eq!(
|
||||
err_msg,
|
||||
format!("Out of range: k must be in the range [0, 2], but got k = 3.")
|
||||
)
|
||||
}
|
||||
_ => unreachable!(),
|
||||
}
|
||||
|
||||
let input0 = Arc::new(StringVector::from(vec![Some("[1.0,2.0,3.0]".to_string())]));
|
||||
let input1 = Arc::new(Int64Vector::from(vec![Some(-1)]));
|
||||
|
||||
let err = func
|
||||
.eval(&FunctionContext::default(), &[input0, input1])
|
||||
.unwrap_err();
|
||||
match err {
|
||||
error::Error::InvalidFuncArgs { err_msg, .. } => {
|
||||
assert_eq!(
|
||||
err_msg,
|
||||
format!("Invalid argument: k must be a non-negative integer, but got k = -1.")
|
||||
)
|
||||
}
|
||||
_ => unreachable!(),
|
||||
}
|
||||
}
|
||||
}
src/common/function/src/scalars/vector/vector_subvector.rs (new file, 240 lines)
@@ -0,0 +1,240 @@
// Copyright 2023 Greptime Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use std::borrow::Cow;
|
||||
use std::fmt::Display;
|
||||
|
||||
use common_query::error::{InvalidFuncArgsSnafu, Result};
|
||||
use common_query::prelude::{Signature, TypeSignature};
|
||||
use datafusion_expr::Volatility;
|
||||
use datatypes::prelude::ConcreteDataType;
|
||||
use datatypes::scalars::ScalarVectorBuilder;
|
||||
use datatypes::vectors::{BinaryVectorBuilder, MutableVector, VectorRef};
|
||||
use snafu::ensure;
|
||||
|
||||
use crate::function::{Function, FunctionContext};
|
||||
use crate::scalars::vector::impl_conv::{as_veclit, as_veclit_if_const, veclit_to_binlit};
|
||||
|
||||
const NAME: &str = "vec_subvector";
|
||||
|
||||
/// Returns a subvector from the start index (inclusive) to the end index (exclusive).
|
||||
///
|
||||
/// # Example
|
||||
///
|
||||
/// ```sql
|
||||
/// SELECT vec_to_string(vec_subvector("[1, 2, 3, 4, 5]", 1, 3)) as result;
|
||||
///
|
||||
/// +---------+
|
||||
/// | result |
|
||||
/// +---------+
|
||||
/// | [2, 3] |
|
||||
/// +---------+
|
||||
///
|
||||
/// ```
|
||||
///
|
||||
|
||||
#[derive(Debug, Clone, Default)]
|
||||
pub struct VectorSubvectorFunction;
|
||||
|
||||
impl Function for VectorSubvectorFunction {
|
||||
fn name(&self) -> &str {
|
||||
NAME
|
||||
}
|
||||
|
||||
fn return_type(&self, _input_types: &[ConcreteDataType]) -> Result<ConcreteDataType> {
|
||||
Ok(ConcreteDataType::binary_datatype())
|
||||
}
|
||||
|
||||
fn signature(&self) -> Signature {
|
||||
Signature::one_of(
|
||||
vec![
|
||||
TypeSignature::Exact(vec![
|
||||
ConcreteDataType::string_datatype(),
|
||||
ConcreteDataType::int64_datatype(),
|
||||
ConcreteDataType::int64_datatype(),
|
||||
]),
|
||||
TypeSignature::Exact(vec![
|
||||
ConcreteDataType::binary_datatype(),
|
||||
ConcreteDataType::int64_datatype(),
|
||||
ConcreteDataType::int64_datatype(),
|
||||
]),
|
||||
],
|
||||
Volatility::Immutable,
|
||||
)
|
||||
}
|
||||
|
||||
fn eval(&self, _func_ctx: &FunctionContext, columns: &[VectorRef]) -> Result<VectorRef> {
|
||||
ensure!(
|
||||
columns.len() == 3,
|
||||
InvalidFuncArgsSnafu {
|
||||
err_msg: format!(
|
||||
"The length of the args is not correct, expect exactly three, have: {}",
|
||||
columns.len()
|
||||
)
|
||||
}
|
||||
);
|
||||
|
||||
let arg0 = &columns[0];
|
||||
let arg1 = &columns[1];
|
||||
let arg2 = &columns[2];
|
||||
|
||||
ensure!(
|
||||
arg0.len() == arg1.len() && arg1.len() == arg2.len(),
|
||||
InvalidFuncArgsSnafu {
|
||||
err_msg: format!(
|
||||
"The lengths of the vector are not aligned, args 0: {}, args 1: {}, args 2: {}",
|
||||
arg0.len(),
|
||||
arg1.len(),
|
||||
arg2.len()
|
||||
)
|
||||
}
|
||||
);
|
||||
|
||||
let len = arg0.len();
|
||||
let mut result = BinaryVectorBuilder::with_capacity(len);
|
||||
if len == 0 {
|
||||
return Ok(result.to_vector());
|
||||
}
|
||||
|
||||
let arg0_const = as_veclit_if_const(arg0)?;
|
||||
|
||||
for i in 0..len {
|
||||
let arg0 = match arg0_const.as_ref() {
|
||||
Some(arg0) => Some(Cow::Borrowed(arg0.as_ref())),
|
||||
None => as_veclit(arg0.get_ref(i))?,
|
||||
};
|
||||
let arg1 = arg1.get(i).as_i64();
|
||||
let arg2 = arg2.get(i).as_i64();
|
||||
let (Some(arg0), Some(arg1), Some(arg2)) = (arg0, arg1, arg2) else {
|
||||
result.push_null();
|
||||
continue;
|
||||
};
|
||||
|
||||
ensure!(
|
||||
0 <= arg1 && arg1 <= arg2 && arg2 as usize <= arg0.len(),
|
||||
InvalidFuncArgsSnafu {
|
||||
err_msg: format!(
|
||||
"Invalid start and end indices: start={}, end={}, vec_len={}",
|
||||
arg1,
|
||||
arg2,
|
||||
arg0.len()
|
||||
)
|
||||
}
|
||||
);
|
||||
|
||||
let subvector = &arg0[arg1 as usize..arg2 as usize];
|
||||
let binlit = veclit_to_binlit(subvector);
|
||||
result.push(Some(&binlit));
|
||||
}
|
||||
|
||||
Ok(result.to_vector())
|
||||
}
|
||||
}
|
||||
|
||||
impl Display for VectorSubvectorFunction {
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||
write!(f, "{}", NAME.to_ascii_uppercase())
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use std::sync::Arc;
|
||||
|
||||
use common_query::error::Error;
|
||||
use datatypes::vectors::{Int64Vector, StringVector};
|
||||
|
||||
use super::*;
|
||||
use crate::function::FunctionContext;
|
||||
#[test]
|
||||
fn test_subvector() {
|
||||
let func = VectorSubvectorFunction;
|
||||
|
||||
let input0 = Arc::new(StringVector::from(vec![
|
||||
Some("[1.0, 2.0, 3.0, 4.0, 5.0]".to_string()),
|
||||
Some("[6.0, 7.0, 8.0, 9.0, 10.0]".to_string()),
|
||||
None,
|
||||
Some("[11.0, 12.0, 13.0]".to_string()),
|
||||
]));
|
||||
let input1 = Arc::new(Int64Vector::from(vec![Some(1), Some(0), Some(0), Some(1)]));
|
||||
let input2 = Arc::new(Int64Vector::from(vec![Some(3), Some(5), Some(2), Some(3)]));
|
||||
|
||||
let result = func
|
||||
.eval(&FunctionContext::default(), &[input0, input1, input2])
|
||||
.unwrap();
|
||||
|
||||
let result = result.as_ref();
|
||||
assert_eq!(result.len(), 4);
|
||||
assert_eq!(
|
||||
result.get_ref(0).as_binary().unwrap(),
|
||||
Some(veclit_to_binlit(&[2.0, 3.0]).as_slice())
|
||||
);
|
||||
assert_eq!(
|
||||
result.get_ref(1).as_binary().unwrap(),
|
||||
Some(veclit_to_binlit(&[6.0, 7.0, 8.0, 9.0, 10.0]).as_slice())
|
||||
);
|
||||
assert!(result.get_ref(2).is_null());
|
||||
assert_eq!(
|
||||
result.get_ref(3).as_binary().unwrap(),
|
||||
Some(veclit_to_binlit(&[12.0, 13.0]).as_slice())
|
||||
);
|
||||
}
|
||||
#[test]
|
||||
fn test_subvector_error() {
|
||||
let func = VectorSubvectorFunction;
|
||||
|
||||
let input0 = Arc::new(StringVector::from(vec![
|
||||
Some("[1.0, 2.0, 3.0]".to_string()),
|
||||
Some("[4.0, 5.0, 6.0]".to_string()),
|
||||
]));
|
||||
let input1 = Arc::new(Int64Vector::from(vec![Some(1), Some(2)]));
|
||||
let input2 = Arc::new(Int64Vector::from(vec![Some(3)]));
|
||||
|
||||
let result = func.eval(&FunctionContext::default(), &[input0, input1, input2]);
|
||||
|
||||
match result {
|
||||
Err(Error::InvalidFuncArgs { err_msg, .. }) => {
|
||||
assert_eq!(
|
||||
err_msg,
|
||||
"The lengths of the vector are not aligned, args 0: 2, args 1: 2, args 2: 1"
|
||||
)
|
||||
}
|
||||
_ => unreachable!(),
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_subvector_invalid_indices() {
|
||||
let func = VectorSubvectorFunction;
|
||||
|
||||
let input0 = Arc::new(StringVector::from(vec![
|
||||
Some("[1.0, 2.0, 3.0]".to_string()),
|
||||
Some("[4.0, 5.0, 6.0]".to_string()),
|
||||
]));
|
||||
let input1 = Arc::new(Int64Vector::from(vec![Some(1), Some(3)]));
|
||||
let input2 = Arc::new(Int64Vector::from(vec![Some(3), Some(4)]));
|
||||
|
||||
let result = func.eval(&FunctionContext::default(), &[input0, input1, input2]);
|
||||
|
||||
match result {
|
||||
Err(Error::InvalidFuncArgs { err_msg, .. }) => {
|
||||
assert_eq!(
|
||||
err_msg,
|
||||
"Invalid start and end indices: start=3, end=4, vec_len=3"
|
||||
)
|
||||
}
|
||||
_ => unreachable!(),
|
||||
}
|
||||
}
|
||||
}
|
||||
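Taken together, the two new UDFs are related: vec_kth_elem(v, k) returns the single element that vec_subvector(v, k, k + 1) would wrap into a one-element vector. A minimal sketch of that relationship on plain float slices (illustration only, not the crate's code):

fn kth_elem(v: &[f32], k: usize) -> Option<f32> {
    // Mirrors vec_kth_elem: an out-of-range k yields no value (the UDF raises an error instead).
    v.get(k).copied()
}

fn subvector(v: &[f32], start: usize, end: usize) -> Option<&[f32]> {
    // Mirrors vec_subvector: start is inclusive, end is exclusive, and both bounds are checked.
    (start <= end && end <= v.len()).then(|| &v[start..end])
}

fn main() {
    let v = [1.0_f32, 2.0, 3.0];
    assert_eq!(kth_elem(&v, 1), subvector(&v, 1, 2).map(|s| s[0]));
}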
@@ -155,21 +155,21 @@ impl<'a> MySqlTemplateFactory<'a> {
|
||||
table_name: table_name.to_string(),
|
||||
create_table_statement: format!(
|
||||
// Cannot be more than 3072 bytes in PRIMARY KEY
|
||||
"CREATE TABLE IF NOT EXISTS {table_name}(k VARBINARY(3072) PRIMARY KEY, v BLOB);",
|
||||
"CREATE TABLE IF NOT EXISTS `{table_name}`(k VARBINARY(3072) PRIMARY KEY, v BLOB);",
|
||||
),
|
||||
range_template: RangeTemplate {
|
||||
point: format!("SELECT k, v FROM {table_name} WHERE k = ?"),
|
||||
range: format!("SELECT k, v FROM {table_name} WHERE k >= ? AND k < ? ORDER BY k"),
|
||||
full: format!("SELECT k, v FROM {table_name} ? ORDER BY k"),
|
||||
left_bounded: format!("SELECT k, v FROM {table_name} WHERE k >= ? ORDER BY k"),
|
||||
prefix: format!("SELECT k, v FROM {table_name} WHERE k LIKE ? ORDER BY k"),
|
||||
point: format!("SELECT k, v FROM `{table_name}` WHERE k = ?"),
|
||||
range: format!("SELECT k, v FROM `{table_name}` WHERE k >= ? AND k < ? ORDER BY k"),
|
||||
full: format!("SELECT k, v FROM `{table_name}` ? ORDER BY k"),
|
||||
left_bounded: format!("SELECT k, v FROM `{table_name}` WHERE k >= ? ORDER BY k"),
|
||||
prefix: format!("SELECT k, v FROM `{table_name}` WHERE k LIKE ? ORDER BY k"),
|
||||
},
|
||||
delete_template: RangeTemplate {
|
||||
point: format!("DELETE FROM {table_name} WHERE k = ?;"),
|
||||
range: format!("DELETE FROM {table_name} WHERE k >= ? AND k < ?;"),
|
||||
full: format!("DELETE FROM {table_name}"),
|
||||
left_bounded: format!("DELETE FROM {table_name} WHERE k >= ?;"),
|
||||
prefix: format!("DELETE FROM {table_name} WHERE k LIKE ?;"),
|
||||
point: format!("DELETE FROM `{table_name}` WHERE k = ?;"),
|
||||
range: format!("DELETE FROM `{table_name}` WHERE k >= ? AND k < ?;"),
|
||||
full: format!("DELETE FROM `{table_name}`"),
|
||||
left_bounded: format!("DELETE FROM `{table_name}` WHERE k >= ?;"),
|
||||
prefix: format!("DELETE FROM `{table_name}` WHERE k LIKE ?;"),
|
||||
},
|
||||
}
|
||||
}
|
||||
@@ -189,14 +189,17 @@ impl MySqlTemplateSet {
|
||||
fn generate_batch_get_query(&self, key_len: usize) -> String {
|
||||
let table_name = &self.table_name;
|
||||
let in_clause = mysql_generate_in_placeholders(1, key_len).join(", ");
|
||||
format!("SELECT k, v FROM {table_name} WHERE k in ({});", in_clause)
|
||||
format!(
|
||||
"SELECT k, v FROM `{table_name}` WHERE k in ({});",
|
||||
in_clause
|
||||
)
|
||||
}
|
||||
|
||||
/// Generates the sql for batch delete.
|
||||
fn generate_batch_delete_query(&self, key_len: usize) -> String {
|
||||
let table_name = &self.table_name;
|
||||
let in_clause = mysql_generate_in_placeholders(1, key_len).join(", ");
|
||||
format!("DELETE FROM {table_name} WHERE k in ({});", in_clause)
|
||||
format!("DELETE FROM `{table_name}` WHERE k in ({});", in_clause)
|
||||
}
|
||||
|
||||
/// Generates the sql for batch upsert.
|
||||
@@ -212,9 +215,9 @@ impl MySqlTemplateSet {
|
||||
let values_clause = values_placeholders.join(", ");
|
||||
|
||||
(
|
||||
format!(r#"SELECT k, v FROM {table_name} WHERE k IN ({in_clause})"#,),
|
||||
format!(r#"SELECT k, v FROM `{table_name}` WHERE k IN ({in_clause})"#,),
|
||||
format!(
|
||||
r#"INSERT INTO {table_name} (k, v) VALUES {values_clause} ON DUPLICATE KEY UPDATE v = VALUES(v);"#,
|
||||
r#"INSERT INTO `{table_name}` (k, v) VALUES {values_clause} ON DUPLICATE KEY UPDATE v = VALUES(v);"#,
|
||||
),
|
||||
)
|
||||
}
|
||||
|
||||
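The MySQL template changes above wrap the metadata table name in backticks so that configured names containing characters such as '-' or '.', or names that collide with reserved words, are still treated as a single identifier. A minimal sketch of the idea, using a hypothetical quoted_select helper (not part of the diff) built with format! just like the templates above:

/// Hypothetical helper: without the backticks a table name like "greptime-metasrv"
/// would be parsed as an expression (`greptime - metasrv`) rather than one identifier.
fn quoted_select(table_name: &str) -> String {
    format!("SELECT k, v FROM `{table_name}` WHERE k = ?")
}

fn main() {
    assert_eq!(
        quoted_select("greptime-metasrv"),
        "SELECT k, v FROM `greptime-metasrv` WHERE k = ?"
    );
}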
@@ -157,21 +157,25 @@ impl<'a> PgSqlTemplateFactory<'a> {
|
||||
PgSqlTemplateSet {
|
||||
table_name: table_name.to_string(),
|
||||
create_table_statement: format!(
|
||||
"CREATE TABLE IF NOT EXISTS {table_name}(k bytea PRIMARY KEY, v bytea)",
|
||||
"CREATE TABLE IF NOT EXISTS \"{table_name}\"(k bytea PRIMARY KEY, v bytea)",
|
||||
),
|
||||
range_template: RangeTemplate {
|
||||
point: format!("SELECT k, v FROM {table_name} WHERE k = $1"),
|
||||
range: format!("SELECT k, v FROM {table_name} WHERE k >= $1 AND k < $2 ORDER BY k"),
|
||||
full: format!("SELECT k, v FROM {table_name} $1 ORDER BY k"),
|
||||
left_bounded: format!("SELECT k, v FROM {table_name} WHERE k >= $1 ORDER BY k"),
|
||||
prefix: format!("SELECT k, v FROM {table_name} WHERE k LIKE $1 ORDER BY k"),
|
||||
point: format!("SELECT k, v FROM \"{table_name}\" WHERE k = $1"),
|
||||
range: format!(
|
||||
"SELECT k, v FROM \"{table_name}\" WHERE k >= $1 AND k < $2 ORDER BY k"
|
||||
),
|
||||
full: format!("SELECT k, v FROM \"{table_name}\" $1 ORDER BY k"),
|
||||
left_bounded: format!("SELECT k, v FROM \"{table_name}\" WHERE k >= $1 ORDER BY k"),
|
||||
prefix: format!("SELECT k, v FROM \"{table_name}\" WHERE k LIKE $1 ORDER BY k"),
|
||||
},
|
||||
delete_template: RangeTemplate {
|
||||
point: format!("DELETE FROM {table_name} WHERE k = $1 RETURNING k,v;"),
|
||||
range: format!("DELETE FROM {table_name} WHERE k >= $1 AND k < $2 RETURNING k,v;"),
|
||||
full: format!("DELETE FROM {table_name} RETURNING k,v"),
|
||||
left_bounded: format!("DELETE FROM {table_name} WHERE k >= $1 RETURNING k,v;"),
|
||||
prefix: format!("DELETE FROM {table_name} WHERE k LIKE $1 RETURNING k,v;"),
|
||||
point: format!("DELETE FROM \"{table_name}\" WHERE k = $1 RETURNING k,v;"),
|
||||
range: format!(
|
||||
"DELETE FROM \"{table_name}\" WHERE k >= $1 AND k < $2 RETURNING k,v;"
|
||||
),
|
||||
full: format!("DELETE FROM \"{table_name}\" RETURNING k,v"),
|
||||
left_bounded: format!("DELETE FROM \"{table_name}\" WHERE k >= $1 RETURNING k,v;"),
|
||||
prefix: format!("DELETE FROM \"{table_name}\" WHERE k LIKE $1 RETURNING k,v;"),
|
||||
},
|
||||
}
|
||||
}
|
||||
@@ -191,7 +195,10 @@ impl PgSqlTemplateSet {
|
||||
fn generate_batch_get_query(&self, key_len: usize) -> String {
|
||||
let table_name = &self.table_name;
|
||||
let in_clause = pg_generate_in_placeholders(1, key_len).join(", ");
|
||||
format!("SELECT k, v FROM {table_name} WHERE k in ({});", in_clause)
|
||||
format!(
|
||||
"SELECT k, v FROM \"{table_name}\" WHERE k in ({});",
|
||||
in_clause
|
||||
)
|
||||
}
|
||||
|
||||
/// Generates the sql for batch delete.
|
||||
@@ -199,7 +206,7 @@ impl PgSqlTemplateSet {
|
||||
let table_name = &self.table_name;
|
||||
let in_clause = pg_generate_in_placeholders(1, key_len).join(", ");
|
||||
format!(
|
||||
"DELETE FROM {table_name} WHERE k in ({}) RETURNING k,v;",
|
||||
"DELETE FROM \"{table_name}\" WHERE k in ({}) RETURNING k,v;",
|
||||
in_clause
|
||||
)
|
||||
}
|
||||
@@ -220,9 +227,9 @@ impl PgSqlTemplateSet {
|
||||
format!(
|
||||
r#"
|
||||
WITH prev AS (
|
||||
SELECT k,v FROM {table_name} WHERE k IN ({in_clause})
|
||||
SELECT k,v FROM "{table_name}" WHERE k IN ({in_clause})
|
||||
), update AS (
|
||||
INSERT INTO {table_name} (k, v) VALUES
|
||||
INSERT INTO "{table_name}" (k, v) VALUES
|
||||
{values_clause}
|
||||
ON CONFLICT (
|
||||
k
|
||||
|
||||
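The PostgreSQL templates get the same treatment, except that Postgres quotes identifiers with double quotes and uses numbered $1, $2 placeholders instead of ?. One caveat: a double-quoted identifier is matched case-sensitively, so the configured meta table name must be used verbatim in every statement. A one-line sketch, assuming the same format!-based template style as above:

/// Hypothetical helper mirroring the PostgreSQL templates: double quotes keep the
/// configured table name a single, case-sensitive identifier.
fn pg_quoted_select(table_name: &str) -> String {
    format!("SELECT k, v FROM \"{table_name}\" WHERE k = $1")
}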
@@ -12,63 +12,13 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use std::fmt::{Display, Formatter};
|
||||
use std::sync::Arc;
|
||||
|
||||
use api::v1::meta::Peer as PbPeer;
|
||||
use serde::{Deserialize, Serialize};
|
||||
pub use api::v1::meta::Peer;
|
||||
|
||||
use crate::error::Error;
|
||||
use crate::{DatanodeId, FlownodeId};
|
||||
|
||||
#[derive(Debug, Default, Clone, Hash, Eq, PartialEq, Deserialize, Serialize)]
|
||||
pub struct Peer {
|
||||
/// Node identifier. Unique in a cluster.
|
||||
pub id: u64,
|
||||
pub addr: String,
|
||||
}
|
||||
|
||||
impl From<PbPeer> for Peer {
|
||||
fn from(p: PbPeer) -> Self {
|
||||
Self {
|
||||
id: p.id,
|
||||
addr: p.addr,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl From<Peer> for PbPeer {
|
||||
fn from(p: Peer) -> Self {
|
||||
Self {
|
||||
id: p.id,
|
||||
addr: p.addr,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl Peer {
|
||||
pub fn new(id: u64, addr: impl Into<String>) -> Self {
|
||||
Self {
|
||||
id,
|
||||
addr: addr.into(),
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(any(test, feature = "testing"))]
|
||||
pub fn empty(id: u64) -> Self {
|
||||
Self {
|
||||
id,
|
||||
addr: String::new(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl Display for Peer {
|
||||
fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
|
||||
write!(f, "peer-{}({})", self.id, self.addr)
|
||||
}
|
||||
}
|
||||
|
||||
/// Can query a peer given a node id.
|
||||
#[async_trait::async_trait]
|
||||
pub trait PeerLookupService {
|
||||
|
||||
@@ -111,7 +111,7 @@ impl Eq for LoggingOptions {}
|
||||
impl Default for LoggingOptions {
|
||||
fn default() -> Self {
|
||||
Self {
|
||||
dir: "/tmp/greptimedb/logs".to_string(),
|
||||
dir: "./greptimedb_data/logs".to_string(),
|
||||
level: None,
|
||||
log_format: LogFormat::Text,
|
||||
enable_otlp_tracing: false,
|
||||
|
||||
@@ -36,7 +36,7 @@ use servers::Mode;
|
||||
pub const DEFAULT_OBJECT_STORE_CACHE_SIZE: ReadableSize = ReadableSize::gb(5);
|
||||
|
||||
/// Default data home in file storage
|
||||
const DEFAULT_DATA_HOME: &str = "/tmp/greptimedb";
|
||||
const DEFAULT_DATA_HOME: &str = "./greptimedb_data";
|
||||
|
||||
/// Object storage config
|
||||
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
|
||||
|
||||
@@ -25,6 +25,6 @@ pub mod heartbeat;
|
||||
pub mod metrics;
|
||||
pub mod region_server;
|
||||
pub mod service;
|
||||
mod store;
|
||||
pub mod store;
|
||||
#[cfg(any(test, feature = "testing"))]
|
||||
pub mod tests;
|
||||
|
||||
@@ -15,7 +15,7 @@
|
||||
//! object storage utilities
|
||||
|
||||
mod azblob;
|
||||
mod fs;
|
||||
pub mod fs;
|
||||
mod gcs;
|
||||
mod oss;
|
||||
mod s3;
|
||||
|
||||
@@ -24,7 +24,8 @@ use crate::config::FileConfig;
|
||||
use crate::error::{self, Result};
|
||||
use crate::store;
|
||||
|
||||
pub(crate) async fn new_fs_object_store(
|
||||
/// A helper function to create a file system object store.
|
||||
pub async fn new_fs_object_store(
|
||||
data_home: &str,
|
||||
_file_config: &FileConfig,
|
||||
) -> Result<ObjectStore> {
|
||||
|
||||
@@ -285,6 +285,20 @@ impl Value {
|
||||
}
|
||||
}
|
||||
|
||||
/// Cast Value to i64. Return None if value is not a valid int64 data type.
|
||||
pub fn as_i64(&self) -> Option<i64> {
|
||||
match self {
|
||||
Value::Int8(v) => Some(*v as _),
|
||||
Value::Int16(v) => Some(*v as _),
|
||||
Value::Int32(v) => Some(*v as _),
|
||||
Value::Int64(v) => Some(*v),
|
||||
Value::UInt8(v) => Some(*v as _),
|
||||
Value::UInt16(v) => Some(*v as _),
|
||||
Value::UInt32(v) => Some(*v as _),
|
||||
_ => None,
|
||||
}
|
||||
}
|
||||
|
||||
/// Cast Value to u64. Return None if value is not a valid uint64 data type.
|
||||
pub fn as_u64(&self) -> Option<u64> {
|
||||
match self {
|
||||
@@ -295,7 +309,6 @@ impl Value {
|
||||
_ => None,
|
||||
}
|
||||
}
|
||||
|
||||
/// Cast Value to f64. Return None if it's not castable;
|
||||
pub fn as_f64_lossy(&self) -> Option<f64> {
|
||||
match self {
|
||||
|
||||
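The new Value::as_i64 above only covers integer variants that fit losslessly into an i64 (signed types up to Int64 and unsigned types up to UInt32); UInt64 and non-integer variants return None, which is why callers such as vec_subvector treat a None result as a null argument. A small usage sketch; the datatypes::value::Value path is an assumption based on the impl block shown above:

// Sketch only: the module path is assumed from the surrounding diff.
use datatypes::value::Value;

/// Convert a Value into a non-negative index, rejecting negatives and
/// anything as_i64 cannot represent (e.g. UInt64 or floats).
fn start_index(v: &Value) -> Option<usize> {
    v.as_i64().filter(|i| *i >= 0).map(|i| i as usize)
}

fn main() {
    assert_eq!(start_index(&Value::Int32(3)), Some(3));
    assert_eq!(start_index(&Value::Int64(-1)), None);
    assert_eq!(start_index(&Value::UInt64(u64::MAX)), None);
}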
@@ -28,14 +28,14 @@ use common_recordbatch::adapter::RecordBatchStreamAdapter;
|
||||
use datafusion::dataframe::DataFrame;
|
||||
use datafusion::execution::context::SessionContext;
|
||||
use datafusion::execution::SessionStateBuilder;
|
||||
use datafusion_expr::{col, lit, lit_timestamp_nano, Expr};
|
||||
use datafusion_expr::{col, lit, lit_timestamp_nano, wildcard, Expr};
|
||||
use query::QueryEngineRef;
|
||||
use serde_json::Value as JsonValue;
|
||||
use servers::error::{
|
||||
CatalogSnafu, CollectRecordbatchSnafu, DataFusionSnafu, Result as ServerResult,
|
||||
TableNotFoundSnafu,
|
||||
};
|
||||
use servers::http::jaeger::{QueryTraceParams, FIND_TRACES_COLS};
|
||||
use servers::http::jaeger::{QueryTraceParams, JAEGER_QUERY_TABLE_NAME_KEY};
|
||||
use servers::otlp::trace::{
|
||||
DURATION_NANO_COLUMN, SERVICE_NAME_COLUMN, SPAN_ATTRIBUTES_COLUMN, SPAN_KIND_COLUMN,
|
||||
SPAN_KIND_PREFIX, SPAN_NAME_COLUMN, TIMESTAMP_COLUMN, TRACE_ID_COLUMN, TRACE_TABLE_NAME,
|
||||
@@ -43,6 +43,7 @@ use servers::otlp::trace::{
|
||||
use servers::query_handler::JaegerQueryHandler;
|
||||
use session::context::QueryContextRef;
|
||||
use snafu::{OptionExt, ResultExt};
|
||||
use table::requests::{TABLE_DATA_MODEL, TABLE_DATA_MODEL_TRACE_V1};
|
||||
use table::table::adapter::DfTableProviderAdapter;
|
||||
|
||||
use super::Instance;
|
||||
@@ -82,7 +83,19 @@ impl JaegerQueryHandler for Instance {
|
||||
))));
|
||||
}
|
||||
|
||||
// It's equivalent to `SELECT span_name, span_kind FROM {db}.{trace_table} WHERE service_name = '{service_name}'`.
|
||||
// It's equivalent to
|
||||
//
|
||||
// ```
|
||||
// SELECT
|
||||
// span_name,
|
||||
// span_kind
|
||||
// FROM
|
||||
// {db}.{trace_table}
|
||||
// WHERE
|
||||
// service_name = '{service_name}'
|
||||
// ORDER BY
|
||||
// timestamp
|
||||
// ```.
|
||||
Ok(query_trace_table(
|
||||
ctx,
|
||||
self.catalog_manager(),
|
||||
@@ -101,9 +114,19 @@ impl JaegerQueryHandler for Instance {
|
||||
}
|
||||
|
||||
async fn get_trace(&self, ctx: QueryContextRef, trace_id: &str) -> ServerResult<Output> {
|
||||
// It's equivalent to `SELECT trace_id, timestamp, duration_nano, service_name, span_name, span_id, span_attributes, resource_attributes, parent_span_id
|
||||
// FROM {db}.{trace_table} WHERE trace_id = '{trace_id}'`.
|
||||
let selects: Vec<Expr> = FIND_TRACES_COLS.clone();
|
||||
// It's equivalent to
|
||||
//
|
||||
// ```
|
||||
// SELECT
|
||||
// *
|
||||
// FROM
|
||||
// {db}.{trace_table}
|
||||
// WHERE
|
||||
// trace_id = '{trace_id}'
|
||||
// ORDER BY
|
||||
// timestamp
|
||||
// ```.
|
||||
let selects = vec![wildcard()];
|
||||
|
||||
let filters = vec![col(TRACE_ID_COLUMN).eq(lit(trace_id))];
|
||||
|
||||
@@ -125,7 +148,7 @@ impl JaegerQueryHandler for Instance {
|
||||
ctx: QueryContextRef,
|
||||
query_params: QueryTraceParams,
|
||||
) -> ServerResult<Output> {
|
||||
let selects: Vec<Expr> = FIND_TRACES_COLS.clone();
|
||||
let selects = vec![wildcard()];
|
||||
|
||||
let mut filters = vec![];
|
||||
|
||||
@@ -174,17 +197,34 @@ async fn query_trace_table(
|
||||
tags: Option<HashMap<String, JsonValue>>,
|
||||
distinct: bool,
|
||||
) -> ServerResult<Output> {
|
||||
let db = ctx.get_db_string();
|
||||
let table_name = ctx
|
||||
.extension(JAEGER_QUERY_TABLE_NAME_KEY)
|
||||
.unwrap_or(TRACE_TABLE_NAME);
|
||||
|
||||
let table = catalog_manager
|
||||
.table(ctx.current_catalog(), &db, TRACE_TABLE_NAME, Some(&ctx))
|
||||
.table(
|
||||
ctx.current_catalog(),
|
||||
&ctx.current_schema(),
|
||||
table_name,
|
||||
Some(&ctx),
|
||||
)
|
||||
.await
|
||||
.context(CatalogSnafu)?
|
||||
.with_context(|| TableNotFoundSnafu {
|
||||
table: TRACE_TABLE_NAME,
|
||||
table: table_name,
|
||||
catalog: ctx.current_catalog(),
|
||||
schema: db,
|
||||
schema: ctx.current_schema(),
|
||||
})?;
|
||||
|
||||
let is_data_model_v1 = table
|
||||
.table_info()
|
||||
.meta
|
||||
.options
|
||||
.extra_options
|
||||
.get(TABLE_DATA_MODEL)
|
||||
.map(|s| s.as_str())
|
||||
== Some(TABLE_DATA_MODEL_TRACE_V1);
|
||||
|
||||
let df_context = create_df_context(query_engine, ctx.clone())?;
|
||||
|
||||
let dataframe = df_context
|
||||
@@ -196,7 +236,9 @@ async fn query_trace_table(
|
||||
// Apply all filters.
|
||||
let dataframe = filters
|
||||
.into_iter()
|
||||
.chain(tags.map_or(Ok(vec![]), |t| tags_filters(&dataframe, t))?)
|
||||
.chain(tags.map_or(Ok(vec![]), |t| {
|
||||
tags_filters(&dataframe, t, is_data_model_v1)
|
||||
})?)
|
||||
.try_fold(dataframe, |df, expr| {
|
||||
df.filter(expr).context(DataFusionSnafu)
|
||||
})?;
|
||||
@@ -205,7 +247,10 @@ async fn query_trace_table(
|
||||
let dataframe = if distinct {
|
||||
dataframe.distinct().context(DataFusionSnafu)?
|
||||
} else {
|
||||
// For non-distinct queries, sort by timestamp to make the results stable.
|
||||
dataframe
|
||||
.sort_by(vec![col(TIMESTAMP_COLUMN)])
|
||||
.context(DataFusionSnafu)?
|
||||
};
|
||||
|
||||
// Apply the limit if needed.
|
||||
@@ -237,7 +282,7 @@ fn create_df_context(
|
||||
SessionStateBuilder::new_from_existing(query_engine.engine_state().session_state()).build(),
|
||||
);
|
||||
|
||||
// The following JSON UDFs will be used for tags filters.
|
||||
// The following JSON UDFs will be used for tags filters on v0 data model.
|
||||
let udfs: Vec<FunctionRef> = vec![
|
||||
Arc::new(JsonGetInt),
|
||||
Arc::new(JsonGetFloat),
|
||||
@@ -256,7 +301,7 @@ fn create_df_context(
|
||||
Ok(df_context)
|
||||
}
|
||||
|
||||
fn tags_filters(
|
||||
fn json_tag_filters(
|
||||
dataframe: &DataFrame,
|
||||
tags: HashMap<String, JsonValue>,
|
||||
) -> ServerResult<Vec<Expr>> {
|
||||
@@ -322,3 +367,41 @@ fn tags_filters(
|
||||
|
||||
Ok(filters)
|
||||
}
|
||||
|
||||
fn flatten_tag_filters(tags: HashMap<String, JsonValue>) -> ServerResult<Vec<Expr>> {
|
||||
let filters = tags
|
||||
.into_iter()
|
||||
.filter_map(|(key, value)| {
|
||||
let key = format!("\"span_attributes.{}\"", key);
|
||||
match value {
|
||||
JsonValue::String(value) => Some(col(key).eq(lit(value))),
|
||||
JsonValue::Number(value) => {
|
||||
if value.is_f64() {
|
||||
// safe to unwrap as checked previously
|
||||
Some(col(key).eq(lit(value.as_f64().unwrap())))
|
||||
} else {
|
||||
Some(col(key).eq(lit(value.as_i64().unwrap())))
|
||||
}
|
||||
}
|
||||
JsonValue::Bool(value) => Some(col(key).eq(lit(value))),
|
||||
JsonValue::Null => Some(col(key).is_null()),
|
||||
// not supported at the moment
|
||||
JsonValue::Array(_value) => None,
|
||||
JsonValue::Object(_value) => None,
|
||||
}
|
||||
})
|
||||
.collect();
|
||||
Ok(filters)
|
||||
}
|
||||
|
||||
fn tags_filters(
|
||||
dataframe: &DataFrame,
|
||||
tags: HashMap<String, JsonValue>,
|
||||
is_data_model_v1: bool,
|
||||
) -> ServerResult<Vec<Expr>> {
|
||||
if is_data_model_v1 {
|
||||
flatten_tag_filters(tags)
|
||||
} else {
|
||||
json_tag_filters(dataframe, tags)
|
||||
}
|
||||
}
|
||||
|
||||
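For the v1 trace data model the tag filters above no longer go through the JSON UDFs: each Jaeger tag is compared directly against a flattened "span_attributes.<key>" column, as flatten_tag_filters does. A trimmed-down sketch of that mapping, using only the datafusion_expr and serde_json items already imported in this file (numbers, arrays and objects are omitted to keep it short):

use std::collections::HashMap;

use datafusion_expr::{col, lit, Expr};
use serde_json::Value as JsonValue;

/// Sketch: turn Jaeger tags into equality filters on flattened span attribute columns.
fn flatten_tags(tags: HashMap<String, JsonValue>) -> Vec<Expr> {
    tags.into_iter()
        .filter_map(|(key, value)| {
            // The embedded quotes keep the dotted column name a single identifier.
            let column = format!("\"span_attributes.{}\"", key);
            match value {
                JsonValue::String(s) => Some(col(column).eq(lit(s))),
                JsonValue::Bool(b) => Some(col(column).eq(lit(b))),
                JsonValue::Null => Some(col(column).is_null()),
                _ => None,
            }
        })
        .collect()
}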
@@ -12,6 +12,8 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use std::ops::Deref;
|
||||
|
||||
use auth::{PermissionChecker, PermissionCheckerRef, PermissionReq};
|
||||
use client::Output;
|
||||
use common_error::ext::BoxedError;
|
||||
@@ -20,7 +22,7 @@ use server_error::Result as ServerResult;
|
||||
use servers::error::{self as server_error, AuthSnafu, ExecuteQuerySnafu};
|
||||
use servers::interceptor::{LogQueryInterceptor, LogQueryInterceptorRef};
|
||||
use servers::query_handler::LogQueryHandler;
|
||||
use session::context::QueryContextRef;
|
||||
use session::context::{QueryContext, QueryContextRef};
|
||||
use snafu::ResultExt;
|
||||
use tonic::async_trait;
|
||||
|
||||
@@ -64,4 +66,8 @@ impl LogQueryHandler for Instance {
|
||||
|
||||
Ok(interceptor.as_ref().post_query(output, ctx.clone())?)
|
||||
}
|
||||
|
||||
fn catalog_manager(&self, _ctx: &QueryContext) -> ServerResult<&dyn catalog::CatalogManager> {
|
||||
Ok(self.catalog_manager.deref())
|
||||
}
|
||||
}
|
||||
|
||||
@@ -90,6 +90,8 @@ impl OpenTelemetryProtocolHandler for Instance {
|
||||
.get::<OpenTelemetryProtocolInterceptorRef<servers::error::Error>>();
|
||||
interceptor_ref.pre_execute(ctx.clone())?;
|
||||
|
||||
let is_trace_v1_model = matches!(pipeline, PipelineWay::OtlpTraceDirectV1);
|
||||
|
||||
let (requests, rows) = otlp::trace::to_grpc_insert_requests(
|
||||
request,
|
||||
pipeline,
|
||||
@@ -101,10 +103,17 @@ impl OpenTelemetryProtocolHandler for Instance {
|
||||
|
||||
OTLP_TRACES_ROWS.inc_by(rows as u64);
|
||||
|
||||
self.handle_trace_inserts(requests, ctx)
|
||||
.await
|
||||
.map_err(BoxedError::new)
|
||||
.context(error::ExecuteGrpcQuerySnafu)
|
||||
if is_trace_v1_model {
|
||||
self.handle_trace_inserts(requests, ctx)
|
||||
.await
|
||||
.map_err(BoxedError::new)
|
||||
.context(error::ExecuteGrpcQuerySnafu)
|
||||
} else {
|
||||
self.handle_log_inserts(requests, ctx)
|
||||
.await
|
||||
.map_err(BoxedError::new)
|
||||
.context(error::ExecuteGrpcQuerySnafu)
|
||||
}
|
||||
}
|
||||
|
||||
#[tracing::instrument(skip_all)]
|
||||
|
||||
@@ -284,7 +284,7 @@ impl ClusterInfo for MetaClient {
|
||||
followers
|
||||
.into_iter()
|
||||
.map(|node| NodeInfo {
|
||||
peer: node.peer.map(|p| p.into()).unwrap_or_default(),
|
||||
peer: node.peer.unwrap_or_default(),
|
||||
last_activity_ts,
|
||||
status: NodeStatus::Metasrv(MetasrvStatus { is_leader: false }),
|
||||
version: node.version,
|
||||
@@ -292,7 +292,7 @@ impl ClusterInfo for MetaClient {
|
||||
start_time_ms: node.start_time_ms,
|
||||
})
|
||||
.chain(leader.into_iter().map(|node| NodeInfo {
|
||||
peer: node.peer.map(|p| p.into()).unwrap_or_default(),
|
||||
peer: node.peer.unwrap_or_default(),
|
||||
last_activity_ts,
|
||||
status: NodeStatus::Metasrv(MetasrvStatus { is_leader: true }),
|
||||
version: node.version,
|
||||
|
||||
@@ -6,7 +6,8 @@ license.workspace = true
|
||||
|
||||
[features]
|
||||
mock = []
|
||||
pg_kvbackend = ["dep:tokio-postgres", "common-meta/pg_kvbackend"]
|
||||
pg_kvbackend = ["dep:tokio-postgres", "common-meta/pg_kvbackend", "dep:deadpool-postgres", "dep:deadpool"]
|
||||
mysql_kvbackend = ["dep:sqlx", "common-meta/mysql_kvbackend"]
|
||||
|
||||
[lints]
|
||||
workspace = true
|
||||
@@ -38,8 +39,8 @@ common-version.workspace = true
|
||||
common-wal.workspace = true
|
||||
dashmap.workspace = true
|
||||
datatypes.workspace = true
|
||||
deadpool.workspace = true
|
||||
deadpool-postgres.workspace = true
|
||||
deadpool = { workspace = true, optional = true }
|
||||
deadpool-postgres = { workspace = true, optional = true }
|
||||
derive_builder.workspace = true
|
||||
etcd-client.workspace = true
|
||||
futures.workspace = true
|
||||
@@ -60,6 +61,7 @@ serde.workspace = true
|
||||
serde_json.workspace = true
|
||||
servers.workspace = true
|
||||
snafu.workspace = true
|
||||
sqlx = { workspace = true, optional = true }
|
||||
store-api.workspace = true
|
||||
strum.workspace = true
|
||||
table.workspace = true
|
||||
|
||||
@@ -23,6 +23,8 @@ use common_config::Configurable;
|
||||
use common_meta::kv_backend::chroot::ChrootKvBackend;
|
||||
use common_meta::kv_backend::etcd::EtcdStore;
|
||||
use common_meta::kv_backend::memory::MemoryKvBackend;
|
||||
#[cfg(feature = "mysql_kvbackend")]
|
||||
use common_meta::kv_backend::rds::MySqlStore;
|
||||
#[cfg(feature = "pg_kvbackend")]
|
||||
use common_meta::kv_backend::rds::PgStore;
|
||||
use common_meta::kv_backend::{KvBackendRef, ResettableKvBackendRef};
|
||||
@@ -38,9 +40,15 @@ use servers::export_metrics::ExportMetricsTask;
|
||||
use servers::http::{HttpServer, HttpServerBuilder};
|
||||
use servers::metrics_handler::MetricsHandler;
|
||||
use servers::server::Server;
|
||||
#[cfg(feature = "pg_kvbackend")]
|
||||
#[cfg(any(feature = "pg_kvbackend", feature = "mysql_kvbackend"))]
|
||||
use snafu::OptionExt;
|
||||
use snafu::ResultExt;
|
||||
#[cfg(feature = "mysql_kvbackend")]
|
||||
use sqlx::mysql::MySqlConnectOptions;
|
||||
#[cfg(feature = "mysql_kvbackend")]
|
||||
use sqlx::mysql::{MySqlConnection, MySqlPool};
|
||||
#[cfg(feature = "mysql_kvbackend")]
|
||||
use sqlx::Connection;
|
||||
use tokio::net::TcpListener;
|
||||
use tokio::sync::mpsc::{self, Receiver, Sender};
|
||||
#[cfg(feature = "pg_kvbackend")]
|
||||
@@ -49,9 +57,11 @@ use tonic::codec::CompressionEncoding;
|
||||
use tonic::transport::server::{Router, TcpIncoming};
|
||||
|
||||
use crate::election::etcd::EtcdElection;
|
||||
#[cfg(feature = "mysql_kvbackend")]
|
||||
use crate::election::mysql::MySqlElection;
|
||||
#[cfg(feature = "pg_kvbackend")]
|
||||
use crate::election::postgres::PgElection;
|
||||
#[cfg(feature = "pg_kvbackend")]
|
||||
#[cfg(any(feature = "pg_kvbackend", feature = "mysql_kvbackend"))]
|
||||
use crate::election::CANDIDATE_LEASE_SECS;
|
||||
use crate::metasrv::builder::MetasrvBuilder;
|
||||
use crate::metasrv::{BackendImpl, Metasrv, MetasrvOptions, SelectorRef};
|
||||
@@ -229,7 +239,6 @@ pub async fn metasrv_builder(
|
||||
#[cfg(feature = "pg_kvbackend")]
|
||||
(None, BackendImpl::PostgresStore) => {
|
||||
let pool = create_postgres_pool(opts).await?;
|
||||
// TODO(CookiePie): use table name from config.
|
||||
let kv_backend = PgStore::with_pg_pool(pool, &opts.meta_table_name, opts.max_txn_ops)
|
||||
.await
|
||||
.context(error::KvBackendSnafu)?;
|
||||
@@ -246,6 +255,26 @@ pub async fn metasrv_builder(
|
||||
.await?;
|
||||
(kv_backend, Some(election))
|
||||
}
|
||||
#[cfg(feature = "mysql_kvbackend")]
|
||||
(None, BackendImpl::MysqlStore) => {
|
||||
let pool = create_mysql_pool(opts).await?;
|
||||
let kv_backend =
|
||||
MySqlStore::with_mysql_pool(pool, &opts.meta_table_name, opts.max_txn_ops)
|
||||
.await
|
||||
.context(error::KvBackendSnafu)?;
|
||||
// Since election will acquire a lock of the table, we need a separate table for election.
|
||||
let election_table_name = opts.meta_table_name.clone() + "_election";
|
||||
let election_client = create_mysql_client(opts).await?;
|
||||
let election = MySqlElection::with_mysql_client(
|
||||
opts.server_addr.clone(),
|
||||
election_client,
|
||||
opts.store_key_prefix.clone(),
|
||||
CANDIDATE_LEASE_SECS,
|
||||
&election_table_name,
|
||||
)
|
||||
.await?;
|
||||
(kv_backend, Some(election))
|
||||
}
|
||||
};
|
||||
|
||||
if !opts.store_key_prefix.is_empty() {
|
||||
@@ -323,3 +352,41 @@ async fn create_postgres_pool(opts: &MetasrvOptions) -> Result<deadpool_postgres
|
||||
.context(error::CreatePostgresPoolSnafu)?;
|
||||
Ok(pool)
|
||||
}
|
||||
|
||||
#[cfg(feature = "mysql_kvbackend")]
|
||||
async fn setup_mysql_options(opts: &MetasrvOptions) -> Result<MySqlConnectOptions> {
|
||||
let mysql_url = opts
|
||||
.store_addrs
|
||||
.first()
|
||||
.context(error::InvalidArgumentsSnafu {
|
||||
err_msg: "empty store addrs",
|
||||
})?;
|
||||
// Avoid `SET` commands in sqlx
|
||||
let opts: MySqlConnectOptions = mysql_url
|
||||
.parse()
|
||||
.context(error::ParseMySqlUrlSnafu { mysql_url })?;
|
||||
let opts = opts
|
||||
.no_engine_substitution(false)
|
||||
.pipes_as_concat(false)
|
||||
.timezone(None)
|
||||
.set_names(false);
|
||||
Ok(opts)
|
||||
}
|
||||
|
||||
#[cfg(feature = "mysql_kvbackend")]
|
||||
async fn create_mysql_pool(opts: &MetasrvOptions) -> Result<MySqlPool> {
|
||||
let opts = setup_mysql_options(opts).await?;
|
||||
let pool = MySqlPool::connect_with(opts)
|
||||
.await
|
||||
.context(error::CreateMySqlPoolSnafu)?;
|
||||
Ok(pool)
|
||||
}
|
||||
|
||||
#[cfg(feature = "mysql_kvbackend")]
|
||||
async fn create_mysql_client(opts: &MetasrvOptions) -> Result<MySqlConnection> {
|
||||
let opts = setup_mysql_options(opts).await?;
|
||||
let client = MySqlConnection::connect_with(&opts)
|
||||
.await
|
||||
.context(error::ConnectMySqlSnafu)?;
|
||||
Ok(client)
|
||||
}
|
||||
|
||||
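setup_mysql_options above turns the first store_addrs entry into MySqlConnectOptions and switches off the implicit SET statements sqlx would otherwise issue on connect (engine substitution, PIPES_AS_CONCAT, time zone and SET NAMES), which some managed MySQL services reject. A hedged sketch of the same idea outside MetasrvOptions, reusing only the sqlx calls that appear in the diff; the anyhow error handling is an assumption made for brevity:

use sqlx::mysql::{MySqlConnectOptions, MySqlPool};

/// Sketch: build a MySQL pool for the metadata backend from a single URL,
/// mirroring setup_mysql_options/create_mysql_pool above.
async fn mysql_meta_pool(mysql_url: &str) -> anyhow::Result<MySqlPool> {
    let opts: MySqlConnectOptions = mysql_url.parse()?;
    // Avoid the implicit `SET` commands sqlx sends by default.
    let opts = opts
        .no_engine_substitution(false)
        .pipes_as_concat(false)
        .timezone(None)
        .set_names(false);
    Ok(MySqlPool::connect_with(opts).await?)
}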
@@ -13,6 +13,8 @@
|
||||
// limitations under the License.
|
||||
|
||||
pub mod etcd;
|
||||
#[cfg(feature = "mysql_kvbackend")]
|
||||
pub mod mysql;
|
||||
#[cfg(feature = "pg_kvbackend")]
|
||||
pub mod postgres;
|
||||
src/meta-srv/src/election/mysql.rs (new file, 800 lines)
@@ -0,0 +1,800 @@
// Copyright 2023 Greptime Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use std::sync::atomic::{AtomicBool, Ordering};
|
||||
use std::sync::Arc;
|
||||
use std::time::Duration;
|
||||
|
||||
use common_meta::distributed_time_constants::{META_KEEP_ALIVE_INTERVAL_SECS, META_LEASE_SECS};
|
||||
use common_telemetry::{error, warn};
|
||||
use common_time::Timestamp;
|
||||
use itertools::Itertools;
|
||||
use snafu::{ensure, OptionExt, ResultExt};
|
||||
use sqlx::mysql::{MySqlArguments, MySqlRow};
|
||||
use sqlx::query::Query;
|
||||
use sqlx::{MySql, MySqlConnection, MySqlTransaction, Row};
|
||||
use tokio::sync::{broadcast, Mutex, MutexGuard};
|
||||
use tokio::time::{Interval, MissedTickBehavior};
|
||||
|
||||
use crate::election::{
|
||||
listen_leader_change, Election, LeaderChangeMessage, LeaderKey, CANDIDATES_ROOT, ELECTION_KEY,
|
||||
};
|
||||
use crate::error::{
|
||||
DeserializeFromJsonSnafu, MySqlExecutionSnafu, NoLeaderSnafu, Result, SerializeToJsonSnafu,
|
||||
UnexpectedSnafu,
|
||||
};
|
||||
use crate::metasrv::{ElectionRef, LeaderValue, MetasrvNodeInfo};
|
||||
|
||||
// Separator between value and expire time.
|
||||
const LEASE_SEP: &str = r#"||__metadata_lease_sep||"#;
|
||||
|
||||
/// Lease information.
|
||||
/// TODO(CookiePie): PgElection can also use this struct. Refactor it to a common module.
|
||||
#[derive(Default, Clone)]
|
||||
struct Lease {
|
||||
leader_value: String,
|
||||
expire_time: Timestamp,
|
||||
current: Timestamp,
|
||||
// origin is the origin value of the lease, used for CAS.
|
||||
origin: String,
|
||||
}
|
||||
|
||||
struct ElectionSqlFactory<'a> {
|
||||
table_name: &'a str,
|
||||
}
|
||||
|
||||
struct ElectionSqlSet {
|
||||
campaign: String,
|
||||
// SQL to put a value with expire time.
|
||||
//
|
||||
// Parameters for the query:
|
||||
// `$1`: key,
|
||||
// `$2`: value,
|
||||
// `$3`: lease time in seconds
|
||||
//
|
||||
// Returns:
|
||||
// If the key already exists, return the previous value.
|
||||
put_value_with_lease: String,
|
||||
// SQL to update a value with expire time.
|
||||
//
|
||||
// Parameters for the query:
|
||||
// `$1`: updated value,
|
||||
// `$2`: lease time in seconds
|
||||
// `$3`: key,
|
||||
// `$4`: previous value,
|
||||
update_value_with_lease: String,
|
||||
// SQL to get a value with expire time.
|
||||
//
|
||||
// Parameters:
|
||||
// `$1`: key
|
||||
get_value_with_lease: String,
|
||||
// SQL to get all values with expire time with the given key prefix.
|
||||
//
|
||||
// Parameters:
|
||||
// `$1`: key prefix like 'prefix%'
|
||||
//
|
||||
// Returns:
|
||||
// column 0: value,
|
||||
// column 1: current timestamp
|
||||
get_value_with_lease_by_prefix: String,
|
||||
// SQL to delete a value.
|
||||
//
|
||||
// Parameters:
|
||||
// `?`: key
|
||||
//
|
||||
// Returns:
|
||||
// Rows affected
|
||||
delete_value: String,
|
||||
}
|
||||
|
||||
impl<'a> ElectionSqlFactory<'a> {
|
||||
fn new(table_name: &'a str) -> Self {
|
||||
Self { table_name }
|
||||
}
|
||||
|
||||
fn build(self) -> ElectionSqlSet {
|
||||
ElectionSqlSet {
|
||||
campaign: self.campaign_sql(),
|
||||
put_value_with_lease: self.put_value_with_lease_sql(),
|
||||
update_value_with_lease: self.update_value_with_lease_sql(),
|
||||
get_value_with_lease: self.get_value_with_lease_sql(),
|
||||
get_value_with_lease_by_prefix: self.get_value_with_lease_by_prefix_sql(),
|
||||
delete_value: self.delete_value_sql(),
|
||||
}
|
||||
}
|
||||
|
||||
// Currently the session timeout is longer than the leader lease time.
|
||||
// So the leader will renew the lease twice before the session timeout if everything goes well.
|
||||
fn set_idle_session_timeout_sql(&self) -> String {
|
||||
format!("SET SESSION wait_timeout = {};", META_LEASE_SECS + 1)
|
||||
}
|
||||
|
||||
fn set_lock_wait_timeout_sql(&self) -> &str {
|
||||
"SET SESSION innodb_lock_wait_timeout = 1;"
|
||||
}
|
||||
|
||||
fn create_table_sql(&self) -> String {
|
||||
format!(
|
||||
r#"
|
||||
CREATE TABLE IF NOT EXISTS `{}` (
|
||||
k VARBINARY(3072) PRIMARY KEY,
|
||||
v BLOB
|
||||
);
|
||||
"#,
|
||||
self.table_name
|
||||
)
|
||||
}
|
||||
|
||||
fn insert_once(&self) -> String {
|
||||
format!(
|
||||
"INSERT IGNORE INTO `{}` (k, v) VALUES ('__place_holder_for_lock', '');",
|
||||
self.table_name
|
||||
)
|
||||
}
|
||||
|
||||
fn check_version(&self) -> &str {
|
||||
"SELECT @@version;"
|
||||
}
|
||||
|
||||
fn campaign_sql(&self) -> String {
|
||||
format!("SELECT * FROM `{}` FOR UPDATE;", self.table_name)
|
||||
}
|
||||
|
||||
fn put_value_with_lease_sql(&self) -> String {
|
||||
format!(
|
||||
r#"
|
||||
INSERT INTO `{}` (k, v) VALUES (
|
||||
?,
|
||||
CONCAT(
|
||||
?,
|
||||
'{}',
|
||||
DATE_FORMAT(DATE_ADD(NOW(4), INTERVAL ? SECOND), '%Y-%m-%d %T.%f')
|
||||
)
|
||||
)
|
||||
ON DUPLICATE KEY UPDATE v = VALUES(v);
|
||||
"#,
|
||||
self.table_name, LEASE_SEP
|
||||
)
|
||||
}
|
||||
|
||||
fn update_value_with_lease_sql(&self) -> String {
|
||||
format!(
|
||||
r#"UPDATE `{}`
|
||||
SET v = CONCAT(?, '{}', DATE_FORMAT(DATE_ADD(NOW(4), INTERVAL ? SECOND), '%Y-%m-%d %T.%f'))
|
||||
WHERE k = ? AND v = ?"#,
|
||||
self.table_name, LEASE_SEP
|
||||
)
|
||||
}
|
||||
|
||||
fn get_value_with_lease_sql(&self) -> String {
|
||||
format!(
|
||||
r#"SELECT v, DATE_FORMAT(NOW(4), '%Y-%m-%d %T.%f') FROM `{}` WHERE k = ?"#,
|
||||
self.table_name
|
||||
)
|
||||
}
|
||||
|
||||
fn get_value_with_lease_by_prefix_sql(&self) -> String {
|
||||
format!(
|
||||
r#"SELECT v, DATE_FORMAT(NOW(4), '%Y-%m-%d %T.%f') FROM `{}` WHERE k LIKE ?"#,
|
||||
self.table_name
|
||||
)
|
||||
}
|
||||
|
||||
fn delete_value_sql(&self) -> String {
|
||||
format!("DELETE FROM {} WHERE k = ?;", self.table_name)
|
||||
}
|
||||
}
|
||||
|
||||
/// Parse the value and expire time from the given string. The value should be in the format "value || LEASE_SEP || expire_time".
|
||||
fn parse_value_and_expire_time(value: &str) -> Result<(String, Timestamp)> {
|
||||
let (value, expire_time) =
|
||||
value
|
||||
.split(LEASE_SEP)
|
||||
.collect_tuple()
|
||||
.with_context(|| UnexpectedSnafu {
|
||||
violated: format!(
|
||||
"Invalid value {}, expect node info || {} || expire time",
|
||||
value, LEASE_SEP
|
||||
),
|
||||
})?;
|
||||
// The expire_time produced by DATE_FORMAT above is in the format 'YYYY-MM-DD HH:MM:SS.ffffff'.
|
||||
let expire_time = match Timestamp::from_str(expire_time, None) {
|
||||
Ok(ts) => ts,
|
||||
Err(_) => UnexpectedSnafu {
|
||||
violated: format!("Invalid timestamp: {}", expire_time),
|
||||
}
|
||||
.fail()?,
|
||||
};
|
||||
Ok((value.to_string(), expire_time))
|
||||
}
|
||||
|
||||
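A lease entry is stored as a single string of the form <node info>||__metadata_lease_sep||<expire time>, and parse_value_and_expire_time splits it back apart before the expire time is parsed into a Timestamp. A standard-library-only sketch of the same round trip (the real code uses itertools::Itertools::collect_tuple and common_time::Timestamp):

const LEASE_SEP: &str = r#"||__metadata_lease_sep||"#;

/// Sketch: split a stored lease value into node info and the textual expire time.
fn split_lease(raw: &str) -> Option<(&str, &str)> {
    raw.split_once(LEASE_SEP)
}

fn main() {
    let raw = format!("{}{}{}", "127.0.0.1:3002", LEASE_SEP, "2025-01-01 00:00:10.000000");
    let (value, expire) = split_lease(&raw).expect("malformed lease value");
    assert_eq!(value, "127.0.0.1:3002");
    assert_eq!(expire, "2025-01-01 00:00:10.000000");
}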
#[derive(Debug, Clone, Default)]
|
||||
struct MySqlLeaderKey {
|
||||
name: Vec<u8>,
|
||||
key: Vec<u8>,
|
||||
rev: i64,
|
||||
lease: i64,
|
||||
}
|
||||
|
||||
impl LeaderKey for MySqlLeaderKey {
|
||||
fn name(&self) -> &[u8] {
|
||||
&self.name
|
||||
}
|
||||
|
||||
fn key(&self) -> &[u8] {
|
||||
&self.key
|
||||
}
|
||||
|
||||
fn revision(&self) -> i64 {
|
||||
self.rev
|
||||
}
|
||||
|
||||
fn lease_id(&self) -> i64 {
|
||||
self.lease
|
||||
}
|
||||
}
|
||||
|
||||
enum Executor<'a> {
|
||||
Default(MutexGuard<'a, MySqlConnection>),
|
||||
Txn(MySqlTransaction<'a>),
|
||||
}
|
||||
|
||||
impl Executor<'_> {
|
||||
async fn query(
|
||||
&mut self,
|
||||
query: Query<'_, MySql, MySqlArguments>,
|
||||
sql: &str,
|
||||
) -> Result<Vec<MySqlRow>> {
|
||||
match self {
|
||||
Executor::Default(client) => {
|
||||
let res = query
|
||||
.fetch_all(&mut **client)
|
||||
.await
|
||||
.context(MySqlExecutionSnafu { sql })?;
|
||||
Ok(res)
|
||||
}
|
||||
Executor::Txn(txn) => {
|
||||
let res = query
|
||||
.fetch_all(&mut **txn)
|
||||
.await
|
||||
.context(MySqlExecutionSnafu { sql })?;
|
||||
Ok(res)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
async fn execute(&mut self, query: Query<'_, MySql, MySqlArguments>, sql: &str) -> Result<u64> {
|
||||
match self {
|
||||
Executor::Default(client) => {
|
||||
let res = query
|
||||
.execute(&mut **client)
|
||||
.await
|
||||
.context(MySqlExecutionSnafu { sql })?;
|
||||
Ok(res.rows_affected())
|
||||
}
|
||||
Executor::Txn(txn) => {
|
||||
let res = query
|
||||
.execute(&mut **txn)
|
||||
.await
|
||||
.context(MySqlExecutionSnafu { sql })?;
|
||||
Ok(res.rows_affected())
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
async fn commit(self) -> Result<()> {
|
||||
match self {
|
||||
Executor::Txn(txn) => {
|
||||
txn.commit()
|
||||
.await
|
||||
.context(MySqlExecutionSnafu { sql: "COMMIT" })?;
|
||||
Ok(())
|
||||
}
|
||||
_ => Ok(()),
|
||||
}
|
||||
}
|
||||
}
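// --- Illustrative sketch (added for clarity; not part of the original source). ---
// Shows how `Executor` lets the same query path run either directly on the shared
// connection or inside a transaction. The statement below is a hypothetical
// stand-in for the election SQL used elsewhere in this file.
#[allow(dead_code)]
async fn executor_usage_example(client: &Mutex<MySqlConnection>) -> Result<u64> {
    // Non-transactional path: hold the connection lock for the single statement.
    let guard = client.lock().await;
    let mut executor = Executor::Default(guard);
    let sql = "SELECT 1;";
    let query = sqlx::query(sql);
    executor.execute(query, sql).await
}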
|
||||
|
||||
/// MySQL implementation of Election.
|
||||
pub struct MySqlElection {
|
||||
leader_value: String,
|
||||
client: Mutex<MySqlConnection>,
|
||||
is_leader: AtomicBool,
|
||||
leader_infancy: AtomicBool,
|
||||
leader_watcher: broadcast::Sender<LeaderChangeMessage>,
|
||||
store_key_prefix: String,
|
||||
candidate_lease_ttl_secs: u64,
|
||||
sql_set: ElectionSqlSet,
|
||||
}
|
||||
|
||||
impl MySqlElection {
|
||||
pub async fn with_mysql_client(
|
||||
leader_value: String,
|
||||
mut client: sqlx::MySqlConnection,
|
||||
store_key_prefix: String,
|
||||
candidate_lease_ttl_secs: u64,
|
||||
table_name: &str,
|
||||
) -> Result<ElectionRef> {
|
||||
let sql_factory = ElectionSqlFactory::new(table_name);
|
||||
sqlx::query(&sql_factory.create_table_sql())
|
||||
.execute(&mut client)
|
||||
.await
|
||||
.context(MySqlExecutionSnafu {
|
||||
sql: &sql_factory.create_table_sql(),
|
||||
})?;
|
||||
// Set idle session timeout to IDLE_SESSION_TIMEOUT to avoid dead lock.
|
||||
sqlx::query(&sql_factory.set_idle_session_timeout_sql())
|
||||
.execute(&mut client)
|
||||
.await
|
||||
.context(MySqlExecutionSnafu {
|
||||
sql: &sql_factory.set_idle_session_timeout_sql(),
|
||||
})?;
|
||||
// Set lock wait timeout to LOCK_WAIT_TIMEOUT to avoid waiting too long.
|
||||
sqlx::query(sql_factory.set_lock_wait_timeout_sql())
|
||||
.execute(&mut client)
|
||||
.await
|
||||
.context(MySqlExecutionSnafu {
|
||||
sql: sql_factory.set_lock_wait_timeout_sql(),
|
||||
})?;
|
||||
// Insert at least one row for `SELECT * FOR UPDATE` to work.
|
||||
sqlx::query(&sql_factory.insert_once())
|
||||
.execute(&mut client)
|
||||
.await
|
||||
.context(MySqlExecutionSnafu {
|
||||
sql: &sql_factory.insert_once(),
|
||||
})?;
|
||||
// Check MySQL version
|
||||
Self::check_version(&mut client, sql_factory.check_version()).await?;
|
||||
let tx = listen_leader_change(leader_value.clone());
|
||||
Ok(Arc::new(Self {
|
||||
leader_value,
|
||||
client: Mutex::new(client),
|
||||
is_leader: AtomicBool::new(false),
|
||||
leader_infancy: AtomicBool::new(false),
|
||||
leader_watcher: tx,
|
||||
store_key_prefix,
|
||||
candidate_lease_ttl_secs,
|
||||
sql_set: sql_factory.build(),
|
||||
}))
|
||||
}
|
||||
|
||||
fn election_key(&self) -> String {
|
||||
format!("{}{}", self.store_key_prefix, ELECTION_KEY)
|
||||
}
|
||||
|
||||
fn candidate_root(&self) -> String {
|
||||
format!("{}{}", self.store_key_prefix, CANDIDATES_ROOT)
|
||||
}
|
||||
|
||||
fn candidate_key(&self) -> String {
|
||||
format!("{}{}", self.candidate_root(), self.leader_value)
|
||||
}
|
||||
}
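// --- Illustrative helper (added for clarity; not part of the original source). ---
// Candidate listing works by key prefix: this hypothetical helper mirrors how
// `get_value_with_lease_by_prefix` binds its `LIKE` parameter (`<prefix>%`) to
// enumerate every key under the candidate root.
#[allow(dead_code)]
fn candidate_like_pattern(store_key_prefix: &str) -> String {
    format!("{}{}%", store_key_prefix, CANDIDATES_ROOT)
}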
|
||||
|
||||
#[async_trait::async_trait]
|
||||
impl Election for MySqlElection {
|
||||
type Leader = LeaderValue;
|
||||
|
||||
fn is_leader(&self) -> bool {
|
||||
self.is_leader.load(Ordering::Relaxed)
|
||||
}
|
||||
|
||||
fn in_leader_infancy(&self) -> bool {
|
||||
self.leader_infancy
|
||||
.compare_exchange(true, false, Ordering::Relaxed, Ordering::Relaxed)
|
||||
.is_ok()
|
||||
}
|
||||
|
||||
async fn register_candidate(&self, node_info: &MetasrvNodeInfo) -> Result<()> {
|
||||
let key = self.candidate_key();
|
||||
let node_info =
|
||||
serde_json::to_string(node_info).with_context(|_| SerializeToJsonSnafu {
|
||||
input: format!("{node_info:?}"),
|
||||
})?;
|
||||
|
||||
{
|
||||
let client = self.client.lock().await;
|
||||
let mut executor = Executor::Default(client);
|
||||
let res = self
|
||||
.put_value_with_lease(
|
||||
&key,
|
||||
&node_info,
|
||||
self.candidate_lease_ttl_secs,
|
||||
&mut executor,
|
||||
)
|
||||
.await?;
|
||||
// May have been registered before; just update the lease.
|
||||
if !res {
|
||||
warn!("Candidate already registered, update the lease");
|
||||
self.delete_value(&key, &mut executor).await?;
|
||||
self.put_value_with_lease(
|
||||
&key,
|
||||
&node_info,
|
||||
self.candidate_lease_ttl_secs,
|
||||
&mut executor,
|
||||
)
|
||||
.await?;
|
||||
}
|
||||
}
|
||||
|
||||
// Check if the current lease has expired and renew the lease.
|
||||
let mut keep_alive_interval =
|
||||
tokio::time::interval(Duration::from_secs(self.candidate_lease_ttl_secs / 2));
|
||||
loop {
|
||||
let _ = keep_alive_interval.tick().await;
|
||||
let client = self.client.lock().await;
|
||||
let mut executor = Executor::Default(client);
|
||||
let lease = self
|
||||
.get_value_with_lease(&key, &mut executor)
|
||||
.await?
|
||||
.unwrap_or_default();
|
||||
|
||||
ensure!(
|
||||
lease.expire_time > lease.current,
|
||||
UnexpectedSnafu {
|
||||
violated: format!(
|
||||
"Candidate lease expired at {:?} (current time: {:?}), key: {:?}",
|
||||
lease.expire_time,
|
||||
lease.current,
|
||||
String::from_utf8_lossy(&key.into_bytes())
|
||||
),
|
||||
}
|
||||
);
|
||||
|
||||
self.update_value_with_lease(&key, &lease.origin, &node_info, &mut executor)
|
||||
.await?;
|
||||
std::mem::drop(executor);
|
||||
}
|
||||
}
|
||||
|
||||
async fn all_candidates(&self) -> Result<Vec<MetasrvNodeInfo>> {
|
||||
let key_prefix = self.candidate_root();
|
||||
let client = self.client.lock().await;
|
||||
let mut executor = Executor::Default(client);
|
||||
let (mut candidates, current) = self
|
||||
.get_value_with_lease_by_prefix(&key_prefix, &mut executor)
|
||||
.await?;
|
||||
// Remove expired candidates
|
||||
candidates.retain(|c| c.1 > current);
|
||||
let mut valid_candidates = Vec::with_capacity(candidates.len());
|
||||
for (c, _) in candidates {
|
||||
let node_info: MetasrvNodeInfo =
|
||||
serde_json::from_str(&c).with_context(|_| DeserializeFromJsonSnafu {
|
||||
input: format!("{:?}", c),
|
||||
})?;
|
||||
valid_candidates.push(node_info);
|
||||
}
|
||||
Ok(valid_candidates)
|
||||
}
|
||||
|
||||
async fn campaign(&self) -> Result<()> {
|
||||
let mut keep_alive_interval =
|
||||
tokio::time::interval(Duration::from_secs(META_KEEP_ALIVE_INTERVAL_SECS));
|
||||
keep_alive_interval.set_missed_tick_behavior(MissedTickBehavior::Delay);
|
||||
loop {
|
||||
let _ = self.do_campaign(&mut keep_alive_interval).await;
|
||||
}
|
||||
}
|
||||
|
||||
async fn leader(&self) -> Result<Self::Leader> {
|
||||
if self.is_leader.load(Ordering::Relaxed) {
|
||||
Ok(self.leader_value.as_bytes().into())
|
||||
} else {
|
||||
let key = self.election_key();
|
||||
|
||||
let client = self.client.lock().await;
|
||||
let mut executor = Executor::Default(client);
|
||||
if let Some(lease) = self.get_value_with_lease(&key, &mut executor).await? {
|
||||
ensure!(lease.expire_time > lease.current, NoLeaderSnafu);
|
||||
Ok(lease.leader_value.as_bytes().into())
|
||||
} else {
|
||||
NoLeaderSnafu.fail()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
async fn resign(&self) -> Result<()> {
|
||||
todo!()
|
||||
}
|
||||
|
||||
fn subscribe_leader_change(&self) -> broadcast::Receiver<LeaderChangeMessage> {
|
||||
self.leader_watcher.subscribe()
|
||||
}
|
||||
}
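// --- Illustrative sketch (added for clarity; not part of the original source). ---
// Hypothetical consumer of the broadcast channel returned by
// `subscribe_leader_change`, e.g. to trigger leader/follower specific logic.
#[allow(dead_code)]
async fn watch_leader_changes_example(election: ElectionRef) {
    let mut receiver = election.subscribe_leader_change();
    while receiver.recv().await.is_ok() {
        // React to `Elected` / `StepDown` notifications here.
    }
}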
|
||||
|
||||
impl MySqlElection {
|
||||
/// Returns the lease (leader value, expire time, current time and the original lease string) for the given key, if any.
|
||||
async fn get_value_with_lease(
|
||||
&self,
|
||||
key: &str,
|
||||
executor: &mut Executor<'_>,
|
||||
) -> Result<Option<Lease>> {
|
||||
let key = key.as_bytes();
|
||||
let query = sqlx::query(&self.sql_set.get_value_with_lease).bind(key);
|
||||
let res = executor
|
||||
.query(query, &self.sql_set.get_value_with_lease)
|
||||
.await?;
|
||||
|
||||
if res.is_empty() {
|
||||
return Ok(None);
|
||||
}
|
||||
// Safety: Checked if res is empty above.
|
||||
let current_time_str = String::from_utf8_lossy(res[0].try_get(1).unwrap());
|
||||
let current_time = match Timestamp::from_str(¤t_time_str, None) {
|
||||
Ok(ts) => ts,
|
||||
Err(_) => UnexpectedSnafu {
|
||||
violated: format!("Invalid timestamp: {}", current_time_str),
|
||||
}
|
||||
.fail()?,
|
||||
};
|
||||
// Safety: Checked if res is empty above.
|
||||
let value_and_expire_time = String::from_utf8_lossy(res[0].try_get(0).unwrap_or_default());
|
||||
let (value, expire_time) = parse_value_and_expire_time(&value_and_expire_time)?;
|
||||
|
||||
Ok(Some(Lease {
|
||||
leader_value: value,
|
||||
expire_time,
|
||||
current: current_time,
|
||||
origin: value_and_expire_time.to_string(),
|
||||
}))
|
||||
}
|
||||
|
||||
/// Returns all values and expire time with the given key prefix. Also returns the current time.
|
||||
async fn get_value_with_lease_by_prefix(
|
||||
&self,
|
||||
key_prefix: &str,
|
||||
executor: &mut Executor<'_>,
|
||||
) -> Result<(Vec<(String, Timestamp)>, Timestamp)> {
|
||||
let key_prefix = format!("{}%", key_prefix).as_bytes().to_vec();
|
||||
let query = sqlx::query(&self.sql_set.get_value_with_lease_by_prefix).bind(key_prefix);
|
||||
let res = executor
|
||||
.query(query, &self.sql_set.get_value_with_lease_by_prefix)
|
||||
.await?;
|
||||
|
||||
let mut values_with_leases = vec![];
|
||||
let mut current = Timestamp::default();
|
||||
for row in res {
|
||||
let current_time_str = row.try_get(1).unwrap_or_default();
|
||||
current = match Timestamp::from_str(current_time_str, None) {
|
||||
Ok(ts) => ts,
|
||||
Err(_) => UnexpectedSnafu {
|
||||
violated: format!("Invalid timestamp: {}", current_time_str),
|
||||
}
|
||||
.fail()?,
|
||||
};
|
||||
|
||||
let value_and_expire_time = String::from_utf8_lossy(row.try_get(0).unwrap_or_default());
|
||||
let (value, expire_time) = parse_value_and_expire_time(&value_and_expire_time)?;
|
||||
|
||||
values_with_leases.push((value, expire_time));
|
||||
}
|
||||
Ok((values_with_leases, current))
|
||||
}
|
||||
|
||||
async fn update_value_with_lease(
|
||||
&self,
|
||||
key: &str,
|
||||
prev: &str,
|
||||
updated: &str,
|
||||
executor: &mut Executor<'_>,
|
||||
) -> Result<()> {
|
||||
let key = key.as_bytes();
|
||||
let prev = prev.as_bytes();
|
||||
let updated = updated.as_bytes();
|
||||
|
||||
let query = sqlx::query(&self.sql_set.update_value_with_lease)
|
||||
.bind(updated)
|
||||
.bind(self.candidate_lease_ttl_secs as f64)
|
||||
.bind(key)
|
||||
.bind(prev);
|
||||
let res = executor
|
||||
.execute(query, &self.sql_set.update_value_with_lease)
|
||||
.await?;
|
||||
|
||||
ensure!(
|
||||
res == 1,
|
||||
UnexpectedSnafu {
|
||||
violated: format!("Failed to update key: {}", String::from_utf8_lossy(key)),
|
||||
}
|
||||
);
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Returns `true` if the insertion is successful
|
||||
async fn put_value_with_lease(
|
||||
&self,
|
||||
key: &str,
|
||||
value: &str,
|
||||
lease_ttl_secs: u64,
|
||||
executor: &mut Executor<'_>,
|
||||
) -> Result<bool> {
|
||||
let key = key.as_bytes();
|
||||
let lease_ttl_secs = lease_ttl_secs as f64;
|
||||
let query = sqlx::query(&self.sql_set.put_value_with_lease)
|
||||
.bind(key)
|
||||
.bind(value)
|
||||
.bind(lease_ttl_secs);
|
||||
let res = executor
|
||||
.query(query, &self.sql_set.put_value_with_lease)
|
||||
.await?;
|
||||
Ok(res.is_empty())
|
||||
}
|
||||
|
||||
/// Returns `true` if the deletion is successful.
|
||||
/// Caution: Should only delete the key if the lease is expired.
|
||||
async fn delete_value(&self, key: &str, executor: &mut Executor<'_>) -> Result<bool> {
|
||||
let key = key.as_bytes();
|
||||
let query = sqlx::query(&self.sql_set.delete_value).bind(key);
|
||||
let res = executor.execute(query, &self.sql_set.delete_value).await?;
|
||||
|
||||
Ok(res == 1)
|
||||
}
|
||||
|
||||
/// Attempts to acquire leadership by executing a campaign. This function continuously checks
|
||||
/// if the current lease is still valid.
|
||||
async fn do_campaign(&self, interval: &mut Interval) -> Result<()> {
|
||||
// Need to restrict the scope of the client to avoid ambiguous overloads.
|
||||
use sqlx::Acquire;
|
||||
|
||||
loop {
|
||||
let client = self.client.lock().await;
|
||||
let executor = Executor::Default(client);
|
||||
let mut lease = Lease::default();
|
||||
match (
|
||||
self.lease_check(executor, &mut lease).await,
|
||||
self.is_leader(),
|
||||
) {
|
||||
// If the leader lease is valid and I'm the leader, renew the lease.
|
||||
(Ok(_), true) => {
|
||||
let mut client = self.client.lock().await;
|
||||
let txn = client
|
||||
.begin()
|
||||
.await
|
||||
.context(MySqlExecutionSnafu { sql: "BEGIN" })?;
|
||||
let mut executor = Executor::Txn(txn);
|
||||
let query = sqlx::query(&self.sql_set.campaign);
|
||||
executor.query(query, &self.sql_set.campaign).await?;
|
||||
self.renew_lease(executor, lease).await?;
|
||||
}
|
||||
// If the leader lease expires and I'm the leader, notify the leader watcher and step down.
|
||||
// Another instance should be elected as the leader in this case.
|
||||
(Err(_), true) => {
|
||||
warn!("Leader lease expired, re-initiate the campaign");
|
||||
self.step_down_without_lock().await?;
|
||||
}
|
||||
// If the leader lease expires and I'm not the leader, elect myself.
|
||||
(Err(_), false) => {
|
||||
warn!("Leader lease expired, re-initiate the campaign");
|
||||
let mut client = self.client.lock().await;
|
||||
let txn = client
|
||||
.begin()
|
||||
.await
|
||||
.context(MySqlExecutionSnafu { sql: "BEGIN" })?;
|
||||
let mut executor = Executor::Txn(txn);
|
||||
let query = sqlx::query(&self.sql_set.campaign);
|
||||
executor.query(query, &self.sql_set.campaign).await?;
|
||||
self.elected(&mut executor).await?;
|
||||
executor.commit().await?;
|
||||
}
|
||||
// If the leader lease is valid and I'm not the leader, do nothing.
|
||||
(Ok(_), false) => {}
|
||||
}
|
||||
interval.tick().await;
|
||||
}
|
||||
}
|
||||
|
||||
/// Renew the lease
|
||||
async fn renew_lease(&self, mut executor: Executor<'_>, lease: Lease) -> Result<()> {
|
||||
let key = self.election_key();
|
||||
self.update_value_with_lease(&key, &lease.origin, &self.leader_value, &mut executor)
|
||||
.await?;
|
||||
executor.commit().await?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Performs a lease check during the election process.
|
||||
///
|
||||
/// This function performs the following checks and actions:
|
||||
///
|
||||
/// - **Case 1**: If the current instance is not the leader but the lease has expired, it raises an error
|
||||
/// to re-initiate the campaign. If the leader failed to renew the lease, its session will expire and the lock
|
||||
/// will be released.
|
||||
/// - **Case 2**: If all checks pass, the function returns without performing any actions.
|
||||
async fn lease_check(&self, mut executor: Executor<'_>, lease: &mut Lease) -> Result<()> {
|
||||
let key = self.election_key();
|
||||
let check_lease = self
|
||||
.get_value_with_lease(&key, &mut executor)
|
||||
.await?
|
||||
.context(NoLeaderSnafu)?;
|
||||
*lease = check_lease;
|
||||
// Case 1: Lease expired
|
||||
ensure!(lease.expire_time > lease.current, NoLeaderSnafu);
|
||||
// Case 2: Everything is fine
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Still consider itself as the leader locally but failed to acquire the lock. Step down without deleting the key.
|
||||
async fn step_down_without_lock(&self) -> Result<()> {
|
||||
let key = self.election_key().into_bytes();
|
||||
let leader_key = MySqlLeaderKey {
|
||||
name: self.leader_value.clone().into_bytes(),
|
||||
key: key.clone(),
|
||||
..Default::default()
|
||||
};
|
||||
if self
|
||||
.is_leader
|
||||
.compare_exchange(true, false, Ordering::Relaxed, Ordering::Relaxed)
|
||||
.is_ok()
|
||||
{
|
||||
if let Err(e) = self
|
||||
.leader_watcher
|
||||
.send(LeaderChangeMessage::StepDown(Arc::new(leader_key)))
|
||||
{
|
||||
error!(e; "Failed to send leader change message");
|
||||
}
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Elected as leader. The leader should put the key and notify the leader watcher.
|
||||
/// Caution: Should only be elected while holding the lock.
|
||||
async fn elected(&self, executor: &mut Executor<'_>) -> Result<()> {
|
||||
let key = self.election_key();
|
||||
let leader_key = MySqlLeaderKey {
|
||||
name: self.leader_value.clone().into_bytes(),
|
||||
key: key.clone().into_bytes(),
|
||||
..Default::default()
|
||||
};
|
||||
self.delete_value(&key, executor).await?;
|
||||
self.put_value_with_lease(&key, &self.leader_value, META_LEASE_SECS, executor)
|
||||
.await?;
|
||||
|
||||
if self
|
||||
.is_leader
|
||||
.compare_exchange(false, true, Ordering::Relaxed, Ordering::Relaxed)
|
||||
.is_ok()
|
||||
{
|
||||
self.leader_infancy.store(true, Ordering::Relaxed);
|
||||
|
||||
if let Err(e) = self
|
||||
.leader_watcher
|
||||
.send(LeaderChangeMessage::Elected(Arc::new(leader_key)))
|
||||
{
|
||||
error!(e; "Failed to send leader change message");
|
||||
}
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Check if the MySQL version is supported.
|
||||
async fn check_version(client: &mut MySqlConnection, sql: &str) -> Result<()> {
|
||||
let query = sqlx::query(sql);
|
||||
match query.fetch_one(client).await {
|
||||
Ok(row) => {
|
||||
let version: String = row.try_get(0).unwrap();
|
||||
if !version.starts_with("8.0") && !version.starts_with("5.7") {
|
||||
warn!(
|
||||
"Unsupported MySQL version: {}, expected: [5.7, 8.0]",
|
||||
version
|
||||
);
|
||||
}
|
||||
}
|
||||
Err(e) => {
|
||||
warn!(e; "Failed to check MySQL version through sql: {}", sql);
|
||||
}
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
}
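// --- Illustrative check (added for clarity; not part of the original source). ---
// The warning in `check_version` should only fire for servers outside the
// supported 5.7 / 8.0 families; a stand-alone check of that predicate:
#[cfg(test)]
mod version_predicate_example {
    #[test]
    fn warns_only_for_unsupported_versions() {
        let unsupported = |v: &str| !v.starts_with("8.0") && !v.starts_with("5.7");
        assert!(!unsupported("8.0.36"));
        assert!(!unsupported("5.7.44"));
        assert!(unsupported("10.6.17-MariaDB"));
    }
}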
|
||||
@@ -109,10 +109,10 @@ impl<'a> ElectionSqlFactory<'a> {
|
||||
}
|
||||
}
|
||||
|
||||
// Currently the session timeout is longer than the leader lease time, so the leader lease may expire while the session is still alive.
|
||||
// Either the leader reconnects and step down or the session expires and the lock is released.
|
||||
fn set_idle_session_timeout_sql(&self) -> &str {
|
||||
"SET idle_session_timeout = '10s';"
|
||||
// Currently the session timeout is longer than the leader lease time.
|
||||
// So the leader will renew the lease twice before the session timeout if everything goes well.
|
||||
fn set_idle_session_timeout_sql(&self) -> String {
|
||||
format!("SET idle_session_timeout = '{}s';", META_LEASE_SECS + 1)
|
||||
}
|
||||
|
||||
fn campaign_sql(&self) -> String {
|
||||
@@ -126,9 +126,9 @@ impl<'a> ElectionSqlFactory<'a> {
|
||||
fn put_value_with_lease_sql(&self) -> String {
|
||||
format!(
|
||||
r#"WITH prev AS (
|
||||
SELECT k, v FROM {} WHERE k = $1
|
||||
SELECT k, v FROM "{}" WHERE k = $1
|
||||
), insert AS (
|
||||
INSERT INTO {}
|
||||
INSERT INTO "{}"
|
||||
VALUES($1, convert_to($2 || '{}' || TO_CHAR(CURRENT_TIMESTAMP + INTERVAL '1 second' * $3, 'YYYY-MM-DD HH24:MI:SS.MS'), 'UTF8'))
|
||||
ON CONFLICT (k) DO NOTHING
|
||||
)
|
||||
@@ -140,7 +140,7 @@ impl<'a> ElectionSqlFactory<'a> {
|
||||
|
||||
fn update_value_with_lease_sql(&self) -> String {
|
||||
format!(
|
||||
r#"UPDATE {}
|
||||
r#"UPDATE "{}"
|
||||
SET v = convert_to($3 || '{}' || TO_CHAR(CURRENT_TIMESTAMP + INTERVAL '1 second' * $4, 'YYYY-MM-DD HH24:MI:SS.MS'), 'UTF8')
|
||||
WHERE k = $1 AND v = $2"#,
|
||||
self.table_name, LEASE_SEP
|
||||
@@ -149,21 +149,21 @@ impl<'a> ElectionSqlFactory<'a> {
|
||||
|
||||
fn get_value_with_lease_sql(&self) -> String {
|
||||
format!(
|
||||
r#"SELECT v, TO_CHAR(CURRENT_TIMESTAMP, 'YYYY-MM-DD HH24:MI:SS.MS') FROM {} WHERE k = $1"#,
|
||||
r#"SELECT v, TO_CHAR(CURRENT_TIMESTAMP, 'YYYY-MM-DD HH24:MI:SS.MS') FROM "{}" WHERE k = $1"#,
|
||||
self.table_name
|
||||
)
|
||||
}
|
||||
|
||||
fn get_value_with_lease_by_prefix_sql(&self) -> String {
|
||||
format!(
|
||||
r#"SELECT v, TO_CHAR(CURRENT_TIMESTAMP, 'YYYY-MM-DD HH24:MI:SS.MS') FROM {} WHERE k LIKE $1"#,
|
||||
r#"SELECT v, TO_CHAR(CURRENT_TIMESTAMP, 'YYYY-MM-DD HH24:MI:SS.MS') FROM "{}" WHERE k LIKE $1"#,
|
||||
self.table_name
|
||||
)
|
||||
}
|
||||
|
||||
fn delete_value_sql(&self) -> String {
|
||||
format!(
|
||||
"DELETE FROM {} WHERE k = $1 RETURNING k,v;",
|
||||
"DELETE FROM \"{}\" WHERE k = $1 RETURNING k,v;",
|
||||
self.table_name
|
||||
)
|
||||
}
|
||||
@@ -241,7 +241,7 @@ impl PgElection {
|
||||
let sql_factory = ElectionSqlFactory::new(lock_id, table_name);
|
||||
// Set idle session timeout to IDLE_SESSION_TIMEOUT to avoid dead advisory lock.
|
||||
client
|
||||
.execute(sql_factory.set_idle_session_timeout_sql(), &[])
|
||||
.execute(&sql_factory.set_idle_session_timeout_sql(), &[])
|
||||
.await
|
||||
.context(PostgresExecutionSnafu)?;
|
||||
|
||||
@@ -285,7 +285,6 @@ impl Election for PgElection {
|
||||
.is_ok()
|
||||
}
|
||||
|
||||
/// TODO(CookiePie): Split the candidate registration and keep alive logic into separate methods, so that upper layers can call them separately.
|
||||
async fn register_candidate(&self, node_info: &MetasrvNodeInfo) -> Result<()> {
|
||||
let key = self.candidate_key();
|
||||
let node_info =
|
||||
@@ -317,7 +316,9 @@ impl Election for PgElection {
|
||||
prev_expire_time > current_time,
|
||||
UnexpectedSnafu {
|
||||
violated: format!(
|
||||
"Candidate lease expired, key: {:?}",
|
||||
"Candidate lease expired at {:?} (current time {:?}), key: {:?}",
|
||||
prev_expire_time,
|
||||
current_time,
|
||||
String::from_utf8_lossy(&key.into_bytes())
|
||||
),
|
||||
}
|
||||
@@ -369,23 +370,19 @@ impl Election for PgElection {
|
||||
.query(&self.sql_set.campaign, &[])
|
||||
.await
|
||||
.context(PostgresExecutionSnafu)?;
|
||||
if let Some(row) = res.first() {
|
||||
match row.try_get(0) {
|
||||
Ok(true) => self.leader_action().await?,
|
||||
Ok(false) => self.follower_action().await?,
|
||||
Err(_) => {
|
||||
return UnexpectedSnafu {
|
||||
violated: "Failed to get the result of acquiring advisory lock"
|
||||
.to_string(),
|
||||
}
|
||||
.fail();
|
||||
}
|
||||
let row = res.first().context(UnexpectedSnafu {
|
||||
violated: "Failed to get the result of acquiring advisory lock",
|
||||
})?;
|
||||
let is_leader = row.try_get(0).map_err(|_| {
|
||||
UnexpectedSnafu {
|
||||
violated: "Failed to get the result of get lock",
|
||||
}
|
||||
.build()
|
||||
})?;
|
||||
if is_leader {
|
||||
self.leader_action().await?;
|
||||
} else {
|
||||
return UnexpectedSnafu {
|
||||
violated: "Failed to get the result of acquiring advisory lock".to_string(),
|
||||
}
|
||||
.fail();
|
||||
self.follower_action().await?;
|
||||
}
|
||||
let _ = keep_alive_interval.tick().await;
|
||||
}
|
||||
@@ -747,7 +744,7 @@ mod tests {
|
||||
});
|
||||
if let Some(table_name) = table_name {
|
||||
let create_table_sql = format!(
|
||||
"CREATE TABLE IF NOT EXISTS {}(k bytea PRIMARY KEY, v bytea);",
|
||||
"CREATE TABLE IF NOT EXISTS \"{}\"(k bytea PRIMARY KEY, v bytea);",
|
||||
table_name
|
||||
);
|
||||
client.execute(&create_table_sql, &[]).await.unwrap();
|
||||
@@ -756,7 +753,7 @@ mod tests {
|
||||
}
|
||||
|
||||
async fn drop_table(client: &Client, table_name: &str) {
|
||||
let sql = format!("DROP TABLE IF EXISTS {};", table_name);
|
||||
let sql = format!("DROP TABLE IF EXISTS \"{}\";", table_name);
|
||||
client.execute(&sql, &[]).await.unwrap();
|
||||
}
|
||||
|
||||
|
||||
@@ -343,6 +343,16 @@ pub enum Error {
|
||||
location: Location,
|
||||
},
|
||||
|
||||
#[cfg(feature = "mysql_kvbackend")]
|
||||
#[snafu(display("Failed to parse mysql url: {}", mysql_url))]
|
||||
ParseMySqlUrl {
|
||||
#[snafu(source)]
|
||||
error: sqlx::error::Error,
|
||||
mysql_url: String,
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to find table route for {table_id}"))]
|
||||
TableRouteNotFound {
|
||||
table_id: TableId,
|
||||
@@ -729,6 +739,34 @@ pub enum Error {
|
||||
location: Location,
|
||||
},
|
||||
|
||||
#[cfg(feature = "mysql_kvbackend")]
|
||||
#[snafu(display("Failed to execute via mysql, sql: {}", sql))]
|
||||
MySqlExecution {
|
||||
#[snafu(source)]
|
||||
error: sqlx::Error,
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
sql: String,
|
||||
},
|
||||
|
||||
#[cfg(feature = "mysql_kvbackend")]
|
||||
#[snafu(display("Failed to create mysql pool"))]
|
||||
CreateMySqlPool {
|
||||
#[snafu(source)]
|
||||
error: sqlx::Error,
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
},
|
||||
|
||||
#[cfg(feature = "mysql_kvbackend")]
|
||||
#[snafu(display("Failed to connect to mysql"))]
|
||||
ConnectMySql {
|
||||
#[snafu(source)]
|
||||
error: sqlx::Error,
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
},
|
||||
|
||||
#[snafu(display("Handler not found: {}", name))]
|
||||
HandlerNotFound {
|
||||
name: String,
|
||||
@@ -911,6 +949,11 @@ impl ErrorExt for Error {
|
||||
| Error::GetPostgresConnection { .. }
|
||||
| Error::PostgresExecution { .. }
|
||||
| Error::ConnectPostgres { .. } => StatusCode::Internal,
|
||||
#[cfg(feature = "mysql_kvbackend")]
|
||||
Error::MySqlExecution { .. }
|
||||
| Error::CreateMySqlPool { .. }
|
||||
| Error::ConnectMySql { .. }
|
||||
| Error::ParseMySqlUrl { .. } => StatusCode::Internal,
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -153,7 +153,7 @@ fn extract_base_info(request: &HeartbeatRequest) -> Option<(NodeInfoKey, Peer, P
|
||||
return None;
|
||||
};
|
||||
|
||||
Some((key, Peer::from(peer.clone()), info.clone()))
|
||||
Some((key, peer.clone(), info.clone()))
|
||||
}
|
||||
|
||||
async fn put_into_memory_store(ctx: &mut Context, key: NodeInfoKey, value: NodeInfo) -> Result<()> {
|
||||
|
||||
@@ -70,7 +70,7 @@ impl HeartbeatHandler for RemapFlowPeerHandler {
|
||||
|
||||
async fn rewrite_node_address(ctx: &mut Context, peer: &Peer) {
|
||||
let key = NodeAddressKey::with_flownode(peer.id).to_bytes();
|
||||
if let Ok(value) = NodeAddressValue::new(peer.clone().into()).try_as_raw_value() {
|
||||
if let Ok(value) = NodeAddressValue::new(peer.clone()).try_as_raw_value() {
|
||||
let put = PutRequest {
|
||||
key,
|
||||
value,
|
||||
|
||||
@@ -70,11 +70,11 @@ use crate::state::{become_follower, become_leader, StateRef};
|
||||
|
||||
pub const TABLE_ID_SEQ: &str = "table_id";
|
||||
pub const FLOW_ID_SEQ: &str = "flow_id";
|
||||
pub const METASRV_HOME: &str = "/tmp/metasrv";
|
||||
pub const METASRV_HOME: &str = "./greptimedb_data/metasrv";
|
||||
|
||||
#[cfg(feature = "pg_kvbackend")]
|
||||
#[cfg(any(feature = "pg_kvbackend", feature = "mysql_kvbackend"))]
|
||||
pub const DEFAULT_META_TABLE_NAME: &str = "greptime_metakv";
|
||||
#[cfg(feature = "pg_kvbackend")]
|
||||
#[cfg(any(feature = "pg_kvbackend", feature = "mysql_kvbackend"))]
|
||||
pub const DEFAULT_META_ELECTION_LOCK_ID: u64 = 1;
|
||||
|
||||
// The datastores that implements metadata kvbackend.
|
||||
@@ -89,6 +89,9 @@ pub enum BackendImpl {
|
||||
#[cfg(feature = "pg_kvbackend")]
|
||||
// Postgres as metadata storage.
|
||||
PostgresStore,
|
||||
#[cfg(feature = "mysql_kvbackend")]
|
||||
// MySql as metadata storage.
|
||||
MysqlStore,
|
||||
}
|
||||
|
||||
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
|
||||
@@ -146,7 +149,7 @@ pub struct MetasrvOptions {
|
||||
pub tracing: TracingOptions,
|
||||
/// The datastore for kv metadata.
|
||||
pub backend: BackendImpl,
|
||||
#[cfg(feature = "pg_kvbackend")]
|
||||
#[cfg(any(feature = "pg_kvbackend", feature = "mysql_kvbackend"))]
|
||||
/// Table name of rds kv backend.
|
||||
pub meta_table_name: String,
|
||||
#[cfg(feature = "pg_kvbackend")]
|
||||
@@ -191,7 +194,7 @@ impl Default for MetasrvOptions {
|
||||
flush_stats_factor: 3,
|
||||
tracing: TracingOptions::default(),
|
||||
backend: BackendImpl::EtcdStore,
|
||||
#[cfg(feature = "pg_kvbackend")]
|
||||
#[cfg(any(feature = "pg_kvbackend", feature = "mysql_kvbackend"))]
|
||||
meta_table_name: DEFAULT_META_TABLE_NAME.to_string(),
|
||||
#[cfg(feature = "pg_kvbackend")]
|
||||
meta_election_lock_id: DEFAULT_META_ELECTION_LOCK_ID,
|
||||
|
||||
@@ -59,7 +59,7 @@ pub mod engine;
|
||||
pub mod error;
|
||||
mod metadata_region;
|
||||
mod metrics;
|
||||
mod row_modifier;
|
||||
pub mod row_modifier;
|
||||
#[cfg(test)]
|
||||
mod test_util;
|
||||
mod utils;
|
||||
|
||||
@@ -40,7 +40,7 @@ const TSID_HASH_SEED: u32 = 846793005;
|
||||
///
|
||||
/// - For [`PrimaryKeyEncoding::Dense`] encoding,
|
||||
/// it adds two columns(`__table_id`, `__tsid`) to the row.
|
||||
pub struct RowModifier {
|
||||
pub(crate) struct RowModifier {
|
||||
codec: SparsePrimaryKeyCodec,
|
||||
}
|
||||
|
||||
@@ -52,7 +52,7 @@ impl RowModifier {
|
||||
}
|
||||
|
||||
/// Modify rows with the given primary key encoding.
|
||||
pub fn modify_rows(
|
||||
pub(crate) fn modify_rows(
|
||||
&self,
|
||||
iter: RowsIter,
|
||||
table_id: TableId,
|
||||
@@ -145,16 +145,14 @@ impl RowModifier {
|
||||
|
||||
/// Fills internal columns of a row with the table id and a hash of tag values (the TSID).
|
||||
fn fill_internal_columns(&self, table_id: TableId, iter: &RowIter<'_>) -> (Value, Value) {
|
||||
let mut hasher = mur3::Hasher128::with_seed(TSID_HASH_SEED);
|
||||
let mut hasher = TsidGenerator::default();
|
||||
for (name, value) in iter.primary_keys_with_name() {
|
||||
// The type is checked before. So only null is ignored.
|
||||
if let Some(ValueData::StringValue(string)) = &value.value_data {
|
||||
name.hash(&mut hasher);
|
||||
string.hash(&mut hasher);
|
||||
hasher.write_label(name, string);
|
||||
}
|
||||
}
|
||||
// TSID is 64 bits, simply truncate the 128 bits hash
|
||||
let (hash, _) = hasher.finish128();
|
||||
let hash = hasher.finish();
|
||||
|
||||
(
|
||||
ValueData::U32Value(table_id).into(),
|
||||
@@ -163,6 +161,34 @@ impl RowModifier {
|
||||
}
|
||||
}
|
||||
|
||||
/// Tsid generator.
|
||||
pub struct TsidGenerator {
|
||||
hasher: mur3::Hasher128,
|
||||
}
|
||||
|
||||
impl Default for TsidGenerator {
|
||||
fn default() -> Self {
|
||||
Self {
|
||||
hasher: mur3::Hasher128::with_seed(TSID_HASH_SEED),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl TsidGenerator {
|
||||
/// Writes a label pair to the generator.
|
||||
pub fn write_label(&mut self, name: &str, value: &str) {
|
||||
name.hash(&mut self.hasher);
|
||||
value.hash(&mut self.hasher);
|
||||
}
|
||||
|
||||
/// Generates a new TSID.
|
||||
pub fn finish(&mut self) -> u64 {
|
||||
// TSID is 64 bits, simply truncate the 128 bits hash
|
||||
let (hash, _) = self.hasher.finish128();
|
||||
hash
|
||||
}
|
||||
}
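// --- Illustrative example (added for clarity; not part of the original source). ---
// The TSID is a pure function of the written label pairs, so the same labels
// always produce the same id; the label values here are hypothetical.
#[cfg(test)]
mod tsid_generator_example {
    use super::TsidGenerator;

    #[test]
    fn same_labels_produce_same_tsid() {
        let mut first = TsidGenerator::default();
        first.write_label("host", "node-1");
        first.write_label("job", "api");

        let mut second = TsidGenerator::default();
        second.write_label("host", "node-1");
        second.write_label("job", "api");

        assert_eq!(first.finish(), second.finish());
    }
}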
|
||||
|
||||
/// Index of a value.
|
||||
#[derive(Debug, Clone, Copy)]
|
||||
struct ValueIndex {
|
||||
|
||||
@@ -121,7 +121,7 @@ impl AccessLayer {
|
||||
/// Writes a SST with specific `file_id` and `metadata` to the layer.
|
||||
///
|
||||
/// Returns the info of the SST. If no data is written, returns `None`.
|
||||
pub(crate) async fn write_sst(
|
||||
pub async fn write_sst(
|
||||
&self,
|
||||
request: SstWriteRequest,
|
||||
write_opts: &WriteOptions,
|
||||
@@ -191,26 +191,26 @@ impl AccessLayer {
|
||||
|
||||
/// `OperationType` represents the origin of the `SstWriteRequest`.
|
||||
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
|
||||
pub(crate) enum OperationType {
|
||||
pub enum OperationType {
|
||||
Flush,
|
||||
Compact,
|
||||
}
|
||||
|
||||
/// Contents to build a SST.
|
||||
pub(crate) struct SstWriteRequest {
|
||||
pub(crate) op_type: OperationType,
|
||||
pub(crate) metadata: RegionMetadataRef,
|
||||
pub(crate) source: Source,
|
||||
pub(crate) cache_manager: CacheManagerRef,
|
||||
pub struct SstWriteRequest {
|
||||
pub op_type: OperationType,
|
||||
pub metadata: RegionMetadataRef,
|
||||
pub source: Source,
|
||||
pub cache_manager: CacheManagerRef,
|
||||
#[allow(dead_code)]
|
||||
pub(crate) storage: Option<String>,
|
||||
pub(crate) max_sequence: Option<SequenceNumber>,
|
||||
pub storage: Option<String>,
|
||||
pub max_sequence: Option<SequenceNumber>,
|
||||
|
||||
/// Configs for index
|
||||
pub(crate) index_options: IndexOptions,
|
||||
pub(crate) inverted_index_config: InvertedIndexConfig,
|
||||
pub(crate) fulltext_index_config: FulltextIndexConfig,
|
||||
pub(crate) bloom_filter_index_config: BloomFilterConfig,
|
||||
pub index_options: IndexOptions,
|
||||
pub inverted_index_config: InvertedIndexConfig,
|
||||
pub fulltext_index_config: FulltextIndexConfig,
|
||||
pub bloom_filter_index_config: BloomFilterConfig,
|
||||
}
|
||||
|
||||
pub(crate) async fn new_fs_cache_store(root: &str) -> Result<ObjectStore> {
|
||||
|
||||
@@ -46,6 +46,7 @@ const INDEX_CREATE_MEM_THRESHOLD_FACTOR: u64 = 16;
|
||||
pub(crate) const FETCH_OPTION_TIMEOUT: Duration = Duration::from_secs(3);
|
||||
|
||||
/// Configuration for [MitoEngine](crate::engine::MitoEngine).
|
||||
/// Before using the config, make sure to call `MitoConfig::validate()` to check if the config is valid.
|
||||
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Eq)]
|
||||
#[serde(default)]
|
||||
pub struct MitoConfig {
|
||||
|
||||
@@ -42,6 +42,14 @@ use crate::worker::WorkerId;
|
||||
#[snafu(visibility(pub))]
|
||||
#[stack_trace_debug]
|
||||
pub enum Error {
|
||||
#[snafu(display("External error, context: {}", context))]
|
||||
External {
|
||||
source: BoxedError,
|
||||
context: String,
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to encode sparse primary key, reason: {}", reason))]
|
||||
EncodeSparsePrimaryKey {
|
||||
reason: String,
|
||||
@@ -773,6 +781,50 @@ pub enum Error {
|
||||
#[snafu(display("checksum mismatch (actual: {}, expected: {})", actual, expected))]
|
||||
ChecksumMismatch { actual: u32, expected: u32 },
|
||||
|
||||
#[snafu(display(
|
||||
"No checkpoint found, region: {}, last_version: {}",
|
||||
region_id,
|
||||
last_version
|
||||
))]
|
||||
NoCheckpoint {
|
||||
region_id: RegionId,
|
||||
last_version: ManifestVersion,
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
},
|
||||
|
||||
#[snafu(display(
|
||||
"No manifests found in range: [{}..{}), region: {}, last_version: {}",
|
||||
start_version,
|
||||
end_version,
|
||||
region_id,
|
||||
last_version
|
||||
))]
|
||||
NoManifests {
|
||||
region_id: RegionId,
|
||||
start_version: ManifestVersion,
|
||||
end_version: ManifestVersion,
|
||||
last_version: ManifestVersion,
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
},
|
||||
|
||||
#[snafu(display(
|
||||
"Failed to install manifest to {}, region: {}, available manifest version: {}, last version: {}",
|
||||
target_version,
|
||||
available_version,
|
||||
region_id,
|
||||
last_version
|
||||
))]
|
||||
InstallManifestTo {
|
||||
region_id: RegionId,
|
||||
target_version: ManifestVersion,
|
||||
available_version: ManifestVersion,
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
last_version: ManifestVersion,
|
||||
},
|
||||
|
||||
#[snafu(display("Region {} is stopped", region_id))]
|
||||
RegionStopped {
|
||||
region_id: RegionId,
|
||||
@@ -1011,7 +1063,10 @@ impl ErrorExt for Error {
|
||||
| OperateAbortedIndex { .. }
|
||||
| UnexpectedReplay { .. }
|
||||
| IndexEncodeNull { .. }
|
||||
| UnexpectedImpureDefault { .. } => StatusCode::Unexpected,
|
||||
| UnexpectedImpureDefault { .. }
|
||||
| NoCheckpoint { .. }
|
||||
| NoManifests { .. }
|
||||
| InstallManifestTo { .. } => StatusCode::Unexpected,
|
||||
RegionNotFound { .. } => StatusCode::RegionNotFound,
|
||||
ObjectStoreNotFound { .. }
|
||||
| InvalidScanIndex { .. }
|
||||
@@ -1090,6 +1145,8 @@ impl ErrorExt for Error {
|
||||
InvalidConfig { .. } => StatusCode::InvalidArguments,
|
||||
StaleLogEntry { .. } => StatusCode::Unexpected,
|
||||
|
||||
External { source, .. } => source.status_code(),
|
||||
|
||||
FilterRecordBatch { source, .. } => source.status_code(),
|
||||
|
||||
Download { .. } | Upload { .. } => StatusCode::StorageUnavailable,
|
||||
|
||||
@@ -23,8 +23,8 @@
|
||||
#[cfg_attr(feature = "test", allow(unused))]
|
||||
pub mod test_util;
|
||||
|
||||
mod access_layer;
|
||||
mod cache;
|
||||
pub mod access_layer;
|
||||
pub mod cache;
|
||||
pub mod compaction;
|
||||
pub mod config;
|
||||
pub mod engine;
|
||||
|
||||
@@ -23,7 +23,9 @@ use snafu::{ensure, OptionExt, ResultExt};
|
||||
use store_api::manifest::{ManifestVersion, MAX_VERSION, MIN_VERSION};
|
||||
use store_api::metadata::RegionMetadataRef;
|
||||
|
||||
use crate::error::{self, RegionStoppedSnafu, Result};
|
||||
use crate::error::{
|
||||
self, InstallManifestToSnafu, NoCheckpointSnafu, NoManifestsSnafu, RegionStoppedSnafu, Result,
|
||||
};
|
||||
use crate::manifest::action::{
|
||||
RegionChange, RegionCheckpoint, RegionManifest, RegionManifestBuilder, RegionMetaAction,
|
||||
RegionMetaActionList,
|
||||
@@ -197,9 +199,9 @@ impl RegionManifestManager {
|
||||
let checkpoint = Self::last_checkpoint(&mut store).await?;
|
||||
let last_checkpoint_version = checkpoint
|
||||
.as_ref()
|
||||
.map(|checkpoint| checkpoint.last_version)
|
||||
.map(|(checkpoint, _)| checkpoint.last_version)
|
||||
.unwrap_or(MIN_VERSION);
|
||||
let mut manifest_builder = if let Some(checkpoint) = checkpoint {
|
||||
let mut manifest_builder = if let Some((checkpoint, _)) = checkpoint {
|
||||
info!(
|
||||
"Recover region manifest {} from checkpoint version {}",
|
||||
options.manifest_dir, checkpoint.last_version
|
||||
@@ -275,6 +277,153 @@ impl RegionManifestManager {
|
||||
self.stopped = true;
|
||||
}
|
||||
|
||||
/// Installs the manifest changes from the current version to the target version (inclusive).
|
||||
///
|
||||
/// Returns installed version.
|
||||
/// **Note**: This function is not guaranteed to install the target version strictly.
|
||||
/// The installed version may be greater than the target version.
|
||||
pub async fn install_manifest_to(
|
||||
&mut self,
|
||||
target_version: ManifestVersion,
|
||||
) -> Result<ManifestVersion> {
|
||||
let _t = MANIFEST_OP_ELAPSED
|
||||
.with_label_values(&["install_manifest_to"])
|
||||
.start_timer();
|
||||
|
||||
// Case 1: If the target version is less than the current version, return the current version.
|
||||
if self.last_version >= target_version {
|
||||
debug!(
|
||||
"Target version {} is less than or equal to the current version {}, region: {}, skip install",
|
||||
target_version, self.last_version, self.manifest.metadata.region_id
|
||||
);
|
||||
return Ok(self.last_version);
|
||||
}
|
||||
|
||||
ensure!(
|
||||
!self.stopped,
|
||||
RegionStoppedSnafu {
|
||||
region_id: self.manifest.metadata.region_id,
|
||||
}
|
||||
);
|
||||
|
||||
// Fetches manifests from the last version strictly.
|
||||
let mut manifests = self
|
||||
.store
|
||||
// Invariant: last_version < target_version.
|
||||
.fetch_manifests_strict_from(self.last_version + 1, target_version + 1)
|
||||
.await?;
|
||||
|
||||
// Case 2: No manifests in range: [current_version+1, target_version+1)
|
||||
//
|
||||
// |---------Has been deleted------------| [Checkpoint Version]...[Latest Version]
|
||||
// [Leader region]
|
||||
// [Current Version]......[Target Version]
|
||||
// [Follower region]
|
||||
if manifests.is_empty() {
|
||||
debug!(
|
||||
"Manifests are not strict from {}, region: {}, tries to install the last checkpoint",
|
||||
self.last_version, self.manifest.metadata.region_id
|
||||
);
|
||||
let last_version = self.install_last_checkpoint().await?;
|
||||
// Case 2.1: If the installed checkpoint version is greater than or equal to the target version, return the last version.
|
||||
if last_version >= target_version {
|
||||
return Ok(last_version);
|
||||
}
|
||||
|
||||
// Fetches manifests from the installed version strictly.
|
||||
manifests = self
|
||||
.store
|
||||
// Invariant: last_version < target_version.
|
||||
.fetch_manifests_strict_from(last_version + 1, target_version + 1)
|
||||
.await?;
|
||||
}
|
||||
|
||||
if manifests.is_empty() {
|
||||
return NoManifestsSnafu {
|
||||
region_id: self.manifest.metadata.region_id,
|
||||
start_version: self.last_version + 1,
|
||||
end_version: target_version + 1,
|
||||
last_version: self.last_version,
|
||||
}
|
||||
.fail();
|
||||
}
|
||||
|
||||
debug_assert_eq!(manifests.first().unwrap().0, self.last_version + 1);
|
||||
let mut manifest_builder =
|
||||
RegionManifestBuilder::with_checkpoint(Some(self.manifest.as_ref().clone()));
|
||||
|
||||
for (manifest_version, raw_action_list) in manifests {
|
||||
self.store
|
||||
.set_delta_file_size(manifest_version, raw_action_list.len() as u64);
|
||||
let action_list = RegionMetaActionList::decode(&raw_action_list)?;
|
||||
for action in action_list.actions {
|
||||
match action {
|
||||
RegionMetaAction::Change(action) => {
|
||||
manifest_builder.apply_change(manifest_version, action);
|
||||
}
|
||||
RegionMetaAction::Edit(action) => {
|
||||
manifest_builder.apply_edit(manifest_version, action);
|
||||
}
|
||||
RegionMetaAction::Remove(_) => {
|
||||
debug!(
|
||||
"Unhandled action for region {}, action: {:?}",
|
||||
self.manifest.metadata.region_id, action
|
||||
);
|
||||
}
|
||||
RegionMetaAction::Truncate(action) => {
|
||||
manifest_builder.apply_truncate(manifest_version, action);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
let new_manifest = manifest_builder.try_build()?;
|
||||
ensure!(
|
||||
new_manifest.manifest_version >= target_version,
|
||||
InstallManifestToSnafu {
|
||||
region_id: self.manifest.metadata.region_id,
|
||||
target_version,
|
||||
available_version: new_manifest.manifest_version,
|
||||
last_version: self.last_version,
|
||||
}
|
||||
);
|
||||
|
||||
let version = self.last_version;
|
||||
self.manifest = Arc::new(new_manifest);
|
||||
self.last_version = self.manifest.manifest_version;
|
||||
info!(
|
||||
"Install manifest changes from {} to {}, region: {}",
|
||||
version, self.last_version, self.manifest.metadata.region_id
|
||||
);
|
||||
|
||||
Ok(self.last_version)
|
||||
}
|
||||
|
||||
/// Installs the last checkpoint.
|
||||
pub(crate) async fn install_last_checkpoint(&mut self) -> Result<ManifestVersion> {
|
||||
let Some((checkpoint, checkpoint_size)) = Self::last_checkpoint(&mut self.store).await?
|
||||
else {
|
||||
return NoCheckpointSnafu {
|
||||
region_id: self.manifest.metadata.region_id,
|
||||
last_version: self.last_version,
|
||||
}
|
||||
.fail();
|
||||
};
|
||||
self.store.reset_manifest_size();
|
||||
self.store
|
||||
.set_checkpoint_file_size(checkpoint.last_version, checkpoint_size);
|
||||
let builder = RegionManifestBuilder::with_checkpoint(checkpoint.checkpoint);
|
||||
let manifest = builder.try_build()?;
|
||||
self.last_version = manifest.manifest_version;
|
||||
self.manifest = Arc::new(manifest);
|
||||
info!(
|
||||
"Installed region manifest from checkpoint: {}, region: {}",
|
||||
checkpoint.last_version, self.manifest.metadata.region_id
|
||||
);
|
||||
|
||||
Ok(self.last_version)
|
||||
}
|
||||
|
||||
/// Updates the manifest. Returns the current manifest version number.
|
||||
pub async fn update(&mut self, action_list: RegionMetaActionList) -> Result<ManifestVersion> {
|
||||
let _t = MANIFEST_OP_ELAPSED
|
||||
@@ -371,14 +520,17 @@ impl RegionManifestManager {
|
||||
}
|
||||
|
||||
/// Fetches the last [RegionCheckpoint] from storage.
|
||||
///
|
||||
/// If the checkpoint is not found, returns `None`.
|
||||
/// Otherwise, returns the checkpoint and the size of the checkpoint.
|
||||
pub(crate) async fn last_checkpoint(
|
||||
store: &mut ManifestObjectStore,
|
||||
) -> Result<Option<RegionCheckpoint>> {
|
||||
) -> Result<Option<(RegionCheckpoint, u64)>> {
|
||||
let last_checkpoint = store.load_last_checkpoint().await?;
|
||||
|
||||
if let Some((_, bytes)) = last_checkpoint {
|
||||
let checkpoint = RegionCheckpoint::decode(&bytes)?;
|
||||
Ok(Some(checkpoint))
|
||||
Ok(Some((checkpoint, bytes.len() as u64)))
|
||||
} else {
|
||||
Ok(None)
|
||||
}
|
||||
|
||||
@@ -236,7 +236,31 @@ impl ManifestObjectStore {
|
||||
Ok(entries)
|
||||
}
|
||||
|
||||
/// Fetch all manifests in concurrent.
|
||||
/// Fetches manifests in range [start_version, end_version).
|
||||
///
|
||||
/// The returned manifests are guaranteed to start strictly from `start_version` (i.e. the first entry is `start_version`); an empty list is returned if that version is no longer available.
|
||||
pub async fn fetch_manifests_strict_from(
|
||||
&self,
|
||||
start_version: ManifestVersion,
|
||||
end_version: ManifestVersion,
|
||||
) -> Result<Vec<(ManifestVersion, Vec<u8>)>> {
|
||||
let mut manifests = self.fetch_manifests(start_version, end_version).await?;
|
||||
let start_index = manifests.iter().position(|(v, _)| *v == start_version);
|
||||
debug!(
|
||||
"fetches manifests in range [{},{}), start_index: {:?}",
|
||||
start_version, end_version, start_index
|
||||
);
|
||||
if let Some(start_index) = start_index {
|
||||
Ok(manifests.split_off(start_index))
|
||||
} else {
|
||||
Ok(vec![])
|
||||
}
|
||||
}
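// --- Illustrative sketch (added for clarity; not part of the original source). ---
// The "strict from" behaviour above reduces to keeping the suffix that starts at
// the first entry whose version equals `start_version`, or nothing if that version
// has already been deleted. A stand-alone illustration over plain tuples:
#[allow(dead_code)]
fn strict_from_sketch() {
    let mut manifests: Vec<(ManifestVersion, Vec<u8>)> = vec![(7, vec![]), (8, vec![]), (9, vec![])];
    let start_version: ManifestVersion = 8;
    let strict = match manifests.iter().position(|(v, _)| *v == start_version) {
        Some(index) => manifests.split_off(index),
        None => Vec::new(),
    };
    assert_eq!(strict.first().map(|(v, _)| *v), Some(8));
}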
|
||||
|
||||
/// Fetches all manifests concurrently and returns those in range [start_version, end_version).
|
||||
///
|
||||
/// **Notes**: This function makes no guarantee that the returned manifests start strictly from `start_version`.
/// Use [fetch_manifests_strict_from](ManifestObjectStore::fetch_manifests_strict_from) to get manifests strictly from `start_version`.
|
||||
pub async fn fetch_manifests(
|
||||
&self,
|
||||
start_version: ManifestVersion,
|
||||
@@ -576,6 +600,12 @@ impl ManifestObjectStore {
|
||||
self.manifest_size_map.read().unwrap().values().sum()
|
||||
}
|
||||
|
||||
/// Resets the size of all files.
|
||||
pub(crate) fn reset_manifest_size(&mut self) {
|
||||
self.manifest_size_map.write().unwrap().clear();
|
||||
self.total_manifest_size.store(0, Ordering::Relaxed);
|
||||
}
|
||||
|
||||
/// Set the size of the delta file by delta version.
|
||||
pub(crate) fn set_delta_file_size(&mut self, version: ManifestVersion, size: u64) {
|
||||
let mut m = self.manifest_size_map.write().unwrap();
|
||||
@@ -585,7 +615,7 @@ impl ManifestObjectStore {
|
||||
}
|
||||
|
||||
/// Set the size of the checkpoint file by checkpoint version.
|
||||
fn set_checkpoint_file_size(&self, version: ManifestVersion, size: u64) {
|
||||
pub(crate) fn set_checkpoint_file_size(&self, version: ManifestVersion, size: u64) {
|
||||
let mut m = self.manifest_size_map.write().unwrap();
|
||||
m.insert(FileKey::Checkpoint(version), size);
|
||||
|
||||
@@ -595,6 +625,7 @@ impl ManifestObjectStore {
|
||||
fn unset_file_size(&self, key: &FileKey) {
|
||||
let mut m = self.manifest_size_map.write().unwrap();
|
||||
if let Some(val) = m.remove(key) {
|
||||
debug!("Unset file size: {:?}, size: {}", key, val);
|
||||
self.dec_total_manifest_size(val);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -44,6 +44,18 @@ async fn build_manager(
|
||||
(env, manager)
|
||||
}
|
||||
|
||||
async fn build_manager_with_initial_metadata(
|
||||
env: &TestEnv,
|
||||
checkpoint_distance: u64,
|
||||
compress_type: CompressionType,
|
||||
) -> RegionManifestManager {
|
||||
let metadata = Arc::new(basic_region_metadata());
|
||||
env.create_manifest_manager(compress_type, checkpoint_distance, Some(metadata.clone()))
|
||||
.await
|
||||
.unwrap()
|
||||
.unwrap()
|
||||
}
|
||||
|
||||
async fn reopen_manager(
|
||||
env: &TestEnv,
|
||||
checkpoint_distance: u64,
|
||||
@@ -265,4 +277,142 @@ async fn generate_checkpoint_with_compression_types(
|
||||
.await
|
||||
.unwrap()
|
||||
.unwrap()
|
||||
.0
|
||||
}
|
||||
|
||||
fn generate_action_lists(num: usize) -> (Vec<FileId>, Vec<RegionMetaActionList>) {
|
||||
let mut files = vec![];
|
||||
let mut actions = vec![];
|
||||
for _ in 0..num {
|
||||
let file_id = FileId::random();
|
||||
files.push(file_id);
|
||||
let file_meta = FileMeta {
|
||||
region_id: RegionId::new(123, 456),
|
||||
file_id,
|
||||
time_range: (0.into(), 10000000.into()),
|
||||
level: 0,
|
||||
file_size: 1024000,
|
||||
available_indexes: Default::default(),
|
||||
index_file_size: 0,
|
||||
num_rows: 0,
|
||||
num_row_groups: 0,
|
||||
sequence: None,
|
||||
};
|
||||
let action = RegionMetaActionList::new(vec![RegionMetaAction::Edit(RegionEdit {
|
||||
files_to_add: vec![file_meta],
|
||||
files_to_remove: vec![],
|
||||
compaction_time_window: None,
|
||||
flushed_entry_id: None,
|
||||
flushed_sequence: None,
|
||||
})]);
|
||||
actions.push(action);
|
||||
}
|
||||
(files, actions)
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn manifest_install_manifest_to() {
|
||||
common_telemetry::init_default_ut_logging();
|
||||
let (env, mut manager) = build_manager(0, CompressionType::Uncompressed).await;
|
||||
let (files, actions) = generate_action_lists(10);
|
||||
for action in actions {
|
||||
manager.update(action).await.unwrap();
|
||||
}
|
||||
|
||||
// Nothing to install
|
||||
let target_version = manager.manifest().manifest_version;
|
||||
let installed_version = manager.install_manifest_to(target_version).await.unwrap();
|
||||
assert_eq!(target_version, installed_version);
|
||||
|
||||
let mut another_manager =
|
||||
build_manager_with_initial_metadata(&env, 0, CompressionType::Uncompressed).await;
|
||||
|
||||
// install manifest changes
|
||||
let target_version = manager.manifest().manifest_version;
|
||||
let installed_version = another_manager
|
||||
.install_manifest_to(target_version - 1)
|
||||
.await
|
||||
.unwrap();
|
||||
assert_eq!(target_version - 1, installed_version);
|
||||
for file_id in files[0..9].iter() {
|
||||
assert!(another_manager.manifest().files.contains_key(file_id));
|
||||
}
|
||||
|
||||
let installed_version = another_manager
|
||||
.install_manifest_to(target_version)
|
||||
.await
|
||||
.unwrap();
|
||||
assert_eq!(target_version, installed_version);
|
||||
for file_id in files.iter() {
|
||||
assert!(another_manager.manifest().files.contains_key(file_id));
|
||||
}
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn manifest_install_manifest_to_with_checkpoint() {
|
||||
common_telemetry::init_default_ut_logging();
|
||||
let (env, mut manager) = build_manager(3, CompressionType::Uncompressed).await;
|
||||
let (files, actions) = generate_action_lists(10);
|
||||
for action in actions {
|
||||
manager.update(action).await.unwrap();
|
||||
|
||||
while manager.checkpointer().is_doing_checkpoint() {
|
||||
tokio::time::sleep(Duration::from_millis(10)).await;
|
||||
}
|
||||
}
|
||||
|
||||
// has checkpoint
|
||||
assert!(manager
|
||||
.store()
|
||||
.load_last_checkpoint()
|
||||
.await
|
||||
.unwrap()
|
||||
.is_some());
|
||||
|
||||
// check files
|
||||
let mut expected = vec![
|
||||
"/",
|
||||
"00000000000000000006.checkpoint",
|
||||
"00000000000000000007.json",
|
||||
"00000000000000000008.json",
|
||||
"00000000000000000009.checkpoint",
|
||||
"00000000000000000009.json",
|
||||
"00000000000000000010.json",
|
||||
"_last_checkpoint",
|
||||
];
|
||||
expected.sort_unstable();
|
||||
let mut paths = manager
|
||||
.store()
|
||||
.get_paths(|e| Some(e.name().to_string()))
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
paths.sort_unstable();
|
||||
assert_eq!(expected, paths);
|
||||
|
||||
let mut another_manager =
|
||||
build_manager_with_initial_metadata(&env, 0, CompressionType::Uncompressed).await;
|
||||
|
||||
// Install 9 manifests
|
||||
let target_version = manager.manifest().manifest_version;
|
||||
let installed_version = another_manager
|
||||
.install_manifest_to(target_version - 1)
|
||||
.await
|
||||
.unwrap();
|
||||
assert_eq!(target_version - 1, installed_version);
|
||||
for file_id in files[0..9].iter() {
|
||||
assert!(another_manager.manifest().files.contains_key(file_id));
|
||||
}
|
||||
|
||||
// Install all manifests
|
||||
let target_version = manager.manifest().manifest_version;
|
||||
let installed_version = another_manager
|
||||
.install_manifest_to(target_version)
|
||||
.await
|
||||
.unwrap();
|
||||
assert_eq!(target_version, installed_version);
|
||||
for file_id in files.iter() {
|
||||
assert!(another_manager.manifest().files.contains_key(file_id));
|
||||
}
|
||||
assert_eq!(4217, another_manager.store().total_manifest_size());
|
||||
}
|
||||
|
||||
@@ -14,7 +14,7 @@
|
||||
|
||||
//! Mito region.
|
||||
|
||||
pub(crate) mod opener;
|
||||
pub mod opener;
|
||||
pub mod options;
|
||||
pub(crate) mod version;
|
||||
|
||||
|
||||
@@ -16,7 +16,7 @@
|
||||
|
||||
use std::any::TypeId;
|
||||
use std::collections::HashMap;
|
||||
use std::sync::atomic::AtomicI64;
|
||||
use std::sync::atomic::{AtomicI64, AtomicU64};
|
||||
use std::sync::Arc;
|
||||
|
||||
use common_telemetry::{debug, error, info, warn};
|
||||
@@ -30,7 +30,9 @@ use object_store::util::{join_dir, normalize_dir};
|
||||
use snafu::{ensure, OptionExt, ResultExt};
|
||||
use store_api::logstore::provider::Provider;
|
||||
use store_api::logstore::LogStore;
|
||||
use store_api::metadata::{ColumnMetadata, RegionMetadata, RegionMetadataBuilder};
|
||||
use store_api::metadata::{
|
||||
ColumnMetadata, RegionMetadata, RegionMetadataBuilder, RegionMetadataRef,
|
||||
};
|
||||
use store_api::region_engine::RegionRole;
|
||||
use store_api::storage::{ColumnId, RegionId};
|
||||
|
||||
@@ -42,6 +44,7 @@ use crate::error::{
|
||||
EmptyRegionDirSnafu, InvalidMetadataSnafu, ObjectStoreNotFoundSnafu, RegionCorruptedSnafu,
|
||||
Result, StaleLogEntrySnafu,
|
||||
};
|
||||
use crate::manifest::action::RegionManifest;
|
||||
use crate::manifest::manager::{RegionManifestManager, RegionManifestOptions};
|
||||
use crate::manifest::storage::manifest_compress_type;
|
||||
use crate::memtable::time_partition::TimePartitions;
|
||||
@@ -207,11 +210,16 @@ impl RegionOpener {
|
||||
}
|
||||
// Safety: must be set before calling this method.
|
||||
let options = self.options.take().unwrap();
|
||||
let object_store = self.object_store(&options.storage)?.clone();
|
||||
let object_store = get_object_store(&options.storage, &self.object_store_manager)?;
|
||||
let provider = self.provider::<S>(&options.wal_options)?;
|
||||
let metadata = Arc::new(metadata);
|
||||
// Create a manifest manager for this region and writes regions to the manifest file.
|
||||
let region_manifest_options = self.manifest_options(config, &options)?;
|
||||
let region_manifest_options = Self::manifest_options(
|
||||
config,
|
||||
&options,
|
||||
&self.region_dir,
|
||||
&self.object_store_manager,
|
||||
)?;
|
||||
let manifest_manager = RegionManifestManager::new(
|
||||
metadata.clone(),
|
||||
region_manifest_options,
|
||||
@@ -334,7 +342,12 @@ impl RegionOpener {
|
||||
) -> Result<Option<MitoRegion>> {
|
||||
let region_options = self.options.as_ref().unwrap().clone();
|
||||
|
||||
let region_manifest_options = self.manifest_options(config, ®ion_options)?;
|
||||
let region_manifest_options = Self::manifest_options(
|
||||
config,
|
||||
®ion_options,
|
||||
&self.region_dir,
|
||||
&self.object_store_manager,
|
||||
)?;
|
||||
let Some(manifest_manager) = RegionManifestManager::open(
|
||||
region_manifest_options,
|
||||
self.stats.total_manifest_size.clone(),
|
||||
@@ -354,7 +367,7 @@ impl RegionOpener {
|
||||
.take()
|
||||
.unwrap_or_else(|| wal.wal_entry_reader(&provider, region_id, None));
|
||||
let on_region_opened = wal.on_region_opened();
|
||||
let object_store = self.object_store(®ion_options.storage)?.clone();
|
||||
let object_store = get_object_store(®ion_options.storage, &self.object_store_manager)?;
|
||||
|
||||
debug!("Open region {} with options: {:?}", region_id, self.options);
|
||||
|
||||
@@ -444,13 +457,14 @@ impl RegionOpener {
|
||||
|
||||
/// Returns a new manifest options.
|
||||
fn manifest_options(
|
||||
&self,
|
||||
config: &MitoConfig,
|
||||
options: &RegionOptions,
|
||||
region_dir: &str,
|
||||
object_store_manager: &ObjectStoreManagerRef,
|
||||
) -> Result<RegionManifestOptions> {
|
||||
let object_store = self.object_store(&options.storage)?.clone();
|
||||
let object_store = get_object_store(&options.storage, object_store_manager)?;
|
||||
Ok(RegionManifestOptions {
|
||||
manifest_dir: new_manifest_dir(&self.region_dir),
|
||||
manifest_dir: new_manifest_dir(region_dir),
|
||||
object_store,
|
||||
// We don't allow users to set the compression algorithm as we use it as a file suffix.
|
||||
// Currently, the manifest storage doesn't have good support for changing compression algorithms.
|
||||
@@ -458,20 +472,72 @@ impl RegionOpener {
|
||||
checkpoint_distance: config.manifest_checkpoint_distance,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
/// Returns an object store corresponding to `name`. If `name` is `None`, this method returns the default object store.
|
||||
fn object_store(&self, name: &Option<String>) -> Result<&object_store::ObjectStore> {
|
||||
if let Some(name) = name {
|
||||
Ok(self
|
||||
.object_store_manager
|
||||
.find(name)
|
||||
.context(ObjectStoreNotFoundSnafu {
|
||||
object_store: name.to_string(),
|
||||
})?)
|
||||
} else {
|
||||
Ok(self.object_store_manager.default_object_store())
|
||||
/// Returns an object store corresponding to `name`. If `name` is `None`, this method returns the default object store.
|
||||
pub fn get_object_store(
|
||||
name: &Option<String>,
|
||||
object_store_manager: &ObjectStoreManagerRef,
|
||||
) -> Result<object_store::ObjectStore> {
|
||||
if let Some(name) = name {
|
||||
Ok(object_store_manager
|
||||
.find(name)
|
||||
.with_context(|| ObjectStoreNotFoundSnafu {
|
||||
object_store: name.to_string(),
|
||||
})?
|
||||
.clone())
|
||||
} else {
|
||||
Ok(object_store_manager.default_object_store().clone())
|
||||
}
|
||||
}
|
||||
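Since the lookup is now a free function rather than a method on `RegionOpener`, other components can resolve a region's storage on their own. A minimal sketch of such a caller, using only the signature above (the helper name is illustrative):

fn resolve_store(
    options: &RegionOptions,
    object_store_manager: &ObjectStoreManagerRef,
) -> Result<object_store::ObjectStore> {
    // `options.storage` is an `Option<String>`; `None` falls back to the default store.
    get_object_store(&options.storage, object_store_manager)
}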
|
||||
/// A loader for loading metadata from a region dir.
|
||||
pub struct RegionMetadataLoader {
|
||||
config: Arc<MitoConfig>,
|
||||
object_store_manager: ObjectStoreManagerRef,
|
||||
}
|
||||
|
||||
impl RegionMetadataLoader {
|
||||
/// Creates a new `RegionMetadataLoader`.
|
||||
pub fn new(config: Arc<MitoConfig>, object_store_manager: ObjectStoreManagerRef) -> Self {
|
||||
Self {
|
||||
config,
|
||||
object_store_manager,
|
||||
}
|
||||
}
|
||||
|
||||
/// Loads the metadata of the region from the region dir.
|
||||
pub async fn load(
|
||||
&self,
|
||||
region_dir: &str,
|
||||
region_options: &RegionOptions,
|
||||
) -> Result<Option<RegionMetadataRef>> {
|
||||
let manifest = self.load_manifest(region_dir, region_options).await?;
|
||||
Ok(manifest.map(|m| m.metadata.clone()))
|
||||
}
|
||||
|
||||
/// Loads the manifest of the region from the region dir.
|
||||
pub async fn load_manifest(
|
||||
&self,
|
||||
region_dir: &str,
|
||||
region_options: &RegionOptions,
|
||||
) -> Result<Option<Arc<RegionManifest>>> {
|
||||
let region_manifest_options = RegionOpener::manifest_options(
|
||||
&self.config,
|
||||
region_options,
|
||||
region_dir,
|
||||
&self.object_store_manager,
|
||||
)?;
|
||||
let Some(manifest_manager) =
|
||||
RegionManifestManager::open(region_manifest_options, Arc::new(AtomicU64::new(0)))
|
||||
.await?
|
||||
else {
|
||||
return Ok(None);
|
||||
};
|
||||
|
||||
let manifest = manifest_manager.manifest();
|
||||
Ok(Some(manifest))
|
||||
}
|
||||
}
|
||||
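A hedged sketch of how the loader might be driven, assuming a `MitoConfig` and an `ObjectStoreManagerRef` are already built elsewhere in the engine; the wrapper name is illustrative:

async fn peek_region_schema(
    config: Arc<MitoConfig>,
    object_store_manager: ObjectStoreManagerRef,
    region_dir: &str,
    options: &RegionOptions,
) -> Result<Option<RegionMetadataRef>> {
    // Only the manifest is read; the region itself is not opened and no WAL is replayed.
    let loader = RegionMetadataLoader::new(config, object_store_manager);
    loader.load(region_dir, options).await
}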
|
||||
/// Checks whether the recovered region has the same schema as region to create.
|
||||
|
||||
@@ -33,6 +33,8 @@ use crate::row_converter::dense::SortField;
use crate::row_converter::{CompositeValues, PrimaryKeyCodec, PrimaryKeyFilter};

/// A codec for the sparse keys of metrics.
/// It requires that the input primary key columns be sorted by column name in lexicographical order.
/// It encodes the column id of the physical region.
#[derive(Clone, Debug)]
pub struct SparsePrimaryKeyCodec {
inner: Arc<SparsePrimaryKeyCodecInner>,

@@ -16,9 +16,9 @@ pub(crate) mod bloom_filter;
|
||||
mod codec;
|
||||
pub(crate) mod fulltext_index;
|
||||
mod indexer;
|
||||
pub(crate) mod intermediate;
|
||||
pub mod intermediate;
|
||||
pub(crate) mod inverted_index;
|
||||
pub(crate) mod puffin_manager;
|
||||
pub mod puffin_manager;
|
||||
mod statistics;
|
||||
pub(crate) mod store;
|
||||
|
||||
|
||||
@@ -49,6 +49,11 @@ impl IntermediateManager {
|
||||
/// Create a new `IntermediateManager` with the given root path.
|
||||
/// It will clean up all garbage intermediate files from previous runs.
|
||||
pub async fn init_fs(aux_path: impl AsRef<str>) -> Result<Self> {
|
||||
common_telemetry::info!(
|
||||
"Initializing intermediate manager, aux_path: {}",
|
||||
aux_path.as_ref()
|
||||
);
|
||||
|
||||
let store = new_fs_cache_store(&normalize_dir(aux_path.as_ref())).await?;
|
||||
let store = InstrumentedStore::new(store);
|
||||
|
||||
|
||||
@@ -61,6 +61,7 @@ impl Default for WriteOptions {
|
||||
}
|
||||
|
||||
/// Parquet SST info returned by the writer.
|
||||
#[derive(Debug)]
|
||||
pub struct SstInfo {
|
||||
/// SST file id.
|
||||
pub file_id: FileId,
|
||||
|
||||
@@ -28,7 +28,7 @@ use api::v1::{
|
||||
use catalog::CatalogManagerRef;
|
||||
use client::{OutputData, OutputMeta};
|
||||
use common_catalog::consts::{
|
||||
default_engine, PARENT_SPAN_ID_COLUMN, SPAN_NAME_COLUMN, TRACE_ID_COLUMN,
|
||||
default_engine, PARENT_SPAN_ID_COLUMN, SERVICE_NAME_COLUMN, TRACE_ID_COLUMN,
|
||||
};
|
||||
use common_grpc_expr::util::ColumnExpr;
|
||||
use common_meta::cache::TableFlownodeSetCacheRef;
|
||||
@@ -54,7 +54,10 @@ use store_api::metric_engine_consts::{
|
||||
use store_api::mito_engine_options::{APPEND_MODE_KEY, MERGE_MODE_KEY};
|
||||
use store_api::storage::{RegionId, TableId};
|
||||
use table::metadata::TableInfo;
|
||||
use table::requests::{InsertRequest as TableInsertRequest, AUTO_CREATE_TABLE_KEY, TTL_KEY};
|
||||
use table::requests::{
|
||||
InsertRequest as TableInsertRequest, AUTO_CREATE_TABLE_KEY, TABLE_DATA_MODEL,
|
||||
TABLE_DATA_MODEL_TRACE_V1, TTL_KEY,
|
||||
};
|
||||
use table::table_reference::TableReference;
|
||||
use table::TableRef;
|
||||
|
||||
@@ -578,7 +581,8 @@ impl Inserter {
|
||||
// - trace_id: when searching by trace id
|
||||
// - parent_span_id: when searching root span
|
||||
// - span_name: when searching certain types of span
|
||||
let index_columns = [TRACE_ID_COLUMN, PARENT_SPAN_ID_COLUMN, SPAN_NAME_COLUMN];
|
||||
let index_columns =
|
||||
[TRACE_ID_COLUMN, PARENT_SPAN_ID_COLUMN, SERVICE_NAME_COLUMN];
|
||||
for index_column in index_columns {
|
||||
if let Some(col) = create_table
|
||||
.column_defs
|
||||
@@ -595,6 +599,12 @@ impl Inserter {
|
||||
}
|
||||
}
|
||||
|
||||
// use table_options to mark table model version
|
||||
create_table.table_options.insert(
|
||||
TABLE_DATA_MODEL.to_string(),
|
||||
TABLE_DATA_MODEL_TRACE_V1.to_string(),
|
||||
);
|
||||
|
||||
let table = self
|
||||
.create_physical_table(
|
||||
create_table,
|
||||
|
||||
@@ -13,7 +13,8 @@
|
||||
// limitations under the License.
|
||||
|
||||
use criterion::{black_box, criterion_group, criterion_main, Criterion};
|
||||
use pipeline::{json_to_intermediate_state, parse, Content, GreptimeTransformer, Pipeline, Result};
|
||||
use pipeline::error::Result;
|
||||
use pipeline::{json_to_intermediate_state, parse, Content, GreptimeTransformer, Pipeline};
|
||||
use serde_json::{Deserializer, Value};
|
||||
|
||||
fn processor_mut(
|
||||
|
||||
@@ -16,7 +16,7 @@ use common_telemetry::debug;
|
||||
use snafu::OptionExt;
|
||||
use yaml_rust::Yaml;
|
||||
|
||||
use crate::etl::error::{
|
||||
use crate::error::{
|
||||
Error, FieldRequiredForDispatcherSnafu, Result, TableSuffixRequiredForDispatcherRuleSnafu,
|
||||
ValueRequiredForDispatcherRuleSnafu,
|
||||
};
|
||||
|
||||
@@ -17,6 +17,7 @@ use std::any::Any;
|
||||
use common_error::ext::ErrorExt;
|
||||
use common_error::status_code::StatusCode;
|
||||
use common_macro::stack_trace_debug;
|
||||
use datatypes::timestamp::TimestampNanosecond;
|
||||
use snafu::{Location, Snafu};
|
||||
|
||||
#[derive(Snafu)]
|
||||
@@ -51,7 +52,7 @@ pub enum Error {
|
||||
#[snafu(display("Processor {processor}: expect string value, but got {v:?}"))]
|
||||
ProcessorExpectString {
|
||||
processor: String,
|
||||
v: crate::etl::Value,
|
||||
v: crate::Value,
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
},
|
||||
@@ -607,13 +608,197 @@ pub enum Error {
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
},
|
||||
|
||||
#[snafu(display("Pipeline table not found"))]
|
||||
PipelineTableNotFound {
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to insert pipeline to pipelines table"))]
|
||||
InsertPipeline {
|
||||
#[snafu(source)]
|
||||
source: operator::error::Error,
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
},
|
||||
|
||||
#[snafu(display("Pipeline not found, name: {}, version: {}", name, version.map(|ts| ts.0.to_iso8601_string()).unwrap_or("latest".to_string())))]
|
||||
PipelineNotFound {
|
||||
name: String,
|
||||
version: Option<TimestampNanosecond>,
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to collect record batch"))]
|
||||
CollectRecords {
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
#[snafu(source)]
|
||||
source: common_recordbatch::error::Error,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to cast type, msg: {}", msg))]
|
||||
CastType {
|
||||
msg: String,
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to build DataFusion logical plan"))]
|
||||
BuildDfLogicalPlan {
|
||||
#[snafu(source)]
|
||||
error: datafusion_common::DataFusionError,
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to execute internal statement"))]
|
||||
ExecuteInternalStatement {
|
||||
#[snafu(source)]
|
||||
source: query::error::Error,
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to create dataframe"))]
|
||||
DataFrame {
|
||||
#[snafu(source)]
|
||||
source: query::error::Error,
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
},
|
||||
|
||||
#[snafu(display("General catalog error"))]
|
||||
Catalog {
|
||||
#[snafu(source)]
|
||||
source: catalog::error::Error,
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to create table"))]
|
||||
CreateTable {
|
||||
#[snafu(source)]
|
||||
source: operator::error::Error,
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
},
|
||||
|
||||
#[snafu(display("Invalid pipeline version format: {}", version))]
|
||||
InvalidPipelineVersion {
|
||||
version: String,
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
},
|
||||
}
|
||||
|
||||
pub type Result<T> = std::result::Result<T, Error>;
|
||||
|
||||
impl ErrorExt for Error {
|
||||
fn status_code(&self) -> StatusCode {
|
||||
StatusCode::InvalidArguments
|
||||
use Error::*;
|
||||
match self {
|
||||
CastType { .. } => StatusCode::Unexpected,
|
||||
PipelineTableNotFound { .. } => StatusCode::TableNotFound,
|
||||
InsertPipeline { source, .. } => source.status_code(),
|
||||
CollectRecords { source, .. } => source.status_code(),
|
||||
PipelineNotFound { .. } | InvalidPipelineVersion { .. } => StatusCode::InvalidArguments,
|
||||
BuildDfLogicalPlan { .. } => StatusCode::Internal,
|
||||
ExecuteInternalStatement { source, .. } => source.status_code(),
|
||||
DataFrame { source, .. } => source.status_code(),
|
||||
Catalog { source, .. } => source.status_code(),
|
||||
CreateTable { source, .. } => source.status_code(),
|
||||
|
||||
EmptyInputField { .. }
|
||||
| MissingInputField { .. }
|
||||
| ProcessorMustBeMap { .. }
|
||||
| ProcessorMissingField { .. }
|
||||
| ProcessorExpectString { .. }
|
||||
| ProcessorUnsupportedValue { .. }
|
||||
| ProcessorKeyMustBeString { .. }
|
||||
| ProcessorFailedToParseString { .. }
|
||||
| ProcessorMustHaveStringKey { .. }
|
||||
| UnsupportedProcessor { .. }
|
||||
| FieldMustBeType { .. }
|
||||
| FailedParseFieldFromString { .. }
|
||||
| FailedToParseIntKey { .. }
|
||||
| FailedToParseInt { .. }
|
||||
| FailedToParseFloatKey { .. }
|
||||
| IntermediateKeyIndex { .. }
|
||||
| CmcdMissingValue { .. }
|
||||
| CmcdMissingKey { .. }
|
||||
| KeyMustBeString { .. }
|
||||
| CsvRead { .. }
|
||||
| CsvNoRecord { .. }
|
||||
| CsvSeparatorName { .. }
|
||||
| CsvQuoteName { .. }
|
||||
| DateParseTimezone { .. }
|
||||
| DateParse { .. }
|
||||
| DateFailedToGetLocalTimezone { .. }
|
||||
| DateFailedToGetTimestamp { .. }
|
||||
| DateInvalidFormat { .. }
|
||||
| DissectInvalidPattern { .. }
|
||||
| DissectEmptyPattern { .. }
|
||||
| DissectSplitExceedsInput { .. }
|
||||
| DissectSplitNotMatchInput { .. }
|
||||
| DissectConsecutiveNames { .. }
|
||||
| DissectNoMatchingPattern { .. }
|
||||
| DissectModifierAlreadySet { .. }
|
||||
| DissectAppendOrderAlreadySet { .. }
|
||||
| DissectOrderOnlyAppend { .. }
|
||||
| DissectOrderOnlyAppendModifier { .. }
|
||||
| DissectEndModifierAlreadySet { .. }
|
||||
| EpochInvalidResolution { .. }
|
||||
| GsubPatternRequired { .. }
|
||||
| GsubReplacementRequired { .. }
|
||||
| Regex { .. }
|
||||
| JoinSeparatorRequired { .. }
|
||||
| LetterInvalidMethod { .. }
|
||||
| RegexNamedGroupNotFound { .. }
|
||||
| RegexNoValidField { .. }
|
||||
| RegexNoValidPattern { .. }
|
||||
| UrlEncodingInvalidMethod { .. }
|
||||
| DigestPatternInvalid { .. }
|
||||
| UrlEncodingDecode { .. }
|
||||
| TransformOnFailureInvalidValue { .. }
|
||||
| TransformElementMustBeMap { .. }
|
||||
| TransformTypeMustBeSet { .. }
|
||||
| TransformEmpty { .. }
|
||||
| TransformColumnNameMustBeUnique { .. }
|
||||
| TransformMultipleTimestampIndex { .. }
|
||||
| TransformTimestampIndexCount { .. }
|
||||
| CoerceUnsupportedNullType { .. }
|
||||
| CoerceUnsupportedNullTypeTo { .. }
|
||||
| CoerceUnsupportedEpochType { .. }
|
||||
| CoerceStringToType { .. }
|
||||
| CoerceJsonTypeTo { .. }
|
||||
| CoerceTypeToJson { .. }
|
||||
| CoerceIncompatibleTypes { .. }
|
||||
| ValueInvalidResolution { .. }
|
||||
| ValueParseType { .. }
|
||||
| ValueParseInt { .. }
|
||||
| ValueParseFloat { .. }
|
||||
| ValueParseBoolean { .. }
|
||||
| ValueDefaultValueUnsupported { .. }
|
||||
| ValueUnsupportedNumberType { .. }
|
||||
| ValueUnsupportedYamlType { .. }
|
||||
| ValueYamlKeyMustBeString { .. }
|
||||
| YamlLoad { .. }
|
||||
| YamlParse { .. }
|
||||
| PrepareValueMustBeObject { .. }
|
||||
| ColumnOptions { .. }
|
||||
| UnsupportedIndexType { .. }
|
||||
| UnsupportedNumberType { .. }
|
||||
| IdentifyPipelineColumnTypeMismatch { .. }
|
||||
| JsonPathParse { .. }
|
||||
| JsonPathParseResultIndex { .. }
|
||||
| FieldRequiredForDispatcher
|
||||
| TableSuffixRequiredForDispatcherRule
|
||||
| ValueRequiredForDispatcherRule
|
||||
| ReachedMaxNestedLevels { .. } => StatusCode::InvalidArguments,
|
||||
}
|
||||
}
|
||||
|
||||
fn as_any(&self) -> &dyn Any {
|
||||
@@ -13,16 +13,11 @@
|
||||
// limitations under the License.
|
||||
|
||||
#![allow(dead_code)]
|
||||
|
||||
pub mod error;
|
||||
pub mod field;
|
||||
pub mod processor;
|
||||
pub mod transform;
|
||||
pub mod value;
|
||||
|
||||
use error::{
|
||||
IntermediateKeyIndexSnafu, PrepareValueMustBeObjectSnafu, YamlLoadSnafu, YamlParseSnafu,
|
||||
};
|
||||
use processor::{Processor, Processors};
|
||||
use snafu::{ensure, OptionExt, ResultExt};
|
||||
use transform::{Transformer, Transforms};
|
||||
@@ -30,7 +25,9 @@ use value::Value;
|
||||
use yaml_rust::YamlLoader;
|
||||
|
||||
use crate::dispatcher::{Dispatcher, Rule};
|
||||
use crate::etl::error::Result;
|
||||
use crate::error::{
|
||||
IntermediateKeyIndexSnafu, PrepareValueMustBeObjectSnafu, Result, YamlLoadSnafu, YamlParseSnafu,
|
||||
};
|
||||
|
||||
const DESCRIPTION: &str = "description";
|
||||
const PROCESSORS: &str = "processors";
|
||||
|
||||
@@ -17,8 +17,7 @@ use std::str::FromStr;
|
||||
|
||||
use snafu::OptionExt;
|
||||
|
||||
use super::error::{EmptyInputFieldSnafu, MissingInputFieldSnafu};
|
||||
use crate::etl::error::{Error, Result};
|
||||
use crate::error::{EmptyInputFieldSnafu, Error, MissingInputFieldSnafu, Result};
|
||||
|
||||
/// Raw processor-defined inputs and outputs
|
||||
#[derive(Debug, Default, Clone)]
|
||||
|
||||
@@ -45,15 +45,13 @@ use snafu::{OptionExt, ResultExt};
|
||||
use timestamp::TimestampProcessor;
|
||||
use urlencoding::UrlEncodingProcessor;
|
||||
|
||||
use super::error::{
|
||||
FailedParseFieldFromStringSnafu, FieldMustBeTypeSnafu, ProcessorKeyMustBeStringSnafu,
|
||||
ProcessorMustBeMapSnafu, ProcessorMustHaveStringKeySnafu,
|
||||
};
|
||||
use super::field::{Field, Fields};
|
||||
use super::PipelineMap;
|
||||
use crate::etl::error::{Error, Result};
|
||||
use crate::error::{
|
||||
Error, FailedParseFieldFromStringSnafu, FieldMustBeTypeSnafu, ProcessorKeyMustBeStringSnafu,
|
||||
ProcessorMustBeMapSnafu, ProcessorMustHaveStringKeySnafu, Result, UnsupportedProcessorSnafu,
|
||||
};
|
||||
use crate::etl::processor::simple_extract::SimpleExtractProcessor;
|
||||
use crate::etl_error::UnsupportedProcessorSnafu;
|
||||
|
||||
const FIELD_NAME: &str = "field";
|
||||
const FIELDS_NAME: &str = "fields";
|
||||
|
||||
@@ -19,7 +19,7 @@
|
||||
use snafu::{OptionExt, ResultExt};
|
||||
use urlencoding::decode;
|
||||
|
||||
use crate::etl::error::{
|
||||
use crate::error::{
|
||||
CmcdMissingKeySnafu, CmcdMissingValueSnafu, Error, FailedToParseFloatKeySnafu,
|
||||
FailedToParseIntKeySnafu, KeyMustBeStringSnafu, ProcessorExpectStringSnafu,
|
||||
ProcessorMissingFieldSnafu, Result,
|
||||
|
||||
@@ -19,7 +19,7 @@ use itertools::EitherOrBoth::{Both, Left, Right};
|
||||
use itertools::Itertools;
|
||||
use snafu::{OptionExt, ResultExt};
|
||||
|
||||
use crate::etl::error::{
|
||||
use crate::error::{
|
||||
CsvNoRecordSnafu, CsvQuoteNameSnafu, CsvReadSnafu, CsvSeparatorNameSnafu, Error,
|
||||
KeyMustBeStringSnafu, ProcessorExpectStringSnafu, ProcessorMissingFieldSnafu, Result,
|
||||
};
|
||||
|
||||
@@ -19,7 +19,7 @@ use chrono_tz::Tz;
|
||||
use lazy_static::lazy_static;
|
||||
use snafu::{OptionExt, ResultExt};
|
||||
|
||||
use crate::etl::error::{
|
||||
use crate::error::{
|
||||
DateFailedToGetLocalTimezoneSnafu, DateFailedToGetTimestampSnafu, DateParseSnafu,
|
||||
DateParseTimezoneSnafu, Error, KeyMustBeStringSnafu, ProcessorExpectStringSnafu,
|
||||
ProcessorFailedToParseStringSnafu, ProcessorMissingFieldSnafu, Result,
|
||||
|
||||
@@ -22,7 +22,7 @@ use once_cell::sync::Lazy;
|
||||
use regex::Regex;
|
||||
use snafu::OptionExt;
|
||||
|
||||
use crate::etl::error::{
|
||||
use crate::error::{
|
||||
Error, KeyMustBeStringSnafu, ProcessorExpectStringSnafu, ProcessorMissingFieldSnafu, Result,
|
||||
};
|
||||
use crate::etl::field::Fields;
|
||||
|
||||
@@ -24,8 +24,9 @@ use std::borrow::Cow;
|
||||
use regex::Regex;
|
||||
use snafu::OptionExt;
|
||||
|
||||
use crate::etl::error::{
|
||||
Error, KeyMustBeStringSnafu, ProcessorExpectStringSnafu, ProcessorMissingFieldSnafu, Result,
|
||||
use crate::error::{
|
||||
DigestPatternInvalidSnafu, Error, KeyMustBeStringSnafu, ProcessorExpectStringSnafu,
|
||||
ProcessorMissingFieldSnafu, Result,
|
||||
};
|
||||
use crate::etl::field::Fields;
|
||||
use crate::etl::processor::{
|
||||
@@ -33,7 +34,6 @@ use crate::etl::processor::{
|
||||
};
|
||||
use crate::etl::value::Value;
|
||||
use crate::etl::PipelineMap;
|
||||
use crate::etl_error::DigestPatternInvalidSnafu;
|
||||
|
||||
pub(crate) const PROCESSOR_DIGEST: &str = "digest";
|
||||
|
||||
|
||||
@@ -18,7 +18,7 @@ use ahash::{HashMap, HashMapExt, HashSet, HashSetExt};
|
||||
use itertools::Itertools;
|
||||
use snafu::OptionExt;
|
||||
|
||||
use crate::etl::error::{
|
||||
use crate::error::{
|
||||
DissectAppendOrderAlreadySetSnafu, DissectConsecutiveNamesSnafu, DissectEmptyPatternSnafu,
|
||||
DissectEndModifierAlreadySetSnafu, DissectInvalidPatternSnafu, DissectModifierAlreadySetSnafu,
|
||||
DissectNoMatchingPatternSnafu, DissectOrderOnlyAppendModifierSnafu,
|
||||
|
||||
@@ -14,7 +14,7 @@
|
||||
|
||||
use snafu::{OptionExt, ResultExt};
|
||||
|
||||
use crate::etl::error::{
|
||||
use crate::error::{
|
||||
EpochInvalidResolutionSnafu, Error, FailedToParseIntSnafu, KeyMustBeStringSnafu,
|
||||
ProcessorMissingFieldSnafu, ProcessorUnsupportedValueSnafu, Result,
|
||||
};
|
||||
|
||||
@@ -15,7 +15,7 @@
|
||||
use regex::Regex;
|
||||
use snafu::{OptionExt, ResultExt};
|
||||
|
||||
use crate::etl::error::{
|
||||
use crate::error::{
|
||||
Error, GsubPatternRequiredSnafu, GsubReplacementRequiredSnafu, KeyMustBeStringSnafu,
|
||||
ProcessorExpectStringSnafu, ProcessorMissingFieldSnafu, RegexSnafu, Result,
|
||||
};
|
||||
|
||||
@@ -14,7 +14,7 @@
|
||||
|
||||
use snafu::OptionExt;
|
||||
|
||||
use crate::etl::error::{
|
||||
use crate::error::{
|
||||
Error, JoinSeparatorRequiredSnafu, KeyMustBeStringSnafu, ProcessorExpectStringSnafu,
|
||||
ProcessorMissingFieldSnafu, Result,
|
||||
};
|
||||
|
||||
@@ -19,12 +19,11 @@ use super::{
|
||||
yaml_bool, yaml_new_field, yaml_new_fields, yaml_string, PipelineMap, Processor, FIELDS_NAME,
|
||||
FIELD_NAME, IGNORE_MISSING_NAME, JSON_PATH_NAME, JSON_PATH_RESULT_INDEX_NAME,
|
||||
};
|
||||
use crate::etl::error::{Error, Result};
|
||||
use crate::etl::field::Fields;
|
||||
use crate::etl_error::{
|
||||
JsonPathParseResultIndexSnafu, JsonPathParseSnafu, KeyMustBeStringSnafu,
|
||||
ProcessorMissingFieldSnafu,
|
||||
use crate::error::{
|
||||
Error, JsonPathParseResultIndexSnafu, JsonPathParseSnafu, KeyMustBeStringSnafu,
|
||||
ProcessorMissingFieldSnafu, Result,
|
||||
};
|
||||
use crate::etl::field::Fields;
|
||||
use crate::Value;
|
||||
|
||||
pub(crate) const PROCESSOR_JSON_PATH: &str = "json_path";
|
||||
|
||||
@@ -14,7 +14,7 @@
|
||||
|
||||
use snafu::OptionExt;
|
||||
|
||||
use crate::etl::error::{
|
||||
use crate::error::{
|
||||
Error, KeyMustBeStringSnafu, LetterInvalidMethodSnafu, ProcessorExpectStringSnafu,
|
||||
ProcessorMissingFieldSnafu, Result,
|
||||
};
|
||||
|
||||
@@ -22,7 +22,7 @@ use lazy_static::lazy_static;
|
||||
use regex::Regex;
|
||||
use snafu::{OptionExt, ResultExt};
|
||||
|
||||
use crate::etl::error::{
|
||||
use crate::error::{
|
||||
Error, KeyMustBeStringSnafu, ProcessorExpectStringSnafu, ProcessorMissingFieldSnafu,
|
||||
RegexNamedGroupNotFoundSnafu, RegexNoValidFieldSnafu, RegexNoValidPatternSnafu, RegexSnafu,
|
||||
Result,
|
||||
|
||||
@@ -14,13 +14,12 @@
|
||||
|
||||
use snafu::OptionExt as _;
|
||||
|
||||
use crate::etl::error::{Error, Result};
|
||||
use crate::error::{Error, KeyMustBeStringSnafu, ProcessorMissingFieldSnafu, Result};
|
||||
use crate::etl::field::Fields;
|
||||
use crate::etl::processor::{
|
||||
yaml_bool, yaml_new_field, yaml_new_fields, yaml_string, FIELDS_NAME, FIELD_NAME,
|
||||
IGNORE_MISSING_NAME, SIMPLE_EXTRACT_KEY_NAME,
|
||||
};
|
||||
use crate::etl_error::{KeyMustBeStringSnafu, ProcessorMissingFieldSnafu};
|
||||
use crate::{PipelineMap, Processor, Value};
|
||||
|
||||
pub(crate) const PROCESSOR_SIMPLE_EXTRACT: &str = "simple_extract";
|
||||
|
||||
@@ -19,7 +19,7 @@ use chrono_tz::Tz;
|
||||
use lazy_static::lazy_static;
|
||||
use snafu::{OptionExt, ResultExt};
|
||||
|
||||
use crate::etl::error::{
|
||||
use crate::error::{
|
||||
DateFailedToGetLocalTimezoneSnafu, DateFailedToGetTimestampSnafu, DateInvalidFormatSnafu,
|
||||
DateParseSnafu, DateParseTimezoneSnafu, EpochInvalidResolutionSnafu, Error,
|
||||
KeyMustBeStringSnafu, ProcessorFailedToParseStringSnafu, ProcessorMissingFieldSnafu,
|
||||
|
||||
@@ -15,7 +15,7 @@
|
||||
use snafu::{OptionExt, ResultExt};
|
||||
use urlencoding::{decode, encode};
|
||||
|
||||
use crate::etl::error::{
|
||||
use crate::error::{
|
||||
Error, KeyMustBeStringSnafu, ProcessorExpectStringSnafu, ProcessorMissingFieldSnafu, Result,
|
||||
UrlEncodingDecodeSnafu, UrlEncodingInvalidMethodSnafu,
|
||||
};
|
||||
|
||||
@@ -17,7 +17,14 @@ pub mod transformer;
|
||||
|
||||
use snafu::OptionExt;
|
||||
|
||||
use crate::etl::error::{Error, Result};
|
||||
use super::field::Fields;
|
||||
use super::processor::{yaml_new_field, yaml_new_fields, yaml_string};
|
||||
use super::value::Timestamp;
|
||||
use super::PipelineMap;
|
||||
use crate::error::{
|
||||
Error, KeyMustBeStringSnafu, Result, TransformElementMustBeMapSnafu,
|
||||
TransformOnFailureInvalidValueSnafu, TransformTypeMustBeSetSnafu,
|
||||
};
|
||||
use crate::etl::processor::yaml_bool;
|
||||
use crate::etl::transform::index::Index;
|
||||
use crate::etl::value::Value;
|
||||
@@ -32,15 +39,6 @@ const TRANSFORM_ON_FAILURE: &str = "on_failure";
|
||||
|
||||
pub use transformer::greptime::GreptimeTransformer;
|
||||
|
||||
use super::error::{
|
||||
KeyMustBeStringSnafu, TransformElementMustBeMapSnafu, TransformOnFailureInvalidValueSnafu,
|
||||
TransformTypeMustBeSetSnafu,
|
||||
};
|
||||
use super::field::Fields;
|
||||
use super::processor::{yaml_new_field, yaml_new_fields, yaml_string};
|
||||
use super::value::Timestamp;
|
||||
use super::PipelineMap;
|
||||
|
||||
pub trait Transformer: std::fmt::Debug + Sized + Send + Sync + 'static {
|
||||
type Output;
|
||||
type VecOutput;
|
||||
|
||||
@@ -12,7 +12,7 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use crate::etl::error::{Error, Result, UnsupportedIndexTypeSnafu};
|
||||
use crate::error::{Error, Result, UnsupportedIndexTypeSnafu};
|
||||
|
||||
const INDEX_TIMESTAMP: &str = "timestamp";
|
||||
const INDEX_TIMEINDEX: &str = "time";
|
||||
|
||||
@@ -27,7 +27,7 @@ use greptime_proto::v1::{ColumnSchema, Row, Rows, Value as GreptimeValue};
|
||||
use itertools::Itertools;
|
||||
use serde_json::Number;
|
||||
|
||||
use crate::etl::error::{
|
||||
use crate::error::{
|
||||
IdentifyPipelineColumnTypeMismatchSnafu, ReachedMaxNestedLevelsSnafu, Result,
|
||||
TransformColumnNameMustBeUniqueSnafu, TransformEmptySnafu,
|
||||
TransformMultipleTimestampIndexSnafu, TransformTimestampIndexCountSnafu,
|
||||
|
||||
@@ -20,7 +20,7 @@ use greptime_proto::v1::value::ValueData;
|
||||
use greptime_proto::v1::{ColumnDataType, ColumnSchema, SemanticType};
|
||||
use snafu::ResultExt;
|
||||
|
||||
use crate::etl::error::{
|
||||
use crate::error::{
|
||||
CoerceIncompatibleTypesSnafu, CoerceJsonTypeToSnafu, CoerceStringToTypeSnafu,
|
||||
CoerceTypeToJsonSnafu, CoerceUnsupportedEpochTypeSnafu, CoerceUnsupportedNullTypeSnafu,
|
||||
CoerceUnsupportedNullTypeToSnafu, ColumnOptionsSnafu, Error, Result,
|
||||
|
||||
@@ -28,13 +28,12 @@ use regex::Regex;
|
||||
use snafu::{OptionExt, ResultExt};
|
||||
pub use time::Timestamp;
|
||||
|
||||
use super::error::{
|
||||
ValueDefaultValueUnsupportedSnafu, ValueInvalidResolutionSnafu, ValueParseBooleanSnafu,
|
||||
ValueParseFloatSnafu, ValueParseIntSnafu, ValueParseTypeSnafu, ValueUnsupportedNumberTypeSnafu,
|
||||
ValueUnsupportedYamlTypeSnafu, ValueYamlKeyMustBeStringSnafu,
|
||||
};
|
||||
use super::PipelineMap;
|
||||
use crate::etl::error::{Error, Result};
|
||||
use crate::error::{
|
||||
Error, Result, ValueDefaultValueUnsupportedSnafu, ValueInvalidResolutionSnafu,
|
||||
ValueParseBooleanSnafu, ValueParseFloatSnafu, ValueParseIntSnafu, ValueParseTypeSnafu,
|
||||
ValueUnsupportedNumberTypeSnafu, ValueUnsupportedYamlTypeSnafu, ValueYamlKeyMustBeStringSnafu,
|
||||
};
|
||||
|
||||
/// Value can be used as type
|
||||
/// acts as value: the enclosed value is the actual value
|
||||
|
||||
@@ -13,22 +13,22 @@
|
||||
// limitations under the License.
|
||||
|
||||
mod dispatcher;
|
||||
pub mod error;
|
||||
mod etl;
|
||||
mod manager;
|
||||
mod metrics;
|
||||
|
||||
pub use etl::error::Result;
|
||||
pub use etl::processor::Processor;
|
||||
pub use etl::transform::transformer::greptime::{GreptimePipelineParams, SchemaInfo};
|
||||
pub use etl::transform::transformer::identity_pipeline;
|
||||
pub use etl::transform::{GreptimeTransformer, Transformer};
|
||||
pub use etl::value::{Array, Map, Value};
|
||||
pub use etl::{
|
||||
error as etl_error, json_array_to_intermediate_state, json_to_intermediate_state, parse,
|
||||
Content, DispatchedTo, Pipeline, PipelineExecOutput, PipelineMap,
|
||||
json_array_to_intermediate_state, json_to_intermediate_state, parse, Content, DispatchedTo,
|
||||
Pipeline, PipelineExecOutput, PipelineMap,
|
||||
};
|
||||
pub use manager::{
|
||||
error, pipeline_operator, table, util, PipelineDefinition, PipelineInfo, PipelineRef,
|
||||
pipeline_operator, table, util, PipelineDefinition, PipelineInfo, PipelineRef,
|
||||
PipelineTableRef, PipelineVersion, PipelineWay, SelectInfo,
|
||||
GREPTIME_INTERNAL_IDENTITY_PIPELINE_NAME,
|
||||
GREPTIME_INTERNAL_IDENTITY_PIPELINE_NAME, GREPTIME_INTERNAL_TRACE_PIPELINE_V1_NAME,
|
||||
};
|
||||
|
||||
@@ -19,10 +19,10 @@ use datatypes::timestamp::TimestampNanosecond;
|
||||
use itertools::Itertools;
|
||||
use util::to_pipeline_version;
|
||||
|
||||
use crate::error::Result;
|
||||
use crate::table::PipelineTable;
|
||||
use crate::{GreptimeTransformer, Pipeline};
|
||||
|
||||
pub mod error;
|
||||
pub mod pipeline_operator;
|
||||
pub mod table;
|
||||
pub mod util;
|
||||
@@ -99,7 +99,7 @@ impl PipelineWay {
|
||||
name: Option<&str>,
|
||||
version: Option<&str>,
|
||||
default_pipeline: PipelineWay,
|
||||
) -> error::Result<PipelineWay> {
|
||||
) -> Result<PipelineWay> {
|
||||
if let Some(pipeline_name) = name {
|
||||
if pipeline_name == GREPTIME_INTERNAL_TRACE_PIPELINE_V1_NAME {
|
||||
Ok(PipelineWay::OtlpTraceDirectV1)
|
||||
|
||||
@@ -1,153 +0,0 @@
|
||||
// Copyright 2023 Greptime Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use std::any::Any;
|
||||
|
||||
use common_error::ext::ErrorExt;
|
||||
use common_error::status_code::StatusCode;
|
||||
use common_macro::stack_trace_debug;
|
||||
use datatypes::timestamp::TimestampNanosecond;
|
||||
use snafu::{Location, Snafu};
|
||||
|
||||
#[derive(Snafu)]
|
||||
#[snafu(visibility(pub))]
|
||||
#[stack_trace_debug]
|
||||
pub enum Error {
|
||||
#[snafu(display("Pipeline table not found"))]
|
||||
PipelineTableNotFound {
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to insert pipeline to pipelines table"))]
|
||||
InsertPipeline {
|
||||
#[snafu(source)]
|
||||
source: operator::error::Error,
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to parse pipeline"))]
|
||||
CompilePipeline {
|
||||
#[snafu(source)]
|
||||
source: crate::etl::error::Error,
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
},
|
||||
|
||||
#[snafu(display("Pipeline not found, name: {}, version: {}", name, version.map(|ts| ts.0.to_iso8601_string()).unwrap_or("latest".to_string())))]
|
||||
PipelineNotFound {
|
||||
name: String,
|
||||
version: Option<TimestampNanosecond>,
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to collect record batch"))]
|
||||
CollectRecords {
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
#[snafu(source)]
|
||||
source: common_recordbatch::error::Error,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to cast type, msg: {}", msg))]
|
||||
CastType {
|
||||
msg: String,
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to build DataFusion logical plan"))]
|
||||
BuildDfLogicalPlan {
|
||||
#[snafu(source)]
|
||||
error: datafusion_common::DataFusionError,
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to execute internal statement"))]
|
||||
ExecuteInternalStatement {
|
||||
#[snafu(source)]
|
||||
source: query::error::Error,
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to create dataframe"))]
|
||||
DataFrame {
|
||||
#[snafu(source)]
|
||||
source: query::error::Error,
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
},
|
||||
|
||||
#[snafu(display("General catalog error"))]
|
||||
Catalog {
|
||||
#[snafu(source)]
|
||||
source: catalog::error::Error,
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to create table"))]
|
||||
CreateTable {
|
||||
#[snafu(source)]
|
||||
source: operator::error::Error,
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to execute pipeline"))]
|
||||
PipelineTransform {
|
||||
#[snafu(source)]
|
||||
source: crate::etl::error::Error,
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
},
|
||||
|
||||
#[snafu(display("Invalid pipeline version format: {}", version))]
|
||||
InvalidPipelineVersion {
|
||||
version: String,
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
},
|
||||
}
|
||||
|
||||
pub type Result<T> = std::result::Result<T, Error>;
|
||||
|
||||
impl ErrorExt for Error {
|
||||
fn status_code(&self) -> StatusCode {
|
||||
use Error::*;
|
||||
match self {
|
||||
CastType { .. } => StatusCode::Unexpected,
|
||||
PipelineTableNotFound { .. } => StatusCode::TableNotFound,
|
||||
InsertPipeline { source, .. } => source.status_code(),
|
||||
CollectRecords { source, .. } => source.status_code(),
|
||||
PipelineNotFound { .. }
|
||||
| CompilePipeline { .. }
|
||||
| PipelineTransform { .. }
|
||||
| InvalidPipelineVersion { .. } => StatusCode::InvalidArguments,
|
||||
BuildDfLogicalPlan { .. } => StatusCode::Internal,
|
||||
ExecuteInternalStatement { source, .. } => source.status_code(),
|
||||
DataFrame { source, .. } => source.status_code(),
|
||||
Catalog { source, .. } => source.status_code(),
|
||||
CreateTable { source, .. } => source.status_code(),
|
||||
}
|
||||
}
|
||||
|
||||
fn as_any(&self) -> &dyn Any {
|
||||
self
|
||||
}
|
||||
}
|
||||
@@ -41,9 +41,9 @@ use table::metadata::TableInfo;
|
||||
use table::TableRef;
|
||||
|
||||
use crate::error::{
|
||||
BuildDfLogicalPlanSnafu, CastTypeSnafu, CollectRecordsSnafu, CompilePipelineSnafu,
|
||||
DataFrameSnafu, ExecuteInternalStatementSnafu, InsertPipelineSnafu,
|
||||
InvalidPipelineVersionSnafu, PipelineNotFoundSnafu, Result,
|
||||
BuildDfLogicalPlanSnafu, CastTypeSnafu, CollectRecordsSnafu, DataFrameSnafu,
|
||||
ExecuteInternalStatementSnafu, InsertPipelineSnafu, InvalidPipelineVersionSnafu,
|
||||
PipelineNotFoundSnafu, Result,
|
||||
};
|
||||
use crate::etl::transform::GreptimeTransformer;
|
||||
use crate::etl::{parse, Content, Pipeline};
|
||||
@@ -204,7 +204,7 @@ impl PipelineTable {
|
||||
/// Compile a pipeline from a string.
|
||||
pub fn compile_pipeline(pipeline: &str) -> Result<Pipeline<GreptimeTransformer>> {
|
||||
let yaml_content = Content::Yaml(pipeline);
|
||||
parse::<GreptimeTransformer>(&yaml_content).context(CompilePipelineSnafu)
|
||||
parse::<GreptimeTransformer>(&yaml_content)
|
||||
}
|
||||
|
||||
/// Insert a pipeline into the pipeline table.
|
||||
|
||||
@@ -151,7 +151,7 @@ pub enum Error {
|
||||
#[snafu(display("Failed to describe statement"))]
|
||||
DescribeStatement { source: BoxedError },
|
||||
|
||||
#[snafu(display("Pipeline management api error"))]
|
||||
#[snafu(display("Pipeline error"))]
|
||||
Pipeline {
|
||||
#[snafu(source)]
|
||||
source: pipeline::error::Error,
|
||||
@@ -159,14 +159,6 @@ pub enum Error {
|
||||
location: Location,
|
||||
},
|
||||
|
||||
#[snafu(display("Pipeline transform error"))]
|
||||
PipelineTransform {
|
||||
#[snafu(source)]
|
||||
source: pipeline::etl_error::Error,
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
},
|
||||
|
||||
#[snafu(display("Not supported: {}", feat))]
|
||||
NotSupported { feat: String },
|
||||
|
||||
@@ -661,7 +653,6 @@ impl ErrorExt for Error {
|
||||
| CheckDatabaseValidity { source, .. } => source.status_code(),
|
||||
|
||||
Pipeline { source, .. } => source.status_code(),
|
||||
PipelineTransform { source, .. } => source.status_code(),
|
||||
|
||||
NotSupported { .. }
|
||||
| InvalidParameter { .. }
|
||||
|
||||
@@ -154,7 +154,7 @@ impl Default for HttpOptions {
|
||||
fn default() -> Self {
|
||||
Self {
|
||||
addr: "127.0.0.1:4000".to_string(),
|
||||
timeout: Duration::from_secs(30),
|
||||
timeout: Duration::from_secs(0),
|
||||
disable_dashboard: false,
|
||||
body_limit: DEFAULT_BODY_LIMIT,
|
||||
is_strict_mode: false,
|
||||
@@ -1384,7 +1384,7 @@ mod test {
|
||||
fn test_http_options_default() {
|
||||
let default = HttpOptions::default();
|
||||
assert_eq!("127.0.0.1:4000".to_string(), default.addr);
|
||||
assert_eq!(Duration::from_secs(30), default.timeout)
|
||||
assert_eq!(Duration::from_secs(0), default.timeout)
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
|
||||
@@ -12,12 +12,14 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use std::io::BufRead;
|
||||
use std::str::FromStr;
|
||||
use std::sync::Arc;
|
||||
use std::time::Instant;
|
||||
|
||||
use api::v1::RowInsertRequests;
|
||||
use async_trait::async_trait;
|
||||
use axum::body::Bytes;
|
||||
use axum::extract::{FromRequest, Multipart, Path, Query, Request, State};
|
||||
use axum::http::header::CONTENT_TYPE;
|
||||
use axum::http::{HeaderMap, StatusCode};
|
||||
@@ -30,7 +32,6 @@ use common_telemetry::{error, warn};
|
||||
use datatypes::value::column_data_to_json;
|
||||
use headers::ContentType;
|
||||
use lazy_static::lazy_static;
|
||||
use pipeline::error::PipelineTransformSnafu;
|
||||
use pipeline::util::to_pipeline_version;
|
||||
use pipeline::{GreptimePipelineParams, GreptimeTransformer, PipelineDefinition, PipelineVersion};
|
||||
use serde::{Deserialize, Serialize};
|
||||
@@ -282,9 +283,7 @@ async fn dryrun_pipeline_inner(
|
||||
&pipeline_handler,
|
||||
PipelineDefinition::Resolved(pipeline),
|
||||
&params,
|
||||
pipeline::json_array_to_intermediate_state(value)
|
||||
.context(PipelineTransformSnafu)
|
||||
.context(PipelineSnafu)?,
|
||||
pipeline::json_array_to_intermediate_state(value).context(PipelineSnafu)?,
|
||||
"dry_run".to_owned(),
|
||||
query_ctx,
|
||||
true,
|
||||
@@ -389,8 +388,8 @@ pub struct PipelineDryrunParams {
|
||||
/// Check if the payload is valid json
|
||||
/// Check if the payload contains pipeline or pipeline_name and data
|
||||
/// Return Some if valid, None if invalid
|
||||
fn check_pipeline_dryrun_params_valid(payload: &str) -> Option<PipelineDryrunParams> {
|
||||
match serde_json::from_str::<PipelineDryrunParams>(payload) {
|
||||
fn check_pipeline_dryrun_params_valid(payload: &Bytes) -> Option<PipelineDryrunParams> {
|
||||
match serde_json::from_slice::<PipelineDryrunParams>(payload) {
|
||||
// payload with pipeline or pipeline_name and data is array
|
||||
Ok(params) if params.pipeline.is_some() || params.pipeline_name.is_some() => Some(params),
|
||||
// because either pipeline_name or pipeline is required
|
||||
@@ -432,7 +431,7 @@ pub async fn pipeline_dryrun(
|
||||
Query(query_params): Query<LogIngesterQueryParams>,
|
||||
Extension(mut query_ctx): Extension<QueryContext>,
|
||||
TypedHeader(content_type): TypedHeader<ContentType>,
|
||||
payload: String,
|
||||
payload: Bytes,
|
||||
) -> Result<Response> {
|
||||
let handler = log_state.log_handler;
|
||||
|
||||
@@ -514,7 +513,7 @@ pub async fn log_ingester(
|
||||
Extension(mut query_ctx): Extension<QueryContext>,
|
||||
TypedHeader(content_type): TypedHeader<ContentType>,
|
||||
headers: HeaderMap,
|
||||
payload: String,
|
||||
payload: Bytes,
|
||||
) -> Result<HttpResponse> {
|
||||
// validate source and payload
|
||||
let source = query_params.source.as_deref();
|
||||
@@ -565,40 +564,45 @@ pub async fn log_ingester(
|
||||
|
||||
fn extract_pipeline_value_by_content_type(
|
||||
content_type: ContentType,
|
||||
payload: String,
|
||||
payload: Bytes,
|
||||
ignore_errors: bool,
|
||||
) -> Result<Vec<Value>> {
|
||||
Ok(match content_type {
|
||||
ct if ct == *JSON_CONTENT_TYPE => transform_ndjson_array_factory(
|
||||
Deserializer::from_str(&payload).into_iter(),
|
||||
Deserializer::from_slice(&payload).into_iter(),
|
||||
ignore_errors,
|
||||
)?,
|
||||
ct if ct == *NDJSON_CONTENT_TYPE => {
|
||||
let mut result = Vec::with_capacity(1000);
|
||||
for (index, line) in payload.lines().enumerate() {
|
||||
match serde_json::from_str(line) {
|
||||
Ok(v) => {
|
||||
result.push(v);
|
||||
}
|
||||
Err(_) => {
|
||||
if !ignore_errors {
|
||||
warn!(
|
||||
"invalid json item in array, index: {:?}, value: {:?}",
|
||||
index, line
|
||||
);
|
||||
return InvalidParameterSnafu {
|
||||
reason: format!("invalid item:{} in array", line),
|
||||
}
|
||||
.fail();
|
||||
let line = match line {
|
||||
Ok(line) if !line.is_empty() => line,
|
||||
Ok(_) => continue, // Skip empty lines
|
||||
Err(_) if ignore_errors => continue,
|
||||
Err(e) => {
|
||||
warn!(e; "invalid string at index: {}", index);
|
||||
return InvalidParameterSnafu {
|
||||
reason: format!("invalid line at index: {}", index),
|
||||
}
|
||||
.fail();
|
||||
}
|
||||
};
|
||||
|
||||
if let Ok(v) = serde_json::from_str(&line) {
|
||||
result.push(v);
|
||||
} else if !ignore_errors {
|
||||
warn!("invalid JSON at index: {}, content: {:?}", index, line);
|
||||
return InvalidParameterSnafu {
|
||||
reason: format!("invalid JSON at index: {}", index),
|
||||
}
|
||||
.fail();
|
||||
}
|
||||
}
|
||||
result
|
||||
}
|
||||
ct if ct == *TEXT_CONTENT_TYPE || ct == *TEXT_UTF8_CONTENT_TYPE => payload
|
||||
.lines()
|
||||
.filter(|line| !line.is_empty())
|
||||
.filter_map(|line| line.ok().filter(|line| !line.is_empty()))
|
||||
.map(|line| json!({"message": line}))
|
||||
.collect(),
|
||||
_ => UnsupportedContentTypeSnafu { content_type }.fail()?,
|
||||
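A standalone sketch of the NDJSON branch above, assuming only `serde_json` and the standard library: each non-empty line is parsed as JSON, and broken lines either abort the request or are skipped when `ignore_errors` is set.

use std::io::BufRead;

fn parse_ndjson(payload: &[u8], ignore_errors: bool) -> Result<Vec<serde_json::Value>, String> {
    let mut values = Vec::new();
    for (index, line) in payload.lines().enumerate() {
        let line = match line {
            Ok(line) if !line.is_empty() => line,
            Ok(_) => continue, // skip empty lines entirely
            Err(_) if ignore_errors => continue,
            Err(e) => return Err(format!("invalid string at line {index}: {e}")),
        };
        match serde_json::from_str(&line) {
            Ok(value) => values.push(value),
            Err(_) if ignore_errors => continue,
            Err(e) => return Err(format!("invalid JSON at line {index}: {e}")),
        }
    }
    Ok(values)
}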
@@ -629,9 +633,7 @@ pub(crate) async fn ingest_logs_inner(
|
||||
&state,
|
||||
PipelineDefinition::from_name(&pipeline_name, version),
|
||||
&pipeline_params,
|
||||
pipeline::json_array_to_intermediate_state(request.values)
|
||||
.context(PipelineTransformSnafu)
|
||||
.context(PipelineSnafu)?,
|
||||
pipeline::json_array_to_intermediate_state(request.values).context(PipelineSnafu)?,
|
||||
request.table,
|
||||
&query_ctx,
|
||||
true,
|
||||
@@ -677,7 +679,8 @@ pub(crate) async fn ingest_logs_inner(
|
||||
pub trait LogValidator: Send + Sync {
|
||||
/// Validate the payload by source before processing.
|
||||
/// Return a `Some` result to indicate validation failure.
|
||||
async fn validate(&self, source: Option<&str>, payload: &str) -> Option<Result<HttpResponse>>;
|
||||
async fn validate(&self, source: Option<&str>, payload: &Bytes)
|
||||
-> Option<Result<HttpResponse>>;
|
||||
}
|
||||
|
||||
pub type LogValidatorRef = Arc<dyn LogValidator + 'static>;
|
||||
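A hedged sketch of a validator against the new `Bytes`-based signature, assuming the trait is driven through `#[async_trait]` as the surrounding imports suggest; a real implementation would inspect `payload` according to `source`.

struct AcceptAllValidator;

#[async_trait]
impl LogValidator for AcceptAllValidator {
    async fn validate(
        &self,
        _source: Option<&str>,
        _payload: &Bytes,
    ) -> Option<Result<HttpResponse>> {
        // Returning `None` raises no objection, so ingestion proceeds.
        None
    }
}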
@@ -731,17 +734,17 @@ mod tests {
|
||||
{"a": 1}
|
||||
{"b": 2"}
|
||||
{"c": 1}
|
||||
"#;
|
||||
"#
|
||||
.as_bytes();
|
||||
let payload = Bytes::from_static(payload);
|
||||
|
||||
let fail_rest =
|
||||
extract_pipeline_value_by_content_type(ContentType::json(), payload.to_string(), true);
|
||||
extract_pipeline_value_by_content_type(ContentType::json(), payload.clone(), true);
|
||||
assert!(fail_rest.is_ok());
|
||||
assert_eq!(fail_rest.unwrap(), vec![json!({"a": 1})]);
|
||||
|
||||
let fail_only_wrong = extract_pipeline_value_by_content_type(
|
||||
NDJSON_CONTENT_TYPE.clone(),
|
||||
payload.to_string(),
|
||||
true,
|
||||
);
|
||||
let fail_only_wrong =
|
||||
extract_pipeline_value_by_content_type(NDJSON_CONTENT_TYPE.clone(), payload, true);
|
||||
assert!(fail_only_wrong.is_ok());
|
||||
assert_eq!(
|
||||
fail_only_wrong.unwrap(),
|
||||
|
||||
@@ -23,7 +23,8 @@ use pipeline::{GreptimePipelineParams, SelectInfo};
|
||||
use crate::http::header::constants::{
|
||||
GREPTIME_LOG_EXTRACT_KEYS_HEADER_NAME, GREPTIME_LOG_PIPELINE_NAME_HEADER_NAME,
|
||||
GREPTIME_LOG_PIPELINE_VERSION_HEADER_NAME, GREPTIME_LOG_TABLE_NAME_HEADER_NAME,
|
||||
GREPTIME_PIPELINE_PARAMS_HEADER, GREPTIME_TRACE_TABLE_NAME_HEADER_NAME,
|
||||
GREPTIME_PIPELINE_NAME_HEADER_NAME, GREPTIME_PIPELINE_PARAMS_HEADER,
|
||||
GREPTIME_PIPELINE_VERSION_HEADER_NAME, GREPTIME_TRACE_TABLE_NAME_HEADER_NAME,
|
||||
};
|
||||
|
||||
/// Axum extractor for optional target log table name from HTTP header
|
||||
@@ -38,7 +39,7 @@ where
|
||||
|
||||
async fn from_request_parts(parts: &mut Parts, _state: &S) -> Result<Self, Self::Rejection> {
|
||||
let headers = &parts.headers;
|
||||
string_value_from_header(headers, GREPTIME_LOG_TABLE_NAME_HEADER_NAME).map(LogTableName)
|
||||
string_value_from_header(headers, &[GREPTIME_LOG_TABLE_NAME_HEADER_NAME]).map(LogTableName)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -54,7 +55,8 @@ where
|
||||
|
||||
async fn from_request_parts(parts: &mut Parts, _state: &S) -> Result<Self, Self::Rejection> {
|
||||
let headers = &parts.headers;
|
||||
string_value_from_header(headers, GREPTIME_TRACE_TABLE_NAME_HEADER_NAME).map(TraceTableName)
|
||||
string_value_from_header(headers, &[GREPTIME_TRACE_TABLE_NAME_HEADER_NAME])
|
||||
.map(TraceTableName)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -71,7 +73,7 @@ where
|
||||
|
||||
async fn from_request_parts(parts: &mut Parts, _state: &S) -> Result<Self, Self::Rejection> {
|
||||
let select =
|
||||
string_value_from_header(&parts.headers, GREPTIME_LOG_EXTRACT_KEYS_HEADER_NAME)?;
|
||||
string_value_from_header(&parts.headers, &[GREPTIME_LOG_EXTRACT_KEYS_HEADER_NAME])?;
|
||||
|
||||
match select {
|
||||
Some(name) => {
|
||||
@@ -102,12 +104,22 @@ where
|
||||
|
||||
async fn from_request_parts(parts: &mut Parts, _state: &S) -> Result<Self, Self::Rejection> {
|
||||
let headers = &parts.headers;
|
||||
let pipeline_name =
|
||||
string_value_from_header(headers, GREPTIME_LOG_PIPELINE_NAME_HEADER_NAME)?;
|
||||
let pipeline_version =
|
||||
string_value_from_header(headers, GREPTIME_LOG_PIPELINE_VERSION_HEADER_NAME)?;
|
||||
let pipeline_name = string_value_from_header(
|
||||
headers,
|
||||
&[
|
||||
GREPTIME_LOG_PIPELINE_NAME_HEADER_NAME,
|
||||
GREPTIME_PIPELINE_NAME_HEADER_NAME,
|
||||
],
|
||||
)?;
|
||||
let pipeline_version = string_value_from_header(
|
||||
headers,
|
||||
&[
|
||||
GREPTIME_LOG_PIPELINE_VERSION_HEADER_NAME,
|
||||
GREPTIME_PIPELINE_VERSION_HEADER_NAME,
|
||||
],
|
||||
)?;
|
||||
let pipeline_parameters =
|
||||
string_value_from_header(headers, GREPTIME_PIPELINE_PARAMS_HEADER)?;
|
||||
string_value_from_header(headers, &[GREPTIME_PIPELINE_PARAMS_HEADER])?;
|
||||
|
||||
Ok(PipelineInfo {
|
||||
pipeline_name,
|
||||
@@ -120,17 +132,19 @@ where
|
||||
#[inline]
|
||||
fn string_value_from_header(
|
||||
headers: &HeaderMap,
|
||||
header_key: &str,
|
||||
header_keys: &[&str],
|
||||
) -> Result<Option<String>, (StatusCode, String)> {
|
||||
headers
|
||||
.get(header_key)
|
||||
.map(|value| {
|
||||
String::from_utf8(value.as_bytes().to_vec()).map_err(|_| {
|
||||
for header_key in header_keys {
|
||||
if let Some(value) = headers.get(*header_key) {
|
||||
return Some(String::from_utf8(value.as_bytes().to_vec()).map_err(|_| {
|
||||
(
|
||||
StatusCode::BAD_REQUEST,
|
||||
format!("`{}` header is not valid UTF-8 string type.", header_key),
|
||||
)
|
||||
})
|
||||
})
|
||||
.transpose()
|
||||
}))
|
||||
.transpose();
|
||||
}
|
||||
}
|
||||
|
||||
Ok(None)
|
||||
}
|
||||
|
||||
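The same "first matching header wins" fallback, sketched against the plain `http` crate types; as a simplification, a header whose value is not valid UTF-8 is skipped here rather than rejected.

use http::HeaderMap;

fn first_header_value<'a>(headers: &'a HeaderMap, keys: &[&str]) -> Option<&'a str> {
    keys.iter()
        // Later keys are only consulted when the earlier (e.g. deprecated) ones are absent.
        .find_map(|key| headers.get(*key).and_then(|value| value.to_str().ok()))
}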
@@ -45,8 +45,15 @@ pub mod constants {
pub const GREPTIME_DB_HEADER_NAME: &str = "x-greptime-db-name";
pub const GREPTIME_TIMEZONE_HEADER_NAME: &str = "x-greptime-timezone";
pub const GREPTIME_DB_HEADER_ERROR_CODE: &str = common_error::GREPTIME_DB_HEADER_ERROR_CODE;

// Deprecated: pipelines are no longer log-specific (they are also used for traces); prefer the generic header names below.
pub const GREPTIME_LOG_PIPELINE_NAME_HEADER_NAME: &str = "x-greptime-log-pipeline-name";
pub const GREPTIME_LOG_PIPELINE_VERSION_HEADER_NAME: &str = "x-greptime-log-pipeline-version";

// More generic pipeline header names
pub const GREPTIME_PIPELINE_NAME_HEADER_NAME: &str = "x-greptime-pipeline-name";
pub const GREPTIME_PIPELINE_VERSION_HEADER_NAME: &str = "x-greptime-pipeline-version";

pub const GREPTIME_LOG_TABLE_NAME_HEADER_NAME: &str = "x-greptime-log-table-name";
pub const GREPTIME_LOG_EXTRACT_KEYS_HEADER_NAME: &str = "x-greptime-log-extract-keys";
pub const GREPTIME_TRACE_TABLE_NAME_HEADER_NAME: &str = "x-greptime-trace-table-name";

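A hypothetical client-side illustration of the new generic names, built with the `http` crate; header values are assumed to be plain ASCII here.

use http::{HeaderMap, HeaderValue};

fn pipeline_headers(name: &str, version: &str) -> HeaderMap {
    let mut headers = HeaderMap::new();
    // Prefer the generic names over the deprecated `x-greptime-log-pipeline-*` ones.
    headers.insert("x-greptime-pipeline-name", HeaderValue::from_str(name).unwrap());
    headers.insert("x-greptime-pipeline-version", HeaderValue::from_str(version).unwrap());
    headers
}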
@@ -12,7 +12,6 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use std::collections::hash_map::Entry::{Occupied, Vacant};
|
||||
use std::collections::HashMap;
|
||||
use std::sync::Arc;
|
||||
|
||||
@@ -26,8 +25,6 @@ use common_error::status_code::StatusCode;
|
||||
use common_query::{Output, OutputData};
|
||||
use common_recordbatch::util;
|
||||
use common_telemetry::{debug, error, tracing, warn};
|
||||
use datafusion_expr::{col, Expr};
|
||||
use lazy_static::lazy_static;
|
||||
use serde::{Deserialize, Serialize};
|
||||
use serde_json::Value as JsonValue;
|
||||
use session::context::{Channel, QueryContext};
|
||||
@@ -36,6 +33,7 @@ use snafu::{OptionExt, ResultExt};
|
||||
use crate::error::{
|
||||
status_code_to_http_status, CollectRecordbatchSnafu, Error, InvalidJaegerQuerySnafu, Result,
|
||||
};
|
||||
use crate::http::extractor::TraceTableName;
|
||||
use crate::http::HttpRecordsOutput;
|
||||
use crate::metrics::METRIC_JAEGER_QUERY_ELAPSED;
|
||||
use crate::otlp::trace::{
|
||||
@@ -47,43 +45,9 @@ use crate::otlp::trace::{
|
||||
};
|
||||
use crate::query_handler::JaegerQueryHandlerRef;
|
||||
|
||||
lazy_static! {
|
||||
pub static ref FIND_TRACES_COLS: Vec<Expr> = vec![
|
||||
col(TRACE_ID_COLUMN),
|
||||
col(TIMESTAMP_COLUMN),
|
||||
col(DURATION_NANO_COLUMN),
|
||||
col(SERVICE_NAME_COLUMN),
|
||||
col(SPAN_NAME_COLUMN),
|
||||
col(SPAN_ID_COLUMN),
|
||||
col(SPAN_ATTRIBUTES_COLUMN),
|
||||
col(RESOURCE_ATTRIBUTES_COLUMN),
|
||||
col(PARENT_SPAN_ID_COLUMN),
|
||||
col(SPAN_EVENTS_COLUMN),
|
||||
col(SCOPE_NAME_COLUMN),
|
||||
col(SCOPE_VERSION_COLUMN),
|
||||
col(SPAN_KIND_COLUMN),
|
||||
col(SPAN_STATUS_CODE),
|
||||
];
|
||||
static ref FIND_TRACES_SCHEMA: Vec<(&'static str, &'static str)> = vec![
|
||||
(TRACE_ID_COLUMN, "String"),
|
||||
(TIMESTAMP_COLUMN, "TimestampNanosecond"),
|
||||
(DURATION_NANO_COLUMN, "UInt64"),
|
||||
(SERVICE_NAME_COLUMN, "String"),
|
||||
(SPAN_NAME_COLUMN, "String"),
|
||||
(SPAN_ID_COLUMN, "String"),
|
||||
(SPAN_ATTRIBUTES_COLUMN, "Json"),
|
||||
(RESOURCE_ATTRIBUTES_COLUMN, "Json"),
|
||||
(PARENT_SPAN_ID_COLUMN, "String"),
|
||||
(SPAN_EVENTS_COLUMN, "Json"),
|
||||
(SCOPE_NAME_COLUMN, "String"),
|
||||
(SCOPE_VERSION_COLUMN, "String"),
|
||||
(SPAN_KIND_COLUMN, "String"),
|
||||
(SPAN_STATUS_CODE, "String"),
|
||||
];
|
||||
}
|
||||
pub const JAEGER_QUERY_TABLE_NAME_KEY: &str = "jaeger_query_table_name";
|
||||
|
||||
const REF_TYPE_CHILD_OF: &str = "CHILD_OF";
|
||||
|
||||
const SPAN_KIND_TIME_FMTS: [&str; 2] = ["%Y-%m-%d %H:%M:%S%.6f%z", "%Y-%m-%d %H:%M:%S%.9f%z"];
|
||||
|
||||
/// JaegerAPIResponse is the response of Jaeger HTTP API.
|
||||
@@ -240,9 +204,6 @@ pub enum ValueType {
|
||||
#[derive(Default, Debug, Serialize, Deserialize)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub struct JaegerQueryParams {
|
||||
/// Database that the trace data stored in.
|
||||
pub db: Option<String>,
|
||||
|
||||
/// Service name of the trace.
|
||||
#[serde(rename = "service")]
|
||||
pub service_name: Option<String>,
|
||||
@@ -275,26 +236,27 @@ pub struct JaegerQueryParams {
|
||||
pub span_kind: Option<String>,
|
||||
}
|
||||
|
||||
fn update_query_context(query_ctx: &mut QueryContext, table_name: Option<String>) {
|
||||
// The db should already be handled by the middleware.
|
||||
query_ctx.set_channel(Channel::Jaeger);
|
||||
if let Some(table) = table_name {
|
||||
query_ctx.set_extension(JAEGER_QUERY_TABLE_NAME_KEY, table);
|
||||
}
|
||||
}
|
||||
|
||||
impl QueryTraceParams {
|
||||
fn from_jaeger_query_params(db: &str, query_params: JaegerQueryParams) -> Result<Self> {
|
||||
fn from_jaeger_query_params(query_params: JaegerQueryParams) -> Result<Self> {
|
||||
let mut internal_query_params: QueryTraceParams = QueryTraceParams {
|
||||
db: db.to_string(),
|
||||
service_name: query_params.service_name.context(InvalidJaegerQuerySnafu {
|
||||
reason: "service_name is required".to_string(),
|
||||
})?,
|
||||
operation_name: query_params.operation_name,
|
||||
// Convert start time from microseconds to nanoseconds.
|
||||
start_time: query_params.start.map(|start| start * 1000),
|
||||
end_time: query_params.end.map(|end| end * 1000),
|
||||
..Default::default()
|
||||
};
|
||||
|
||||
internal_query_params.service_name =
|
||||
query_params.service_name.context(InvalidJaegerQuerySnafu {
|
||||
reason: "service_name is required".to_string(),
|
||||
})?;
|
||||
|
||||
internal_query_params.operation_name = query_params.operation_name;
|
||||
|
||||
// Convert start time from microseconds to nanoseconds.
|
||||
internal_query_params.start_time = query_params.start.map(|start| start * 1000);
|
||||
|
||||
// Convert end time from microseconds to nanoseconds.
|
||||
internal_query_params.end_time = query_params.end.map(|end| end * 1000);
|
||||
|
||||
if let Some(max_duration) = query_params.max_duration {
|
||||
let duration = humantime::parse_duration(&max_duration).map_err(|e| {
|
||||
InvalidJaegerQuerySnafu {
|
||||
@@ -343,7 +305,6 @@ impl QueryTraceParams {
|
||||
|
||||
#[derive(Debug, Default, PartialEq)]
|
||||
pub struct QueryTraceParams {
|
||||
pub db: String,
|
||||
pub service_name: String,
|
||||
pub operation_name: Option<String>,
|
||||
|
||||
@@ -367,12 +328,14 @@ pub async fn handle_get_services(
|
||||
State(handler): State<JaegerQueryHandlerRef>,
|
||||
Query(query_params): Query<JaegerQueryParams>,
|
||||
Extension(mut query_ctx): Extension<QueryContext>,
|
||||
TraceTableName(table_name): TraceTableName,
|
||||
) -> impl IntoResponse {
|
||||
debug!(
|
||||
"Received Jaeger '/api/services' request, query_params: {:?}, query_ctx: {:?}",
|
||||
query_params, query_ctx
|
||||
);
|
||||
query_ctx.set_channel(Channel::Jaeger);
|
||||
|
||||
update_query_context(&mut query_ctx, table_name);
|
||||
let query_ctx = Arc::new(query_ctx);
|
||||
let db = query_ctx.get_db_string();
|
||||
|
||||
@@ -418,12 +381,14 @@ pub async fn handle_get_trace(
|
||||
Path(trace_id): Path<String>,
|
||||
Query(query_params): Query<JaegerQueryParams>,
|
||||
Extension(mut query_ctx): Extension<QueryContext>,
|
||||
TraceTableName(table_name): TraceTableName,
|
||||
) -> impl IntoResponse {
|
||||
debug!(
|
||||
"Received Jaeger '/api/traces/{}' request, query_params: {:?}, query_ctx: {:?}",
|
||||
trace_id, query_params, query_ctx
|
||||
);
|
||||
query_ctx.set_channel(Channel::Jaeger);
|
||||
|
||||
update_query_context(&mut query_ctx, table_name);
|
||||
let query_ctx = Arc::new(query_ctx);
|
||||
let db = query_ctx.get_db_string();
|
||||
|
||||
@@ -472,12 +437,14 @@ pub async fn handle_find_traces(
|
||||
State(handler): State<JaegerQueryHandlerRef>,
|
||||
Query(query_params): Query<JaegerQueryParams>,
|
||||
Extension(mut query_ctx): Extension<QueryContext>,
|
||||
TraceTableName(table_name): TraceTableName,
|
||||
) -> impl IntoResponse {
|
||||
debug!(
|
||||
"Received Jaeger '/api/traces' request, query_params: {:?}, query_ctx: {:?}",
|
||||
query_params, query_ctx
|
||||
);
|
||||
query_ctx.set_channel(Channel::Jaeger);
|
||||
|
||||
update_query_context(&mut query_ctx, table_name);
|
||||
let query_ctx = Arc::new(query_ctx);
|
||||
let db = query_ctx.get_db_string();
|
||||
|
||||
@@ -486,7 +453,7 @@ pub async fn handle_find_traces(
|
||||
.with_label_values(&[&db, "/api/traces"])
|
||||
.start_timer();
|
||||
|
||||
match QueryTraceParams::from_jaeger_query_params(&db, query_params) {
|
||||
match QueryTraceParams::from_jaeger_query_params(query_params) {
|
||||
Ok(query_params) => {
|
||||
let output = handler.find_traces(query_ctx, query_params).await;
|
||||
match output {
@@ -521,13 +488,14 @@ pub async fn handle_get_operations(
State(handler): State<JaegerQueryHandlerRef>,
Query(query_params): Query<JaegerQueryParams>,
Extension(mut query_ctx): Extension<QueryContext>,
TraceTableName(table_name): TraceTableName,
) -> impl IntoResponse {
debug!(
"Received Jaeger '/api/operations' request, query_params: {:?}, query_ctx: {:?}",
query_params, query_ctx
);
if let Some(service_name) = query_params.service_name {
query_ctx.set_channel(Channel::Jaeger);
if let Some(service_name) = &query_params.service_name {
update_query_context(&mut query_ctx, table_name);
let query_ctx = Arc::new(query_ctx);
let db = query_ctx.get_db_string();

@@ -537,7 +505,7 @@ pub async fn handle_get_operations(
.start_timer();

match handler
.get_operations(query_ctx, &service_name, query_params.span_kind.as_deref())
.get_operations(query_ctx, service_name, query_params.span_kind.as_deref())
.await
{
Ok(output) => match covert_to_records(output).await {

@@ -593,12 +561,14 @@ pub async fn handle_get_operations_by_service(
Path(service_name): Path<String>,
Query(query_params): Query<JaegerQueryParams>,
Extension(mut query_ctx): Extension<QueryContext>,
TraceTableName(table_name): TraceTableName,
) -> impl IntoResponse {
debug!(
"Received Jaeger '/api/services/{}/operations' request, query_params: {:?}, query_ctx: {:?}",
service_name, query_params, query_ctx
);
query_ctx.set_channel(Channel::Jaeger);

update_query_context(&mut query_ctx, table_name);
let query_ctx = Arc::new(query_ctx);
let db = query_ctx.get_db_string();
@@ -690,11 +660,8 @@ fn error_response(err: Error) -> (HttpStatusCode, axum::Json<JaegerAPIResponse>)
}),
)
}
// Construct Jaeger traces from records.
fn traces_from_records(records: HttpRecordsOutput) -> Result<Vec<Trace>> {
let expected_schema = FIND_TRACES_SCHEMA.clone();
check_schema(&records, &expected_schema)?;

fn traces_from_records(records: HttpRecordsOutput) -> Result<Vec<Trace>> {
// maintain the mapping: trace_id -> (process_id -> service_name).
let mut trace_id_to_processes: HashMap<String, HashMap<String, String>> = HashMap::new();
// maintain the mapping: trace_id -> spans.
@@ -702,38 +669,202 @@ fn traces_from_records(records: HttpRecordsOutput) -> Result<Vec<Trace>> {
// maintain the mapping: service.name -> resource.attributes.
let mut service_to_resource_attributes: HashMap<String, Vec<KeyValue>> = HashMap::new();

let is_span_attributes_flatten = !records
.schema
.column_schemas
.iter()
.any(|c| c.name == SPAN_ATTRIBUTES_COLUMN);

for row in records.rows.into_iter() {
let mut span = Span::default();
let mut row_iter = row.into_iter();
let mut service_name = None;
let mut resource_tags = vec![];

// Set trace id.
if let Some(JsonValue::String(trace_id)) = row_iter.next() {
span.trace_id = trace_id.clone();
trace_id_to_processes.entry(trace_id).or_default();
}
for (idx, cell) in row.into_iter().enumerate() {
// safe to use index here
let column_name = &records.schema.column_schemas[idx].name;

// Convert timestamp from nanoseconds to microseconds.
if let Some(JsonValue::Number(timestamp)) = row_iter.next() {
span.start_time = timestamp.as_u64().ok_or_else(|| {
InvalidJaegerQuerySnafu {
reason: "Failed to convert timestamp to u64".to_string(),
match column_name.as_str() {
TRACE_ID_COLUMN => {
if let JsonValue::String(trace_id) = cell {
span.trace_id = trace_id.clone();
trace_id_to_processes.entry(trace_id).or_default();
}
}
.build()
})? / 1000;
}

// Convert duration from nanoseconds to microseconds.
if let Some(JsonValue::Number(duration)) = row_iter.next() {
span.duration = duration.as_u64().ok_or_else(|| {
InvalidJaegerQuerySnafu {
reason: "Failed to convert duration to u64".to_string(),
TIMESTAMP_COLUMN => {
span.start_time = cell.as_u64().context(InvalidJaegerQuerySnafu {
reason: "Failed to convert timestamp to u64".to_string(),
})? / 1000;
}
.build()
})? / 1000;
DURATION_NANO_COLUMN => {
span.duration = cell.as_u64().context(InvalidJaegerQuerySnafu {
reason: "Failed to convert duration to u64".to_string(),
})? / 1000;
}
SERVICE_NAME_COLUMN => {
if let JsonValue::String(name) = cell {
service_name = Some(name);
}
}
SPAN_NAME_COLUMN => {
if let JsonValue::String(span_name) = cell {
span.operation_name = span_name;
}
}
SPAN_ID_COLUMN => {
if let JsonValue::String(span_id) = cell {
span.span_id = span_id;
}
}
SPAN_ATTRIBUTES_COLUMN => {
// for v0 data model, span_attributes are nested as a json
// data structure
if let JsonValue::Object(span_attrs) = cell {
span.tags.extend(object_to_tags(span_attrs));
}
}
RESOURCE_ATTRIBUTES_COLUMN => {
// for v0 data model, resource_attributes are nested as a json
// data structure

if let JsonValue::Object(mut resource_attrs) = cell {
resource_attrs.remove(KEY_SERVICE_NAME);
resource_tags = object_to_tags(resource_attrs);
}
}
PARENT_SPAN_ID_COLUMN => {
if let JsonValue::String(parent_span_id) = cell {
if !parent_span_id.is_empty() {
span.references.push(Reference {
trace_id: span.trace_id.clone(),
span_id: parent_span_id,
ref_type: REF_TYPE_CHILD_OF.to_string(),
});
}
}
}
SPAN_EVENTS_COLUMN => {
if let JsonValue::Array(events) = cell {
for event in events {
if let JsonValue::Object(mut obj) = event {
let Some(action) = obj.get("name").and_then(|v| v.as_str()) else {
continue;
};

let Some(t) =
obj.get("time").and_then(|t| t.as_str()).and_then(|s| {
SPAN_KIND_TIME_FMTS
.iter()
.find_map(|fmt| {
chrono::DateTime::parse_from_str(s, fmt).ok()
})
.map(|dt| dt.timestamp_micros() as u64)
})
else {
continue;
};

let mut fields = vec![KeyValue {
key: "event".to_string(),
value_type: ValueType::String,
value: Value::String(action.to_string()),
}];

// Add event attributes as fields
if let Some(JsonValue::Object(attrs)) = obj.remove("attributes") {
fields.extend(object_to_tags(attrs));
}

span.logs.push(Log {
timestamp: t,
fields,
});
}
}
}
}
SCOPE_NAME_COLUMN => {
if let JsonValue::String(scope_name) = cell {
if !scope_name.is_empty() {
span.tags.push(KeyValue {
key: KEY_OTEL_SCOPE_NAME.to_string(),
value_type: ValueType::String,
value: Value::String(scope_name),
});
}
}
}
SCOPE_VERSION_COLUMN => {
if let JsonValue::String(scope_version) = cell {
if !scope_version.is_empty() {
span.tags.push(KeyValue {
key: KEY_OTEL_SCOPE_VERSION.to_string(),
value_type: ValueType::String,
value: Value::String(scope_version),
});
}
}
}
SPAN_KIND_COLUMN => {
if let JsonValue::String(span_kind) = cell {
if !span_kind.is_empty() {
span.tags.push(KeyValue {
key: KEY_SPAN_KIND.to_string(),
value_type: ValueType::String,
value: Value::String(normalize_span_kind(&span_kind)),
});
}
}
}
SPAN_STATUS_CODE => {
if let JsonValue::String(span_status) = cell {
if span_status != SPAN_STATUS_UNSET && !span_status.is_empty() {
span.tags.push(KeyValue {
key: KEY_OTEL_STATUS_CODE.to_string(),
value_type: ValueType::String,
value: Value::String(normalize_status_code(&span_status)),
});
}
}
}
_ => {
// this is the v1 data model
if is_span_attributes_flatten {
const SPAN_ATTR_PREFIX: &str = "span_attributes.";
const RESOURCE_ATTR_PREFIX: &str = "resource_attributes.";
// a span attributes column
if column_name.starts_with(SPAN_ATTR_PREFIX) {
if let Some(keyvalue) = to_keyvalue(
column_name
.strip_prefix(SPAN_ATTR_PREFIX)
.unwrap_or_default()
.to_string(),
cell,
) {
span.tags.push(keyvalue);
}
} else if column_name.starts_with(RESOURCE_ATTR_PREFIX) {
if let Some(keyvalue) = to_keyvalue(
column_name
.strip_prefix(RESOURCE_ATTR_PREFIX)
.unwrap_or_default()
.to_string(),
cell,
) {
resource_tags.push(keyvalue);
}
}
}
}
}
}

// Collect services to construct processes.
if let Some(JsonValue::String(service_name)) = row_iter.next() {
if let Some(service_name) = service_name {
if !service_to_resource_attributes.contains_key(&service_name) {
service_to_resource_attributes.insert(service_name.clone(), resource_tags);
}

if let Some(process) = trace_id_to_processes.get_mut(&span.trace_id) {
if let Some(process_id) = process.get(&service_name) {
span.process_id = process_id.clone();
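Note how the rewritten loop above decodes each cell by the column name from the result schema rather than by a fixed column order, so the same function can handle both the v0 model (nested JSON attribute objects) and the v1 model (flattened `span_attributes.*` / `resource_attributes.*` columns). A minimal sketch of that dispatch pattern, using simplified stand-in types rather than the column-name constants and the Jaeger `Span` defined in this module:

// Simplified stand-ins for the schema and span types in this file.
struct Column {
    name: String,
}

#[derive(Debug, Default)]
struct Span {
    trace_id: String,
    span_id: String,
    operation_name: String,
    tags: Vec<(String, String)>,
}

// Decode one row by matching each cell against its column name.
fn span_from_row(columns: &[Column], row: Vec<String>) -> Span {
    let mut span = Span::default();
    for (idx, cell) in row.into_iter().enumerate() {
        match columns[idx].name.as_str() {
            "trace_id" => span.trace_id = cell,
            "span_id" => span.span_id = cell,
            "span_name" => span.operation_name = cell,
            // Flattened v1 attribute columns become tags keyed by the suffix.
            name if name.starts_with("span_attributes.") => {
                let key = name.trim_start_matches("span_attributes.").to_string();
                span.tags.push((key, cell));
            }
            _ => {}
        }
    }
    span
}

fn main() {
    let columns = vec![
        Column { name: "trace_id".to_string() },
        Column { name: "span_attributes.http.request.method".to_string() },
    ];
    let span = span_from_row(&columns, vec!["5611dce1".to_string(), "GET".to_string()]);
    println!("{:?}", span);
}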
@@ -746,127 +877,8 @@ fn traces_from_records(records: HttpRecordsOutput) -> Result<Vec<Trace>> {
}
}

// Set operation name. In Jaeger, the operation name is the span name.
if let Some(JsonValue::String(span_name)) = row_iter.next() {
span.operation_name = span_name;
}

// Set span id.
if let Some(JsonValue::String(span_id)) = row_iter.next() {
span.span_id = span_id;
}

// Convert span attributes to tags.
if let Some(JsonValue::Object(object)) = row_iter.next() {
span.tags = object_to_tags(object);
}

// Save resource attributes with service name.
if let Some(JsonValue::Object(mut object)) = row_iter.next() {
if let Some(service_name) = object
.remove(KEY_SERVICE_NAME)
.and_then(|v| v.as_str().map(|s| s.to_string()))
{
match service_to_resource_attributes.entry(service_name) {
Occupied(_) => {}
Vacant(vacant) => {
let _ = vacant.insert(object_to_tags(object));
}
}
}
}

// Set parent span id.
if let Some(JsonValue::String(parent_span_id)) = row_iter.next()
&& !parent_span_id.is_empty()
{
span.references.push(Reference {
trace_id: span.trace_id.clone(),
span_id: parent_span_id,
ref_type: REF_TYPE_CHILD_OF.to_string(),
});
}

// Set span events to logs.
if let Some(JsonValue::Array(events)) = row_iter.next() {
for event in events {
if let JsonValue::Object(mut obj) = event {
let Some(action) = obj.get("name").and_then(|v| v.as_str()) else {
continue;
};

let Some(t) = obj.get("time").and_then(|t| t.as_str()).and_then(|s| {
SPAN_KIND_TIME_FMTS
.iter()
.find_map(|fmt| chrono::DateTime::parse_from_str(s, fmt).ok())
.map(|dt| dt.timestamp_micros() as u64)
}) else {
continue;
};

let mut fields = vec![KeyValue {
key: "event".to_string(),
value_type: ValueType::String,
value: Value::String(action.to_string()),
}];

// Add event attributes as fields
if let Some(JsonValue::Object(attrs)) = obj.remove("attributes") {
fields.extend(object_to_tags(attrs));
}

span.logs.push(Log {
timestamp: t,
fields,
});
}
}
}

// Set scope name.
if let Some(JsonValue::String(scope_name)) = row_iter.next()
&& !scope_name.is_empty()
{
span.tags.push(KeyValue {
key: KEY_OTEL_SCOPE_NAME.to_string(),
value_type: ValueType::String,
value: Value::String(scope_name),
});
}

// Set scope version.
if let Some(JsonValue::String(scope_version)) = row_iter.next()
&& !scope_version.is_empty()
{
span.tags.push(KeyValue {
key: KEY_OTEL_SCOPE_VERSION.to_string(),
value_type: ValueType::String,
value: Value::String(scope_version),
});
}

// Set span kind.
if let Some(JsonValue::String(span_kind)) = row_iter.next()
&& !span_kind.is_empty()
{
span.tags.push(KeyValue {
key: KEY_SPAN_KIND.to_string(),
value_type: ValueType::String,
value: Value::String(normalize_span_kind(&span_kind)),
});
}

// Set span status code.
if let Some(JsonValue::String(span_status_code)) = row_iter.next()
&& span_status_code != SPAN_STATUS_UNSET
&& !span_status_code.is_empty()
{
span.tags.push(KeyValue {
key: KEY_OTEL_STATUS_CODE.to_string(),
value_type: ValueType::String,
value: Value::String(normalize_status_code(&span_status_code)),
});
}
// ensure span tags order
span.tags.sort_by(|a, b| a.key.cmp(&b.key));

if let Some(spans) = trace_id_to_spans.get_mut(&span.trace_id) {
spans.push(span);
@@ -899,42 +911,41 @@ fn traces_from_records(records: HttpRecordsOutput) -> Result<Vec<Trace>> {
Ok(traces)
}

#[inline]
fn to_keyvalue(key: String, value: JsonValue) -> Option<KeyValue> {
match value {
JsonValue::String(value) => Some(KeyValue {
key,
value_type: ValueType::String,
value: Value::String(value.to_string()),
}),
JsonValue::Number(value) => Some(KeyValue {
key,
value_type: ValueType::Int64,
value: Value::Int64(value.as_i64().unwrap_or(0)),
}),
JsonValue::Bool(value) => Some(KeyValue {
key,
value_type: ValueType::Boolean,
value: Value::Boolean(value),
}),
JsonValue::Array(value) => Some(KeyValue {
key,
value_type: ValueType::String,
value: Value::String(serde_json::to_string(&value).unwrap()),
}),
JsonValue::Object(value) => Some(KeyValue {
key,
value_type: ValueType::String,
value: Value::String(serde_json::to_string(&value).unwrap()),
}),
JsonValue::Null => None,
}
}

fn object_to_tags(object: serde_json::map::Map<String, JsonValue>) -> Vec<KeyValue> {
object
.into_iter()
.filter_map(|(key, value)| match value {
JsonValue::String(value) => Some(KeyValue {
key,
value_type: ValueType::String,
value: Value::String(value.to_string()),
}),
JsonValue::Number(value) => Some(KeyValue {
key,
value_type: ValueType::Int64,
value: Value::Int64(value.as_i64().unwrap_or(0)),
}),
JsonValue::Bool(value) => Some(KeyValue {
key,
value_type: ValueType::Boolean,
value: Value::Boolean(value),
}),
JsonValue::Array(value) => Some(KeyValue {
key,
value_type: ValueType::String,
value: Value::String(serde_json::to_string(&value).unwrap()),
}),
JsonValue::Object(value) => Some(KeyValue {
key,
value_type: ValueType::String,
value: Value::String(serde_json::to_string(&value).unwrap()),
}),
// FIXME(zyy17): Do we need to support other types?
_ => {
warn!("Unsupported value type: {:?}", value);
None
}
})
.filter_map(|(key, value)| to_keyvalue(key, value))
.collect()
}
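`to_keyvalue` and `object_to_tags` above map arbitrary `serde_json` values onto Jaeger's tag model: strings, numbers, and booleans keep a native value type, arrays and objects are serialized back to JSON text, and nulls are dropped. A self-contained sketch of the same mapping, with a simplified `Tag` enum standing in for the module's `KeyValue`/`Value` types:

use serde_json::{json, Map, Value as JsonValue};

// Simplified stand-in for the Jaeger KeyValue/Value types used in this file.
#[derive(Debug, PartialEq)]
enum Tag {
    String(String, String),
    Int64(String, i64),
    Boolean(String, bool),
}

fn to_tag(key: String, value: JsonValue) -> Option<Tag> {
    match value {
        JsonValue::String(s) => Some(Tag::String(key, s)),
        JsonValue::Number(n) => Some(Tag::Int64(key, n.as_i64().unwrap_or(0))),
        JsonValue::Bool(b) => Some(Tag::Boolean(key, b)),
        // Nulls carry no information and are dropped.
        JsonValue::Null => None,
        // Arrays and objects are kept as their JSON text representation.
        other => Some(Tag::String(key, other.to_string())),
    }
}

fn object_to_tags(object: Map<String, JsonValue>) -> Vec<Tag> {
    object
        .into_iter()
        .filter_map(|(key, value)| to_tag(key, value))
        .collect()
}

fn main() {
    let attrs = json!({
        "http.request.method": "GET",
        "http.status_code": 200,
        "retry": false,
        "ignored": null,
    });
    if let JsonValue::Object(map) = attrs {
        let tags = object_to_tags(map);
        assert_eq!(tags.len(), 3); // the null entry is dropped
        println!("{:?}", tags);
    }
}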
@@ -1055,7 +1066,6 @@ fn convert_string_to_boolean(input: &serde_json::Value) -> Option<serde_json::Va

#[cfg(test)]
mod tests {
use common_catalog::consts::DEFAULT_SCHEMA_NAME;
use serde_json::{json, Number, Value as JsonValue};

use super::*;
@@ -1301,6 +1311,151 @@ mod tests {
}
}
#[test]
fn test_traces_from_v1_records() {
// Each test is a tuple of `(test_records, expected)`.
let tests = vec![(
HttpRecordsOutput {
schema: OutputSchema {
column_schemas: vec![
ColumnSchema {
name: "trace_id".to_string(),
data_type: "String".to_string(),
},
ColumnSchema {
name: "timestamp".to_string(),
data_type: "TimestampNanosecond".to_string(),
},
ColumnSchema {
name: "duration_nano".to_string(),
data_type: "UInt64".to_string(),
},
ColumnSchema {
name: "service_name".to_string(),
data_type: "String".to_string(),
},
ColumnSchema {
name: "span_name".to_string(),
data_type: "String".to_string(),
},
ColumnSchema {
name: "span_id".to_string(),
data_type: "String".to_string(),
},
ColumnSchema {
name: "span_attributes.http.request.method".to_string(),
data_type: "String".to_string(),
},
ColumnSchema {
name: "span_attributes.http.request.url".to_string(),
data_type: "String".to_string(),
},
ColumnSchema {
name: "span_attributes.http.status_code".to_string(),
data_type: "UInt64".to_string(),
},
],
},
rows: vec![
vec![
JsonValue::String("5611dce1bc9ebed65352d99a027b08ea".to_string()),
JsonValue::Number(Number::from_u128(1738726754492422000).unwrap()),
JsonValue::Number(Number::from_u128(100000000).unwrap()),
JsonValue::String("test-service-0".to_string()),
JsonValue::String("access-mysql".to_string()),
JsonValue::String("008421dbbd33a3e9".to_string()),
JsonValue::String("GET".to_string()),
JsonValue::String("/data".to_string()),
JsonValue::Number(Number::from_u128(200).unwrap()),
],
vec![
JsonValue::String("5611dce1bc9ebed65352d99a027b08ea".to_string()),
JsonValue::Number(Number::from_u128(1738726754642422000).unwrap()),
JsonValue::Number(Number::from_u128(100000000).unwrap()),
JsonValue::String("test-service-0".to_string()),
JsonValue::String("access-redis".to_string()),
JsonValue::String("ffa03416a7b9ea48".to_string()),
JsonValue::String("POST".to_string()),
JsonValue::String("/create".to_string()),
JsonValue::Number(Number::from_u128(400).unwrap()),
],
],
total_rows: 2,
metrics: HashMap::new(),
},
vec![Trace {
trace_id: "5611dce1bc9ebed65352d99a027b08ea".to_string(),
spans: vec![
Span {
trace_id: "5611dce1bc9ebed65352d99a027b08ea".to_string(),
span_id: "008421dbbd33a3e9".to_string(),
operation_name: "access-mysql".to_string(),
start_time: 1738726754492422,
duration: 100000,
tags: vec![
KeyValue {
key: "http.request.method".to_string(),
value_type: ValueType::String,
value: Value::String("GET".to_string()),
},
KeyValue {
key: "http.request.url".to_string(),
value_type: ValueType::String,
value: Value::String("/data".to_string()),
},
KeyValue {
key: "http.status_code".to_string(),
value_type: ValueType::Int64,
value: Value::Int64(200),
},
],
process_id: "p1".to_string(),
..Default::default()
},
Span {
trace_id: "5611dce1bc9ebed65352d99a027b08ea".to_string(),
span_id: "ffa03416a7b9ea48".to_string(),
operation_name: "access-redis".to_string(),
start_time: 1738726754642422,
duration: 100000,
tags: vec![
KeyValue {
key: "http.request.method".to_string(),
value_type: ValueType::String,
value: Value::String("POST".to_string()),
},
KeyValue {
key: "http.request.url".to_string(),
value_type: ValueType::String,
value: Value::String("/create".to_string()),
},
KeyValue {
key: "http.status_code".to_string(),
value_type: ValueType::Int64,
value: Value::Int64(400),
},
],
process_id: "p1".to_string(),
..Default::default()
},
],
processes: HashMap::from([(
"p1".to_string(),
Process {
service_name: "test-service-0".to_string(),
tags: vec![],
},
)]),
..Default::default()
}],
)];

for (records, expected) in tests {
let traces = traces_from_records(records).unwrap();
assert_eq!(traces, expected);
}
}
#[test]
fn test_from_jaeger_query_params() {
// Each test is a tuple of `(test_query_params, expected)`.
@@ -1311,7 +1466,6 @@ mod tests {
..Default::default()
},
QueryTraceParams {
db: DEFAULT_SCHEMA_NAME.to_string(),
service_name: "test-service-0".to_string(),
..Default::default()
},
@@ -1329,7 +1483,6 @@ mod tests {
..Default::default()
},
QueryTraceParams {
db: DEFAULT_SCHEMA_NAME.to_string(),
service_name: "test-service-0".to_string(),
operation_name: Some("access-mysql".to_string()),
start_time: Some(1738726754492422000),
@@ -1349,9 +1502,7 @@ mod tests {
];

for (query_params, expected) in tests {
let query_params =
QueryTraceParams::from_jaeger_query_params(DEFAULT_SCHEMA_NAME, query_params)
.unwrap();
let query_params = QueryTraceParams::from_jaeger_query_params(query_params).unwrap();
assert_eq!(query_params, expected);
}
}
@@ -47,7 +47,7 @@ use tokio::io::AsyncWrite;
use crate::error::{self, DataFrameSnafu, InvalidPrepareStatementSnafu, Result};
use crate::metrics::METRIC_AUTH_FAILURE;
use crate::mysql::helper::{
self, format_placeholder, replace_placeholders, transform_placeholders,
self, fix_placeholder_types, format_placeholder, replace_placeholders, transform_placeholders,
};
use crate::mysql::writer;
use crate::mysql::writer::{create_mysql_column, handle_err};
@@ -183,7 +183,7 @@ impl MysqlInstanceShim {
let describe_result = self
.do_describe(statement.clone(), query_ctx.clone())
.await?;
let (plan, schema) = if let Some(DescribeResult {
let (mut plan, schema) = if let Some(DescribeResult {
logical_plan,
schema,
}) = describe_result
@@ -193,7 +193,8 @@ impl MysqlInstanceShim {
(None, None)
};

let params = if let Some(plan) = &plan {
let params = if let Some(plan) = &mut plan {
fix_placeholder_types(plan)?;
prepared_params(
&plan
.get_parameter_types()
@@ -258,7 +259,8 @@ impl MysqlInstanceShim {
};

let outputs = match sql_plan.plan {
Some(plan) => {
Some(mut plan) => {
fix_placeholder_types(&mut plan)?;
let param_types = plan
.get_parameter_types()
.context(DataFrameSnafu)?
@@ -295,6 +297,10 @@ impl MysqlInstanceShim {
}
Params::CliParams(params) => params.iter().map(|x| x.to_string()).collect(),
};
debug!(
"do_execute Replacing with Params: {:?}, Original Query: {}",
param_strs, sql_plan.query
);
let query = replace_params(param_strs, sql_plan.query);
debug!("Mysql execute replaced query: {}", query);
self.do_query(&query, query_ctx.clone()).await
@@ -412,6 +418,7 @@ impl<W: AsyncWrite + Send + Sync + Unpin> AsyncMysqlShim<W> for MysqlInstanceShi
let (params, columns) = self
.do_prepare(raw_query, query_ctx.clone(), stmt_key)
.await?;
debug!("on_prepare: Params: {:?}, Columns: {:?}", params, columns);
w.reply(stmt_id, &params, &columns).await?;
crate::metrics::METRIC_MYSQL_PREPARED_COUNT
.with_label_values(&[query_ctx.get_db_string().as_str()])
@@ -641,12 +648,13 @@ fn replace_params_with_values(
debug_assert_eq!(param_types.len(), params.len());

debug!(
"replace_params_with_values(param_types: {:#?}, params: {:#?})",
"replace_params_with_values(param_types: {:#?}, params: {:#?}, plan: {:#?})",
param_types,
params
.iter()
.map(|x| format!("({:?}, {:?})", x.value, x.coltype))
.join(", ")
.join(", "),
plan
);

let mut values = Vec::with_capacity(params.len());
@@ -672,9 +680,10 @@ fn replace_params_with_exprs(
debug_assert_eq!(param_types.len(), params.len());

debug!(
"replace_params_with_exprs(param_types: {:#?}, params: {:#?})",
"replace_params_with_exprs(param_types: {:#?}, params: {:#?}, plan: {:#?})",
param_types,
params.iter().map(|x| format!("({:?})", x)).join(", ")
params.iter().map(|x| format!("({:?})", x)).join(", "),
plan
);

let mut values = Vec::with_capacity(params.len());
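In the string-substitution path shown above, the shim renders every bound parameter as a string and splices it back into the original statement (`replace_params(param_strs, sql_plan.query)`) before executing it as a plain query. A rough, self-contained sketch of that substitution, assuming simple `?` placeholders; the real helpers in this module (`format_placeholder`, `replace_placeholders`) are more involved, so this only illustrates the idea:

// Rough sketch: replace `?` placeholders in a SQL string with already
// formatted parameter strings, in order. Not the implementation used in
// this file; it ignores quoting and placeholders inside string literals.
fn replace_params(param_strs: Vec<String>, query: String) -> String {
    let mut result = String::with_capacity(query.len());
    let mut params = param_strs.into_iter();
    for ch in query.chars() {
        if ch == '?' {
            // Keep the placeholder if we run out of parameters.
            match params.next() {
                Some(p) => result.push_str(&p),
                None => result.push(ch),
            }
        } else {
            result.push(ch);
        }
    }
    result
}

fn main() {
    let query = "SELECT * FROM metrics WHERE host = ? AND ts > ?".to_string();
    let params = vec!["'host-1'".to_string(), "1738726754492422".to_string()];
    assert_eq!(
        replace_params(params, query),
        "SELECT * FROM metrics WHERE host = 'host-1' AND ts > 1738726754492422"
    );
}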
Some files were not shown because too many files have changed in this diff.