Mirror of https://github.com/GreptimeTeam/greptimedb.git, synced 2025-12-25 23:49:58 +00:00

Compare commits: bench_flow...v0.4.4 (12 commits)
| SHA1 |
|---|
| 616eb04914 |
| 7c53f92e4b |
| 445bd92c7a |
| 92a9802343 |
| abbac46c05 |
| d0d0f091f0 |
| 707a0d5626 |
| e42767d500 |
| ca18ccf7d4 |
| b1d8812806 |
| 7547e7ebdf |
| 6100cb335a |
Cargo.lock (generated, 139 lines changed)
@@ -196,7 +196,7 @@ checksum = "8f1f8f5a6f3d50d89e3797d7593a50f96bb2aaa20ca0cc7be1fb673232c91d72"

 [[package]]
 name = "api"
-version = "0.4.3"
+version = "0.4.4"
 dependencies = [
  "common-base",
  "common-decimal",
@@ -655,7 +655,7 @@ dependencies = [

 [[package]]
 name = "auth"
-version = "0.4.3"
+version = "0.4.4"
 dependencies = [
  "api",
  "async-trait",
@@ -828,7 +828,7 @@ dependencies = [

 [[package]]
 name = "benchmarks"
-version = "0.4.3"
+version = "0.4.4"
 dependencies = [
  "arrow",
  "chrono",
@@ -1160,7 +1160,7 @@ checksum = "37b2a672a2cb129a2e41c10b1224bb368f9f37a2b16b612598138befd7b37eb5"

 [[package]]
 name = "catalog"
-version = "0.4.3"
+version = "0.4.4"
 dependencies = [
  "api",
  "arc-swap",
@@ -1429,7 +1429,7 @@ checksum = "702fc72eb24e5a1e48ce58027a675bc24edd52096d5397d4aea7c6dd9eca0bd1"

 [[package]]
 name = "client"
-version = "0.4.3"
+version = "0.4.4"
 dependencies = [
  "api",
  "arrow-flight",
@@ -1462,7 +1462,7 @@ dependencies = [
  "session",
  "snafu",
  "substrait 0.17.1",
- "substrait 0.4.3",
+ "substrait 0.4.4",
  "tokio",
  "tokio-stream",
  "tonic 0.10.2",
@@ -1492,7 +1492,7 @@ dependencies = [

 [[package]]
 name = "cmd"
-version = "0.4.3"
+version = "0.4.4"
 dependencies = [
  "anymap",
  "async-trait",
@@ -1540,7 +1540,7 @@ dependencies = [
  "servers",
  "session",
  "snafu",
- "substrait 0.4.3",
+ "substrait 0.4.4",
  "table",
  "temp-env",
  "tikv-jemallocator",
@@ -1573,7 +1573,7 @@ checksum = "55b672471b4e9f9e95499ea597ff64941a309b2cdbffcc46f2cc5e2d971fd335"

 [[package]]
 name = "common-base"
-version = "0.4.3"
+version = "0.4.4"
 dependencies = [
  "anymap",
  "bitvec",
@@ -1588,7 +1588,7 @@ dependencies = [

 [[package]]
 name = "common-catalog"
-version = "0.4.3"
+version = "0.4.4"
 dependencies = [
  "chrono",
  "common-error",
@@ -1599,7 +1599,7 @@ dependencies = [

 [[package]]
 name = "common-config"
-version = "0.4.3"
+version = "0.4.4"
 dependencies = [
  "common-base",
  "humantime-serde",
@@ -1608,7 +1608,7 @@ dependencies = [

 [[package]]
 name = "common-datasource"
-version = "0.4.3"
+version = "0.4.4"
 dependencies = [
  "arrow",
  "arrow-schema",
@@ -1639,7 +1639,7 @@ dependencies = [

 [[package]]
 name = "common-decimal"
-version = "0.4.3"
+version = "0.4.4"
 dependencies = [
  "arrow",
  "bigdecimal",
@@ -1653,7 +1653,7 @@ dependencies = [

 [[package]]
 name = "common-error"
-version = "0.4.3"
+version = "0.4.4"
 dependencies = [
  "snafu",
  "strum 0.25.0",
@@ -1661,7 +1661,7 @@ dependencies = [

 [[package]]
 name = "common-function"
-version = "0.4.3"
+version = "0.4.4"
 dependencies = [
  "arc-swap",
  "chrono-tz 0.6.3",
@@ -1684,7 +1684,7 @@ dependencies = [

 [[package]]
 name = "common-greptimedb-telemetry"
-version = "0.4.3"
+version = "0.4.4"
 dependencies = [
  "async-trait",
  "common-error",
@@ -1703,7 +1703,7 @@ dependencies = [

 [[package]]
 name = "common-grpc"
-version = "0.4.3"
+version = "0.4.4"
 dependencies = [
  "api",
  "arrow-flight",
@@ -1733,7 +1733,7 @@ dependencies = [

 [[package]]
 name = "common-grpc-expr"
-version = "0.4.3"
+version = "0.4.4"
 dependencies = [
  "api",
  "async-trait",
@@ -1752,7 +1752,7 @@ dependencies = [

 [[package]]
 name = "common-macro"
-version = "0.4.3"
+version = "0.4.4"
 dependencies = [
  "arc-swap",
  "common-query",
@@ -1767,7 +1767,7 @@ dependencies = [

 [[package]]
 name = "common-mem-prof"
-version = "0.4.3"
+version = "0.4.4"
 dependencies = [
  "common-error",
  "common-macro",
@@ -1780,7 +1780,7 @@ dependencies = [

 [[package]]
 name = "common-meta"
-version = "0.4.3"
+version = "0.4.4"
 dependencies = [
  "api",
  "async-recursion",
@@ -1819,7 +1819,7 @@ dependencies = [

 [[package]]
 name = "common-procedure"
-version = "0.4.3"
+version = "0.4.4"
 dependencies = [
  "async-stream",
  "async-trait",
@@ -1843,7 +1843,7 @@ dependencies = [

 [[package]]
 name = "common-procedure-test"
-version = "0.4.3"
+version = "0.4.4"
 dependencies = [
  "async-trait",
  "common-procedure",
@@ -1851,7 +1851,7 @@ dependencies = [

 [[package]]
 name = "common-query"
-version = "0.4.3"
+version = "0.4.4"
 dependencies = [
  "api",
  "async-trait",
@@ -1874,7 +1874,7 @@ dependencies = [

 [[package]]
 name = "common-recordbatch"
-version = "0.4.3"
+version = "0.4.4"
 dependencies = [
  "common-error",
  "common-macro",
@@ -1891,7 +1891,7 @@ dependencies = [

 [[package]]
 name = "common-runtime"
-version = "0.4.3"
+version = "0.4.4"
 dependencies = [
  "async-trait",
  "common-error",
@@ -1909,7 +1909,7 @@ dependencies = [

 [[package]]
 name = "common-telemetry"
-version = "0.4.3"
+version = "0.4.4"
 dependencies = [
  "backtrace",
  "common-error",
@@ -1934,7 +1934,7 @@ dependencies = [

 [[package]]
 name = "common-test-util"
-version = "0.4.3"
+version = "0.4.4"
 dependencies = [
  "once_cell",
  "rand",
@@ -1943,7 +1943,7 @@ dependencies = [

 [[package]]
 name = "common-time"
-version = "0.4.3"
+version = "0.4.4"
 dependencies = [
  "arrow",
  "chrono",
@@ -1958,7 +1958,7 @@ dependencies = [

 [[package]]
 name = "common-version"
-version = "0.4.3"
+version = "0.4.4"
 dependencies = [
  "build-data",
 ]
@@ -2581,7 +2581,7 @@ dependencies = [

 [[package]]
 name = "datanode"
-version = "0.4.3"
+version = "0.4.4"
 dependencies = [
  "api",
  "arrow-flight",
@@ -2640,7 +2640,7 @@ dependencies = [
  "snafu",
  "sql",
  "store-api",
- "substrait 0.4.3",
+ "substrait 0.4.4",
  "table",
  "tokio",
  "tokio-stream",
@@ -2654,7 +2654,7 @@ dependencies = [

 [[package]]
 name = "datatypes"
-version = "0.4.3"
+version = "0.4.4"
 dependencies = [
  "arrow",
  "arrow-array",
@@ -3092,7 +3092,7 @@ dependencies = [

 [[package]]
 name = "file-engine"
-version = "0.4.3"
+version = "0.4.4"
 dependencies = [
  "api",
  "async-trait",
@@ -3190,9 +3190,9 @@ checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1"

 [[package]]
 name = "form_urlencoded"
-version = "1.2.0"
+version = "1.2.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a62bc1cf6f830c2ec14a513a9fb124d0a213a629668a4186f329db21fe045652"
+checksum = "e13624c2627564efccf4934284bdd98cbaa14e79b0b5a141218e507b3a823456"
 dependencies = [
  "percent-encoding",
 ]
@@ -3208,7 +3208,7 @@ dependencies = [

 [[package]]
 name = "frontend"
-version = "0.4.3"
+version = "0.4.4"
 dependencies = [
  "api",
  "arc-swap",
@@ -3272,7 +3272,7 @@ dependencies = [
  "sqlparser 0.38.0 (git+https://github.com/GreptimeTeam/sqlparser-rs.git?rev=0fbae07d0c46dc18e3381c406d8b9b8abef6b1fd)",
  "store-api",
  "strfmt",
- "substrait 0.4.3",
+ "substrait 0.4.4",
  "table",
  "tokio",
  "toml 0.7.8",
@@ -3875,9 +3875,9 @@ checksum = "b9e0384b61958566e926dc50660321d12159025e767c18e043daf26b70104c39"

 [[package]]
 name = "idna"
-version = "0.4.0"
+version = "0.5.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "7d20d6b07bfbc108882d88ed8e37d39636dcc260e15e30c45e6ba089610b917c"
+checksum = "634d9b1461af396cad843f47fdba5597a4f9e6ddd4bfb6ff5d85028c25cb12f6"
 dependencies = [
  "unicode-bidi",
  "unicode-normalization",
@@ -3910,7 +3910,7 @@ dependencies = [

 [[package]]
 name = "index"
-version = "0.4.3"
+version = "0.4.4"
 dependencies = [
  "async-trait",
  "common-base",
@@ -4365,7 +4365,7 @@ checksum = "b5e6163cb8c49088c2c36f57875e58ccd8c87c7427f7fbd50ea6710b2f3f2e8f"

 [[package]]
 name = "log-store"
-version = "0.4.3"
+version = "0.4.4"
 dependencies = [
  "async-stream",
  "async-trait",
@@ -4635,7 +4635,7 @@ dependencies = [

 [[package]]
 name = "meta-client"
-version = "0.4.3"
+version = "0.4.4"
 dependencies = [
  "api",
  "async-trait",
@@ -4665,7 +4665,7 @@ dependencies = [

 [[package]]
 name = "meta-srv"
-version = "0.4.3"
+version = "0.4.4"
 dependencies = [
  "anymap",
  "api",
@@ -4743,7 +4743,7 @@ dependencies = [

 [[package]]
 name = "metric-engine"
-version = "0.4.3"
+version = "0.4.4"
 dependencies = [
  "ahash 0.8.6",
  "api",
@@ -4814,7 +4814,7 @@ dependencies = [

 [[package]]
 name = "mito2"
-version = "0.4.3"
+version = "0.4.4"
 dependencies = [
  "anymap",
  "api",
@@ -5272,7 +5272,7 @@ dependencies = [

 [[package]]
 name = "object-store"
-version = "0.4.3"
+version = "0.4.4"
 dependencies = [
  "anyhow",
  "async-trait",
@@ -5519,7 +5519,7 @@ dependencies = [

 [[package]]
 name = "operator"
-version = "0.4.3"
+version = "0.4.4"
 dependencies = [
  "api",
  "async-compat",
@@ -5564,7 +5564,7 @@ dependencies = [
  "sql",
  "sqlparser 0.38.0 (git+https://github.com/GreptimeTeam/sqlparser-rs.git?rev=0fbae07d0c46dc18e3381c406d8b9b8abef6b1fd)",
  "store-api",
- "substrait 0.4.3",
+ "substrait 0.4.4",
  "table",
  "tokio",
  "tonic 0.10.2",
@@ -5784,7 +5784,7 @@ dependencies = [

 [[package]]
 name = "partition"
-version = "0.4.3"
+version = "0.4.4"
 dependencies = [
  "api",
  "async-trait",
@@ -5876,9 +5876,9 @@ dependencies = [

 [[package]]
 name = "percent-encoding"
-version = "2.3.0"
+version = "2.3.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "9b2a4787296e9989611394c33f193f676704af1686e70b8f8033ab5ba9a35a94"
+checksum = "e3148f5046208a5d56bcfc03053e3ca6334e51da8dfb19b6cdc8b306fae3283e"

 [[package]]
 name = "pest"
@@ -6122,7 +6122,7 @@ dependencies = [

 [[package]]
 name = "plugins"
-version = "0.4.3"
+version = "0.4.4"
 dependencies = [
  "auth",
  "common-base",
@@ -6349,7 +6349,7 @@ dependencies = [

 [[package]]
 name = "promql"
-version = "0.4.3"
+version = "0.4.4"
 dependencies = [
  "async-recursion",
  "async-trait",
@@ -6558,7 +6558,7 @@ dependencies = [

 [[package]]
 name = "puffin"
-version = "0.4.3"
+version = "0.4.4"
 dependencies = [
  "async-trait",
  "bitflags 2.4.1",
@@ -6669,7 +6669,7 @@ dependencies = [

 [[package]]
 name = "query"
-version = "0.4.3"
+version = "0.4.4"
 dependencies = [
  "ahash 0.8.6",
  "api",
@@ -6727,7 +6727,7 @@ dependencies = [
  "stats-cli",
  "store-api",
  "streaming-stats",
- "substrait 0.4.3",
+ "substrait 0.4.4",
  "table",
  "tokio",
  "tokio-stream",
@@ -7928,7 +7928,7 @@ checksum = "94143f37725109f92c262ed2cf5e59bce7498c01bcc1502d7b9afe439a4e9f49"

 [[package]]
 name = "script"
-version = "0.4.3"
+version = "0.4.4"
 dependencies = [
  "api",
  "arc-swap",
@@ -8188,7 +8188,7 @@ dependencies = [

 [[package]]
 name = "servers"
-version = "0.4.3"
+version = "0.4.4"
 dependencies = [
  "aide",
  "api",
@@ -8277,11 +8277,12 @@ dependencies = [
  "tonic-reflection",
  "tower",
  "tower-http",
+ "urlencoding",
 ]

 [[package]]
 name = "session"
-version = "0.4.3"
+version = "0.4.4"
 dependencies = [
  "api",
  "arc-swap",
@@ -8542,7 +8543,7 @@ dependencies = [

 [[package]]
 name = "sql"
-version = "0.4.3"
+version = "0.4.4"
 dependencies = [
  "api",
  "common-base",
@@ -8594,7 +8595,7 @@ dependencies = [

 [[package]]
 name = "sqlness-runner"
-version = "0.4.3"
+version = "0.4.4"
 dependencies = [
  "async-trait",
  "clap 4.4.8",
@@ -8800,7 +8801,7 @@ dependencies = [

 [[package]]
 name = "store-api"
-version = "0.4.3"
+version = "0.4.4"
 dependencies = [
  "api",
  "aquamarine",
@@ -8939,7 +8940,7 @@ dependencies = [

 [[package]]
 name = "substrait"
-version = "0.4.3"
+version = "0.4.4"
 dependencies = [
  "async-recursion",
  "async-trait",
@@ -9087,7 +9088,7 @@ dependencies = [

 [[package]]
 name = "table"
-version = "0.4.3"
+version = "0.4.4"
 dependencies = [
  "anymap",
  "async-trait",
@@ -9193,7 +9194,7 @@ dependencies = [

 [[package]]
 name = "tests-integration"
-version = "0.4.3"
+version = "0.4.4"
 dependencies = [
  "api",
  "async-trait",
@@ -9249,7 +9250,7 @@ dependencies = [
  "sql",
  "sqlx",
  "store-api",
- "substrait 0.4.3",
+ "substrait 0.4.4",
  "table",
  "tempfile",
  "time",
@@ -10248,9 +10249,9 @@ checksum = "8ecb6da28b8a351d773b68d5825ac39017e680750f980f3a1a85cd8dd28a47c1"

 [[package]]
 name = "url"
-version = "2.4.1"
+version = "2.5.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "143b538f18257fac9cad154828a57c6bf5157e1aa604d4816b5995bf6de87ae5"
+checksum = "31e6302e3bb753d46e83516cae55ae196fc0c309407cf11ab35cc51a4c2a4633"
 dependencies = [
  "form_urlencoded",
  "idna",
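Two kinds of change make up the lockfile churn above: the workspace-wide crate bump from 0.4.3 to 0.4.4 (one entry per member crate, plus the matching "substrait 0.4.4" dependency edges), and upgrades of the url family of crates: form_urlencoded 1.2.0 to 1.2.1, idna 0.4.0 to 0.5.0, percent-encoding 2.3.0 to 2.3.1, and url 2.4.1 to 2.5.0. The new urlencoding dependency of the servers crate also appears here.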
@@ -58,7 +58,7 @@ members = [
 resolver = "2"

 [workspace.package]
-version = "0.4.3"
+version = "0.4.4"
 edition = "2021"
 license = "Apache-2.0"
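This single line is the actual release bump: member crates declare `version.workspace = true` (visible in the common-procedure manifest further down), so they all inherit 0.4.4 from the root Cargo.toml, which is why Cargo.lock shows the same version change for every workspace crate.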
@@ -135,6 +135,7 @@ To write and query data, GreptimeDB is compatible with multiple [protocols and c
 - [GreptimeDB Java Client](https://github.com/GreptimeTeam/greptimedb-client-java)
 - [GreptimeDB Python Client](https://github.com/GreptimeTeam/greptimedb-client-py) (WIP)
 - [GreptimeDB Rust Client](https://github.com/GreptimeTeam/greptimedb-client-rust)
+- [GreptimeDB JavaScript Client](https://github.com/GreptimeTeam/greptime-js-sdk)

 ## Project Status
@@ -64,8 +64,8 @@ worker_channel_size = 128
 worker_request_batch_size = 64
 # Number of meta action updated to trigger a new checkpoint for the manifest
 manifest_checkpoint_distance = 10
-# Manifest compression type
-manifest_compress_type = "uncompressed"
+# Whether to compress manifest and checkpoint file by gzip (default false).
+compress_manifest = false
 # Max number of running background jobs
 max_background_jobs = 4
 # Interval to auto flush a region if it has not flushed yet.

@@ -133,8 +133,8 @@ worker_channel_size = 128
 worker_request_batch_size = 64
 # Number of meta action updated to trigger a new checkpoint for the manifest
 manifest_checkpoint_distance = 10
-# Manifest compression type
-manifest_compress_type = "uncompressed"
+# Whether to compress manifest and checkpoint file by gzip (default false).
+compress_manifest = false
 # Max number of running background jobs
 max_background_jobs = 4
 # Interval to auto flush a region if it has not flushed yet.
@@ -100,6 +100,9 @@ struct StartCommand {
     http_timeout: Option<u64>,
     #[clap(long, default_value = "GREPTIMEDB_METASRV")]
     env_prefix: String,
+    /// The working home directory of this metasrv instance.
+    #[clap(long)]
+    data_home: Option<String>,
 }

 impl StartCommand {
@@ -152,6 +155,10 @@ impl StartCommand {
             opts.http.timeout = Duration::from_secs(http_timeout);
         }

+        if let Some(data_home) = &self.data_home {
+            opts.data_home = data_home.clone();
+        }
+
         // Disable dashboard in metasrv.
         opts.http.disable_dashboard = true;
@@ -227,6 +227,9 @@ struct StartCommand {
     user_provider: Option<String>,
     #[clap(long, default_value = "GREPTIMEDB_STANDALONE")]
     env_prefix: String,
+    /// The working home directory of this standalone instance.
+    #[clap(long)]
+    data_home: Option<String>,
 }

 impl StartCommand {
@@ -257,6 +260,10 @@ impl StartCommand {
             opts.http.addr = addr.clone()
         }

+        if let Some(data_home) = &self.data_home {
+            opts.storage.data_home = data_home.clone();
+        }
+
         if let Some(addr) = &self.rpc_addr {
             // frontend grpc addr conflict with datanode default grpc addr
             let datanode_grpc_addr = DatanodeOptions::default().rpc_addr;
@@ -42,5 +42,6 @@ tonic.workspace = true

 [dev-dependencies]
 chrono.workspace = true
+common-procedure = { workspace = true, features = ["testing"] }
 datatypes.workspace = true
 hyper = { version = "0.14", features = ["full"] }
@@ -43,9 +43,9 @@ use crate::rpc::ddl::{
     TruncateTableTask,
 };
 use crate::rpc::router::RegionRoute;

 pub type DdlManagerRef = Arc<DdlManager>;

+/// The [DdlManager] provides the ability to execute Ddl.
 pub struct DdlManager {
     procedure_manager: ProcedureManagerRef,
     datanode_manager: DatanodeManagerRef,
@@ -55,26 +55,31 @@ pub struct DdlManager {
 }

 impl DdlManager {
-    pub fn new(
+    /// Returns a new [DdlManager] with all Ddl [BoxedProcedureLoader](common_procedure::procedure::BoxedProcedureLoader)s registered.
+    pub fn try_new(
         procedure_manager: ProcedureManagerRef,
         datanode_clients: DatanodeManagerRef,
         cache_invalidator: CacheInvalidatorRef,
         table_metadata_manager: TableMetadataManagerRef,
         table_meta_allocator: TableMetadataAllocatorRef,
-    ) -> Self {
-        Self {
+    ) -> Result<Self> {
+        let manager = Self {
             procedure_manager,
             datanode_manager: datanode_clients,
             cache_invalidator,
             table_metadata_manager,
             table_meta_allocator,
-        }
+        };
+        manager.register_loaders()?;
+        Ok(manager)
     }

+    /// Returns the [TableMetadataManagerRef].
     pub fn table_metadata_manager(&self) -> &TableMetadataManagerRef {
         &self.table_metadata_manager
     }

+    /// Returns the [DdlContext]
     pub fn create_context(&self) -> DdlContext {
         DdlContext {
             datanode_manager: self.datanode_manager.clone(),
@@ -83,7 +88,7 @@ impl DdlManager {
         }
     }

-    pub fn try_start(&self) -> Result<()> {
+    fn register_loaders(&self) -> Result<()> {
         let context = self.create_context();

         self.procedure_manager
@@ -142,6 +147,7 @@ impl DdlManager {
     }

     #[tracing::instrument(skip_all)]
+    /// Submits and executes an alter table task.
     pub async fn submit_alter_table_task(
         &self,
         cluster_id: u64,
@@ -159,6 +165,7 @@ impl DdlManager {
     }

     #[tracing::instrument(skip_all)]
+    /// Submits and executes a create table task.
     pub async fn submit_create_table_task(
         &self,
         cluster_id: u64,
@@ -176,6 +183,7 @@ impl DdlManager {
     }

     #[tracing::instrument(skip_all)]
+    /// Submits and executes a drop table task.
     pub async fn submit_drop_table_task(
         &self,
         cluster_id: u64,
@@ -199,6 +207,7 @@ impl DdlManager {
     }

     #[tracing::instrument(skip_all)]
+    /// Submits and executes a truncate table task.
     pub async fn submit_truncate_table_task(
         &self,
         cluster_id: u64,
@@ -416,3 +425,80 @@ impl DdlTaskExecutor for DdlManager {
         .await
     }
 }
+
+#[cfg(test)]
+mod tests {
+    use std::sync::Arc;
+
+    use api::v1::meta::Partition;
+    use common_procedure::local::LocalManager;
+    use table::metadata::{RawTableInfo, TableId};
+
+    use super::DdlManager;
+    use crate::cache_invalidator::DummyCacheInvalidator;
+    use crate::datanode_manager::{DatanodeManager, DatanodeRef};
+    use crate::ddl::alter_table::AlterTableProcedure;
+    use crate::ddl::create_table::CreateTableProcedure;
+    use crate::ddl::drop_table::DropTableProcedure;
+    use crate::ddl::truncate_table::TruncateTableProcedure;
+    use crate::ddl::{TableMetadataAllocator, TableMetadataAllocatorContext};
+    use crate::error::Result;
+    use crate::key::TableMetadataManager;
+    use crate::kv_backend::memory::MemoryKvBackend;
+    use crate::peer::Peer;
+    use crate::rpc::router::RegionRoute;
+    use crate::state_store::KvStateStore;
+
+    /// A dummy implemented [DatanodeManager].
+    pub struct DummyDatanodeManager;
+
+    #[async_trait::async_trait]
+    impl DatanodeManager for DummyDatanodeManager {
+        async fn datanode(&self, _datanode: &Peer) -> DatanodeRef {
+            unimplemented!()
+        }
+    }
+
+    /// A dummy implemented [TableMetadataAllocator].
+    pub struct DummyTableMetadataAllocator;
+
+    #[async_trait::async_trait]
+    impl TableMetadataAllocator for DummyTableMetadataAllocator {
+        async fn create(
+            &self,
+            _ctx: &TableMetadataAllocatorContext,
+            _table_info: &mut RawTableInfo,
+            _partitions: &[Partition],
+        ) -> Result<(TableId, Vec<RegionRoute>)> {
+            unimplemented!()
+        }
+    }
+
+    #[test]
+    fn test_try_new() {
+        let kv_backend = Arc::new(MemoryKvBackend::new());
+        let table_metadata_manager = Arc::new(TableMetadataManager::new(kv_backend.clone()));
+
+        let state_store = Arc::new(KvStateStore::new(kv_backend));
+        let procedure_manager = Arc::new(LocalManager::new(Default::default(), state_store));
+
+        let _ = DdlManager::try_new(
+            procedure_manager.clone(),
+            Arc::new(DummyDatanodeManager),
+            Arc::new(DummyCacheInvalidator),
+            table_metadata_manager,
+            Arc::new(DummyTableMetadataAllocator),
+        );
+
+        let expected_loaders = vec![
+            CreateTableProcedure::TYPE_NAME,
+            AlterTableProcedure::TYPE_NAME,
+            DropTableProcedure::TYPE_NAME,
+            TruncateTableProcedure::TYPE_NAME,
+        ];
+
+        for loader in expected_loaders {
+            assert!(procedure_manager.contains_loader(loader));
+        }
+    }
+}
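The constructor refactor above turns `try_start` into a private `register_loaders` that runs inside the new fallible `try_new`, so a `DdlManager` can never be observed with its procedure loaders unregistered. A minimal, runnable sketch of that pattern follows; the types and loader names here are simplified stand-ins, not GreptimeDB's real APIs:

struct Manager {
    registered: Vec<&'static str>,
}

impl Manager {
    /// Fallible constructor: registration failures surface at construction
    /// time instead of depending on a separate start call later.
    fn try_new() -> Result<Self, String> {
        let mut manager = Self { registered: Vec::new() };
        manager.register_loaders()?;
        Ok(manager) // callers never see a half-initialized value
    }

    fn register_loaders(&mut self) -> Result<(), String> {
        for name in ["CreateTable", "AlterTable", "DropTable", "TruncateTable"] {
            self.registered.push(name); // the real method can fail, hence Result
        }
        Ok(())
    }
}

fn main() {
    let manager = Manager::try_new().expect("loader registration failed");
    assert_eq!(manager.registered.len(), 4);
}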
@@ -89,7 +89,7 @@ use crate::DatanodeId;

 pub const REMOVED_PREFIX: &str = "__removed";

-const NAME_PATTERN: &str = r"[a-zA-Z_:-][a-zA-Z0-9_:\-\.]*";
+pub const NAME_PATTERN: &str = r"[a-zA-Z_:-][a-zA-Z0-9_:\-\.]*";

 const DATANODE_TABLE_KEY_PREFIX: &str = "__dn_table";
 const TABLE_REGION_KEY_PREFIX: &str = "__table_region";
@@ -4,6 +4,9 @@ version.workspace = true
 edition.workspace = true
 license.workspace = true

+[features]
+testing = []
+
 [dependencies]
 async-stream.workspace = true
 async-trait.workspace = true
@@ -527,6 +527,13 @@ impl LocalManager {

         Ok(())
     }
+
+    #[cfg(any(test, feature = "testing"))]
+    /// Returns true if contains a specified loader.
+    pub fn contains_loader(&self, name: &str) -> bool {
+        let loaders = self.manager_ctx.loaders.lock().unwrap();
+        loaders.contains_key(name)
+    }
 }

 #[async_trait]
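`#[cfg(any(test, feature = "testing"))]` makes `contains_loader` available to common-procedure's own unit tests and, through the new `testing` feature, to downstream crates; the `common-procedure = { workspace = true, features = ["testing"] }` dev-dependency added earlier is what lets `test_try_new` in common-meta call it.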
@@ -26,6 +26,12 @@ use store_api::storage::RegionNumber;
 #[snafu(visibility(pub))]
 #[stack_trace_debug]
 pub enum Error {
+    #[snafu(display("Failed to init ddl manager"))]
+    InitDdlManager {
+        location: Location,
+        source: common_meta::error::Error,
+    },
+
     #[snafu(display("Failed to invalidate table cache"))]
     InvalidateTableCache {
         location: Location,
@@ -319,7 +325,9 @@ impl ErrorExt for Error {

             Error::ParseSql { source, .. } => source.status_code(),

-            Error::InvalidateTableCache { source, .. } => source.status_code(),
+            Error::InvalidateTableCache { source, .. } | Error::InitDdlManager { source, .. } => {
+                source.status_code()
+            }

             Error::Table { source, .. }
             | Error::CopyTable { source, .. }
@@ -328,13 +328,16 @@ impl Instance {
         let table_metadata_manager = Arc::new(TableMetadataManager::new(kv_backend.clone()));

         let cache_invalidator = Arc::new(DummyCacheInvalidator);
-        let ddl_executor = Arc::new(DdlManager::new(
-            procedure_manager,
-            datanode_manager,
-            cache_invalidator.clone(),
-            table_metadata_manager.clone(),
-            Arc::new(StandaloneTableMetadataCreator::new(kv_backend.clone())),
-        ));
+        let ddl_executor = Arc::new(
+            DdlManager::try_new(
+                procedure_manager,
+                datanode_manager,
+                cache_invalidator.clone(),
+                table_metadata_manager.clone(),
+                Arc::new(StandaloneTableMetadataCreator::new(kv_backend.clone())),
+            )
+            .context(error::InitDdlManagerSnafu)?,
+        );

         let statement_executor = Arc::new(StatementExecutor::new(
             catalog_manager.clone(),
@@ -43,6 +43,12 @@ pub enum Error {
         region_id: RegionId,
     },

+    #[snafu(display("Failed to init ddl manager"))]
+    InitDdlManager {
+        location: Location,
+        source: common_meta::error::Error,
+    },
+
     #[snafu(display("Failed to create default catalog and schema"))]
     InitMetadata {
         location: Location,
@@ -685,7 +691,9 @@ impl ErrorExt for Error {
             | Error::UpdateTableRoute { source, .. }
             | Error::GetFullTableInfo { source, .. } => source.status_code(),

-            Error::InitMetadata { source, .. } => source.status_code(),
+            Error::InitMetadata { source, .. } | Error::InitDdlManager { source, .. } => {
+                source.status_code()
+            }

             Error::Other { source, .. } => source.status_code(),
         }
@@ -28,10 +28,11 @@ use common_meta::sequence::{Sequence, SequenceRef};
 use common_meta::state_store::KvStateStore;
 use common_procedure::local::{LocalManager, ManagerConfig};
 use common_procedure::ProcedureManagerRef;
+use snafu::ResultExt;

 use crate::cache_invalidator::MetasrvCacheInvalidator;
 use crate::cluster::{MetaPeerClientBuilder, MetaPeerClientRef};
-use crate::error::Result;
+use crate::error::{self, Result};
 use crate::greptimedb_telemetry::get_greptimedb_telemetry_task;
 use crate::handler::check_leader_handler::CheckLeaderHandler;
 use crate::handler::collect_stats_handler::CollectStatsHandler;
@@ -196,8 +197,7 @@ impl MetaSrvBuilder {
             &table_metadata_manager,
             (&selector, &selector_ctx),
             &table_id_sequence,
-        );
-        let _ = ddl_manager.try_start();
+        )?;
         let opening_region_keeper = Arc::new(OpeningRegionKeeper::default());

         let handler_group = match handler_group {
@@ -330,7 +330,7 @@ fn build_ddl_manager(
     table_metadata_manager: &TableMetadataManagerRef,
     (selector, selector_ctx): (&SelectorRef, &SelectorContext),
     table_id_sequence: &SequenceRef,
-) -> DdlManagerRef {
+) -> Result<DdlManagerRef> {
     let datanode_clients = datanode_clients.unwrap_or_else(|| {
         let datanode_client_channel_config = ChannelConfig::new()
             .timeout(Duration::from_millis(
@@ -355,12 +355,15 @@ fn build_ddl_manager(
         table_id_sequence.clone(),
     ));

-    Arc::new(DdlManager::new(
-        procedure_manager.clone(),
-        datanode_clients,
-        cache_invalidator,
-        table_metadata_manager.clone(),
-        table_meta_allocator,
+    Ok(Arc::new(
+        DdlManager::try_new(
+            procedure_manager.clone(),
+            datanode_clients,
+            cache_invalidator,
+            table_metadata_manager.clone(),
+            table_meta_allocator,
+        )
+        .context(error::InitDdlManagerSnafu)?,
     ))
 }
@@ -17,7 +17,6 @@
 use std::time::Duration;

 use common_base::readable_size::ReadableSize;
-use common_datasource::compression::CompressionType;
 use common_telemetry::warn;
 use serde::{Deserialize, Serialize};

@@ -42,8 +41,8 @@ pub struct MitoConfig {
     /// Number of meta action updated to trigger a new checkpoint
     /// for the manifest (default 10).
     pub manifest_checkpoint_distance: u64,
-    /// Manifest compression type (default uncompressed).
-    pub manifest_compress_type: CompressionType,
+    /// Whether to compress manifest and checkpoint file by gzip (default false).
+    pub compress_manifest: bool,

     // Background job configs:
     /// Max number of running background jobs (default 4).
@@ -78,7 +77,7 @@ impl Default for MitoConfig {
             worker_channel_size: 128,
             worker_request_batch_size: 64,
             manifest_checkpoint_distance: 10,
-            manifest_compress_type: CompressionType::Uncompressed,
+            compress_manifest: false,
             max_background_jobs: DEFAULT_MAX_BG_JOB,
             auto_flush_interval: Duration::from_secs(30 * 60),
             global_write_buffer_size: ReadableSize::gb(1),
@@ -29,7 +29,7 @@ use super::*;
 use crate::region::version::VersionControlData;
 use crate::test_util::{
     build_delete_rows_for_key, build_rows, build_rows_for_key, delete_rows, delete_rows_schema,
-    flush_region, put_rows, rows_schema, CreateRequestBuilder, TestEnv,
+    flush_region, put_rows, reopen_region, rows_schema, CreateRequestBuilder, TestEnv,
 };

 #[tokio::test]
@@ -353,6 +353,55 @@ async fn test_put_delete() {
     assert_eq!(expected, batches.pretty_print().unwrap());
 }

+#[tokio::test]
+async fn test_delete_not_null_fields() {
+    let mut env = TestEnv::new();
+    let engine = env.create_engine(MitoConfig::default()).await;
+
+    let region_id = RegionId::new(1, 1);
+    let request = CreateRequestBuilder::new().all_not_null(true).build();
+    let region_dir = request.region_dir.clone();
+
+    let column_schemas = rows_schema(&request);
+    let delete_schema = delete_rows_schema(&request);
+    engine
+        .handle_request(region_id, RegionRequest::Create(request))
+        .await
+        .unwrap();
+
+    let rows = Rows {
+        schema: column_schemas.clone(),
+        rows: build_rows_for_key("a", 0, 4, 0),
+    };
+    put_rows(&engine, region_id, rows).await;
+    // Delete (a, 2)
+    let rows = Rows {
+        schema: delete_schema.clone(),
+        rows: build_delete_rows_for_key("a", 2, 3),
+    };
+    delete_rows(&engine, region_id, rows).await;
+
+    let request = ScanRequest::default();
+    let stream = engine.handle_query(region_id, request).await.unwrap();
+    let batches = RecordBatches::try_collect(stream).await.unwrap();
+    let expected = "\
++-------+---------+---------------------+
+| tag_0 | field_0 | ts                  |
++-------+---------+---------------------+
+| a     | 0.0     | 1970-01-01T00:00:00 |
+| a     | 1.0     | 1970-01-01T00:00:01 |
+| a     | 3.0     | 1970-01-01T00:00:03 |
++-------+---------+---------------------+";
+    assert_eq!(expected, batches.pretty_print().unwrap());
+
+    // Reopen and scan again.
+    reopen_region(&engine, region_id, region_dir, false).await;
+    let request = ScanRequest::default();
+    let stream = engine.handle_query(region_id, request).await.unwrap();
+    let batches = RecordBatches::try_collect(stream).await.unwrap();
+    assert_eq!(expected, batches.pretty_print().unwrap());
+}
+
 #[tokio::test]
 async fn test_put_overwrite() {
     let mut env = TestEnv::new();
@@ -42,6 +42,7 @@ const DEFAULT_MANIFEST_COMPRESSION_TYPE: CompressionType = CompressionType::Gzip
 /// So when we encounter problems, we need to fall back to `FALL_BACK_COMPRESS_TYPE` for processing.
 const FALL_BACK_COMPRESS_TYPE: CompressionType = CompressionType::Uncompressed;

+/// Returns the [CompressionType] according to whether to compress manifest files.
 #[inline]
 pub const fn manifest_compress_type(compress: bool) -> CompressionType {
     if compress {
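The helper's body is cut off in this page right after `if compress {`. Given the two constants above it, the intended shape is presumably the following; this completion is an inference, not text from the patch:

pub const fn manifest_compress_type(compress: bool) -> CompressionType {
    if compress {
        DEFAULT_MANIFEST_COMPRESSION_TYPE // gzip
    } else {
        FALL_BACK_COMPRESS_TYPE // uncompressed
    }
}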
@@ -33,6 +33,7 @@ use crate::cache::CacheManagerRef;
 use crate::config::MitoConfig;
 use crate::error::{EmptyRegionDirSnafu, ObjectStoreNotFoundSnafu, RegionCorruptedSnafu, Result};
 use crate::manifest::manager::{RegionManifestManager, RegionManifestOptions};
+use crate::manifest::storage::manifest_compress_type;
 use crate::memtable::MemtableBuilderRef;
 use crate::region::options::RegionOptions;
 use crate::region::version::{VersionBuilder, VersionControl, VersionControlRef};
@@ -259,7 +260,9 @@ impl RegionOpener {
         Ok(RegionManifestOptions {
             manifest_dir: new_manifest_dir(&self.region_dir),
             object_store,
-            compress_type: config.manifest_compress_type,
+            // We don't allow users to set the compression algorithm as we use it as a file suffix.
+            // Currently, the manifest storage doesn't have good support for changing compression algorithms.
+            compress_type: manifest_compress_type(config.compress_manifest),
             checkpoint_distance: config.manifest_checkpoint_distance,
         })
     }
@@ -274,17 +274,21 @@ impl WriteRequest {

     /// Checks whether we should allow a row that doesn't provide this column.
     fn check_missing_column(&self, column: &ColumnMetadata) -> Result<()> {
-        // For delete request, all tags and the timestamp are required. We don't fill default
-        // tag or timestamp while deleting rows.
-        ensure!(
-            self.op_type != OpType::Delete || column.semantic_type == SemanticType::Field,
-            InvalidRequestSnafu {
-                region_id: self.region_id,
-                reason: format!("delete requests need column {}", column.column_schema.name),
-            }
-        );
+        if self.op_type == OpType::Delete {
+            if column.semantic_type == SemanticType::Field {
+                // For delete request, all tags and the timestamp are required. We don't fill default
+                // tag or timestamp while deleting rows.
+                return Ok(());
+            } else {
+                return InvalidRequestSnafu {
+                    region_id: self.region_id,
+                    reason: format!("delete requests need column {}", column.column_schema.name),
+                }
+                .fail();
+            }
+        }

-        // Checks whether they have default value.
+        // Not a delete request. Checks whether they have default value.
         ensure!(
             column.column_schema.is_nullable()
                 || column.column_schema.default_constraint().is_some(),
@@ -990,7 +994,7 @@ mod tests {
         assert_eq!(expect_rows, request.rows);
     }

-    fn region_metadata_two_fields() -> RegionMetadata {
+    fn builder_with_ts_tag() -> RegionMetadataBuilder {
         let mut builder = RegionMetadataBuilder::new(RegionId::new(1, 1));
         builder
             .push_column_metadata(ColumnMetadata {
@@ -1011,6 +1015,13 @@ mod tests {
                 semantic_type: SemanticType::Tag,
                 column_id: 2,
             })
             .primary_key(vec![2]);
+        builder
+    }
+
+    fn region_metadata_two_fields() -> RegionMetadata {
+        let mut builder = builder_with_ts_tag();
         builder
             .push_column_metadata(ColumnMetadata {
                 column_schema: datatypes::schema::ColumnSchema::new(
                     "f0",
@@ -1033,8 +1044,7 @@ mod tests {
                 .unwrap(),
                 semantic_type: SemanticType::Field,
                 column_id: 4,
-            })
-            .primary_key(vec![2]);
+            });
         builder.build().unwrap()
     }
@@ -1100,6 +1110,75 @@ mod tests {
         assert_eq!(expect_rows, request.rows);
     }

+    #[test]
+    fn test_fill_missing_without_default_in_delete() {
+        let mut builder = builder_with_ts_tag();
+        builder
+            // f0 is nullable.
+            .push_column_metadata(ColumnMetadata {
+                column_schema: datatypes::schema::ColumnSchema::new(
+                    "f0",
+                    ConcreteDataType::int64_datatype(),
+                    true,
+                ),
+                semantic_type: SemanticType::Field,
+                column_id: 3,
+            })
+            // f1 is not nullable and doesn't have a default.
+            .push_column_metadata(ColumnMetadata {
+                column_schema: datatypes::schema::ColumnSchema::new(
+                    "f1",
+                    ConcreteDataType::int64_datatype(),
+                    false,
+                ),
+                semantic_type: SemanticType::Field,
+                column_id: 4,
+            });
+        let metadata = builder.build().unwrap();
+
+        let rows = Rows {
+            schema: vec![
+                new_column_schema("k0", ColumnDataType::Int64, SemanticType::Tag),
+                new_column_schema(
+                    "ts",
+                    ColumnDataType::TimestampMillisecond,
+                    SemanticType::Timestamp,
+                ),
+            ],
+            // Missing f0 (nullable), f1 (not nullable).
+            rows: vec![Row {
+                values: vec![i64_value(100), ts_ms_value(1)],
+            }],
+        };
+        let mut request = WriteRequest::new(RegionId::new(1, 1), OpType::Delete, rows).unwrap();
+        let err = request.check_schema(&metadata).unwrap_err();
+        assert!(err.is_fill_default());
+        request.fill_missing_columns(&metadata).unwrap();
+
+        let expect_rows = Rows {
+            schema: vec![
+                new_column_schema("k0", ColumnDataType::Int64, SemanticType::Tag),
+                new_column_schema(
+                    "ts",
+                    ColumnDataType::TimestampMillisecond,
+                    SemanticType::Timestamp,
+                ),
+                new_column_schema("f0", ColumnDataType::Int64, SemanticType::Field),
+                new_column_schema("f1", ColumnDataType::Int64, SemanticType::Field),
+            ],
+            // Column f1 is not nullable and we use 0 for padding.
+            rows: vec![Row {
+                values: vec![
+                    i64_value(100),
+                    ts_ms_value(1),
+                    Value { value_data: None },
+                    i64_value(0),
+                ],
+            }],
+        };
+        assert_eq!(expect_rows, request.rows);
+    }
+
     #[test]
     fn test_no_default() {
         let rows = Rows {
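The rewritten check encodes the rule that a delete is identified by its tags and timestamp alone: a missing field column is tolerated (the new test shows it padded with null when nullable and a zero value when not), while a missing tag or timestamp is still rejected, because those are needed to locate the row being deleted.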
@@ -277,6 +277,7 @@ pub struct CreateRequestBuilder {
     field_num: usize,
     options: HashMap<String, String>,
     primary_key: Option<Vec<ColumnId>>,
+    all_not_null: bool,
 }

 impl Default for CreateRequestBuilder {
@@ -287,6 +288,7 @@ impl Default for CreateRequestBuilder {
             field_num: 1,
             options: HashMap::new(),
             primary_key: None,
+            all_not_null: false,
         }
     }
 }
@@ -321,21 +323,29 @@ impl CreateRequestBuilder {
         self
     }

     #[must_use]
     pub fn insert_option(mut self, key: &str, value: &str) -> Self {
         self.options.insert(key.to_string(), value.to_string());
         self
     }

+    #[must_use]
+    pub fn all_not_null(mut self, value: bool) -> Self {
+        self.all_not_null = value;
+        self
+    }
+
     pub fn build(&self) -> RegionCreateRequest {
         let mut column_id = 0;
         let mut column_metadatas = Vec::with_capacity(self.tag_num + self.field_num + 1);
         let mut primary_key = Vec::with_capacity(self.tag_num);
+        let nullable = !self.all_not_null;
         for i in 0..self.tag_num {
             column_metadatas.push(ColumnMetadata {
                 column_schema: ColumnSchema::new(
                     format!("tag_{i}"),
                     ConcreteDataType::string_datatype(),
-                    true,
+                    nullable,
                 ),
                 semantic_type: SemanticType::Tag,
                 column_id,
@@ -348,7 +358,7 @@ impl CreateRequestBuilder {
                 column_schema: ColumnSchema::new(
                     format!("field_{i}"),
                     ConcreteDataType::float64_datatype(),
-                    true,
+                    nullable,
                 ),
                 semantic_type: SemanticType::Field,
                 column_id,
@@ -359,6 +369,7 @@ impl CreateRequestBuilder {
                 column_schema: ColumnSchema::new(
                     "ts",
                     ConcreteDataType::timestamp_millisecond_datatype(),
+                    // Time index is always not null.
                     false,
                 ),
                 semantic_type: SemanticType::Timestamp,
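`all_not_null(true)` flips every generated tag and field column to NOT NULL, while the time index stays NOT NULL as always; this is the switch that lets `test_delete_not_null_fields` above build a schema with no nullable, no-default fields and exercise the padding path.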
@@ -24,6 +24,7 @@ use common_catalog::format_full_table_name;
 use common_meta::cache_invalidator::Context;
 use common_meta::ddl::ExecutorContext;
 use common_meta::key::schema_name::{SchemaNameKey, SchemaNameValue};
+use common_meta::key::NAME_PATTERN;
 use common_meta::rpc::ddl::{DdlTask, SubmitDdlTaskRequest, SubmitDdlTaskResponse};
 use common_meta::rpc::router::{Partition, Partition as MetaPartition};
 use common_meta::table_name::TableName;
@@ -31,7 +32,9 @@ use common_query::Output;
 use common_telemetry::{info, tracing};
 use datatypes::prelude::ConcreteDataType;
 use datatypes::schema::RawSchema;
+use lazy_static::lazy_static;
 use partition::partition::{PartitionBound, PartitionDef};
+use regex::Regex;
 use session::context::QueryContextRef;
 use snafu::{ensure, OptionExt, ResultExt};
 use sql::ast::Value as SqlValue;
@@ -41,7 +44,7 @@ use sql::statements::sql_value_to_value;
 use sql::MAXVALUE;
 use table::dist_table::DistTable;
 use table::metadata::{self, RawTableInfo, RawTableMeta, TableId, TableInfo, TableType};
-use table::requests::{AlterTableRequest, TableOptions};
+use table::requests::{AlterKind, AlterTableRequest, TableOptions};
 use table::TableRef;

 use super::StatementExecutor;
@@ -53,6 +56,10 @@ use crate::error::{
 };
 use crate::expr_factory;

+lazy_static! {
+    static ref NAME_PATTERN_REG: Regex = Regex::new(&format!("^{NAME_PATTERN}$")).unwrap();
+}
+
 impl StatementExecutor {
     pub fn catalog_manager(&self) -> CatalogManagerRef {
         self.catalog_manager.clone()
@@ -122,6 +129,13 @@ impl StatementExecutor {
             };
         }

+        ensure!(
+            NAME_PATTERN_REG.is_match(&create_table.table_name),
+            error::UnexpectedSnafu {
+                violated: format!("Invalid table name: {}", create_table.table_name)
+            }
+        );
+
         let table_name = TableName::new(
             &create_table.catalog_name,
             &create_table.schema_name,
@@ -213,7 +227,20 @@ impl StatementExecutor {
         let request: AlterTableRequest = common_grpc_expr::alter_expr_to_request(table_id, expr)
             .context(AlterExprToRequestSnafu)?;

-        let AlterTableRequest { table_name, .. } = &request;
+        let AlterTableRequest {
+            table_name,
+            alter_kind,
+            ..
+        } = &request;
+
+        if let AlterKind::RenameTable { new_table_name } = alter_kind {
+            ensure!(
+                NAME_PATTERN_REG.is_match(new_table_name),
+                error::UnexpectedSnafu {
+                    violated: format!("Invalid table name: {}", new_table_name)
+                }
+            );
+        }

         let _ = table_info
             .meta
@@ -359,6 +386,20 @@ impl StatementExecutor {
         database: &str,
         create_if_not_exists: bool,
     ) -> Result<Output> {
+        ensure!(
+            NAME_PATTERN_REG.is_match(catalog),
+            error::UnexpectedSnafu {
+                violated: format!("Invalid catalog name: {}", catalog)
+            }
+        );
+
+        ensure!(
+            NAME_PATTERN_REG.is_match(database),
+            error::UnexpectedSnafu {
+                violated: format!("Invalid database name: {}", database)
+            }
+        );
+
         // TODO(weny): considers executing it in the procedures.
         let schema_key = SchemaNameKey::new(catalog, database);
         let exists = self
@@ -587,6 +628,13 @@ mod test {
     use super::*;
     use crate::expr_factory;

+    #[test]
+    fn test_name_is_match() {
+        assert!(!NAME_PATTERN_REG.is_match("/adaf"));
+        assert!(!NAME_PATTERN_REG.is_match("🈲"));
+        assert!(NAME_PATTERN_REG.is_match("hello"));
+    }
+
     #[test]
     fn test_validate_partition_columns() {
         let create_table = CreateTableExpr {
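The validation wraps `NAME_PATTERN` in `^...$`, so the whole identifier must match rather than just a substring. A standalone sketch of the same check, with the pattern inlined from the now-public `common_meta::key::NAME_PATTERN` constant (requires the regex crate):

use regex::Regex;

fn is_valid_name(name: &str) -> bool {
    // Same pattern as NAME_PATTERN, anchored at both ends.
    let re = Regex::new(r"^[a-zA-Z_:-][a-zA-Z0-9_:\-\.]*$").unwrap();
    re.is_match(name)
}

fn main() {
    assert!(is_valid_name("hello"));
    assert!(!is_valid_name("/adaf")); // leading '/' rejected
    assert!(!is_valid_name("🈲")); // non-ASCII rejected
}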
@@ -92,6 +92,7 @@ tonic-reflection = "0.10"
 tonic.workspace = true
 tower = { version = "0.4", features = ["full"] }
 tower-http = { version = "0.3", features = ["full"] }
+urlencoding = "2.1"

 [target.'cfg(not(windows))'.dependencies]
 tikv-jemalloc-ctl = { version = "0.5", features = ["use_std"] }
@@ -1 +1 @@
-v0.3.3
+v0.4.1
@@ -394,6 +394,13 @@ pub enum Error {
         error: serde_json::error::Error,
         location: Location,
     },
+
+    #[snafu(display("Failed to decode url"))]
+    UrlDecode {
+        #[snafu(source)]
+        error: FromUtf8Error,
+        location: Location,
+    },
 }

 pub type Result<T> = std::result::Result<T, Error>;
@@ -444,6 +451,7 @@ impl ErrorExt for Error {
             | DataFrame { .. }
             | PreparedStmtTypeMismatch { .. }
             | TimePrecision { .. }
+            | UrlDecode { .. }
             | IncompatibleSchema { .. } => StatusCode::InvalidArguments,

             InfluxdbLinesWrite { source, .. }
@@ -35,7 +35,7 @@ use super::header::GreptimeDbName;
 use super::PUBLIC_APIS;
 use crate::error::{
     self, InvalidAuthorizationHeaderSnafu, InvalidParameterSnafu, InvisibleASCIISnafu,
-    NotFoundInfluxAuthSnafu, Result, UnsupportedAuthSchemeSnafu,
+    NotFoundInfluxAuthSnafu, Result, UnsupportedAuthSchemeSnafu, UrlDecodeSnafu,
 };
 use crate::http::HTTP_API_PREFIX;

@@ -166,7 +166,9 @@ fn get_influxdb_credentials<B: Send + Sync + 'static>(
         return Ok(None);
     };

-    match extract_influxdb_user_from_query(query_str) {
+    let query_str = urlencoding::decode(query_str).context(UrlDecodeSnafu)?;
+
+    match extract_influxdb_user_from_query(&query_str) {
         (None, None) => Ok(None),
         (Some(username), Some(password)) => {
             Ok(Some((username.to_string(), password.to_string().into())))
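InfluxDB v1 clients pass `u`/`p` credentials as query parameters, which may arrive percent-encoded, so the query string is now decoded before the credentials are extracted. `urlencoding::decode` returns a `Cow<str>` on success and a `FromUtf8Error` on failure, which the new `UrlDecode` error variant wraps. A standalone sketch of the decode step (requires the urlencoding crate):

fn main() {
    // A password containing '/' arrives percent-encoded in the query string.
    let query = "u=alice&p=pass%2Fword";
    let decoded = urlencoding::decode(query).expect("decoded bytes were not valid UTF-8");
    assert_eq!(decoded, "u=alice&p=pass/word");
}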
@@ -13,6 +13,7 @@
 // limitations under the License.

 use snafu::ResultExt;
+use sqlparser::ast::Ident;
 use sqlparser::dialect::Dialect;
 use sqlparser::keywords::Keyword;
 use sqlparser::parser::{Parser, ParserError};
@@ -166,6 +167,29 @@ impl<'a> ParserContext<'a> {
     pub(crate) fn peek_token_as_string(&self) -> String {
         self.parser.peek_token().to_string()
     }
+
+    /// Canonicalize the identifier to lowercase if it's not quoted.
+    pub fn canonicalize_identifier(ident: Ident) -> Ident {
+        if ident.quote_style.is_some() {
+            ident
+        } else {
+            Ident {
+                value: ident.value.to_lowercase(),
+                quote_style: None,
+            }
+        }
+    }
+
+    /// Like [canonicalize_identifier] but for [ObjectName].
+    pub fn canonicalize_object_name(object_name: ObjectName) -> ObjectName {
+        ObjectName(
+            object_name
+                .0
+                .into_iter()
+                .map(Self::canonicalize_identifier)
+                .collect(),
+        )
+    }
 }

 #[cfg(test)]
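The two helpers implement the usual SQL convention: unquoted identifiers are case-insensitive and fold to lowercase, while quoted identifiers keep their exact spelling. A runnable sketch with the same logic inlined (sqlparser 0.38 types):

use sqlparser::ast::Ident;

// Inlined copy of canonicalize_identifier, for illustration only.
fn canonicalize(ident: Ident) -> Ident {
    if ident.quote_style.is_some() {
        ident
    } else {
        Ident {
            value: ident.value.to_lowercase(),
            quote_style: None,
        }
    }
}

fn main() {
    assert_eq!(canonicalize(Ident::new("IdC")).value, "idc");
    assert_eq!(canonicalize(Ident::with_quote('"', "IdC")).value, "IdC");
}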
@@ -33,20 +33,22 @@ impl<'a> ParserContext<'a> {
         let parser = &mut self.parser;
         parser.expect_keywords(&[Keyword::ALTER, Keyword::TABLE])?;

-        let table_name = parser.parse_object_name()?;
+        let raw_table_name = parser.parse_object_name()?;
+        let table_name = Self::canonicalize_object_name(raw_table_name);

         let alter_operation = if parser.parse_keyword(Keyword::ADD) {
             if let Some(constraint) = parser.parse_optional_table_constraint()? {
                 AlterTableOperation::AddConstraint(constraint)
             } else {
                 let _ = parser.parse_keyword(Keyword::COLUMN);
-                let column_def = parser.parse_column_def()?;
+                let mut column_def = parser.parse_column_def()?;
+                column_def.name = Self::canonicalize_identifier(column_def.name);
                 let location = if parser.parse_keyword(Keyword::FIRST) {
                     Some(AddColumnLocation::First)
                 } else if let Token::Word(word) = parser.peek_token().token {
                     if word.value.to_ascii_uppercase() == "AFTER" {
                         let _ = parser.next_token();
-                        let name = parser.parse_identifier()?;
+                        let name = Self::canonicalize_identifier(parser.parse_identifier()?);
                         Some(AddColumnLocation::After {
                             column_name: name.value,
                         })
@@ -63,7 +65,7 @@ impl<'a> ParserContext<'a> {
             }
         } else if parser.parse_keyword(Keyword::DROP) {
             if parser.parse_keyword(Keyword::COLUMN) {
-                let name = self.parser.parse_identifier()?;
+                let name = Self::canonicalize_identifier(self.parser.parse_identifier()?);
                 AlterTableOperation::DropColumn { name }
             } else {
                 return Err(ParserError::ParserError(format!(
@@ -72,7 +74,8 @@ impl<'a> ParserContext<'a> {
                 )));
             }
         } else if parser.parse_keyword(Keyword::RENAME) {
-            let new_table_name_obj = parser.parse_object_name()?;
+            let new_table_name_obj_raw = parser.parse_object_name()?;
+            let new_table_name_obj = Self::canonicalize_object_name(new_table_name_obj_raw);
             let new_table_name = match &new_table_name_obj.0[..] {
                 [table] => table.value.clone(),
                 _ => {
@@ -69,7 +69,7 @@ impl<'a> ParserContext<'a> {
     }

     fn parse_copy_table(&mut self) -> Result<CopyTable> {
-        let table_name =
+        let raw_table_name =
             self.parser
                 .parse_object_name()
                 .with_context(|_| error::UnexpectedSnafu {
@@ -77,6 +77,7 @@ impl<'a> ParserContext<'a> {
                     expected: "a table name",
                     actual: self.peek_token_as_string(),
                 })?;
+        let table_name = Self::canonicalize_object_name(raw_table_name);

         if self.parser.parse_keyword(Keyword::TO) {
             let (with, connection, location) = self.parse_copy_to()?;
@@ -73,7 +73,7 @@ impl<'a> ParserContext<'a> {
         let if_not_exists =
             self.parser
                 .parse_keywords(&[Keyword::IF, Keyword::NOT, Keyword::EXISTS]);
-        let table_name = self
+        let raw_table_name = self
             .parser
             .parse_object_name()
             .context(error::UnexpectedSnafu {
@@ -81,6 +81,7 @@ impl<'a> ParserContext<'a> {
                 expected: "a table name",
                 actual: self.peek_token_as_string(),
             })?;
+        let table_name = Self::canonicalize_object_name(raw_table_name);
         let (columns, constraints) = self.parse_columns()?;
         let engine = self.parse_table_engine(common_catalog::consts::FILE_ENGINE)?;
         let options = self
@@ -142,7 +143,7 @@ impl<'a> ParserContext<'a> {
         self.parser
             .parse_keywords(&[Keyword::IF, Keyword::NOT, Keyword::EXISTS]);

-        let table_name = self
+        let raw_table_name = self
             .parser
             .parse_object_name()
             .context(error::UnexpectedSnafu {
@@ -150,6 +151,7 @@ impl<'a> ParserContext<'a> {
                 expected: "a table name",
                 actual: self.peek_token_as_string(),
             })?;
+        let table_name = Self::canonicalize_object_name(raw_table_name);

         let (columns, constraints) = self.parse_columns()?;
@@ -197,10 +199,14 @@ impl<'a> ParserContext<'a> {
                 actual: self.peek_token_as_string(),
             })?;

-        let column_list = self
+        let raw_column_list = self
             .parser
             .parse_parenthesized_column_list(Mandatory, false)
             .context(error::SyntaxSnafu)?;
+        let column_list = raw_column_list
+            .into_iter()
+            .map(Self::canonicalize_identifier)
+            .collect();

         let entries = self.parse_comma_separated(Self::parse_partition_entry)?;
@@ -435,7 +441,7 @@ impl<'a> ParserContext<'a> {
             };
         }
         Ok(ColumnDef {
-            name,
+            name: Self::canonicalize_identifier(name),
             data_type,
             collation,
             options,
@@ -488,7 +494,8 @@ impl<'a> ParserContext<'a> {

     fn parse_optional_table_constraint(&mut self) -> Result<Option<TableConstraint>> {
         let name = if self.parser.parse_keyword(Keyword::CONSTRAINT) {
-            Some(self.parser.parse_identifier().context(error::SyntaxSnafu)?)
+            let raw_name = self.parser.parse_identifier().context(error::SyntaxSnafu)?;
+            Some(Self::canonicalize_identifier(raw_name))
         } else {
             None
         };
@@ -504,10 +511,14 @@ impl<'a> ParserContext<'a> {
                 expected: "KEY",
                 actual: self.peek_token_as_string(),
             })?;
-            let columns = self
+            let raw_columns = self
                 .parser
                 .parse_parenthesized_column_list(Mandatory, false)
                 .context(error::SyntaxSnafu)?;
+            let columns = raw_columns
+                .into_iter()
+                .map(Self::canonicalize_identifier)
+                .collect();
             Ok(Some(TableConstraint::Unique {
                 name,
                 columns,
@@ -526,10 +537,14 @@ impl<'a> ParserContext<'a> {
                 actual: self.peek_token_as_string(),
             })?;

-            let columns = self
+            let raw_columns = self
                 .parser
                 .parse_parenthesized_column_list(Mandatory, false)
                 .context(error::SyntaxSnafu)?;
+            let columns = raw_columns
+                .into_iter()
+                .map(Self::canonicalize_identifier)
+                .collect::<Vec<_>>();

             ensure!(
                 columns.len() == 1,
@@ -30,7 +30,7 @@ impl<'a> ParserContext<'a> {
     }

     fn parse_describe_table(&mut self) -> Result<Statement> {
-        let table_idents =
+        let raw_table_idents =
             self.parser
                 .parse_object_name()
                 .with_context(|_| error::UnexpectedSnafu {
@@ -38,6 +38,7 @@ impl<'a> ParserContext<'a> {
                     expected: "a table name",
                     actual: self.peek_token_as_string(),
                 })?;
+        let table_idents = Self::canonicalize_object_name(raw_table_idents);
         ensure!(
             !table_idents.0.is_empty(),
             InvalidTableNameSnafu {
@@ -29,7 +29,7 @@ impl<'a> ParserContext<'a> {
         }
         let _ = self.parser.next_token();

-        let table_ident =
+        let raw_table_ident =
            self.parser
                .parse_object_name()
                .with_context(|_| error::UnexpectedSnafu {
@@ -37,6 +37,7 @@ impl<'a> ParserContext<'a> {
                    expected: "a table name",
                    actual: self.peek_token_as_string(),
                })?;
+        let table_ident = Self::canonicalize_object_name(raw_table_ident);
         ensure!(
             !table_ident.0.is_empty(),
             InvalidTableNameSnafu {
@@ -50,7 +50,7 @@ impl<'a> ParserContext<'a> {
|
||||
|
||||
/// Parse SHOW CREATE TABLE statement
|
||||
fn parse_show_create_table(&mut self) -> Result<Statement> {
|
||||
let table_name =
|
||||
let raw_table_name =
|
||||
self.parser
|
||||
.parse_object_name()
|
||||
.with_context(|_| error::UnexpectedSnafu {
|
||||
@@ -58,6 +58,7 @@ impl<'a> ParserContext<'a> {
|
||||
expected: "a table name",
|
||||
actual: self.peek_token_as_string(),
|
||||
})?;
|
||||
let table_name = Self::canonicalize_object_name(raw_table_name);
|
||||
ensure!(
|
||||
!table_name.0.is_empty(),
|
||||
InvalidTableNameSnafu {
|
||||
|
||||
@@ -26,7 +26,7 @@ impl<'a> ParserContext<'a> {
|
||||
let _ = self.parser.next_token();
|
||||
let _ = self.parser.parse_keyword(Keyword::TABLE);
|
||||
|
||||
let table_ident =
|
||||
let raw_table_ident =
|
||||
self.parser
|
||||
.parse_object_name()
|
||||
.with_context(|_| error::UnexpectedSnafu {
|
||||
@@ -34,6 +34,7 @@ impl<'a> ParserContext<'a> {
|
||||
expected: "a table name",
|
||||
actual: self.peek_token_as_string(),
|
||||
})?;
|
||||
let table_ident = Self::canonicalize_object_name(raw_table_ident);
|
||||
|
||||
ensure!(
|
||||
!table_ident.0.is_empty(),
|
||||
|
||||
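Note: every hunk above funnels user-written identifiers through Self::canonicalize_identifier or Self::canonicalize_object_name before they reach the catalog. The helpers themselves sit outside this diff; as a hedged sketch of the behavior the test changes below exercise (unquoted identifiers fold to lowercase, quoted identifiers keep their exact spelling), they plausibly look like this:

    use sqlparser::ast::{Ident, ObjectName};

    // Sketch only, inferred from the test expectations below: an unquoted
    // identifier is folded to lowercase; a quoted one is kept verbatim.
    fn canonicalize_identifier(ident: Ident) -> Ident {
        if ident.quote_style.is_some() {
            ident
        } else {
            Ident::new(ident.value.to_lowercase())
        }
    }

    // A (possibly qualified) object name is canonicalized part by part.
    fn canonicalize_object_name(name: ObjectName) -> ObjectName {
        ObjectName(name.0.into_iter().map(canonicalize_identifier).collect())
    }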
@@ -99,10 +99,10 @@ impl GreptimeDbStandaloneBuilder {
|
||||
.init()
|
||||
.await
|
||||
.unwrap();
|
||||
procedure_manager.start().await.unwrap();
|
||||
|
||||
let instance = Instance::try_new_standalone(
|
||||
kv_backend,
|
||||
procedure_manager,
|
||||
procedure_manager.clone(),
|
||||
catalog_manager,
|
||||
plugins,
|
||||
datanode.region_server(),
|
||||
@@ -110,6 +110,9 @@ impl GreptimeDbStandaloneBuilder {
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
// Ensures all loaders are registered.
|
||||
procedure_manager.start().await.unwrap();
|
||||
|
||||
test_util::prepare_another_catalog_and_schema(&instance).await;
|
||||
|
||||
instance.start().await.unwrap();
|
||||
|
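Note: the builder hunk above reorders startup. Previously procedure_manager.start() ran before Instance::try_new_standalone, so the manager could begin replaying persisted procedures before the loaders registered during instance construction existed. A minimal sketch of the corrected sequence, using only names from the hunk (error handling elided):

    // 1. Hand the manager to the instance first; construction registers
    //    the procedure loaders on it.
    let instance = Instance::try_new_standalone(
        kv_backend,
        procedure_manager.clone(),
        catalog_manager,
        plugins,
        datanode.region_server(),
    )
    .await
    .unwrap();

    // 2. Only then start the manager, so replay sees every loader.
    procedure_manager.start().await.unwrap();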
@@ -720,7 +720,7 @@ num_workers = {}
 worker_channel_size = 128
 worker_request_batch_size = 64
 manifest_checkpoint_distance = 10
-manifest_compress_type = "uncompressed"
+compress_manifest = false
 max_background_jobs = 4
 auto_flush_interval = "30m"
 global_write_buffer_size = "1GiB"
@@ -87,6 +87,23 @@ DESC TABLE test;
 | idc    | String               |     | YES  | idc     | FIELD         |
 +--------+----------------------+-----+------+---------+---------------+

+ALTER TABLE test ADD COLUMN "IdC" STRING default 'idc' PRIMARY KEY;
+
+Affected Rows: 0
+
+DESC TABLE test;
+
++--------+----------------------+-----+------+---------+---------------+
+| Column | Type                 | Key | Null | Default | Semantic Type |
++--------+----------------------+-----+------+---------+---------------+
+| i      | Int32                |     | YES  |         | FIELD         |
+| j      | TimestampMillisecond | PRI | NO   |         | TIMESTAMP     |
+| k      | Int32                |     | YES  |         | FIELD         |
+| host   | String               |     | YES  |         | FIELD         |
+| idc    | String               |     | YES  | idc     | FIELD         |
+| IdC    | String               |     | YES  | idc     | FIELD         |
++--------+----------------------+-----+------+---------+---------------+
+
 DROP TABLE test;

 Affected Rows: 0

@@ -22,4 +22,8 @@ SELECT * FROM test;

 DESC TABLE test;

+ALTER TABLE test ADD COLUMN "IdC" STRING default 'idc' PRIMARY KEY;
+
+DESC TABLE test;
+
 DROP TABLE test;
@@ -6,7 +6,11 @@ INSERT INTO test VALUES (1, 1), (2, 2);

 Affected Rows: 2

-ALTER TABLE test DROP COLUMN i;
+ALTER TABLE test DROP COLUMN "I";
+
+Error: 4002(TableColumnNotFound), Column I not exists in table test
+
+ALTER TABLE test DROP COLUMN I;

 Affected Rows: 0

@@ -2,7 +2,9 @@ CREATE TABLE test(i INTEGER, j TIMESTAMP TIME INDEX);

 INSERT INTO test VALUES (1, 1), (2, 2);

-ALTER TABLE test DROP COLUMN i;
+ALTER TABLE test DROP COLUMN "I";
+
+ALTER TABLE test DROP COLUMN I;

 SELECT * FROM test;
@@ -25,6 +25,10 @@ SELECT * from t;
 |   | 1970-01-01T00:00:00.004 |
 +---+-------------------------+

+ALTER TABLE t RENAME 'Hi👋';
+
+Error: 1002(Unexpected), Unexpected, violated: Invalid table name: Hi👋
+
 ALTER TABLE t RENAME new_table;

 Affected Rows: 0
@@ -77,3 +81,46 @@ DROP TABLE new_table;

 Affected Rows: 0

+CREATE TABLE "AbCdE"("CoLa" INTEGER, "cOlB" TIMESTAMP TIME INDEX);
+
+Affected Rows: 0
+
+ALTER TABLE "AbCdE" RENAME "fGhI";
+
+Affected Rows: 0
+
+DESC TABLE "fGhI";
+
++--------+----------------------+-----+------+---------+---------------+
+| Column | Type                 | Key | Null | Default | Semantic Type |
++--------+----------------------+-----+------+---------+---------------+
+| CoLa   | Int32                |     | YES  |         | FIELD         |
+| cOlB   | TimestampMillisecond | PRI | NO   |         | TIMESTAMP     |
++--------+----------------------+-----+------+---------+---------------+
+
+SELECT * FROM "fGhI";
+
+++
+++
+
+ALTER TABLE "fGhI" RENAME JkLmN;
+
+Affected Rows: 0
+
+DESC TABLE "JkLmN";
+
+Error: 4001(TableNotFound), Table not found: JkLmN
+
+DESC TABLE JkLmN;
+
++--------+----------------------+-----+------+---------+---------------+
+| Column | Type                 | Key | Null | Default | Semantic Type |
++--------+----------------------+-----+------+---------+---------------+
+| CoLa   | Int32                |     | YES  |         | FIELD         |
+| cOlB   | TimestampMillisecond | PRI | NO   |         | TIMESTAMP     |
++--------+----------------------+-----+------+---------+---------------+
+
+DROP TABLE jklmn;
+
+Affected Rows: 0

@@ -6,6 +6,8 @@ INSERT INTO TABLE t VALUES (1, 1), (3, 3), (NULL, 4);

 SELECT * from t;

+ALTER TABLE t RENAME 'Hi👋';
+
 ALTER TABLE t RENAME new_table;

 DESC TABLE t;
@@ -26,3 +28,20 @@ ALTER TABLE new_table RENAME t;
 DROP TABLE t;

 DROP TABLE new_table;
+
+CREATE TABLE "AbCdE"("CoLa" INTEGER, "cOlB" TIMESTAMP TIME INDEX);
+
+ALTER TABLE "AbCdE" RENAME "fGhI";
+
+DESC TABLE "fGhI";
+
+SELECT * FROM "fGhI";
+
+ALTER TABLE "fGhI" RENAME JkLmN;
+
+DESC TABLE "JkLmN";
+
+DESC TABLE JkLmN;
+
+DROP TABLE jklmn;
@@ -50,6 +50,10 @@ CREATE TABLE test2 (i INTEGER, j TIMESTAMP TIME INDEX);

 Error: 4000(TableAlreadyExists), Table already exists: `greptime.public.test2`

+CREATE TABLE 'N.~' (i TIMESTAMP TIME INDEX);
+
+Error: 1002(Unexpected), Unexpected, violated: Invalid table name: N.~
+
 DESC TABLE integers;

 +--------+----------------------+-----+------+---------+---------------+

@@ -24,6 +24,8 @@ CREATE TABLE test2 (i INTEGER, j TIMESTAMP TIME INDEX);

 CREATE TABLE test2 (i INTEGER, j TIMESTAMP TIME INDEX);

+CREATE TABLE 'N.~' (i TIMESTAMP TIME INDEX);
+
 DESC TABLE integers;

 DESC TABLE test1;

@@ -6,6 +6,10 @@ create database 'illegal-database';

 Affected Rows: 1

+create database '㊙️database';
+
+Error: 1002(Unexpected), Unexpected, violated: Invalid database name: ㊙️database
+
 show databases;

 +--------------------+

@@ -2,4 +2,6 @@ create database illegal-database;

 create database 'illegal-database';

+create database '㊙️database';
+
 show databases;
@@ -6,11 +6,11 @@ use upper_case_table_name;

 Affected Rows: 0

-create table system_Metric(ts timestamp time index);
+create table "system_Metric"(ts timestamp time index);

 Affected Rows: 0

-insert into system_Metric values (0), (1);
+insert into "system_Metric" values (0), (1);

 Affected Rows: 2

@@ -27,7 +27,76 @@ select * from "system_Metric";
 | 1970-01-01T00:00:00.001 |
 +-------------------------+

-drop table system_Metric;
+drop table "system_Metric";

 Affected Rows: 0

+create table "AbCdEfG"("CoLA" string, "cOlB" string, "tS" timestamp time index, primary key ("CoLA"));
+
+Affected Rows: 0
+
+desc table "AbCdEfG";
+
++--------+----------------------+-----+------+---------+---------------+
+| Column | Type                 | Key | Null | Default | Semantic Type |
++--------+----------------------+-----+------+---------+---------------+
+| CoLA   | String               | PRI | YES  |         | TAG           |
+| cOlB   | String               |     | YES  |         | FIELD         |
+| tS     | TimestampMillisecond | PRI | NO   |         | TIMESTAMP     |
++--------+----------------------+-----+------+---------+---------------+
+
+-- unquoted table name and column name.
+create table AbCdEfGe(CoLA string, cOlB string, tS timestamp time index, primary key (cOlA));
+
+Affected Rows: 0
+
+desc table aBcDeFgE;
+
++--------+----------------------+-----+------+---------+---------------+
+| Column | Type                 | Key | Null | Default | Semantic Type |
++--------+----------------------+-----+------+---------+---------------+
+| cola   | String               | PRI | YES  |         | TAG           |
+| colb   | String               |     | YES  |         | FIELD         |
+| ts     | TimestampMillisecond | PRI | NO   |         | TIMESTAMP     |
++--------+----------------------+-----+------+---------+---------------+
+
+drop table "AbCdEfG";
+
+Affected Rows: 0
+
+drop table aBcDeFgE;
+
+Affected Rows: 0
+
+-- unquoted column name in partition
+create table AbCdEfGe(
+  CoLA string PRIMARY KEY,
+  tS timestamp time index
+) PARTITION BY RANGE COLUMNS (cOlA) (
+  PARTITION p0 VALUES LESS THAN (MAXVALUE)
+);
+
+Affected Rows: 0
+
+drop table abcdefge;
+
+Affected Rows: 0
+
+-- unquoted column name in TIME INDEX
+create table AbCdEfGe(CoLA string, tS timestamp, TIME INDEX (Ts));
+
+Affected Rows: 0
+
+desc table abcdefge;
+
++--------+----------------------+-----+------+---------+---------------+
+| Column | Type                 | Key | Null | Default | Semantic Type |
++--------+----------------------+-----+------+---------+---------------+
+| cola   | String               |     | YES  |         | FIELD         |
+| ts     | TimestampMillisecond | PRI | NO   |         | TIMESTAMP     |
++--------+----------------------+-----+------+---------+---------------+
+
+drop table abcdefge;
+
+Affected Rows: 0

@@ -2,14 +2,44 @@ create database upper_case_table_name;

 use upper_case_table_name;

-create table system_Metric(ts timestamp time index);
+create table "system_Metric"(ts timestamp time index);

-insert into system_Metric values (0), (1);
+insert into "system_Metric" values (0), (1);

 select * from system_Metric;

 select * from "system_Metric";

-drop table system_Metric;
+drop table "system_Metric";
+
+create table "AbCdEfG"("CoLA" string, "cOlB" string, "tS" timestamp time index, primary key ("CoLA"));
+
+desc table "AbCdEfG";
+
+-- unquoted table name and column name.
+create table AbCdEfGe(CoLA string, cOlB string, tS timestamp time index, primary key (cOlA));
+
+desc table aBcDeFgE;
+
+drop table "AbCdEfG";
+
+drop table aBcDeFgE;
+
+-- unquoted column name in partition
+create table AbCdEfGe(
+  CoLA string PRIMARY KEY,
+  tS timestamp time index
+) PARTITION BY RANGE COLUMNS (cOlA) (
+  PARTITION p0 VALUES LESS THAN (MAXVALUE)
+);
+
+drop table abcdefge;
+
+-- unquoted column name in TIME INDEX
+create table AbCdEfGe(CoLA string, tS timestamp, TIME INDEX (Ts));
+
+desc table abcdefge;
+
+drop table abcdefge;

 use public;
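The upper_case_table_name cases above pin the rule down end to end: the quoted "system_Metric" and "AbCdEfG" keep their case, while the unquoted AbCdEfGe, cOlA, and Ts all resolve after lowercasing. Against the canonicalize_identifier sketch given earlier, the same invariant could be spot-checked in a unit test roughly like:

    use sqlparser::ast::Ident;

    #[test]
    fn canonicalization_matches_the_sqlness_cases() {
        // Quoted identifiers survive verbatim ("system_Metric" stays mixed-case).
        let quoted = Ident::with_quote('"', "system_Metric");
        assert_eq!(canonicalize_identifier(quoted).value, "system_Metric");

        // Unquoted identifiers fold to lowercase, which is why
        // `desc table aBcDeFgE` finds the table created as `AbCdEfGe`.
        let unquoted = Ident::new("aBcDeFgE");
        assert_eq!(canonicalize_identifier(unquoted).value, "abcdefge");
    }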
@@ -60,3 +60,27 @@ DROP TABLE monitor;

 Affected Rows: 0

+CREATE TABLE "MoNiToR" ("hOsT" STRING PRIMARY KEY, "tS" TIMESTAMP TIME INDEX, "cPu" DOUBLE DEFAULT 0);
+
+Affected Rows: 0
+
+DELETE FROM "MoNiToR" WHERE "hOsT" = 'host2';
+
+Affected Rows: 0
+
+DROP TABLE "MoNiToR";
+
+Affected Rows: 0
+
+CREATE TABLE MoNiToR (hOsT STRING PRIMARY KEY, tS TIMESTAMP TIME INDEX, cPu DOUBLE DEFAULT 0);
+
+Affected Rows: 0
+
+DELETE FROM MoNiToR WHERE hOsT = 'host2';
+
+Affected Rows: 0
+
+DROP TABLE MoNiToR;
+
+Affected Rows: 0

@@ -24,3 +24,16 @@ DELETE FROM monitor WHERE memory > 2048;

 SELECT ts, host, cpu, memory FROM monitor ORDER BY ts;

 DROP TABLE monitor;
+
+CREATE TABLE "MoNiToR" ("hOsT" STRING PRIMARY KEY, "tS" TIMESTAMP TIME INDEX, "cPu" DOUBLE DEFAULT 0);
+
+DELETE FROM "MoNiToR" WHERE "hOsT" = 'host2';
+
+DROP TABLE "MoNiToR";
+
+CREATE TABLE MoNiToR (hOsT STRING PRIMARY KEY, tS TIMESTAMP TIME INDEX, cPu DOUBLE DEFAULT 0);
+
+DELETE FROM MoNiToR WHERE hOsT = 'host2';
+
+DROP TABLE MoNiToR;

tests/cases/standalone/common/delete/delete_non_null.result (new file, 38 lines)
@@ -0,0 +1,38 @@
+CREATE TABLE monitor (host STRING NOT NULL, ts TIMESTAMP NOT NULL, cpu DOUBLE NOT NULL, memory DOUBLE NOT NULL, TIME INDEX (ts), PRIMARY KEY(host));
+
+Affected Rows: 0
+
+INSERT INTO monitor(ts, host, cpu, memory) VALUES
+(1655276557000, 'host1', 10.0, 1024),
+(1655276557000, 'host2', 20.0, 1024),
+(1655276557000, 'host3', 30.0, 1024);
+
+Affected Rows: 3
+
+SELECT ts, host, cpu, memory FROM monitor ORDER BY ts, host;
+
++---------------------+-------+------+--------+
+| ts                  | host  | cpu  | memory |
++---------------------+-------+------+--------+
+| 2022-06-15T07:02:37 | host1 | 10.0 | 1024.0 |
+| 2022-06-15T07:02:37 | host2 | 20.0 | 1024.0 |
+| 2022-06-15T07:02:37 | host3 | 30.0 | 1024.0 |
++---------------------+-------+------+--------+
+
+DELETE FROM monitor WHERE host = 'host2';
+
+Affected Rows: 1
+
+SELECT ts, host, cpu, memory FROM monitor ORDER BY ts, host;
+
++---------------------+-------+------+--------+
+| ts                  | host  | cpu  | memory |
++---------------------+-------+------+--------+
+| 2022-06-15T07:02:37 | host1 | 10.0 | 1024.0 |
+| 2022-06-15T07:02:37 | host3 | 30.0 | 1024.0 |
++---------------------+-------+------+--------+
+
+DROP TABLE monitor;
+
+Affected Rows: 0

tests/cases/standalone/common/delete/delete_non_null.sql (new file, 14 lines)
@@ -0,0 +1,14 @@
+CREATE TABLE monitor (host STRING NOT NULL, ts TIMESTAMP NOT NULL, cpu DOUBLE NOT NULL, memory DOUBLE NOT NULL, TIME INDEX (ts), PRIMARY KEY(host));
+
+INSERT INTO monitor(ts, host, cpu, memory) VALUES
+(1655276557000, 'host1', 10.0, 1024),
+(1655276557000, 'host2', 20.0, 1024),
+(1655276557000, 'host3', 30.0, 1024);
+
+SELECT ts, host, cpu, memory FROM monitor ORDER BY ts, host;
+
+DELETE FROM monitor WHERE host = 'host2';
+
+SELECT ts, host, cpu, memory FROM monitor ORDER BY ts, host;
+
+DROP TABLE monitor;

@@ -347,19 +347,19 @@ select i, split_part(s, 'b', 1) from test8 order by i;
 |   | d                                      |
 +---+----------------------------------------+

-CREATE TABLE DirectReports
+CREATE TABLE "DirectReports"
 (
-    EmployeeID smallint,
+    "EmployeeID" smallint,
     "Name" varchar NOT NULL,
-    Title varchar NOT NULL,
-    EmployeeLevel int NOT NULL,
+    "Title" varchar NOT NULL,
+    "EmployeeLevel" int NOT NULL,
     "Sort" varchar NOT NULL,
     "Timestamp" TIMESTAMP TIME INDEX,
 );

 Affected Rows: 0

-INSERT INTO DirectReports VALUES
+INSERT INTO "DirectReports" VALUES
 (1, 'Ken Sánchez', 'Chief Executive Officer', 1, 'Ken Sánchez', 1),
 (273, '>Brian Welcker', 'Vice President of Sales', 2, 'Ken Sánchez>Brian Welcker', 2),
 (274, '>>Stephen Jiang', 'North American Sales Manager', 3, 'Ken Sánchez>Brian Welcker>Stephen Jiang', 3),
@@ -426,7 +426,7 @@ DROP table test8;

 Affected Rows: 0

-DROP TABLE DirectReports;
+DROP TABLE "DirectReports";

 Affected Rows: 0

@@ -82,17 +82,17 @@ insert into test8 values (3, 'aba', 1), (1, 'ccbcc', 2), (NULL, 'dbdbd', 3), (2,

 select i, split_part(s, 'b', 1) from test8 order by i;

-CREATE TABLE DirectReports
+CREATE TABLE "DirectReports"
 (
-    EmployeeID smallint,
+    "EmployeeID" smallint,
     "Name" varchar NOT NULL,
-    Title varchar NOT NULL,
-    EmployeeLevel int NOT NULL,
+    "Title" varchar NOT NULL,
+    "EmployeeLevel" int NOT NULL,
     "Sort" varchar NOT NULL,
     "Timestamp" TIMESTAMP TIME INDEX,
 );

-INSERT INTO DirectReports VALUES
+INSERT INTO "DirectReports" VALUES
 (1, 'Ken Sánchez', 'Chief Executive Officer', 1, 'Ken Sánchez', 1),
 (273, '>Brian Welcker', 'Vice President of Sales', 2, 'Ken Sánchez>Brian Welcker', 2),
 (274, '>>Stephen Jiang', 'North American Sales Manager', 3, 'Ken Sánchez>Brian Welcker>Stephen Jiang', 3),
@@ -125,4 +125,4 @@ DROP table test7;

 DROP table test8;

-DROP TABLE DirectReports;
+DROP TABLE "DirectReports";

@@ -74,3 +74,27 @@ DROP TABLE monitor;

 Affected Rows: 0

+CREATE TABLE "MoNiToR" ("hOsT" STRING PRIMARY KEY, "tS" TIMESTAMP TIME INDEX, "cPu" DOUBLE DEFAULT 0);
+
+Affected Rows: 0
+
+TRUNCATE "MoNiToR";
+
+Affected Rows: 0
+
+DROP TABLE "MoNiToR";
+
+Affected Rows: 0
+
+CREATE TABLE MoNiToR (hOsT STRING PRIMARY KEY, tS TIMESTAMP TIME INDEX, cPu DOUBLE DEFAULT 0);
+
+Affected Rows: 0
+
+TRUNCATE MoNiToR;
+
+Affected Rows: 0
+
+DROP TABLE MoNiToR;
+
+Affected Rows: 0

@@ -31,3 +31,16 @@ TRUNCATE monitor;

 SELECT ts, host, cpu, memory FROM monitor ORDER BY ts;

 DROP TABLE monitor;
+
+CREATE TABLE "MoNiToR" ("hOsT" STRING PRIMARY KEY, "tS" TIMESTAMP TIME INDEX, "cPu" DOUBLE DEFAULT 0);
+
+TRUNCATE "MoNiToR";
+
+DROP TABLE "MoNiToR";
+
+CREATE TABLE MoNiToR (hOsT STRING PRIMARY KEY, tS TIMESTAMP TIME INDEX, cPu DOUBLE DEFAULT 0);
+
+TRUNCATE MoNiToR;
+
+DROP TABLE MoNiToR;