Compare commits

..

4 Commits

Author    SHA1        Message                                        Date
Discord9  5ce678fbf4  clone client instead                           2023-12-05 11:31:14 +08:00
Discord9  2e101bad53  feat: patch to resend insert to flow worker    2023-12-05 10:25:28 +08:00
Discord9  7d4009e51f  dbg: comment out handle request for test       2023-11-30 15:02:40 +08:00
Discord9  1a1c093ad7  feat: resend req to flow                       2023-11-30 14:08:58 +08:00
59 changed files with 228 additions and 882 deletions

Cargo.lock (generated): 139 changed lines
View File

@@ -196,7 +196,7 @@ checksum = "8f1f8f5a6f3d50d89e3797d7593a50f96bb2aaa20ca0cc7be1fb673232c91d72"
[[package]]
name = "api"
version = "0.4.4"
version = "0.4.3"
dependencies = [
"common-base",
"common-decimal",
@@ -655,7 +655,7 @@ dependencies = [
[[package]]
name = "auth"
version = "0.4.4"
version = "0.4.3"
dependencies = [
"api",
"async-trait",
@@ -828,7 +828,7 @@ dependencies = [
[[package]]
name = "benchmarks"
version = "0.4.4"
version = "0.4.3"
dependencies = [
"arrow",
"chrono",
@@ -1160,7 +1160,7 @@ checksum = "37b2a672a2cb129a2e41c10b1224bb368f9f37a2b16b612598138befd7b37eb5"
[[package]]
name = "catalog"
version = "0.4.4"
version = "0.4.3"
dependencies = [
"api",
"arc-swap",
@@ -1429,7 +1429,7 @@ checksum = "702fc72eb24e5a1e48ce58027a675bc24edd52096d5397d4aea7c6dd9eca0bd1"
[[package]]
name = "client"
version = "0.4.4"
version = "0.4.3"
dependencies = [
"api",
"arrow-flight",
@@ -1462,7 +1462,7 @@ dependencies = [
"session",
"snafu",
"substrait 0.17.1",
"substrait 0.4.4",
"substrait 0.4.3",
"tokio",
"tokio-stream",
"tonic 0.10.2",
@@ -1492,7 +1492,7 @@ dependencies = [
[[package]]
name = "cmd"
version = "0.4.4"
version = "0.4.3"
dependencies = [
"anymap",
"async-trait",
@@ -1540,7 +1540,7 @@ dependencies = [
"servers",
"session",
"snafu",
"substrait 0.4.4",
"substrait 0.4.3",
"table",
"temp-env",
"tikv-jemallocator",
@@ -1573,7 +1573,7 @@ checksum = "55b672471b4e9f9e95499ea597ff64941a309b2cdbffcc46f2cc5e2d971fd335"
[[package]]
name = "common-base"
version = "0.4.4"
version = "0.4.3"
dependencies = [
"anymap",
"bitvec",
@@ -1588,7 +1588,7 @@ dependencies = [
[[package]]
name = "common-catalog"
version = "0.4.4"
version = "0.4.3"
dependencies = [
"chrono",
"common-error",
@@ -1599,7 +1599,7 @@ dependencies = [
[[package]]
name = "common-config"
version = "0.4.4"
version = "0.4.3"
dependencies = [
"common-base",
"humantime-serde",
@@ -1608,7 +1608,7 @@ dependencies = [
[[package]]
name = "common-datasource"
version = "0.4.4"
version = "0.4.3"
dependencies = [
"arrow",
"arrow-schema",
@@ -1639,7 +1639,7 @@ dependencies = [
[[package]]
name = "common-decimal"
version = "0.4.4"
version = "0.4.3"
dependencies = [
"arrow",
"bigdecimal",
@@ -1653,7 +1653,7 @@ dependencies = [
[[package]]
name = "common-error"
version = "0.4.4"
version = "0.4.3"
dependencies = [
"snafu",
"strum 0.25.0",
@@ -1661,7 +1661,7 @@ dependencies = [
[[package]]
name = "common-function"
version = "0.4.4"
version = "0.4.3"
dependencies = [
"arc-swap",
"chrono-tz 0.6.3",
@@ -1684,7 +1684,7 @@ dependencies = [
[[package]]
name = "common-greptimedb-telemetry"
version = "0.4.4"
version = "0.4.3"
dependencies = [
"async-trait",
"common-error",
@@ -1703,7 +1703,7 @@ dependencies = [
[[package]]
name = "common-grpc"
version = "0.4.4"
version = "0.4.3"
dependencies = [
"api",
"arrow-flight",
@@ -1733,7 +1733,7 @@ dependencies = [
[[package]]
name = "common-grpc-expr"
version = "0.4.4"
version = "0.4.3"
dependencies = [
"api",
"async-trait",
@@ -1752,7 +1752,7 @@ dependencies = [
[[package]]
name = "common-macro"
version = "0.4.4"
version = "0.4.3"
dependencies = [
"arc-swap",
"common-query",
@@ -1767,7 +1767,7 @@ dependencies = [
[[package]]
name = "common-mem-prof"
version = "0.4.4"
version = "0.4.3"
dependencies = [
"common-error",
"common-macro",
@@ -1780,7 +1780,7 @@ dependencies = [
[[package]]
name = "common-meta"
version = "0.4.4"
version = "0.4.3"
dependencies = [
"api",
"async-recursion",
@@ -1819,7 +1819,7 @@ dependencies = [
[[package]]
name = "common-procedure"
version = "0.4.4"
version = "0.4.3"
dependencies = [
"async-stream",
"async-trait",
@@ -1843,7 +1843,7 @@ dependencies = [
[[package]]
name = "common-procedure-test"
version = "0.4.4"
version = "0.4.3"
dependencies = [
"async-trait",
"common-procedure",
@@ -1851,7 +1851,7 @@ dependencies = [
[[package]]
name = "common-query"
version = "0.4.4"
version = "0.4.3"
dependencies = [
"api",
"async-trait",
@@ -1874,7 +1874,7 @@ dependencies = [
[[package]]
name = "common-recordbatch"
version = "0.4.4"
version = "0.4.3"
dependencies = [
"common-error",
"common-macro",
@@ -1891,7 +1891,7 @@ dependencies = [
[[package]]
name = "common-runtime"
version = "0.4.4"
version = "0.4.3"
dependencies = [
"async-trait",
"common-error",
@@ -1909,7 +1909,7 @@ dependencies = [
[[package]]
name = "common-telemetry"
version = "0.4.4"
version = "0.4.3"
dependencies = [
"backtrace",
"common-error",
@@ -1934,7 +1934,7 @@ dependencies = [
[[package]]
name = "common-test-util"
version = "0.4.4"
version = "0.4.3"
dependencies = [
"once_cell",
"rand",
@@ -1943,7 +1943,7 @@ dependencies = [
[[package]]
name = "common-time"
version = "0.4.4"
version = "0.4.3"
dependencies = [
"arrow",
"chrono",
@@ -1958,7 +1958,7 @@ dependencies = [
[[package]]
name = "common-version"
version = "0.4.4"
version = "0.4.3"
dependencies = [
"build-data",
]
@@ -2581,7 +2581,7 @@ dependencies = [
[[package]]
name = "datanode"
version = "0.4.4"
version = "0.4.3"
dependencies = [
"api",
"arrow-flight",
@@ -2640,7 +2640,7 @@ dependencies = [
"snafu",
"sql",
"store-api",
"substrait 0.4.4",
"substrait 0.4.3",
"table",
"tokio",
"tokio-stream",
@@ -2654,7 +2654,7 @@ dependencies = [
[[package]]
name = "datatypes"
version = "0.4.4"
version = "0.4.3"
dependencies = [
"arrow",
"arrow-array",
@@ -3092,7 +3092,7 @@ dependencies = [
[[package]]
name = "file-engine"
version = "0.4.4"
version = "0.4.3"
dependencies = [
"api",
"async-trait",
@@ -3190,9 +3190,9 @@ checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1"
[[package]]
name = "form_urlencoded"
version = "1.2.1"
version = "1.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e13624c2627564efccf4934284bdd98cbaa14e79b0b5a141218e507b3a823456"
checksum = "a62bc1cf6f830c2ec14a513a9fb124d0a213a629668a4186f329db21fe045652"
dependencies = [
"percent-encoding",
]
@@ -3208,7 +3208,7 @@ dependencies = [
[[package]]
name = "frontend"
version = "0.4.4"
version = "0.4.3"
dependencies = [
"api",
"arc-swap",
@@ -3272,7 +3272,7 @@ dependencies = [
"sqlparser 0.38.0 (git+https://github.com/GreptimeTeam/sqlparser-rs.git?rev=0fbae07d0c46dc18e3381c406d8b9b8abef6b1fd)",
"store-api",
"strfmt",
"substrait 0.4.4",
"substrait 0.4.3",
"table",
"tokio",
"toml 0.7.8",
@@ -3875,9 +3875,9 @@ checksum = "b9e0384b61958566e926dc50660321d12159025e767c18e043daf26b70104c39"
[[package]]
name = "idna"
version = "0.5.0"
version = "0.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "634d9b1461af396cad843f47fdba5597a4f9e6ddd4bfb6ff5d85028c25cb12f6"
checksum = "7d20d6b07bfbc108882d88ed8e37d39636dcc260e15e30c45e6ba089610b917c"
dependencies = [
"unicode-bidi",
"unicode-normalization",
@@ -3910,7 +3910,7 @@ dependencies = [
[[package]]
name = "index"
version = "0.4.4"
version = "0.4.3"
dependencies = [
"async-trait",
"common-base",
@@ -4365,7 +4365,7 @@ checksum = "b5e6163cb8c49088c2c36f57875e58ccd8c87c7427f7fbd50ea6710b2f3f2e8f"
[[package]]
name = "log-store"
version = "0.4.4"
version = "0.4.3"
dependencies = [
"async-stream",
"async-trait",
@@ -4635,7 +4635,7 @@ dependencies = [
[[package]]
name = "meta-client"
version = "0.4.4"
version = "0.4.3"
dependencies = [
"api",
"async-trait",
@@ -4665,7 +4665,7 @@ dependencies = [
[[package]]
name = "meta-srv"
version = "0.4.4"
version = "0.4.3"
dependencies = [
"anymap",
"api",
@@ -4743,7 +4743,7 @@ dependencies = [
[[package]]
name = "metric-engine"
version = "0.4.4"
version = "0.4.3"
dependencies = [
"ahash 0.8.6",
"api",
@@ -4814,7 +4814,7 @@ dependencies = [
[[package]]
name = "mito2"
version = "0.4.4"
version = "0.4.3"
dependencies = [
"anymap",
"api",
@@ -5272,7 +5272,7 @@ dependencies = [
[[package]]
name = "object-store"
version = "0.4.4"
version = "0.4.3"
dependencies = [
"anyhow",
"async-trait",
@@ -5519,7 +5519,7 @@ dependencies = [
[[package]]
name = "operator"
version = "0.4.4"
version = "0.4.3"
dependencies = [
"api",
"async-compat",
@@ -5564,7 +5564,7 @@ dependencies = [
"sql",
"sqlparser 0.38.0 (git+https://github.com/GreptimeTeam/sqlparser-rs.git?rev=0fbae07d0c46dc18e3381c406d8b9b8abef6b1fd)",
"store-api",
"substrait 0.4.4",
"substrait 0.4.3",
"table",
"tokio",
"tonic 0.10.2",
@@ -5784,7 +5784,7 @@ dependencies = [
[[package]]
name = "partition"
version = "0.4.4"
version = "0.4.3"
dependencies = [
"api",
"async-trait",
@@ -5876,9 +5876,9 @@ dependencies = [
[[package]]
name = "percent-encoding"
version = "2.3.1"
version = "2.3.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e3148f5046208a5d56bcfc03053e3ca6334e51da8dfb19b6cdc8b306fae3283e"
checksum = "9b2a4787296e9989611394c33f193f676704af1686e70b8f8033ab5ba9a35a94"
[[package]]
name = "pest"
@@ -6122,7 +6122,7 @@ dependencies = [
[[package]]
name = "plugins"
version = "0.4.4"
version = "0.4.3"
dependencies = [
"auth",
"common-base",
@@ -6349,7 +6349,7 @@ dependencies = [
[[package]]
name = "promql"
version = "0.4.4"
version = "0.4.3"
dependencies = [
"async-recursion",
"async-trait",
@@ -6558,7 +6558,7 @@ dependencies = [
[[package]]
name = "puffin"
version = "0.4.4"
version = "0.4.3"
dependencies = [
"async-trait",
"bitflags 2.4.1",
@@ -6669,7 +6669,7 @@ dependencies = [
[[package]]
name = "query"
version = "0.4.4"
version = "0.4.3"
dependencies = [
"ahash 0.8.6",
"api",
@@ -6727,7 +6727,7 @@ dependencies = [
"stats-cli",
"store-api",
"streaming-stats",
"substrait 0.4.4",
"substrait 0.4.3",
"table",
"tokio",
"tokio-stream",
@@ -7928,7 +7928,7 @@ checksum = "94143f37725109f92c262ed2cf5e59bce7498c01bcc1502d7b9afe439a4e9f49"
[[package]]
name = "script"
version = "0.4.4"
version = "0.4.3"
dependencies = [
"api",
"arc-swap",
@@ -8188,7 +8188,7 @@ dependencies = [
[[package]]
name = "servers"
version = "0.4.4"
version = "0.4.3"
dependencies = [
"aide",
"api",
@@ -8277,12 +8277,11 @@ dependencies = [
"tonic-reflection",
"tower",
"tower-http",
"urlencoding",
]
[[package]]
name = "session"
version = "0.4.4"
version = "0.4.3"
dependencies = [
"api",
"arc-swap",
@@ -8543,7 +8542,7 @@ dependencies = [
[[package]]
name = "sql"
version = "0.4.4"
version = "0.4.3"
dependencies = [
"api",
"common-base",
@@ -8595,7 +8594,7 @@ dependencies = [
[[package]]
name = "sqlness-runner"
version = "0.4.4"
version = "0.4.3"
dependencies = [
"async-trait",
"clap 4.4.8",
@@ -8801,7 +8800,7 @@ dependencies = [
[[package]]
name = "store-api"
version = "0.4.4"
version = "0.4.3"
dependencies = [
"api",
"aquamarine",
@@ -8940,7 +8939,7 @@ dependencies = [
[[package]]
name = "substrait"
version = "0.4.4"
version = "0.4.3"
dependencies = [
"async-recursion",
"async-trait",
@@ -9088,7 +9087,7 @@ dependencies = [
[[package]]
name = "table"
version = "0.4.4"
version = "0.4.3"
dependencies = [
"anymap",
"async-trait",
@@ -9194,7 +9193,7 @@ dependencies = [
[[package]]
name = "tests-integration"
version = "0.4.4"
version = "0.4.3"
dependencies = [
"api",
"async-trait",
@@ -9250,7 +9249,7 @@ dependencies = [
"sql",
"sqlx",
"store-api",
"substrait 0.4.4",
"substrait 0.4.3",
"table",
"tempfile",
"time",
@@ -10249,9 +10248,9 @@ checksum = "8ecb6da28b8a351d773b68d5825ac39017e680750f980f3a1a85cd8dd28a47c1"
[[package]]
name = "url"
version = "2.5.0"
version = "2.4.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "31e6302e3bb753d46e83516cae55ae196fc0c309407cf11ab35cc51a4c2a4633"
checksum = "143b538f18257fac9cad154828a57c6bf5157e1aa604d4816b5995bf6de87ae5"
dependencies = [
"form_urlencoded",
"idna",

View File

@@ -58,7 +58,7 @@ members = [
resolver = "2"
[workspace.package]
version = "0.4.4"
version = "0.4.3"
edition = "2021"
license = "Apache-2.0"

View File

@@ -135,7 +135,6 @@ To write and query data, GreptimeDB is compatible with multiple [protocols and c
- [GreptimeDB Java Client](https://github.com/GreptimeTeam/greptimedb-client-java)
- [GreptimeDB Python Client](https://github.com/GreptimeTeam/greptimedb-client-py) (WIP)
- [GreptimeDB Rust Client](https://github.com/GreptimeTeam/greptimedb-client-rust)
- [GreptimeDB JavaScript Client](https://github.com/GreptimeTeam/greptime-js-sdk)
## Project Status

View File

@@ -64,8 +64,8 @@ worker_channel_size = 128
worker_request_batch_size = 64
# Number of meta action updated to trigger a new checkpoint for the manifest
manifest_checkpoint_distance = 10
# Whether to compress manifest and checkpoint file by gzip (default false).
compress_manifest = false
# Manifest compression type
manifest_compress_type = "uncompressed"
# Max number of running background jobs
max_background_jobs = 4
# Interval to auto flush a region if it has not flushed yet.

View File

@@ -133,8 +133,8 @@ worker_channel_size = 128
worker_request_batch_size = 64
# Number of meta action updated to trigger a new checkpoint for the manifest
manifest_checkpoint_distance = 10
# Whether to compress manifest and checkpoint file by gzip (default false).
compress_manifest = false
# Manifest compression type
manifest_compress_type = "uncompressed"
# Max number of running background jobs
max_background_jobs = 4
# Interval to auto flush a region if it has not flushed yet.

View File

@@ -100,9 +100,6 @@ struct StartCommand {
http_timeout: Option<u64>,
#[clap(long, default_value = "GREPTIMEDB_METASRV")]
env_prefix: String,
/// The working home directory of this metasrv instance.
#[clap(long)]
data_home: Option<String>,
}
impl StartCommand {
@@ -155,10 +152,6 @@ impl StartCommand {
opts.http.timeout = Duration::from_secs(http_timeout);
}
if let Some(data_home) = &self.data_home {
opts.data_home = data_home.clone();
}
// Disable dashboard in metasrv.
opts.http.disable_dashboard = true;

View File

@@ -227,9 +227,6 @@ struct StartCommand {
user_provider: Option<String>,
#[clap(long, default_value = "GREPTIMEDB_STANDALONE")]
env_prefix: String,
/// The working home directory of this standalone instance.
#[clap(long)]
data_home: Option<String>,
}
impl StartCommand {
@@ -260,10 +257,6 @@ impl StartCommand {
opts.http.addr = addr.clone()
}
if let Some(data_home) = &self.data_home {
opts.storage.data_home = data_home.clone();
}
if let Some(addr) = &self.rpc_addr {
// frontend grpc addr conflict with datanode default grpc addr
let datanode_grpc_addr = DatanodeOptions::default().rpc_addr;

View File

@@ -42,6 +42,5 @@ tonic.workspace = true
[dev-dependencies]
chrono.workspace = true
common-procedure = { workspace = true, features = ["testing"] }
datatypes.workspace = true
hyper = { version = "0.14", features = ["full"] }

View File

@@ -43,9 +43,9 @@ use crate::rpc::ddl::{
TruncateTableTask,
};
use crate::rpc::router::RegionRoute;
pub type DdlManagerRef = Arc<DdlManager>;
/// The [DdlManager] provides the ability to execute Ddl.
pub struct DdlManager {
procedure_manager: ProcedureManagerRef,
datanode_manager: DatanodeManagerRef,
@@ -55,31 +55,26 @@ pub struct DdlManager {
}
impl DdlManager {
/// Returns a new [DdlManager] with all Ddl [BoxedProcedureLoader](common_procedure::procedure::BoxedProcedureLoader)s registered.
pub fn try_new(
pub fn new(
procedure_manager: ProcedureManagerRef,
datanode_clients: DatanodeManagerRef,
cache_invalidator: CacheInvalidatorRef,
table_metadata_manager: TableMetadataManagerRef,
table_meta_allocator: TableMetadataAllocatorRef,
) -> Result<Self> {
let manager = Self {
) -> Self {
Self {
procedure_manager,
datanode_manager: datanode_clients,
cache_invalidator,
table_metadata_manager,
table_meta_allocator,
};
manager.register_loaders()?;
Ok(manager)
}
}
/// Returns the [TableMetadataManagerRef].
pub fn table_metadata_manager(&self) -> &TableMetadataManagerRef {
&self.table_metadata_manager
}
/// Returns the [DdlContext]
pub fn create_context(&self) -> DdlContext {
DdlContext {
datanode_manager: self.datanode_manager.clone(),
@@ -88,7 +83,7 @@ impl DdlManager {
}
}
fn register_loaders(&self) -> Result<()> {
pub fn try_start(&self) -> Result<()> {
let context = self.create_context();
self.procedure_manager
@@ -147,7 +142,6 @@ impl DdlManager {
}
#[tracing::instrument(skip_all)]
/// Submits and executes an alter table task.
pub async fn submit_alter_table_task(
&self,
cluster_id: u64,
@@ -165,7 +159,6 @@ impl DdlManager {
}
#[tracing::instrument(skip_all)]
/// Submits and executes a create table task.
pub async fn submit_create_table_task(
&self,
cluster_id: u64,
@@ -183,7 +176,6 @@ impl DdlManager {
}
#[tracing::instrument(skip_all)]
/// Submits and executes a drop table task.
pub async fn submit_drop_table_task(
&self,
cluster_id: u64,
@@ -207,7 +199,6 @@ impl DdlManager {
}
#[tracing::instrument(skip_all)]
/// Submits and executes a truncate table task.
pub async fn submit_truncate_table_task(
&self,
cluster_id: u64,
@@ -425,80 +416,3 @@ impl DdlTaskExecutor for DdlManager {
.await
}
}
#[cfg(test)]
mod tests {
use std::sync::Arc;
use api::v1::meta::Partition;
use common_procedure::local::LocalManager;
use table::metadata::{RawTableInfo, TableId};
use super::DdlManager;
use crate::cache_invalidator::DummyCacheInvalidator;
use crate::datanode_manager::{DatanodeManager, DatanodeRef};
use crate::ddl::alter_table::AlterTableProcedure;
use crate::ddl::create_table::CreateTableProcedure;
use crate::ddl::drop_table::DropTableProcedure;
use crate::ddl::truncate_table::TruncateTableProcedure;
use crate::ddl::{TableMetadataAllocator, TableMetadataAllocatorContext};
use crate::error::Result;
use crate::key::TableMetadataManager;
use crate::kv_backend::memory::MemoryKvBackend;
use crate::peer::Peer;
use crate::rpc::router::RegionRoute;
use crate::state_store::KvStateStore;
/// A dummy implemented [DatanodeManager].
pub struct DummyDatanodeManager;
#[async_trait::async_trait]
impl DatanodeManager for DummyDatanodeManager {
async fn datanode(&self, _datanode: &Peer) -> DatanodeRef {
unimplemented!()
}
}
/// A dummy implemented [TableMetadataAllocator].
pub struct DummyTableMetadataAllocator;
#[async_trait::async_trait]
impl TableMetadataAllocator for DummyTableMetadataAllocator {
async fn create(
&self,
_ctx: &TableMetadataAllocatorContext,
_table_info: &mut RawTableInfo,
_partitions: &[Partition],
) -> Result<(TableId, Vec<RegionRoute>)> {
unimplemented!()
}
}
#[test]
fn test_try_new() {
let kv_backend = Arc::new(MemoryKvBackend::new());
let table_metadata_manager = Arc::new(TableMetadataManager::new(kv_backend.clone()));
let state_store = Arc::new(KvStateStore::new(kv_backend));
let procedure_manager = Arc::new(LocalManager::new(Default::default(), state_store));
let _ = DdlManager::try_new(
procedure_manager.clone(),
Arc::new(DummyDatanodeManager),
Arc::new(DummyCacheInvalidator),
table_metadata_manager,
Arc::new(DummyTableMetadataAllocator),
);
let expected_loaders = vec![
CreateTableProcedure::TYPE_NAME,
AlterTableProcedure::TYPE_NAME,
DropTableProcedure::TYPE_NAME,
TruncateTableProcedure::TYPE_NAME,
];
for loader in expected_loaders {
assert!(procedure_manager.contains_loader(loader));
}
}
}
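For context: on the side of this diff that exposes new plus try_start, loader registration is split out of the constructor. A sketch of the resulting wiring, using the types named in this file (the helper name build_and_start_ddl_manager is hypothetical, and module paths are assumed):

    fn build_and_start_ddl_manager(
        procedure_manager: ProcedureManagerRef,
        datanode_manager: DatanodeManagerRef,
        cache_invalidator: CacheInvalidatorRef,
        table_metadata_manager: TableMetadataManagerRef,
        table_meta_allocator: TableMetadataAllocatorRef,
    ) -> Result<DdlManagerRef> {
        // The constructor is infallible; the procedure loaders for create/alter/
        // drop/truncate table are registered later by try_start().
        let manager = Arc::new(DdlManager::new(
            procedure_manager,
            datanode_manager,
            cache_invalidator,
            table_metadata_manager,
            table_meta_allocator,
        ));
        manager.try_start()?;
        Ok(manager)
    }

The meta-srv builder further down follows this pattern, calling try_start right after build_ddl_manager returns.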

View File

@@ -89,7 +89,7 @@ use crate::DatanodeId;
pub const REMOVED_PREFIX: &str = "__removed";
pub const NAME_PATTERN: &str = r"[a-zA-Z_:-][a-zA-Z0-9_:\-\.]*";
const NAME_PATTERN: &str = r"[a-zA-Z_:-][a-zA-Z0-9_:\-\.]*";
const DATANODE_TABLE_KEY_PREFIX: &str = "__dn_table";
const TABLE_REGION_KEY_PREFIX: &str = "__table_region";

View File

@@ -4,9 +4,6 @@ version.workspace = true
edition.workspace = true
license.workspace = true
[features]
testing = []
[dependencies]
async-stream.workspace = true
async-trait.workspace = true

View File

@@ -527,13 +527,6 @@ impl LocalManager {
Ok(())
}
#[cfg(any(test, feature = "testing"))]
/// Returns true if contains a specified loader.
pub fn contains_loader(&self, name: &str) -> bool {
let loaders = self.manager_ctx.loaders.lock().unwrap();
loaders.contains_key(name)
}
}
#[async_trait]

View File

@@ -26,12 +26,6 @@ use store_api::storage::RegionNumber;
#[snafu(visibility(pub))]
#[stack_trace_debug]
pub enum Error {
#[snafu(display("Failed to init ddl manager"))]
InitDdlManager {
location: Location,
source: common_meta::error::Error,
},
#[snafu(display("Failed to invalidate table cache"))]
InvalidateTableCache {
location: Location,
@@ -325,9 +319,7 @@ impl ErrorExt for Error {
Error::ParseSql { source, .. } => source.status_code(),
Error::InvalidateTableCache { source, .. } | Error::InitDdlManager { source, .. } => {
source.status_code()
}
Error::InvalidateTableCache { source, .. } => source.status_code(),
Error::Table { source, .. }
| Error::CopyTable { source, .. }

View File

@@ -44,6 +44,7 @@ pub struct FrontendOptions {
pub logging: LoggingOptions,
pub datanode: DatanodeOptions,
pub user_provider: Option<String>,
pub flow_grpc_addr: Option<String>,
}
impl Default for FrontendOptions {
@@ -64,6 +65,7 @@ impl Default for FrontendOptions {
logging: LoggingOptions::default(),
datanode: DatanodeOptions::default(),
user_provider: None,
flow_grpc_addr: None,
}
}
}
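A minimal sketch (not part of the diff) of populating the new field; the endpoint is a placeholder:

    fn frontend_options_with_flow_addr() -> FrontendOptions {
        FrontendOptions {
            // gRPC endpoint of a flow worker; placeholder value for illustration.
            flow_grpc_addr: Some("http://127.0.0.1:14514".to_string()),
            ..FrontendOptions::default()
        }
    }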

View File

@@ -24,6 +24,7 @@ pub mod standalone;
use std::collections::HashMap;
use std::sync::Arc;
use api::v1::greptime_database_client::GreptimeDatabaseClient;
use api::v1::meta::Role;
use async_trait::async_trait;
use auth::{PermissionChecker, PermissionCheckerRef, PermissionReq};
@@ -82,6 +83,8 @@ use sql::statements::copy::CopyTable;
use sql::statements::statement::Statement;
use sqlparser::ast::ObjectName;
pub use standalone::StandaloneDatanodeManager;
use tokio::sync::Mutex;
use tonic::transport::Channel;
use self::region_query::FrontendRegionQueryHandler;
use self::standalone::StandaloneTableMetadataCreator;
@@ -117,6 +120,12 @@ pub trait FrontendInstance:
pub type FrontendInstanceRef = Arc<dyn FrontendInstance>;
pub type StatementExecutorRef = Arc<StatementExecutor>;
/// Send certain query to flow worker through grpc
pub struct FlowProxy {
flow_client: Mutex<GreptimeDatabaseClient<Channel>>,
}
pub type FlowProxyRef = Arc<FlowProxy>;
#[derive(Clone)]
pub struct Instance {
catalog_manager: CatalogManagerRef,
@@ -128,6 +137,7 @@ pub struct Instance {
heartbeat_task: Option<HeartbeatTask>,
inserter: InserterRef,
deleter: DeleterRef,
flow: Option<FlowProxyRef>,
}
impl Instance {
@@ -219,6 +229,7 @@ impl Instance {
heartbeat_task,
inserter,
deleter,
flow: None,
})
}
@@ -328,16 +339,13 @@ impl Instance {
let table_metadata_manager = Arc::new(TableMetadataManager::new(kv_backend.clone()));
let cache_invalidator = Arc::new(DummyCacheInvalidator);
let ddl_executor = Arc::new(
DdlManager::try_new(
procedure_manager,
datanode_manager,
cache_invalidator.clone(),
table_metadata_manager.clone(),
Arc::new(StandaloneTableMetadataCreator::new(kv_backend.clone())),
)
.context(error::InitDdlManagerSnafu)?,
);
let ddl_executor = Arc::new(DdlManager::new(
procedure_manager,
datanode_manager,
cache_invalidator.clone(),
table_metadata_manager.clone(),
Arc::new(StandaloneTableMetadataCreator::new(kv_backend.clone())),
));
let statement_executor = Arc::new(StatementExecutor::new(
catalog_manager.clone(),
@@ -348,6 +356,11 @@ impl Instance {
inserter.clone(),
));
let addr = std::env::var("FLOW_ADDR").unwrap_or("http://[::1]:14514".to_string());
let conn = tonic::transport::Endpoint::new(addr)
.unwrap()
.connect_lazy();
let client = GreptimeDatabaseClient::new(conn);
Ok(Instance {
catalog_manager: catalog_manager.clone(),
script_executor,
@@ -358,6 +371,12 @@ impl Instance {
heartbeat_task: None,
inserter,
deleter,
flow: Some(
FlowProxy {
flow_client: Mutex::new(client),
}
.into(),
),
})
}
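Condensed, the wiring added above builds a lazily-connected gRPC client to the flow worker and stores it behind the FlowProxy; a sketch using the same names (the address falls back to the in-diff default when FLOW_ADDR is unset):

    fn make_flow_proxy() -> FlowProxyRef {
        let addr = std::env::var("FLOW_ADDR").unwrap_or("http://[::1]:14514".to_string());
        // connect_lazy() defers establishing the connection until the first request.
        let channel = tonic::transport::Endpoint::new(addr)
            .unwrap()
            .connect_lazy();
        Arc::new(FlowProxy {
            flow_client: Mutex::new(GreptimeDatabaseClient::new(channel)),
        })
    }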

View File

@@ -15,11 +15,14 @@
use api::v1::ddl_request::{Expr as DdlExpr, Expr};
use api::v1::greptime_request::Request;
use api::v1::query_request::Query;
use api::v1::{DeleteRequests, InsertRequests, RowDeleteRequests, RowInsertRequests};
use api::v1::{
DeleteRequests, GreptimeRequest, InsertRequests, RowDeleteRequests, RowInsertRequests,
};
use async_trait::async_trait;
use auth::{PermissionChecker, PermissionCheckerRef, PermissionReq};
use common_meta::table_name::TableName;
use common_query::Output;
use common_telemetry::info;
use query::parser::PromQuery;
use servers::interceptor::{GrpcQueryInterceptor, GrpcQueryInterceptorRef};
use servers::query_handler::grpc::GrpcQueryHandler;
@@ -47,6 +50,27 @@ impl GrpcQueryHandler for Instance {
.as_ref()
.check_permission(ctx.current_user(), PermissionReq::GrpcRequest(&request))
.context(PermissionSnafu)?;
/*
// copy row inserts to flow worker
if let Request::RowInserts(_) = &request {
let full_req = GreptimeRequest {
header: None,
request: Some(request.clone()),
};
if let Some(flow_proxy) = &self.flow {
flow_proxy
.flow_client
.lock()
.await
.handle(full_req)
.await
.unwrap();
} else {
info!("flow proxy is not initialized");
}
};
let output = Output::AffectedRows(0);*/
let output = match request {
Request::Inserts(requests) => self.handle_inserts(requests, ctx.clone()).await?,
@@ -187,10 +211,24 @@ impl Instance {
requests: RowInsertRequests,
ctx: QueryContextRef,
) -> Result<Output> {
let full_req = GreptimeRequest {
header: None,
request: Some(Request::RowInserts(requests.clone())),
};
if let Some(flow_proxy) = &self.flow {
let mut client = flow_proxy.flow_client.lock().await.clone();
client.handle(full_req).await.unwrap();
} else {
info!("flow proxy is not initialized");
}
Ok(Output::AffectedRows(0))
/*
self.inserter
.handle_row_inserts(requests, ctx, self.statement_executor.as_ref())
.await
.context(TableOperationSnafu)
*/
}
pub async fn handle_deletes(

View File

@@ -43,12 +43,6 @@ pub enum Error {
region_id: RegionId,
},
#[snafu(display("Failed to init ddl manager"))]
InitDdlManager {
location: Location,
source: common_meta::error::Error,
},
#[snafu(display("Failed to create default catalog and schema"))]
InitMetadata {
location: Location,
@@ -691,9 +685,7 @@ impl ErrorExt for Error {
| Error::UpdateTableRoute { source, .. }
| Error::GetFullTableInfo { source, .. } => source.status_code(),
Error::InitMetadata { source, .. } | Error::InitDdlManager { source, .. } => {
source.status_code()
}
Error::InitMetadata { source, .. } => source.status_code(),
Error::Other { source, .. } => source.status_code(),
}

View File

@@ -28,11 +28,10 @@ use common_meta::sequence::{Sequence, SequenceRef};
use common_meta::state_store::KvStateStore;
use common_procedure::local::{LocalManager, ManagerConfig};
use common_procedure::ProcedureManagerRef;
use snafu::ResultExt;
use crate::cache_invalidator::MetasrvCacheInvalidator;
use crate::cluster::{MetaPeerClientBuilder, MetaPeerClientRef};
use crate::error::{self, Result};
use crate::error::Result;
use crate::greptimedb_telemetry::get_greptimedb_telemetry_task;
use crate::handler::check_leader_handler::CheckLeaderHandler;
use crate::handler::collect_stats_handler::CollectStatsHandler;
@@ -197,7 +196,8 @@ impl MetaSrvBuilder {
&table_metadata_manager,
(&selector, &selector_ctx),
&table_id_sequence,
)?;
);
let _ = ddl_manager.try_start();
let opening_region_keeper = Arc::new(OpeningRegionKeeper::default());
let handler_group = match handler_group {
@@ -330,7 +330,7 @@ fn build_ddl_manager(
table_metadata_manager: &TableMetadataManagerRef,
(selector, selector_ctx): (&SelectorRef, &SelectorContext),
table_id_sequence: &SequenceRef,
) -> Result<DdlManagerRef> {
) -> DdlManagerRef {
let datanode_clients = datanode_clients.unwrap_or_else(|| {
let datanode_client_channel_config = ChannelConfig::new()
.timeout(Duration::from_millis(
@@ -355,15 +355,12 @@ fn build_ddl_manager(
table_id_sequence.clone(),
));
Ok(Arc::new(
DdlManager::try_new(
procedure_manager.clone(),
datanode_clients,
cache_invalidator,
table_metadata_manager.clone(),
table_meta_allocator,
)
.context(error::InitDdlManagerSnafu)?,
Arc::new(DdlManager::new(
procedure_manager.clone(),
datanode_clients,
cache_invalidator,
table_metadata_manager.clone(),
table_meta_allocator,
))
}

View File

@@ -17,6 +17,7 @@
use std::time::Duration;
use common_base::readable_size::ReadableSize;
use common_datasource::compression::CompressionType;
use common_telemetry::warn;
use serde::{Deserialize, Serialize};
@@ -41,8 +42,8 @@ pub struct MitoConfig {
/// Number of meta action updated to trigger a new checkpoint
/// for the manifest (default 10).
pub manifest_checkpoint_distance: u64,
/// Whether to compress manifest and checkpoint file by gzip (default false).
pub compress_manifest: bool,
/// Manifest compression type (default uncompressed).
pub manifest_compress_type: CompressionType,
// Background job configs:
/// Max number of running background jobs (default 4).
@@ -77,7 +78,7 @@ impl Default for MitoConfig {
worker_channel_size: 128,
worker_request_batch_size: 64,
manifest_checkpoint_distance: 10,
compress_manifest: false,
manifest_compress_type: CompressionType::Uncompressed,
max_background_jobs: DEFAULT_MAX_BG_JOB,
auto_flush_interval: Duration::from_secs(30 * 60),
global_write_buffer_size: ReadableSize::gb(1),
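On the side of this hunk that models compression as an enum rather than a bool, the gzip equivalent of the old compress_manifest = true looks roughly like the sketch below (struct path assumed; the Gzip variant is the one referenced by DEFAULT_MANIFEST_COMPRESSION_TYPE later in this change set):

    use common_datasource::compression::CompressionType;

    fn gzip_manifest_config() -> MitoConfig {
        MitoConfig {
            // Compress manifest and checkpoint files with gzip instead of
            // leaving them uncompressed (the default).
            manifest_compress_type: CompressionType::Gzip,
            ..MitoConfig::default()
        }
    }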

View File

@@ -29,7 +29,7 @@ use super::*;
use crate::region::version::VersionControlData;
use crate::test_util::{
build_delete_rows_for_key, build_rows, build_rows_for_key, delete_rows, delete_rows_schema,
flush_region, put_rows, reopen_region, rows_schema, CreateRequestBuilder, TestEnv,
flush_region, put_rows, rows_schema, CreateRequestBuilder, TestEnv,
};
#[tokio::test]
@@ -353,55 +353,6 @@ async fn test_put_delete() {
assert_eq!(expected, batches.pretty_print().unwrap());
}
#[tokio::test]
async fn test_delete_not_null_fields() {
let mut env = TestEnv::new();
let engine = env.create_engine(MitoConfig::default()).await;
let region_id = RegionId::new(1, 1);
let request = CreateRequestBuilder::new().all_not_null(true).build();
let region_dir = request.region_dir.clone();
let column_schemas = rows_schema(&request);
let delete_schema = delete_rows_schema(&request);
engine
.handle_request(region_id, RegionRequest::Create(request))
.await
.unwrap();
let rows = Rows {
schema: column_schemas.clone(),
rows: build_rows_for_key("a", 0, 4, 0),
};
put_rows(&engine, region_id, rows).await;
// Delete (a, 2)
let rows = Rows {
schema: delete_schema.clone(),
rows: build_delete_rows_for_key("a", 2, 3),
};
delete_rows(&engine, region_id, rows).await;
let request = ScanRequest::default();
let stream = engine.handle_query(region_id, request).await.unwrap();
let batches = RecordBatches::try_collect(stream).await.unwrap();
let expected = "\
+-------+---------+---------------------+
| tag_0 | field_0 | ts |
+-------+---------+---------------------+
| a | 0.0 | 1970-01-01T00:00:00 |
| a | 1.0 | 1970-01-01T00:00:01 |
| a | 3.0 | 1970-01-01T00:00:03 |
+-------+---------+---------------------+";
assert_eq!(expected, batches.pretty_print().unwrap());
// Reopen and scan again.
reopen_region(&engine, region_id, region_dir, false).await;
let request = ScanRequest::default();
let stream = engine.handle_query(region_id, request).await.unwrap();
let batches = RecordBatches::try_collect(stream).await.unwrap();
assert_eq!(expected, batches.pretty_print().unwrap());
}
#[tokio::test]
async fn test_put_overwrite() {
let mut env = TestEnv::new();

View File

@@ -42,7 +42,6 @@ const DEFAULT_MANIFEST_COMPRESSION_TYPE: CompressionType = CompressionType::Gzip
/// So when we encounter problems, we need to fall back to `FALL_BACK_COMPRESS_TYPE` for processing.
const FALL_BACK_COMPRESS_TYPE: CompressionType = CompressionType::Uncompressed;
/// Returns the [CompressionType] according to whether to compress manifest files.
#[inline]
pub const fn manifest_compress_type(compress: bool) -> CompressionType {
if compress {

View File

@@ -33,7 +33,6 @@ use crate::cache::CacheManagerRef;
use crate::config::MitoConfig;
use crate::error::{EmptyRegionDirSnafu, ObjectStoreNotFoundSnafu, RegionCorruptedSnafu, Result};
use crate::manifest::manager::{RegionManifestManager, RegionManifestOptions};
use crate::manifest::storage::manifest_compress_type;
use crate::memtable::MemtableBuilderRef;
use crate::region::options::RegionOptions;
use crate::region::version::{VersionBuilder, VersionControl, VersionControlRef};
@@ -260,9 +259,7 @@ impl RegionOpener {
Ok(RegionManifestOptions {
manifest_dir: new_manifest_dir(&self.region_dir),
object_store,
// We don't allow users to set the compression algorithm as we use it as a file suffix.
// Currently, the manifest storage doesn't have good support for changing compression algorithms.
compress_type: manifest_compress_type(config.compress_manifest),
compress_type: config.manifest_compress_type,
checkpoint_distance: config.manifest_checkpoint_distance,
})
}

View File

@@ -274,21 +274,17 @@ impl WriteRequest {
/// Checks whether we should allow a row doesn't provide this column.
fn check_missing_column(&self, column: &ColumnMetadata) -> Result<()> {
if self.op_type == OpType::Delete {
if column.semantic_type == SemanticType::Field {
// For delete request, all tags and timestamp is required. We don't fill default
// tag or timestamp while deleting rows.
return Ok(());
} else {
return InvalidRequestSnafu {
region_id: self.region_id,
reason: format!("delete requests need column {}", column.column_schema.name),
}
.fail();
// For delete request, all tags and timestamp is required. We don't fill default
// tag or timestamp while deleting rows.
ensure!(
self.op_type != OpType::Delete || column.semantic_type == SemanticType::Field,
InvalidRequestSnafu {
region_id: self.region_id,
reason: format!("delete requests need column {}", column.column_schema.name),
}
}
);
// Not a delete request. Checks whether they have default value.
// Checks whether they have default value.
ensure!(
column.column_schema.is_nullable()
|| column.column_schema.default_constraint().is_some(),
@@ -994,7 +990,7 @@ mod tests {
assert_eq!(expect_rows, request.rows);
}
fn builder_with_ts_tag() -> RegionMetadataBuilder {
fn region_metadata_two_fields() -> RegionMetadata {
let mut builder = RegionMetadataBuilder::new(RegionId::new(1, 1));
builder
.push_column_metadata(ColumnMetadata {
@@ -1015,13 +1011,6 @@ mod tests {
semantic_type: SemanticType::Tag,
column_id: 2,
})
.primary_key(vec![2]);
builder
}
fn region_metadata_two_fields() -> RegionMetadata {
let mut builder = builder_with_ts_tag();
builder
.push_column_metadata(ColumnMetadata {
column_schema: datatypes::schema::ColumnSchema::new(
"f0",
@@ -1044,7 +1033,8 @@ mod tests {
.unwrap(),
semantic_type: SemanticType::Field,
column_id: 4,
});
})
.primary_key(vec![2]);
builder.build().unwrap()
}
@@ -1110,75 +1100,6 @@ mod tests {
assert_eq!(expect_rows, request.rows);
}
#[test]
fn test_fill_missing_without_default_in_delete() {
let mut builder = builder_with_ts_tag();
builder
// f0 is nullable.
.push_column_metadata(ColumnMetadata {
column_schema: datatypes::schema::ColumnSchema::new(
"f0",
ConcreteDataType::int64_datatype(),
true,
),
semantic_type: SemanticType::Field,
column_id: 3,
})
// f1 is not nullable and don't has default.
.push_column_metadata(ColumnMetadata {
column_schema: datatypes::schema::ColumnSchema::new(
"f1",
ConcreteDataType::int64_datatype(),
false,
),
semantic_type: SemanticType::Field,
column_id: 4,
});
let metadata = builder.build().unwrap();
let rows = Rows {
schema: vec![
new_column_schema("k0", ColumnDataType::Int64, SemanticType::Tag),
new_column_schema(
"ts",
ColumnDataType::TimestampMillisecond,
SemanticType::Timestamp,
),
],
// Missing f0 (nullable), f1 (not nullable).
rows: vec![Row {
values: vec![i64_value(100), ts_ms_value(1)],
}],
};
let mut request = WriteRequest::new(RegionId::new(1, 1), OpType::Delete, rows).unwrap();
let err = request.check_schema(&metadata).unwrap_err();
assert!(err.is_fill_default());
request.fill_missing_columns(&metadata).unwrap();
let expect_rows = Rows {
schema: vec![
new_column_schema("k0", ColumnDataType::Int64, SemanticType::Tag),
new_column_schema(
"ts",
ColumnDataType::TimestampMillisecond,
SemanticType::Timestamp,
),
new_column_schema("f0", ColumnDataType::Int64, SemanticType::Field),
new_column_schema("f1", ColumnDataType::Int64, SemanticType::Field),
],
// Column f1 is not nullable and we use 0 for padding.
rows: vec![Row {
values: vec![
i64_value(100),
ts_ms_value(1),
Value { value_data: None },
i64_value(0),
],
}],
};
assert_eq!(expect_rows, request.rows);
}
#[test]
fn test_no_default() {
let rows = Rows {
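Both variants of the first guard in check_missing_column above reject delete requests that omit a tag or timestamp column; with snafu, ensure!(condition, Selector { .. }) returns early with the selector-built error when the condition is false. A self-contained illustration with a hypothetical MissingColumn error (not from the codebase):

    use snafu::{ensure, Snafu};

    #[derive(Debug, Snafu)]
    #[snafu(display("delete requests need column {name}"))]
    struct MissingColumn {
        name: String,
    }

    // Delete rows may omit field columns, but never tags or the timestamp.
    fn check(is_delete: bool, is_field: bool, name: &str) -> Result<(), MissingColumn> {
        ensure!(!is_delete || is_field, MissingColumnSnafu { name });
        Ok(())
    }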

View File

@@ -277,7 +277,6 @@ pub struct CreateRequestBuilder {
field_num: usize,
options: HashMap<String, String>,
primary_key: Option<Vec<ColumnId>>,
all_not_null: bool,
}
impl Default for CreateRequestBuilder {
@@ -288,7 +287,6 @@ impl Default for CreateRequestBuilder {
field_num: 1,
options: HashMap::new(),
primary_key: None,
all_not_null: false,
}
}
}
@@ -323,29 +321,21 @@ impl CreateRequestBuilder {
self
}
#[must_use]
pub fn insert_option(mut self, key: &str, value: &str) -> Self {
self.options.insert(key.to_string(), value.to_string());
self
}
#[must_use]
pub fn all_not_null(mut self, value: bool) -> Self {
self.all_not_null = value;
self
}
pub fn build(&self) -> RegionCreateRequest {
let mut column_id = 0;
let mut column_metadatas = Vec::with_capacity(self.tag_num + self.field_num + 1);
let mut primary_key = Vec::with_capacity(self.tag_num);
let nullable = !self.all_not_null;
for i in 0..self.tag_num {
column_metadatas.push(ColumnMetadata {
column_schema: ColumnSchema::new(
format!("tag_{i}"),
ConcreteDataType::string_datatype(),
nullable,
true,
),
semantic_type: SemanticType::Tag,
column_id,
@@ -358,7 +348,7 @@ impl CreateRequestBuilder {
column_schema: ColumnSchema::new(
format!("field_{i}"),
ConcreteDataType::float64_datatype(),
nullable,
true,
),
semantic_type: SemanticType::Field,
column_id,
@@ -369,7 +359,6 @@ impl CreateRequestBuilder {
column_schema: ColumnSchema::new(
"ts",
ConcreteDataType::timestamp_millisecond_datatype(),
// Time index is always not null.
false,
),
semantic_type: SemanticType::Timestamp,

View File

@@ -24,7 +24,6 @@ use common_catalog::format_full_table_name;
use common_meta::cache_invalidator::Context;
use common_meta::ddl::ExecutorContext;
use common_meta::key::schema_name::{SchemaNameKey, SchemaNameValue};
use common_meta::key::NAME_PATTERN;
use common_meta::rpc::ddl::{DdlTask, SubmitDdlTaskRequest, SubmitDdlTaskResponse};
use common_meta::rpc::router::{Partition, Partition as MetaPartition};
use common_meta::table_name::TableName;
@@ -32,9 +31,7 @@ use common_query::Output;
use common_telemetry::{info, tracing};
use datatypes::prelude::ConcreteDataType;
use datatypes::schema::RawSchema;
use lazy_static::lazy_static;
use partition::partition::{PartitionBound, PartitionDef};
use regex::Regex;
use session::context::QueryContextRef;
use snafu::{ensure, OptionExt, ResultExt};
use sql::ast::Value as SqlValue;
@@ -44,7 +41,7 @@ use sql::statements::sql_value_to_value;
use sql::MAXVALUE;
use table::dist_table::DistTable;
use table::metadata::{self, RawTableInfo, RawTableMeta, TableId, TableInfo, TableType};
use table::requests::{AlterKind, AlterTableRequest, TableOptions};
use table::requests::{AlterTableRequest, TableOptions};
use table::TableRef;
use super::StatementExecutor;
@@ -56,10 +53,6 @@ use crate::error::{
};
use crate::expr_factory;
lazy_static! {
static ref NAME_PATTERN_REG: Regex = Regex::new(&format!("^{NAME_PATTERN}$")).unwrap();
}
impl StatementExecutor {
pub fn catalog_manager(&self) -> CatalogManagerRef {
self.catalog_manager.clone()
@@ -129,13 +122,6 @@ impl StatementExecutor {
};
}
ensure!(
NAME_PATTERN_REG.is_match(&create_table.table_name),
error::UnexpectedSnafu {
violated: format!("Invalid table name: {}", create_table.table_name)
}
);
let table_name = TableName::new(
&create_table.catalog_name,
&create_table.schema_name,
@@ -227,20 +213,7 @@ impl StatementExecutor {
let request: AlterTableRequest = common_grpc_expr::alter_expr_to_request(table_id, expr)
.context(AlterExprToRequestSnafu)?;
let AlterTableRequest {
table_name,
alter_kind,
..
} = &request;
if let AlterKind::RenameTable { new_table_name } = alter_kind {
ensure!(
NAME_PATTERN_REG.is_match(new_table_name),
error::UnexpectedSnafu {
violated: format!("Invalid table name: {}", new_table_name)
}
);
}
let AlterTableRequest { table_name, .. } = &request;
let _ = table_info
.meta
@@ -386,20 +359,6 @@ impl StatementExecutor {
database: &str,
create_if_not_exists: bool,
) -> Result<Output> {
ensure!(
NAME_PATTERN_REG.is_match(catalog),
error::UnexpectedSnafu {
violated: format!("Invalid catalog name: {}", catalog)
}
);
ensure!(
NAME_PATTERN_REG.is_match(database),
error::UnexpectedSnafu {
violated: format!("Invalid database name: {}", database)
}
);
// TODO(weny): considers executing it in the procedures.
let schema_key = SchemaNameKey::new(catalog, database);
let exists = self
@@ -628,13 +587,6 @@ mod test {
use super::*;
use crate::expr_factory;
#[test]
fn test_name_is_match() {
assert!(!NAME_PATTERN_REG.is_match("/adaf"));
assert!(!NAME_PATTERN_REG.is_match("🈲"));
assert!(NAME_PATTERN_REG.is_match("hello"));
}
#[test]
fn test_validate_partition_columns() {
let create_table = CreateTableExpr {

View File

@@ -92,7 +92,6 @@ tonic-reflection = "0.10"
tonic.workspace = true
tower = { version = "0.4", features = ["full"] }
tower-http = { version = "0.3", features = ["full"] }
urlencoding = "2.1"
[target.'cfg(not(windows))'.dependencies]
tikv-jemalloc-ctl = { version = "0.5", features = ["use_std"] }

View File

@@ -1 +1 @@
v0.4.1
v0.3.3

View File

@@ -394,13 +394,6 @@ pub enum Error {
error: serde_json::error::Error,
location: Location,
},
#[snafu(display("Failed to decode url"))]
UrlDecode {
#[snafu(source)]
error: FromUtf8Error,
location: Location,
},
}
pub type Result<T> = std::result::Result<T, Error>;
@@ -451,7 +444,6 @@ impl ErrorExt for Error {
| DataFrame { .. }
| PreparedStmtTypeMismatch { .. }
| TimePrecision { .. }
| UrlDecode { .. }
| IncompatibleSchema { .. } => StatusCode::InvalidArguments,
InfluxdbLinesWrite { source, .. }

View File

@@ -35,7 +35,7 @@ use super::header::GreptimeDbName;
use super::PUBLIC_APIS;
use crate::error::{
self, InvalidAuthorizationHeaderSnafu, InvalidParameterSnafu, InvisibleASCIISnafu,
NotFoundInfluxAuthSnafu, Result, UnsupportedAuthSchemeSnafu, UrlDecodeSnafu,
NotFoundInfluxAuthSnafu, Result, UnsupportedAuthSchemeSnafu,
};
use crate::http::HTTP_API_PREFIX;
@@ -166,9 +166,7 @@ fn get_influxdb_credentials<B: Send + Sync + 'static>(
return Ok(None);
};
let query_str = urlencoding::decode(query_str).context(UrlDecodeSnafu)?;
match extract_influxdb_user_from_query(&query_str) {
match extract_influxdb_user_from_query(query_str) {
(None, None) => Ok(None),
(Some(username), Some(password)) => {
Ok(Some((username.to_string(), password.to_string().into())))

View File

@@ -13,7 +13,6 @@
// limitations under the License.
use snafu::ResultExt;
use sqlparser::ast::Ident;
use sqlparser::dialect::Dialect;
use sqlparser::keywords::Keyword;
use sqlparser::parser::{Parser, ParserError};
@@ -167,29 +166,6 @@ impl<'a> ParserContext<'a> {
pub(crate) fn peek_token_as_string(&self) -> String {
self.parser.peek_token().to_string()
}
/// Canonicalize the identifier to lowercase if it's not quoted.
pub fn canonicalize_identifier(ident: Ident) -> Ident {
if ident.quote_style.is_some() {
ident
} else {
Ident {
value: ident.value.to_lowercase(),
quote_style: None,
}
}
}
/// Like [canonicalize_identifier] but for [ObjectName].
pub fn canonicalize_object_name(object_name: ObjectName) -> ObjectName {
ObjectName(
object_name
.0
.into_iter()
.map(Self::canonicalize_identifier)
.collect(),
)
}
}
#[cfg(test)]
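The canonicalization helpers present on one side of this file lower-case unquoted identifiers and leave quoted ones untouched; a short sketch of that behaviour using sqlparser's Ident (hypothetical test name):

    #[test]
    fn canonicalize_identifier_examples() {
        // Unquoted identifiers are folded to lowercase...
        let unquoted = ParserContext::canonicalize_identifier(Ident::new("CoLA"));
        assert_eq!(unquoted.value, "cola");

        // ...while quoted identifiers keep their casing and quote style.
        let quoted = ParserContext::canonicalize_identifier(Ident::with_quote('"', "CoLA"));
        assert_eq!(quoted.value, "CoLA");
        assert_eq!(quoted.quote_style, Some('"'));
    }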

View File

@@ -33,22 +33,20 @@ impl<'a> ParserContext<'a> {
let parser = &mut self.parser;
parser.expect_keywords(&[Keyword::ALTER, Keyword::TABLE])?;
let raw_table_name = parser.parse_object_name()?;
let table_name = Self::canonicalize_object_name(raw_table_name);
let table_name = parser.parse_object_name()?;
let alter_operation = if parser.parse_keyword(Keyword::ADD) {
if let Some(constraint) = parser.parse_optional_table_constraint()? {
AlterTableOperation::AddConstraint(constraint)
} else {
let _ = parser.parse_keyword(Keyword::COLUMN);
let mut column_def = parser.parse_column_def()?;
column_def.name = Self::canonicalize_identifier(column_def.name);
let column_def = parser.parse_column_def()?;
let location = if parser.parse_keyword(Keyword::FIRST) {
Some(AddColumnLocation::First)
} else if let Token::Word(word) = parser.peek_token().token {
if word.value.to_ascii_uppercase() == "AFTER" {
let _ = parser.next_token();
let name = Self::canonicalize_identifier(parser.parse_identifier()?);
let name = parser.parse_identifier()?;
Some(AddColumnLocation::After {
column_name: name.value,
})
@@ -65,7 +63,7 @@ impl<'a> ParserContext<'a> {
}
} else if parser.parse_keyword(Keyword::DROP) {
if parser.parse_keyword(Keyword::COLUMN) {
let name = Self::canonicalize_identifier(self.parser.parse_identifier()?);
let name = self.parser.parse_identifier()?;
AlterTableOperation::DropColumn { name }
} else {
return Err(ParserError::ParserError(format!(
@@ -74,8 +72,7 @@ impl<'a> ParserContext<'a> {
)));
}
} else if parser.parse_keyword(Keyword::RENAME) {
let new_table_name_obj_raw = parser.parse_object_name()?;
let new_table_name_obj = Self::canonicalize_object_name(new_table_name_obj_raw);
let new_table_name_obj = parser.parse_object_name()?;
let new_table_name = match &new_table_name_obj.0[..] {
[table] => table.value.clone(),
_ => {

View File

@@ -69,7 +69,7 @@ impl<'a> ParserContext<'a> {
}
fn parse_copy_table(&mut self) -> Result<CopyTable> {
let raw_table_name =
let table_name =
self.parser
.parse_object_name()
.with_context(|_| error::UnexpectedSnafu {
@@ -77,7 +77,6 @@ impl<'a> ParserContext<'a> {
expected: "a table name",
actual: self.peek_token_as_string(),
})?;
let table_name = Self::canonicalize_object_name(raw_table_name);
if self.parser.parse_keyword(Keyword::TO) {
let (with, connection, location) = self.parse_copy_to()?;

View File

@@ -73,7 +73,7 @@ impl<'a> ParserContext<'a> {
let if_not_exists =
self.parser
.parse_keywords(&[Keyword::IF, Keyword::NOT, Keyword::EXISTS]);
let raw_table_name = self
let table_name = self
.parser
.parse_object_name()
.context(error::UnexpectedSnafu {
@@ -81,7 +81,6 @@ impl<'a> ParserContext<'a> {
expected: "a table name",
actual: self.peek_token_as_string(),
})?;
let table_name = Self::canonicalize_object_name(raw_table_name);
let (columns, constraints) = self.parse_columns()?;
let engine = self.parse_table_engine(common_catalog::consts::FILE_ENGINE)?;
let options = self
@@ -143,7 +142,7 @@ impl<'a> ParserContext<'a> {
self.parser
.parse_keywords(&[Keyword::IF, Keyword::NOT, Keyword::EXISTS]);
let raw_table_name = self
let table_name = self
.parser
.parse_object_name()
.context(error::UnexpectedSnafu {
@@ -151,7 +150,6 @@ impl<'a> ParserContext<'a> {
expected: "a table name",
actual: self.peek_token_as_string(),
})?;
let table_name = Self::canonicalize_object_name(raw_table_name);
let (columns, constraints) = self.parse_columns()?;
@@ -199,14 +197,10 @@ impl<'a> ParserContext<'a> {
actual: self.peek_token_as_string(),
})?;
let raw_column_list = self
let column_list = self
.parser
.parse_parenthesized_column_list(Mandatory, false)
.context(error::SyntaxSnafu)?;
let column_list = raw_column_list
.into_iter()
.map(Self::canonicalize_identifier)
.collect();
let entries = self.parse_comma_separated(Self::parse_partition_entry)?;
@@ -441,7 +435,7 @@ impl<'a> ParserContext<'a> {
};
}
Ok(ColumnDef {
name: Self::canonicalize_identifier(name),
name,
data_type,
collation,
options,
@@ -494,8 +488,7 @@ impl<'a> ParserContext<'a> {
fn parse_optional_table_constraint(&mut self) -> Result<Option<TableConstraint>> {
let name = if self.parser.parse_keyword(Keyword::CONSTRAINT) {
let raw_name = self.parser.parse_identifier().context(error::SyntaxSnafu)?;
Some(Self::canonicalize_identifier(raw_name))
Some(self.parser.parse_identifier().context(error::SyntaxSnafu)?)
} else {
None
};
@@ -511,14 +504,10 @@ impl<'a> ParserContext<'a> {
expected: "KEY",
actual: self.peek_token_as_string(),
})?;
let raw_columns = self
let columns = self
.parser
.parse_parenthesized_column_list(Mandatory, false)
.context(error::SyntaxSnafu)?;
let columns = raw_columns
.into_iter()
.map(Self::canonicalize_identifier)
.collect();
Ok(Some(TableConstraint::Unique {
name,
columns,
@@ -537,14 +526,10 @@ impl<'a> ParserContext<'a> {
actual: self.peek_token_as_string(),
})?;
let raw_columns = self
let columns = self
.parser
.parse_parenthesized_column_list(Mandatory, false)
.context(error::SyntaxSnafu)?;
let columns = raw_columns
.into_iter()
.map(Self::canonicalize_identifier)
.collect::<Vec<_>>();
ensure!(
columns.len() == 1,

View File

@@ -30,7 +30,7 @@ impl<'a> ParserContext<'a> {
}
fn parse_describe_table(&mut self) -> Result<Statement> {
let raw_table_idents =
let table_idents =
self.parser
.parse_object_name()
.with_context(|_| error::UnexpectedSnafu {
@@ -38,7 +38,6 @@ impl<'a> ParserContext<'a> {
expected: "a table name",
actual: self.peek_token_as_string(),
})?;
let table_idents = Self::canonicalize_object_name(raw_table_idents);
ensure!(
!table_idents.0.is_empty(),
InvalidTableNameSnafu {

View File

@@ -29,7 +29,7 @@ impl<'a> ParserContext<'a> {
}
let _ = self.parser.next_token();
let raw_table_ident =
let table_ident =
self.parser
.parse_object_name()
.with_context(|_| error::UnexpectedSnafu {
@@ -37,7 +37,6 @@ impl<'a> ParserContext<'a> {
expected: "a table name",
actual: self.peek_token_as_string(),
})?;
let table_ident = Self::canonicalize_object_name(raw_table_ident);
ensure!(
!table_ident.0.is_empty(),
InvalidTableNameSnafu {

View File

@@ -50,7 +50,7 @@ impl<'a> ParserContext<'a> {
/// Parse SHOW CREATE TABLE statement
fn parse_show_create_table(&mut self) -> Result<Statement> {
let raw_table_name =
let table_name =
self.parser
.parse_object_name()
.with_context(|_| error::UnexpectedSnafu {
@@ -58,7 +58,6 @@ impl<'a> ParserContext<'a> {
expected: "a table name",
actual: self.peek_token_as_string(),
})?;
let table_name = Self::canonicalize_object_name(raw_table_name);
ensure!(
!table_name.0.is_empty(),
InvalidTableNameSnafu {

View File

@@ -26,7 +26,7 @@ impl<'a> ParserContext<'a> {
let _ = self.parser.next_token();
let _ = self.parser.parse_keyword(Keyword::TABLE);
let raw_table_ident =
let table_ident =
self.parser
.parse_object_name()
.with_context(|_| error::UnexpectedSnafu {
@@ -34,7 +34,6 @@ impl<'a> ParserContext<'a> {
expected: "a table name",
actual: self.peek_token_as_string(),
})?;
let table_ident = Self::canonicalize_object_name(raw_table_ident);
ensure!(
!table_ident.0.is_empty(),

View File

@@ -99,10 +99,10 @@ impl GreptimeDbStandaloneBuilder {
.init()
.await
.unwrap();
procedure_manager.start().await.unwrap();
let instance = Instance::try_new_standalone(
kv_backend,
procedure_manager.clone(),
procedure_manager,
catalog_manager,
plugins,
datanode.region_server(),
@@ -110,9 +110,6 @@ impl GreptimeDbStandaloneBuilder {
.await
.unwrap();
// Ensures all loaders are registered.
procedure_manager.start().await.unwrap();
test_util::prepare_another_catalog_and_schema(&instance).await;
instance.start().await.unwrap();

View File

@@ -720,7 +720,7 @@ num_workers = {}
worker_channel_size = 128
worker_request_batch_size = 64
manifest_checkpoint_distance = 10
compress_manifest = false
manifest_compress_type = "uncompressed"
max_background_jobs = 4
auto_flush_interval = "30m"
global_write_buffer_size = "1GiB"

View File

@@ -87,23 +87,6 @@ DESC TABLE test;
| idc | String | | YES | idc | FIELD |
+--------+----------------------+-----+------+---------+---------------+
ALTER TABLE test ADD COLUMN "IdC" STRING default 'idc' PRIMARY KEY;
Affected Rows: 0
DESC TABLE test;
+--------+----------------------+-----+------+---------+---------------+
| Column | Type | Key | Null | Default | Semantic Type |
+--------+----------------------+-----+------+---------+---------------+
| i | Int32 | | YES | | FIELD |
| j | TimestampMillisecond | PRI | NO | | TIMESTAMP |
| k | Int32 | | YES | | FIELD |
| host | String | | YES | | FIELD |
| idc | String | | YES | idc | FIELD |
| IdC | String | | YES | idc | FIELD |
+--------+----------------------+-----+------+---------+---------------+
DROP TABLE test;
Affected Rows: 0

View File

@@ -22,8 +22,4 @@ SELECT * FROM test;
DESC TABLE test;
ALTER TABLE test ADD COLUMN "IdC" STRING default 'idc' PRIMARY KEY;
DESC TABLE test;
DROP TABLE test;

View File

@@ -6,11 +6,7 @@ INSERT INTO test VALUES (1, 1), (2, 2);
Affected Rows: 2
ALTER TABLE test DROP COLUMN "I";
Error: 4002(TableColumnNotFound), Column I not exists in table test
ALTER TABLE test DROP COLUMN I;
ALTER TABLE test DROP COLUMN i;
Affected Rows: 0

View File

@@ -2,9 +2,7 @@ CREATE TABLE test(i INTEGER, j TIMESTAMP TIME INDEX);
INSERT INTO test VALUES (1, 1), (2, 2);
ALTER TABLE test DROP COLUMN "I";
ALTER TABLE test DROP COLUMN I;
ALTER TABLE test DROP COLUMN i;
SELECT * FROM test;

View File

@@ -25,10 +25,6 @@ SELECT * from t;
| | 1970-01-01T00:00:00.004 |
+---+-------------------------+
ALTER TABLE t RENAME 'Hi👋';
Error: 1002(Unexpected), Unexpected, violated: Invalid table name: Hi👋
ALTER TABLE t RENAME new_table;
Affected Rows: 0
@@ -81,46 +77,3 @@ DROP TABLE new_table;
Affected Rows: 0
CREATE TABLE "AbCdE"("CoLa" INTEGER, "cOlB" TIMESTAMP TIME INDEX);
Affected Rows: 0
ALTER TABLE "AbCdE" RENAME "fGhI";
Affected Rows: 0
DESC TABLE "fGhI";
+--------+----------------------+-----+------+---------+---------------+
| Column | Type | Key | Null | Default | Semantic Type |
+--------+----------------------+-----+------+---------+---------------+
| CoLa | Int32 | | YES | | FIELD |
| cOlB | TimestampMillisecond | PRI | NO | | TIMESTAMP |
+--------+----------------------+-----+------+---------+---------------+
SELECT * FROM "fGhI";
++
++
ALTER TABLE "fGhI" RENAME JkLmN;
Affected Rows: 0
DESC TABLE "JkLmN";
Error: 4001(TableNotFound), Table not found: JkLmN
DESC TABLE JkLmN;
+--------+----------------------+-----+------+---------+---------------+
| Column | Type | Key | Null | Default | Semantic Type |
+--------+----------------------+-----+------+---------+---------------+
| CoLa | Int32 | | YES | | FIELD |
| cOlB | TimestampMillisecond | PRI | NO | | TIMESTAMP |
+--------+----------------------+-----+------+---------+---------------+
DROP TABLE jklmn;
Affected Rows: 0

View File

@@ -6,8 +6,6 @@ INSERT INTO TABLE t VALUES (1, 1), (3, 3), (NULL, 4);
SELECT * from t;
ALTER TABLE t RENAME 'Hi👋';
ALTER TABLE t RENAME new_table;
DESC TABLE t;
@@ -28,20 +26,3 @@ ALTER TABLE new_table RENAME t;
DROP TABLE t;
DROP TABLE new_table;
CREATE TABLE "AbCdE"("CoLa" INTEGER, "cOlB" TIMESTAMP TIME INDEX);
ALTER TABLE "AbCdE" RENAME "fGhI";
DESC TABLE "fGhI";
SELECT * FROM "fGhI";
ALTER TABLE "fGhI" RENAME JkLmN;
DESC TABLE "JkLmN";
DESC TABLE JkLmN;
DROP TABLE jklmn;

View File

@@ -50,10 +50,6 @@ CREATE TABLE test2 (i INTEGER, j TIMESTAMP TIME INDEX);
Error: 4000(TableAlreadyExists), Table already exists: `greptime.public.test2`
CREATE TABLE 'N.~' (i TIMESTAMP TIME INDEX);
Error: 1002(Unexpected), Unexpected, violated: Invalid table name: N.~
DESC TABLE integers;
+--------+----------------------+-----+------+---------+---------------+

View File

@@ -24,8 +24,6 @@ CREATE TABLE test2 (i INTEGER, j TIMESTAMP TIME INDEX);
CREATE TABLE test2 (i INTEGER, j TIMESTAMP TIME INDEX);
CREATE TABLE 'N.~' (i TIMESTAMP TIME INDEX);
DESC TABLE integers;
DESC TABLE test1;

View File

@@ -6,10 +6,6 @@ create database 'illegal-database';
Affected Rows: 1
create database '㊙database';
Error: 1002(Unexpected), Unexpected, violated: Invalid database name: ㊙database
show databases;
+--------------------+

View File

@@ -2,6 +2,4 @@ create database illegal-database;
create database 'illegal-database';
create database 'database';
show databases;

View File

@@ -6,11 +6,11 @@ use upper_case_table_name;
Affected Rows: 0
create table "system_Metric"(ts timestamp time index);
create table system_Metric(ts timestamp time index);
Affected Rows: 0
insert into "system_Metric" values (0), (1);
insert into system_Metric values (0), (1);
Affected Rows: 2
@@ -27,76 +27,7 @@ select * from "system_Metric";
| 1970-01-01T00:00:00.001 |
+-------------------------+
drop table "system_Metric";
Affected Rows: 0
create table "AbCdEfG"("CoLA" string, "cOlB" string, "tS" timestamp time index, primary key ("CoLA"));
Affected Rows: 0
desc table "AbCdEfG";
+--------+----------------------+-----+------+---------+---------------+
| Column | Type | Key | Null | Default | Semantic Type |
+--------+----------------------+-----+------+---------+---------------+
| CoLA | String | PRI | YES | | TAG |
| cOlB | String | | YES | | FIELD |
| tS | TimestampMillisecond | PRI | NO | | TIMESTAMP |
+--------+----------------------+-----+------+---------+---------------+
-- unquoted table name and column name.
create table AbCdEfGe(CoLA string, cOlB string, tS timestamp time index, primary key (cOlA));
Affected Rows: 0
desc table aBcDeFgE;
+--------+----------------------+-----+------+---------+---------------+
| Column | Type | Key | Null | Default | Semantic Type |
+--------+----------------------+-----+------+---------+---------------+
| cola | String | PRI | YES | | TAG |
| colb | String | | YES | | FIELD |
| ts | TimestampMillisecond | PRI | NO | | TIMESTAMP |
+--------+----------------------+-----+------+---------+---------------+
drop table "AbCdEfG";
Affected Rows: 0
drop table aBcDeFgE;
Affected Rows: 0
-- unquoted column name in partition
create table AbCdEfGe(
CoLA string PRIMARY KEY,
tS timestamp time index
) PARTITION BY RANGE COLUMNS (cOlA) (
PARTITION p0 VALUES LESS THAN (MAXVALUE)
);
Affected Rows: 0
drop table abcdefge;
Affected Rows: 0
-- unquoted column name in TIME INDEX
create table AbCdEfGe(CoLA string, tS timestamp, TIME INDEX (Ts));
Affected Rows: 0
desc table abcdefge;
+--------+----------------------+-----+------+---------+---------------+
| Column | Type | Key | Null | Default | Semantic Type |
+--------+----------------------+-----+------+---------+---------------+
| cola | String | | YES | | FIELD |
| ts | TimestampMillisecond | PRI | NO | | TIMESTAMP |
+--------+----------------------+-----+------+---------+---------------+
drop table abcdefge;
drop table system_Metric;
Affected Rows: 0
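The removed output above verifies the column-name side of the same rule: quoted names such as "CoLA" and "tS" are stored verbatim, while the unquoted variants come back as `cola`, `colb`, `ts`, and clauses like `TIME INDEX (Ts)` or `PARTITION BY RANGE COLUMNS (cOlA)` resolve those names case-insensitively. A minimal sketch under that assumption, not part of the diff (the table name demo_case is illustrative):

-- Sketch: unquoted column names fold to lowercase; TIME INDEX resolves them case-insensitively.
CREATE TABLE demo_case (CoLA STRING, tS TIMESTAMP, TIME INDEX (Ts));
DESC TABLE demo_case;   -- columns are reported as cola and ts
DROP TABLE demo_case;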

View File

@@ -2,44 +2,14 @@ create database upper_case_table_name;
use upper_case_table_name;
create table "system_Metric"(ts timestamp time index);
create table system_Metric(ts timestamp time index);
insert into "system_Metric" values (0), (1);
insert into system_Metric values (0), (1);
select * from system_Metric;
select * from "system_Metric";
drop table "system_Metric";
create table "AbCdEfG"("CoLA" string, "cOlB" string, "tS" timestamp time index, primary key ("CoLA"));
desc table "AbCdEfG";
-- unquoted table name and column name.
create table AbCdEfGe(CoLA string, cOlB string, tS timestamp time index, primary key (cOlA));
desc table aBcDeFgE;
drop table "AbCdEfG";
drop table aBcDeFgE;
-- unquoted column name in partition
create table AbCdEfGe(
CoLA string PRIMARY KEY,
tS timestamp time index
) PARTITION BY RANGE COLUMNS (cOlA) (
PARTITION p0 VALUES LESS THAN (MAXVALUE)
);
drop table abcdefge;
-- unquoted column name in TIME INDEX
create table AbCdEfGe(CoLA string, tS timestamp, TIME INDEX (Ts));
desc table abcdefge;
drop table abcdefge;
drop table system_Metric;
use public;

View File

@@ -60,27 +60,3 @@ DROP TABLE monitor;
Affected Rows: 0
CREATE TABLE "MoNiToR" ("hOsT" STRING PRIMARY KEY, "tS" TIMESTAMP TIME INDEX, "cPu" DOUBLE DEFAULT 0);
Affected Rows: 0
DELETE FROM "MoNiToR" WHERE "hOsT" = 'host2';
Affected Rows: 0
DROP TABLE "MoNiToR";
Affected Rows: 0
CREATE TABLE MoNiToR (hOsT STRING PRIMARY KEY, tS TIMESTAMP TIME INDEX, cPu DOUBLE DEFAULT 0);
Affected Rows: 0
DELETE FROM MoNiToR WHERE hOsT = 'host2';
Affected Rows: 0
DROP TABLE MoNiToR;
Affected Rows: 0

View File

@@ -24,16 +24,3 @@ DELETE FROM monitor WHERE memory > 2048;
SELECT ts, host, cpu, memory FROM monitor ORDER BY ts;
DROP TABLE monitor;
CREATE TABLE "MoNiToR" ("hOsT" STRING PRIMARY KEY, "tS" TIMESTAMP TIME INDEX, "cPu" DOUBLE DEFAULT 0);
DELETE FROM "MoNiToR" WHERE "hOsT" = 'host2';
DROP TABLE "MoNiToR";
CREATE TABLE MoNiToR (hOsT STRING PRIMARY KEY, tS TIMESTAMP TIME INDEX, cPu DOUBLE DEFAULT 0);
DELETE FROM MoNiToR WHERE hOsT = 'host2';
DROP TABLE MoNiToR;

View File

@@ -1,38 +0,0 @@
CREATE TABLE monitor (host STRING NOT NULL, ts TIMESTAMP NOT NULL, cpu DOUBLE NOT NULL, memory DOUBLE NOT NULL, TIME INDEX (ts), PRIMARY KEY(host));
Affected Rows: 0
INSERT INTO monitor(ts, host, cpu, memory) VALUES
(1655276557000, 'host1', 10.0, 1024),
(1655276557000, 'host2', 20.0, 1024),
(1655276557000, 'host3', 30.0, 1024);
Affected Rows: 3
SELECT ts, host, cpu, memory FROM monitor ORDER BY ts, host;
+---------------------+-------+------+--------+
| ts | host | cpu | memory |
+---------------------+-------+------+--------+
| 2022-06-15T07:02:37 | host1 | 10.0 | 1024.0 |
| 2022-06-15T07:02:37 | host2 | 20.0 | 1024.0 |
| 2022-06-15T07:02:37 | host3 | 30.0 | 1024.0 |
+---------------------+-------+------+--------+
DELETE FROM monitor WHERE host = 'host2';
Affected Rows: 1
SELECT ts, host, cpu, memory FROM monitor ORDER BY ts, host;
+---------------------+-------+------+--------+
| ts | host | cpu | memory |
+---------------------+-------+------+--------+
| 2022-06-15T07:02:37 | host1 | 10.0 | 1024.0 |
| 2022-06-15T07:02:37 | host3 | 30.0 | 1024.0 |
+---------------------+-------+------+--------+
DROP TABLE monitor;
Affected Rows: 0
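For reference, the epoch values in the removed INSERT are millisecond timestamps: 1655276557000 ms is 2022-06-15T07:02:37 UTC, which is how the SELECT output above renders them, and the DELETE matches rows by the primary-key column `host`. A minimal sketch of the same flow, not part of the diff (the table name monitor_demo is illustrative):

-- Sketch: millisecond epoch values render as UTC datetimes in query output.
CREATE TABLE monitor_demo (host STRING PRIMARY KEY, ts TIMESTAMP TIME INDEX, cpu DOUBLE);
INSERT INTO monitor_demo VALUES ('host1', 1655276557000, 10.0);
SELECT ts, host, cpu FROM monitor_demo;   -- ts shows as 2022-06-15T07:02:37
DELETE FROM monitor_demo WHERE host = 'host1';
DROP TABLE monitor_demo;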

View File

@@ -1,14 +0,0 @@
CREATE TABLE monitor (host STRING NOT NULL, ts TIMESTAMP NOT NULL, cpu DOUBLE NOT NULL, memory DOUBLE NOT NULL, TIME INDEX (ts), PRIMARY KEY(host));
INSERT INTO monitor(ts, host, cpu, memory) VALUES
(1655276557000, 'host1', 10.0, 1024),
(1655276557000, 'host2', 20.0, 1024),
(1655276557000, 'host3', 30.0, 1024);
SELECT ts, host, cpu, memory FROM monitor ORDER BY ts, host;
DELETE FROM monitor WHERE host = 'host2';
SELECT ts, host, cpu, memory FROM monitor ORDER BY ts, host;
DROP TABLE monitor;

View File

@@ -347,19 +347,19 @@ select i, split_part(s, 'b', 1) from test8 order by i;
| | d |
+---+----------------------------------------+
CREATE TABLE "DirectReports"
CREATE TABLE DirectReports
(
"EmployeeID" smallint,
EmployeeID smallint,
"Name" varchar NOT NULL,
"Title" varchar NOT NULL,
"EmployeeLevel" int NOT NULL,
Title varchar NOT NULL,
EmployeeLevel int NOT NULL,
"Sort" varchar NOT NULL,
"Timestamp" TIMESTAMP TIME INDEX,
);
Affected Rows: 0
INSERT INTO "DirectReports" VALUES
INSERT INTO DirectReports VALUES
(1, 'Ken Sánchez', 'Chief Executive Officer', 1, 'Ken Sánchez', 1),
(273, '>Brian Welcker', 'Vice President of Sales', 2, 'Ken Sánchez>Brian Welcker', 2),
(274, '>>Stephen Jiang', 'North American Sales Manager', 3, 'Ken Sánchez>Brian Welcker>Stephen Jiang', 3),
@@ -426,7 +426,7 @@ DROP table test8;
Affected Rows: 0
DROP TABLE "DirectReports";
DROP TABLE DirectReports;
Affected Rows: 0
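The DDL change above drops the quotes from DirectReports, EmployeeID, Title and EmployeeLevel only, so those identifiers are now stored lowercase while "Name", "Sort" and "Timestamp" keep their case; the unquoted CREATE, INSERT and DROP all fold DirectReports to the same lowercase name, so they continue to match each other. A minimal sketch of mixing the two quoting styles, not part of the diff (the table name reports_demo is illustrative):

-- Sketch: quoted and unquoted identifiers mixed in one DDL; unquoted ones are stored lowercase.
CREATE TABLE reports_demo (
  EmployeeID SMALLINT,               -- stored as employeeid
  "Name" VARCHAR NOT NULL,           -- stored as Name
  "Timestamp" TIMESTAMP TIME INDEX
);
DESC TABLE reports_demo;
DROP TABLE reports_demo;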

View File

@@ -82,17 +82,17 @@ insert into test8 values (3, 'aba', 1), (1, 'ccbcc', 2), (NULL, 'dbdbd', 3), (2,
select i, split_part(s, 'b', 1) from test8 order by i;
CREATE TABLE "DirectReports"
CREATE TABLE DirectReports
(
"EmployeeID" smallint,
EmployeeID smallint,
"Name" varchar NOT NULL,
"Title" varchar NOT NULL,
"EmployeeLevel" int NOT NULL,
Title varchar NOT NULL,
EmployeeLevel int NOT NULL,
"Sort" varchar NOT NULL,
"Timestamp" TIMESTAMP TIME INDEX,
);
INSERT INTO "DirectReports" VALUES
INSERT INTO DirectReports VALUES
(1, 'Ken Sánchez', 'Chief Executive Officer', 1, 'Ken Sánchez', 1),
(273, '>Brian Welcker', 'Vice President of Sales', 2, 'Ken Sánchez>Brian Welcker', 2),
(274, '>>Stephen Jiang', 'North American Sales Manager', 3, 'Ken Sánchez>Brian Welcker>Stephen Jiang', 3),
@@ -125,4 +125,4 @@ DROP table test7;
DROP table test8;
DROP TABLE "DirectReports";
DROP TABLE DirectReports;

View File

@@ -74,27 +74,3 @@ DROP TABLE monitor;
Affected Rows: 0
CREATE TABLE "MoNiToR" ("hOsT" STRING PRIMARY KEY, "tS" TIMESTAMP TIME INDEX, "cPu" DOUBLE DEFAULT 0);
Affected Rows: 0
TRUNCATE "MoNiToR";
Affected Rows: 0
DROP TABLE "MoNiToR";
Affected Rows: 0
CREATE TABLE MoNiToR (hOsT STRING PRIMARY KEY, tS TIMESTAMP TIME INDEX, cPu DOUBLE DEFAULT 0);
Affected Rows: 0
TRUNCATE MoNiToR;
Affected Rows: 0
DROP TABLE MoNiToR;
Affected Rows: 0

View File

@@ -31,16 +31,3 @@ TRUNCATE monitor;
SELECT ts, host, cpu, memory FROM monitor ORDER BY ts;
DROP TABLE monitor;
CREATE TABLE "MoNiToR" ("hOsT" STRING PRIMARY KEY, "tS" TIMESTAMP TIME INDEX, "cPu" DOUBLE DEFAULT 0);
TRUNCATE "MoNiToR";
DROP TABLE "MoNiToR";
CREATE TABLE MoNiToR (hOsT STRING PRIMARY KEY, tS TIMESTAMP TIME INDEX, cPu DOUBLE DEFAULT 0);
TRUNCATE MoNiToR;
DROP TABLE MoNiToR;