Mirror of https://github.com/GreptimeTeam/greptimedb.git, synced 2026-05-17 05:20:37 +00:00
feat!: support table ddl for custom storage (#2733)
* feat: support table ddl for custom_storage
* refactor: rename extract_variant_name to name
* chore: add blank
* chore: keep compatible
* feat: rename custom_stores to providers
* chore: rename
* chore: config
* refactor: add should_retry in client Error
* fix: test fail
* chore: remove unused options
* chore: remove unused import
* chore: remove the blanks.
* chore: revert

---------

Co-authored-by: dennis zhuang <killme2008@gmail.com>
@@ -43,6 +43,7 @@ sync_write = false
 [storage]
 # The working home directory.
 data_home = "/tmp/greptimedb/"
 # Storage type.
 type = "File"
 # TTL for all tables. Disabled by default.
 # global_ttl = "7d"
@@ -53,6 +54,9 @@ type = "File"
 # The local file cache capacity in bytes.
 # cache_capacity = "256MB"
 
+[storage.default_store]
+type = "File"
+
 # Mito engine options
 [[region_engine]]
 [region_engine.mito]
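The shape introduced here — a default store whose fields sit directly under `[storage]`, plus a list of extra providers — is what the `StorageConfig` changes further down deserialize. A minimal self-contained sketch of that layout (illustrative field set, not the exact GreptimeDB definitions; the outer `[storage]` nesting is dropped for brevity; assumes the serde and toml crates):

    use serde::Deserialize;

    // Hypothetical, trimmed-down mirror of ObjectStoreConfig: the `type` key
    // in the TOML selects the variant.
    #[derive(Debug, Deserialize)]
    #[serde(tag = "type")]
    enum ObjectStoreConfig {
        File {},
        S3 { bucket: String },
        Gcs { bucket: String, endpoint: String },
    }

    #[derive(Debug, Deserialize)]
    struct StorageConfig {
        data_home: String,
        // The default store's fields live directly in the parent table.
        #[serde(flatten)]
        store: ObjectStoreConfig,
        // Extra stores, each addressable by name from table DDL.
        #[serde(default)]
        providers: Vec<ObjectStoreConfig>,
    }

    fn main() {
        let cfg: StorageConfig = toml::from_str(
            r#"
            data_home = "/tmp/greptimedb/"
            type = "File"

            [[providers]]
            type = "Gcs"
            bucket = "foo"
            endpoint = "bar"

            [[providers]]
            type = "S3"
            bucket = "foo"
            "#,
        )
        .unwrap();
        assert_eq!(cfg.providers.len(), 2);
    }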
@@ -131,3 +131,15 @@ impl From<Status> for Error {
         Self::Server { code, msg }
     }
 }
+
+impl Error {
+    pub fn should_retry(&self) -> bool {
+        !matches!(
+            self,
+            Self::RegionServer {
+                code: Code::InvalidArgument,
+                ..
+            }
+        )
+    }
+}
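The rule encoded above is narrow by design: a region-server InvalidArgument will fail identically on every attempt, so it is the only error treated as permanent. A self-contained sketch of the same classification with stand-in types (not the real client error enum, which wraps tonic status codes):

    #[derive(Debug)]
    enum Code {
        InvalidArgument,
        Unavailable,
    }

    #[derive(Debug)]
    enum Error {
        // Error reported by a datanode region server.
        RegionServer { code: Code, msg: String },
    }

    impl Error {
        fn should_retry(&self) -> bool {
            // Everything except RegionServer { InvalidArgument, .. } is retriable.
            !matches!(
                self,
                Error::RegionServer {
                    code: Code::InvalidArgument,
                    ..
                }
            )
        }
    }

    fn main() {
        let transient = Error::RegionServer { code: Code::Unavailable, msg: "region busy".into() };
        let permanent = Error::RegionServer { code: Code::InvalidArgument, msg: "bad ddl".into() };
        assert!(transient.should_retry());
        assert!(!permanent.should_retry());
    }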
@@ -29,7 +29,6 @@ use prost::Message;
 use snafu::{location, Location, OptionExt, ResultExt};
 use tokio_stream::StreamExt;
 
-use crate::error::Error::RegionServer;
 use crate::error::{
     self, ConvertFlightDataSnafu, IllegalDatabaseResponseSnafu, IllegalFlightMessagesSnafu,
     MissingFieldSnafu, Result, ServerSnafu,
@@ -45,7 +44,7 @@ pub struct RegionRequester {
 impl Datanode for RegionRequester {
     async fn handle(&self, request: RegionRequest) -> MetaResult<AffectedRows> {
         self.handle_inner(request).await.map_err(|err| {
-            if matches!(err, RegionServer { .. }) {
+            if err.should_retry() {
                 meta_error::Error::RetryLater {
                     source: BoxedError::new(err),
                 }
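Upstream, the meta client resubmits anything mapped to RetryLater. A minimal sketch of that control flow (hypothetical names and backoff; the real loop lives in the meta client, not in this commit):

    use std::time::Duration;

    #[derive(Debug)]
    enum Error {
        RetryLater(String),
        Fatal(String),
    }

    fn handle(attempt: u32) -> Result<u64, Error> {
        // Pretend the region server is busy for the first two attempts.
        if attempt < 2 {
            Err(Error::RetryLater("region busy".to_string()))
        } else {
            Ok(42)
        }
    }

    fn main() {
        let mut attempt = 0;
        let affected_rows = loop {
            match handle(attempt) {
                Ok(rows) => break rows,
                Err(Error::RetryLater(msg)) => {
                    attempt += 1;
                    // Illustrative exponential backoff.
                    std::thread::sleep(Duration::from_millis(10u64 << attempt));
                    eprintln!("retrying after: {msg}");
                }
                Err(err) => panic!("permanent failure: {err:?}"),
            }
        };
        assert_eq!(affected_rows, 42);
    }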
@@ -214,7 +214,7 @@ mod tests {
     use std::time::Duration;
 
     use common_test_util::temp_dir::create_named_temp_file;
-    use datanode::config::{FileConfig, ObjectStoreConfig};
+    use datanode::config::{FileConfig, GcsConfig, ObjectStoreConfig, S3Config};
     use servers::heartbeat_options::HeartbeatOptions;
     use servers::Mode;
 
@@ -251,8 +251,17 @@ mod tests {
         sync_write = false
 
         [storage]
-        type = "File"
         data_home = "/tmp/greptimedb/"
+        type = "File"
+
+        [[storage.providers]]
+        type = "Gcs"
+        bucket = "foo"
+        endpoint = "bar"
+
+        [[storage.providers]]
+        type = "S3"
+        bucket = "foo"
 
         [logging]
         level = "debug"
@@ -305,6 +314,15 @@ mod tests {
             &options.storage.store,
             ObjectStoreConfig::File(FileConfig { .. })
         ));
+        assert_eq!(options.storage.providers.len(), 2);
+        assert!(matches!(
+            options.storage.providers[0],
+            ObjectStoreConfig::Gcs(GcsConfig { .. })
+        ));
+        assert!(matches!(
+            options.storage.providers[1],
+            ObjectStoreConfig::S3(S3Config { .. })
+        ));
 
         assert_eq!("debug", options.logging.level.unwrap());
         assert_eq!("/tmp/greptimedb/test/logs".to_string(), options.logging.dir);
@@ -238,7 +238,7 @@ mod tests {
             .unwrap();
 
         // Check the configs from environment variables.
-        match opts.storage.store {
+        match &opts.storage.store {
             ObjectStoreConfig::S3(s3_config) => {
                 assert_eq!(s3_config.bucket, "mybucket".to_string());
             }
@@ -426,6 +426,7 @@ mod tests {
     use auth::{Identity, Password, UserProviderRef};
     use common_base::readable_size::ReadableSize;
     use common_test_util::temp_dir::create_named_temp_file;
+    use datanode::config::{FileConfig, GcsConfig};
     use servers::Mode;
 
     use super::*;
@@ -473,8 +474,16 @@ mod tests {
         purge_interval = "10m"
         read_batch_size = 128
         sync_write = false
 
         [storage]
         data_home = "/tmp/greptimedb/"
         type = "File"
 
+        [[storage.providers]]
+        type = "Gcs"
+        bucket = "foo"
+        endpoint = "bar"
+
+        [[storage.providers]]
+        type = "S3"
+        access_key_id = "access_key_id"
+        secret_access_key = "secret_access_key"
@@ -524,7 +533,16 @@ mod tests {
 
         assert_eq!("/tmp/greptimedb/test/wal", dn_opts.wal.dir.unwrap());
 
-        match &dn_opts.storage.store {
+        assert!(matches!(
+            &dn_opts.storage.store,
+            datanode::config::ObjectStoreConfig::File(FileConfig { .. })
+        ));
+        assert_eq!(dn_opts.storage.providers.len(), 2);
+        assert!(matches!(
+            dn_opts.storage.providers[0],
+            datanode::config::ObjectStoreConfig::Gcs(GcsConfig { .. })
+        ));
+        match &dn_opts.storage.providers[1] {
             datanode::config::ObjectStoreConfig::S3(s3_config) => {
                 assert_eq!(
                     "Secret([REDACTED alloc::string::String])".to_string(),
@@ -48,6 +48,18 @@ pub enum ObjectStoreConfig {
     Gcs(GcsConfig),
 }
 
+impl ObjectStoreConfig {
+    pub fn name(&self) -> &'static str {
+        match self {
+            Self::File(_) => "File",
+            Self::S3(_) => "S3",
+            Self::Oss(_) => "Oss",
+            Self::Azblob(_) => "Azblob",
+            Self::Gcs(_) => "Gcs",
+        }
+    }
+}
+
 /// Storage engine config
 #[derive(Debug, Clone, Serialize, Deserialize)]
 #[serde(default)]
@@ -63,6 +75,7 @@ pub struct StorageConfig {
     pub data_home: String,
     #[serde(flatten)]
     pub store: ObjectStoreConfig,
+    pub providers: Vec<ObjectStoreConfig>,
 }
 
 impl Default for StorageConfig {
@@ -71,6 +84,7 @@ impl Default for StorageConfig {
             global_ttl: None,
             data_home: DEFAULT_DATA_HOME.to_string(),
             store: ObjectStoreConfig::default(),
+            providers: vec![],
         }
     }
 }
@@ -295,7 +309,7 @@ mod tests {
             secret_access_key = "secret_access_key"
         "#;
         let opts: DatanodeOptions = toml::from_str(toml_str).unwrap();
-        match opts.storage.store {
+        match &opts.storage.store {
             ObjectStoreConfig::S3(cfg) => {
                 assert_eq!(
                     "Secret([REDACTED alloc::string::String])".to_string(),
@@ -354,7 +354,6 @@ impl DatanodeBuilder {
                 ));
             }
         }
-
         info!("going to open {} regions", regions.len());
         let semaphore = Arc::new(tokio::sync::Semaphore::new(OPEN_REGION_PARALLELISM));
         let mut tasks = vec![];
@@ -417,7 +416,6 @@ impl DatanodeBuilder {
         );
 
         let table_provider_factory = Arc::new(DummyTableProviderFactory);
-
         let mut region_server = RegionServer::with_table_provider(
             query_engine,
             runtime,
@@ -425,13 +423,8 @@ impl DatanodeBuilder {
             table_provider_factory,
         );
 
-        let object_store = store::new_object_store(opts).await?;
-        let object_store_manager = ObjectStoreManager::new(
-            "default", // TODO: use a name which is set in the configuration when #919 is done.
-            object_store,
-        );
-        let engines =
-            Self::build_store_engines(opts, log_store, Arc::new(object_store_manager)).await?;
+        let object_store_manager = Self::build_object_store_manager(opts).await?;
+        let engines = Self::build_store_engines(opts, log_store, object_store_manager).await?;
         for engine in engines {
             region_server.register_engine(engine);
         }
@@ -496,6 +489,21 @@ impl DatanodeBuilder {
         }
         Ok(engines)
     }
+
+    /// Builds [ObjectStoreManager]
+    async fn build_object_store_manager(opts: &DatanodeOptions) -> Result<ObjectStoreManagerRef> {
+        let object_store =
+            store::new_object_store(opts.storage.store.clone(), &opts.storage.data_home).await?;
+        let default_name = opts.storage.store.name();
+        let mut object_store_manager = ObjectStoreManager::new(default_name, object_store);
+        for store in &opts.storage.providers {
+            object_store_manager.add(
+                store.name(),
+                store::new_object_store(store.clone(), &opts.storage.data_home).await?,
+            );
+        }
+        Ok(Arc::new(object_store_manager))
+    }
 }
 
 #[cfg(test)]
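The builder above registers the default store under its variant name and each provider under its own. A sketch of the registration/lookup contract (a stand-in for the real ObjectStoreManager in the object-store crate):

    use std::collections::HashMap;

    // Stand-in for the real ObjectStoreManager; only the contract is shown.
    struct ObjectStoreManager<T> {
        default_name: String,
        stores: HashMap<String, T>,
    }

    impl<T> ObjectStoreManager<T> {
        fn new(default_name: &str, store: T) -> Self {
            let mut stores = HashMap::new();
            stores.insert(default_name.to_string(), store);
            Self {
                default_name: default_name.to_string(),
                stores,
            }
        }

        fn add(&mut self, name: &str, store: T) {
            self.stores.insert(name.to_string(), store);
        }

        // A table created WITH(storage = '<name>') resolves its store here;
        // tables without the option fall back to the default.
        fn find(&self, name: Option<&str>) -> Option<&T> {
            self.stores.get(name.unwrap_or(self.default_name.as_str()))
        }
    }

    fn main() {
        let mut manager = ObjectStoreManager::new("File", "file-backed store");
        manager.add("Gcs", "gcs-backed store");
        assert_eq!(manager.find(Some("Gcs")), Some(&"gcs-backed store"));
        assert_eq!(manager.find(None), Some(&"file-backed store"));
    }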
@@ -32,12 +32,15 @@ use object_store::util::normalize_dir;
 use object_store::{util, HttpClient, ObjectStore, ObjectStoreBuilder};
 use snafu::prelude::*;
 
-use crate::config::{DatanodeOptions, ObjectStoreConfig, DEFAULT_OBJECT_STORE_CACHE_SIZE};
+use crate::config::{ObjectStoreConfig, DEFAULT_OBJECT_STORE_CACHE_SIZE};
 use crate::error::{self, Result};
 
-pub(crate) async fn new_object_store(opts: &DatanodeOptions) -> Result<ObjectStore> {
-    let data_home = normalize_dir(&opts.storage.data_home);
-    let object_store = match &opts.storage.store {
+pub(crate) async fn new_object_store(
+    store: ObjectStoreConfig,
+    data_home: &str,
+) -> Result<ObjectStore> {
+    let data_home = normalize_dir(data_home);
+    let object_store = match &store {
         ObjectStoreConfig::File(file_config) => {
             fs::new_fs_object_store(&data_home, file_config).await
         }
@@ -50,9 +53,8 @@ pub(crate) async fn new_object_store(opts: &DatanodeOptions) -> Result<ObjectStore> {
     }?;
 
     // Enable retry layer and cache layer for non-fs object storages
-    let object_store = if !matches!(opts.storage.store, ObjectStoreConfig::File(..)) {
-        let object_store =
-            create_object_store_with_cache(object_store, &opts.storage.store).await?;
+    let object_store = if !matches!(store, ObjectStoreConfig::File(..)) {
+        let object_store = create_object_store_with_cache(object_store, &store).await?;
         object_store.layer(RetryLayer::new().with_jitter())
     } else {
         object_store
@@ -170,6 +170,8 @@ impl<'a> ParserContext<'a> {
                 }
             );
         }
+        // Sorts options so that `test_display_create_table` can always pass.
+        let options = options.into_iter().sorted().collect();
         let create_table = CreateTable {
             if_not_exists,
             name: table_name,
@@ -241,7 +241,7 @@ mod tests {
 PARTITION r2 VALUES LESS THAN (MAXVALUE),
 )
 engine=mito
-with(regions=1, ttl='7d');
+with(regions=1, ttl='7d', storage='File');
 ";
         let result = ParserContext::create_with_dialect(sql, &GreptimeDbDialect {}).unwrap();
         assert_eq!(1, result.len());
@@ -267,6 +267,7 @@ PARTITION BY RANGE COLUMNS (ts) (
 ENGINE=mito
 WITH(
   regions = 1,
+  storage = 'File',
   ttl = '7d'
 )"#,
             &new_sql
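The sorting added in the parser is what keeps the expected string above stable: option maps have no fixed iteration order. A std-only sketch of the idea (the parser itself uses itertools' `sorted()` over the option list):

    fn main() {
        // HashMap iteration order is unstable, so render options in sorted order.
        let mut options = vec![("ttl", "7d"), ("regions", "1"), ("storage", "File")];
        options.sort();
        let rendered = options
            .iter()
            .map(|(key, value)| format!("{key} = '{value}'"))
            .collect::<Vec<_>>()
            .join(",\n  ");
        // Prints regions, then storage, then ttl - matching the test above.
        println!("WITH(\n  {rendered}\n)");
    }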
@@ -84,6 +84,7 @@ pub struct TableOptions {
 pub const WRITE_BUFFER_SIZE_KEY: &str = "write_buffer_size";
 pub const TTL_KEY: &str = "ttl";
 pub const REGIONS_KEY: &str = "regions";
+pub const STORAGE_KEY: &str = "storage";
 
 impl TryFrom<&HashMap<String, String>> for TableOptions {
     type Error = error::Error;
@@ -340,6 +341,7 @@ pub fn valid_table_option(key: &str) -> bool {
             | WRITE_BUFFER_SIZE_KEY
             | TTL_KEY
             | REGIONS_KEY
+            | STORAGE_KEY
     ) | is_supported_in_s3(key)
 }
 
@@ -365,6 +367,7 @@ mod tests {
         assert!(valid_table_option(TTL_KEY));
         assert!(valid_table_option(REGIONS_KEY));
         assert!(valid_table_option(WRITE_BUFFER_SIZE_KEY));
+        assert!(valid_table_option(STORAGE_KEY));
         assert!(!valid_table_option("foo"));
     }
 
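Distilled, the validation is a whitelist check with `storage` now on the list. A sketch (the real function additionally accepts S3 connection keys via `is_supported_in_s3`):

    const VALID_KEYS: &[&str] = &["write_buffer_size", "ttl", "regions", "storage"];

    // Hedged sketch: the real implementation also delegates S3-specific keys
    // to is_supported_in_s3(key).
    fn valid_table_option(key: &str) -> bool {
        VALID_KEYS.contains(&key)
    }

    fn main() {
        assert!(valid_table_option("storage"));
        assert!(!valid_table_option("foo"));
    }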
@@ -68,6 +68,7 @@ pub struct GreptimeDbClusterBuilder {
     cluster_name: String,
     kv_backend: KvBackendRef,
     store_config: Option<ObjectStoreConfig>,
+    store_providers: Option<Vec<StorageType>>,
     datanodes: Option<u32>,
 }
 
@@ -92,6 +93,7 @@ impl GreptimeDbClusterBuilder {
             cluster_name: cluster_name.to_string(),
             kv_backend,
             store_config: None,
+            store_providers: None,
             datanodes: None,
         }
     }
@@ -101,6 +103,11 @@ impl GreptimeDbClusterBuilder {
         self
     }
 
+    pub fn with_store_providers(mut self, store_providers: Vec<StorageType>) -> Self {
+        self.store_providers = Some(store_providers);
+        self
+    }
+
     pub fn with_datanodes(mut self, datanodes: u32) -> Self {
         self.datanodes = Some(datanodes);
         self
@@ -176,14 +183,15 @@ impl GreptimeDbClusterBuilder {
 
                 dir_guards.push(FileDirGuard::new(home_tmp_dir));
 
-                create_datanode_opts(store_config.clone(), home_dir)
+                create_datanode_opts(store_config.clone(), vec![], home_dir)
             } else {
                 let (opts, guard) = create_tmp_dir_and_datanode_opts(
                     StorageType::File,
+                    self.store_providers.clone().unwrap_or_default(),
                     &format!("{}-dn-{}", self.cluster_name, datanode_id),
                 );
 
-                storage_guards.push(guard.storage_guard);
+                storage_guards.push(guard.storage_guards);
                 dir_guards.push(guard.home_guard);
 
                 opts
@@ -195,7 +203,11 @@ impl GreptimeDbClusterBuilder {
 
             instances.insert(datanode_id, datanode);
         }
-        (instances, storage_guards, dir_guards)
+        (
+            instances,
+            storage_guards.into_iter().flatten().collect(),
+            dir_guards,
+        )
     }
 
     async fn wait_datanodes_alive(
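Because every datanode now owns a Vec of storage guards (default store plus providers), the builder collects a Vec of Vecs and flattens it into a single list, as in this small sketch:

    fn main() {
        // One Vec of guard labels per datanode; stand-ins for Vec<StorageGuard>.
        let per_datanode: Vec<Vec<&str>> = vec![vec!["file-0", "s3-0"], vec!["file-1", "s3-1"]];
        let all_guards: Vec<&str> = per_datanode.into_iter().flatten().collect();
        assert_eq!(all_guards, vec!["file-0", "s3-0", "file-1", "s3-1"]);
    }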
@@ -40,7 +40,8 @@ pub struct GreptimeDbStandalone {
 
 pub struct GreptimeDbStandaloneBuilder {
     instance_name: String,
-    store_type: Option<StorageType>,
+    store_providers: Option<Vec<StorageType>>,
+    default_store: Option<StorageType>,
     plugin: Option<Plugins>,
 }
 
@@ -48,14 +49,23 @@ impl GreptimeDbStandaloneBuilder {
     pub fn new(instance_name: &str) -> Self {
         Self {
             instance_name: instance_name.to_string(),
-            store_type: None,
+            store_providers: None,
             plugin: None,
+            default_store: None,
         }
     }
 
-    pub fn with_store_type(self, store_type: StorageType) -> Self {
+    pub fn with_default_store_type(self, store_type: StorageType) -> Self {
         Self {
-            store_type: Some(store_type),
+            default_store: Some(store_type),
             ..self
         }
     }
+
+    #[cfg(test)]
+    pub fn with_store_providers(self, store_providers: Vec<StorageType>) -> Self {
+        Self {
+            store_providers: Some(store_providers),
+            ..self
+        }
+    }
@@ -69,9 +79,11 @@ impl GreptimeDbStandaloneBuilder {
     }
 
     pub async fn build(self) -> GreptimeDbStandalone {
-        let store_type = self.store_type.unwrap_or(StorageType::File);
+        let default_store_type = self.default_store.unwrap_or(StorageType::File);
+        let store_types = self.store_providers.unwrap_or_default();
 
-        let (opts, guard) = create_tmp_dir_and_datanode_opts(store_type, &self.instance_name);
+        let (opts, guard) =
+            create_tmp_dir_and_datanode_opts(default_store_type, store_types, &self.instance_name);
 
         let procedure_config = ProcedureConfig::default();
         let kv_backend_config = KvBackendConfig::default();
@@ -77,6 +77,26 @@ impl Display for StorageType {
 }
 
 impl StorageType {
+    pub fn build_storage_types_based_on_env() -> Vec<StorageType> {
+        let mut storage_types = Vec::with_capacity(4);
+        storage_types.push(StorageType::File);
+        if let Ok(bucket) = env::var("GT_S3_BUCKET") {
+            if !bucket.is_empty() {
+                storage_types.push(StorageType::S3);
+            }
+        }
+        if env::var("GT_OSS_BUCKET").is_ok() {
+            storage_types.push(StorageType::Oss);
+        }
+        if env::var("GT_AZBLOB_CONTAINER").is_ok() {
+            storage_types.push(StorageType::Azblob);
+        }
+        if env::var("GT_GCS_BUCKET").is_ok() {
+            storage_types.push(StorageType::Gcs);
+        }
+        storage_types
+    }
+
     pub fn test_on(&self) -> bool {
         let _ = dotenv::dotenv();
 
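The helper above keys the test matrix off environment variables, so CI without cloud credentials still exercises the File-only path. A trimmed sketch of the same pattern (only two of the four remote providers shown):

    use std::env;

    fn providers_from_env() -> Vec<&'static str> {
        // File is always testable; remote stores only when credentials exist.
        let mut providers = vec!["File"];
        if env::var("GT_S3_BUCKET").map(|bucket| !bucket.is_empty()).unwrap_or(false) {
            providers.push("S3");
        }
        if env::var("GT_GCS_BUCKET").is_ok() {
            providers.push("Gcs");
        }
        providers
    }

    fn main() {
        println!("storage types under test: {:?}", providers_from_env());
    }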
@@ -244,7 +264,7 @@ pub enum TempDirGuard {
 
 pub struct TestGuard {
     pub home_guard: FileDirGuard,
-    pub storage_guard: StorageGuard,
+    pub storage_guards: Vec<StorageGuard>,
 }
 
 pub struct FileDirGuard {
@@ -261,42 +281,62 @@ pub struct StorageGuard(pub TempDirGuard);
 
 impl TestGuard {
     pub async fn remove_all(&mut self) {
-        if let TempDirGuard::S3(guard)
-        | TempDirGuard::Oss(guard)
-        | TempDirGuard::Azblob(guard)
-        | TempDirGuard::Gcs(guard) = &mut self.storage_guard.0
-        {
-            guard.remove_all().await.unwrap()
+        for storage_guard in self.storage_guards.iter_mut() {
+            if let TempDirGuard::S3(guard)
+            | TempDirGuard::Oss(guard)
+            | TempDirGuard::Azblob(guard)
+            | TempDirGuard::Gcs(guard) = &mut storage_guard.0
+            {
+                guard.remove_all().await.unwrap()
+            }
         }
     }
 }
 
 pub fn create_tmp_dir_and_datanode_opts(
-    store_type: StorageType,
+    default_store_type: StorageType,
+    store_provider_types: Vec<StorageType>,
     name: &str,
 ) -> (DatanodeOptions, TestGuard) {
     let home_tmp_dir = create_temp_dir(&format!("gt_data_{name}"));
     let home_dir = home_tmp_dir.path().to_str().unwrap().to_string();
 
-    let (store, data_tmp_dir) = get_test_store_config(&store_type);
-    let opts = create_datanode_opts(store, home_dir);
+    // Excludes the default object store.
+    let mut store_providers = Vec::with_capacity(store_provider_types.len());
+    // Includes the default object store.
+    let mut storage_guards = Vec::with_capacity(store_provider_types.len() + 1);
+
+    let (default_store, data_tmp_dir) = get_test_store_config(&default_store_type);
+    storage_guards.push(StorageGuard(data_tmp_dir));
+
+    for store_type in store_provider_types {
+        let (store, data_tmp_dir) = get_test_store_config(&store_type);
+        store_providers.push(store);
+        storage_guards.push(StorageGuard(data_tmp_dir))
+    }
+    let opts = create_datanode_opts(default_store, store_providers, home_dir);
 
     (
         opts,
         TestGuard {
             home_guard: FileDirGuard::new(home_tmp_dir),
-            storage_guard: StorageGuard(data_tmp_dir),
+            storage_guards,
         },
     )
 }
 
-pub(crate) fn create_datanode_opts(store: ObjectStoreConfig, home_dir: String) -> DatanodeOptions {
+pub(crate) fn create_datanode_opts(
+    default_store: ObjectStoreConfig,
+    providers: Vec<ObjectStoreConfig>,
+    home_dir: String,
+) -> DatanodeOptions {
     DatanodeOptions {
         node_id: Some(0),
         require_lease_before_startup: true,
         storage: StorageConfig {
             data_home: home_dir,
-            store,
+            providers,
+            store: default_store,
             ..Default::default()
         },
         mode: Mode::Standalone,
@@ -325,7 +365,7 @@ async fn setup_standalone_instance(
     store_type: StorageType,
 ) -> GreptimeDbStandalone {
     GreptimeDbStandaloneBuilder::new(test_name)
-        .with_store_type(store_type)
+        .with_default_store_type(store_type)
         .build()
         .await
 }
@@ -31,8 +31,9 @@ use session::context::{QueryContext, QueryContextRef};
 
 use crate::test_util::check_output_stream;
 use crate::tests::test_util::{
-    both_instances_cases, check_unordered_output_stream, distributed, find_testing_resource,
-    prepare_path, standalone, standalone_instance_case, MockInstance,
+    both_instances_cases, both_instances_cases_with_custom_storages, check_unordered_output_stream,
+    distributed, distributed_with_multiple_object_stores, find_testing_resource, prepare_path,
+    standalone, standalone_instance_case, standalone_with_multiple_object_stores, MockInstance,
 };
 
 #[apply(both_instances_cases)]
@@ -1840,3 +1841,120 @@ async fn execute_sql_with(
     .await
     .unwrap()
 }
+
+#[apply(both_instances_cases_with_custom_storages)]
+async fn test_custom_storage(instance: Arc<dyn MockInstance>) {
+    let frontend = instance.frontend();
+    let custom_storages = [
+        ("S3", "GT_S3_BUCKET"),
+        ("Oss", "GT_OSS_BUCKET"),
+        ("Azblob", "GT_AZBLOB_CONTAINER"),
+        ("Gcs", "GT_GCS_BUCKET"),
+    ];
+    for (storage_name, custom_storage_env) in custom_storages {
+        if let Ok(env_value) = env::var(custom_storage_env) {
+            if env_value.is_empty() {
+                continue;
+            }
+            let sql = if instance.is_distributed_mode() {
+                format!(
+                    r#"create table test_table(
+                        a int null primary key,
+                        ts timestamp time index,
+                    )
+                    PARTITION BY RANGE COLUMNS (a) (
+                        PARTITION r0 VALUES LESS THAN (1),
+                        PARTITION r1 VALUES LESS THAN (10),
+                        PARTITION r2 VALUES LESS THAN (100),
+                        PARTITION r3 VALUES LESS THAN (MAXVALUE),
+                    )
+                    with(storage='{storage_name}')
+                    "#
+                )
+            } else {
+                format!(
+                    r#"create table test_table(a int primary key, ts timestamp time index)with(storage='{storage_name}');"#
+                )
+            };
+
+            let output = execute_sql(&instance.frontend(), &sql).await;
+            assert!(matches!(output, Output::AffectedRows(0)));
+            let output = execute_sql(
+                &frontend,
+                r#"insert into test_table(a, ts) values
+                        (1, 1655276557000),
+                        (1000, 1655276558000)
+                        "#,
+            )
+            .await;
+            assert!(matches!(output, Output::AffectedRows(2)));
+
+            let output = execute_sql(&frontend, "select * from test_table").await;
+            let expected = "\
++------+---------------------+
+| a    | ts                  |
++------+---------------------+
+| 1    | 2022-06-15T07:02:37 |
+| 1000 | 2022-06-15T07:02:38 |
++------+---------------------+";
+
+            check_output_stream(output, expected).await;
+            let output = execute_sql(&frontend, "show create table test_table").await;
+            let Output::RecordBatches(record_batches) = output else {
+                unreachable!()
+            };
+
+            let record_batches = record_batches.iter().collect::<Vec<_>>();
+            let column = record_batches[0].column_by_name("Create Table").unwrap();
+            let actual = column.get(0);
+
+            let expect = if instance.is_distributed_mode() {
+                format!(
+                    r#"CREATE TABLE IF NOT EXISTS "test_table" (
+  "a" INT NULL,
+  "ts" TIMESTAMP(3) NOT NULL,
+  TIME INDEX ("ts"),
+  PRIMARY KEY ("a")
+)
+PARTITION BY RANGE COLUMNS ("a") (
+  PARTITION r0 VALUES LESS THAN (1),
+  PARTITION r1 VALUES LESS THAN (10),
+  PARTITION r2 VALUES LESS THAN (100),
+  PARTITION r3 VALUES LESS THAN (MAXVALUE)
+)
+ENGINE=mito
+WITH(
+  regions = 4,
+  storage = '{storage_name}'
+)"#
+                )
+            } else {
+                format!(
+                    r#"CREATE TABLE IF NOT EXISTS "test_table" (
+  "a" INT NULL,
+  "ts" TIMESTAMP(3) NOT NULL,
+  TIME INDEX ("ts"),
+  PRIMARY KEY ("a")
+)
+
+ENGINE=mito
+WITH(
+  regions = 1,
+  storage = '{storage_name}'
+)"#
+                )
+            };
+            assert_eq!(actual.to_string(), expect);
+            let output = execute_sql(&frontend, "truncate test_table").await;
+            assert!(matches!(output, Output::AffectedRows(0)));
+            let output = execute_sql(&frontend, "select * from test_table").await;
+            let expected = "\
+++
+++";
+
+            check_output_stream(output, expected).await;
+            let output = execute_sql(&frontend, "drop table test_table").await;
+            assert!(matches!(output, Output::AffectedRows(0)));
+        }
+    }
+}
@@ -20,7 +20,9 @@ use common_test_util::find_workspace_path;
 use frontend::instance::Instance;
 use rstest_reuse::{self, template};
 
+use crate::cluster::GreptimeDbClusterBuilder;
 use crate::standalone::{GreptimeDbStandalone, GreptimeDbStandaloneBuilder};
+use crate::test_util::StorageType;
 use crate::tests::{create_distributed_instance, MockDistributedInstance};
 
 pub(crate) trait MockInstance {
@@ -61,6 +63,42 @@ pub(crate) async fn distributed() -> Arc<dyn MockInstance> {
     Arc::new(instance)
 }
 
+pub(crate) async fn standalone_with_multiple_object_stores() -> Arc<dyn MockInstance> {
+    let _ = dotenv::dotenv();
+    let test_name = uuid::Uuid::new_v4().to_string();
+    let storage_types = StorageType::build_storage_types_based_on_env();
+    let instance = GreptimeDbStandaloneBuilder::new(&test_name)
+        .with_store_providers(storage_types)
+        .build()
+        .await;
+    Arc::new(instance)
+}
+
+pub(crate) async fn distributed_with_multiple_object_stores() -> Arc<dyn MockInstance> {
+    let _ = dotenv::dotenv();
+    let test_name = uuid::Uuid::new_v4().to_string();
+    let providers = StorageType::build_storage_types_based_on_env();
+    let cluster = GreptimeDbClusterBuilder::new(&test_name)
+        .await
+        .with_store_providers(providers)
+        .build()
+        .await;
+    Arc::new(MockDistributedInstance(cluster))
+}
+
+#[template]
+#[rstest]
+#[case::test_with_standalone(standalone_with_multiple_object_stores())]
+#[case::test_with_distributed(distributed_with_multiple_object_stores())]
+#[awt]
+#[tokio::test(flavor = "multi_thread")]
+pub(crate) fn both_instances_cases_with_custom_storages(
+    #[future]
+    #[case]
+    instance: Arc<dyn MockInstance>,
+) {
+}
+
 #[template]
 #[rstest]
 #[case::test_with_standalone(standalone())]
@@ -712,6 +712,7 @@ sync_write = false
 
 [datanode.storage]
 type = "{}"
+providers = []
 
 [[datanode.region_engine]]
 
@@ -100,3 +100,24 @@ WITH(
 
 Error: 1004(InvalidArguments), Invalid table option key: foo
 
+CREATE TABLE not_supported_table_storage_option (
+  id INT UNSIGNED,
+  host STRING,
+  cpu DOUBLE,
+  disk FLOAT,
+  ts TIMESTAMP NOT NULL DEFAULT current_timestamp(),
+  TIME INDEX (ts),
+  PRIMARY KEY (id, host)
+)
+PARTITION BY RANGE COLUMNS (id) (
+  PARTITION r0 VALUES LESS THAN (5),
+  PARTITION r1 VALUES LESS THAN (9),
+  PARTITION r2 VALUES LESS THAN (MAXVALUE),
+)
+ENGINE=mito
+WITH(
+  storage = 'S3'
+);
+
+Error: 1004(InvalidArguments), Object store not found: s3
+
@@ -50,3 +50,21 @@ WITH(
   ttl = '7d',
   write_buffer_size = 1024
 );
+CREATE TABLE not_supported_table_storage_option (
+  id INT UNSIGNED,
+  host STRING,
+  cpu DOUBLE,
+  disk FLOAT,
+  ts TIMESTAMP NOT NULL DEFAULT current_timestamp(),
+  TIME INDEX (ts),
+  PRIMARY KEY (id, host)
+)
+PARTITION BY RANGE COLUMNS (id) (
+  PARTITION r0 VALUES LESS THAN (5),
+  PARTITION r1 VALUES LESS THAN (9),
+  PARTITION r2 VALUES LESS THAN (MAXVALUE),
+)
+ENGINE=mito
+WITH(
+  storage = 'S3'
+);