Compare commits

...

1 Commit

Author: luofucong
SHA1: dc9378c316
Message: chore: upgrade DataFusion family
Signed-off-by: luofucong <luofc@foxmail.com>
Date: 2026-01-12 19:21:56 +08:00
72 changed files with 1736 additions and 1306 deletions

Cargo.lock (generated, 1555 changed lines): file diff suppressed because it is too large.

View File

@@ -100,13 +100,13 @@ rust.unexpected_cfgs = { level = "warn", check-cfg = ['cfg(tokio_unstable)'] }
# See for more details: https://github.com/rust-lang/cargo/issues/11329
ahash = { version = "0.8", features = ["compile-time-rng"] }
aquamarine = "0.6"
arrow = { version = "56.2", features = ["prettyprint"] }
arrow-array = { version = "56.2", default-features = false, features = ["chrono-tz"] }
arrow-buffer = "56.2"
arrow-cast = "56.2"
arrow-flight = "56.2"
arrow-ipc = { version = "56.2", default-features = false, features = ["lz4", "zstd"] }
arrow-schema = { version = "56.2", features = ["serde"] }
arrow = { version = "57.0", features = ["prettyprint"] }
arrow-array = { version = "57.0", default-features = false, features = ["chrono-tz"] }
arrow-buffer = "57.0"
arrow-cast = "57.0"
arrow-flight = "57.0"
arrow-ipc = { version = "57.0", default-features = false, features = ["lz4", "zstd"] }
arrow-schema = { version = "57.0", features = ["serde"] }
async-stream = "0.3"
async-trait = "0.1"
# Remember to update axum-extra, axum-macros when updating axum
@@ -120,38 +120,39 @@ bitflags = "2.4.1"
bytemuck = "1.12"
bytes = { version = "1.7", features = ["serde"] }
chrono = { version = "0.4", features = ["serde"] }
chrono-tz = { version = "0.10.1", features = ["case-insensitive"] }
chrono-tz = { version = "0.10", features = ["case-insensitive"] }
clap = { version = "4.4", features = ["derive"] }
config = "0.13.0"
const_format = "0.2"
crossbeam-utils = "0.8"
dashmap = "6.1"
datafusion = "50"
datafusion-common = "50"
datafusion-expr = "50"
datafusion-functions = "50"
datafusion-functions-aggregate-common = "50"
datafusion-optimizer = "50"
datafusion-orc = "0.5"
datafusion-pg-catalog = "0.12.3"
datafusion-physical-expr = "50"
datafusion-physical-plan = "50"
datafusion-sql = "50"
datafusion-substrait = "50"
datafusion = "51.0"
datafusion-common = "51.0"
datafusion-datasource = "51.0"
datafusion-expr = "51.0"
datafusion-functions = "51.0"
datafusion-functions-aggregate-common = "51.0"
datafusion-optimizer = "51.0"
datafusion-orc = { git = "https://github.com/GreptimeTeam/datafusion-orc.git", rev = "35f2e04bf81f2ab7b6f86c0450d6a77b7098d43e" }
datafusion-pg-catalog = "0.13"
datafusion-physical-expr = "51.0"
datafusion-physical-plan = "51.0"
datafusion-sql = "51.0"
datafusion-substrait = "51.0"
deadpool = "0.12"
deadpool-postgres = "0.14"
derive_builder = "0.20"
derive_more = { version = "2.1", features = ["full"] }
dotenv = "0.15"
either = "1.15"
etcd-client = { version = "0.16.1", features = [
etcd-client = { version = "0.17", features = [
"tls",
"tls-roots",
] }
fst = "0.4.7"
futures = "0.3"
futures-util = "0.3"
greptime-proto = { git = "https://github.com/GreptimeTeam/greptime-proto.git", rev = "58aeee49267fb1eafa6f9123f9d0c47dd0f62722" }
greptime-proto = { git = "https://github.com/GreptimeTeam/greptime-proto.git", rev = "69499de6d38d9032101fa8a9e10d375e124ca8d3" }
hex = "0.4"
http = "1"
humantime = "2.1"
@@ -162,7 +163,7 @@ itertools = "0.14"
jsonb = { git = "https://github.com/databendlabs/jsonb.git", rev = "8c8d2fc294a39f3ff08909d60f718639cfba3875", default-features = false }
lazy_static = "1.4"
local-ip-address = "0.6"
loki-proto = { git = "https://github.com/GreptimeTeam/loki-proto.git", rev = "3b7cd33234358b18ece977bf689dc6fb760f29ab" }
loki-proto = { git = "https://github.com/GreptimeTeam/loki-proto.git", rev = "a73a6b83eeb014645aac527b456816a81bd6b472" }
meter-core = { git = "https://github.com/GreptimeTeam/greptime-meter.git", rev = "5618e779cf2bb4755b499c630fba4c35e91898cb" }
mockall = "0.13"
moka = "0.12"
@@ -172,7 +173,7 @@ notify = "8.0"
num_cpus = "1.16"
object_store_opendal = "0.54"
once_cell = "1.18"
opentelemetry-proto = { version = "0.30", features = [
opentelemetry-proto = { version = "0.31", features = [
"gen-tonic",
"metrics",
"trace",
@@ -180,18 +181,18 @@ opentelemetry-proto = { version = "0.30", features = [
"logs",
] }
ordered-float = { version = "4.3", features = ["serde"] }
otel-arrow-rust = { git = "https://github.com/GreptimeTeam/otel-arrow", rev = "2d64b7c0fa95642028a8205b36fe9ea0b023ec59", features = [
otel-arrow-rust = { git = "https://github.com/GreptimeTeam/otel-arrow", rev = "5da284414e9b14f678344b51e5292229e4b5f8d2", features = [
"server",
] }
parking_lot = "0.12"
parquet = { version = "56.2", default-features = false, features = ["arrow", "async", "object_store"] }
parquet = { version = "57.0", default-features = false, features = ["arrow", "async", "object_store"] }
paste = "1.0"
pin-project = "1.0"
pretty_assertions = "1.4.0"
prometheus = { version = "0.13.3", features = ["process"] }
promql-parser = { version = "0.7.1", features = ["ser"] }
prost = { version = "0.13", features = ["no-recursion-limit"] }
prost-types = "0.13"
prost = { version = "0.14", features = ["no-recursion-limit"] }
prost-types = "0.14"
raft-engine = { version = "0.4.1", default-features = false }
rand = "0.9"
ratelimit = "0.10"
@@ -222,7 +223,7 @@ simd-json = "0.15"
similar-asserts = "1.6.0"
smallvec = { version = "1", features = ["serde"] }
snafu = "0.8"
sqlparser = { version = "0.58.0", default-features = false, features = ["std", "visitor", "serde"] }
sqlparser = { version = "0.59.0", default-features = false, features = ["std", "visitor", "serde"] }
sqlx = { version = "0.8", default-features = false, features = ["any", "macros", "json", "runtime-tokio-rustls"] }
strum = { version = "0.27", features = ["derive"] }
sysinfo = "0.33"
@@ -233,7 +234,7 @@ tokio-rustls = { version = "0.26.2", default-features = false }
tokio-stream = "0.1"
tokio-util = { version = "0.7", features = ["io-util", "compat"] }
toml = "0.8.8"
tonic = { version = "0.13", features = ["tls-ring", "gzip", "zstd"] }
tonic = { version = "0.14", features = ["tls-ring", "gzip", "zstd"] }
tower = "0.5"
tower-http = "0.6"
tracing = "0.1"
@@ -321,19 +322,20 @@ git = "https://github.com/GreptimeTeam/greptime-meter.git"
rev = "5618e779cf2bb4755b499c630fba4c35e91898cb"
[patch.crates-io]
datafusion = { git = "https://github.com/GreptimeTeam/datafusion.git", rev = "fd4b2abcf3c3e43e94951bda452c9fd35243aab0" }
datafusion-common = { git = "https://github.com/GreptimeTeam/datafusion.git", rev = "fd4b2abcf3c3e43e94951bda452c9fd35243aab0" }
datafusion-expr = { git = "https://github.com/GreptimeTeam/datafusion.git", rev = "fd4b2abcf3c3e43e94951bda452c9fd35243aab0" }
datafusion-functions = { git = "https://github.com/GreptimeTeam/datafusion.git", rev = "fd4b2abcf3c3e43e94951bda452c9fd35243aab0" }
datafusion-functions-aggregate-common = { git = "https://github.com/GreptimeTeam/datafusion.git", rev = "fd4b2abcf3c3e43e94951bda452c9fd35243aab0" }
datafusion-optimizer = { git = "https://github.com/GreptimeTeam/datafusion.git", rev = "fd4b2abcf3c3e43e94951bda452c9fd35243aab0" }
datafusion-physical-expr = { git = "https://github.com/GreptimeTeam/datafusion.git", rev = "fd4b2abcf3c3e43e94951bda452c9fd35243aab0" }
datafusion-physical-expr-common = { git = "https://github.com/GreptimeTeam/datafusion.git", rev = "fd4b2abcf3c3e43e94951bda452c9fd35243aab0" }
datafusion-physical-plan = { git = "https://github.com/GreptimeTeam/datafusion.git", rev = "fd4b2abcf3c3e43e94951bda452c9fd35243aab0" }
datafusion-datasource = { git = "https://github.com/GreptimeTeam/datafusion.git", rev = "fd4b2abcf3c3e43e94951bda452c9fd35243aab0" }
datafusion-sql = { git = "https://github.com/GreptimeTeam/datafusion.git", rev = "fd4b2abcf3c3e43e94951bda452c9fd35243aab0" }
datafusion-substrait = { git = "https://github.com/GreptimeTeam/datafusion.git", rev = "fd4b2abcf3c3e43e94951bda452c9fd35243aab0" }
sqlparser = { git = "https://github.com/GreptimeTeam/sqlparser-rs.git", rev = "a0ce2bc6eb3e804532932f39833c32432f5c9a39" } # branch = "v0.58.x"
datafusion = { git = "https://github.com/GreptimeTeam/datafusion.git", rev = "7143b2fc4492a7970774583ed0997a459f3e5c05" }
datafusion-common = { git = "https://github.com/GreptimeTeam/datafusion.git", rev = "7143b2fc4492a7970774583ed0997a459f3e5c05" }
datafusion-expr = { git = "https://github.com/GreptimeTeam/datafusion.git", rev = "7143b2fc4492a7970774583ed0997a459f3e5c05" }
datafusion-functions = { git = "https://github.com/GreptimeTeam/datafusion.git", rev = "7143b2fc4492a7970774583ed0997a459f3e5c05" }
datafusion-functions-aggregate-common = { git = "https://github.com/GreptimeTeam/datafusion.git", rev = "7143b2fc4492a7970774583ed0997a459f3e5c05" }
datafusion-optimizer = { git = "https://github.com/GreptimeTeam/datafusion.git", rev = "7143b2fc4492a7970774583ed0997a459f3e5c05" }
datafusion-physical-expr = { git = "https://github.com/GreptimeTeam/datafusion.git", rev = "7143b2fc4492a7970774583ed0997a459f3e5c05" }
datafusion-physical-expr-common = { git = "https://github.com/GreptimeTeam/datafusion.git", rev = "7143b2fc4492a7970774583ed0997a459f3e5c05" }
datafusion-physical-plan = { git = "https://github.com/GreptimeTeam/datafusion.git", rev = "7143b2fc4492a7970774583ed0997a459f3e5c05" }
datafusion-pg-catalog = { git = "https://github.com/GreptimeTeam/datafusion-postgres.git", rev = "74ac8e2806be6de91ff192b97f64735392539d16" }
datafusion-datasource = { git = "https://github.com/GreptimeTeam/datafusion.git", rev = "7143b2fc4492a7970774583ed0997a459f3e5c05" }
datafusion-sql = { git = "https://github.com/GreptimeTeam/datafusion.git", rev = "7143b2fc4492a7970774583ed0997a459f3e5c05" }
datafusion-substrait = { git = "https://github.com/GreptimeTeam/datafusion.git", rev = "7143b2fc4492a7970774583ed0997a459f3e5c05" }
sqlparser = { git = "https://github.com/GreptimeTeam/sqlparser-rs.git", rev = "d7d95a44889e099e32d78e9bad9bc00598faef28" } # on branch v0.59.x
[profile.release]
debug = 1

View File

@@ -35,6 +35,7 @@ use mito2::sst::parquet::reader::ParquetReaderBuilder;
use mito2::sst::parquet::{PARQUET_METADATA_KEY, WriteOptions};
use mito2::worker::write_cache_from_config;
use object_store::ObjectStore;
use parquet::file::metadata::{FooterTail, KeyValue};
use regex::Regex;
use snafu::OptionExt;
use store_api::metadata::{RegionMetadata, RegionMetadataRef};
@@ -463,7 +464,6 @@ fn extract_region_metadata(
file_path: &str,
meta: &parquet::file::metadata::ParquetMetaData,
) -> error::Result<RegionMetadataRef> {
use parquet::format::KeyValue;
let kvs: Option<&Vec<KeyValue>> = meta.file_metadata().key_value_metadata();
let Some(kvs) = kvs else {
return Err(error::IllegalConfigSnafu {
@@ -608,7 +608,7 @@ async fn load_parquet_metadata(
let buffer_len = buffer.len();
let mut footer = [0; 8];
footer.copy_from_slice(&buffer[buffer_len - FOOTER_SIZE..]);
let footer = ParquetMetaDataReader::decode_footer_tail(&footer)?;
let footer = FooterTail::try_new(&footer)?;
let metadata_len = footer.metadata_length() as u64;
if actual_size - (FOOTER_SIZE as u64) < metadata_len {
return Err("invalid footer/metadata length".into());

View File

@@ -27,13 +27,14 @@ common-recordbatch.workspace = true
common-runtime.workspace = true
common-telemetry.workspace = true
datafusion.workspace = true
datafusion-datasource.workspace = true
datafusion-orc.workspace = true
datatypes.workspace = true
futures.workspace = true
lazy_static.workspace = true
object-store.workspace = true
object_store_opendal.workspace = true
orc-rust = { version = "0.6.3", default-features = false, features = ["async"] }
orc-rust = { version = "0.7", default-features = false, features = ["async"] }
parquet.workspace = true
paste.workspace = true
regex.workspace = true

View File

@@ -14,7 +14,7 @@
use arrow::record_batch::RecordBatch;
use async_trait::async_trait;
use datafusion::parquet::format::FileMetaData;
use parquet::file::metadata::ParquetMetaData;
use crate::error::Result;
@@ -24,5 +24,5 @@ pub trait DfRecordBatchEncoder {
#[async_trait]
pub trait ArrowWriterCloser {
async fn close(mut self) -> Result<FileMetaData>;
async fn close(mut self) -> Result<ParquetMetaData>;
}
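Context for this trait change, as a hedged sketch: in parquet 57, ArrowWriter::close returns ParquetMetaData directly and the thrift-flavored parquet::format::FileMetaData type is gone, which is why downstream conversion helpers (see the removal of parse_parquet_metadata later in this diff) are no longer needed:

use parquet::arrow::ArrowWriter;
use parquet::file::metadata::ParquetMetaData;

// parquet 57: close() already yields ParquetMetaData; callers previously
// received a thrift format::FileMetaData and had to convert it themselves.
fn finish<W: std::io::Write + Send>(
    writer: ArrowWriter<W>,
) -> parquet::errors::Result<ParquetMetaData> {
    writer.close()
}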

View File

@@ -40,7 +40,6 @@ use datafusion::datasource::physical_plan::{
use datafusion::error::{DataFusionError, Result as DataFusionResult};
use datafusion::physical_plan::SendableRecordBatchStream;
use datafusion::physical_plan::metrics::ExecutionPlanMetricsSet;
use datatypes::arrow::datatypes::SchemaRef;
use futures::{StreamExt, TryStreamExt};
use object_store::ObjectStore;
use object_store_opendal::OpendalStore;
@@ -303,24 +302,20 @@ where
pub async fn file_to_stream(
store: &ObjectStore,
filename: &str,
file_schema: SchemaRef,
file_source: Arc<dyn FileSource>,
projection: Option<Vec<usize>>,
compression_type: CompressionType,
) -> Result<DfSendableRecordBatchStream> {
let df_compression: DfCompressionType = compression_type.into();
let config = FileScanConfigBuilder::new(
ObjectStoreUrl::local_filesystem(),
file_schema,
file_source.clone(),
)
.with_file_group(FileGroup::new(vec![PartitionedFile::new(
filename.to_string(),
0,
)]))
.with_projection(projection)
.with_file_compression_type(df_compression)
.build();
let config =
FileScanConfigBuilder::new(ObjectStoreUrl::local_filesystem(), file_source.clone())
.with_file_group(FileGroup::new(vec![PartitionedFile::new(
filename.to_string(),
0,
)]))
.with_projection_indices(projection)
.with_file_compression_type(df_compression)
.build();
let store = Arc::new(OpendalStore::new(store.clone()));
let file_opener = file_source
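A consolidated sketch of the DataFusion 51 builder change applied here: FileScanConfigBuilder::new drops the file-schema argument (the schema now travels inside the FileSource), and with_projection is renamed with_projection_indices. The import paths below are indicative, not taken from the diff:

use std::sync::Arc;
use datafusion::datasource::listing::PartitionedFile;
use datafusion::datasource::object_store::ObjectStoreUrl;
// These module paths are assumptions; the crates re-export them in several places.
use datafusion_datasource::file::FileSource;
use datafusion_datasource::file_groups::FileGroup;
use datafusion_datasource::file_scan_config::{FileScanConfig, FileScanConfigBuilder};

fn scan_config_51(
    file_source: Arc<dyn FileSource>,
    projection: Option<Vec<usize>>,
    path: &str,
) -> FileScanConfig {
    // DataFusion 51: no schema argument here, and the projection setter is
    // renamed with_projection_indices.
    FileScanConfigBuilder::new(ObjectStoreUrl::local_filesystem(), file_source)
        .with_file_group(FileGroup::new(vec![PartitionedFile::new(path.to_string(), 0)]))
        .with_projection_indices(projection)
        .build()
}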

View File

@@ -440,14 +440,11 @@ mod tests {
.await
.unwrap(),
);
let csv_source = CsvSource::new(true, b',', b'"')
.with_schema(schema.clone())
.with_batch_size(8192);
let csv_source = CsvSource::new(schema).with_batch_size(8192);
let stream = file_to_stream(
&store,
compressed_file_path_str,
schema.clone(),
csv_source.clone(),
None,
compression_type,

View File

@@ -347,14 +347,11 @@ mod tests {
.await
.unwrap(),
);
let json_source = JsonSource::new()
.with_schema(schema.clone())
.with_batch_size(8192);
let json_source = JsonSource::new(schema).with_batch_size(8192);
let stream = file_to_stream(
&store,
compressed_file_path_str,
schema.clone(),
json_source.clone(),
None,
compression_type,

View File

@@ -18,15 +18,15 @@ use std::sync::Arc;
use arrow::record_batch::RecordBatch;
use arrow_schema::Schema;
use async_trait::async_trait;
use datafusion::datasource::physical_plan::{FileMeta, ParquetFileReaderFactory};
use datafusion::datasource::physical_plan::ParquetFileReaderFactory;
use datafusion::error::Result as DatafusionResult;
use datafusion::parquet::arrow::async_reader::AsyncFileReader;
use datafusion::parquet::arrow::{ArrowWriter, parquet_to_arrow_schema};
use datafusion::parquet::errors::{ParquetError, Result as ParquetResult};
use datafusion::parquet::file::metadata::ParquetMetaData;
use datafusion::parquet::format::FileMetaData;
use datafusion::physical_plan::SendableRecordBatchStream;
use datafusion::physical_plan::metrics::ExecutionPlanMetricsSet;
use datafusion_datasource::PartitionedFile;
use datatypes::schema::SchemaRef;
use futures::StreamExt;
use futures::future::BoxFuture;
@@ -100,11 +100,11 @@ impl ParquetFileReaderFactory for DefaultParquetFileReaderFactory {
fn create_reader(
&self,
_partition_index: usize,
file_meta: FileMeta,
partitioned_file: PartitionedFile,
_metadata_size_hint: Option<usize>,
_metrics: &ExecutionPlanMetricsSet,
) -> DatafusionResult<Box<dyn AsyncFileReader + Send>> {
let path = file_meta.location().to_string();
let path = partitioned_file.path().to_string();
let object_store = self.object_store.clone();
Ok(Box::new(LazyParquetFileReader::new(object_store, path)))
@@ -180,7 +180,7 @@ impl DfRecordBatchEncoder for ArrowWriter<SharedBuffer> {
#[async_trait]
impl ArrowWriterCloser for ArrowWriter<SharedBuffer> {
async fn close(self) -> Result<FileMetaData> {
async fn close(self) -> Result<ParquetMetaData> {
self.close().context(error::EncodeRecordBatchSnafu)
}
}
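A small sketch of the factory signature change above: create_reader now receives a PartitionedFile instead of the removed FileMeta, and the object-store path comes from PartitionedFile::path():

use datafusion_datasource::PartitionedFile;

// path() replaces the old FileMeta::location() accessor.
fn object_path(file: &PartitionedFile) -> String {
    file.path().to_string()
}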

View File

@@ -67,14 +67,14 @@ impl Test<'_> {
async fn test_json_opener() {
let store = test_store("/");
let schema = test_basic_schema();
let file_source = Arc::new(JsonSource::new()).with_batch_size(test_util::TEST_BATCH_SIZE);
let file_source = Arc::new(JsonSource::new(schema)).with_batch_size(test_util::TEST_BATCH_SIZE);
let path = &find_workspace_path("/src/common/datasource/tests/json/basic.json")
.display()
.to_string();
let tests = [
Test {
config: scan_config(schema.clone(), None, path, file_source.clone()),
config: scan_config(None, path, file_source.clone()),
file_source: file_source.clone(),
expected: vec![
"+-----+-------+",
@@ -87,7 +87,7 @@ async fn test_json_opener() {
],
},
Test {
config: scan_config(schema, Some(1), path, file_source.clone()),
config: scan_config(Some(1), path, file_source.clone()),
file_source,
expected: vec![
"+-----+------+",
@@ -112,13 +112,11 @@ async fn test_csv_opener() {
.display()
.to_string();
let file_source = CsvSource::new(true, b',', b'"')
.with_batch_size(test_util::TEST_BATCH_SIZE)
.with_schema(schema.clone());
let file_source = CsvSource::new(schema).with_batch_size(test_util::TEST_BATCH_SIZE);
let tests = [
Test {
config: scan_config(schema.clone(), None, path, file_source.clone()),
config: scan_config(None, path, file_source.clone()),
file_source: file_source.clone(),
expected: vec![
"+-----+-------+---------------------+----------+------------+",
@@ -131,7 +129,7 @@ async fn test_csv_opener() {
],
},
Test {
config: scan_config(schema, Some(1), path, file_source.clone()),
config: scan_config(Some(1), path, file_source.clone()),
file_source,
expected: vec![
"+-----+------+---------------------+----------+------------+",
@@ -158,10 +156,10 @@ async fn test_parquet_exec() {
.display()
.to_string();
let parquet_source = ParquetSource::default()
let parquet_source = ParquetSource::new(schema)
.with_parquet_file_reader_factory(Arc::new(DefaultParquetFileReaderFactory::new(store)));
let config = scan_config(schema, None, path, Arc::new(parquet_source));
let config = scan_config(None, path, Arc::new(parquet_source));
let exec = DataSourceExec::from_data_source(config);
let ctx = SessionContext::new();
@@ -197,11 +195,11 @@ async fn test_orc_opener() {
let store = test_store("/");
let schema = Arc::new(OrcFormat.infer_schema(&store, path).await.unwrap());
let file_source = Arc::new(OrcSource::default());
let file_source = Arc::new(OrcSource::new(schema.into()));
let tests = [
Test {
config: scan_config(schema.clone(), None, path, file_source.clone()),
config: scan_config(None, path, file_source.clone()),
file_source: file_source.clone(),
expected: vec![
"+----------+-----+-------+------------+-----+-----+-------+--------------------+------------------------+-----------+---------------+------------+----------------+---------------+-------------------+--------------+---------------+---------------+----------------------------+-------------+",
@@ -216,7 +214,7 @@ async fn test_orc_opener() {
],
},
Test {
config: scan_config(schema.clone(), Some(1), path, file_source.clone()),
config: scan_config(Some(1), path, file_source.clone()),
file_source,
expected: vec![
"+----------+-----+------+------------+---+-----+-------+--------------------+------------------------+-----------+---------------+------------+----------------+---------------+-------------------+--------------+---------------+---------------+-------------------------+-------------+",

View File

@@ -80,7 +80,6 @@ pub fn csv_basic_schema() -> SchemaRef {
}
pub(crate) fn scan_config(
file_schema: SchemaRef,
limit: Option<usize>,
filename: &str,
file_source: Arc<dyn FileSource>,
@@ -89,7 +88,7 @@ pub(crate) fn scan_config(
let filename = &filename.replace('\\', "/");
let file_group = FileGroup::new(vec![PartitionedFile::new(filename.clone(), 4096)]);
FileScanConfigBuilder::new(ObjectStoreUrl::local_filesystem(), file_schema, file_source)
FileScanConfigBuilder::new(ObjectStoreUrl::local_filesystem(), file_source)
.with_file_group(file_group)
.with_limit(limit)
.build()
@@ -109,7 +108,7 @@ pub async fn setup_stream_to_json_test(origin_path: &str, threshold: impl Fn(usi
let size = store.read(origin_path).await.unwrap().len();
let config = scan_config(schema, None, origin_path, Arc::new(JsonSource::new()));
let config = scan_config(None, origin_path, Arc::new(JsonSource::new(schema)));
let stream = FileStream::new(
&config,
0,
@@ -151,10 +150,8 @@ pub async fn setup_stream_to_csv_test(
let schema = csv_basic_schema();
let csv_source = CsvSource::new(true, b',', b'"')
.with_schema(schema.clone())
.with_batch_size(TEST_BATCH_SIZE);
let config = scan_config(schema, None, origin_path, csv_source.clone());
let csv_source = CsvSource::new(schema).with_batch_size(TEST_BATCH_SIZE);
let config = scan_config(None, origin_path, csv_source.clone());
let size = store.read(origin_path).await.unwrap().len();
let csv_opener = csv_source.create_file_opener(

View File

@@ -104,7 +104,8 @@ mod tests {
assert!(matches!(f.signature(),
datafusion_expr::Signature {
type_signature: datafusion_expr::TypeSignature::Uniform(1, valid_types),
volatility: datafusion_expr::Volatility::Immutable
volatility: datafusion_expr::Volatility::Immutable,
..
} if valid_types == &ConcreteDataType::numerics().into_iter().map(|dt| { use datatypes::data_type::DataType; dt.as_arrow_type() }).collect::<Vec<_>>()));
}
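The same edit recurs in the test files that follow: DataFusion 51 added fields to Signature, so exhaustive struct patterns need a trailing `..`. A minimal compiling sketch:

use datafusion_expr::{Signature, TypeSignature, Volatility};

// Only the named fields matter; the rest of Signature is ignored via `..`.
fn is_immutable_uniform_unary(sig: &Signature) -> bool {
    matches!(
        sig,
        Signature {
            type_signature: TypeSignature::Uniform(1, _),
            volatility: Volatility::Immutable,
            ..
        }
    )
}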

View File

@@ -331,7 +331,8 @@ mod tests {
assert!(matches!(f.signature(),
datafusion_expr::Signature {
type_signature: datafusion_expr::TypeSignature::Uniform(1, valid_types),
volatility: datafusion_expr::Volatility::Immutable
volatility: datafusion_expr::Volatility::Immutable,
..
} if valid_types == &vec![ArrowDataType::Utf8]));
}

View File

@@ -145,7 +145,8 @@ mod tests {
assert!(matches!(f.signature(),
datafusion_expr::Signature {
type_signature: datafusion_expr::TypeSignature::OneOf(sigs),
volatility: datafusion_expr::Volatility::Immutable
volatility: datafusion_expr::Volatility::Immutable,
..
} if sigs.len() == 2));
}

View File

@@ -341,6 +341,7 @@ impl AggregateUDFImpl for StateWrapper {
name: acc_args.name,
is_distinct: acc_args.is_distinct,
exprs: acc_args.exprs,
expr_fields: &[],
};
self.inner.accumulator(acc_args)?
};

View File

@@ -122,7 +122,8 @@ mod tests {
matches!(f.signature(),
Signature {
type_signature: TypeSignature::OneOf(sigs),
volatility: Volatility::Immutable
volatility: Volatility::Immutable,
..
} if sigs.len() == 15),
"{:?}",
f.signature()

View File

@@ -193,7 +193,8 @@ mod tests {
assert!(matches!(f.signature(),
Signature {
type_signature: TypeSignature::OneOf(sigs),
volatility: Volatility::Immutable
volatility: Volatility::Immutable,
..
} if sigs.len() == 6));
}

View File

@@ -120,7 +120,8 @@ mod tests {
matches!(f.signature(),
Signature {
type_signature: TypeSignature::OneOf(sigs),
volatility: Volatility::Immutable
volatility: Volatility::Immutable,
..
} if sigs.len() == 15),
"{:?}",
f.signature()

View File

@@ -25,7 +25,6 @@ use datafusion_common::arrow::array::{
};
use datafusion_common::arrow::datatypes::DataType;
use datafusion_common::{DataFusionError, Result};
use datafusion_expr::type_coercion::aggregates::STRINGS;
use datafusion_expr::{ColumnarValue, ScalarFunctionArgs, Signature, Volatility};
use datatypes::arrow_array::{int_array_value_at_index, string_array_value_at_index};
use datatypes::json::JsonStructureSettings;
@@ -519,7 +518,7 @@ impl Default for JsonGetObject {
DataType::LargeBinary,
DataType::BinaryView,
],
STRINGS.to_vec(),
vec![DataType::UInt8, DataType::LargeUtf8, DataType::Utf8View],
),
}
}

View File

@@ -99,7 +99,8 @@ mod tests {
assert!(matches!(rate.signature(),
Signature {
type_signature: TypeSignature::Uniform(2, valid_types),
volatility: Volatility::Immutable
volatility: Volatility::Immutable,
..
} if valid_types == NUMERICS
));
let values = vec![1.0, 3.0, 6.0];

View File

@@ -19,8 +19,10 @@ use datafusion_common::DataFusionError;
use datafusion_common::arrow::array::{Array, AsArray, StringViewBuilder};
use datafusion_common::arrow::compute;
use datafusion_common::arrow::datatypes::DataType;
use datafusion_expr::type_coercion::aggregates::BINARYS;
use datafusion_expr::{ColumnarValue, ScalarFunctionArgs, Signature, TypeSignature, Volatility};
use datafusion_common::types::logical_binary;
use datafusion_expr::{
Coercion, ColumnarValue, ScalarFunctionArgs, Signature, TypeSignatureClass, Volatility,
};
use datatypes::types::vector_type_value_to_string;
use crate::function::{Function, extract_args};
@@ -35,11 +37,10 @@ pub struct VectorToStringFunction {
impl Default for VectorToStringFunction {
fn default() -> Self {
Self {
signature: Signature::one_of(
vec![
TypeSignature::Uniform(1, vec![DataType::BinaryView]),
TypeSignature::Uniform(1, BINARYS.to_vec()),
],
signature: Signature::coercible(
vec![Coercion::new_exact(TypeSignatureClass::Native(
logical_binary(),
))],
Volatility::Immutable,
),
}
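The vector functions below (ElemAvg, ElemProduct, ElemSum, and friends) apply the same idea as this hunk: enumerated Arrow type lists (STRINGS, BINARYS, DataType::BinaryView) give way to coercion classes over logical types. A sketch of the one-argument string-or-binary form used by ElemAvgFunction:

use datafusion_common::types::{logical_binary, logical_string};
use datafusion_expr::{Coercion, Signature, TypeSignature, TypeSignatureClass, Volatility};

// One argument that may be any logically-binary or logically-string type
// (Binary/LargeBinary/BinaryView or Utf8/LargeUtf8/Utf8View) via coercion.
fn string_or_binary_unary_signature() -> Signature {
    Signature::one_of(
        vec![
            TypeSignature::Coercible(vec![Coercion::new_exact(
                TypeSignatureClass::Native(logical_binary()),
            )]),
            TypeSignature::Coercible(vec![Coercion::new_exact(
                TypeSignatureClass::Native(logical_string()),
            )]),
        ],
        Volatility::Immutable,
    )
}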

View File

@@ -15,10 +15,10 @@
use std::fmt::Display;
use datafusion::arrow::datatypes::DataType;
use datafusion::logical_expr::ColumnarValue;
use datafusion::logical_expr::{Coercion, ColumnarValue, TypeSignature, TypeSignatureClass};
use datafusion_common::ScalarValue;
use datafusion_expr::type_coercion::aggregates::{BINARYS, STRINGS};
use datafusion_expr::{ScalarFunctionArgs, Signature, TypeSignature, Volatility};
use datafusion_common::types::{logical_binary, logical_string};
use datafusion_expr::{ScalarFunctionArgs, Signature, Volatility};
use nalgebra::DVectorView;
use crate::function::Function;
@@ -36,9 +36,12 @@ impl Default for ElemAvgFunction {
Self {
signature: Signature::one_of(
vec![
TypeSignature::Uniform(1, STRINGS.to_vec()),
TypeSignature::Uniform(1, BINARYS.to_vec()),
TypeSignature::Uniform(1, vec![DataType::BinaryView]),
TypeSignature::Coercible(vec![Coercion::new_exact(
TypeSignatureClass::Native(logical_binary()),
)]),
TypeSignature::Coercible(vec![Coercion::new_exact(
TypeSignatureClass::Native(logical_string()),
)]),
],
Volatility::Immutable,
),

View File

@@ -15,10 +15,10 @@
use std::fmt::Display;
use datafusion::arrow::datatypes::DataType;
use datafusion::logical_expr::ColumnarValue;
use datafusion::logical_expr_common::type_coercion::aggregates::{BINARYS, STRINGS};
use datafusion::logical_expr::{Coercion, ColumnarValue, TypeSignature, TypeSignatureClass};
use datafusion_common::ScalarValue;
use datafusion_expr::{ScalarFunctionArgs, Signature, TypeSignature, Volatility};
use datafusion_common::types::{logical_binary, logical_string};
use datafusion_expr::{ScalarFunctionArgs, Signature, Volatility};
use nalgebra::DVectorView;
use crate::function::Function;
@@ -49,9 +49,12 @@ impl Default for ElemProductFunction {
Self {
signature: Signature::one_of(
vec![
TypeSignature::Uniform(1, STRINGS.to_vec()),
TypeSignature::Uniform(1, BINARYS.to_vec()),
TypeSignature::Uniform(1, vec![DataType::BinaryView]),
TypeSignature::Coercible(vec![Coercion::new_exact(
TypeSignatureClass::Native(logical_binary()),
)]),
TypeSignature::Coercible(vec![Coercion::new_exact(
TypeSignatureClass::Native(logical_string()),
)]),
],
Volatility::Immutable,
),

View File

@@ -15,9 +15,9 @@
use std::fmt::Display;
use datafusion::arrow::datatypes::DataType;
use datafusion::logical_expr::ColumnarValue;
use datafusion::logical_expr::{Coercion, ColumnarValue, TypeSignatureClass};
use datafusion_common::ScalarValue;
use datafusion_expr::type_coercion::aggregates::{BINARYS, STRINGS};
use datafusion_common::types::{logical_binary, logical_string};
use datafusion_expr::{ScalarFunctionArgs, Signature, TypeSignature, Volatility};
use nalgebra::DVectorView;
@@ -36,9 +36,12 @@ impl Default for ElemSumFunction {
Self {
signature: Signature::one_of(
vec![
TypeSignature::Uniform(1, STRINGS.to_vec()),
TypeSignature::Uniform(1, BINARYS.to_vec()),
TypeSignature::Uniform(1, vec![DataType::BinaryView]),
TypeSignature::Coercible(vec![Coercion::new_exact(
TypeSignatureClass::Native(logical_binary()),
)]),
TypeSignature::Coercible(vec![Coercion::new_exact(
TypeSignatureClass::Native(logical_string()),
)]),
],
Volatility::Immutable,
),

View File

@@ -15,10 +15,10 @@
use std::fmt::Display;
use datafusion::arrow::datatypes::DataType;
use datafusion::logical_expr::ColumnarValue;
use datafusion::logical_expr_common::type_coercion::aggregates::{BINARYS, STRINGS};
use datafusion::logical_expr::{Coercion, ColumnarValue, TypeSignatureClass};
use datafusion_common::ScalarValue;
use datafusion_expr::{ScalarFunctionArgs, Signature, TypeSignature, Volatility};
use datafusion_common::types::{logical_binary, logical_string};
use datafusion_expr::{ScalarFunctionArgs, Signature, Volatility};
use crate::function::Function;
use crate::scalars::vector::VectorCalculator;
@@ -47,10 +47,10 @@ pub(crate) struct VectorDimFunction {
impl Default for VectorDimFunction {
fn default() -> Self {
Self {
signature: Signature::one_of(
signature: Signature::coercible(
vec![
TypeSignature::Uniform(1, STRINGS.to_vec()),
TypeSignature::Uniform(1, BINARYS.to_vec()),
Coercion::new_exact(TypeSignatureClass::Native(logical_binary())),
Coercion::new_exact(TypeSignatureClass::Native(logical_string())),
],
Volatility::Immutable,
),

View File

@@ -15,10 +15,10 @@
use std::fmt::Display;
use datafusion::arrow::datatypes::DataType;
use datafusion::logical_expr::ColumnarValue;
use datafusion::logical_expr_common::type_coercion::aggregates::{BINARYS, STRINGS};
use datafusion::logical_expr::{Coercion, ColumnarValue, TypeSignatureClass};
use datafusion_common::ScalarValue;
use datafusion_expr::{ScalarFunctionArgs, Signature, TypeSignature, Volatility};
use datafusion_common::types::{logical_binary, logical_string};
use datafusion_expr::{ScalarFunctionArgs, Signature, Volatility};
use nalgebra::DVectorView;
use crate::function::Function;
@@ -50,11 +50,10 @@ pub(crate) struct VectorNormFunction {
impl Default for VectorNormFunction {
fn default() -> Self {
Self {
signature: Signature::one_of(
signature: Signature::coercible(
vec![
TypeSignature::Uniform(1, STRINGS.to_vec()),
TypeSignature::Uniform(1, BINARYS.to_vec()),
TypeSignature::Uniform(1, vec![DataType::BinaryView]),
Coercion::new_exact(TypeSignatureClass::Native(logical_binary())),
Coercion::new_exact(TypeSignatureClass::Native(logical_string())),
],
Volatility::Immutable,
),

View File

@@ -106,7 +106,8 @@ mod tests {
assert!(matches!(f.signature(),
datafusion_expr::Signature {
type_signature: datafusion_expr::TypeSignature::Uniform(1, valid_types),
volatility: datafusion_expr::Volatility::Immutable
volatility: datafusion_expr::Volatility::Immutable,
..
} if valid_types == &vec![ArrowDataType::Utf8]));
}

View File

@@ -103,10 +103,11 @@ impl FlightEncoder {
FlightMessage::RecordBatch(record_batch) => {
let (encoded_dictionaries, encoded_batch) = self
.data_gen
.encoded_batch(
.encode(
&record_batch,
&mut self.dictionary_tracker,
&self.write_options,
&mut Default::default(),
)
.expect("DictionaryTracker configured above to not fail on replacement");

View File

@@ -7,7 +7,6 @@ license.workspace = true
[dependencies]
common-error.workspace = true
common-macro.workspace = true
prost.workspace = true
snafu.workspace = true
tokio.workspace = true

View File

@@ -1145,10 +1145,11 @@ impl TryFrom<ScalarValue> for Value {
ScalarValue::List(array) => {
// this is for item type
let datatype = ConcreteDataType::try_from(&array.value_type())?;
let items = ScalarValue::convert_array_to_scalar_vec(array.as_ref())
.context(ConvertArrowArrayToScalarsSnafu)?
let scalar_values = ScalarValue::convert_array_to_scalar_vec(array.as_ref())
.context(ConvertArrowArrayToScalarsSnafu)?;
let items = scalar_values
.into_iter()
.flatten()
.flat_map(|v| v.unwrap_or_else(|| vec![ScalarValue::Null]))
.map(|x| x.try_into())
.collect::<Result<Vec<Value>>>()?;
Value::List(ListValue::new(items, Arc::new(datatype)))
@@ -2997,6 +2998,7 @@ pub(crate) mod tests {
.unwrap()
.into_iter()
.flatten()
.flatten()
.collect::<Vec<_>>();
assert_eq!(
vs,
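The .flat_map(..) above (and the extra .flatten() in the test) track a shape change: ScalarValue::convert_array_to_scalar_vec now yields one Option<Vec<ScalarValue>> per list row, with None marking a NULL row. A sketch of the flattening used here:

use datafusion_common::ScalarValue;

// NULL list rows (None) are replaced with a single ScalarValue::Null,
// matching the .flat_map(..) call in the hunk above.
fn flatten_rows(rows: Vec<Option<Vec<ScalarValue>>>) -> Vec<ScalarValue> {
    rows.into_iter()
        .flat_map(|row| row.unwrap_or_else(|| vec![ScalarValue::Null]))
        .collect()
}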

View File

@@ -13,7 +13,6 @@
// limitations under the License.
use std::any::Any;
use std::borrow::Borrow;
use std::sync::Arc;
use arrow::array::{Array, ArrayBuilder, ArrayIter, ArrayRef, BooleanArray, BooleanBuilder};
@@ -69,8 +68,8 @@ impl From<Vec<Option<bool>>> for BooleanVector {
}
}
impl<Ptr: Borrow<Option<bool>>> FromIterator<Ptr> for BooleanVector {
fn from_iter<I: IntoIterator<Item = Ptr>>(iter: I) -> Self {
impl FromIterator<Option<bool>> for BooleanVector {
fn from_iter<T: IntoIterator<Item = Option<bool>>>(iter: T) -> Self {
BooleanVector {
array: BooleanArray::from_iter(iter),
}
@@ -303,7 +302,7 @@ mod tests {
#[test]
fn test_boolean_vector_from_iter() {
let input = vec![Some(false), Some(true), Some(false), Some(true)];
let vec = input.iter().collect::<BooleanVector>();
let vec = input.iter().cloned().collect::<BooleanVector>();
assert_eq!(4, vec.len());
for (i, v) in input.into_iter().enumerate() {
assert_eq!(v, vec.get_data(i), "Failed at {i}")
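Usage note as a sketch (the datatypes::vectors path is assumed): with the FromIterator impl narrowed from Borrow<Option<bool>> to Option<bool>, collecting from an iterator of references needs an explicit .cloned():

use datatypes::vectors::BooleanVector;

fn collect_booleans() -> BooleanVector {
    let input = vec![Some(false), Some(true)];
    // input.iter().collect::<BooleanVector>() no longer compiles; clone the
    // Option<bool> items first.
    input.iter().cloned().collect()
}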

View File

@@ -170,10 +170,11 @@ impl Helper {
ScalarValue::List(array) => {
let item_type = Arc::new(ConcreteDataType::try_from(&array.value_type())?);
let mut builder = ListVectorBuilder::with_type_capacity(item_type.clone(), 1);
let values = ScalarValue::convert_array_to_scalar_vec(array.as_ref())
.context(ConvertArrowArrayToScalarsSnafu)?
let scalar_values = ScalarValue::convert_array_to_scalar_vec(array.as_ref())
.context(ConvertArrowArrayToScalarsSnafu)?;
let values = scalar_values
.into_iter()
.flatten()
.flat_map(|v| v.unwrap_or_else(|| vec![ScalarValue::Null]))
.map(ScalarValue::try_into)
.collect::<Result<Vec<Value>>>()?;
builder.push(Some(ListValueRef::Ref {

View File

@@ -18,6 +18,7 @@ use common_datasource::file_format::Format;
use common_datasource::file_format::csv::CsvFormat;
use common_datasource::file_format::parquet::DefaultParquetFileReaderFactory;
use datafusion::common::ToDFSchema;
use datafusion::config::CsvOptions;
use datafusion::datasource::listing::PartitionedFile;
use datafusion::datasource::object_store::ObjectStoreUrl;
use datafusion::datasource::physical_plan::{
@@ -34,7 +35,6 @@ use datafusion::prelude::SessionContext;
use datafusion_expr::expr::Expr;
use datafusion_expr::utils::conjunction;
use datafusion_orc::OrcSource;
use datatypes::arrow::datatypes::Schema as ArrowSchema;
use datatypes::schema::SchemaRef;
use object_store::ObjectStore;
use snafu::ResultExt;
@@ -45,7 +45,6 @@ const DEFAULT_BATCH_SIZE: usize = 8192;
fn build_record_batch_stream(
scan_plan_config: &ScanPlanConfig,
file_schema: Arc<ArrowSchema>,
limit: Option<usize>,
file_source: Arc<dyn FileSource>,
) -> Result<DfSendableRecordBatchStream> {
@@ -55,15 +54,12 @@ fn build_record_batch_stream(
.map(|filename| PartitionedFile::new(filename.clone(), 0))
.collect::<Vec<_>>();
let config = FileScanConfigBuilder::new(
ObjectStoreUrl::local_filesystem(),
file_schema,
file_source.clone(),
)
.with_projection(scan_plan_config.projection.cloned())
.with_limit(limit)
.with_file_group(FileGroup::new(files))
.build();
let config =
FileScanConfigBuilder::new(ObjectStoreUrl::local_filesystem(), file_source.clone())
.with_projection_indices(scan_plan_config.projection.cloned())
.with_limit(limit)
.with_file_group(FileGroup::new(files))
.build();
let store = Arc::new(object_store_opendal::OpendalStore::new(
scan_plan_config.store.clone(),
@@ -89,11 +85,14 @@ fn new_csv_stream(
// push down limit only if there is no filter
let limit = config.filters.is_empty().then_some(config.limit).flatten();
let csv_source = CsvSource::new(format.has_header, format.delimiter, b'"')
.with_schema(file_schema.clone())
let options = CsvOptions::default()
.with_has_header(format.has_header)
.with_delimiter(format.delimiter);
let csv_source = CsvSource::new(file_schema)
.with_csv_options(options)
.with_batch_size(DEFAULT_BATCH_SIZE);
build_record_batch_stream(config, file_schema, limit, csv_source)
build_record_batch_stream(config, limit, csv_source)
}
fn new_json_stream(config: &ScanPlanConfig) -> Result<DfSendableRecordBatchStream> {
@@ -102,8 +101,8 @@ fn new_json_stream(config: &ScanPlanConfig) -> Result<DfSendableRecordBatchStrea
// push down limit only if there is no filter
let limit = config.filters.is_empty().then_some(config.limit).flatten();
let file_source = JsonSource::new().with_batch_size(DEFAULT_BATCH_SIZE);
build_record_batch_stream(config, file_schema, limit, file_source)
let file_source = JsonSource::new(file_schema).with_batch_size(DEFAULT_BATCH_SIZE);
build_record_batch_stream(config, limit, file_source)
}
fn new_parquet_stream_with_exec_plan(
@@ -126,9 +125,10 @@ fn new_parquet_stream_with_exec_plan(
.collect::<Vec<_>>(),
);
let mut parquet_source = ParquetSource::default().with_parquet_file_reader_factory(Arc::new(
DefaultParquetFileReaderFactory::new(store.clone()),
));
let mut parquet_source = ParquetSource::new(file_schema.clone())
.with_parquet_file_reader_factory(Arc::new(DefaultParquetFileReaderFactory::new(
store.clone(),
)));
// build predicate filter
let filters = filters.to_vec();
@@ -143,15 +143,12 @@ fn new_parquet_stream_with_exec_plan(
parquet_source = parquet_source.with_predicate(filters);
};
let file_scan_config = FileScanConfigBuilder::new(
ObjectStoreUrl::local_filesystem(),
file_schema,
Arc::new(parquet_source),
)
.with_file_group(file_group)
.with_projection(projection.cloned())
.with_limit(*limit)
.build();
let file_scan_config =
FileScanConfigBuilder::new(ObjectStoreUrl::local_filesystem(), Arc::new(parquet_source))
.with_file_group(file_group)
.with_projection_indices(projection.cloned())
.with_limit(*limit)
.build();
// TODO(ruihang): get this from upper layer
let task_ctx = SessionContext::default().task_ctx();
@@ -170,8 +167,8 @@ fn new_orc_stream(config: &ScanPlanConfig) -> Result<DfSendableRecordBatchStream
// push down limit only if there is no filter
let limit = config.filters.is_empty().then_some(config.limit).flatten();
let file_source = OrcSource::default().with_batch_size(DEFAULT_BATCH_SIZE);
build_record_batch_stream(config, file_schema, limit, file_source)
let file_source = OrcSource::new(file_schema.into()).with_batch_size(DEFAULT_BATCH_SIZE);
build_record_batch_stream(config, limit, file_source)
}
#[derive(Debug, Clone)]
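A consolidated sketch of the DataFusion 51 CSV construction used in new_csv_stream above: the schema moves into the constructor, while header and delimiter move into CsvOptions:

use std::sync::Arc;
use datafusion::arrow::datatypes::SchemaRef;
use datafusion::config::CsvOptions;
use datafusion::datasource::physical_plan::{CsvSource, FileSource};

fn csv_source_51(schema: SchemaRef, has_header: bool, delimiter: u8) -> Arc<dyn FileSource> {
    let options = CsvOptions::default()
        .with_has_header(has_header)
        .with_delimiter(delimiter);
    // with_batch_size comes from the FileSource trait and returns
    // Arc<dyn FileSource>, hence the return type here.
    CsvSource::new(schema)
        .with_csv_options(options)
        .with_batch_size(8192)
}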

View File

@@ -16,11 +16,13 @@
use std::mem;
use parquet::basic::ColumnOrder;
use parquet::file::metadata::{
FileMetaData, ParquetColumnIndex, ParquetMetaData, ParquetOffsetIndex, RowGroupMetaData,
FileMetaData, KeyValue, ParquetColumnIndex, ParquetMetaData, ParquetOffsetIndex,
RowGroupMetaData,
};
use parquet::file::page_index::index::Index;
use parquet::format::{ColumnOrder, KeyValue, PageLocation};
use parquet::file::page_index::column_index::ColumnIndexMetaData as Index;
use parquet::file::page_index::offset_index::PageLocation;
use parquet::schema::types::{ColumnDescriptor, SchemaDescriptor, Type};
/// Returns estimated size of [ParquetMetaData].

View File

@@ -601,14 +601,6 @@ pub enum Error {
location: Location,
},
#[snafu(display("Invalid file metadata"))]
ConvertMetaData {
#[snafu(implicit)]
location: Location,
#[snafu(source)]
error: parquet::errors::ParquetError,
},
#[snafu(display("Column not found, column: {column}"))]
ColumnNotFound {
column: String,
@@ -1276,7 +1268,6 @@ impl ErrorExt for Error {
| Join { .. }
| WorkerStopped { .. }
| Recv { .. }
| ConvertMetaData { .. }
| DecodeWal { .. }
| ComputeArrow { .. }
| BiErrors { .. }

View File

@@ -71,7 +71,6 @@ use crate::sst::index::IndexOutput;
use crate::sst::parquet::file_range::{PreFilterMode, row_group_contains_delete};
use crate::sst::parquet::flat_format::primary_key_column_index;
use crate::sst::parquet::format::{PrimaryKeyArray, PrimaryKeyArrayBuilder, ReadFormat};
use crate::sst::parquet::helper::parse_parquet_metadata;
use crate::sst::parquet::{PARQUET_METADATA_KEY, SstInfo};
use crate::sst::{SeriesEstimator, to_sst_arrow_schema};
@@ -1197,7 +1196,7 @@ impl BulkPartEncoder {
metrics.num_rows += total_rows;
let buf = Bytes::from(buf);
let parquet_metadata = Arc::new(parse_parquet_metadata(file_metadata)?);
let parquet_metadata = Arc::new(file_metadata);
let num_series = series_estimator.finish();
Ok(Some(EncodedBulkPart {
@@ -1232,7 +1231,7 @@ impl BulkPartEncoder {
};
let buf = Bytes::from(buf);
let parquet_metadata = Arc::new(parse_parquet_metadata(file_metadata)?);
let parquet_metadata = Arc::new(file_metadata);
Ok(Some(EncodedBulkPart {
data: buf,

View File

@@ -13,82 +13,11 @@
// limitations under the License.
use std::ops::Range;
use std::sync::Arc;
use std::time::Instant;
use bytes::Bytes;
use common_telemetry::trace;
use object_store::ObjectStore;
use parquet::basic::ColumnOrder;
use parquet::file::metadata::{FileMetaData, ParquetMetaData, RowGroupMetaData};
use parquet::format;
use parquet::schema::types::{SchemaDescriptor, from_thrift};
use snafu::ResultExt;
use crate::error;
use crate::error::Result;
// Refer to https://github.com/apache/arrow-rs/blob/7e134f4d277c0b62c27529fc15a4739de3ad0afd/parquet/src/file/footer.rs#L74-L90
/// Convert [format::FileMetaData] to [ParquetMetaData]
pub fn parse_parquet_metadata(t_file_metadata: format::FileMetaData) -> Result<ParquetMetaData> {
let schema = from_thrift(&t_file_metadata.schema).context(error::ConvertMetaDataSnafu)?;
let schema_desc_ptr = Arc::new(SchemaDescriptor::new(schema));
let mut row_groups = Vec::with_capacity(t_file_metadata.row_groups.len());
for rg in t_file_metadata.row_groups {
row_groups.push(
RowGroupMetaData::from_thrift(schema_desc_ptr.clone(), rg)
.context(error::ConvertMetaDataSnafu)?,
);
}
let column_orders = parse_column_orders(t_file_metadata.column_orders, &schema_desc_ptr);
let file_metadata = FileMetaData::new(
t_file_metadata.version,
t_file_metadata.num_rows,
t_file_metadata.created_by,
t_file_metadata.key_value_metadata,
schema_desc_ptr,
column_orders,
);
// There may be a problem owing to lacking of column_index and offset_index,
// if we open page index in the future.
Ok(ParquetMetaData::new(file_metadata, row_groups))
}
// Port from https://github.com/apache/arrow-rs/blob/7e134f4d277c0b62c27529fc15a4739de3ad0afd/parquet/src/file/footer.rs#L106-L137
/// Parses column orders from Thrift definition.
/// If no column orders are defined, returns `None`.
fn parse_column_orders(
t_column_orders: Option<Vec<format::ColumnOrder>>,
schema_descr: &SchemaDescriptor,
) -> Option<Vec<ColumnOrder>> {
match t_column_orders {
Some(orders) => {
// Should always be the case
assert_eq!(
orders.len(),
schema_descr.num_columns(),
"Column order length mismatch"
);
let mut res = Vec::with_capacity(schema_descr.num_columns());
for (i, column) in schema_descr.columns().iter().enumerate() {
match orders[i] {
format::ColumnOrder::TYPEORDER(_) => {
let sort_order = ColumnOrder::get_sort_order(
column.logical_type(),
column.converted_type(),
column.physical_type(),
);
res.push(ColumnOrder::TYPE_DEFINED_ORDER(sort_order));
}
}
}
Some(res)
}
None => None,
}
}
const FETCH_PARALLELISM: usize = 8;
pub(crate) const MERGE_GAP: usize = 512 * 1024;

View File

@@ -31,8 +31,7 @@ use mito_codec::row_converter::build_primary_key_codec;
use object_store::ObjectStore;
use parquet::arrow::arrow_reader::{ParquetRecordBatchReader, RowSelection};
use parquet::arrow::{FieldLevels, ProjectionMask, parquet_to_arrow_field_levels};
use parquet::file::metadata::ParquetMetaData;
use parquet::format::KeyValue;
use parquet::file::metadata::{KeyValue, ParquetMetaData};
use snafu::{OptionExt, ResultExt};
use store_api::metadata::{ColumnMetadata, RegionMetadata, RegionMetadataRef};
use store_api::region_request::PathType;

View File

@@ -55,7 +55,6 @@ use crate::sst::file::RegionFileId;
use crate::sst::index::{IndexOutput, Indexer, IndexerBuilder};
use crate::sst::parquet::flat_format::{FlatWriteFormat, time_index_column_index};
use crate::sst::parquet::format::PrimaryKeyWriteFormat;
use crate::sst::parquet::helper::parse_parquet_metadata;
use crate::sst::parquet::{PARQUET_METADATA_KEY, SstInfo, WriteOptions};
use crate::sst::{
DEFAULT_WRITE_BUFFER_SIZE, DEFAULT_WRITE_CONCURRENCY, FlatSchemaOptions, SeriesEstimator,
@@ -205,14 +204,12 @@ where
}
current_writer.flush().await.context(WriteParquetSnafu)?;
let file_meta = current_writer.close().await.context(WriteParquetSnafu)?;
let parquet_metadata = current_writer.close().await.context(WriteParquetSnafu)?;
let file_size = self.bytes_written.load(Ordering::Relaxed) as u64;
// Safety: num rows > 0 so we must have min/max.
let time_range = stats.time_range.unwrap();
// convert FileMetaData to ParquetMetaData
let parquet_metadata = parse_parquet_metadata(file_meta)?;
let max_row_group_uncompressed_size: u64 = parquet_metadata
.row_groups()
.iter()

View File

@@ -33,6 +33,7 @@ use common_telemetry::{debug, tracing};
use datafusion::datasource::physical_plan::{CsvSource, FileSource, JsonSource};
use datafusion::parquet::arrow::ParquetRecordBatchStreamBuilder;
use datafusion::parquet::arrow::arrow_reader::ArrowReaderMetadata;
use datafusion_common::config::CsvOptions;
use datafusion_expr::Expr;
use datatypes::arrow::compute::can_cast_types;
use datatypes::arrow::datatypes::{DataType as ArrowDataType, Schema, SchemaRef};
@@ -214,13 +215,15 @@ impl StatementExecutor {
.context(error::ProjectSchemaSnafu)?,
);
let csv_source = CsvSource::new(format.has_header, format.delimiter, b'"')
.with_schema(schema.clone())
let options = CsvOptions::default()
.with_has_header(format.has_header)
.with_delimiter(format.delimiter);
let csv_source = CsvSource::new(schema.clone())
.with_csv_options(options)
.with_batch_size(DEFAULT_BATCH_SIZE);
let stream = file_to_stream(
object_store,
path,
schema.clone(),
csv_source,
Some(projection),
format.compression_type,
@@ -247,13 +250,11 @@ impl StatementExecutor {
.context(error::ProjectSchemaSnafu)?,
);
let json_source = JsonSource::new()
.with_schema(schema.clone())
.with_batch_size(DEFAULT_BATCH_SIZE);
let json_source =
JsonSource::new(schema.clone()).with_batch_size(DEFAULT_BATCH_SIZE);
let stream = file_to_stream(
object_store,
path,
schema.clone(),
json_source,
Some(projection),
format.compression_type,

View File

@@ -255,9 +255,9 @@ fn metrics_to_string(metrics: RecordBatchMetrics, format: AnalyzeFormat) -> DfRe
match format {
AnalyzeFormat::JSON => Ok(JsonMetrics::from_record_batch_metrics(metrics).to_string()),
AnalyzeFormat::TEXT => Ok(metrics.to_string()),
AnalyzeFormat::GRAPHVIZ => Err(DataFusionError::NotImplemented(
"GRAPHVIZ format is not supported for metrics output".to_string(),
)),
format => Err(DataFusionError::NotImplemented(format!(
"AnalyzeFormat {format}",
))),
}
}

View File

@@ -543,7 +543,8 @@ impl QueryEngine for DatafusionQueryEngine {
}
// configure execution options
state.config_mut().options_mut().execution.time_zone = query_ctx.timezone().to_string();
state.config_mut().options_mut().execution.time_zone =
Some(query_ctx.timezone().to_string());
// usually it's impossible to have both `set variable` set by sql client and
// hint in header by grpc client, so only need to deal with them separately

View File

@@ -123,7 +123,7 @@ tokio-rustls.workspace = true
tokio-stream = { workspace = true, features = ["net"] }
tokio-util.workspace = true
tonic.workspace = true
tonic-reflection = "0.13"
tonic-reflection = "0.14"
tower = { workspace = true, features = ["full"] }
tower-http = { version = "0.6", features = ["full"] }
tracing.workspace = true

View File

@@ -715,7 +715,7 @@ fn replace_params_with_values(
if let Some(Some(t)) = param_types.get(&format_placeholder(i + 1)) {
let value = helper::convert_value(param, t)?;
values.push(value);
values.push(value.into());
}
}
@@ -744,7 +744,7 @@ fn replace_params_with_exprs(
if let Some(Some(t)) = param_types.get(&format_placeholder(i + 1)) {
let value = helper::convert_expr_to_scalar_value(param, t)?;
values.push(value);
values.push(value.into());
}
}

View File

@@ -13,8 +13,10 @@
// limitations under the License.
use std::ops::ControlFlow;
use std::sync::Arc;
use std::time::Duration;
use arrow_schema::Field;
use chrono::NaiveDate;
use common_query::prelude::ScalarValue;
use common_sql::convert::sql_value_to_value;
@@ -87,8 +89,8 @@ pub fn fix_placeholder_types(plan: &mut LogicalPlan) -> Result<()> {
let give_placeholder_types = |mut e: datafusion_expr::Expr| {
if let datafusion_expr::Expr::Cast(cast) = &mut e {
if let datafusion_expr::Expr::Placeholder(ph) = &mut *cast.expr {
if ph.data_type.is_none() {
ph.data_type = Some(cast.data_type.clone());
if ph.field.is_none() {
ph.field = Some(Arc::new(Field::new("", cast.data_type.clone(), true)));
common_telemetry::debug!(
"give placeholder type {:?} to {:?}",
cast.data_type,
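A sketch of the DataFusion 51 placeholder change applied here: Placeholder now carries field: Option<FieldRef> instead of data_type: Option<DataType>, so assigning an inferred type means wrapping it in a Field. The empty name and nullable = true follow the hunk; the Placeholder import path is assumed:

use std::sync::Arc;
use arrow_schema::{DataType, Field};
use datafusion_expr::expr::Placeholder;

fn give_placeholder_type(ph: &mut Placeholder, data_type: DataType) {
    if ph.field.is_none() {
        // Field name is unused for placeholders; nullability assumed true.
        ph.field = Some(Arc::new(Field::new("", data_type, true)));
    }
}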

View File

@@ -324,11 +324,12 @@ impl ExtendedQueryHandler for PostgresServerHandlerInner {
}
let output = if let Some(plan) = &sql_plan.plan {
let values = parameters_to_scalar_values(plan, portal)?;
let plan = plan
.clone()
.replace_params_with_values(&ParamValues::List(parameters_to_scalar_values(
plan, portal,
)?))
.replace_params_with_values(&ParamValues::List(
values.into_iter().map(Into::into).collect(),
))
.context(DataFusionSnafu)
.map_err(convert_err)?;
self.query_handler

View File

@@ -225,7 +225,7 @@ impl QueryContext {
/// Create a new datafusion's ConfigOptions instance based on the current QueryContext.
pub fn create_config_options(&self) -> ConfigOptions {
let mut config = ConfigOptions::default();
config.execution.time_zone = self.timezone().to_string();
config.execution.time_zone = Some(self.timezone().to_string());
config
}
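A sketch of the config change above: DataFusion 51 makes execution.time_zone an Option<String> where it was previously a plain String:

use datafusion_common::config::ConfigOptions;

fn config_with_timezone(tz: &str) -> ConfigOptions {
    let mut config = ConfigOptions::default();
    // Was: config.execution.time_zone = tz.to_string();
    config.execution.time_zone = Some(tz.to_string());
    config
}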

View File

@@ -271,7 +271,7 @@ pub fn sql_data_type_to_concrete_data_type(
})?
.map(|t| ConcreteDataType::timestamp_datatype(t.unit()))
.unwrap_or(ConcreteDataType::timestamp_millisecond_datatype())),
SqlDataType::Interval => Ok(ConcreteDataType::interval_month_day_nano_datatype()),
SqlDataType::Interval { .. } => Ok(ConcreteDataType::interval_month_day_nano_datatype()),
SqlDataType::Decimal(exact_info) => match exact_info {
ExactNumberInfo::None => Ok(ConcreteDataType::decimal128_default_datatype()),
// refer to https://dev.mysql.com/doc/refman/8.0/en/fixed-point-types.html
@@ -333,8 +333,8 @@ pub fn concrete_data_type_to_sql_data_type(data_type: &ConcreteDataType) -> Resu
ConcreteDataType::Int8(_) => Ok(SqlDataType::TinyInt(None)),
ConcreteDataType::UInt8(_) => Ok(SqlDataType::TinyIntUnsigned(None)),
ConcreteDataType::String(_) => Ok(SqlDataType::String(None)),
ConcreteDataType::Float32(_) => Ok(SqlDataType::Float(None)),
ConcreteDataType::Float64(_) => Ok(SqlDataType::Double(ExactNumberInfo::None)),
ConcreteDataType::Float32(_) => Ok(SqlDataType::Float(ExactNumberInfo::Precision(4))),
ConcreteDataType::Float64(_) => Ok(SqlDataType::Float(ExactNumberInfo::Precision(8))),
ConcreteDataType::Boolean(_) => Ok(SqlDataType::Boolean),
ConcreteDataType::Date(_) => Ok(SqlDataType::Date),
ConcreteDataType::Timestamp(ts_type) => Ok(SqlDataType::Timestamp(
@@ -345,10 +345,13 @@ pub fn concrete_data_type_to_sql_data_type(data_type: &ConcreteDataType) -> Resu
Some(time_type.precision()),
TimezoneInfo::None,
)),
ConcreteDataType::Interval(_) => Ok(SqlDataType::Interval),
ConcreteDataType::Interval(_) => Ok(SqlDataType::Interval {
fields: None,
precision: None,
}),
ConcreteDataType::Binary(_) => Ok(SqlDataType::Varbinary(None)),
ConcreteDataType::Decimal128(d) => Ok(SqlDataType::Decimal(
ExactNumberInfo::PrecisionAndScale(d.precision() as u64, d.scale() as u64),
ExactNumberInfo::PrecisionAndScale(d.precision() as u64, d.scale() as i64),
)),
ConcreteDataType::Json(_) => Ok(SqlDataType::JSON),
ConcreteDataType::Vector(v) => Ok(SqlDataType::Custom(
@@ -412,7 +415,7 @@ mod tests {
ConcreteDataType::string_datatype(),
);
check_type(
SqlDataType::Float(None),
SqlDataType::Float(ExactNumberInfo::Precision(4)),
ConcreteDataType::float32_datatype(),
);
check_type(
@@ -450,7 +453,10 @@ mod tests {
ConcreteDataType::timestamp_microsecond_datatype(),
);
check_type(
SqlDataType::Interval,
SqlDataType::Interval {
fields: None,
precision: None,
},
ConcreteDataType::interval_month_day_nano_datatype(),
);
check_type(SqlDataType::JSON, ConcreteDataType::json_datatype());
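A sketch of the sqlparser 0.59 change driving these edits: DataType::Interval is now a struct variant with optional fields and precision, so unit-variant uses become struct patterns and struct constructors:

use sqlparser::ast::DataType as SqlDataType;

// Was: *data_type == SqlDataType::Interval
fn is_interval(data_type: &SqlDataType) -> bool {
    matches!(data_type, SqlDataType::Interval { .. })
}

fn default_interval() -> SqlDataType {
    SqlDataType::Interval { fields: None, precision: None }
}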

View File

@@ -114,7 +114,7 @@ impl TransformRule for ExpandIntervalTransformRule {
kind,
format,
} => {
if DataType::Interval == *data_type {
if matches!(data_type, DataType::Interval { .. }) {
match &**cast_exp {
Expr::Value(ValueWithSpan {
value: Value::SingleQuotedString(value),
@@ -129,7 +129,7 @@ impl TransformRule for ExpandIntervalTransformRule {
*expr = Expr::Cast {
kind: kind.clone(),
expr: single_quoted_string_expr(interval_value),
data_type: DataType::Interval,
data_type: data_type.clone(),
format: std::mem::take(format),
}
}
@@ -392,7 +392,10 @@ mod tests {
let mut cast_to_interval_expr = Expr::Cast {
expr: single_quoted_string_expr("3y2mon".to_string()),
data_type: DataType::Interval,
data_type: DataType::Interval {
fields: None,
precision: None,
},
format: None,
kind: sqlparser::ast::CastKind::Cast,
};
@@ -407,7 +410,10 @@ mod tests {
expr: Box::new(Expr::Value(
Value::SingleQuotedString("3 years 2 months".to_string()).into()
)),
data_type: DataType::Interval,
data_type: DataType::Interval {
fields: None,
precision: None,
},
format: None,
}
);

View File

@@ -178,10 +178,10 @@ pub(crate) fn get_type_by_alias(data_type: &DataType) -> Option<DataType> {
DataType::UInt16 => Some(DataType::SmallIntUnsigned(None)),
DataType::UInt32 => Some(DataType::IntUnsigned(None)),
DataType::UInt64 => Some(DataType::BigIntUnsigned(None)),
DataType::Float4 => Some(DataType::Float(None)),
DataType::Float4 => Some(DataType::Float(ExactNumberInfo::Precision(4))),
DataType::Float8 => Some(DataType::Double(ExactNumberInfo::None)),
DataType::Float32 => Some(DataType::Float(None)),
DataType::Float64 => Some(DataType::Double(ExactNumberInfo::None)),
DataType::Float32 => Some(DataType::Float(ExactNumberInfo::Precision(4))),
DataType::Float64 => Some(DataType::Float(ExactNumberInfo::Precision(8))),
DataType::Bool => Some(DataType::Boolean),
DataType::Datetime(_) => Some(DataType::Timestamp(Some(6), TimezoneInfo::None)),
_ => None,
@@ -222,10 +222,10 @@ pub(crate) fn get_data_type_by_alias_name(name: &str) -> Option<DataType> {
"UINT16" => Some(DataType::SmallIntUnsigned(None)),
"UINT32" => Some(DataType::IntUnsigned(None)),
"UINT64" => Some(DataType::BigIntUnsigned(None)),
"FLOAT4" => Some(DataType::Float(None)),
"FLOAT4" => Some(DataType::Float(ExactNumberInfo::Precision(4))),
"FLOAT8" => Some(DataType::Double(ExactNumberInfo::None)),
"FLOAT32" => Some(DataType::Float(None)),
"FLOAT64" => Some(DataType::Double(ExactNumberInfo::None)),
"FLOAT32" => Some(DataType::Float(ExactNumberInfo::Precision(4))),
"FLOAT64" => Some(DataType::Float(ExactNumberInfo::Precision(8))),
// String type alias
"TINYTEXT" | "MEDIUMTEXT" | "LONGTEXT" => Some(DataType::Text),
_ => None,
@@ -256,7 +256,7 @@ mod tests {
);
assert_eq!(
get_data_type_by_alias_name("float32"),
Some(DataType::Float(None))
Some(DataType::Float(ExactNumberInfo::Precision(4)))
);
assert_eq!(
get_data_type_by_alias_name("float8"),
@@ -264,7 +264,7 @@ mod tests {
);
assert_eq!(
get_data_type_by_alias_name("float4"),
Some(DataType::Float(None))
Some(DataType::Float(ExactNumberInfo::Precision(4)))
);
assert_eq!(
get_data_type_by_alias_name("int8"),

View File

@@ -2,103 +2,103 @@
-- SQLNESS REPLACE (RoundRobinBatch.*) REDACTED
explain select * from numbers;
+---------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
| plan_type | plan |
+---------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
| logical_plan | MergeScan [is_placeholder=false, remote_input=[ |
| | Projection: numbers.number |
| | TableScan: numbers |
| | ]] |
| physical_plan | CooperativeExec |
+---------------+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
| plan_type | plan |
+---------------+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
| logical_plan | MergeScan [is_placeholder=false, remote_input=[ |
| | Projection: numbers.number |
| | TableScan: numbers |
| | ]] |
| physical_plan | CooperativeExec |
| | RepartitionExec: partitioning=REDACTED
| | CooperativeExec |
| | StreamScanAdapter: [<SendableRecordBatchStream>], schema: [Schema { fields: [Field { name: "number", data_type: UInt32, nullable: false, dict_id: 0, dict_is_ordered: false, metadata: {} }], metadata: {"greptime:version": "0"} }] |
| | |
+---------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
| | CooperativeExec |
| | StreamScanAdapter: [<SendableRecordBatchStream>], schema: [Schema { fields: [Field { name: "number", data_type: UInt32 }], metadata: {"greptime:version": "0"} }] |
| | |
+---------------+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
-- SQLNESS REPLACE (peers.*) REDACTED
-- SQLNESS REPLACE (RoundRobinBatch.*) REDACTED
explain select * from numbers order by number desc;
+---------------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
| plan_type | plan |
+---------------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
| logical_plan | MergeScan [is_placeholder=false, remote_input=[ |
| | Sort: numbers.number DESC NULLS FIRST |
| | Projection: numbers.number |
| | TableScan: numbers |
| | ]] |
| physical_plan | SortPreservingMergeExec: [number@0 DESC] |
| | CooperativeExec |
| | SortExec: expr=[number@0 DESC], preserve_partitioning=[true] |
+---------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
| plan_type | plan |
+---------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
| logical_plan | MergeScan [is_placeholder=false, remote_input=[ |
| | Sort: numbers.number DESC NULLS FIRST |
| | Projection: numbers.number |
| | TableScan: numbers |
| | ]] |
| physical_plan | SortPreservingMergeExec: [number@0 DESC] |
| | CooperativeExec |
| | SortExec: expr=[number@0 DESC], preserve_partitioning=[true] |
| | RepartitionExec: partitioning=REDACTED
| | CooperativeExec |
| | StreamScanAdapter: [<SendableRecordBatchStream>], schema: [Schema { fields: [Field { name: "number", data_type: UInt32, nullable: false, dict_id: 0, dict_is_ordered: false, metadata: {} }], metadata: {"greptime:version": "0"} }] |
| | |
+---------------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
| | CooperativeExec |
| | StreamScanAdapter: [<SendableRecordBatchStream>], schema: [Schema { fields: [Field { name: "number", data_type: UInt32 }], metadata: {"greptime:version": "0"} }] |
| | |
+---------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
-- SQLNESS REPLACE (peers.*) REDACTED
-- SQLNESS REPLACE (RoundRobinBatch.*) REDACTED
explain select * from numbers order by number asc;
+---------------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
| plan_type | plan |
+---------------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
| logical_plan | MergeScan [is_placeholder=false, remote_input=[ |
| | Sort: numbers.number ASC NULLS LAST |
| | Projection: numbers.number |
| | TableScan: numbers |
| | ]] |
| physical_plan | SortPreservingMergeExec: [number@0 ASC NULLS LAST] |
| | CooperativeExec |
| | SortExec: expr=[number@0 ASC NULLS LAST], preserve_partitioning=[true] |
+---------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
| plan_type | plan |
+---------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
| logical_plan | MergeScan [is_placeholder=false, remote_input=[ |
| | Sort: numbers.number ASC NULLS LAST |
| | Projection: numbers.number |
| | TableScan: numbers |
| | ]] |
| physical_plan | SortPreservingMergeExec: [number@0 ASC NULLS LAST] |
| | CooperativeExec |
| | SortExec: expr=[number@0 ASC NULLS LAST], preserve_partitioning=[true] |
| | RepartitionExec: partitioning=REDACTED
| | CooperativeExec |
| | StreamScanAdapter: [<SendableRecordBatchStream>], schema: [Schema { fields: [Field { name: "number", data_type: UInt32, nullable: false, dict_id: 0, dict_is_ordered: false, metadata: {} }], metadata: {"greptime:version": "0"} }] |
| | |
+---------------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
| | CooperativeExec |
| | StreamScanAdapter: [<SendableRecordBatchStream>], schema: [Schema { fields: [Field { name: "number", data_type: UInt32 }], metadata: {"greptime:version": "0"} }] |
| | |
+---------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
-- SQLNESS REPLACE (peers.*) REDACTED
-- SQLNESS REPLACE (RoundRobinBatch.*) REDACTED
explain select * from numbers order by number desc limit 10;
+---------------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
| plan_type | plan |
+---------------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
| logical_plan | MergeScan [is_placeholder=false, remote_input=[ |
| | Limit: skip=0, fetch=10 |
| | Sort: numbers.number DESC NULLS FIRST |
| | Projection: numbers.number |
| | TableScan: numbers |
| | ]] |
| physical_plan | SortPreservingMergeExec: [number@0 DESC], fetch=10 |
| | CooperativeExec |
+---------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
| plan_type | plan |
+---------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
| logical_plan | MergeScan [is_placeholder=false, remote_input=[ |
| | Limit: skip=0, fetch=10 |
| | Sort: numbers.number DESC NULLS FIRST |
| | Projection: numbers.number |
| | TableScan: numbers |
| | ]] |
| physical_plan | SortPreservingMergeExec: [number@0 DESC], fetch=10 |
| | CooperativeExec |
| | RepartitionExec: partitioning=REDACTED
| | SortExec: TopK(fetch=10), expr=[number@0 DESC], preserve_partitioning=[false] |
| | CooperativeExec |
| | StreamScanAdapter: [<SendableRecordBatchStream>], schema: [Schema { fields: [Field { name: "number", data_type: UInt32, nullable: false, dict_id: 0, dict_is_ordered: false, metadata: {} }], metadata: {"greptime:version": "0"} }] |
| | |
+---------------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
| | SortExec: TopK(fetch=10), expr=[number@0 DESC], preserve_partitioning=[false] |
| | CooperativeExec |
| | StreamScanAdapter: [<SendableRecordBatchStream>], schema: [Schema { fields: [Field { name: "number", data_type: UInt32 }], metadata: {"greptime:version": "0"} }] |
| | |
+---------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
-- SQLNESS REPLACE (peers.*) REDACTED
-- SQLNESS REPLACE (RoundRobinBatch.*) REDACTED
explain select * from numbers order by number asc limit 10;
+---------------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
| plan_type | plan |
+---------------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
| logical_plan | MergeScan [is_placeholder=false, remote_input=[ |
| | Limit: skip=0, fetch=10 |
| | Sort: numbers.number ASC NULLS LAST |
| | Projection: numbers.number |
| | TableScan: numbers |
| | ]] |
| physical_plan | SortPreservingMergeExec: [number@0 ASC NULLS LAST], fetch=10 |
| | CooperativeExec |
+---------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
| plan_type | plan |
+---------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
| logical_plan | MergeScan [is_placeholder=false, remote_input=[ |
| | Limit: skip=0, fetch=10 |
| | Sort: numbers.number ASC NULLS LAST |
| | Projection: numbers.number |
| | TableScan: numbers |
| | ]] |
| physical_plan | SortPreservingMergeExec: [number@0 ASC NULLS LAST], fetch=10 |
| | CooperativeExec |
| | RepartitionExec: partitioning=REDACTED
| | SortExec: TopK(fetch=10), expr=[number@0 ASC NULLS LAST], preserve_partitioning=[false] |
| | CooperativeExec |
| | StreamScanAdapter: [<SendableRecordBatchStream>], schema: [Schema { fields: [Field { name: "number", data_type: UInt32, nullable: false, dict_id: 0, dict_is_ordered: false, metadata: {} }], metadata: {"greptime:version": "0"} }] |
| | |
+---------------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
| | SortExec: TopK(fetch=10), expr=[number@0 ASC NULLS LAST], preserve_partitioning=[false] |
| | CooperativeExec |
| | StreamScanAdapter: [<SendableRecordBatchStream>], schema: [Schema { fields: [Field { name: "number", data_type: UInt32 }], metadata: {"greptime:version": "0"} }] |
| | |
+---------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------+

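The narrower plan snapshots in this file come from arrow 57 formatting Field and Schema more compactly: the deprecated dict_id/dict_is_ordered attributes are gone and default attributes are omitted. A sketch to see the difference locally, assuming only that StreamScanAdapter embeds the field's standard formatting in the plan:

    use arrow_schema::{DataType, Field};

    fn main() {
        // arrow 56 printed every attribute, including
        // `dict_id: 0, dict_is_ordered: false, metadata: {}`;
        // arrow 57 prints the compact form seen in the new snapshots.
        let f = Field::new("number", DataType::UInt32, false);
        println!("{f:?}");
    }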
View File

@@ -38,7 +38,7 @@ Affected Rows: 3
SELECT AVG(i), AVG(1), AVG(DISTINCT i), AVG(NULL) FROM integers;
Error: 3000(PlanQuery), Failed to plan SQL: Error during planning: Execution error: Function 'avg' user-defined coercion failed with "Error during planning: The function \"avg\" does not support inputs of type Null." No function matches the given name and argument types 'avg(Null)'. You might need to add explicit type casts.
Error: 3000(PlanQuery), Failed to plan SQL: Error during planning: Execution error: Function 'avg' user-defined coercion failed with "Error during planning: Avg does not support inputs of type Null." No function matches the given name and argument types 'avg(Null)'. You might need to add explicit type casts.
Candidate functions:
avg(UserDefined)

View File

@@ -1,19 +1,19 @@
--- date_add ---
SELECT date_add('2023-12-06 07:39:46.222'::TIMESTAMP_MS, INTERVAL '5 day');
+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
| date_add(arrow_cast(Utf8("2023-12-06 07:39:46.222"),Utf8("Timestamp(Millisecond, None)")),IntervalMonthDayNano("IntervalMonthDayNano { months: 0, days: 5, nanoseconds: 0 }")) |
+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
| 2023-12-11T07:39:46.222 |
+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
+-----------------------------------------------------------------------------------------------------------------------------------------------------------------+
| date_add(arrow_cast(Utf8("2023-12-06 07:39:46.222"),Utf8("Timestamp(ms)")),IntervalMonthDayNano("IntervalMonthDayNano { months: 0, days: 5, nanoseconds: 0 }")) |
+-----------------------------------------------------------------------------------------------------------------------------------------------------------------+
| 2023-12-11T07:39:46.222 |
+-----------------------------------------------------------------------------------------------------------------------------------------------------------------+
SELECT date_add('2023-12-06 07:39:46.222'::TIMESTAMP_MS, '5 day');
+----------------------------------------------------------------------------------------------------------+
| date_add(arrow_cast(Utf8("2023-12-06 07:39:46.222"),Utf8("Timestamp(Millisecond, None)")),Utf8("5 day")) |
+----------------------------------------------------------------------------------------------------------+
| 2023-12-11T07:39:46.222 |
+----------------------------------------------------------------------------------------------------------+
+-------------------------------------------------------------------------------------------+
| date_add(arrow_cast(Utf8("2023-12-06 07:39:46.222"),Utf8("Timestamp(ms)")),Utf8("5 day")) |
+-------------------------------------------------------------------------------------------+
| 2023-12-11T07:39:46.222 |
+-------------------------------------------------------------------------------------------+
SELECT date_add('2023-12-06'::DATE, INTERVAL '3 month 5 day');
@@ -34,19 +34,19 @@ SELECT date_add('2023-12-06'::DATE, '3 month 5 day');
--- date_sub ---
SELECT date_sub('2023-12-06 07:39:46.222'::TIMESTAMP_MS, INTERVAL '5 day');
+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
| date_sub(arrow_cast(Utf8("2023-12-06 07:39:46.222"),Utf8("Timestamp(Millisecond, None)")),IntervalMonthDayNano("IntervalMonthDayNano { months: 0, days: 5, nanoseconds: 0 }")) |
+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
| 2023-12-01T07:39:46.222 |
+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
+-----------------------------------------------------------------------------------------------------------------------------------------------------------------+
| date_sub(arrow_cast(Utf8("2023-12-06 07:39:46.222"),Utf8("Timestamp(ms)")),IntervalMonthDayNano("IntervalMonthDayNano { months: 0, days: 5, nanoseconds: 0 }")) |
+-----------------------------------------------------------------------------------------------------------------------------------------------------------------+
| 2023-12-01T07:39:46.222 |
+-----------------------------------------------------------------------------------------------------------------------------------------------------------------+
SELECT date_sub('2023-12-06 07:39:46.222'::TIMESTAMP_MS, '5 day');
+----------------------------------------------------------------------------------------------------------+
| date_sub(arrow_cast(Utf8("2023-12-06 07:39:46.222"),Utf8("Timestamp(Millisecond, None)")),Utf8("5 day")) |
+----------------------------------------------------------------------------------------------------------+
| 2023-12-01T07:39:46.222 |
+----------------------------------------------------------------------------------------------------------+
+-------------------------------------------------------------------------------------------+
| date_sub(arrow_cast(Utf8("2023-12-06 07:39:46.222"),Utf8("Timestamp(ms)")),Utf8("5 day")) |
+-------------------------------------------------------------------------------------------+
| 2023-12-01T07:39:46.222 |
+-------------------------------------------------------------------------------------------+
SELECT date_sub('2023-12-06'::DATE, INTERVAL '3 month 5 day');
@@ -67,28 +67,28 @@ SELECT date_sub('2023-12-06'::DATE, '3 month 5 day');
--- date_format ---
SELECT date_format('2023-12-06 07:39:46.222'::TIMESTAMP_MS, '%Y-%m-%d %H:%M:%S:%3f');
+-----------------------------------------------------------------------------------------------------------------------------+
| date_format(arrow_cast(Utf8("2023-12-06 07:39:46.222"),Utf8("Timestamp(Millisecond, None)")),Utf8("%Y-%m-%d %H:%M:%S:%3f")) |
+-----------------------------------------------------------------------------------------------------------------------------+
| 2023-12-06 07:39:46:222 |
+-----------------------------------------------------------------------------------------------------------------------------+
+--------------------------------------------------------------------------------------------------------------+
| date_format(arrow_cast(Utf8("2023-12-06 07:39:46.222"),Utf8("Timestamp(ms)")),Utf8("%Y-%m-%d %H:%M:%S:%3f")) |
+--------------------------------------------------------------------------------------------------------------+
| 2023-12-06 07:39:46:222 |
+--------------------------------------------------------------------------------------------------------------+
SELECT date_format('2023-12-06 07:39:46.222'::TIMESTAMP_S, '%Y-%m-%d %H:%M:%S:%3f');
+------------------------------------------------------------------------------------------------------------------------+
| date_format(arrow_cast(Utf8("2023-12-06 07:39:46.222"),Utf8("Timestamp(Second, None)")),Utf8("%Y-%m-%d %H:%M:%S:%3f")) |
+------------------------------------------------------------------------------------------------------------------------+
| 2023-12-06 07:39:46:000 |
+------------------------------------------------------------------------------------------------------------------------+
+-------------------------------------------------------------------------------------------------------------+
| date_format(arrow_cast(Utf8("2023-12-06 07:39:46.222"),Utf8("Timestamp(s)")),Utf8("%Y-%m-%d %H:%M:%S:%3f")) |
+-------------------------------------------------------------------------------------------------------------+
| 2023-12-06 07:39:46:000 |
+-------------------------------------------------------------------------------------------------------------+
--- datetime not supported yet ---
SELECT date_format('2023-12-06 07:39:46.222'::DATETIME, '%Y-%m-%d %H:%M:%S:%3f');
+-----------------------------------------------------------------------------------------------------------------------------+
| date_format(arrow_cast(Utf8("2023-12-06 07:39:46.222"),Utf8("Timestamp(Microsecond, None)")),Utf8("%Y-%m-%d %H:%M:%S:%3f")) |
+-----------------------------------------------------------------------------------------------------------------------------+
| 2023-12-06 07:39:46:222 |
+-----------------------------------------------------------------------------------------------------------------------------+
+--------------------------------------------------------------------------------------------------------------+
| date_format(arrow_cast(Utf8("2023-12-06 07:39:46.222"),Utf8("Timestamp(µs)")),Utf8("%Y-%m-%d %H:%M:%S:%3f")) |
+--------------------------------------------------------------------------------------------------------------+
| 2023-12-06 07:39:46:222 |
+--------------------------------------------------------------------------------------------------------------+
SELECT date_format('2023-12-06'::DATE, '%m-%d');

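Every expected table in this file narrowed for one reason: arrow 57 shortens timestamp type names in rendered expressions, from Timestamp(Millisecond, None) to Timestamp(ms). A sketch under the assumption that the compact name is the type's textual rendering in arrow 57:

    use arrow_schema::{DataType, TimeUnit};

    fn main() {
        // Expected to render as "Timestamp(ms)" under arrow 57; a
        // timezone, when set, is still part of the rendered name.
        let t = DataType::Timestamp(TimeUnit::Millisecond, None);
        println!("{t}");
    }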
View File

@@ -335,11 +335,11 @@ FROM cell_cte;
SELECT json_encode_path(37.76938, -122.3889, 1728083375::TimestampSecond);
+----------------------------------------------------------------------------------------------------------------------+
| json_encode_path(Float64(37.76938),Float64(-122.3889),arrow_cast(Int64(1728083375),Utf8("Timestamp(Second, None)"))) |
+----------------------------------------------------------------------------------------------------------------------+
| [[-122.3889,37.76938]] |
+----------------------------------------------------------------------------------------------------------------------+
+-----------------------------------------------------------------------------------------------------------+
| json_encode_path(Float64(37.76938),Float64(-122.3889),arrow_cast(Int64(1728083375),Utf8("Timestamp(s)"))) |
+-----------------------------------------------------------------------------------------------------------+
| [[-122.3889,37.76938]] |
+-----------------------------------------------------------------------------------------------------------+
SELECT json_encode_path(lat, lon, ts)
FROM(
@@ -360,11 +360,11 @@ FROM(
SELECT UNNEST(geo_path(37.76938, -122.3889, 1728083375::TimestampSecond));
+----------------------------------------------------------------------------------------------------------------------------------------+----------------------------------------------------------------------------------------------------------------------------------------+
| __unnest_placeholder(geo_path(Float64(37.76938),Float64(-122.3889),arrow_cast(Int64(1728083375),Utf8("Timestamp(Second, None)")))).lat | __unnest_placeholder(geo_path(Float64(37.76938),Float64(-122.3889),arrow_cast(Int64(1728083375),Utf8("Timestamp(Second, None)")))).lng |
+----------------------------------------------------------------------------------------------------------------------------------------+----------------------------------------------------------------------------------------------------------------------------------------+
| [37.76938] | [-122.3889] |
+----------------------------------------------------------------------------------------------------------------------------------------+----------------------------------------------------------------------------------------------------------------------------------------+
+-----------------------------------------------------------------------------------------------------------------------------+-----------------------------------------------------------------------------------------------------------------------------+
| __unnest_placeholder(geo_path(Float64(37.76938),Float64(-122.3889),arrow_cast(Int64(1728083375),Utf8("Timestamp(s)")))).lat | __unnest_placeholder(geo_path(Float64(37.76938),Float64(-122.3889),arrow_cast(Int64(1728083375),Utf8("Timestamp(s)")))).lng |
+-----------------------------------------------------------------------------------------------------------------------------+-----------------------------------------------------------------------------------------------------------------------------+
| [37.76938] | [-122.3889] |
+-----------------------------------------------------------------------------------------------------------------------------+-----------------------------------------------------------------------------------------------------------------------------+
SELECT UNNEST(geo_path(lat, lon, ts))
FROM(

View File

@@ -22,9 +22,9 @@ select GREATEST('2000-02-11'::Date, '2020-12-30'::Date);
select GREATEST('2021-07-01 00:00:00'::Timestamp, '2024-07-01 00:00:00'::Timestamp);
+---------------------------------------------------------------------------------------------------------------------------------------------------------------------+
| greatest(arrow_cast(Utf8("2021-07-01 00:00:00"),Utf8("Timestamp(Millisecond, None)")),arrow_cast(Utf8("2024-07-01 00:00:00"),Utf8("Timestamp(Millisecond, None)"))) |
+---------------------------------------------------------------------------------------------------------------------------------------------------------------------+
| 2024-07-01T00:00:00 |
+---------------------------------------------------------------------------------------------------------------------------------------------------------------------+
+---------------------------------------------------------------------------------------------------------------------------------------+
| greatest(arrow_cast(Utf8("2021-07-01 00:00:00"),Utf8("Timestamp(ms)")),arrow_cast(Utf8("2024-07-01 00:00:00"),Utf8("Timestamp(ms)"))) |
+---------------------------------------------------------------------------------------------------------------------------------------+
| 2024-07-01T00:00:00 |
+---------------------------------------------------------------------------------------------------------------------------------------+

View File

@@ -1,6 +1,6 @@
-- Migrated from DuckDB test: test/sql/join/ complex condition tests
-- Tests complex join conditions and predicates
CREATE TABLE sales_reps(rep_id INTEGER, "name" VARCHAR, region VARCHAR, quota INTEGER, ts TIMESTAMP TIME INDEX);
CREATE TABLE sales_reps(rep_id INTEGER, "name" VARCHAR, "region" VARCHAR, quota INTEGER, ts TIMESTAMP TIME INDEX);
Affected Rows: 0

View File

@@ -1,7 +1,7 @@
-- Migrated from DuckDB test: test/sql/join/ complex condition tests
-- Tests complex join conditions and predicates
CREATE TABLE sales_reps(rep_id INTEGER, "name" VARCHAR, region VARCHAR, quota INTEGER, ts TIMESTAMP TIME INDEX);
CREATE TABLE sales_reps(rep_id INTEGER, "name" VARCHAR, "region" VARCHAR, quota INTEGER, ts TIMESTAMP TIME INDEX);
CREATE TABLE customer_accounts(account_id INTEGER, account_name VARCHAR, region VARCHAR, rep_id INTEGER, revenue INTEGER, ts TIMESTAMP TIME INDEX);

View File

@@ -3,7 +3,7 @@ CREATE TABLE events_push(event_id INTEGER, user_id INTEGER, event_type VARCHAR,
Affected Rows: 0
CREATE TABLE users_push(user_id INTEGER, user_name VARCHAR, region VARCHAR, ts TIMESTAMP TIME INDEX);
CREATE TABLE users_push(user_id INTEGER, user_name VARCHAR, "region" VARCHAR, ts TIMESTAMP TIME INDEX);
Affected Rows: 0

View File

@@ -2,7 +2,7 @@
CREATE TABLE events_push(event_id INTEGER, user_id INTEGER, event_type VARCHAR, "value" INTEGER, ts TIMESTAMP TIME INDEX);
CREATE TABLE users_push(user_id INTEGER, user_name VARCHAR, region VARCHAR, ts TIMESTAMP TIME INDEX);
CREATE TABLE users_push(user_id INTEGER, user_name VARCHAR, "region" VARCHAR, ts TIMESTAMP TIME INDEX);
INSERT INTO events_push VALUES (1, 100, 'click', 1, 1000), (2, 100, 'view', 2, 2000), (3, 200, 'click', 1, 3000), (4, 300, 'purchase', 5, 4000);

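These result files change only because the matching .sql inputs now quote region; presumably the sqlparser release pinned by DataFusion 51 reserves REGION, so the bare identifier no longer parses as a column name. The same quoting reappears in the node_network_transmit_bytes_total tables further down. A quick probe under that assumption (the dialect choice is illustrative):

    use sqlparser::dialect::GenericDialect;
    use sqlparser::parser::Parser;

    fn main() {
        // Check whether REGION still parses as a bare column name;
        // the quoted form should parse either way.
        for sql in [
            r#"CREATE TABLE t (region VARCHAR)"#,
            r#"CREATE TABLE t ("region" VARCHAR)"#,
        ] {
            match Parser::parse_sql(&GenericDialect {}, sql) {
                Ok(_) => println!("ok:  {sql}"),
                Err(e) => println!("err: {sql}: {e}"),
            }
        }
    }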
View File

@@ -45,11 +45,11 @@ Error: 3000(PlanQuery), Failed to plan SQL: No field named a.
SELECT a FROM test LIMIT SUM(42);
Error: 1001(Unsupported), This feature is not implemented: Unsupported LIMIT expression: Some(AggregateFunction(AggregateFunction { func: AggregateUDF { inner: Sum { signature: Signature { type_signature: UserDefined, volatility: Immutable } } }, params: AggregateFunctionParams { args: [Literal(Int64(42), None)], distinct: false, filter: None, order_by: [], null_treatment: None } }))
Error: 1001(Unsupported), This feature is not implemented: Unsupported LIMIT expression: Some(AggregateFunction(AggregateFunction { func: AggregateUDF { inner: Sum { signature: Signature { type_signature: UserDefined, volatility: Immutable, parameter_names: None } } }, params: AggregateFunctionParams { args: [Literal(Int64(42), None)], distinct: false, filter: None, order_by: [], null_treatment: None } }))
SELECT a FROM test LIMIT row_number() OVER ();
Error: 3001(EngineExecuteQuery), This feature is not implemented: Unsupported LIMIT expression: Some(Cast(Cast { expr: WindowFunction(WindowFunction { fun: WindowUDF(WindowUDF { inner: RowNumber { signature: Signature { type_signature: Nullary, volatility: Immutable } } }), params: WindowFunctionParams { args: [], partition_by: [], order_by: [], window_frame: WindowFrame { units: Rows, start_bound: Preceding(UInt64(NULL)), end_bound: Following(UInt64(NULL)), is_causal: false }, filter: None, null_treatment: None, distinct: false } }), data_type: Int64 }))
Error: 3001(EngineExecuteQuery), This feature is not implemented: Unsupported LIMIT expression: Some(Cast(Cast { expr: WindowFunction(WindowFunction { fun: WindowUDF(WindowUDF { inner: RowNumber { signature: Signature { type_signature: Nullary, volatility: Immutable, parameter_names: None } } }), params: WindowFunctionParams { args: [], partition_by: [], order_by: [], window_frame: WindowFrame { units: Rows, start_bound: Preceding(UInt64(NULL)), end_bound: Following(UInt64(NULL)), is_causal: false }, filter: None, null_treatment: None, distinct: false } }), data_type: Int64 }))
CREATE TABLE test2 (a STRING, ts TIMESTAMP TIME INDEX);
@@ -102,23 +102,23 @@ SELECT * FROM integers LIMIT 4;
SELECT * FROM integers as int LIMIT (SELECT MIN(integers.i) FROM integers);
Error: 3001(EngineExecuteQuery), DataFusion error: Error during planning: Expected LIMIT to be an integer or null, but got Timestamp(Millisecond, None)
Error: 3001(EngineExecuteQuery), DataFusion error: Error during planning: Expected LIMIT to be an integer or null, but got Timestamp(ms)
SELECT * FROM integers as int OFFSET (SELECT MIN(integers.i) FROM integers);
Error: 3001(EngineExecuteQuery), DataFusion error: Error during planning: Expected OFFSET to be an integer or null, but got Timestamp(Millisecond, None)
Error: 3001(EngineExecuteQuery), DataFusion error: Error during planning: Expected OFFSET to be an integer or null, but got Timestamp(ms)
SELECT * FROM integers as int LIMIT (SELECT MAX(integers.i) FROM integers) OFFSET (SELECT MIN(integers.i) FROM integers);
Error: 3001(EngineExecuteQuery), DataFusion error: Error during planning: Expected LIMIT to be an integer or null, but got Timestamp(Millisecond, None)
Error: 3001(EngineExecuteQuery), DataFusion error: Error during planning: Expected LIMIT to be an integer or null, but got Timestamp(ms)
SELECT * FROM integers as int LIMIT (SELECT max(integers.i) FROM integers where i > 5);
Error: 3001(EngineExecuteQuery), DataFusion error: Error during planning: Cannot infer common argument type for comparison operation Timestamp(Millisecond, None) > Int64
Error: 3001(EngineExecuteQuery), DataFusion error: Error during planning: Cannot infer common argument type for comparison operation Timestamp(ms) > Int64
SELECT * FROM integers as int LIMIT (SELECT max(integers.i) FROM integers where i > 5);
Error: 3001(EngineExecuteQuery), DataFusion error: Error during planning: Cannot infer common argument type for comparison operation Timestamp(Millisecond, None) > Int64
Error: 3001(EngineExecuteQuery), DataFusion error: Error during planning: Cannot infer common argument type for comparison operation Timestamp(ms) > Int64
SELECT * FROM integers as int LIMIT (SELECT NULL);

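Two things change in these snapshots: the shorter Timestamp(ms) names covered earlier, and an extra parameter_names: None inside Signature. DataFusion 51 appears to have added that field (likely for named-argument support, an assumption), with the existing constructors defaulting it. A sketch:

    use datafusion_expr::{Signature, Volatility};

    fn main() {
        // Constructors are unchanged; only the Debug output captured
        // in the error snapshots above gained a field.
        let sig = Signature::user_defined(Volatility::Immutable);
        println!("{sig:?}");
    }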
View File

@@ -732,7 +732,7 @@ CREATE TABLE IF NOT EXISTS node_network_transmit_bytes_total (
host STRING NULL,
job STRING NULL,
node STRING NULL,
region STRING NULL,
"region" STRING NULL,
src_port STRING NULL,
src STRING NULL,
src_namespace STRING NULL,

View File

@@ -349,7 +349,7 @@ CREATE TABLE IF NOT EXISTS node_network_transmit_bytes_total (
host STRING NULL,
job STRING NULL,
node STRING NULL,
region STRING NULL,
"region" STRING NULL,
src_port STRING NULL,
src STRING NULL,
src_namespace STRING NULL,

View File

@@ -45,19 +45,19 @@ SELECT
FROM
ngx_access_log);
+---------------+-----------------------------------------------------------------------------------+
| plan_type | plan |
+---------------+-----------------------------------------------------------------------------------+
| logical_plan | MergeScan [is_placeholder=false, remote_input=[ |
| | Projection: count(Int64(1)) AS count(*) |
| | Aggregate: groupBy=[[]], aggr=[[count(Int64(1))]] |
| | Projection: TimestampNanosecond(NOW, Some("+00:00")) AS now() |
| | TableScan: ngx_access_log |
| | ]] |
| physical_plan | CooperativeExec |
+---------------+-------------------------------------------------------------------------+
| plan_type | plan |
+---------------+-------------------------------------------------------------------------+
| logical_plan | MergeScan [is_placeholder=false, remote_input=[ |
| | Projection: count(Int64(1)) AS count(*) |
| | Aggregate: groupBy=[[]], aggr=[[count(Int64(1))]] |
| | Projection: TimestampNanosecond(NOW, None) AS now() |
| | TableScan: ngx_access_log |
| | ]] |
| physical_plan | CooperativeExec |
| | MergeScanExec: REDACTED
| | |
+---------------+-----------------------------------------------------------------------------------+
| | |
+---------------+-------------------------------------------------------------------------+
DROP TABLE ngx_access_log;

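Besides the narrower table, the folded now() literal in the logical plan changes from TimestampNanosecond(NOW, Some("+00:00")) to TimestampNanosecond(NOW, None). The snapshot alone does not show whether the literal's type actually lost its timezone annotation or only its rendering changed; a sketch to check against a local build (assumes only the datafusion crate plus a tokio runtime):

    use datafusion::prelude::SessionContext;

    #[tokio::main]
    async fn main() -> datafusion::error::Result<()> {
        // Inspect how DataFusion 51 types and renders the
        // constant-folded now() literal.
        let ctx = SessionContext::new();
        ctx.sql("EXPLAIN SELECT now()").await?.show().await?;
        Ok(())
    }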
View File

@@ -477,28 +477,28 @@ SELECT
FROM tql_base
ORDER BY ts;
+---------------+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
| plan_type | plan |
+---------------+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
| logical_plan | Sort: tql_base.ts ASC NULLS LAST |
| | Projection: tql_base.ts, tql_base.val, lag(tql_base.val,Int64(1)) ORDER BY [tql_base.ts ASC NULLS LAST] RANGE BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW AS prev_value |
| | WindowAggr: windowExpr=[[lag(tql_base.val, Int64(1)) ORDER BY [tql_base.ts ASC NULLS LAST] RANGE BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW]] |
| | MergeScan [is_placeholder=false, remote_input=[ |
| | SubqueryAlias: tql_base |
| | Projection: metric.ts AS ts, metric.val AS val |
| | PromInstantManipulate: range=[0..40000], lookback=[300000], interval=[10000], time index=[ts] |
| | PromSeriesDivide: tags=[] |
| | Filter: metric.ts >= TimestampMillisecond(-300000, None) AND metric.ts <= TimestampMillisecond(340000, None) |
| | TableScan: metric |
| | ]] |
| physical_plan | ProjectionExec: expr=[ts@0 as ts, val@1 as val, lag(tql_base.val,Int64(1)) ORDER BY [tql_base.ts ASC NULLS LAST] RANGE BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW@2 as prev_value] |
| | BoundedWindowAggExec: wdw=[lag(tql_base.val,Int64(1)) ORDER BY [tql_base.ts ASC NULLS LAST] RANGE BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW: Field { name: "lag(tql_base.val,Int64(1)) ORDER BY [tql_base.ts ASC NULLS LAST] RANGE BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW", data_type: Float64, nullable: true, dict_id: 0, dict_is_ordered: false, metadata: {} }, frame: RANGE BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW], mode=[Sorted] |
| | SortPreservingMergeExec: [ts@0 ASC NULLS LAST] |
+---------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
| plan_type | plan |
+---------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
| logical_plan | Sort: tql_base.ts ASC NULLS LAST |
| | Projection: tql_base.ts, tql_base.val, lag(tql_base.val,Int64(1)) ORDER BY [tql_base.ts ASC NULLS LAST] RANGE BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW AS prev_value |
| | WindowAggr: windowExpr=[[lag(tql_base.val, Int64(1)) ORDER BY [tql_base.ts ASC NULLS LAST] RANGE BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW]] |
| | MergeScan [is_placeholder=false, remote_input=[ |
| | SubqueryAlias: tql_base |
| | Projection: metric.ts AS ts, metric.val AS val |
| | PromInstantManipulate: range=[0..40000], lookback=[300000], interval=[10000], time index=[ts] |
| | PromSeriesDivide: tags=[] |
| | Filter: metric.ts >= TimestampMillisecond(-300000, None) AND metric.ts <= TimestampMillisecond(340000, None) |
| | TableScan: metric |
| | ]] |
| physical_plan | ProjectionExec: expr=[ts@0 as ts, val@1 as val, lag(tql_base.val,Int64(1)) ORDER BY [tql_base.ts ASC NULLS LAST] RANGE BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW@2 as prev_value] |
| | BoundedWindowAggExec: wdw=[lag(tql_base.val,Int64(1)) ORDER BY [tql_base.ts ASC NULLS LAST] RANGE BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW: Field { "lag(tql_base.val,Int64(1)) ORDER BY [tql_base.ts ASC NULLS LAST] RANGE BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW": nullable Float64 }, frame: RANGE BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW], mode=[Sorted] |
| | SortPreservingMergeExec: [ts@0 ASC NULLS LAST] |
| | SortExec: expr=[ts@0 ASC NULLS LAST], preserve_REDACTED
| | CooperativeExec |
| | CooperativeExec |
| | MergeScanExec: REDACTED
| | |
+---------------+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
| | |
+---------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
-- TQL CTE with HAVING clause
WITH tql_grouped(ts, host, cpu) AS (

View File

@@ -74,7 +74,7 @@ SELECT arrow_typeof(INTERVAL '1 month');
-- INTERVAL + TIME CONSTANT
SELECT current_time() + INTERVAL '1 hour';
Error: 3000(PlanQuery), Failed to plan SQL: Error during planning: Cannot coerce arithmetic expression Time64(Nanosecond) + Interval(MonthDayNano) to valid types
Error: 3000(PlanQuery), Failed to plan SQL: Error during planning: Cannot coerce arithmetic expression Time64(ns) + Interval(MonthDayNano) to valid types
-- table with interval type test
-- breaking change from #5422: tables do not support the interval type and will raise an error
@@ -185,67 +185,67 @@ SELECT INTERVAL '2h' + INTERVAL 'P3Y3M700DT133H17M36.789S';
select '2022-01-01T00:00:01'::timestamp + '1 days'::interval;
+-----------------------------------------------------------------------------------------------+
| arrow_cast(Utf8("2022-01-01T00:00:01"),Utf8("Timestamp(Millisecond, None)")) + Utf8("1 days") |
+-----------------------------------------------------------------------------------------------+
| 2022-01-02T00:00:01 |
+-----------------------------------------------------------------------------------------------+
+--------------------------------------------------------------------------------+
| arrow_cast(Utf8("2022-01-01T00:00:01"),Utf8("Timestamp(ms)")) + Utf8("1 days") |
+--------------------------------------------------------------------------------+
| 2022-01-02T00:00:01 |
+--------------------------------------------------------------------------------+
select '2022-01-01T00:00:01'::timestamp + '2 days'::interval;
+-----------------------------------------------------------------------------------------------+
| arrow_cast(Utf8("2022-01-01T00:00:01"),Utf8("Timestamp(Millisecond, None)")) + Utf8("2 days") |
+-----------------------------------------------------------------------------------------------+
| 2022-01-03T00:00:01 |
+-----------------------------------------------------------------------------------------------+
+--------------------------------------------------------------------------------+
| arrow_cast(Utf8("2022-01-01T00:00:01"),Utf8("Timestamp(ms)")) + Utf8("2 days") |
+--------------------------------------------------------------------------------+
| 2022-01-03T00:00:01 |
+--------------------------------------------------------------------------------+
select '2022-01-01T00:00:01'::timestamp - '1 days'::interval;
+-----------------------------------------------------------------------------------------------+
| arrow_cast(Utf8("2022-01-01T00:00:01"),Utf8("Timestamp(Millisecond, None)")) - Utf8("1 days") |
+-----------------------------------------------------------------------------------------------+
| 2021-12-31T00:00:01 |
+-----------------------------------------------------------------------------------------------+
+--------------------------------------------------------------------------------+
| arrow_cast(Utf8("2022-01-01T00:00:01"),Utf8("Timestamp(ms)")) - Utf8("1 days") |
+--------------------------------------------------------------------------------+
| 2021-12-31T00:00:01 |
+--------------------------------------------------------------------------------+
select '2022-01-01T00:00:01'::timestamp - '2 days'::interval;
+-----------------------------------------------------------------------------------------------+
| arrow_cast(Utf8("2022-01-01T00:00:01"),Utf8("Timestamp(Millisecond, None)")) - Utf8("2 days") |
+-----------------------------------------------------------------------------------------------+
| 2021-12-30T00:00:01 |
+-----------------------------------------------------------------------------------------------+
+--------------------------------------------------------------------------------+
| arrow_cast(Utf8("2022-01-01T00:00:01"),Utf8("Timestamp(ms)")) - Utf8("2 days") |
+--------------------------------------------------------------------------------+
| 2021-12-30T00:00:01 |
+--------------------------------------------------------------------------------+
select '2022-01-01T00:00:01'::timestamp + '1 month'::interval;
+------------------------------------------------------------------------------------------------+
| arrow_cast(Utf8("2022-01-01T00:00:01"),Utf8("Timestamp(Millisecond, None)")) + Utf8("1 month") |
+------------------------------------------------------------------------------------------------+
| 2022-02-01T00:00:01 |
+------------------------------------------------------------------------------------------------+
+---------------------------------------------------------------------------------+
| arrow_cast(Utf8("2022-01-01T00:00:01"),Utf8("Timestamp(ms)")) + Utf8("1 month") |
+---------------------------------------------------------------------------------+
| 2022-02-01T00:00:01 |
+---------------------------------------------------------------------------------+
select '2022-01-01T00:00:01'::timestamp + '2 months'::interval;
+-------------------------------------------------------------------------------------------------+
| arrow_cast(Utf8("2022-01-01T00:00:01"),Utf8("Timestamp(Millisecond, None)")) + Utf8("2 months") |
+-------------------------------------------------------------------------------------------------+
| 2022-03-01T00:00:01 |
+-------------------------------------------------------------------------------------------------+
+----------------------------------------------------------------------------------+
| arrow_cast(Utf8("2022-01-01T00:00:01"),Utf8("Timestamp(ms)")) + Utf8("2 months") |
+----------------------------------------------------------------------------------+
| 2022-03-01T00:00:01 |
+----------------------------------------------------------------------------------+
select '2022-01-01T00:00:01'::timestamp + '1 year'::interval;
+-----------------------------------------------------------------------------------------------+
| arrow_cast(Utf8("2022-01-01T00:00:01"),Utf8("Timestamp(Millisecond, None)")) + Utf8("1 year") |
+-----------------------------------------------------------------------------------------------+
| 2023-01-01T00:00:01 |
+-----------------------------------------------------------------------------------------------+
+--------------------------------------------------------------------------------+
| arrow_cast(Utf8("2022-01-01T00:00:01"),Utf8("Timestamp(ms)")) + Utf8("1 year") |
+--------------------------------------------------------------------------------+
| 2023-01-01T00:00:01 |
+--------------------------------------------------------------------------------+
select '2023-01-01T00:00:01'::timestamp + '2 years'::interval;
+------------------------------------------------------------------------------------------------+
| arrow_cast(Utf8("2023-01-01T00:00:01"),Utf8("Timestamp(Millisecond, None)")) + Utf8("2 years") |
+------------------------------------------------------------------------------------------------+
| 2025-01-01T00:00:01 |
+------------------------------------------------------------------------------------------------+
+---------------------------------------------------------------------------------+
| arrow_cast(Utf8("2023-01-01T00:00:01"),Utf8("Timestamp(ms)")) + Utf8("2 years") |
+---------------------------------------------------------------------------------+
| 2025-01-01T00:00:01 |
+---------------------------------------------------------------------------------+
-- DATE + INTERVAL
SELECT DATE '2000-10-30' + '1 days'::interval;

View File

@@ -75,31 +75,31 @@ SELECT MAX(t) FROM timestamp;
SELECT SUM(t) FROM timestamp;
Error: 3000(PlanQuery), Failed to plan SQL: Error during planning: Execution error: Function 'sum' user-defined coercion failed with "Execution error: Sum not supported for Timestamp(Millisecond, None)" No function matches the given name and argument types 'sum(Timestamp(Millisecond, None))'. You might need to add explicit type casts.
Error: 3000(PlanQuery), Failed to plan SQL: Error during planning: Execution error: Function 'sum' user-defined coercion failed with "Execution error: Sum not supported for Timestamp(ms)" No function matches the given name and argument types 'sum(Timestamp(ms))'. You might need to add explicit type casts.
Candidate functions:
sum(UserDefined)
SELECT AVG(t) FROM timestamp;
Error: 3000(PlanQuery), Failed to plan SQL: Error during planning: Execution error: Function 'avg' user-defined coercion failed with "Error during planning: The function \"avg\" does not support inputs of type Timestamp(Millisecond, None)." No function matches the given name and argument types 'avg(Timestamp(Millisecond, None))'. You might need to add explicit type casts.
Error: 3000(PlanQuery), Failed to plan SQL: Error during planning: Execution error: Function 'avg' user-defined coercion failed with "Error during planning: Avg does not support inputs of type Timestamp(ms)." No function matches the given name and argument types 'avg(Timestamp(ms))'. You might need to add explicit type casts.
Candidate functions:
avg(UserDefined)
SELECT t+t FROM timestamp;
Error: 3000(PlanQuery), Failed to plan SQL: Error during planning: Cannot get result type for temporal operation Timestamp(Millisecond, None) + Timestamp(Millisecond, None): Invalid argument error: Invalid timestamp arithmetic operation: Timestamp(Millisecond, None) + Timestamp(Millisecond, None)
Error: 3000(PlanQuery), Failed to plan SQL: Error during planning: Cannot get result type for temporal operation Timestamp(ms) + Timestamp(ms): Invalid argument error: Invalid timestamp arithmetic operation: Timestamp(ms) + Timestamp(ms)
SELECT t*t FROM timestamp;
Error: 3000(PlanQuery), Failed to plan SQL: Error during planning: Cannot get result type for temporal operation Timestamp(Millisecond, None) * Timestamp(Millisecond, None): Invalid argument error: Invalid timestamp arithmetic operation: Timestamp(Millisecond, None) * Timestamp(Millisecond, None)
Error: 3000(PlanQuery), Failed to plan SQL: Error during planning: Cannot get result type for temporal operation Timestamp(ms) * Timestamp(ms): Invalid argument error: Invalid timestamp arithmetic operation: Timestamp(ms) * Timestamp(ms)
SELECT t/t FROM timestamp;
Error: 3000(PlanQuery), Failed to plan SQL: Error during planning: Cannot get result type for temporal operation Timestamp(Millisecond, None) / Timestamp(Millisecond, None): Invalid argument error: Invalid timestamp arithmetic operation: Timestamp(Millisecond, None) / Timestamp(Millisecond, None)
Error: 3000(PlanQuery), Failed to plan SQL: Error during planning: Cannot get result type for temporal operation Timestamp(ms) / Timestamp(ms): Invalid argument error: Invalid timestamp arithmetic operation: Timestamp(ms) / Timestamp(ms)
SELECT t%t FROM timestamp;
Error: 3000(PlanQuery), Failed to plan SQL: Error during planning: Cannot get result type for temporal operation Timestamp(Millisecond, None) % Timestamp(Millisecond, None): Invalid argument error: Invalid timestamp arithmetic operation: Timestamp(Millisecond, None) % Timestamp(Millisecond, None)
Error: 3000(PlanQuery), Failed to plan SQL: Error during planning: Cannot get result type for temporal operation Timestamp(ms) % Timestamp(ms): Invalid argument error: Invalid timestamp arithmetic operation: Timestamp(ms) % Timestamp(ms)
SELECT t-t FROM timestamp;
@@ -166,11 +166,11 @@ SELECT t::TIME FROM timestamp WHERE EXTRACT(YEAR from t)=2007 ORDER BY 1;
SELECT (DATE '1992-01-01')::TIMESTAMP;
+---------------------------------------------------------------------+
| arrow_cast(Utf8("1992-01-01"),Utf8("Timestamp(Millisecond, None)")) |
+---------------------------------------------------------------------+
| 1992-01-01T00:00:00 |
+---------------------------------------------------------------------+
+------------------------------------------------------+
| arrow_cast(Utf8("1992-01-01"),Utf8("Timestamp(ms)")) |
+------------------------------------------------------+
| 1992-01-01T00:00:00 |
+------------------------------------------------------+
SELECT TIMESTAMP '2008-01-01 00:00:01.5'::VARCHAR;
@@ -195,9 +195,9 @@ Affected Rows: 0
-- SQLNESS PROTOCOL POSTGRES
SELECT '2025-04-14 20:42:19.021000'::TIMESTAMP - '2025-04-14 20:42:18.103000'::TIMESTAMP;
+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
| arrow_cast(Utf8("2025-04-14 20:42:19.021000"),Utf8("Timestamp(Millisecond, None)")) - arrow_cast(Utf8("2025-04-14 20:42:18.103000"),Utf8("Timestamp(Millisecond, None)")) |
+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
| 00:00:00.918000 |
+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
+---------------------------------------------------------------------------------------------------------------------------------------------+
| arrow_cast(Utf8("2025-04-14 20:42:19.021000"),Utf8("Timestamp(ms)")) - arrow_cast(Utf8("2025-04-14 20:42:18.103000"),Utf8("Timestamp(ms)")) |
+---------------------------------------------------------------------------------------------------------------------------------------------+
| 00:00:00.918000 |
+---------------------------------------------------------------------------------------------------------------------------------------------+

View File

@@ -33,59 +33,59 @@ SELECT EXTRACT(MICROSECONDS FROM sec), EXTRACT(MICROSECONDS FROM msec), EXTRACT(
-- any other precision is rounded up (e.g. 1/2 -> 3, 4/5 -> 6, 7/8 -> 9)
SELECT TIMESTAMP '2020-01-01 01:23:45.123456789'::TIMESTAMP(0);
+-----------------------------------------------------------------------------------+
| arrow_cast(Utf8("2020-01-01 01:23:45.123456789"),Utf8("Timestamp(Second, None)")) |
+-----------------------------------------------------------------------------------+
| 2020-01-01T01:23:45 |
+-----------------------------------------------------------------------------------+
+------------------------------------------------------------------------+
| arrow_cast(Utf8("2020-01-01 01:23:45.123456789"),Utf8("Timestamp(s)")) |
+------------------------------------------------------------------------+
| 2020-01-01T01:23:45 |
+------------------------------------------------------------------------+
SELECT TIMESTAMP '2020-01-01 01:23:45.123456789'::TIMESTAMP(3);
+----------------------------------------------------------------------------------------+
| arrow_cast(Utf8("2020-01-01 01:23:45.123456789"),Utf8("Timestamp(Millisecond, None)")) |
+----------------------------------------------------------------------------------------+
| 2020-01-01T01:23:45.123 |
+----------------------------------------------------------------------------------------+
+-------------------------------------------------------------------------+
| arrow_cast(Utf8("2020-01-01 01:23:45.123456789"),Utf8("Timestamp(ms)")) |
+-------------------------------------------------------------------------+
| 2020-01-01T01:23:45.123 |
+-------------------------------------------------------------------------+
SELECT TIMESTAMP '2020-01-01 01:23:45.123456789'::TIMESTAMP(6);
+----------------------------------------------------------------------------------------+
| arrow_cast(Utf8("2020-01-01 01:23:45.123456789"),Utf8("Timestamp(Microsecond, None)")) |
+----------------------------------------------------------------------------------------+
| 2020-01-01T01:23:45.123456 |
+----------------------------------------------------------------------------------------+
+-------------------------------------------------------------------------+
| arrow_cast(Utf8("2020-01-01 01:23:45.123456789"),Utf8("Timestamp(µs)")) |
+-------------------------------------------------------------------------+
| 2020-01-01T01:23:45.123456 |
+-------------------------------------------------------------------------+
SELECT TIMESTAMP '2020-01-01 01:23:45.123456789'::TIMESTAMP(9);
+---------------------------------------------------------------------------------------+
| arrow_cast(Utf8("2020-01-01 01:23:45.123456789"),Utf8("Timestamp(Nanosecond, None)")) |
+---------------------------------------------------------------------------------------+
| 2020-01-01T01:23:45.123456789 |
+---------------------------------------------------------------------------------------+
+-------------------------------------------------------------------------+
| arrow_cast(Utf8("2020-01-01 01:23:45.123456789"),Utf8("Timestamp(ns)")) |
+-------------------------------------------------------------------------+
| 2020-01-01T01:23:45.123456789 |
+-------------------------------------------------------------------------+
SELECT TIMESTAMP '2020-01-01 01:23:45.12'::TIMESTAMP(3);
+---------------------------------------------------------------------------------+
| arrow_cast(Utf8("2020-01-01 01:23:45.12"),Utf8("Timestamp(Millisecond, None)")) |
+---------------------------------------------------------------------------------+
| 2020-01-01T01:23:45.120 |
+---------------------------------------------------------------------------------+
+------------------------------------------------------------------+
| arrow_cast(Utf8("2020-01-01 01:23:45.12"),Utf8("Timestamp(ms)")) |
+------------------------------------------------------------------+
| 2020-01-01T01:23:45.120 |
+------------------------------------------------------------------+
SELECT TIMESTAMP '2020-01-01 01:23:45.12345'::TIMESTAMP(6);
+------------------------------------------------------------------------------------+
| arrow_cast(Utf8("2020-01-01 01:23:45.12345"),Utf8("Timestamp(Microsecond, None)")) |
+------------------------------------------------------------------------------------+
| 2020-01-01T01:23:45.123450 |
+------------------------------------------------------------------------------------+
+---------------------------------------------------------------------+
| arrow_cast(Utf8("2020-01-01 01:23:45.12345"),Utf8("Timestamp(µs)")) |
+---------------------------------------------------------------------+
| 2020-01-01T01:23:45.123450 |
+---------------------------------------------------------------------+
SELECT TIMESTAMP '2020-01-01 01:23:45.12345678'::TIMESTAMP(9);
+--------------------------------------------------------------------------------------+
| arrow_cast(Utf8("2020-01-01 01:23:45.12345678"),Utf8("Timestamp(Nanosecond, None)")) |
+--------------------------------------------------------------------------------------+
| 2020-01-01T01:23:45.123456780 |
+--------------------------------------------------------------------------------------+
+------------------------------------------------------------------------+
| arrow_cast(Utf8("2020-01-01 01:23:45.12345678"),Utf8("Timestamp(ns)")) |
+------------------------------------------------------------------------+
| 2020-01-01T01:23:45.123456780 |
+------------------------------------------------------------------------+
DROP TABLE ts_precision;
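
Nothing in the values above changes; only the rendered name of the Arrow timestamp type does. arrow 57 abbreviates the Display form of DataType, so Timestamp(Second, None), Timestamp(Millisecond, None), Timestamp(Microsecond, None) and Timestamp(Nanosecond, None) now print as Timestamp(s), Timestamp(ms), Timestamp(µs) and Timestamp(ns), which is why every generated column header (and the Utf8 type-string argument inside arrow_cast) shrinks. A minimal sketch of the new rendering, assuming only the arrow-schema 57 crate:

use arrow_schema::{DataType, TimeUnit};

fn main() {
    // Under arrow 56 this printed "Timestamp(Millisecond, None)";
    // arrow 57 abbreviates the unit and drops the absent time zone.
    let ty = DataType::Timestamp(TimeUnit::Millisecond, None);
    println!("{ty}"); // Timestamp(ms)
}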


@@ -25,11 +25,11 @@ SELECT extract(YEAR from sec),extract( YEAR from milli),extract(YEAR from nano)
SELECT nano::TIMESTAMP, milli::TIMESTAMP,sec::TIMESTAMP from timestamp;
+-----------------------------------------------------------------+------------------------------------------------------------------+----------------------------------------------------------------+
| arrow_cast(timestamp.nano,Utf8("Timestamp(Millisecond, None)")) | arrow_cast(timestamp.milli,Utf8("Timestamp(Millisecond, None)")) | arrow_cast(timestamp.sec,Utf8("Timestamp(Millisecond, None)")) |
+-----------------------------------------------------------------+------------------------------------------------------------------+----------------------------------------------------------------+
| 2008-01-01T00:00:01.889 | 2008-01-01T00:00:01.594 | 2008-01-01T00:00:01 |
+-----------------------------------------------------------------+------------------------------------------------------------------+----------------------------------------------------------------+
+--------------------------------------------------+---------------------------------------------------+-------------------------------------------------+
| arrow_cast(timestamp.nano,Utf8("Timestamp(ms)")) | arrow_cast(timestamp.milli,Utf8("Timestamp(ms)")) | arrow_cast(timestamp.sec,Utf8("Timestamp(ms)")) |
+--------------------------------------------------+---------------------------------------------------+-------------------------------------------------+
| 2008-01-01T00:00:01.889 | 2008-01-01T00:00:01.594 | 2008-01-01T00:00:01 |
+--------------------------------------------------+---------------------------------------------------+-------------------------------------------------+
SELECT micro::TIMESTAMP_S as m1, micro::TIMESTAMP_MS as m2,micro::TIMESTAMP_NS as m3 from timestamp;
@@ -113,63 +113,63 @@ select nano::TIME from timestamp;
select sec::TIMESTAMP_MS from timestamp;
+----------------------------------------------------------------+
| arrow_cast(timestamp.sec,Utf8("Timestamp(Millisecond, None)")) |
+----------------------------------------------------------------+
| 2008-01-01T00:00:01 |
| 2008-01-01T00:00:51 |
| 2008-01-01T00:00:11 |
+----------------------------------------------------------------+
+-------------------------------------------------+
| arrow_cast(timestamp.sec,Utf8("Timestamp(ms)")) |
+-------------------------------------------------+
| 2008-01-01T00:00:01 |
| 2008-01-01T00:00:51 |
| 2008-01-01T00:00:11 |
+-------------------------------------------------+
select sec::TIMESTAMP_NS from timestamp;
+---------------------------------------------------------------+
| arrow_cast(timestamp.sec,Utf8("Timestamp(Nanosecond, None)")) |
+---------------------------------------------------------------+
| 2008-01-01T00:00:01 |
| 2008-01-01T00:00:51 |
| 2008-01-01T00:00:11 |
+---------------------------------------------------------------+
+-------------------------------------------------+
| arrow_cast(timestamp.sec,Utf8("Timestamp(ns)")) |
+-------------------------------------------------+
| 2008-01-01T00:00:01 |
| 2008-01-01T00:00:51 |
| 2008-01-01T00:00:11 |
+-------------------------------------------------+
select milli::TIMESTAMP_SEC from timestamp;
+-------------------------------------------------------------+
| arrow_cast(timestamp.milli,Utf8("Timestamp(Second, None)")) |
+-------------------------------------------------------------+
| 2008-01-01T00:00:01 |
| 2008-01-01T00:00:01 |
| 2008-01-01T00:00:01 |
+-------------------------------------------------------------+
+--------------------------------------------------+
| arrow_cast(timestamp.milli,Utf8("Timestamp(s)")) |
+--------------------------------------------------+
| 2008-01-01T00:00:01 |
| 2008-01-01T00:00:01 |
| 2008-01-01T00:00:01 |
+--------------------------------------------------+
select milli::TIMESTAMP_NS from timestamp;
+-----------------------------------------------------------------+
| arrow_cast(timestamp.milli,Utf8("Timestamp(Nanosecond, None)")) |
+-----------------------------------------------------------------+
| 2008-01-01T00:00:01.594 |
| 2008-01-01T00:00:01.894 |
| 2008-01-01T00:00:01.794 |
+-----------------------------------------------------------------+
+---------------------------------------------------+
| arrow_cast(timestamp.milli,Utf8("Timestamp(ns)")) |
+---------------------------------------------------+
| 2008-01-01T00:00:01.594 |
| 2008-01-01T00:00:01.894 |
| 2008-01-01T00:00:01.794 |
+---------------------------------------------------+
select nano::TIMESTAMP_SEC from timestamp;
+------------------------------------------------------------+
| arrow_cast(timestamp.nano,Utf8("Timestamp(Second, None)")) |
+------------------------------------------------------------+
| 2008-01-01T00:00:01 |
| 2008-01-01T00:00:01 |
| 2008-01-01T00:00:01 |
+------------------------------------------------------------+
+-------------------------------------------------+
| arrow_cast(timestamp.nano,Utf8("Timestamp(s)")) |
+-------------------------------------------------+
| 2008-01-01T00:00:01 |
| 2008-01-01T00:00:01 |
| 2008-01-01T00:00:01 |
+-------------------------------------------------+
select nano::TIMESTAMP_MS from timestamp;
+-----------------------------------------------------------------+
| arrow_cast(timestamp.nano,Utf8("Timestamp(Millisecond, None)")) |
+-----------------------------------------------------------------+
| 2008-01-01T00:00:01.889 |
| 2008-01-01T00:00:01.999 |
| 2008-01-01T00:00:01.899 |
+-----------------------------------------------------------------+
+--------------------------------------------------+
| arrow_cast(timestamp.nano,Utf8("Timestamp(ms)")) |
+--------------------------------------------------+
| 2008-01-01T00:00:01.889 |
| 2008-01-01T00:00:01.999 |
| 2008-01-01T00:00:01.899 |
+--------------------------------------------------+
select sec from timestamp order by sec;
@@ -276,99 +276,99 @@ select timestamp.nano from timestamp inner join timestamp_two on (timestamp.nan
select '2008-01-01 00:00:11'::TIMESTAMP_US = '2008-01-01 00:00:11'::TIMESTAMP_MS;
+-------------------------------------------------------------------------------------------------------------------------------------------------------------+
| arrow_cast(Utf8("2008-01-01 00:00:11"),Utf8("Timestamp(Microsecond, None)")) = arrow_cast(Utf8("2008-01-01 00:00:11"),Utf8("Timestamp(Millisecond, None)")) |
+-------------------------------------------------------------------------------------------------------------------------------------------------------------+
| true |
+-------------------------------------------------------------------------------------------------------------------------------------------------------------+
+-------------------------------------------------------------------------------------------------------------------------------+
| arrow_cast(Utf8("2008-01-01 00:00:11"),Utf8("Timestamp(µs)")) = arrow_cast(Utf8("2008-01-01 00:00:11"),Utf8("Timestamp(ms)")) |
+-------------------------------------------------------------------------------------------------------------------------------+
| true |
+-------------------------------------------------------------------------------------------------------------------------------+
select '2008-01-01 00:00:11'::TIMESTAMP_US = '2008-01-01 00:00:11'::TIMESTAMP_NS;
+------------------------------------------------------------------------------------------------------------------------------------------------------------+
| arrow_cast(Utf8("2008-01-01 00:00:11"),Utf8("Timestamp(Microsecond, None)")) = arrow_cast(Utf8("2008-01-01 00:00:11"),Utf8("Timestamp(Nanosecond, None)")) |
+------------------------------------------------------------------------------------------------------------------------------------------------------------+
| true |
+------------------------------------------------------------------------------------------------------------------------------------------------------------+
+-------------------------------------------------------------------------------------------------------------------------------+
| arrow_cast(Utf8("2008-01-01 00:00:11"),Utf8("Timestamp(µs)")) = arrow_cast(Utf8("2008-01-01 00:00:11"),Utf8("Timestamp(ns)")) |
+-------------------------------------------------------------------------------------------------------------------------------+
| true |
+-------------------------------------------------------------------------------------------------------------------------------+
select '2008-01-01 00:00:11'::TIMESTAMP_US = '2008-01-01 00:00:11'::TIMESTAMP_S;
+--------------------------------------------------------------------------------------------------------------------------------------------------------+
| arrow_cast(Utf8("2008-01-01 00:00:11"),Utf8("Timestamp(Microsecond, None)")) = arrow_cast(Utf8("2008-01-01 00:00:11"),Utf8("Timestamp(Second, None)")) |
+--------------------------------------------------------------------------------------------------------------------------------------------------------+
| true |
+--------------------------------------------------------------------------------------------------------------------------------------------------------+
+------------------------------------------------------------------------------------------------------------------------------+
| arrow_cast(Utf8("2008-01-01 00:00:11"),Utf8("Timestamp(µs)")) = arrow_cast(Utf8("2008-01-01 00:00:11"),Utf8("Timestamp(s)")) |
+------------------------------------------------------------------------------------------------------------------------------+
| true |
+------------------------------------------------------------------------------------------------------------------------------+
select '2008-01-01 00:00:11.1'::TIMESTAMP_US = '2008-01-01 00:00:11'::TIMESTAMP_MS;
+---------------------------------------------------------------------------------------------------------------------------------------------------------------+
| arrow_cast(Utf8("2008-01-01 00:00:11.1"),Utf8("Timestamp(Microsecond, None)")) = arrow_cast(Utf8("2008-01-01 00:00:11"),Utf8("Timestamp(Millisecond, None)")) |
+---------------------------------------------------------------------------------------------------------------------------------------------------------------+
| false |
+---------------------------------------------------------------------------------------------------------------------------------------------------------------+
+---------------------------------------------------------------------------------------------------------------------------------+
| arrow_cast(Utf8("2008-01-01 00:00:11.1"),Utf8("Timestamp(µs)")) = arrow_cast(Utf8("2008-01-01 00:00:11"),Utf8("Timestamp(ms)")) |
+---------------------------------------------------------------------------------------------------------------------------------+
| false |
+---------------------------------------------------------------------------------------------------------------------------------+
select '2008-01-01 00:00:11.1'::TIMESTAMP_US = '2008-01-01 00:00:11'::TIMESTAMP_NS;
+--------------------------------------------------------------------------------------------------------------------------------------------------------------+
| arrow_cast(Utf8("2008-01-01 00:00:11.1"),Utf8("Timestamp(Microsecond, None)")) = arrow_cast(Utf8("2008-01-01 00:00:11"),Utf8("Timestamp(Nanosecond, None)")) |
+--------------------------------------------------------------------------------------------------------------------------------------------------------------+
| false |
+--------------------------------------------------------------------------------------------------------------------------------------------------------------+
+---------------------------------------------------------------------------------------------------------------------------------+
| arrow_cast(Utf8("2008-01-01 00:00:11.1"),Utf8("Timestamp(µs)")) = arrow_cast(Utf8("2008-01-01 00:00:11"),Utf8("Timestamp(ns)")) |
+---------------------------------------------------------------------------------------------------------------------------------+
| false |
+---------------------------------------------------------------------------------------------------------------------------------+
select '2008-01-01 00:00:11.1'::TIMESTAMP_US = '2008-01-01 00:00:11.1'::TIMESTAMP_S;
+------------------------------------------------------------------------------------------------------------------------------------------------------------+
| arrow_cast(Utf8("2008-01-01 00:00:11.1"),Utf8("Timestamp(Microsecond, None)")) = arrow_cast(Utf8("2008-01-01 00:00:11.1"),Utf8("Timestamp(Second, None)")) |
+------------------------------------------------------------------------------------------------------------------------------------------------------------+
| true |
+------------------------------------------------------------------------------------------------------------------------------------------------------------+
+----------------------------------------------------------------------------------------------------------------------------------+
| arrow_cast(Utf8("2008-01-01 00:00:11.1"),Utf8("Timestamp(µs)")) = arrow_cast(Utf8("2008-01-01 00:00:11.1"),Utf8("Timestamp(s)")) |
+----------------------------------------------------------------------------------------------------------------------------------+
| true |
+----------------------------------------------------------------------------------------------------------------------------------+
select '2008-01-01 00:00:11.1'::TIMESTAMP_MS = '2008-01-01 00:00:11'::TIMESTAMP_NS;
+--------------------------------------------------------------------------------------------------------------------------------------------------------------+
| arrow_cast(Utf8("2008-01-01 00:00:11.1"),Utf8("Timestamp(Millisecond, None)")) = arrow_cast(Utf8("2008-01-01 00:00:11"),Utf8("Timestamp(Nanosecond, None)")) |
+--------------------------------------------------------------------------------------------------------------------------------------------------------------+
| false |
+--------------------------------------------------------------------------------------------------------------------------------------------------------------+
+---------------------------------------------------------------------------------------------------------------------------------+
| arrow_cast(Utf8("2008-01-01 00:00:11.1"),Utf8("Timestamp(ms)")) = arrow_cast(Utf8("2008-01-01 00:00:11"),Utf8("Timestamp(ns)")) |
+---------------------------------------------------------------------------------------------------------------------------------+
| false |
+---------------------------------------------------------------------------------------------------------------------------------+
select '2008-01-01 00:00:11.1'::TIMESTAMP_MS = '2008-01-01 00:00:11'::TIMESTAMP_S;
+----------------------------------------------------------------------------------------------------------------------------------------------------------+
| arrow_cast(Utf8("2008-01-01 00:00:11.1"),Utf8("Timestamp(Millisecond, None)")) = arrow_cast(Utf8("2008-01-01 00:00:11"),Utf8("Timestamp(Second, None)")) |
+----------------------------------------------------------------------------------------------------------------------------------------------------------+
| true |
+----------------------------------------------------------------------------------------------------------------------------------------------------------+
+--------------------------------------------------------------------------------------------------------------------------------+
| arrow_cast(Utf8("2008-01-01 00:00:11.1"),Utf8("Timestamp(ms)")) = arrow_cast(Utf8("2008-01-01 00:00:11"),Utf8("Timestamp(s)")) |
+--------------------------------------------------------------------------------------------------------------------------------+
| true |
+--------------------------------------------------------------------------------------------------------------------------------+
select '2008-01-01 00:00:11.1'::TIMESTAMP_NS = '2008-01-01 00:00:11'::TIMESTAMP_S;
+---------------------------------------------------------------------------------------------------------------------------------------------------------+
| arrow_cast(Utf8("2008-01-01 00:00:11.1"),Utf8("Timestamp(Nanosecond, None)")) = arrow_cast(Utf8("2008-01-01 00:00:11"),Utf8("Timestamp(Second, None)")) |
+---------------------------------------------------------------------------------------------------------------------------------------------------------+
| true |
+---------------------------------------------------------------------------------------------------------------------------------------------------------+
+--------------------------------------------------------------------------------------------------------------------------------+
| arrow_cast(Utf8("2008-01-01 00:00:11.1"),Utf8("Timestamp(ns)")) = arrow_cast(Utf8("2008-01-01 00:00:11"),Utf8("Timestamp(s)")) |
+--------------------------------------------------------------------------------------------------------------------------------+
| true |
+--------------------------------------------------------------------------------------------------------------------------------+
select '2008-01-01 00:00:11'::TIMESTAMP_MS = '2008-01-01 00:00:11'::TIMESTAMP_NS;
+------------------------------------------------------------------------------------------------------------------------------------------------------------+
| arrow_cast(Utf8("2008-01-01 00:00:11"),Utf8("Timestamp(Millisecond, None)")) = arrow_cast(Utf8("2008-01-01 00:00:11"),Utf8("Timestamp(Nanosecond, None)")) |
+------------------------------------------------------------------------------------------------------------------------------------------------------------+
| true |
+------------------------------------------------------------------------------------------------------------------------------------------------------------+
+-------------------------------------------------------------------------------------------------------------------------------+
| arrow_cast(Utf8("2008-01-01 00:00:11"),Utf8("Timestamp(ms)")) = arrow_cast(Utf8("2008-01-01 00:00:11"),Utf8("Timestamp(ns)")) |
+-------------------------------------------------------------------------------------------------------------------------------+
| true |
+-------------------------------------------------------------------------------------------------------------------------------+
select '2008-01-01 00:00:11'::TIMESTAMP_MS = '2008-01-01 00:00:11'::TIMESTAMP_S;
+--------------------------------------------------------------------------------------------------------------------------------------------------------+
| arrow_cast(Utf8("2008-01-01 00:00:11"),Utf8("Timestamp(Millisecond, None)")) = arrow_cast(Utf8("2008-01-01 00:00:11"),Utf8("Timestamp(Second, None)")) |
+--------------------------------------------------------------------------------------------------------------------------------------------------------+
| true |
+--------------------------------------------------------------------------------------------------------------------------------------------------------+
+------------------------------------------------------------------------------------------------------------------------------+
| arrow_cast(Utf8("2008-01-01 00:00:11"),Utf8("Timestamp(ms)")) = arrow_cast(Utf8("2008-01-01 00:00:11"),Utf8("Timestamp(s)")) |
+------------------------------------------------------------------------------------------------------------------------------+
| true |
+------------------------------------------------------------------------------------------------------------------------------+
select '2008-01-01 00:00:11'::TIMESTAMP_NS = '2008-01-01 00:00:11'::TIMESTAMP_S;
+-------------------------------------------------------------------------------------------------------------------------------------------------------+
| arrow_cast(Utf8("2008-01-01 00:00:11"),Utf8("Timestamp(Nanosecond, None)")) = arrow_cast(Utf8("2008-01-01 00:00:11"),Utf8("Timestamp(Second, None)")) |
+-------------------------------------------------------------------------------------------------------------------------------------------------------+
| true |
+-------------------------------------------------------------------------------------------------------------------------------------------------------+
+------------------------------------------------------------------------------------------------------------------------------+
| arrow_cast(Utf8("2008-01-01 00:00:11"),Utf8("Timestamp(ns)")) = arrow_cast(Utf8("2008-01-01 00:00:11"),Utf8("Timestamp(s)")) |
+------------------------------------------------------------------------------------------------------------------------------+
| true |
+------------------------------------------------------------------------------------------------------------------------------+
DROP TABLE timestamp;
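
The comparison fixtures keep their truth values; only the type names shrink. The results are consistent with fractional seconds being truncated at cast time and with comparison coercing toward the coarser unit when the operands disagree; a toy check of that arithmetic in plain integer math (an illustration of the observed results, not the engine's actual code path):

// Truncate a nanosecond count to a coarser unit, mimicking what the
// fixtures above observe for cross-precision timestamp equality.
fn truncate(nanos: i64, unit: i64) -> i64 {
    (nanos / unit) * unit
}

fn main() {
    const S: i64 = 1_000_000_000;
    const MS: i64 = 1_000_000;
    let lhs: i64 = 11_100_000_000; // '00:00:11.1' as nanoseconds past the minute
    let rhs: i64 = 11_000_000_000; // '00:00:11'
    // '...11.1'::TIMESTAMP_MS = '...11'::TIMESTAMP_S compares at seconds: true
    assert_eq!(truncate(lhs, S), truncate(rhs, S));
    // '...11.1'::TIMESTAMP_US = '...11'::TIMESTAMP_MS compares at milliseconds: false
    assert_ne!(truncate(lhs, MS), truncate(rhs, MS));
    println!("ok");
}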


@@ -8,25 +8,25 @@ select timestamptz '2021-11-15 02:30:00';
select '2021-11-15 02:30:00'::TIMESTAMP::TIMESTAMPTZ;
+-------------------------------------------------------------------------------------------------------------------------------+
| arrow_cast(arrow_cast(Utf8("2021-11-15 02:30:00"),Utf8("Timestamp(Millisecond, None)")),Utf8("Timestamp(Millisecond, None)")) |
+-------------------------------------------------------------------------------------------------------------------------------+
| 2021-11-15T02:30:00 |
+-------------------------------------------------------------------------------------------------------------------------------+
+-------------------------------------------------------------------------------------------------+
| arrow_cast(arrow_cast(Utf8("2021-11-15 02:30:00"),Utf8("Timestamp(ms)")),Utf8("Timestamp(ms)")) |
+-------------------------------------------------------------------------------------------------+
| 2021-11-15T02:30:00 |
+-------------------------------------------------------------------------------------------------+
SELECT '2021-04-29 10:50:09-05'::TIMESTAMPTZ::DATE;
+---------------------------------------------------------------------------------+
| arrow_cast(Utf8("2021-04-29 10:50:09-05"),Utf8("Timestamp(Millisecond, None)")) |
+---------------------------------------------------------------------------------+
| 2021-04-29 |
+---------------------------------------------------------------------------------+
+------------------------------------------------------------------+
| arrow_cast(Utf8("2021-04-29 10:50:09-05"),Utf8("Timestamp(ms)")) |
+------------------------------------------------------------------+
| 2021-04-29 |
+------------------------------------------------------------------+
SELECT '2021-04-29 10:50:09-05'::TIMESTAMPTZ::TIME;
+---------------------------------------------------------------------------------+
| arrow_cast(Utf8("2021-04-29 10:50:09-05"),Utf8("Timestamp(Millisecond, None)")) |
+---------------------------------------------------------------------------------+
| 15:50:09 |
+---------------------------------------------------------------------------------+
+------------------------------------------------------------------+
| arrow_cast(Utf8("2021-04-29 10:50:09-05"),Utf8("Timestamp(ms)")) |
+------------------------------------------------------------------+
| 15:50:09 |
+------------------------------------------------------------------+


@@ -1,6 +1,6 @@
-- Migrated from DuckDB test: Multiple window aggregate tests
-- Tests window aggregate functions
CREATE TABLE sales(region VARCHAR, "quarter" INTEGER, amount INTEGER, ts TIMESTAMP TIME INDEX);
CREATE TABLE sales("region" VARCHAR, "quarter" INTEGER, amount INTEGER, ts TIMESTAMP TIME INDEX);
Affected Rows: 0
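
The only edit in this fixture (and in the matching .sql hunk below) is that region is now quoted like "quarter" already was, presumably because the sqlparser release pulled in by DataFusion 51 treats REGION as a keyword. A hedged probe with sqlparser-rs; the GenericDialect choice is an assumption, since GreptimeDB parses with its own dialect:

use sqlparser::dialect::GenericDialect;
use sqlparser::parser::Parser;

fn main() {
    // Check whether the parser accepts the identifier bare vs. quoted.
    for sql in [
        r#"CREATE TABLE sales(region VARCHAR, "quarter" INTEGER)"#,
        r#"CREATE TABLE sales("region" VARCHAR, "quarter" INTEGER)"#,
    ] {
        let ok = Parser::parse_sql(&GenericDialect {}, sql).is_ok();
        println!("{ok}: {sql}");
    }
}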


@@ -1,7 +1,7 @@
-- Migrated from DuckDB test: Multiple window aggregate tests
-- Tests window aggregate functions
CREATE TABLE sales(region VARCHAR, "quarter" INTEGER, amount INTEGER, ts TIMESTAMP TIME INDEX);
CREATE TABLE sales("region" VARCHAR, "quarter" INTEGER, amount INTEGER, ts TIMESTAMP TIME INDEX);
INSERT INTO sales VALUES
('North', 1, 1000, 1000), ('North', 2, 1200, 2000), ('North', 3, 1100, 3000),


@@ -1,101 +1,101 @@
-- SQLNESS REPLACE (RoundRobinBatch.*) REDACTED
explain select * from numbers;
+---------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
| plan_type | plan |
+---------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
| logical_plan | MergeScan [is_placeholder=false, remote_input=[ |
| | Projection: numbers.number |
| | TableScan: numbers |
| | ]] |
| physical_plan | CooperativeExec |
+---------------+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
| plan_type | plan |
+---------------+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
| logical_plan | MergeScan [is_placeholder=false, remote_input=[ |
| | Projection: numbers.number |
| | TableScan: numbers |
| | ]] |
| physical_plan | CooperativeExec |
| | RepartitionExec: partitioning=REDACTED
| | CooperativeExec |
| | StreamScanAdapter: [<SendableRecordBatchStream>], schema: [Schema { fields: [Field { name: "number", data_type: UInt32, nullable: false, dict_id: 0, dict_is_ordered: false, metadata: {} }], metadata: {"greptime:version": "0"} }] |
| | |
+---------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
| | CooperativeExec |
| | StreamScanAdapter: [<SendableRecordBatchStream>], schema: [Schema { fields: [Field { name: "number", data_type: UInt32 }], metadata: {"greptime:version": "0"} }] |
| | |
+---------------+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
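
Beyond the Timestamp(ms)-style abbreviation, these plan diffs show the second arrow 57 formatting change: Field's deprecated dict_id / dict_is_ordered attributes are gone, and the Debug output embedded in StreamScanAdapter's schema dump now elides the remaining attributes that match their defaults, so each Field collapses to its name and data type. A sketch, again assuming arrow-schema 57 (the elision rule is inferred from the output above):

use arrow_schema::{DataType, Field};

fn main() {
    let field = Field::new("number", DataType::UInt32, false);
    // arrow 56: Field { name: "number", data_type: UInt32, nullable: false,
    //           dict_id: 0, dict_is_ordered: false, metadata: {} }
    // arrow 57: Field { name: "number", data_type: UInt32 }
    println!("{field:?}");
}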
-- SQLNESS REPLACE (RoundRobinBatch.*) REDACTED
explain select * from numbers order by number desc;
+---------------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
| plan_type | plan |
+---------------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
| logical_plan | MergeScan [is_placeholder=false, remote_input=[ |
| | Sort: numbers.number DESC NULLS FIRST |
| | Projection: numbers.number |
| | TableScan: numbers |
| | ]] |
| physical_plan | SortPreservingMergeExec: [number@0 DESC] |
| | CooperativeExec |
| | SortExec: expr=[number@0 DESC], preserve_partitioning=[true] |
+---------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
| plan_type | plan |
+---------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
| logical_plan | MergeScan [is_placeholder=false, remote_input=[ |
| | Sort: numbers.number DESC NULLS FIRST |
| | Projection: numbers.number |
| | TableScan: numbers |
| | ]] |
| physical_plan | SortPreservingMergeExec: [number@0 DESC] |
| | CooperativeExec |
| | SortExec: expr=[number@0 DESC], preserve_partitioning=[true] |
| | RepartitionExec: partitioning=REDACTED
| | CooperativeExec |
| | StreamScanAdapter: [<SendableRecordBatchStream>], schema: [Schema { fields: [Field { name: "number", data_type: UInt32, nullable: false, dict_id: 0, dict_is_ordered: false, metadata: {} }], metadata: {"greptime:version": "0"} }] |
| | |
+---------------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
| | CooperativeExec |
| | StreamScanAdapter: [<SendableRecordBatchStream>], schema: [Schema { fields: [Field { name: "number", data_type: UInt32 }], metadata: {"greptime:version": "0"} }] |
| | |
+---------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
-- SQLNESS REPLACE (RoundRobinBatch.*) REDACTED
explain select * from numbers order by number asc;
+---------------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
| plan_type | plan |
+---------------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
| logical_plan | MergeScan [is_placeholder=false, remote_input=[ |
| | Sort: numbers.number ASC NULLS LAST |
| | Projection: numbers.number |
| | TableScan: numbers |
| | ]] |
| physical_plan | SortPreservingMergeExec: [number@0 ASC NULLS LAST] |
| | CooperativeExec |
| | SortExec: expr=[number@0 ASC NULLS LAST], preserve_partitioning=[true] |
+---------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
| plan_type | plan |
+---------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
| logical_plan | MergeScan [is_placeholder=false, remote_input=[ |
| | Sort: numbers.number ASC NULLS LAST |
| | Projection: numbers.number |
| | TableScan: numbers |
| | ]] |
| physical_plan | SortPreservingMergeExec: [number@0 ASC NULLS LAST] |
| | CooperativeExec |
| | SortExec: expr=[number@0 ASC NULLS LAST], preserve_partitioning=[true] |
| | RepartitionExec: partitioning=REDACTED
| | CooperativeExec |
| | StreamScanAdapter: [<SendableRecordBatchStream>], schema: [Schema { fields: [Field { name: "number", data_type: UInt32, nullable: false, dict_id: 0, dict_is_ordered: false, metadata: {} }], metadata: {"greptime:version": "0"} }] |
| | |
+---------------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
| | CooperativeExec |
| | StreamScanAdapter: [<SendableRecordBatchStream>], schema: [Schema { fields: [Field { name: "number", data_type: UInt32 }], metadata: {"greptime:version": "0"} }] |
| | |
+---------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
-- SQLNESS REPLACE (RoundRobinBatch.*) REDACTED
explain select * from numbers order by number desc limit 10;
+---------------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
| plan_type | plan |
+---------------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
| logical_plan | MergeScan [is_placeholder=false, remote_input=[ |
| | Limit: skip=0, fetch=10 |
| | Sort: numbers.number DESC NULLS FIRST |
| | Projection: numbers.number |
| | TableScan: numbers |
| | ]] |
| physical_plan | SortPreservingMergeExec: [number@0 DESC], fetch=10 |
| | CooperativeExec |
+---------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
| plan_type | plan |
+---------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
| logical_plan | MergeScan [is_placeholder=false, remote_input=[ |
| | Limit: skip=0, fetch=10 |
| | Sort: numbers.number DESC NULLS FIRST |
| | Projection: numbers.number |
| | TableScan: numbers |
| | ]] |
| physical_plan | SortPreservingMergeExec: [number@0 DESC], fetch=10 |
| | CooperativeExec |
| | RepartitionExec: partitioning=REDACTED
| | SortExec: TopK(fetch=10), expr=[number@0 DESC], preserve_partitioning=[false] |
| | CooperativeExec |
| | StreamScanAdapter: [<SendableRecordBatchStream>], schema: [Schema { fields: [Field { name: "number", data_type: UInt32, nullable: false, dict_id: 0, dict_is_ordered: false, metadata: {} }], metadata: {"greptime:version": "0"} }] |
| | |
+---------------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
| | SortExec: TopK(fetch=10), expr=[number@0 DESC], preserve_partitioning=[false] |
| | CooperativeExec |
| | StreamScanAdapter: [<SendableRecordBatchStream>], schema: [Schema { fields: [Field { name: "number", data_type: UInt32 }], metadata: {"greptime:version": "0"} }] |
| | |
+---------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
-- SQLNESS REPLACE (RoundRobinBatch.*) REDACTED
explain select * from numbers order by number asc limit 10;
+---------------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
| plan_type | plan |
+---------------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
| logical_plan | MergeScan [is_placeholder=false, remote_input=[ |
| | Limit: skip=0, fetch=10 |
| | Sort: numbers.number ASC NULLS LAST |
| | Projection: numbers.number |
| | TableScan: numbers |
| | ]] |
| physical_plan | SortPreservingMergeExec: [number@0 ASC NULLS LAST], fetch=10 |
| | CooperativeExec |
+---------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
| plan_type | plan |
+---------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
| logical_plan | MergeScan [is_placeholder=false, remote_input=[ |
| | Limit: skip=0, fetch=10 |
| | Sort: numbers.number ASC NULLS LAST |
| | Projection: numbers.number |
| | TableScan: numbers |
| | ]] |
| physical_plan | SortPreservingMergeExec: [number@0 ASC NULLS LAST], fetch=10 |
| | CooperativeExec |
| | RepartitionExec: partitioning=REDACTED
| | SortExec: TopK(fetch=10), expr=[number@0 ASC NULLS LAST], preserve_partitioning=[false] |
| | CooperativeExec |
| | StreamScanAdapter: [<SendableRecordBatchStream>], schema: [Schema { fields: [Field { name: "number", data_type: UInt32, nullable: false, dict_id: 0, dict_is_ordered: false, metadata: {} }], metadata: {"greptime:version": "0"} }] |
| | |
+---------------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
| | SortExec: TopK(fetch=10), expr=[number@0 ASC NULLS LAST], preserve_partitioning=[false] |
| | CooperativeExec |
| | StreamScanAdapter: [<SendableRecordBatchStream>], schema: [Schema { fields: [Field { name: "number", data_type: UInt32 }], metadata: {"greptime:version": "0"} }] |
| | |
+---------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
CREATE TABLE test_pk(pk INTEGER PRIMARY KEY, i INTEGER, t TIMESTAMP TIME INDEX) WITH('compaction.type'='twcs');