diff --git a/Cargo.lock b/Cargo.lock
index cf6085bdd8..b491895cf5 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -8940,8 +8940,9 @@ dependencies = [
 [[package]]
 name = "sqlness"
-version = "0.4.3"
-source = "git+https://github.com/CeresDB/sqlness.git?rev=a4663365795d2067eb53966c383e1bb0c89c7627#a4663365795d2067eb53966c383e1bb0c89c7627"
+version = "0.5.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "0860f149718809371602b42573693e1ed2b1d0aed35fe69e04e4e4e9918d81f7"
 dependencies = [
 "async-trait",
 "derive_builder 0.11.2",
diff --git a/tests/cases/distributed/alter/rename_table.result b/tests/cases/distributed/alter/rename_table.result
index dbc77584e6..6e703c1010 100644
--- a/tests/cases/distributed/alter/rename_table.result
+++ b/tests/cases/distributed/alter/rename_table.result
@@ -25,6 +25,7 @@ SELECT * from t;
 | | 4 |
 +---+---+
+-- TODO(LFC): Port test cases from standalone env when distributed rename table is implemented (#723).
 ALTER TABLE t RENAME new_table;
 Affected Rows: 0
@@ -33,6 +34,7 @@ DROP TABLE t;
 Error: 4001(TableNotFound), Table not found: greptime.public.t
+-- TODO: this clause should succeed
 -- SQLNESS REPLACE details.*
 DROP TABLE new_table;
diff --git a/tests/cases/distributed/optimizer/filter_push_down.result b/tests/cases/distributed/optimizer/filter_push_down.result
index 6859a0b7ed..4c06353723 100644
--- a/tests/cases/distributed/optimizer/filter_push_down.result
+++ b/tests/cases/distributed/optimizer/filter_push_down.result
@@ -180,6 +180,16 @@ SELECT i FROM (SELECT * FROM integers i1 UNION SELECT * FROM integers i2) a WHER
 | 3 |
 +---+
+-- TODO(LFC): Occasionally the following SQL is not ordered by column 1 under the new DataFusion. Needs further investigation; commented out temporarily.
+-- expected:
+-- +---+---+--------------+
+-- | a | b | ROW_NUMBER() |
+-- +---+---+--------------+
+-- | 1 | 1 | 1 |
+-- | 2 | 2 | 5 |
+-- | 3 | 3 | 9 |
+-- +---+---+--------------+
+-- SELECT * FROM (SELECT i1.i AS a, i2.i AS b, row_number() OVER (ORDER BY i1.i, i2.i) FROM integers i1, integers i2 WHERE i1.i IS NOT NULL AND i2.i IS NOT NULL) a1 WHERE a=b ORDER BY 1;
 SELECT * FROM (SELECT 0=1 AS cond FROM integers i1, integers i2) a1 WHERE cond ORDER BY 1;
 ++
diff --git a/tests/cases/distributed/tql-explain-analyze/analyze.result b/tests/cases/distributed/tql-explain-analyze/analyze.result
index 4087943ce8..1cd0c5c988 100644
--- a/tests/cases/distributed/tql-explain-analyze/analyze.result
+++ b/tests/cases/distributed/tql-explain-analyze/analyze.result
@@ -2,10 +2,12 @@ CREATE TABLE test(i DOUBLE, j TIMESTAMP TIME INDEX, k STRING PRIMARY KEY);
 Affected Rows: 0
+-- insert two points at 1ms and one point at 2ms
 INSERT INTO test VALUES (1, 1, "a"), (1, 1, "b"), (2, 2, "a");
 Affected Rows: 3
+-- analyze at 0s, 5s and 10s. No point at 0s.
 -- SQLNESS REPLACE (metrics.*) REDACTED
 -- SQLNESS REPLACE (RoundRobinBatch.*) REDACTED
 -- SQLNESS REPLACE (-+) -
diff --git a/tests/cases/distributed/tql-explain-analyze/explain.result b/tests/cases/distributed/tql-explain-analyze/explain.result
index cac729473b..2be8b54bfa 100644
--- a/tests/cases/distributed/tql-explain-analyze/explain.result
+++ b/tests/cases/distributed/tql-explain-analyze/explain.result
@@ -2,10 +2,12 @@ CREATE TABLE test(i DOUBLE, j TIMESTAMP TIME INDEX, k STRING PRIMARY KEY);
 Affected Rows: 0
+-- insert two points at 1ms and one point at 2ms
 INSERT INTO test VALUES (1, 1, "a"), (1, 1, "b"), (2, 2, "a");
 Affected Rows: 3
+-- explain at 0s, 5s and 10s. No point at 0s.
 -- SQLNESS REPLACE (RoundRobinBatch.*) REDACTED
 -- SQLNESS REPLACE (peer-.*) REDACTED
 TQL EXPLAIN (0, 10, '5s') test;
diff --git a/tests/cases/standalone/common/aggregate/distinct_order_by.result b/tests/cases/standalone/common/aggregate/distinct_order_by.result
index bacfd3badb..81649b776a 100644
--- a/tests/cases/standalone/common/aggregate/distinct_order_by.result
+++ b/tests/cases/standalone/common/aggregate/distinct_order_by.result
@@ -15,6 +15,14 @@ SELECT DISTINCT i%2 FROM integers ORDER BY 1;
 | 1 |
 +-----------------------+
+-- TODO(LFC): Failed to run under new DataFusion
+-- expected:
+-- +-----------------------+
+-- | integers.i % Int64(2) |
+-- +-----------------------+
+-- | 1 |
+-- | 0 |
+-- +-----------------------+
 SELECT DISTINCT i % 2 FROM integers WHERE i<3 ORDER BY i;
 Error: 3000(PlanQuery), Error during planning: For SELECT DISTINCT, ORDER BY expressions i must appear in select list
diff --git a/tests/cases/standalone/common/insert/insert.result b/tests/cases/standalone/common/insert/insert.result
index 0de3fcda45..45f6a9137b 100644
--- a/tests/cases/standalone/common/insert/insert.result
+++ b/tests/cases/standalone/common/insert/insert.result
@@ -21,6 +21,7 @@ SELECT * FROM integers;
 | 1970-01-01T00:00:00.005 |
 +-------------------------+
+-- Test insert with long string constant
 CREATE TABLE IF NOT EXISTS presentations (
 presentation_date TIMESTAMP,
 author VARCHAR NOT NULL,
diff --git a/tests/cases/standalone/common/order/nulls_first.result b/tests/cases/standalone/common/order/nulls_first.result
index 8e0d8a733f..1bab6062f9 100644
--- a/tests/cases/standalone/common/order/nulls_first.result
+++ b/tests/cases/standalone/common/order/nulls_first.result
@@ -36,6 +36,10 @@ SELECT * FROM test ORDER BY i NULLS LAST, j NULLS FIRST;
 | | 1 | 2 |
 +---+---+---+
+-- TODO(ruihang): The following two SQL statements will fail under distributed mode with this error:
+-- Error: 1003(Internal), status: Internal, message: "Failed to collect recordbatch, source: Failed to poll stream, source: Arrow error: Invalid argument error: batches[0] schema is different with argument schema.\n batches[0] schema: Schema { fields: [Field { name: \"i\", data_type: Int32, nullable: true, dict_id: 0, dict_is_ordered: false, metadata: {} }, Field { name: \"j\", data_type: Int32, nullable: true, dict_id: 0, dict_is_ordered: false, metadata: {} }, Field { name: \"t\", data_type: Int64, nullable: false, dict_id: 0, dict_is_ordered: false, metadata: {\"greptime:time_index\": \"true\"} }], metadata: {\"greptime:version\": \"0\"} },\n argument schema: Schema { fields: [Field { name: \"i\", data_type: Int32, nullable: true, dict_id: 0, dict_is_ordered: false, metadata: {} }, Field { name: \"j\", data_type: Int32, nullable: true, dict_id: 0, dict_is_ordered: false, metadata: {} }, Field { name: \"t\", data_type: Int64, nullable: false, dict_id: 0, dict_is_ordered: false, metadata: {\"greptime:time_index\": \"true\"} }], metadata: {} }\n ", details: [], metadata: MetadataMap { headers: {"inner_error_code": "Internal"} }
+-- SELECT i, j, row_number() OVER (PARTITION BY i ORDER BY j NULLS FIRST) FROM test ORDER BY i NULLS FIRST, j NULLS FIRST;
+-- SELECT i, j, row_number() OVER (PARTITION BY i ORDER BY j NULLS LAST) FROM test ORDER BY i NULLS FIRST, j NULLS FIRST;
 SELECT * FROM test ORDER BY i NULLS FIRST, j NULLS LAST LIMIT 2;
 +---+---+---+
diff --git a/tests/cases/standalone/common/order/order_by.result b/tests/cases/standalone/common/order/order_by.result
index 640c593ac5..18210bfc53 100644
--- a/tests/cases/standalone/common/order/order_by.result
+++ b/tests/cases/standalone/common/order/order_by.result
@@ -192,10 +192,13 @@ SELECT a-10 AS k FROM test UNION SELECT a-10 AS l FROM test ORDER BY k;
 | 3 |
 +---+
+-- ORDER BY on alias in right-most query
+-- CONTROVERSIAL: SQLite allows both "k" and "l" to be referenced here, Postgres and MonetDB give an error.
 SELECT a-10 AS k FROM test UNION SELECT a-10 AS l FROM test ORDER BY l;
 Error: 3000(PlanQuery), No field named l. Valid fields are k.
+-- Not compatible with duckdb, works in greptimedb
 SELECT a-10 AS k FROM test UNION SELECT a-10 AS l FROM test ORDER BY 1-k;
 +---+
@@ -206,10 +209,18 @@ SELECT a-10 AS k FROM test UNION SELECT a-10 AS l FROM test ORDER BY 1-k;
 | 1 |
 +---+
+-- Not compatible with duckdb, gives an error in greptimedb
+-- TODO(LFC): Does not produce the expected error:
+-- expected:
+-- Error: 3000(PlanQuery), Schema error: No field named 'a'. Valid fields are 'k'.
 SELECT a-10 AS k FROM test UNION SELECT a-10 AS l FROM test ORDER BY a-10;
 Error: 3000(PlanQuery), Error during planning: For SELECT DISTINCT, ORDER BY expressions a must appear in select list
+-- Not compatible with duckdb, gives an error in greptimedb
+-- TODO(LFC): Does not produce the expected error:
+-- expected:
+-- Error: 3000(PlanQuery), Schema error: No field named 'a'. Valid fields are 'k'.
 SELECT a-10 AS k FROM test UNION SELECT a-11 AS l FROM test ORDER BY a-11;
 Error: 3000(PlanQuery), Error during planning: For SELECT DISTINCT, ORDER BY expressions a must appear in select list
diff --git a/tests/cases/standalone/common/order/order_by_exceptions.result b/tests/cases/standalone/common/order/order_by_exceptions.result
index f14bb99c01..d75f2c0437 100644
--- a/tests/cases/standalone/common/order/order_by_exceptions.result
+++ b/tests/cases/standalone/common/order/order_by_exceptions.result
@@ -10,10 +10,12 @@ SELECT a FROM test ORDER BY 2;
 Error: 3000(PlanQuery), Error during planning: Order by column out of bounds, specified: 2, max: 1
+-- Does not work in greptimedb
 SELECT a FROM test ORDER BY 'hello', a;
 Error: 1003(Internal), Error during planning: Sort operation is not applicable to scalar value hello
+-- Ambiguous reference in union alias, gives an error in duckdb, but works in greptimedb
 SELECT a AS k, b FROM test UNION SELECT a, b AS k FROM test ORDER BY k;
 +----+----+
@@ -38,6 +40,10 @@ SELECT a % 2, b FROM test UNION SELECT b, a % 2 AS k ORDER BY a % 2;
 Error: 3000(PlanQuery), No field named b.
+-- Works in duckdb, but does not work in greptimedb
+-- TODO(LFC): Does not produce the expected error:
+-- expected:
+-- Error: 3000(PlanQuery), Schema error: No field named 'a'. Valid fields are 'test.a % Int64(2)', 'b'.
 SELECT a % 2, b FROM test UNION SELECT a % 2 AS k, b FROM test ORDER BY a % 2;
 Error: 3000(PlanQuery), Error during planning: For SELECT DISTINCT, ORDER BY expressions a must appear in select list
diff --git a/tests/cases/standalone/common/tql/aggr_over_time.result b/tests/cases/standalone/common/tql/aggr_over_time.result
index 1608262bff..5b73fc5445 100644
--- a/tests/cases/standalone/common/tql/aggr_over_time.result
+++ b/tests/cases/standalone/common/tql/aggr_over_time.result
@@ -1,3 +1,7 @@
+-- Port from functions.test L607 - L630, commit 001ee2620e094970e5657ce39275b2fccdbd1359
+-- Include stddev/stdvar over time
+-- load 10s
+-- metric 0 8 8 2 3
 create table metric (ts timestamp(3) time index, val double);
 Affected Rows: 0
@@ -23,6 +27,8 @@ select * from metric;
 | 1970-01-01T00:00:40 | 3.0 |
 +---------------------+-----+
+-- eval instant at 1m stdvar_over_time(metric[1m])
+-- {} 10.56
 tql eval (60, 61, '10s') stdvar_over_time(metric[1m]);
 +---------------------+-------------------------------------+
@@ -31,6 +37,8 @@ tql eval (60, 61, '10s') stdvar_over_time(metric[1m]);
 | 1970-01-01T00:01:00 | 10.559999999999999 |
 +---------------------+-------------------------------------+
+-- eval instant at 1m stddev_over_time(metric[1m])
+-- {} 3.249615
 tql eval (60, 60, '1s') stddev_over_time(metric[1m]);
 +---------------------+-------------------------------------+
@@ -39,6 +47,8 @@ tql eval (60, 60, '1s') stddev_over_time(metric[1m]);
 | 1970-01-01T00:01:00 | 3.249615361854384 |
 +---------------------+-------------------------------------+
+-- eval instant at 1m stddev_over_time((metric[1m]))
+-- {} 3.249615
 tql eval (60, 60, '1s') stddev_over_time((metric[1m]));
 +---------------------+-------------------------------------+
@@ -51,6 +61,8 @@ drop table metric;
 Affected Rows: 1
+-- load 10s
+-- metric 1.5990505637277868 1.5990505637277868 1.5990505637277868
 create table metric (ts timestamp(3) time index, val double);
 Affected Rows: 0
@@ -63,6 +75,8 @@ insert into metric values
 Affected Rows: 4
+-- eval instant at 1m stdvar_over_time(metric[1m])
+-- {} 0
 tql eval (60, 60, '1s') stdvar_over_time(metric[1m]);
 +---------------------+-------------------------------------+
@@ -71,6 +85,8 @@ tql eval (60, 60, '1s') stdvar_over_time(metric[1m]);
 | 1970-01-01T00:01:00 | 0.47943050725465364 |
 +---------------------+-------------------------------------+
+-- eval instant at 1m stddev_over_time(metric[1m])
+-- {} 0
 tql eval (60, 60, '1s') stddev_over_time(metric[1m]);
 +---------------------+-------------------------------------+
@@ -83,6 +99,12 @@ drop table metric;
 Affected Rows: 1
+-- Port from functions.test L632 - L680, commit 001ee2620e094970e5657ce39275b2fccdbd1359
+-- Include quantile over time
+-- load 10s
+-- data{test="two samples"} 0 1
+-- data{test="three samples"} 0 1 2
+-- data{test="uneven samples"} 0 1 4
 create table data (ts timestamp(3) time index, val double, test string primary key);
 Affected Rows: 0
@@ -99,10 +121,58 @@ insert into data values
 Affected Rows: 8
+-- eval instant at 1m quantile_over_time(0, data[1m])
+-- {test="two samples"} 0
+-- {test="three samples"} 0
+-- {test="uneven samples"} 0
+-- tql eval (60, 60, '1s') quantile_over_time(0, data[1m]);
+-- eval instant at 1m quantile_over_time(0.5, data[1m])
+-- {test="two samples"} 0.5
+-- {test="three samples"} 1
+-- {test="uneven samples"} 1
+-- tql eval (60, 60, '1s') quantile_over_time(0.5, data[1m]);
+-- eval instant at 1m quantile_over_time(0.75, data[1m])
+-- {test="two samples"} 0.75
+-- {test="three samples"} 1.5
+-- {test="uneven samples"} 2.5
+-- tql eval (60, 60, '1s') quantile_over_time(0.75, data[1m]);
+-- eval instant at 1m quantile_over_time(0.8, data[1m])
+-- {test="two samples"} 0.8
+-- {test="three samples"} 1.6
+-- {test="uneven samples"} 2.8
+-- tql eval (60, 60, '1s') quantile_over_time(0.8, data[1m]);
+-- eval instant at 1m quantile_over_time(1, data[1m])
+-- {test="two samples"} 1
+-- {test="three samples"} 2
+-- {test="uneven samples"} 4
+-- tql eval (60, 60, '1s') quantile_over_time(1, data[1m]);
+-- eval instant at 1m quantile_over_time(-1, data[1m])
+-- {test="two samples"} -Inf
+-- {test="three samples"} -Inf
+-- {test="uneven samples"} -Inf
+-- tql eval (60, 60, '1s') quantile_over_time(-1, data[1m]);
+-- eval instant at 1m quantile_over_time(2, data[1m])
+-- {test="two samples"} +Inf
+-- {test="three samples"} +Inf
+-- {test="uneven samples"} +Inf
+-- tql eval (60, 60, '1s') quantile_over_time(2, data[1m]);
+-- eval instant at 1m (quantile_over_time(2, (data[1m])))
+-- {test="two samples"} +Inf
+-- {test="three samples"} +Inf
+-- {test="uneven samples"} +Inf
+-- tql eval (60, 60, '1s') (quantile_over_time(2, (data[1m])));
 drop table data;
 Affected Rows: 1
+-- Port from functions.test L773 - L802, commit 001ee2620e094970e5657ce39275b2fccdbd1359
+-- Include max/min/last over time
+-- load 10s
+-- data{type="numbers"} 2 0 3
+-- data{type="some_nan"} 2 0 NaN
+-- data{type="some_nan2"} 2 NaN 1
+-- data{type="some_nan3"} NaN 0 1
+-- data{type="only_nan"} NaN NaN NaN
 create table data (ts timestamp(3) time index, val double, ty string primary key);
 Affected Rows: 0
@@ -126,6 +196,27 @@ insert into data values
 Affected Rows: 15
+-- eval instant at 1m min_over_time(data[1m])
+-- {type="numbers"} 0
+-- {type="some_nan"} 0
+-- {type="some_nan2"} 1
+-- {type="some_nan3"} 0
+-- {type="only_nan"} NaN
+-- tql eval (60, 60, '1s') min_over_time(data[1m]);
+-- eval instant at 1m max_over_time(data[1m])
+-- {type="numbers"} 3
+-- {type="some_nan"} 2
+-- {type="some_nan2"} 2
+-- {type="some_nan3"} 1
+-- {type="only_nan"} NaN
+-- tql eval (60, 60, '1s') max_over_time(data[1m]);
+-- eval instant at 1m last_over_time(data[1m])
+-- data{type="numbers"} 3
+-- data{type="some_nan"} NaN
+-- data{type="some_nan2"} 1
+-- data{type="some_nan3"} 1
+-- data{type="only_nan"} NaN
+-- tql eval (60, 60, '1s') last_over_time(data[1m]);
 drop table data;
 Affected Rows: 1
diff --git a/tests/cases/standalone/common/tql/basic.result b/tests/cases/standalone/common/tql/basic.result
index 09ce38cba5..46f600f9ce 100644
--- a/tests/cases/standalone/common/tql/basic.result
+++ b/tests/cases/standalone/common/tql/basic.result
@@ -2,11 +2,13 @@ CREATE TABLE test(i DOUBLE, j TIMESTAMP TIME INDEX, k STRING PRIMARY KEY);
 Affected Rows: 0
+-- insert two points at 1ms and one point at 2ms
 INSERT INTO test VALUES (1, 1, "a"), (1, 1, "b"), (2, 2, "a");
 Affected Rows: 3
 -- SQLNESS SORT_RESULT 2 1
+-- evaluate at 0s, 5s and 10s. No point at 0s.
 TQL EVAL (0, 10, '5s') test;
 +-----+---------------------+---+
@@ -18,6 +20,7 @@ TQL EVAL (0, 10, '5s') test;
 | 2.0 | 1970-01-01T00:00:10 | a |
 +-----+---------------------+---+
+-- the point at 1ms will be shadowed by the point at 2ms
 TQL EVAL (0, 10, '5s') test{k="a"};
 +-----+---------------------+---+
diff --git a/tests/cases/standalone/common/tql/operator.result b/tests/cases/standalone/common/tql/operator.result
index 360d2bc48a..1ad0461978 100644
--- a/tests/cases/standalone/common/tql/operator.result
+++ b/tests/cases/standalone/common/tql/operator.result
@@ -1,3 +1,9 @@
+-- Port from operators.test L607 - L630, commit 001ee2620e094970e5657ce39275b2fccdbd1359
+-- Include atan2
+-- load 5m
+-- trigy{} 10
+-- trigx{} 20
+-- trigNaN{} NaN
 create table trigy (ts timestamp(3) time index, val double);
 Affected Rows: 0
@@ -22,6 +28,8 @@ insert into trignan values (0, 'NaN'::double);
 Affected Rows: 1
+-- eval instant at 5m trigy atan2 trigx
+-- trigy{} 0.4636476090008061
 tql eval (300, 300, '1s') trigy atan2 trigx;
 +---------------------+----------------------------+
@@ -30,11 +38,16 @@ tql eval (300, 300, '1s') trigy atan2 trigx;
 | 1970-01-01T00:05:00 | 0.4636476090008061 |
 +---------------------+----------------------------+
+-- eval instant at 5m trigy atan2 trigNaN
+-- trigy{} NaN
+-- This query doesn't have a result because `trignan` is NaN and will be filtered out.
 tql eval (300, 300, '1s') trigy atan2 trignan;
 ++
 ++
+-- eval instant at 5m 10 atan2 20
+-- 0.4636476090008061
 tql eval (300, 300, '1s') 10 atan2 20;
 +---------------------+--------------------+
@@ -43,6 +56,8 @@ tql eval (300, 300, '1s') 10 atan2 20;
 | 1970-01-01T00:05:00 | 0.4636476090008061 |
 +---------------------+--------------------+
+-- eval instant at 5m 10 atan2 NaN
+-- NaN
 tql eval (300, 300, '1s') 10 atan2 NaN;
 +---------------------+-------+
diff --git a/tests/cases/standalone/common/types/blob.result b/tests/cases/standalone/common/types/blob.result
index a35e89bc11..1673d99fe2 100644
--- a/tests/cases/standalone/common/types/blob.result
+++ b/tests/cases/standalone/common/types/blob.result
@@ -2,6 +2,7 @@ CREATE TABLE blobs (b BYTEA, t timestamp time index);
 Affected Rows: 0
+--Insert valid hex strings--
 INSERT INTO blobs VALUES('\xaa\xff\xaa'::BYTEA, 1), ('\xAA\xFF\xAA\xAA\xFF\xAA'::BYTEA, 2), ('\xAA\xFF\xAA\xAA\xFF\xAA\xAA\xFF\xAA'::BYTEA, 3);
 Affected Rows: 3
@@ -16,6 +17,7 @@ SELECT * FROM blobs;
 | 5c7841415c7846465c7841415c7841415c7846465c7841415c7841415c7846465c784141 | 1970-01-01T00:00:00.003 |
 +--------------------------------------------------------------------------+-------------------------+
+--Insert valid hex strings, lower case--
 DELETE FROM blobs;
 Affected Rows: 3
@@ -34,6 +36,7 @@ SELECT * FROM blobs;
 | 5c7861615c7866665c7861615c7861615c7866665c7861615c7861615c7866665c786161 | 1970-01-01T00:00:00.003 |
 +--------------------------------------------------------------------------+-------------------------+
+--Insert valid hex strings with number and letters--
 DELETE FROM blobs;
 Affected Rows: 3
@@ -52,10 +55,12 @@ SELECT * FROM blobs;
 | 5c78616131313939616131313939616131313939 | 1970-01-01T00:00:00.003 |
 +------------------------------------------+-------------------------+
+--Insert invalid hex strings (invalid hex chars: G, H, I)--
 INSERT INTO blobs VALUES('\xGA\xFF\xAA'::BYTEA, 4);
 Affected Rows: 1
+--Insert invalid hex strings (odd # of chars)--
 INSERT INTO blobs VALUES('\xA'::BYTEA, 4);
 Affected Rows: 1
diff --git a/tests/cases/standalone/cte/cte.result b/tests/cases/standalone/cte/cte.result
index 5dedec787e..562883a9ef 100644
--- a/tests/cases/standalone/cte/cte.result
+++ b/tests/cases/standalone/cte/cte.result
@@ -59,6 +59,7 @@ with cte1 as (select 42), cte1 as (select 42) select * FROM cte1;
 Error: 3000(PlanQuery), sql parser error: WITH query name "cte1" specified more than once
+-- reference to a CTE before it's actually defined; not supported by datafusion
 with cte3 as (select ref2.j as i from cte1 as ref2), cte1 as (Select i as j from a), cte2 as (select ref.j+1 as k from cte1 as ref) select * from cte2 union all select * FROM cte3;
 Error: 3000(PlanQuery), Error during planning: Table not found: greptime.public.cte1
@@ -96,6 +97,7 @@ SELECT 1 UNION ALL (WITH cte AS (SELECT 42) SELECT * FROM cte) order by 1;
 | 42 |
 +----------+
+-- Recursive CTEs are not supported in datafusion
 WITH RECURSIVE cte(d) AS (
 SELECT 1
 UNION ALL
 SELECT d+1 FROM cte WHERE d < 10
 )
 SELECT max(d) FROM cte;
 Error: 3000(PlanQuery), This feature is not implemented: Recursive CTEs are not supported
@@ -109,6 +111,7 @@ SELECT max(d) FROM cte;
 Error: 3000(PlanQuery), This feature is not implemented: Recursive CTEs are not supported
+-- Nested aliases are not supported in datafusion
 with cte (a) as (
 select 1
 )
diff --git a/tests/cases/standalone/cte/cte_in_cte.result b/tests/cases/standalone/cte/cte_in_cte.result
index d9b18ebe2e..b20ba4b40c 100644
--- a/tests/cases/standalone/cte/cte_in_cte.result
+++ b/tests/cases/standalone/cte/cte_in_cte.result
@@ -50,14 +50,19 @@ with cte1 as (Select i as j from a) select * from (with cte2 as (select max(j) a
 | 42 |
 +----+
+-- Refer to CTE in subquery expression,
+-- this feature is not implemented in datafusion
 with cte1 as (Select i as j from a) select * from cte1 where j = (with cte2 as (select max(j) as j from cte1) select j from cte2);
 Error: 3001(EngineExecuteQuery), This feature is not implemented: Physical plan does not support logical expression ()
+-- Refer to same-named CTE in a subquery expression
+-- this feature is not implemented in datafusion
 with cte as (Select i as j from a) select * from cte where j = (with cte as (select max(j) as j from cte) select j from cte);
 Error: 3000(PlanQuery), sql parser error: WITH query name "cte" specified more than once
+-- self-reference to a non-existent CTE
 with cte as (select * from cte) select * from cte;
 Error: 3000(PlanQuery), Error during planning: Table not found: greptime.public.cte
diff --git a/tests/cases/standalone/optimizer/filter_push_down.result b/tests/cases/standalone/optimizer/filter_push_down.result
index e48471107f..85ded032de 100644
--- a/tests/cases/standalone/optimizer/filter_push_down.result
+++ b/tests/cases/standalone/optimizer/filter_push_down.result
@@ -187,6 +187,16 @@ SELECT i FROM (SELECT * FROM integers i1 UNION SELECT * FROM integers i2) a WHER
 | 3 |
 +---+
+-- TODO(LFC): Occasionally the following SQL is not ordered by column 1 under the new DataFusion. Needs further investigation; commented out temporarily.
+-- expected:
+-- +---+---+--------------+
+-- | a | b | ROW_NUMBER() |
+-- +---+---+--------------+
+-- | 1 | 1 | 1 |
+-- | 2 | 2 | 5 |
+-- | 3 | 3 | 9 |
+-- +---+---+--------------+
+-- SELECT * FROM (SELECT i1.i AS a, i2.i AS b, row_number() OVER (ORDER BY i1.i, i2.i) FROM integers i1, integers i2 WHERE i1.i IS NOT NULL AND i2.i IS NOT NULL) a1 WHERE a=b ORDER BY 1;
 SELECT * FROM (SELECT 0=1 AS cond FROM integers i1, integers i2) a1 WHERE cond ORDER BY 1;
 ++
diff --git a/tests/cases/standalone/tql-explain-analyze/analyze.result b/tests/cases/standalone/tql-explain-analyze/analyze.result
index d8e767d84b..db3dcb265d 100644
--- a/tests/cases/standalone/tql-explain-analyze/analyze.result
+++ b/tests/cases/standalone/tql-explain-analyze/analyze.result
@@ -2,10 +2,12 @@ CREATE TABLE test(i DOUBLE, j TIMESTAMP TIME INDEX, k STRING PRIMARY KEY);
 Affected Rows: 0
+-- insert two points at 1ms and one point at 2ms
 INSERT INTO test VALUES (1, 1, "a"), (1, 1, "b"), (2, 2, "a");
 Affected Rows: 3
+-- analyze at 0s, 5s and 10s. No point at 0s.
 -- SQLNESS REPLACE (metrics.*) REDACTED
 -- SQLNESS REPLACE (RoundRobinBatch.*) REDACTED
 -- SQLNESS REPLACE (-+) -
diff --git a/tests/cases/standalone/tql-explain-analyze/explain.result b/tests/cases/standalone/tql-explain-analyze/explain.result
index 8a1c23626b..a9f501ff78 100644
--- a/tests/cases/standalone/tql-explain-analyze/explain.result
+++ b/tests/cases/standalone/tql-explain-analyze/explain.result
@@ -2,10 +2,12 @@ CREATE TABLE test(i DOUBLE, j TIMESTAMP TIME INDEX, k STRING PRIMARY KEY);
 Affected Rows: 0
+-- insert two points at 1ms and one point at 2ms
 INSERT INTO test VALUES (1, 1, "a"), (1, 1, "b"), (2, 2, "a");
 Affected Rows: 3
+-- explain at 0s, 5s and 10s. No point at 0s.
 -- SQLNESS REPLACE (RoundRobinBatch.*) REDACTED
 TQL EXPLAIN (0, 10, '5s') test;
diff --git a/tests/runner/Cargo.toml b/tests/runner/Cargo.toml
index f9e0bdd032..d14566f6f0 100644
--- a/tests/runner/Cargo.toml
+++ b/tests/runner/Cargo.toml
@@ -13,6 +13,6 @@ common-grpc = { path = "../../src/common/grpc" }
 common-query = { path = "../../src/common/query" }
 common-time = { path = "../../src/common/time" }
 serde.workspace = true
-sqlness = { git = "https://github.com/CeresDB/sqlness.git", rev = "a4663365795d2067eb53966c383e1bb0c89c7627" }
+sqlness = { version = "0.5" }
 tinytemplate = "1.2"
 tokio.workspace = true