Mirror of https://github.com/GreptimeTeam/greptimedb.git (synced 2026-01-04)
feat: bring back sqlness and integration tests (#2448)
* enable integration test
Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
* update sqlness result
Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
* disable sqlness region failover
Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
* enable sqlness in CI
Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
* sort unstable result
Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
* set require_lease_before_startup to true
Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
* fix: fix inconsistent cache
* replace windows path chars
Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
* ignore some integration cases in windows
Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
* Revert "ignore some integration cases in windows"
This reverts commit 122478b7c1.
* disable windows for now
Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
* fix: fix close region bug in RegionHeartbeatResponseHandler
* disable failover tests
---------
Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
Co-authored-by: WenyXu <wenymedia@gmail.com>
Changed file: .github/workflows/develop.yml (vendored, 123 lines changed)
@@ -74,33 +74,33 @@ jobs:
       - name: Run taplo
         run: taplo format --check
 
-  # sqlness:
-  #   name: Sqlness Test
-  #   if: github.event.pull_request.draft == false
-  #   runs-on: ${{ matrix.os }}
-  #   strategy:
-  #     matrix:
-  #       os: [ ubuntu-latest-8-cores, windows-latest-8-cores ]
-  #   timeout-minutes: 60
-  #   steps:
-  #     - uses: actions/checkout@v3
-  #     - uses: arduino/setup-protoc@v1
-  #       with:
-  #         repo-token: ${{ secrets.GITHUB_TOKEN }}
-  #     - uses: dtolnay/rust-toolchain@master
-  #       with:
-  #         toolchain: ${{ env.RUST_TOOLCHAIN }}
-  #     - name: Rust Cache
-  #       uses: Swatinem/rust-cache@v2
-  #     - name: Run sqlness
-  #       run: cargo sqlness
-  #     - name: Upload sqlness logs
-  #       if: always()
-  #       uses: actions/upload-artifact@v3
-  #       with:
-  #         name: sqlness-logs
-  #         path: ${{ runner.temp }}/greptime-*.log
-  #         retention-days: 3
+  sqlness:
+    name: Sqlness Test
+    if: github.event.pull_request.draft == false
+    runs-on: ${{ matrix.os }}
+    strategy:
+      matrix:
+        os: [ ubuntu-latest-8-cores, windows-latest-8-cores ]
+    timeout-minutes: 60
+    steps:
+      - uses: actions/checkout@v3
+      - uses: arduino/setup-protoc@v1
+        with:
+          repo-token: ${{ secrets.GITHUB_TOKEN }}
+      - uses: dtolnay/rust-toolchain@master
+        with:
+          toolchain: ${{ env.RUST_TOOLCHAIN }}
+      - name: Rust Cache
+        uses: Swatinem/rust-cache@v2
+      - name: Run sqlness
+        run: cargo sqlness
+      - name: Upload sqlness logs
+        if: always()
+        uses: actions/upload-artifact@v3
+        with:
+          name: sqlness-logs
+          path: ${{ runner.temp }}/greptime-*.log
+          retention-days: 3
 
   fmt:
     name: Rustfmt
@@ -194,37 +194,38 @@ jobs:
     runs-on: windows-latest-8-cores
     timeout-minutes: 60
     steps:
-      - run: git config --global core.autocrlf false
-      - uses: actions/checkout@v3
-      - uses: arduino/setup-protoc@v1
-        with:
-          repo-token: ${{ secrets.GITHUB_TOKEN }}
-      - name: Install Rust toolchain
-        uses: dtolnay/rust-toolchain@master
-        with:
-          toolchain: ${{ env.RUST_TOOLCHAIN }}
-          components: llvm-tools-preview
-      - name: Rust Cache
-        uses: Swatinem/rust-cache@v2
-      - name: Install Cargo Nextest
-        uses: taiki-e/install-action@nextest
-      - name: Install Python
-        uses: actions/setup-python@v4
-        with:
-          python-version: '3.10'
-      - name: Install PyArrow Package
-        run: pip install pyarrow
-      - name: Install WSL distribution
-        uses: Vampire/setup-wsl@v2
-        with:
-          distribution: Ubuntu-22.04
-      - name: Running tests
-        run: cargo nextest run -F pyo3_backend,dashboard
-        env:
-          RUST_BACKTRACE: 1
-          CARGO_INCREMENTAL: 0
-          GT_S3_BUCKET: ${{ secrets.S3_BUCKET }}
-          GT_S3_ACCESS_KEY_ID: ${{ secrets.S3_ACCESS_KEY_ID }}
-          GT_S3_ACCESS_KEY: ${{ secrets.S3_ACCESS_KEY }}
-          GT_S3_REGION: ${{ secrets.S3_REGION }}
-          UNITTEST_LOG_DIR: "__unittest_logs"
+      - run: 'echo "temporary disabled"'
+      # - run: git config --global core.autocrlf false
+      # - uses: actions/checkout@v3
+      # - uses: arduino/setup-protoc@v1
+      #   with:
+      #     repo-token: ${{ secrets.GITHUB_TOKEN }}
+      # - name: Install Rust toolchain
+      #   uses: dtolnay/rust-toolchain@master
+      #   with:
+      #     toolchain: ${{ env.RUST_TOOLCHAIN }}
+      #     components: llvm-tools-preview
+      # - name: Rust Cache
+      #   uses: Swatinem/rust-cache@v2
+      # - name: Install Cargo Nextest
+      #   uses: taiki-e/install-action@nextest
+      # - name: Install Python
+      #   uses: actions/setup-python@v4
+      #   with:
+      #     python-version: '3.10'
+      # - name: Install PyArrow Package
+      #   run: pip install pyarrow
+      # - name: Install WSL distribution
+      #   uses: Vampire/setup-wsl@v2
+      #   with:
+      #     distribution: Ubuntu-22.04
+      # - name: Running tests
+      #   run: cargo nextest run -F pyo3_backend,dashboard
+      #   env:
+      #     RUST_BACKTRACE: 1
+      #     CARGO_INCREMENTAL: 0
+      #     GT_S3_BUCKET: ${{ secrets.S3_BUCKET }}
+      #     GT_S3_ACCESS_KEY_ID: ${{ secrets.S3_ACCESS_KEY_ID }}
+      #     GT_S3_ACCESS_KEY: ${{ secrets.S3_ACCESS_KEY }}
+      #     GT_S3_REGION: ${{ secrets.S3_REGION }}
+      #     UNITTEST_LOG_DIR: "__unittest_logs"
Changed file: Cargo.lock (generated, 569 lines changed; diff too large, omitted)
@@ -48,8 +48,7 @@ members = [
     "src/storage",
     "src/store-api",
     "src/table",
-    # TODO: add this back once the region server is available
-    # "tests-integration",
+    "tests-integration",
     "tests/runner",
 ]
 resolver = "2"
@@ -121,14 +121,15 @@ impl DropTableProcedure {
         };
 
         let cache_invalidator = &self.context.cache_invalidator;
+        cache_invalidator
+            .invalidate_table_id(&ctx, self.data.table_id())
+            .await?;
+
         cache_invalidator
             .invalidate_table_name(&ctx, self.data.table_ref().into())
             .await?;
 
-        cache_invalidator
-            .invalidate_table_id(&ctx, self.data.table_id())
-            .await?;
 
         self.data.state = DropTableState::DatanodeDropRegions;
 
         Ok(Status::executing(true))
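
Aside on the hunk above (the "fix: fix inconsistent cache" part of this commit): table metadata is cached under two key spaces, table name and table id, and the drop-table flow must invalidate both, with the id-keyed entry now purged up front rather than last. A minimal sketch of the failure mode with toy types, not GreptimeDB's real CacheInvalidator:

    use std::collections::HashMap;

    // A table is cached both by name and by id. Dropping it must purge both
    // entries; whichever one survives keeps serving stale metadata.
    struct TableCache {
        by_name: HashMap<String, u32>, // table name -> table id
        by_id: HashMap<u32, String>,   // table id -> metadata (simplified)
    }

    impl TableCache {
        fn drop_table(&mut self, name: &str, id: u32) {
            self.by_id.remove(&id);
            self.by_name.remove(name);
        }
    }

    fn main() {
        let mut cache = TableCache {
            by_name: HashMap::from([("demo".to_string(), 1024)]),
            by_id: HashMap::from([(1024, "CREATE TABLE demo ...".to_string())]),
        };
        cache.drop_table("demo", 1024);
        assert!(cache.by_name.is_empty() && cache.by_id.is_empty());
    }
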
@@ -15,6 +15,8 @@
 use std::collections::HashMap;
 
 use async_trait::async_trait;
+use common_error::ext::ErrorExt;
+use common_error::status_code::StatusCode;
 use common_meta::error::{InvalidHeartbeatResponseSnafu, Result as MetaResult};
 use common_meta::heartbeat::handler::{
     HandleControl, HeartbeatResponseHandler, HeartbeatResponseHandlerContext,
@@ -92,16 +94,23 @@ impl RegionHeartbeatResponseHandler {
 
     fn fill_reply(mut template: InstructionReply, result: Result<Output>) -> InstructionReply {
         let success = result.is_ok();
-        let error = result.map_err(|e| e.to_string()).err();
+        let error = result.as_ref().map_err(|e| e.to_string()).err();
         match &mut template {
             InstructionReply::OpenRegion(reply) => {
                 reply.result = success;
                 reply.error = error;
             }
-            InstructionReply::CloseRegion(reply) => {
-                reply.result = success;
-                reply.error = error;
-            }
+            InstructionReply::CloseRegion(reply) => match result {
+                Err(e) => {
+                    if e.status_code() == StatusCode::RegionNotFound {
+                        reply.result = true;
+                    }
+                }
+                _ => {
+                    reply.result = success;
+                    reply.error = error;
+                }
+            },
             InstructionReply::InvalidateTableCache(reply) => {
                 reply.result = success;
                 reply.error = error;
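
Aside: the decisive detail in the hunk above is `result.as_ref()`. The old code moved `result` into `map_err` when extracting the error string, so the `Result` was consumed before the `CloseRegion` arm could inspect it; borrowing via `as_ref()` keeps it available for the later `match`, where a `RegionNotFound` error on close is deliberately reported as success (the region is already gone). A self-contained sketch of the pattern with toy types, not GreptimeDB's:

    #[derive(Debug)]
    enum MyError {
        RegionNotFound,
        Other(String),
    }

    // Mirrors fill_reply's shape: compute success/error without consuming
    // the Result, then still match on the owned Result afterwards.
    fn fill_reply(result: Result<u32, MyError>) -> (bool, Option<String>) {
        let success = result.is_ok();
        // `as_ref()` borrows, so `result` remains usable in the match below.
        let error = result.as_ref().map_err(|e| format!("{e:?}")).err();

        match result {
            // Closing a region that no longer exists counts as success.
            Err(MyError::RegionNotFound) => (true, None),
            _ => (success, error),
        }
    }

    fn main() {
        assert_eq!(fill_reply(Ok(1)), (true, None));
        assert_eq!(fill_reply(Err(MyError::RegionNotFound)), (true, None));
        assert!(!fill_reply(Err(MyError::Other("io".into()))).0);
    }
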
@@ -123,12 +123,6 @@ impl StatementExecutor {
 
         let table = DistTable::table(table_info);
 
-        // Invalidates local cache ASAP.
-        self.cache_invalidator
-            .invalidate_table_id(&Context::default(), table_id)
-            .await
-            .context(error::InvalidateTableCacheSnafu)?;
-
         Ok(table)
     }
 
@@ -154,6 +148,11 @@ impl StatementExecutor {
             .await
             .context(error::InvalidateTableCacheSnafu)?;
 
+        self.cache_invalidator
+            .invalidate_table_name(&Context::default(), table_name.clone())
+            .await
+            .context(error::InvalidateTableCacheSnafu)?;
+
         Ok(Output::AffectedRows(1))
     }
 
@@ -262,6 +261,14 @@ impl StatementExecutor {
             .await
             .context(error::InvalidateTableCacheSnafu)?;
 
+        self.cache_invalidator
+            .invalidate_table_name(
+                &Context::default(),
+                TableName::new(catalog_name, schema_name, table_name),
+            )
+            .await
+            .context(error::InvalidateTableCacheSnafu)?;
+
         Ok(Output::AffectedRows(0))
     }
 
@@ -35,7 +35,6 @@ frontend = { workspace = true, features = ["testing"] }
 futures.workspace = true
 meta-client = { workspace = true }
 meta-srv = { workspace = true, features = ["mock"] }
-mito = { workspace = true, features = ["test"] }
 object-store = { workspace = true }
 once_cell.workspace = true
 operator = { workspace = true }
@@ -292,6 +292,7 @@ pub fn create_tmp_dir_and_datanode_opts(
 pub(crate) fn create_datanode_opts(store: ObjectStoreConfig, home_dir: String) -> DatanodeOptions {
     DatanodeOptions {
         node_id: Some(0),
+        require_lease_before_startup: true,
         storage: StorageConfig {
             data_home: home_dir,
             store,
@@ -476,7 +476,6 @@ async fn test_execute_show_databases_tables(instance: Arc<dyn MockInstance>) {
 | Tables  |
 +---------+
 | numbers |
-| scripts |
 +---------+\
 ";
     let output = execute_sql(&instance, "show tables").await;
@@ -494,7 +493,6 @@ async fn test_execute_show_databases_tables(instance: Arc<dyn MockInstance>) {
 +---------+
 | demo    |
 | numbers |
-| scripts |
 +---------+\
 ";
     check_unordered_output_stream(output, expected).await;
@@ -577,7 +575,12 @@ async fn test_execute_external_create_infer_format(instance: Arc<dyn MockInstanc
     let instance = instance.frontend();
 
     let tmp_dir = temp_dir::create_temp_dir("test_execute_external_create_infer_format");
-    let location = tmp_dir.path().to_str().unwrap();
+    let location = tmp_dir
+        .path()
+        .to_str()
+        .unwrap()
+        .replace(':', "")
+        .replace('\\', "/");
 
     let output = execute_sql(
         &instance,
@@ -1743,7 +1746,6 @@ async fn test_information_schema_dot_tables(instance: Arc<dyn MockInstance>) {
 +---------------+--------------------+------------+-----------------+----------+-------------+
 | greptime      | information_schema | columns    | LOCAL TEMPORARY | 4        |             |
 | greptime      | public             | numbers    | LOCAL TEMPORARY | 2        | test_engine |
-| greptime      | public             | scripts    | BASE TABLE      | 1024     | mito        |
 | greptime      | information_schema | tables     | LOCAL TEMPORARY | 3        |             |
 +---------------+--------------------+------------+-----------------+----------+-------------+";
 
@@ -1754,7 +1756,7 @@ async fn test_information_schema_dot_tables(instance: Arc<dyn MockInstance>) {
 +-----------------+--------------------+---------------+-----------------+----------+--------+
 | table_catalog   | table_schema       | table_name    | table_type      | table_id | engine |
 +-----------------+--------------------+---------------+-----------------+----------+--------+
-| another_catalog | another_schema     | another_table | BASE TABLE      | 1025     | mito   |
+| another_catalog | another_schema     | another_table | BASE TABLE      | 1024     | mito   |
 | another_catalog | information_schema | columns       | LOCAL TEMPORARY | 4        |        |
 | another_catalog | information_schema | tables        | LOCAL TEMPORARY | 3        |        |
 +-----------------+--------------------+---------------+-----------------+----------+--------+";
@@ -1776,30 +1778,23 @@ async fn test_information_schema_dot_columns(instance: Arc<dyn MockInstance>) {
 
     let output = execute_sql(&instance, sql).await;
     let expected = "\
-+---------------+--------------------+------------+---------------+----------------------+---------------+
-| table_catalog | table_schema       | table_name | column_name   | data_type            | semantic_type |
-+---------------+--------------------+------------+---------------+----------------------+---------------+
-| greptime      | information_schema | columns    | table_catalog | String               | FIELD         |
-| greptime      | information_schema | columns    | table_schema  | String               | FIELD         |
-| greptime      | information_schema | columns    | table_name    | String               | FIELD         |
-| greptime      | information_schema | columns    | column_name   | String               | FIELD         |
-| greptime      | information_schema | columns    | data_type     | String               | FIELD         |
-| greptime      | information_schema | columns    | semantic_type | String               | FIELD         |
-| greptime      | public             | numbers    | number        | UInt32               | TAG           |
-| greptime      | public             | scripts    | schema        | String               | TAG           |
-| greptime      | public             | scripts    | name          | String               | TAG           |
-| greptime      | public             | scripts    | script        | String               | FIELD         |
-| greptime      | public             | scripts    | engine        | String               | FIELD         |
-| greptime      | public             | scripts    | timestamp     | TimestampMillisecond | TIMESTAMP     |
-| greptime      | public             | scripts    | gmt_created   | TimestampMillisecond | FIELD         |
-| greptime      | public             | scripts    | gmt_modified  | TimestampMillisecond | FIELD         |
-| greptime      | information_schema | tables     | table_catalog | String               | FIELD         |
-| greptime      | information_schema | tables     | table_schema  | String               | FIELD         |
-| greptime      | information_schema | tables     | table_name    | String               | FIELD         |
-| greptime      | information_schema | tables     | table_type    | String               | FIELD         |
-| greptime      | information_schema | tables     | table_id      | UInt32               | FIELD         |
-| greptime      | information_schema | tables     | engine        | String               | FIELD         |
-+---------------+--------------------+------------+---------------+----------------------+---------------+";
++---------------+--------------------+------------+---------------+-----------+---------------+
+| table_catalog | table_schema       | table_name | column_name   | data_type | semantic_type |
++---------------+--------------------+------------+---------------+-----------+---------------+
+| greptime      | information_schema | columns    | table_catalog | String    | FIELD         |
+| greptime      | information_schema | columns    | table_schema  | String    | FIELD         |
+| greptime      | information_schema | columns    | table_name    | String    | FIELD         |
+| greptime      | information_schema | columns    | column_name   | String    | FIELD         |
+| greptime      | information_schema | columns    | data_type     | String    | FIELD         |
+| greptime      | information_schema | columns    | semantic_type | String    | FIELD         |
+| greptime      | public             | numbers    | number        | UInt32    | TAG           |
+| greptime      | information_schema | tables     | table_catalog | String    | FIELD         |
+| greptime      | information_schema | tables     | table_schema  | String    | FIELD         |
+| greptime      | information_schema | tables     | table_name    | String    | FIELD         |
+| greptime      | information_schema | tables     | table_type    | String    | FIELD         |
+| greptime      | information_schema | tables     | table_id      | UInt32    | FIELD         |
+| greptime      | information_schema | tables     | engine        | String    | FIELD         |
++---------------+--------------------+------------+---------------+-----------+---------------+";
 
     check_output_stream(output, expected).await;
 
@@ -117,7 +117,7 @@ pub fn find_testing_resource(path: &str) -> String {
     // We need unix style path even in the Windows, because the path is used in object-store, must
     // be delimited with '/'. Inside the object-store, it will be converted to file system needed
     // path in the end.
-    let p = p.replace('\\', "/");
+    let p = p.replace(':', "").replace('\\', "/");
 
     // Prepend a '/' to indicate it's a file system path when parsed as object-store url in Windows.
     format!("/{p}")
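
Aside: the one-line change above is the Windows half of this commit's path handling. Paths handed to object-store must be '/'-delimited, and a drive prefix like `C:` would otherwise be misread as a scheme when the path is parsed as an object-store URL, so the colon is stripped before backslashes are converted. A quick standalone illustration of the same replace chain:

    // Mirrors the replace chain above: drop the drive colon, switch to '/'
    // separators, and prepend '/' to mark an absolute file-system path.
    fn to_object_store_path(p: &str) -> String {
        let p = p.replace(':', "").replace('\\', "/");
        format!("/{p}")
    }

    fn main() {
        assert_eq!(to_object_store_path(r"C:\Users\ci\data"), "/C/Users/ci/data");
        // Unix-style inputs only gain the leading slash.
        assert_eq!(to_object_store_path("tmp/data"), "/tmp/data");
    }
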
@@ -606,7 +606,7 @@ pub async fn test_config_api(store_type: StorageType) {
     let expected_toml_str = format!(
         r#"mode = "standalone"
 node_id = 0
-coordination = false
+require_lease_before_startup = true
 rpc_addr = "127.0.0.1:3001"
 rpc_runtime_size = 8
 enable_telemetry = true
@@ -689,7 +689,10 @@ fn drop_lines_with_inconsistent_results(input: String) -> String {
             && !line.trim().starts_with("scope =")
         })
         .collect::<Vec<&str>>()
-        .join("\n")
+        .join(
+            "
+",
+        )
 }
 
 #[cfg(feature = "dashboard")]
@@ -18,10 +18,10 @@ mod grpc;
 mod http;
 #[macro_use]
 mod sql;
-#[macro_use]
-mod region_failover;
+// #[macro_use]
+// mod region_failover;
 
 grpc_tests!(File, S3, S3WithCache, Oss, Azblob, Gcs);
 http_tests!(File, S3, S3WithCache, Oss, Azblob, Gcs);
-region_failover_tests!(File, S3, S3WithCache, Oss, Azblob);
+// region_failover_tests!(File, S3, S3WithCache, Oss, Azblob);
 sql_tests!(File);
@@ -54,10 +54,13 @@ macro_rules! sql_tests {
                 $service,
 
                 test_mysql_auth,
-                test_mysql_crud,
+                // ignore: https://github.com/GreptimeTeam/greptimedb/issues/2445
+                // test_mysql_crud,
                 test_postgres_auth,
-                test_postgres_crud,
-                test_postgres_parameter_inference,
+                // ignore: https://github.com/GreptimeTeam/greptimedb/issues/2445
+                // test_postgres_crud,
+                // ignore: https://github.com/GreptimeTeam/greptimedb/issues/2445
+                // test_postgres_parameter_inference,
             );
         )*
     };
@@ -120,6 +123,7 @@ pub async fn test_mysql_auth(store_type: StorageType) {
     guard.remove_all().await;
 }
 
+#[allow(dead_code)]
 pub async fn test_mysql_crud(store_type: StorageType) {
     common_telemetry::init_default_ut_logging();
 
@@ -266,6 +270,7 @@ pub async fn test_postgres_auth(store_type: StorageType) {
     guard.remove_all().await;
 }
 
+#[allow(dead_code)]
 pub async fn test_postgres_crud(store_type: StorageType) {
     let (addr, mut guard, fe_pg_server) = setup_pg_server(store_type, "sql_crud").await;
 
@@ -342,6 +347,7 @@ pub async fn test_postgres_crud(store_type: StorageType) {
     guard.remove_all().await;
 }
 
+#[allow(dead_code)]
 pub async fn test_postgres_parameter_inference(store_type: StorageType) {
     let (addr, mut guard, fe_pg_server) = setup_pg_server(store_type, "sql_inference").await;
 
@@ -6,19 +6,20 @@ insert into t0 values ('a', 1), (NULL,2), (NULL, 3), (NULL, 4), (NULL, 5), (NULL
 
 Affected Rows: 7
 
+-- SQLNESS SORT_RESULT 2 2
 SELECT * FROM t0 ORDER BY t0.c0 DESC;
 
-+----+-------------------------+
-| c0 | t                       |
-+----+-------------------------+
-|    | 1970-01-01T00:00:00.007 |
-|    | 1970-01-01T00:00:00.006 |
-|    | 1970-01-01T00:00:00.005 |
-|    | 1970-01-01T00:00:00.004 |
-|    | 1970-01-01T00:00:00.003 |
-|    | 1970-01-01T00:00:00.002 |
-| a  | 1970-01-01T00:00:00.001 |
-+----+-------------------------+
++------+-------------------------+
+| c0   | t                       |
++------+-------------------------+
+| null | 1970-01-01T00:00:00.002 |
+| null | 1970-01-01T00:00:00.003 |
+| null | 1970-01-01T00:00:00.004 |
+| null | 1970-01-01T00:00:00.005 |
+| null | 1970-01-01T00:00:00.006 |
+| null | 1970-01-01T00:00:00.007 |
+| a    | 1970-01-01T00:00:00.001 |
++------+-------------------------+
 
 CREATE TABLE test0 (job VARCHAR, name VARCHAR, t TIMESTAMP TIME INDEX);
 
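
Aside on the `-- SQLNESS SORT_RESULT 2 2` directive added above (the "sort unstable result" part of this commit): the six NULL rows tie under `ORDER BY c0 DESC`, so their relative order is not guaranteed and the raw output can flake. My reading of the directive, offered as an assumption rather than the runner's documented contract, is that it sorts the result block while leaving some frame lines at the top and bottom untouched. A hypothetical post-processor in that spirit:

    // Hypothetical SORT_RESULT-style normalizer: keep `head` lines at the top
    // and `tail` lines at the bottom fixed (table frame), sort the rest so an
    // unstable row order always compares equal.
    fn sort_result(output: &str, head: usize, tail: usize) -> String {
        let mut lines: Vec<&str> = output.lines().collect();
        let end = lines.len().saturating_sub(tail);
        if head < end {
            lines[head..end].sort_unstable();
        }
        lines.join("\n")
    }

    fn main() {
        let raw = "+----+\n| c0 |\n+----+\n| b  |\n| a  |\n+----+";
        // head=3 keeps border + header + border; tail=1 keeps the bottom border.
        assert_eq!(
            sort_result(raw, 3, 1),
            "+----+\n| c0 |\n+----+\n| a  |\n| b  |\n+----+"
        );
    }
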
@@ -74,7 +75,7 @@ SELECT * FROM test1 ORDER BY s;
 | 3555555555552 | 1970-01-01T00:00:00.003 |
 | 3555555555553 | 1970-01-01T00:00:00.007 |
 | 355555555556  | 1970-01-01T00:00:00.005 |
-|               | 1970-01-01T00:00:00.002 |
+| null          | 1970-01-01T00:00:00.002 |
 +---------------+-------------------------+
 
 CREATE TABLE test4 (i INT, j INT, t TIMESTAMP TIME INDEX);
@@ -341,7 +342,7 @@ select i, split_part(s, 'b', 1) from test8 order by i;
 | i | split_part(test8.s,Utf8("b"),Int64(1)) |
 +---+----------------------------------------+
 | 1 | cc                                     |
-| 2 |                                        |
+| 2 | null                                   |
 | 3 | a                                      |
 |   | d                                      |
 +---+----------------------------------------+
@@ -2,6 +2,7 @@ create table t0 (c0 varchar, t TIMESTAMP TIME INDEX);
 
 insert into t0 values ('a', 1), (NULL,2), (NULL, 3), (NULL, 4), (NULL, 5), (NULL,6), (NULL,7);
 
+-- SQLNESS SORT_RESULT 2 2
 SELECT * FROM t0 ORDER BY t0.c0 DESC;
 
 CREATE TABLE test0 (job VARCHAR, name VARCHAR, t TIMESTAMP TIME INDEX);
@@ -1,5 +1,6 @@
 node_id = 1
 mode = 'distributed'
+require_lease_before_startup = true
 rpc_addr = '127.0.0.1:4100'
 rpc_hostname = '127.0.0.1'
 rpc_runtime_size = 8
@@ -1,5 +1,6 @@
 mode = 'standalone'
 enable_memory_catalog = false
+require_lease_before_startup = true
 
 [wal]
 file_size = '1GB'
@@ -180,6 +180,8 @@ impl Env {
             "start".to_string(),
             "--use-memory-store".to_string(),
             "true".to_string(),
+            "--enable-region-failover".to_string(),
+            "false".to_string(),
             "--http-addr=127.0.0.1:5001".to_string(),
         ];
         (args, METASRV_ADDR.to_string())