Mirror of https://github.com/GreptimeTeam/greptimedb.git, synced 2025-12-26 16:10:02 +00:00

Compare commits: v0.5.0-nig ... script_wra (27 commits)
| Author | SHA1 | Date |
|---|---|---|
| | 24f5e56196 | |
| | c85d569797 | |
| | e95a8e070c | |
| | b71bf11772 | |
| | ee0a3972fc | |
| | 8fb40c66a4 | |
| | e855f6370e | |
| | fb5dcbc40c | |
| | 0d109436b8 | |
| | cbae03af07 | |
| | 902e6ead60 | |
| | f9e7762c5b | |
| | 0b421b5177 | |
| | aa89d9deef | |
| | b3ffe5cd1e | |
| | d6ef7a75de | |
| | 6344b1e0db | |
| | 7d506b3c5f | |
| | 96e12e9ee5 | |
| | a9db80ab1a | |
| | 5f5dbe0172 | |
| | dac7a41cbd | |
| | de416465a6 | |
| | 58c13739f0 | |
| | 806400caff | |
| | f78dab078c | |
| | 7a14db68a6 | |
118  Cargo.lock  (generated)

@@ -1866,7 +1866,7 @@ dependencies = [
  "datatypes",
  "serde",
  "snafu",
- "sqlparser 0.38.0 (git+https://github.com/GreptimeTeam/sqlparser-rs.git?rev=0fbae07d0c46dc18e3381c406d8b9b8abef6b1fd)",
+ "sqlparser 0.38.0 (git+https://github.com/GreptimeTeam/sqlparser-rs.git?rev=6a93567ae38d42be5c8d08b13c8ff4dde26502ef)",
  "sqlparser_derive 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)",
  "statrs",
  "tokio",
@@ -2797,6 +2797,12 @@ version = "0.1.13"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "56254986775e3233ffa9c4d7d3faaf6d36a2c09d30b20687e9f88bc8bafc16c8"
 
+[[package]]
+name = "difflib"
+version = "0.4.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "6184e33543162437515c2e2b48714794e37845ec9851711914eec9d308f6ebe8"
+
 [[package]]
 name = "digest"
 version = "0.10.7"
@@ -2895,6 +2901,12 @@ version = "0.15.7"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "1aaf95b3e5c8f23aa320147307562d361db0ae0d51242340f558153b4eb2439b"
 
+[[package]]
+name = "downcast"
+version = "0.11.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "1435fa1053d8b2fbbe9be7e97eca7f33d37b28409959813daefc1446a14247f1"
+
 [[package]]
 name = "dyn-clone"
 version = "1.0.16"
@@ -3182,6 +3194,15 @@ dependencies = [
  "miniz_oxide",
 ]
 
+[[package]]
+name = "float-cmp"
+version = "0.9.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "98de4bbd547a563b716d8dfa9aad1cb19bfab00f4fa09a6a4ed21dbcf44ce9c4"
+dependencies = [
+ "num-traits",
+]
+
 [[package]]
 name = "fnv"
 version = "1.0.7"
@@ -3206,6 +3227,12 @@ dependencies = [
  "regex",
 ]
 
+[[package]]
+name = "fragile"
+version = "2.0.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "6c2141d6d6c8512188a7891b4b01590a45f6dac67afb4f255c4124dbb86d4eaa"
+
 [[package]]
 name = "frontend"
 version = "0.4.4"
@@ -3269,7 +3296,7 @@ dependencies = [
  "session",
  "snafu",
  "sql",
- "sqlparser 0.38.0 (git+https://github.com/GreptimeTeam/sqlparser-rs.git?rev=0fbae07d0c46dc18e3381c406d8b9b8abef6b1fd)",
+ "sqlparser 0.38.0 (git+https://github.com/GreptimeTeam/sqlparser-rs.git?rev=6a93567ae38d42be5c8d08b13c8ff4dde26502ef)",
  "store-api",
  "strfmt",
  "substrait 0.4.4",
@@ -3543,7 +3570,7 @@ checksum = "d2fabcfbdc87f4758337ca535fb41a6d701b65693ce38287d856d1674551ec9b"
 [[package]]
 name = "greptime-proto"
 version = "0.1.0"
-source = "git+https://github.com/GreptimeTeam/greptime-proto.git?rev=2b3ae45740a49ec6a0830d71fc09c3093aeb5fe7#2b3ae45740a49ec6a0830d71fc09c3093aeb5fe7"
+source = "git+https://github.com/GreptimeTeam/greptime-proto.git?rev=b1d403088f02136bcebde53d604f491c260ca8e2#b1d403088f02136bcebde53d604f491c260ca8e2"
 dependencies = [
  "prost 0.12.2",
  "serde",
@@ -3913,13 +3940,17 @@ name = "index"
 version = "0.4.4"
 dependencies = [
  "async-trait",
+ "bytemuck",
  "common-base",
  "common-error",
  "common-macro",
  "fst",
  "futures",
  "greptime-proto",
+ "mockall",
  "prost 0.12.2",
+ "regex",
+ "regex-automata 0.1.10",
  "snafu",
  "tokio",
  "tokio-util",
@@ -4871,6 +4902,33 @@ dependencies = [
  "uuid",
 ]
 
+[[package]]
+name = "mockall"
+version = "0.11.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "4c84490118f2ee2d74570d114f3d0493cbf02790df303d2707606c3e14e07c96"
+dependencies = [
+ "cfg-if 1.0.0",
+ "downcast",
+ "fragile",
+ "lazy_static",
+ "mockall_derive",
+ "predicates",
+ "predicates-tree",
+]
+
+[[package]]
+name = "mockall_derive"
+version = "0.11.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "22ce75669015c4f47b289fd4d4f56e894e4c96003ffdf3ac51313126f94c6cbb"
+dependencies = [
+ "cfg-if 1.0.0",
+ "proc-macro2",
+ "quote",
+ "syn 1.0.109",
+]
+
 [[package]]
 name = "moka"
 version = "0.12.1"
@@ -5099,6 +5157,12 @@ dependencies = [
  "minimal-lexical",
 ]
 
+[[package]]
+name = "normalize-line-endings"
+version = "0.3.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "61807f77802ff30975e01f4f071c8ba10c022052f98b3294119f3e615d13e5be"
+
 [[package]]
 name = "nu-ansi-term"
 version = "0.46.0"
@@ -5349,7 +5413,6 @@ dependencies = [
  "parking_lot 0.12.1",
  "percent-encoding",
  "pin-project",
- "prometheus",
  "quick-xml 0.29.0",
  "reqsign",
  "reqwest",
@@ -5562,7 +5625,7 @@ dependencies = [
  "session",
  "snafu",
  "sql",
- "sqlparser 0.38.0 (git+https://github.com/GreptimeTeam/sqlparser-rs.git?rev=0fbae07d0c46dc18e3381c406d8b9b8abef6b1fd)",
+ "sqlparser 0.38.0 (git+https://github.com/GreptimeTeam/sqlparser-rs.git?rev=6a93567ae38d42be5c8d08b13c8ff4dde26502ef)",
  "store-api",
  "substrait 0.4.4",
  "table",
@@ -6223,6 +6286,36 @@ version = "0.1.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "925383efa346730478fb4838dbe9137d2a47675ad789c546d150a6e1dd4ab31c"
 
+[[package]]
+name = "predicates"
+version = "2.1.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "59230a63c37f3e18569bdb90e4a89cbf5bf8b06fea0b84e65ea10cc4df47addd"
+dependencies = [
+ "difflib",
+ "float-cmp",
+ "itertools 0.10.5",
+ "normalize-line-endings",
+ "predicates-core",
+ "regex",
+]
+
+[[package]]
+name = "predicates-core"
+version = "1.0.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "b794032607612e7abeb4db69adb4e33590fa6cf1149e95fd7cb00e634b92f174"
+
+[[package]]
+name = "predicates-tree"
+version = "1.0.9"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "368ba315fb8c5052ab692e68a0eefec6ec57b23a36959c14496f0b0df2c0cecf"
+dependencies = [
+ "predicates-core",
+ "termtree",
+]
+
 [[package]]
 name = "prettydiff"
 version = "0.6.4"
@@ -6953,6 +7046,7 @@ version = "0.1.10"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "6c230d73fb8d8c1b9c0b3135c5142a8acee3a0558fb8db5cf1cb65f8d7862132"
 dependencies = [
+ "fst",
  "regex-syntax 0.6.29",
 ]
 
@@ -8562,7 +8656,7 @@ dependencies = [
  "once_cell",
  "regex",
  "snafu",
- "sqlparser 0.38.0 (git+https://github.com/GreptimeTeam/sqlparser-rs.git?rev=0fbae07d0c46dc18e3381c406d8b9b8abef6b1fd)",
+ "sqlparser 0.38.0 (git+https://github.com/GreptimeTeam/sqlparser-rs.git?rev=6a93567ae38d42be5c8d08b13c8ff4dde26502ef)",
  "sqlparser_derive 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)",
  "table",
 ]
@@ -8625,13 +8719,13 @@ dependencies = [
 [[package]]
 name = "sqlparser"
 version = "0.38.0"
-source = "git+https://github.com/GreptimeTeam/sqlparser-rs.git?rev=0fbae07d0c46dc18e3381c406d8b9b8abef6b1fd#0fbae07d0c46dc18e3381c406d8b9b8abef6b1fd"
+source = "git+https://github.com/GreptimeTeam/sqlparser-rs.git?rev=6a93567ae38d42be5c8d08b13c8ff4dde26502ef#6a93567ae38d42be5c8d08b13c8ff4dde26502ef"
 dependencies = [
  "lazy_static",
  "log",
  "regex",
  "sqlparser 0.38.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "sqlparser_derive 0.1.1 (git+https://github.com/GreptimeTeam/sqlparser-rs.git?rev=0fbae07d0c46dc18e3381c406d8b9b8abef6b1fd)",
+ "sqlparser_derive 0.1.1 (git+https://github.com/GreptimeTeam/sqlparser-rs.git?rev=6a93567ae38d42be5c8d08b13c8ff4dde26502ef)",
 ]
 
 [[package]]
@@ -8648,7 +8742,7 @@ dependencies = [
 [[package]]
 name = "sqlparser_derive"
 version = "0.1.1"
-source = "git+https://github.com/GreptimeTeam/sqlparser-rs.git?rev=0fbae07d0c46dc18e3381c406d8b9b8abef6b1fd#0fbae07d0c46dc18e3381c406d8b9b8abef6b1fd"
+source = "git+https://github.com/GreptimeTeam/sqlparser-rs.git?rev=6a93567ae38d42be5c8d08b13c8ff4dde26502ef#6a93567ae38d42be5c8d08b13c8ff4dde26502ef"
 dependencies = [
  "proc-macro2",
  "quote",
@@ -9192,6 +9286,12 @@ dependencies = [
  "libc",
 ]
 
+[[package]]
+name = "termtree"
+version = "0.4.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "3369f5ac52d5eb6ab48c6b4ffdc8efbcad6b89c765749064ba298f2c68a16a76"
+
 [[package]]
 name = "tests-integration"
 version = "0.4.4"
Cargo.toml

@@ -74,6 +74,7 @@ async-trait = "0.1"
 base64 = "0.21"
 bigdecimal = "0.4.2"
 bitflags = "2.4.1"
+bytemuck = "1.12"
 chrono = { version = "0.4", features = ["serde"] }
 datafusion = { git = "https://github.com/apache/arrow-datafusion.git", rev = "26e43acac3a96cec8dd4c8365f22dfb1a84306e9" }
 datafusion-common = { git = "https://github.com/apache/arrow-datafusion.git", rev = "26e43acac3a96cec8dd4c8365f22dfb1a84306e9" }
@@ -87,11 +88,12 @@ etcd-client = "0.12"
 fst = "0.4.7"
 futures = "0.3"
 futures-util = "0.3"
-greptime-proto = { git = "https://github.com/GreptimeTeam/greptime-proto.git", rev = "2b3ae45740a49ec6a0830d71fc09c3093aeb5fe7" }
+greptime-proto = { git = "https://github.com/GreptimeTeam/greptime-proto.git", rev = "b1d403088f02136bcebde53d604f491c260ca8e2" }
 humantime-serde = "1.1"
 itertools = "0.10"
 lazy_static = "1.4"
 meter-core = { git = "https://github.com/GreptimeTeam/greptime-meter.git", rev = "abbd357c1e193cd270ea65ee7652334a150b628f" }
+mockall = "0.11.4"
 moka = "0.12"
 once_cell = "1.18"
 opentelemetry-proto = { git = "https://github.com/waynexia/opentelemetry-rust.git", rev = "33841b38dda79b15f2024952be5f32533325ca02", features = [
@@ -107,6 +109,7 @@ prost = "0.12"
 raft-engine = { git = "https://github.com/tikv/raft-engine.git", rev = "22dfb426cd994602b57725ef080287d3e53db479" }
 rand = "0.8"
 regex = "1.8"
+regex-automata = { version = "0.1", features = ["transducer"] }
 reqwest = { version = "0.11", default-features = false, features = [
     "json",
     "rustls-tls-native-roots",
@@ -118,7 +121,7 @@ serde_json = "1.0"
 smallvec = "1"
 snafu = "0.7"
 # on branch v0.38.x
-sqlparser = { git = "https://github.com/GreptimeTeam/sqlparser-rs.git", rev = "0fbae07d0c46dc18e3381c406d8b9b8abef6b1fd", features = [
+sqlparser = { git = "https://github.com/GreptimeTeam/sqlparser-rs.git", rev = "6a93567ae38d42be5c8d08b13c8ff4dde26502ef", features = [
     "visitor",
 ] }
 strum = { version = "0.25", features = ["derive"] }
157  scripts/run-pyo3-greptime.sh  (new executable file)
@@ -0,0 +1,157 @@
+#!/bin/bash
+
+# This script configures the environment to run 'greptime' with the required Python version
+
+# This script should be compatible with both Linux and macOS
+OS_TYPE="$(uname)"
+readonly OS_TYPE
+
+check_command_existence() {
+    command -v "$1" &> /dev/null
+}
+
+get_python_version() {
+    case "$OS_TYPE" in
+        Darwin)
+            otool -L "$GREPTIME_BIN_PATH" | grep -o 'Python.framework/Versions/3.[0-9]\+/Python' | grep -o '3.[0-9]\+'
+            ;;
+        Linux)
+            ldd "$GREPTIME_BIN_PATH" | grep -o 'libpython3\.[0-9]\+' | grep -o '3\.[0-9]\+'
+            ;;
+        *)
+            echo "Unsupported OS type: $OS_TYPE"
+            exit 1
+            ;;
+    esac
+}
+
+setup_virtualenv() {
+    local req_py_version="$1"
+    local env_name="GreptimeTmpVenv$req_py_version"
+    virtualenv --python=python"$req_py_version" "$env_name"
+    source "$env_name/bin/activate"
+}
+
+setup_conda_env() {
+    local req_py_version="$1"
+    local conda_base
+    conda_base=$(conda info --base) || { echo "Error obtaining conda base directory"; exit 1; }
+    . "$conda_base/etc/profile.d/conda.sh"
+
+    if ! conda list --name "GreptimeTmpPyO3Env$req_py_version" &> /dev/null; then
+        conda create --yes --name "GreptimeTmpPyO3Env$req_py_version" python="$req_py_version"
+    fi
+
+    conda activate "GreptimeTmpPyO3Env$req_py_version"
+}
+
+GREPTIME_BIN_PATH="./greptime"
+YES=""
+
+usage() {
+    echo "Usage:"
+    echo "  $0 -f <greptime-bin-path> [-y] <args-pass-to-greptime>"
+    echo "  Set \$PY_ENV_MAN to 1 to use virtualenv, 2 to use conda"
+    exit 1
+}
+
+parse_args() {
+    while getopts ":f:y" opt; do
+        case $opt in
+            f)
+                GREPTIME_BIN_PATH=$OPTARG
+                ;;
+            y)
+                YES="yes"
+                ;;
+            \?)
+                echo "Invalid option: -$OPTARG" >&2
+                exit 1
+                ;;
+            :)
+                echo "Option -$OPTARG requires an argument." >&2
+                exit 1
+                ;;
+        esac
+    done
+
+    shift $((OPTIND - 1))
+
+    REST_ARGS=$*
+
+    if [ -z "$GREPTIME_BIN_PATH" ]; then
+        usage
+    fi
+
+    echo "Run greptime binary at '$GREPTIME_BIN_PATH' (yes=$YES)..."
+    echo "The args passed to greptime: '$REST_ARGS'"
+}
+
+# Set the library path and pass all arguments to greptime to run it
+execute_greptime() {
+    if [[ "$OS_TYPE" == "Darwin" ]]; then
+        DYLD_LIBRARY_PATH="${CONDA_PREFIX:-$PREFIX}/lib:${LD_LIBRARY_PATH:-}" "$GREPTIME_BIN_PATH" "$@"
+    elif [[ "$OS_TYPE" == "Linux" ]]; then
+        LD_LIBRARY_PATH="${CONDA_PREFIX:-$PREFIX}/lib:${LD_LIBRARY_PATH:-}" "$GREPTIME_BIN_PATH" "$@"
+    fi
+}
+
+main() {
+    parse_args "$@"
+
+    local req_py_version
+    req_py_version=$(get_python_version)
+    readonly req_py_version
+
+    if [[ -z "$req_py_version" ]]; then
+        if "$GREPTIME_BIN_PATH" --version &> /dev/null; then
+            $GREPTIME_BIN_PATH $REST_ARGS
+        else
+            echo "The 'greptime' binary is not valid or encountered an error."
+            "$GREPTIME_BIN_PATH" --version
+            exit 1
+        fi
+        return
+    fi
+
+    echo "The required version of the Python shared library is $req_py_version"
+
+    # If YES is set, assign it to yn; otherwise read from stdin
+    if [[ -z "$YES" ]]; then
+        echo "This script will now try to install or find the correct Python version"
+        echo "Do you want to continue? (yes/no): "
+        read -r yn
+    else
+        yn="$YES"
+    fi
+    case $yn in
+        [Yy]* ) ;;
+        [Nn]* ) exit;;
+        * ) echo "Please answer yes or no."; exit 1;;
+    esac
+
+    # If PY_ENV_MAN is set, assign it to option; otherwise read from stdin
+    if [[ -z "$PY_ENV_MAN" ]]; then
+        echo "Do you want to use virtualenv or conda? (virtualenv(1)/conda(2)): "
+        read -r option
+    else
+        option="$PY_ENV_MAN"
+    fi
+
+    case $option in
+        1)
+            setup_virtualenv "$req_py_version"
+            ;;
+        2)
+            setup_conda_env "$req_py_version"
+            ;;
+        *)
+            echo "Please input 1 or 2"; exit 1
+            ;;
+    esac
+
+    execute_greptime $REST_ARGS
+}
+
+main "$@"
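Assuming the script lands in `scripts/`, a typical invocation (the arguments here are illustrative) would be `PY_ENV_MAN=2 ./scripts/run-pyo3-greptime.sh -f ./greptime -y standalone start`: the script inspects which `libpython` the binary links against, prepares a matching conda environment, and re-launches the binary with the remaining arguments and the shared-library path set.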
@@ -86,6 +86,10 @@ impl DropTableProcedure {
             ))
             .await?;
 
+        if !exist && self.data.task.drop_if_exists {
+            return Ok(Status::Done);
+        }
+
         ensure!(
             exist,
             error::TableNotFoundSnafu {
@@ -33,5 +33,8 @@ pub const DATANODE_LEASE_SECS: u64 = REGION_LEASE_SECS;
 /// The lease seconds of metasrv leader.
 pub const META_LEASE_SECS: u64 = 3;
 
-// In a lease, there are two opportunities for renewal.
+/// In a lease, there are two opportunities for renewal.
 pub const META_KEEP_ALIVE_INTERVAL_SECS: u64 = META_LEASE_SECS / 2;
+
+/// The default mailbox round-trip timeout.
+pub const MAILBOX_RTT_SECS: u64 = 1;
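A quick sanity check of the renewal arithmetic in the hunk above (a minimal sketch; the constants are copied from the diff):

```rust
// Constants copied from the diff above; `main` only verifies the arithmetic.
const META_LEASE_SECS: u64 = 3;
const META_KEEP_ALIVE_INTERVAL_SECS: u64 = META_LEASE_SECS / 2;

fn main() {
    // u64 division truncates: 3 / 2 == 1, so a keep-alive is sent every
    // second, giving two renewal opportunities (at ~1s and ~2s) within a
    // 3-second lease, which is what the doc comment promises.
    assert_eq!(META_KEEP_ALIVE_INTERVAL_SECS, 1);
}
```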
@@ -37,7 +37,7 @@ pub struct HeartbeatResponseHandlerContext {
 /// HandleControl
 ///
 /// Controls process of handling heartbeat response.
-#[derive(PartialEq)]
+#[derive(Debug, PartialEq)]
 pub enum HandleControl {
     Continue,
     Done,
@@ -30,8 +30,8 @@ pub struct MessageMeta {
     pub from: String,
 }
 
-#[cfg(test)]
 impl MessageMeta {
+    #[cfg(any(test, feature = "testing"))]
     pub fn new_test(id: u64, subject: &str, to: &str, from: &str) -> Self {
         MessageMeta {
             id,
@@ -111,6 +111,7 @@ impl OpenRegion {
 
+/// The instruction of downgrading leader region.
 #[derive(Debug, Clone, Serialize, Deserialize)]
 pub struct DowngradeRegion {
     /// The [RegionId].
     pub region_id: RegionId,
 }
@@ -120,20 +121,67 @@ impl Display for DowngradeRegion {
     }
 }
 
+/// Upgrades a follower region to leader region.
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub struct UpgradeRegion {
+    /// The [RegionId].
+    pub region_id: RegionId,
+    /// The `last_entry_id` of old leader region.
+    pub last_entry_id: Option<u64>,
+    /// The second of waiting for a wal replay.
+    ///
+    /// `None` stands for no wait,
+    /// it's helpful to verify whether the leader region is ready.
+    pub wait_for_replay_secs: Option<u64>,
+}
+
 #[derive(Debug, Clone, Serialize, Deserialize, Display)]
 pub enum Instruction {
+    /// Opens a region.
+    ///
+    /// - Returns true if a specified region exists.
     OpenRegion(OpenRegion),
+    /// Closes a region.
+    ///
+    /// - Returns true if a specified region does not exist.
     CloseRegion(RegionIdent),
+    /// Upgrades a region.
+    UpgradeRegion(UpgradeRegion),
+    /// Downgrades a region.
     DowngradeRegion(DowngradeRegion),
+    /// Invalidates a specified table cache.
     InvalidateTableIdCache(TableId),
+    /// Invalidates a specified table name index cache.
     InvalidateTableNameCache(TableName),
 }
 
+/// The reply of [UpgradeRegion].
+#[derive(Debug, Serialize, Deserialize, PartialEq, Eq, Clone)]
+pub struct UpgradeRegionReply {
+    /// Returns true if `last_entry_id` has been replayed to the latest.
+    pub ready: bool,
+    /// Indicates whether the region exists.
+    pub exists: bool,
+    /// Returns error if any.
+    pub error: Option<String>,
+}
+
+impl Display for UpgradeRegionReply {
+    fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
+        write!(
+            f,
+            "(ready={}, exists={}, error={:?})",
+            self.ready, self.exists, self.error
+        )
+    }
+}
+
 #[derive(Debug, Serialize, Deserialize, PartialEq, Eq, Clone)]
 #[serde(tag = "type", rename_all = "snake_case")]
 pub enum InstructionReply {
     OpenRegion(SimpleReply),
     CloseRegion(SimpleReply),
+    UpgradeRegion(UpgradeRegionReply),
     InvalidateTableCache(SimpleReply),
     DowngradeRegion(DowngradeRegionReply),
 }
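The `#[serde(tag = "type", rename_all = "snake_case")]` attribute makes replies self-describing on the wire: the variant name becomes a `"type"` field and the inner struct's fields are flattened beside it. A minimal sketch of the resulting JSON shape (the structs below are simplified stand-ins, not the crate's actual types):

```rust
use serde::Serialize;

#[derive(Serialize)]
struct SimpleReply {
    result: bool,
    error: Option<String>,
}

// Internally tagged: the variant name is emitted as a "type" field, snake_cased.
#[derive(Serialize)]
#[serde(tag = "type", rename_all = "snake_case")]
enum InstructionReply {
    OpenRegion(SimpleReply),
    CloseRegion(SimpleReply),
}

fn main() {
    let reply = InstructionReply::OpenRegion(SimpleReply {
        result: true,
        error: None,
    });
    // Prints: {"type":"open_region","result":true,"error":null}
    println!("{}", serde_json::to_string(&reply).unwrap());
}
```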
@@ -143,6 +191,7 @@ impl Display for InstructionReply {
         match self {
             Self::OpenRegion(reply) => write!(f, "InstructionReply::OpenRegion({})", reply),
             Self::CloseRegion(reply) => write!(f, "InstructionReply::CloseRegion({})", reply),
+            Self::UpgradeRegion(reply) => write!(f, "InstructionReply::UpgradeRegion({})", reply),
             Self::InvalidateTableCache(reply) => {
                 write!(f, "InstructionReply::Invalidate({})", reply)
             }
@@ -54,12 +54,14 @@ impl DdlTask {
         schema: String,
         table: String,
         table_id: TableId,
+        drop_if_exists: bool,
     ) -> Self {
         DdlTask::DropTable(DropTableTask {
             catalog,
             schema,
             table,
             table_id,
+            drop_if_exists,
         })
     }
@@ -118,6 +120,7 @@ impl TryFrom<SubmitDdlTaskRequest> for PbSubmitDdlTaskRequest {
                     schema_name: task.schema,
                     table_name: task.table,
                     table_id: Some(api::v1::TableId { id: task.table_id }),
+                    drop_if_exists: task.drop_if_exists,
                 }),
             }),
             DdlTask::AlterTable(task) => Task::AlterTableTask(PbAlterTableTask {
@@ -176,6 +179,8 @@ pub struct DropTableTask {
     pub schema: String,
     pub table: String,
     pub table_id: TableId,
+    #[serde(default)]
+    pub drop_if_exists: bool,
 }
 
 impl DropTableTask {
@@ -214,6 +219,7 @@ impl TryFrom<PbDropTableTask> for DropTableTask {
                     err_msg: "expected table_id",
                 })?
                 .id,
+            drop_if_exists: drop_table.drop_if_exists,
         })
     }
 }
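The `#[serde(default)]` on `drop_if_exists` keeps deserialization backward compatible: a task serialized by an older node, which lacks the field, still decodes, with the flag defaulting to `false`. A minimal sketch with a hypothetical stand-in struct:

```rust
use serde::Deserialize;

#[derive(Deserialize, Debug)]
struct DropTableTask {
    table: String,
    #[serde(default)] // absent in payloads from older versions -> false
    drop_if_exists: bool,
}

fn main() {
    // An old payload without the field still deserializes.
    let old: DropTableTask = serde_json::from_str(r#"{"table":"t"}"#).unwrap();
    assert!(!old.drop_if_exists);

    let new: DropTableTask =
        serde_json::from_str(r#"{"table":"t","drop_if_exists":true}"#).unwrap();
    assert!(new.drop_if_exists);
}
```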
@@ -4,9 +4,6 @@ version.workspace = true
 edition.workspace = true
 license.workspace = true
 
-[features]
-testing = []
-
 [dependencies]
 api.workspace = true
 arrow-flight.workspace = true
@@ -77,7 +74,9 @@ uuid.workspace = true
 [dev-dependencies]
 axum-test-helper = { git = "https://github.com/sunng87/axum-test-helper.git", branch = "patch-1" }
 client.workspace = true
+common-meta = { workspace = true, features = ["testing"] }
 common-query.workspace = true
 common-test-util.workspace = true
 datafusion-common.workspace = true
+mito2 = { workspace = true, features = ["test"] }
 session.workspace = true
@@ -13,123 +13,129 @@
 // limitations under the License.
 
 use async_trait::async_trait;
-use common_error::ext::ErrorExt;
-use common_error::status_code::StatusCode;
 use common_meta::error::{InvalidHeartbeatResponseSnafu, Result as MetaResult};
 use common_meta::heartbeat::handler::{
     HandleControl, HeartbeatResponseHandler, HeartbeatResponseHandlerContext,
 };
-use common_meta::instruction::{Instruction, InstructionReply, OpenRegion, SimpleReply};
+use common_meta::instruction::{
+    DowngradeRegion, DowngradeRegionReply, Instruction, InstructionReply, OpenRegion, SimpleReply,
+};
 use common_meta::RegionIdent;
-use common_query::Output;
 use common_telemetry::error;
+use futures::future::BoxFuture;
 use snafu::OptionExt;
 use store_api::path_utils::region_dir;
+use store_api::region_engine::SetReadonlyResponse;
 use store_api::region_request::{RegionCloseRequest, RegionOpenRequest, RegionRequest};
 use store_api::storage::RegionId;
 
-use crate::error::Result;
+use crate::error;
 use crate::region_server::RegionServer;
 
 /// Handler for [Instruction::OpenRegion] and [Instruction::CloseRegion].
 #[derive(Clone)]
 pub struct RegionHeartbeatResponseHandler {
     region_server: RegionServer,
 }
 
+/// Handler of the instruction.
+pub type InstructionHandler =
+    Box<dyn FnOnce(RegionServer) -> BoxFuture<'static, InstructionReply> + Send>;
+
 impl RegionHeartbeatResponseHandler {
     /// Returns the [RegionHeartbeatResponseHandler].
     pub fn new(region_server: RegionServer) -> Self {
         Self { region_server }
     }
 
-    fn instruction_to_request(instruction: Instruction) -> MetaResult<(RegionId, RegionRequest)> {
+    /// Builds the [InstructionHandler].
+    fn build_handler(instruction: Instruction) -> MetaResult<InstructionHandler> {
         match instruction {
             Instruction::OpenRegion(OpenRegion {
                 region_ident,
                 region_storage_path,
                 options,
-            }) => {
-                let region_id = Self::region_ident_to_region_id(&region_ident);
-                let open_region_req = RegionRequest::Open(RegionOpenRequest {
-                    engine: region_ident.engine,
-                    region_dir: region_dir(&region_storage_path, region_id),
-                    options,
-                });
-                Ok((region_id, open_region_req))
-            }
+            }) => Ok(Box::new(|region_server| {
+                Box::pin(async move {
+                    let region_id = Self::region_ident_to_region_id(&region_ident);
+                    let request = RegionRequest::Open(RegionOpenRequest {
+                        engine: region_ident.engine,
+                        region_dir: region_dir(&region_storage_path, region_id),
+                        options,
+                    });
+                    let result = region_server.handle_request(region_id, request).await;
+
+                    let success = result.is_ok();
+                    let error = result.as_ref().map_err(|e| e.to_string()).err();
+
+                    InstructionReply::OpenRegion(SimpleReply {
+                        result: success,
+                        error,
+                    })
+                })
+            })),
+            Instruction::CloseRegion(region_ident) => Ok(Box::new(|region_server| {
+                Box::pin(async move {
+                    let region_id = Self::region_ident_to_region_id(&region_ident);
+                    let request = RegionRequest::Close(RegionCloseRequest {});
+                    let result = region_server.handle_request(region_id, request).await;
+
+                    match result {
+                        Ok(_) => InstructionReply::CloseRegion(SimpleReply {
+                            result: true,
+                            error: None,
+                        }),
+                        Err(error::Error::RegionNotFound { .. }) => {
+                            InstructionReply::CloseRegion(SimpleReply {
+                                result: true,
+                                error: None,
+                            })
+                        }
+                        Err(err) => InstructionReply::CloseRegion(SimpleReply {
+                            result: false,
+                            error: Some(err.to_string()),
+                        }),
+                    }
+                })
+            })),
+            Instruction::DowngradeRegion(DowngradeRegion { region_id }) => {
+                Ok(Box::new(move |region_server| {
+                    Box::pin(async move {
+                        match region_server.set_readonly_gracefully(region_id).await {
+                            Ok(SetReadonlyResponse::Success { last_entry_id }) => {
+                                InstructionReply::DowngradeRegion(DowngradeRegionReply {
+                                    last_entry_id,
+                                    exists: true,
+                                    error: None,
+                                })
+                            }
+                            Ok(SetReadonlyResponse::NotFound) => {
+                                InstructionReply::DowngradeRegion(DowngradeRegionReply {
+                                    last_entry_id: None,
+                                    exists: false,
+                                    error: None,
+                                })
+                            }
+                            Err(err) => InstructionReply::DowngradeRegion(DowngradeRegionReply {
+                                last_entry_id: None,
+                                exists: false,
+                                error: Some(err.to_string()),
+                            }),
+                        }
+                    })
+                }))
+            }
-            Instruction::CloseRegion(region_ident) => {
-                let region_id = Self::region_ident_to_region_id(&region_ident);
-                let close_region_req = RegionRequest::Close(RegionCloseRequest {});
-                Ok((region_id, close_region_req))
-            }
+            Instruction::UpgradeRegion(_) => {
+                todo!()
+            }
             Instruction::InvalidateTableIdCache(_) | Instruction::InvalidateTableNameCache(_) => {
                 InvalidHeartbeatResponseSnafu.fail()
             }
-            Instruction::DowngradeRegion(_) => {
-                // TODO(weny): add it later.
-                todo!()
-            }
         }
     }
 
     fn region_ident_to_region_id(region_ident: &RegionIdent) -> RegionId {
         RegionId::new(region_ident.table_id, region_ident.region_number)
     }
-
-    fn reply_template_from_instruction(instruction: &Instruction) -> InstructionReply {
-        match instruction {
-            Instruction::OpenRegion(_) => InstructionReply::OpenRegion(SimpleReply {
-                result: false,
-                error: None,
-            }),
-            Instruction::CloseRegion(_) => InstructionReply::CloseRegion(SimpleReply {
-                result: false,
-                error: None,
-            }),
-            Instruction::InvalidateTableIdCache(_) | Instruction::InvalidateTableNameCache(_) => {
-                InstructionReply::InvalidateTableCache(SimpleReply {
-                    result: false,
-                    error: None,
-                })
-            }
-            Instruction::DowngradeRegion(_) => {
-                // TODO(weny): add it later.
-                todo!()
-            }
-        }
-    }
-
-    fn fill_reply(mut template: InstructionReply, result: Result<Output>) -> InstructionReply {
-        let success = result.is_ok();
-        let error = result.as_ref().map_err(|e| e.to_string()).err();
-        match &mut template {
-            InstructionReply::OpenRegion(reply) => {
-                reply.result = success;
-                reply.error = error;
-            }
-            InstructionReply::CloseRegion(reply) => match result {
-                Err(e) => {
-                    if e.status_code() == StatusCode::RegionNotFound {
-                        reply.result = true;
-                    }
-                }
-                _ => {
-                    reply.result = success;
-                    reply.error = error;
-                }
-            },
-            InstructionReply::InvalidateTableCache(reply) => {
-                reply.result = success;
-                reply.error = error;
-            }
-            InstructionReply::DowngradeRegion(_) => {
-                // TODO(weny): add it later.
-                todo!()
-            }
-        }
-
-        template
-    }
 }
 
 #[async_trait]
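The refactor replaces the request-template pair with a boxed `FnOnce` returning a `BoxFuture`, so each instruction carries its entire reply-building logic into the spawned task. A stripped-down sketch of that pattern (the names here are illustrative, not the crate's):

```rust
use futures::future::BoxFuture;

// A one-shot async handler: it consumes its captured state, so it can be
// moved into a background task and invoked exactly once.
type Handler = Box<dyn FnOnce(u64) -> BoxFuture<'static, String> + Send>;

fn build_handler(region_id: u64) -> Handler {
    Box::new(move |server_id| {
        Box::pin(async move {
            // ... the async region operation would run here ...
            format!("server {server_id} handled region {region_id}")
        })
    })
}

#[tokio::main]
async fn main() {
    let handler = build_handler(42);
    // In the real code this happens inside common_runtime::spawn_bg.
    let reply = handler(7).await;
    assert_eq!(reply, "server 7 handled region 42");
}
```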
@@ -137,7 +143,9 @@ impl HeartbeatResponseHandler for RegionHeartbeatResponseHandler {
     fn is_acceptable(&self, ctx: &HeartbeatResponseHandlerContext) -> bool {
         matches!(
             ctx.incoming_message.as_ref(),
-            Some((_, Instruction::OpenRegion { .. })) | Some((_, Instruction::CloseRegion { .. }))
+            Some((_, Instruction::OpenRegion { .. }))
+                | Some((_, Instruction::CloseRegion { .. }))
+                | Some((_, Instruction::DowngradeRegion { .. }))
         )
     }
@@ -149,15 +157,11 @@ impl HeartbeatResponseHandler for RegionHeartbeatResponseHandler {
 
         let mailbox = ctx.mailbox.clone();
         let region_server = self.region_server.clone();
-        let reply_template = Self::reply_template_from_instruction(&instruction);
-        let (region_id, region_req) = Self::instruction_to_request(instruction)?;
+        let handler = Self::build_handler(instruction)?;
         let _handle = common_runtime::spawn_bg(async move {
-            let result = region_server.handle_request(region_id, region_req).await;
+            let reply = handler(region_server).await;
 
-            if let Err(e) = mailbox
-                .send((meta, Self::fill_reply(reply_template, result)))
-                .await
-            {
+            if let Err(e) = mailbox.send((meta, reply)).await {
                 error!(e; "Failed to send reply to mailbox");
             }
         });
@@ -165,3 +169,266 @@ impl HeartbeatResponseHandler for RegionHeartbeatResponseHandler {
         Ok(HandleControl::Done)
     }
 }
+
+#[cfg(test)]
+mod tests {
+    use std::assert_matches::assert_matches;
+    use std::collections::HashMap;
+    use std::sync::Arc;
+
+    use common_meta::heartbeat::mailbox::{
+        HeartbeatMailbox, IncomingMessage, MailboxRef, MessageMeta,
+    };
+    use mito2::config::MitoConfig;
+    use mito2::engine::MITO_ENGINE_NAME;
+    use mito2::test_util::{CreateRequestBuilder, TestEnv};
+    use store_api::region_request::RegionRequest;
+    use store_api::storage::RegionId;
+    use tokio::sync::mpsc::{self, Receiver};
+
+    use super::*;
+    use crate::error;
+    use crate::tests::mock_region_server;
+
+    pub struct HeartbeatResponseTestEnv {
+        mailbox: MailboxRef,
+        receiver: Receiver<(MessageMeta, InstructionReply)>,
+    }
+
+    impl HeartbeatResponseTestEnv {
+        pub fn new() -> Self {
+            let (tx, rx) = mpsc::channel(8);
+            let mailbox = Arc::new(HeartbeatMailbox::new(tx));
+
+            HeartbeatResponseTestEnv {
+                mailbox,
+                receiver: rx,
+            }
+        }
+
+        pub fn create_handler_ctx(
+            &self,
+            incoming_message: IncomingMessage,
+        ) -> HeartbeatResponseHandlerContext {
+            HeartbeatResponseHandlerContext {
+                mailbox: self.mailbox.clone(),
+                response: Default::default(),
+                incoming_message: Some(incoming_message),
+            }
+        }
+    }
+
+    fn close_region_instruction(region_id: RegionId) -> Instruction {
+        Instruction::CloseRegion(RegionIdent {
+            table_id: region_id.table_id(),
+            region_number: region_id.region_number(),
+            cluster_id: 1,
+            datanode_id: 2,
+            engine: MITO_ENGINE_NAME.to_string(),
+        })
+    }
+
+    fn open_region_instruction(region_id: RegionId, path: &str) -> Instruction {
+        Instruction::OpenRegion(OpenRegion::new(
+            RegionIdent {
+                table_id: region_id.table_id(),
+                region_number: region_id.region_number(),
+                cluster_id: 1,
+                datanode_id: 2,
+                engine: MITO_ENGINE_NAME.to_string(),
+            },
+            path,
+            HashMap::new(),
+        ))
+    }
+
+    #[tokio::test]
+    async fn test_close_region() {
+        common_telemetry::init_default_ut_logging();
+
+        let mut region_server = mock_region_server();
+        let heartbeat_handler = RegionHeartbeatResponseHandler::new(region_server.clone());
+
+        let mut engine_env = TestEnv::with_prefix("close-region");
+        let engine = engine_env.create_engine(MitoConfig::default()).await;
+        region_server.register_engine(Arc::new(engine));
+        let region_id = RegionId::new(1024, 1);
+
+        let builder = CreateRequestBuilder::new();
+        let create_req = builder.build();
+        region_server
+            .handle_request(region_id, RegionRequest::Create(create_req))
+            .await
+            .unwrap();
+
+        let mut heartbeat_env = HeartbeatResponseTestEnv::new();
+
+        // Should be ok, if we try to close it twice.
+        for _ in 0..2 {
+            let meta = MessageMeta::new_test(1, "test", "dn-1", "me-0");
+            let instruction = close_region_instruction(region_id);
+
+            let mut ctx = heartbeat_env.create_handler_ctx((meta, instruction));
+            let control = heartbeat_handler.handle(&mut ctx).await.unwrap();
+            assert_matches!(control, HandleControl::Done);
+
+            let (_, reply) = heartbeat_env.receiver.recv().await.unwrap();
+
+            if let InstructionReply::CloseRegion(reply) = reply {
+                assert!(reply.result);
+                assert!(reply.error.is_none());
+            } else {
+                unreachable!()
+            }
+
+            assert_matches!(
+                region_server.set_writable(region_id, true).unwrap_err(),
+                error::Error::RegionNotFound { .. }
+            );
+        }
+    }
+
+    #[tokio::test]
+    async fn test_open_region_ok() {
+        common_telemetry::init_default_ut_logging();
+
+        let mut region_server = mock_region_server();
+        let heartbeat_handler = RegionHeartbeatResponseHandler::new(region_server.clone());
+
+        let mut engine_env = TestEnv::with_prefix("open-region");
+        let engine = engine_env.create_engine(MitoConfig::default()).await;
+        region_server.register_engine(Arc::new(engine));
+        let region_id = RegionId::new(1024, 1);
+
+        let builder = CreateRequestBuilder::new();
+        let mut create_req = builder.build();
+        let storage_path = "test";
+        create_req.region_dir = region_dir(storage_path, region_id);
+
+        region_server
+            .handle_request(region_id, RegionRequest::Create(create_req))
+            .await
+            .unwrap();
+
+        region_server
+            .handle_request(region_id, RegionRequest::Close(RegionCloseRequest {}))
+            .await
+            .unwrap();
+        let mut heartbeat_env = HeartbeatResponseTestEnv::new();
+
+        // Should be ok, if we try to open it twice.
+        for _ in 0..2 {
+            let meta = MessageMeta::new_test(1, "test", "dn-1", "me-0");
+            let instruction = open_region_instruction(region_id, storage_path);
+
+            let mut ctx = heartbeat_env.create_handler_ctx((meta, instruction));
+            let control = heartbeat_handler.handle(&mut ctx).await.unwrap();
+            assert_matches!(control, HandleControl::Done);
+
+            let (_, reply) = heartbeat_env.receiver.recv().await.unwrap();
+
+            if let InstructionReply::OpenRegion(reply) = reply {
+                assert!(reply.result);
+                assert!(reply.error.is_none());
+            } else {
+                unreachable!()
+            }
+        }
+    }
+
+    #[tokio::test]
+    async fn test_open_not_exists_region() {
+        common_telemetry::init_default_ut_logging();
+
+        let mut region_server = mock_region_server();
+        let heartbeat_handler = RegionHeartbeatResponseHandler::new(region_server.clone());
+
+        let mut engine_env = TestEnv::with_prefix("open-not-exists-region");
+        let engine = engine_env.create_engine(MitoConfig::default()).await;
+        region_server.register_engine(Arc::new(engine));
+        let region_id = RegionId::new(1024, 1);
+        let storage_path = "test";
+
+        let mut heartbeat_env = HeartbeatResponseTestEnv::new();
+
+        let meta = MessageMeta::new_test(1, "test", "dn-1", "me-0");
+        let instruction = open_region_instruction(region_id, storage_path);
+
+        let mut ctx = heartbeat_env.create_handler_ctx((meta, instruction));
+        let control = heartbeat_handler.handle(&mut ctx).await.unwrap();
+        assert_matches!(control, HandleControl::Done);
+
+        let (_, reply) = heartbeat_env.receiver.recv().await.unwrap();
+
+        if let InstructionReply::OpenRegion(reply) = reply {
+            assert!(!reply.result);
+            assert!(reply.error.is_some());
+        } else {
+            unreachable!()
+        }
+    }
+
+    #[tokio::test]
+    async fn test_downgrade_region() {
+        common_telemetry::init_default_ut_logging();
+
+        let mut region_server = mock_region_server();
+        let heartbeat_handler = RegionHeartbeatResponseHandler::new(region_server.clone());
+
+        let mut engine_env = TestEnv::with_prefix("downgrade-region");
+        let engine = engine_env.create_engine(MitoConfig::default()).await;
+        region_server.register_engine(Arc::new(engine));
+        let region_id = RegionId::new(1024, 1);
+
+        let builder = CreateRequestBuilder::new();
+        let mut create_req = builder.build();
+        let storage_path = "test";
+        create_req.region_dir = region_dir(storage_path, region_id);
+
+        region_server
+            .handle_request(region_id, RegionRequest::Create(create_req))
+            .await
+            .unwrap();
+
+        let mut heartbeat_env = HeartbeatResponseTestEnv::new();
+
+        // Should be ok, if we try to downgrade it twice.
+        for _ in 0..2 {
+            let meta = MessageMeta::new_test(1, "test", "dn-1", "me-0");
+            let instruction = Instruction::DowngradeRegion(DowngradeRegion { region_id });
+
+            let mut ctx = heartbeat_env.create_handler_ctx((meta, instruction));
+            let control = heartbeat_handler.handle(&mut ctx).await.unwrap();
+            assert_matches!(control, HandleControl::Done);
+
+            let (_, reply) = heartbeat_env.receiver.recv().await.unwrap();
+
+            if let InstructionReply::DowngradeRegion(reply) = reply {
+                assert!(reply.exists);
+                assert!(reply.error.is_none());
+                assert_eq!(reply.last_entry_id.unwrap(), 0);
+            } else {
+                unreachable!()
+            }
+        }
+
+        // Downgrades a not exists region.
+        let meta = MessageMeta::new_test(1, "test", "dn-1", "me-0");
+        let instruction = Instruction::DowngradeRegion(DowngradeRegion {
+            region_id: RegionId::new(2048, 1),
+        });
+        let mut ctx = heartbeat_env.create_handler_ctx((meta, instruction));
+        let control = heartbeat_handler.handle(&mut ctx).await.unwrap();
+        assert_matches!(control, HandleControl::Done);
+
+        let (_, reply) = heartbeat_env.receiver.recv().await.unwrap();
+
+        if let InstructionReply::DowngradeRegion(reply) = reply {
+            assert!(!reply.exists);
+            assert!(reply.error.is_none());
+            assert!(reply.last_entry_id.is_none());
+        } else {
+            unreachable!()
+        }
+    }
+}
@@ -26,5 +26,4 @@ pub mod metrics;
 pub mod region_server;
 mod store;
 #[cfg(test)]
-#[allow(dead_code)]
 mod tests;
@@ -49,7 +49,7 @@ use servers::grpc::region_server::RegionServerHandler;
 use session::context::{QueryContextBuilder, QueryContextRef};
 use snafu::{OptionExt, ResultExt};
 use store_api::metadata::RegionMetadataRef;
-use store_api::region_engine::{RegionEngineRef, RegionRole};
+use store_api::region_engine::{RegionEngineRef, RegionRole, SetReadonlyResponse};
 use store_api::region_request::{RegionCloseRequest, RegionRequest};
 use store_api::storage::{RegionId, ScanRequest};
 use substrait::{DFLogicalSubstraitConvertor, SubstraitPlan};
@@ -148,6 +148,19 @@ impl RegionServer {
             .with_context(|_| HandleRegionRequestSnafu { region_id })
     }
 
+    pub async fn set_readonly_gracefully(
+        &self,
+        region_id: RegionId,
+    ) -> Result<SetReadonlyResponse> {
+        match self.inner.region_map.get(&region_id) {
+            Some(engine) => Ok(engine
+                .set_readonly_gracefully(region_id)
+                .await
+                .with_context(|_| HandleRegionRequestSnafu { region_id })?),
+            None => Ok(SetReadonlyResponse::NotFound),
+        }
+    }
+
     pub fn runtime(&self) -> Arc<Runtime> {
         self.inner.runtime.clone()
     }
@@ -66,23 +66,9 @@ pub(crate) async fn new_object_store(opts: &DatanodeOptions) -> Result<ObjectSto
                 .with_error_level(Some("debug"))
                 .expect("input error level must be valid"),
         )
-        .layer(TracingLayer);
-
-    // In the test environment, multiple datanodes will be started in the same process.
-    // If each datanode registers Prometheus metrics when it starts, it will cause the program to crash,
-    // because the same metric is registered repeatedly.
-    // So the Prometheus metric layer is disabled in the test environment.
-    #[cfg(feature = "testing")]
-    return Ok(store);
-
-    #[cfg(not(feature = "testing"))]
-    {
-        let registry = prometheus::default_registry();
-        Ok(
-            store.layer(object_store::layers::PrometheusLayer::with_registry(
-                registry.clone(),
-            )),
-        )
-    }
+        .layer(TracingLayer)
+        .layer(object_store::layers::PrometheusMetricsLayer);
+    Ok(store)
 }
 
 async fn create_object_store_with_cache(
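The removed conditional compilation existed because registering the same collector twice on the global Prometheus registry fails, which crashes multi-datanode test processes; the new `PrometheusMetricsLayer` sidesteps per-instance registration entirely. A small sketch of the underlying failure mode, assuming the `prometheus` crate:

```rust
use prometheus::{default_registry, Counter};

fn main() {
    // Two collectors with identical name/help, as two datanodes in one
    // process would create.
    let c1 = Counter::new("requests_total", "total requests").unwrap();
    let c2 = Counter::new("requests_total", "total requests").unwrap();

    let registry = default_registry();
    registry.register(Box::new(c1)).unwrap();
    // The second registration is rejected; an unwrap here would panic,
    // which is the crash the old cfg(feature = "testing") branch avoided.
    assert!(registry.register(Box::new(c2)).is_err());
}
```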
@@ -13,19 +13,12 @@
 // limitations under the License.
 
 use std::any::Any;
-use std::collections::HashMap;
 use std::sync::Arc;
 
-use api::v1::meta::HeartbeatResponse;
 use async_trait::async_trait;
 use common_error::ext::BoxedError;
 use common_function::scalars::aggregate::AggregateFunctionMetaRef;
 use common_function::scalars::FunctionRef;
-use common_meta::heartbeat::handler::{
-    HeartbeatResponseHandlerContext, HeartbeatResponseHandlerExecutor,
-};
-use common_meta::heartbeat::mailbox::{HeartbeatMailbox, MessageMeta};
-use common_meta::instruction::{Instruction, OpenRegion, RegionIdent};
 use common_query::prelude::ScalarUdf;
 use common_query::Output;
 use common_recordbatch::SendableRecordBatchStream;
@@ -46,51 +39,6 @@ use tokio::sync::mpsc::{Receiver, Sender};
 use crate::event_listener::NoopRegionServerEventListener;
 use crate::region_server::RegionServer;
 
-pub fn test_message_meta(id: u64, subject: &str, to: &str, from: &str) -> MessageMeta {
-    MessageMeta {
-        id,
-        subject: subject.to_string(),
-        to: to.to_string(),
-        from: from.to_string(),
-    }
-}
-
-async fn handle_instruction(
-    executor: Arc<dyn HeartbeatResponseHandlerExecutor>,
-    mailbox: Arc<HeartbeatMailbox>,
-    instruction: Instruction,
-) {
-    let response = HeartbeatResponse::default();
-    let mut ctx: HeartbeatResponseHandlerContext =
-        HeartbeatResponseHandlerContext::new(mailbox, response);
-    ctx.incoming_message = Some((test_message_meta(1, "hi", "foo", "bar"), instruction));
-    executor.handle(ctx).await.unwrap();
-}
-
-fn close_region_instruction() -> Instruction {
-    Instruction::CloseRegion(RegionIdent {
-        table_id: 1024,
-        region_number: 0,
-        cluster_id: 1,
-        datanode_id: 2,
-        engine: "mito2".to_string(),
-    })
-}
-
-fn open_region_instruction() -> Instruction {
-    Instruction::OpenRegion(OpenRegion::new(
-        RegionIdent {
-            table_id: 1024,
-            region_number: 0,
-            cluster_id: 1,
-            datanode_id: 2,
-            engine: "mito2".to_string(),
-        },
-        "path/dir",
-        HashMap::new(),
-    ))
-}
-
 pub struct MockQueryEngine;
 
 #[async_trait]
@@ -122,7 +122,9 @@ impl GrpcQueryHandler for Instance {
             DdlExpr::DropTable(expr) => {
                 let table_name =
                     TableName::new(&expr.catalog_name, &expr.schema_name, &expr.table_name);
-                self.statement_executor.drop_table(table_name).await?
+                self.statement_executor
+                    .drop_table(table_name, expr.drop_if_exists)
+                    .await?
             }
             DdlExpr::TruncateTable(expr) => {
                 let table_name =
@@ -6,13 +6,17 @@ license.workspace = true
 
 [dependencies]
 async-trait.workspace = true
+bytemuck.workspace = true
 common-base.workspace = true
 common-error.workspace = true
 common-macro.workspace = true
 fst.workspace = true
 futures.workspace = true
 greptime-proto.workspace = true
+mockall.workspace = true
 prost.workspace = true
+regex-automata.workspace = true
+regex.workspace = true
 snafu.workspace = true
 
 [dev-dependencies]
@@ -14,3 +14,7 @@
 
 pub mod error;
 pub mod format;
+pub mod search;
+
+pub type FstMap = fst::Map<Vec<u8>>;
+pub type Bytes = Vec<u8>;
@@ -20,6 +20,8 @@ use common_error::status_code::StatusCode;
 use common_macro::stack_trace_debug;
 use snafu::{Location, Snafu};
 
+use crate::inverted_index::search::predicate::Predicate;
+
 #[derive(Snafu)]
 #[snafu(visibility(pub))]
 #[stack_trace_debug]
@@ -62,6 +64,9 @@ pub enum Error {
         payload_size: u64,
     },
 
+    #[snafu(display("Unexpected zero segment row count"))]
+    UnexpectedZeroSegmentRowCount { location: Location },
+
     #[snafu(display("Failed to decode fst"))]
     DecodeFst {
         #[snafu(source)]
@@ -75,6 +80,41 @@ pub enum Error {
         error: prost::DecodeError,
         location: Location,
     },
 
+    #[snafu(display("Failed to parse regex pattern: {pattern}"))]
+    ParseRegex {
+        #[snafu(source)]
+        error: regex::Error,
+        pattern: String,
+        location: Location,
+    },
+
+    #[snafu(display("Failed to parse regex DFA"))]
+    ParseDFA {
+        #[snafu(source)]
+        error: regex_automata::Error,
+        location: Location,
+    },
+
+    #[snafu(display("Unexpected empty predicates to construct fst applier"))]
+    EmptyPredicates { location: Location },
+
+    #[snafu(display("Failed to construct intersection fst applier with InList predicate"))]
+    IntersectionApplierWithInList { location: Location },
+
+    #[snafu(display("Failed to construct keys fst applier without InList predicate"))]
+    KeysApplierWithoutInList { location: Location },
+
+    #[snafu(display(
+        "Failed to construct keys fst applier with unexpected predicates: {predicates:?}"
+    ))]
+    KeysApplierUnexpectedPredicates {
+        location: Location,
+        predicates: Vec<Predicate>,
+    },
+
+    #[snafu(display("index not found, name: {name}"))]
+    IndexNotFound { name: String, location: Location },
 }
 
 impl ErrorExt for Error {
@@ -84,10 +124,19 @@ impl ErrorExt for Error {
             Seek { .. }
             | Read { .. }
             | UnexpectedFooterPayloadSize { .. }
+            | UnexpectedZeroSegmentRowCount { .. }
             | UnexpectedOffsetSize { .. }
             | UnexpectedBlobSize { .. }
             | DecodeProto { .. }
-            | DecodeFst { .. } => StatusCode::Unexpected,
+            | DecodeFst { .. }
+            | KeysApplierUnexpectedPredicates { .. } => StatusCode::Unexpected,
+
+            ParseRegex { .. }
+            | ParseDFA { .. }
+            | KeysApplierWithoutInList { .. }
+            | IntersectionApplierWithInList { .. }
+            | EmptyPredicates { .. }
+            | IndexNotFound { .. } => StatusCode::InvalidArguments,
         }
     }
@@ -17,16 +17,15 @@ mod footer;
 
 use async_trait::async_trait;
 use common_base::BitVec;
-use fst::Map;
 use greptime_proto::v1::index::{InvertedIndexMeta, InvertedIndexMetas};
 
 use crate::inverted_index::error::Result;
-
-pub type FstMap = Map<Vec<u8>>;
+use crate::inverted_index::FstMap;
 
 /// InvertedIndexReader defines an asynchronous reader of inverted index data
 #[mockall::automock]
 #[async_trait]
-pub trait InvertedIndexReader {
+pub trait InvertedIndexReader: Send {
     /// Retrieve metadata of all inverted indices stored within the blob.
     async fn metadata(&mut self) -> Result<InvertedIndexMetas>;
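`#[mockall::automock]` on the trait generates a `MockInvertedIndexReader` whose behavior tests can script expectation by expectation. A generic sketch of the mechanism with a simplified trait (not the crate's real signature):

```rust
use mockall::automock;

// The macro generates a companion `MockReader` type for this trait.
#[automock]
trait Reader: Send {
    fn read(&mut self, offset: u64, size: u64) -> Vec<u8>;
}

fn main() {
    let mut mock = MockReader::new();
    // Script the expectation: any call to `read` returns `size` zero bytes.
    mock.expect_read()
        .returning(|_offset, size| vec![0u8; size as usize]);

    assert_eq!(mock.read(0, 4), vec![0, 0, 0, 0]);
}
```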
@@ -143,7 +143,11 @@ mod tests {
         };
 
         // metas
-        let mut metas = InvertedIndexMetas::default();
+        let mut metas = InvertedIndexMetas {
+            total_row_count: 10,
+            segment_row_count: 1,
+            ..Default::default()
+        };
         metas.metas.insert(meta.name.clone(), meta);
         metas.metas.insert(meta1.name.clone(), meta1);
         let mut meta_buf = Vec::new();
@@ -21,7 +21,7 @@ use snafu::{ensure, ResultExt};
 
 use crate::inverted_index::error::{
     DecodeProtoSnafu, ReadSnafu, Result, SeekSnafu, UnexpectedFooterPayloadSizeSnafu,
-    UnexpectedOffsetSizeSnafu,
+    UnexpectedOffsetSizeSnafu, UnexpectedZeroSegmentRowCountSnafu,
 };
 use crate::inverted_index::format::FOOTER_PAYLOAD_SIZE_SIZE;
@@ -85,6 +85,11 @@ impl<R: AsyncRead + AsyncSeek + Unpin> InvertedIndeFooterReader<R> {
 
     /// Check if the read metadata is consistent with expected sizes and offsets.
     fn validate_metas(&self, metas: &InvertedIndexMetas, payload_size: u64) -> Result<()> {
+        ensure!(
+            metas.segment_row_count > 0,
+            UnexpectedZeroSegmentRowCountSnafu
+        );
+
         for meta in metas.metas.values() {
             let InvertedIndexMeta {
                 base_offset,
@@ -116,7 +121,10 @@ mod tests {
     use super::*;
 
     fn create_test_payload(meta: InvertedIndexMeta) -> Vec<u8> {
-        let mut metas = InvertedIndexMetas::default();
+        let mut metas = InvertedIndexMetas {
+            segment_row_count: 1,
+            ..Default::default()
+        };
         metas.metas.insert("test".to_string(), meta);
 
         let mut payload_buf = vec![];
@@ -131,7 +139,6 @@ mod tests {
     async fn test_read_payload() {
         let meta = InvertedIndexMeta {
             name: "test".to_string(),
-            segment_row_count: 4096,
             ..Default::default()
         };
@@ -145,14 +152,12 @@ mod tests {
         assert_eq!(metas.metas.len(), 1);
         let index_meta = &metas.metas.get("test").unwrap();
         assert_eq!(index_meta.name, "test");
-        assert_eq!(index_meta.segment_row_count, 4096);
     }
 
     #[tokio::test]
     async fn test_invalid_footer_payload_size() {
         let meta = InvertedIndexMeta {
             name: "test".to_string(),
-            segment_row_count: 4096,
             ..Default::default()
         };
@@ -171,7 +176,6 @@ mod tests {
             name: "test".to_string(),
             base_offset: 0,
             inverted_index_size: 1, // Set size to 1 to exceed the blob size
-            segment_row_count: 4096,
             ..Default::default()
         };
18  src/index/src/inverted_index/search.rs  (new file)
@@ -0,0 +1,18 @@
+// Copyright 2023 Greptime Team
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+pub mod fst_apply;
+pub mod fst_values_mapper;
+pub mod index_apply;
+pub mod predicate;
33  src/index/src/inverted_index/search/fst_apply.rs  (new file)
@@ -0,0 +1,33 @@
+// Copyright 2023 Greptime Team
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+mod intersection_apply;
+mod keys_apply;
+
+pub use intersection_apply::IntersectionFstApplier;
+pub use keys_apply::KeysFstApplier;
+
+use crate::inverted_index::FstMap;
+
+/// A trait for objects that can process a finite state transducer (FstMap) and return
+/// associated values.
+#[mockall::automock]
+pub trait FstApplier: Send + Sync {
+    /// Retrieves values from an FstMap.
+    ///
+    /// * `fst`: A reference to the FstMap from which the values will be fetched.
+    ///
+    /// Returns a `Vec<u64>`, with each u64 being a value from the FstMap.
+    fn apply(&self, fst: &FstMap) -> Vec<u64>;
+}
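What `apply` does is easiest to see with the `fst` crate directly: an `FstMap` maps byte keys to `u64` values, and range (or automaton) searches stream back matching entries. A small sketch, assuming only the `fst` crate:

```rust
use fst::{IntoStreamer, Map, Streamer};

fn main() -> Result<(), Box<dyn std::error::Error>> {
    // Keys must be inserted in lexicographic order.
    let map = Map::from_iter(vec![("apple", 1u64), ("banana", 2), ("cherry", 3)])?;

    // An inclusive range query, like a single Range predicate would produce
    // in IntersectionFstApplier below.
    let mut stream = map.range().ge("apple").le("banana").into_stream();

    let mut values = Vec::new();
    while let Some((_key, value)) = stream.next() {
        values.push(value);
    }
    assert_eq!(values, vec![1, 2]);
    Ok(())
}
```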
@@ -0,0 +1,325 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use fst::map::OpBuilder;
use fst::{IntoStreamer, Streamer};
use regex_automata::DenseDFA;
use snafu::{ensure, ResultExt};

use crate::inverted_index::error::{
    EmptyPredicatesSnafu, IntersectionApplierWithInListSnafu, ParseDFASnafu, Result,
};
use crate::inverted_index::search::fst_apply::FstApplier;
use crate::inverted_index::search::predicate::{Predicate, Range};
use crate::inverted_index::FstMap;

type Dfa = DenseDFA<Vec<usize>, usize>;

/// `IntersectionFstApplier` applies intersection operations on an FstMap using specified ranges and regex patterns.
pub struct IntersectionFstApplier {
    /// A list of `Range` which define inclusive or exclusive ranges for keys to be queried in the FstMap.
    ranges: Vec<Range>,

    /// A list of `Dfa` compiled from regular expression patterns.
    dfas: Vec<Dfa>,
}

impl FstApplier for IntersectionFstApplier {
    fn apply(&self, fst: &FstMap) -> Vec<u64> {
        let mut op = OpBuilder::new();

        for range in &self.ranges {
            match (range.lower.as_ref(), range.upper.as_ref()) {
                (Some(lower), Some(upper)) => match (lower.inclusive, upper.inclusive) {
                    (true, true) => op.push(fst.range().ge(&lower.value).le(&upper.value)),
                    (true, false) => op.push(fst.range().ge(&lower.value).lt(&upper.value)),
                    (false, true) => op.push(fst.range().gt(&lower.value).le(&upper.value)),
                    (false, false) => op.push(fst.range().gt(&lower.value).lt(&upper.value)),
                },
                (Some(lower), None) => match lower.inclusive {
                    true => op.push(fst.range().ge(&lower.value)),
                    false => op.push(fst.range().gt(&lower.value)),
                },
                (None, Some(upper)) => match upper.inclusive {
                    true => op.push(fst.range().le(&upper.value)),
                    false => op.push(fst.range().lt(&upper.value)),
                },
                (None, None) => op.push(fst),
            }
        }

        for dfa in &self.dfas {
            op.push(fst.search(dfa));
        }

        let mut stream = op.intersection().into_stream();
        let mut values = Vec::new();
        while let Some((_, v)) = stream.next() {
            values.push(v[0].value)
        }
        values
    }
}

impl IntersectionFstApplier {
    /// Attempts to create an `IntersectionFstApplier` from a list of `Predicate`.
    ///
    /// This function only accepts predicates of the variants `Range` and `RegexMatch`.
    /// It does not accept `InList` predicates and will return an error if any are found.
    /// `InList` predicates are handled by `KeysFstApplier`.
    pub fn try_from(predicates: Vec<Predicate>) -> Result<Self> {
        ensure!(!predicates.is_empty(), EmptyPredicatesSnafu);

        let mut dfas = Vec::with_capacity(predicates.len());
        let mut ranges = Vec::with_capacity(predicates.len());

        for predicate in predicates {
            match predicate {
                Predicate::Range(range) => ranges.push(range.range),
                Predicate::RegexMatch(regex) => {
                    let dfa = DenseDFA::new(&regex.pattern);
                    let dfa = dfa.context(ParseDFASnafu)?;
                    dfas.push(dfa);
                }
                // Rejection of `InList` predicates is enforced here.
                Predicate::InList(_) => {
                    return IntersectionApplierWithInListSnafu.fail();
                }
            }
        }

        Ok(Self { dfas, ranges })
    }
}

impl TryFrom<Vec<Predicate>> for IntersectionFstApplier {
    type Error = crate::inverted_index::error::Error;

    fn try_from(predicates: Vec<Predicate>) -> Result<Self> {
        Self::try_from(predicates)
    }
}

#[cfg(test)]
mod tests {
    use std::collections::HashSet;

    use super::*;
    use crate::inverted_index::error::Error;
    use crate::inverted_index::search::predicate::{
        Bound, InListPredicate, RangePredicate, RegexMatchPredicate,
    };

    fn create_applier_from_range(range: Range) -> Result<IntersectionFstApplier> {
        IntersectionFstApplier::try_from(vec![Predicate::Range(RangePredicate { range })])
    }

    fn create_applier_from_pattern(pattern: &str) -> Result<IntersectionFstApplier> {
        IntersectionFstApplier::try_from(vec![Predicate::RegexMatch(RegexMatchPredicate {
            pattern: pattern.to_string(),
        })])
    }

    #[test]
    fn test_intersection_fst_applier_with_ranges() {
        let test_fst = FstMap::from_iter([("aa", 1), ("bb", 2), ("cc", 3)]).unwrap();

        let applier_inclusive_lower = create_applier_from_range(Range {
            lower: Some(Bound {
                value: b"bb".to_vec(),
                inclusive: true,
            }),
            upper: None,
        })
        .unwrap();
        let results = applier_inclusive_lower.apply(&test_fst);
        assert_eq!(results, vec![2, 3]);

        let applier_exclusive_lower = create_applier_from_range(Range {
            lower: Some(Bound {
                value: b"bb".to_vec(),
                inclusive: false,
            }),
            upper: None,
        })
        .unwrap();
        let results = applier_exclusive_lower.apply(&test_fst);
        assert_eq!(results, vec![3]);

        let applier_inclusive_upper = create_applier_from_range(Range {
            lower: None,
            upper: Some(Bound {
                value: b"bb".to_vec(),
                inclusive: true,
            }),
        })
        .unwrap();
        let results = applier_inclusive_upper.apply(&test_fst);
        assert_eq!(results, vec![1, 2]);

        let applier_exclusive_upper = create_applier_from_range(Range {
            lower: None,
            upper: Some(Bound {
                value: b"bb".to_vec(),
                inclusive: false,
            }),
        })
        .unwrap();
        let results = applier_exclusive_upper.apply(&test_fst);
        assert_eq!(results, vec![1]);

        let applier_inclusive_bounds = create_applier_from_range(Range {
            lower: Some(Bound {
                value: b"aa".to_vec(),
                inclusive: true,
            }),
            upper: Some(Bound {
                value: b"cc".to_vec(),
                inclusive: true,
            }),
        })
        .unwrap();
        let results = applier_inclusive_bounds.apply(&test_fst);
        assert_eq!(results, vec![1, 2, 3]);

        let applier_exclusive_bounds = create_applier_from_range(Range {
            lower: Some(Bound {
                value: b"aa".to_vec(),
                inclusive: false,
            }),
            upper: Some(Bound {
                value: b"cc".to_vec(),
                inclusive: false,
            }),
        })
        .unwrap();
        let results = applier_exclusive_bounds.apply(&test_fst);
        assert_eq!(results, vec![2]);
    }

    #[test]
    fn test_intersection_fst_applier_with_valid_pattern() {
        let test_fst = FstMap::from_iter([("aa", 1), ("bb", 2), ("cc", 3)]).unwrap();

        let applier = create_applier_from_pattern("a.?").unwrap();
        let results = applier.apply(&test_fst);
        assert_eq!(results, vec![1]);

        let applier = create_applier_from_pattern("b.?").unwrap();
        let results = applier.apply(&test_fst);
        assert_eq!(results, vec![2]);

        let applier = create_applier_from_pattern("c.?").unwrap();
        let results = applier.apply(&test_fst);
        assert_eq!(results, vec![3]);

        let applier = create_applier_from_pattern("a.*").unwrap();
        let results = applier.apply(&test_fst);
        assert_eq!(results, vec![1]);

        let applier = create_applier_from_pattern("b.*").unwrap();
        let results = applier.apply(&test_fst);
        assert_eq!(results, vec![2]);

        let applier = create_applier_from_pattern("c.*").unwrap();
        let results = applier.apply(&test_fst);
        assert_eq!(results, vec![3]);

        let applier = create_applier_from_pattern("d.?").unwrap();
        let results = applier.apply(&test_fst);
        assert!(results.is_empty());

        let applier = create_applier_from_pattern("a.?|b.?").unwrap();
        let results = applier.apply(&test_fst);
        assert_eq!(results, vec![1, 2]);

        let applier = create_applier_from_pattern("d.?|a.?").unwrap();
        let results = applier.apply(&test_fst);
        assert_eq!(results, vec![1]);

        let applier = create_applier_from_pattern(".*").unwrap();
        let results = applier.apply(&test_fst);
        assert_eq!(results, vec![1, 2, 3]);
    }

    #[test]
    fn test_intersection_fst_applier_with_composite_predicates() {
        let test_fst = FstMap::from_iter([("aa", 1), ("bb", 2), ("cc", 3)]).unwrap();

        let applier = IntersectionFstApplier::try_from(vec![
            Predicate::Range(RangePredicate {
                range: Range {
                    lower: Some(Bound {
                        value: b"aa".to_vec(),
                        inclusive: true,
                    }),
                    upper: Some(Bound {
                        value: b"cc".to_vec(),
                        inclusive: true,
                    }),
                },
            }),
            Predicate::RegexMatch(RegexMatchPredicate {
                pattern: "a.?".to_string(),
            }),
        ])
        .unwrap();
        let results = applier.apply(&test_fst);
        assert_eq!(results, vec![1]);

        let applier = IntersectionFstApplier::try_from(vec![
            Predicate::Range(RangePredicate {
                range: Range {
                    lower: Some(Bound {
                        value: b"aa".to_vec(),
                        inclusive: false,
                    }),
                    upper: Some(Bound {
                        value: b"cc".to_vec(),
                        inclusive: true,
                    }),
                },
            }),
            Predicate::RegexMatch(RegexMatchPredicate {
                pattern: "a.?".to_string(),
            }),
        ])
        .unwrap();
        let results = applier.apply(&test_fst);
        assert!(results.is_empty());
    }

    #[test]
    fn test_intersection_fst_applier_with_invalid_pattern() {
        let result = create_applier_from_pattern("a(");
        assert!(matches!(result, Err(Error::ParseDFA { .. })));
    }

    #[test]
    fn test_intersection_fst_applier_with_empty_predicates() {
        let result = IntersectionFstApplier::try_from(vec![]);
        assert!(matches!(result, Err(Error::EmptyPredicates { .. })));
    }

    #[test]
    fn test_intersection_fst_applier_with_in_list_predicate() {
        let result = IntersectionFstApplier::try_from(vec![Predicate::InList(InListPredicate {
            list: HashSet::from_iter([b"one".to_vec(), b"two".to_vec()]),
        })]);
        assert!(matches!(
            result,
            Err(Error::IntersectionApplierWithInList { .. })
        ));
    }
}
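[Editor's note: the heart of `apply` above is the `fst` crate's lazy stream intersection. A minimal, self-contained sketch of that mechanism follows (assuming `fst` v0.4; the function name `intersect_ranges` and the sample keys are illustrative, not from the diff).]

use fst::map::OpBuilder;
use fst::{IntoStreamer, Map, Streamer};

fn intersect_ranges() -> Vec<u64> {
    // Keys must be inserted in lexicographic order.
    let m = Map::from_iter([("aa", 1u64), ("bb", 2), ("cc", 3)]).unwrap();
    let mut op = OpBuilder::new();
    op.push(m.range().ge("aa").le("bb")); // keys "aa"..="bb"
    op.push(m.range().ge("bb"));          // keys "bb"..
    let mut stream = op.intersection().into_stream();
    let mut values = Vec::new();
    while let Some((_, ivs)) = stream.next() {
        // `ivs` is a slice of `IndexedValue`; all entries share the same key.
        values.push(ivs[0].value);
    }
    values // only "bb" is in both ranges, so this is vec![2]
}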
305 src/index/src/inverted_index/search/fst_apply/keys_apply.rs Normal file
@@ -0,0 +1,305 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use std::collections::HashSet;

use snafu::{ensure, ResultExt};

use crate::inverted_index::error::{
    EmptyPredicatesSnafu, KeysApplierUnexpectedPredicatesSnafu, KeysApplierWithoutInListSnafu,
    ParseRegexSnafu, Result,
};
use crate::inverted_index::search::fst_apply::FstApplier;
use crate::inverted_index::search::predicate::Predicate;
use crate::inverted_index::{Bytes, FstMap};

/// `KeysFstApplier` is responsible for applying a search using a set of predefined keys
/// against an FstMap to fetch associated values.
pub struct KeysFstApplier {
    /// A list of keys to be fetched directly from the FstMap.
    keys: Vec<Bytes>,
}

impl FstApplier for KeysFstApplier {
    fn apply(&self, fst: &FstMap) -> Vec<u64> {
        self.keys.iter().filter_map(|k| fst.get(k)).collect()
    }
}

impl KeysFstApplier {
    /// Tries to create a `KeysFstApplier` from a list of predicates.
    ///
    /// This function constructs the applier by intersecting keys from one or more `InList` predicates,
    /// which are required. It then optionally refines this set using any additional `Range` and `RegexMatch`
    /// predicates provided.
    pub fn try_from(mut predicates: Vec<Predicate>) -> Result<Self> {
        ensure!(!predicates.is_empty(), EmptyPredicatesSnafu);

        let (in_lists, others) = Self::split_at_in_lists(&mut predicates);
        let (ranges, regexes) = Self::split_at_ranges(others);
        Self::ensure_all_regexes(regexes)?;

        ensure!(!in_lists.is_empty(), KeysApplierWithoutInListSnafu);
        let intersected_keys = Self::intersect_with_lists(in_lists);
        let range_matched_keys = Self::filter_by_ranges(intersected_keys, ranges);
        let regex_matched_keys = Self::filter_by_regexes(range_matched_keys, regexes)?;

        Ok(Self {
            keys: regex_matched_keys,
        })
    }

    fn split_at_in_lists(predicates: &mut [Predicate]) -> (&mut [Predicate], &mut [Predicate]) {
        let in_list_index = predicates
            .iter_mut()
            .partition_in_place(|p| matches!(p, Predicate::InList(_)));
        predicates.split_at_mut(in_list_index)
    }

    fn split_at_ranges(predicates: &mut [Predicate]) -> (&mut [Predicate], &mut [Predicate]) {
        let range_index = predicates
            .iter_mut()
            .partition_in_place(|p| matches!(p, Predicate::Range(_)));
        predicates.split_at_mut(range_index)
    }

    fn ensure_all_regexes(ps: &[Predicate]) -> Result<()> {
        ensure!(
            ps.iter().all(|p| matches!(p, Predicate::RegexMatch(_))),
            KeysApplierUnexpectedPredicatesSnafu {
                predicates: ps.to_vec()
            }
        );
        Ok(())
    }

    fn intersect_with_lists(in_lists: &mut [Predicate]) -> Vec<Bytes> {
        #[inline]
        fn get_list(p: &Predicate) -> &HashSet<Bytes> {
            match p {
                Predicate::InList(i) => &i.list,
                _ => unreachable!(), // `in_lists` is filtered by `split_at_in_lists`
            }
        }

        in_lists.sort_unstable_by_key(|p| get_list(p).len());
        get_list(&in_lists[0])
            .iter()
            .filter(|c| in_lists[1..].iter().all(|s| get_list(s).contains(*c)))
            .cloned()
            .collect()
    }

    fn filter_by_ranges(mut keys: Vec<Bytes>, ranges: &[Predicate]) -> Vec<Bytes> {
        #[inline]
        fn range_contains(p: &Predicate, key: &Bytes) -> bool {
            let (lower, upper) = match p {
                Predicate::Range(r) => (&r.range.lower, &r.range.upper),
                _ => unreachable!(), // `ranges` is filtered by `split_at_ranges`
            };

            match (lower, upper) {
                (Some(lower), Some(upper)) => match (lower.inclusive, upper.inclusive) {
                    (true, true) => &lower.value <= key && key <= &upper.value,
                    (true, false) => &lower.value <= key && key < &upper.value,
                    (false, true) => &lower.value < key && key <= &upper.value,
                    (false, false) => &lower.value < key && key < &upper.value,
                },
                (Some(lower), None) => match lower.inclusive {
                    true => &lower.value <= key,
                    false => &lower.value < key,
                },
                (None, Some(upper)) => match upper.inclusive {
                    true => key <= &upper.value,
                    false => key < &upper.value,
                },
                (None, None) => true,
            }
        }

        keys.retain(|k| ranges.iter().all(|r| range_contains(r, k)));
        keys
    }

    fn filter_by_regexes(mut keys: Vec<Bytes>, regexes: &[Predicate]) -> Result<Vec<Bytes>> {
        for p in regexes {
            let pattern = match p {
                Predicate::RegexMatch(r) => &r.pattern,
                _ => unreachable!(), // checked by `ensure_all_regexes`
            };

            let regex = regex::Regex::new(pattern).with_context(|_| ParseRegexSnafu {
                pattern: pattern.to_owned(),
            })?;

            keys.retain(|k| {
                std::str::from_utf8(k)
                    .map(|k| regex.is_match(k))
                    .unwrap_or_default()
            });
            if keys.is_empty() {
                return Ok(keys);
            }
        }

        Ok(keys)
    }
}

impl TryFrom<Vec<Predicate>> for KeysFstApplier {
    type Error = crate::inverted_index::error::Error;
    fn try_from(predicates: Vec<Predicate>) -> Result<Self> {
        Self::try_from(predicates)
    }
}

#[cfg(test)]
mod tests {
    use fst::Map as FstMap;

    use super::*;
    use crate::inverted_index::error::Error;
    use crate::inverted_index::search::predicate::{
        Bound, InListPredicate, Predicate, Range, RangePredicate, RegexMatchPredicate,
    };

    fn create_fst_map(items: &[(&[u8], u64)]) -> FstMap<Vec<u8>> {
        let mut items = items
            .iter()
            .map(|(k, v)| (k.to_vec(), *v))
            .collect::<Vec<_>>();
        items.sort();
        FstMap::from_iter(items).unwrap()
    }

    fn b(s: &str) -> Vec<u8> {
        s.as_bytes().to_vec()
    }

    #[test]
    fn test_keys_fst_applier_apply() {
        let test_fst = create_fst_map(&[(b"foo", 1), (b"bar", 2), (b"baz", 3)]);
        let applier = KeysFstApplier {
            keys: vec![b("foo"), b("baz")],
        };

        let results = applier.apply(&test_fst);
        assert_eq!(results, vec![1, 3]);
    }

    #[test]
    fn test_keys_fst_applier_with_empty_keys() {
        let test_fst = create_fst_map(&[(b"foo", 1), (b"bar", 2), (b"baz", 3)]);
        let applier = KeysFstApplier { keys: vec![] };

        let results = applier.apply(&test_fst);
        assert!(results.is_empty());
    }

    #[test]
    fn test_keys_fst_applier_with_unmatched_keys() {
        let test_fst = create_fst_map(&[(b"foo", 1), (b"bar", 2), (b"baz", 3)]);
        let applier = KeysFstApplier {
            keys: vec![b("qux"), b("quux")],
        };

        let results = applier.apply(&test_fst);
        assert!(results.is_empty());
    }

    #[test]
    fn test_keys_fst_applier_try_from() {
        let predicates = vec![
            Predicate::InList(InListPredicate {
                list: HashSet::from_iter(vec![b("foo"), b("bar")]),
            }),
            Predicate::Range(RangePredicate {
                range: Range {
                    lower: Some(Bound {
                        value: b("bar"),
                        inclusive: true,
                    }),
                    upper: None,
                },
            }),
            Predicate::RegexMatch(RegexMatchPredicate {
                pattern: ".*r".to_string(),
            }),
        ];
        let applier = KeysFstApplier::try_from(predicates).unwrap();
        assert_eq!(applier.keys, vec![b("bar")]);
    }

    #[test]
    fn test_keys_fst_applier_try_from_filter_out_unmatched_keys() {
        let predicates = vec![
            Predicate::InList(InListPredicate {
                list: HashSet::from_iter(vec![b("foo"), b("bar")]),
            }),
            Predicate::Range(RangePredicate {
                range: Range {
                    lower: Some(Bound {
                        value: b("f"),
                        inclusive: true,
                    }),
                    upper: None,
                },
            }),
            Predicate::RegexMatch(RegexMatchPredicate {
                pattern: ".*o".to_string(),
            }),
        ];
        let applier = KeysFstApplier::try_from(predicates).unwrap();
        assert_eq!(applier.keys, vec![b("foo")]);
    }

    #[test]
    fn test_keys_fst_applier_try_from_empty_predicates() {
        let predicates = vec![];
        let result = KeysFstApplier::try_from(predicates);
        assert!(matches!(result, Err(Error::EmptyPredicates { .. })));
    }

    #[test]
    fn test_keys_fst_applier_try_from_without_in_list() {
        let predicates = vec![Predicate::Range(RangePredicate {
            range: Range {
                lower: Some(Bound {
                    value: b("bar"),
                    inclusive: true,
                }),
                upper: None,
            },
        })];
        let result = KeysFstApplier::try_from(predicates);
        assert!(matches!(
            result,
            Err(Error::KeysApplierWithoutInList { .. })
        ));
    }

    #[test]
    fn test_keys_fst_applier_try_from_with_invalid_regex() {
        let predicates = vec![
            Predicate::InList(InListPredicate {
                list: HashSet::from_iter(vec![b("foo"), b("bar")]),
            }),
            Predicate::RegexMatch(RegexMatchPredicate {
                pattern: "*invalid regex".to_string(),
            }),
        ];
        let result = KeysFstApplier::try_from(predicates);
        assert!(matches!(result, Err(Error::ParseRegex { .. })));
    }
}
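[Editor's note: `intersect_with_lists` above sorts the `InList` sets by size and scans only the smallest one. A standalone sketch of that same "smallest set first" idea on plain `HashSet`s (function name `intersect_all` is illustrative; assumes a non-empty input):]

use std::collections::HashSet;

fn intersect_all(mut sets: Vec<HashSet<Vec<u8>>>) -> Vec<Vec<u8>> {
    // Scanning the smallest set bounds the work by the smallest cardinality.
    sets.sort_unstable_by_key(|s| s.len());
    let (first, rest) = sets.split_first().expect("at least one set");
    first
        .iter()
        .filter(|k| rest.iter().all(|s| s.contains(*k)))
        .cloned()
        .collect()
}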
112 src/index/src/inverted_index/search/fst_values_mapper.rs Normal file
@@ -0,0 +1,112 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use common_base::BitVec;
use greptime_proto::v1::index::InvertedIndexMeta;

use crate::inverted_index::error::Result;
use crate::inverted_index::format::reader::InvertedIndexReader;

/// `FstValuesMapper` maps FST-encoded u64 values to their corresponding bitmaps
/// within an inverted index. The higher 32 bits of each u64 value represent the
/// bitmap offset and the lower 32 bits represent its size. This mapper uses these
/// combined offset-size pairs to fetch and union multiple bitmaps into a single `BitVec`.
pub struct FstValuesMapper<'a> {
    /// `reader` retrieves bitmap data using offsets and sizes from FST values.
    reader: &'a mut dyn InvertedIndexReader,

    /// `metadata` provides context for interpreting the index structures.
    metadata: &'a InvertedIndexMeta,
}

impl<'a> FstValuesMapper<'a> {
    pub fn new(
        reader: &'a mut dyn InvertedIndexReader,
        metadata: &'a InvertedIndexMeta,
    ) -> FstValuesMapper<'a> {
        FstValuesMapper { reader, metadata }
    }

    /// Maps an array of FST values to a `BitVec` by retrieving and combining bitmaps.
    pub async fn map_values(&mut self, values: &[u64]) -> Result<BitVec> {
        let mut bitmap = BitVec::new();

        for value in values {
            // relative_offset (higher 32 bits), size (lower 32 bits)
            let [relative_offset, size] = bytemuck::cast::<u64, [u32; 2]>(*value);

            let bm = self
                .reader
                .bitmap(self.metadata, relative_offset, size)
                .await?;

            // Ensure the longest BitVec is the left operand to prevent truncation during OR.
            if bm.len() > bitmap.len() {
                bitmap = bm | bitmap
            } else {
                bitmap |= bm
            }
        }

        Ok(bitmap)
    }
}

#[cfg(test)]
mod tests {
    use common_base::bit_vec::prelude::*;

    use super::*;
    use crate::inverted_index::format::reader::MockInvertedIndexReader;

    fn value(offset: u32, size: u32) -> u64 {
        bytemuck::cast::<[u32; 2], u64>([offset, size])
    }

    #[tokio::test]
    async fn test_map_values() {
        let mut mock_reader = MockInvertedIndexReader::new();
        mock_reader
            .expect_bitmap()
            .returning(|_, offset, size| match (offset, size) {
                (1, 1) => Ok(bitvec![u8, Lsb0; 1, 0, 1, 0, 1, 0, 1]),
                (2, 1) => Ok(bitvec![u8, Lsb0; 0, 1, 0, 1, 0, 1, 0, 1]),
                _ => unreachable!(),
            });

        let meta = InvertedIndexMeta::default();
        let mut values_mapper = FstValuesMapper::new(&mut mock_reader, &meta);

        let result = values_mapper.map_values(&[]).await.unwrap();
        assert_eq!(result.count_ones(), 0);

        let result = values_mapper.map_values(&[value(1, 1)]).await.unwrap();
        assert_eq!(result, bitvec![u8, Lsb0; 1, 0, 1, 0, 1, 0, 1]);

        let result = values_mapper.map_values(&[value(2, 1)]).await.unwrap();
        assert_eq!(result, bitvec![u8, Lsb0; 0, 1, 0, 1, 0, 1, 0, 1]);

        let result = values_mapper
            .map_values(&[value(1, 1), value(2, 1)])
            .await
            .unwrap();
        assert_eq!(result, bitvec![u8, Lsb0; 1, 1, 1, 1, 1, 1, 1, 1]);

        let result = values_mapper
            .map_values(&[value(2, 1), value(1, 1)])
            .await
            .unwrap();
        assert_eq!(result, bitvec![u8, Lsb0; 1, 1, 1, 1, 1, 1, 1, 1]);
    }
}
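[Editor's note: the offset/size packing above relies on `bytemuck::cast` in both directions, so pack and unpack always agree regardless of how the lanes land in the u64. A minimal round-trip sketch (the `pack`/`unpack` names are illustrative):]

fn pack(offset: u32, size: u32) -> u64 {
    // Reinterprets the two u32 lanes as one u64, exactly as the tests' `value` helper does.
    bytemuck::cast::<[u32; 2], u64>([offset, size])
}

fn unpack(value: u64) -> (u32, u32) {
    // The inverse cast recovers the same lane order used by `pack`.
    let [offset, size] = bytemuck::cast::<u64, [u32; 2]>(value);
    (offset, size)
}

#[test]
fn pack_unpack_round_trip() {
    assert_eq!(unpack(pack(7, 42)), (7, 42));
}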
57 src/index/src/inverted_index/search/index_apply.rs Normal file
@@ -0,0 +1,57 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

mod predicates_apply;

use async_trait::async_trait;
pub use predicates_apply::PredicatesIndexApplier;

use crate::inverted_index::error::Result;
use crate::inverted_index::format::reader::InvertedIndexReader;

/// A trait for processing and transforming indices obtained from an inverted index.
///
/// Applier instances are reusable and work with various `InvertedIndexReader` instances,
/// avoiding repeated compilation of fixed predicates such as regex patterns.
#[async_trait]
pub trait IndexApplier {
    /// Applies the predefined predicates to the data read by the given index reader, returning
    /// a list of relevant indices (e.g., post IDs, group IDs, row IDs).
    async fn apply(
        &self,
        context: SearchContext,
        reader: &mut dyn InvertedIndexReader,
    ) -> Result<Vec<usize>>;
}

/// A context for searching the inverted index.
#[derive(Clone, Debug, Eq, PartialEq, Default)]
pub struct SearchContext {
    /// `index_not_found_strategy` controls the behavior of the applier when the index is not found.
    pub index_not_found_strategy: IndexNotFoundStrategy,
}

/// Defines the behavior of an applier when the index is not found.
#[derive(Clone, Copy, Debug, Eq, PartialEq, Default)]
pub enum IndexNotFoundStrategy {
    /// Return an empty list of indices.
    #[default]
    ReturnEmpty,

    /// Ignore the index and continue.
    Ignore,

    /// Throw an error.
    ThrowError,
}
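[Editor's note: a minimal sketch of how a caller picks the not-found behavior; the function name `contexts` is illustrative. `SearchContext::default()` yields `ReturnEmpty` thanks to the `#[default]` attribute above.]

fn contexts() -> (SearchContext, SearchContext) {
    // Lenient: skip indexes that were never built for a column.
    let lenient = SearchContext {
        index_not_found_strategy: IndexNotFoundStrategy::Ignore,
    };
    // Strict: surface a missing index as an error to the caller.
    let strict = SearchContext {
        index_not_found_strategy: IndexNotFoundStrategy::ThrowError,
    };
    (lenient, strict)
}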
346 src/index/src/inverted_index/search/index_apply/predicates_apply.rs Normal file
@@ -0,0 +1,346 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use async_trait::async_trait;
use common_base::BitVec;
use greptime_proto::v1::index::InvertedIndexMetas;

use crate::inverted_index::error::{IndexNotFoundSnafu, Result};
use crate::inverted_index::format::reader::InvertedIndexReader;
use crate::inverted_index::search::fst_apply::{
    FstApplier, IntersectionFstApplier, KeysFstApplier,
};
use crate::inverted_index::search::fst_values_mapper::FstValuesMapper;
use crate::inverted_index::search::index_apply::{
    IndexApplier, IndexNotFoundStrategy, SearchContext,
};
use crate::inverted_index::search::predicate::Predicate;

type IndexName = String;

/// `PredicatesIndexApplier` contains a collection of `FstApplier`s, each associated with an index name,
/// to process and filter index data based on compiled predicates.
pub struct PredicatesIndexApplier {
    /// A list of `FstApplier`s, each associated with a specific index name
    /// (e.g. a tag field uses its column name as index name)
    fst_appliers: Vec<(IndexName, Box<dyn FstApplier>)>,
}

#[async_trait]
impl IndexApplier for PredicatesIndexApplier {
    /// Applies all `FstApplier`s to the data in the inverted index reader, intersecting the individual
    /// bitmaps obtained for each index to result in a final set of indices.
    async fn apply(
        &self,
        context: SearchContext,
        reader: &mut dyn InvertedIndexReader,
    ) -> Result<Vec<usize>> {
        let metadata = reader.metadata().await?;

        let mut bitmap = Self::bitmap_full_range(&metadata);
        // TODO(zhongzc): optimize the order of applying to make it quicker to return empty.
        for (name, fst_applier) in &self.fst_appliers {
            if bitmap.count_ones() == 0 {
                break;
            }

            let Some(meta) = metadata.metas.get(name) else {
                match context.index_not_found_strategy {
                    IndexNotFoundStrategy::ReturnEmpty => {
                        return Ok(vec![]);
                    }
                    IndexNotFoundStrategy::Ignore => {
                        continue;
                    }
                    IndexNotFoundStrategy::ThrowError => {
                        return IndexNotFoundSnafu { name }.fail();
                    }
                }
            };

            let fst = reader.fst(meta).await?;
            let values = fst_applier.apply(&fst);

            let mut mapper = FstValuesMapper::new(&mut *reader, meta);
            let bm = mapper.map_values(&values).await?;

            bitmap &= bm;
        }

        Ok(bitmap.iter_ones().collect())
    }
}

impl PredicatesIndexApplier {
    /// Constructs an instance of `PredicatesIndexApplier` based on a list of tag predicates.
    /// Chooses an appropriate `FstApplier` for each index name based on the nature of its predicates.
    pub fn try_from(mut predicates: Vec<(IndexName, Vec<Predicate>)>) -> Result<Self> {
        let mut fst_appliers = Vec::with_capacity(predicates.len());

        // InList predicates are applied first to benefit from higher selectivity.
        let in_list_index = predicates
            .iter_mut()
            .partition_in_place(|(_, ps)| ps.iter().any(|p| matches!(p, Predicate::InList(_))));
        let mut iter = predicates.into_iter();
        for _ in 0..in_list_index {
            let (tag_name, predicates) = iter.next().unwrap();
            let fst_applier = Box::new(KeysFstApplier::try_from(predicates)?) as _;
            fst_appliers.push((tag_name, fst_applier));
        }

        for (tag_name, predicates) in iter {
            if predicates.is_empty() {
                continue;
            }
            let fst_applier = Box::new(IntersectionFstApplier::try_from(predicates)?) as _;
            fst_appliers.push((tag_name, fst_applier));
        }

        Ok(PredicatesIndexApplier { fst_appliers })
    }

    /// Creates a `BitVec` representing the full range of data in the index for initial scanning.
    fn bitmap_full_range(metadata: &InvertedIndexMetas) -> BitVec {
        let total_count = metadata.total_row_count;
        let segment_count = metadata.segment_row_count;
        let len = (total_count + segment_count - 1) / segment_count;
        BitVec::repeat(true, len as _)
    }
}

impl TryFrom<Vec<(String, Vec<Predicate>)>> for PredicatesIndexApplier {
    type Error = crate::inverted_index::error::Error;
    fn try_from(predicates: Vec<(String, Vec<Predicate>)>) -> Result<Self> {
        Self::try_from(predicates)
    }
}

#[cfg(test)]
mod tests {
    use common_base::bit_vec::prelude::*;
    use greptime_proto::v1::index::InvertedIndexMeta;

    use super::*;
    use crate::inverted_index::error::Error;
    use crate::inverted_index::format::reader::MockInvertedIndexReader;
    use crate::inverted_index::search::fst_apply::MockFstApplier;
    use crate::inverted_index::FstMap;

    fn s(s: &'static str) -> String {
        s.to_owned()
    }

    fn mock_metas(tags: impl IntoIterator<Item = &'static str>) -> InvertedIndexMetas {
        let mut metas = InvertedIndexMetas {
            total_row_count: 8,
            segment_row_count: 1,
            ..Default::default()
        };
        for tag in tags.into_iter() {
            let meta = InvertedIndexMeta {
                name: s(tag),
                ..Default::default()
            };
            metas.metas.insert(s(tag), meta);
        }
        metas
    }

    fn key_fst_applier(value: &'static str) -> Box<dyn FstApplier> {
        let mut mock_fst_applier = MockFstApplier::new();
        mock_fst_applier
            .expect_apply()
            .returning(move |fst| fst.get(value).into_iter().collect());
        Box::new(mock_fst_applier)
    }

    fn fst_value(offset: u32, size: u32) -> u64 {
        bytemuck::cast::<_, u64>([offset, size])
    }

    #[tokio::test]
    async fn test_index_applier_apply_get_key() {
        // An index applier that point-gets "tag-0_value-0" on tag "tag-0"
        let applier = PredicatesIndexApplier {
            fst_appliers: vec![(s("tag-0"), key_fst_applier("tag-0_value-0"))],
        };

        // An index reader with a single tag "tag-0" and a corresponding value "tag-0_value-0"
        let mut mock_reader = MockInvertedIndexReader::new();
        mock_reader
            .expect_metadata()
            .returning(|| Ok(mock_metas(["tag-0"])));
        mock_reader
            .expect_fst()
            .returning(|meta| match meta.name.as_str() {
                "tag-0" => Ok(FstMap::from_iter([(b"tag-0_value-0", fst_value(2, 1))]).unwrap()),
                _ => unreachable!(),
            });
        mock_reader.expect_bitmap().returning(|meta, offset, size| {
            match (meta.name.as_str(), offset, size) {
                ("tag-0", 2, 1) => Ok(bitvec![u8, Lsb0; 1, 0, 1, 0, 1, 0, 1, 0]),
                _ => unreachable!(),
            }
        });
        let indices = applier
            .apply(SearchContext::default(), &mut mock_reader)
            .await
            .unwrap();
        assert_eq!(indices, vec![0, 2, 4, 6]);

        // An index reader with a single tag "tag-0" but without value "tag-0_value-0"
        let mut mock_reader = MockInvertedIndexReader::new();
        mock_reader
            .expect_metadata()
            .returning(|| Ok(mock_metas(["tag-0"])));
        mock_reader
            .expect_fst()
            .returning(|meta| match meta.name.as_str() {
                "tag-0" => Ok(FstMap::from_iter([(b"tag-0_value-1", fst_value(2, 1))]).unwrap()),
                _ => unreachable!(),
            });
        let indices = applier
            .apply(SearchContext::default(), &mut mock_reader)
            .await
            .unwrap();
        assert!(indices.is_empty());
    }

    #[tokio::test]
    async fn test_index_applier_apply_intersection_with_two_tags() {
        // An index applier that intersects "tag-0_value-0" on tag "tag-0" and "tag-1_value-a" on tag "tag-1"
        let applier = PredicatesIndexApplier {
            fst_appliers: vec![
                (s("tag-0"), key_fst_applier("tag-0_value-0")),
                (s("tag-1"), key_fst_applier("tag-1_value-a")),
            ],
        };

        // An index reader with two tags "tag-0" and "tag-1" and respective values "tag-0_value-0" and "tag-1_value-a"
        let mut mock_reader = MockInvertedIndexReader::new();
        mock_reader
            .expect_metadata()
            .returning(|| Ok(mock_metas(["tag-0", "tag-1"])));
        mock_reader
            .expect_fst()
            .returning(|meta| match meta.name.as_str() {
                "tag-0" => Ok(FstMap::from_iter([(b"tag-0_value-0", fst_value(1, 1))]).unwrap()),
                "tag-1" => Ok(FstMap::from_iter([(b"tag-1_value-a", fst_value(2, 1))]).unwrap()),
                _ => unreachable!(),
            });
        mock_reader.expect_bitmap().returning(|meta, offset, size| {
            match (meta.name.as_str(), offset, size) {
                ("tag-0", 1, 1) => Ok(bitvec![u8, Lsb0; 1, 0, 1, 0, 1, 0, 1, 0]),
                ("tag-1", 2, 1) => Ok(bitvec![u8, Lsb0; 1, 1, 0, 1, 1, 0, 1, 1]),
                _ => unreachable!(),
            }
        });

        let indices = applier
            .apply(SearchContext::default(), &mut mock_reader)
            .await
            .unwrap();
        assert_eq!(indices, vec![0, 4, 6]);
    }

    #[tokio::test]
    async fn test_index_applier_without_predicates() {
        let applier = PredicatesIndexApplier {
            fst_appliers: vec![],
        };

        let mut mock_reader: MockInvertedIndexReader = MockInvertedIndexReader::new();
        mock_reader
            .expect_metadata()
            .returning(|| Ok(mock_metas(["tag-0"])));

        let indices = applier
            .apply(SearchContext::default(), &mut mock_reader)
            .await
            .unwrap();
        assert_eq!(indices, vec![0, 1, 2, 3, 4, 5, 6, 7]); // full range to scan
    }

    #[tokio::test]
    async fn test_index_applier_with_empty_index() {
        let mut mock_reader = MockInvertedIndexReader::new();
        mock_reader.expect_metadata().returning(move || {
            Ok(InvertedIndexMetas {
                total_row_count: 0, // No rows
                segment_row_count: 1,
                ..Default::default()
            })
        });

        let mut mock_fst_applier = MockFstApplier::new();
        mock_fst_applier.expect_apply().never();

        let applier = PredicatesIndexApplier {
            fst_appliers: vec![(s("tag-0"), Box::new(mock_fst_applier))],
        };

        let indices = applier
            .apply(SearchContext::default(), &mut mock_reader)
            .await
            .unwrap();
        assert!(indices.is_empty());
    }

    #[tokio::test]
    async fn test_index_applier_with_nonexistent_index() {
        let mut mock_reader = MockInvertedIndexReader::new();
        mock_reader
            .expect_metadata()
            .returning(|| Ok(mock_metas(vec![])));

        let mut mock_fst_applier = MockFstApplier::new();
        mock_fst_applier.expect_apply().never();

        let applier = PredicatesIndexApplier {
            fst_appliers: vec![(s("tag-0"), Box::new(mock_fst_applier))],
        };

        let result = applier
            .apply(
                SearchContext {
                    index_not_found_strategy: IndexNotFoundStrategy::ThrowError,
                },
                &mut mock_reader,
            )
            .await;
        assert!(matches!(result, Err(Error::IndexNotFound { .. })));

        let indices = applier
            .apply(
                SearchContext {
                    index_not_found_strategy: IndexNotFoundStrategy::ReturnEmpty,
                },
                &mut mock_reader,
            )
            .await
            .unwrap();
        assert!(indices.is_empty());

        let indices = applier
            .apply(
                SearchContext {
                    index_not_found_strategy: IndexNotFoundStrategy::Ignore,
                },
                &mut mock_reader,
            )
            .await
            .unwrap();
        assert_eq!(indices, vec![0, 1, 2, 3, 4, 5, 6, 7]);
    }
}
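[Editor's note: `bitmap_full_range` above sizes the bitmap to the ceiling of total_row_count / segment_row_count using only integer arithmetic. A worked sketch of the same formula:]

// ceil(total / segment) without floating point:
// e.g. total_row_count = 10, segment_row_count = 4
// => (10 + 4 - 1) / 4 = 13 / 4 = 3 segments (two full, one partial).
fn segment_count(total_row_count: u64, segment_row_count: u64) -> u64 {
    (total_row_count + segment_row_count - 1) / segment_row_count
}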
73 src/index/src/inverted_index/search/predicate.rs Normal file
@@ -0,0 +1,73 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use std::collections::HashSet;

use crate::inverted_index::Bytes;

/// Enumerates types of predicates for value filtering.
#[derive(Debug, Clone, PartialEq, Eq)]
pub enum Predicate {
    /// Predicate for matching values in a list.
    InList(InListPredicate),

    /// Predicate for matching values within a range.
    Range(RangePredicate),

    /// Predicate for matching values against a regex pattern.
    RegexMatch(RegexMatchPredicate),
}

/// `InListPredicate` contains a list of acceptable values. A value needs to match at least
/// one of the elements (logical OR semantic) for the predicate to be satisfied.
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct InListPredicate {
    /// List of acceptable values.
    pub list: HashSet<Bytes>,
}

/// `Bound` is a sub-component of a range, representing a single-sided limit that could be inclusive or exclusive.
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct Bound {
    /// Whether the bound is inclusive or exclusive.
    pub inclusive: bool,
    /// The value of the bound.
    pub value: Bytes,
}

/// `Range` defines a single continuous range which can optionally have a lower and/or upper limit.
/// Both the lower and upper bounds must be satisfied for the range condition to be true.
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct Range {
    /// The lower bound of the range.
    pub lower: Option<Bound>,
    /// The upper bound of the range.
    pub upper: Option<Bound>,
}

/// `RangePredicate` encapsulates a range condition that must be satisfied
/// for the predicate to hold true (logical AND semantic between the bounds).
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct RangePredicate {
    /// The range condition.
    pub range: Range,
}

/// `RegexMatchPredicate` encapsulates a single regex pattern. A value must match
/// the pattern for the predicate to be satisfied.
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct RegexMatchPredicate {
    /// The regex pattern.
    pub pattern: String,
}
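[Editor's note: a minimal sketch of composing the three predicate kinds for one tag column; the tag values "host-1"/"host-2" and the function name `example_predicates` are illustrative, not from the diff.]

use std::collections::HashSet;

fn example_predicates() -> Vec<Predicate> {
    vec![
        // OR semantics within the list: the key must be one of these values.
        Predicate::InList(InListPredicate {
            list: HashSet::from_iter([b"host-1".to_vec(), b"host-2".to_vec()]),
        }),
        // AND semantics between bounds: here only a lower bound is set.
        Predicate::Range(RangePredicate {
            range: Range {
                lower: Some(Bound {
                    inclusive: true,
                    value: b"host-1".to_vec(),
                }),
                upper: None,
            },
        }),
        // The key must also match this pattern.
        Predicate::RegexMatch(RegexMatchPredicate {
            pattern: "host-.*".to_string(),
        }),
    ]
}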
@@ -12,4 +12,6 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
+#![feature(iter_partition_in_place)]
+
 pub mod inverted_index;
@@ -61,19 +61,19 @@ fn flip_role(role: RegionRole) -> RegionRole {
 ///
 /// - If a region is in an `operable` set, it will be granted an `flip_role(current)`([RegionRole]);
 /// otherwise, it will be granted a `current`([RegionRole]).
-/// - If a region is in a `closable` set, it won't be granted.
+/// - If a region is in a `closeable` set, it won't be granted.
 fn grant(
     granted_regions: &mut Vec<GrantedRegion>,
     operable: &HashSet<RegionId>,
-    closable: &HashSet<RegionId>,
+    closeable: &HashSet<RegionId>,
     regions: &[RegionId],
     current: RegionRole,
 ) {
     for region in regions {
         if operable.contains(region) {
             granted_regions.push(GrantedRegion::new(*region, flip_role(current)));
-        } else if closable.contains(region) {
-            // Filters out the closable regions.
+        } else if closeable.contains(region) {
+            // Filters out the closeable regions.
         } else {
             granted_regions.push(GrantedRegion::new(*region, current))
         }
@@ -112,7 +112,7 @@ impl HeartbeatHandler for RegionLeaseHandler {
 
         let leaders = leaders.into_iter().flatten().collect::<Vec<_>>();
 
-        let (downgradable, closable) = self
+        let (downgradable, closeable) = self
             .region_lease_keeper
             .find_staled_leader_regions(cluster_id, datanode_id, &leaders)
             .await?;
@@ -120,44 +120,44 @@ impl HeartbeatHandler for RegionLeaseHandler {
         grant(
             &mut granted_regions,
             &downgradable,
-            &closable,
+            &closeable,
             &leaders,
             RegionRole::Leader,
         );
-        if !closable.is_empty() {
+        if !closeable.is_empty() {
             info!(
-                "Granting region lease, found closable leader regions: {:?} on datanode {}",
-                closable, datanode_id
+                "Granting region lease, found closeable leader regions: {:?} on datanode {}",
+                closeable, datanode_id
             );
         }
-        inactive_regions.extend(closable);
+        inactive_regions.extend(closeable);
 
         let followers = followers.into_iter().flatten().collect::<Vec<_>>();
 
-        let (upgradeable, closable) = self
+        let (upgradeable, closeable) = self
             .region_lease_keeper
             .find_staled_follower_regions(cluster_id, datanode_id, &followers)
             .await?;
 
-        // If a region is opening, it will be filtered out from the closable regions set.
-        let closable = self
+        // If a region is opening, it will be filtered out from the closeable regions set.
+        let closeable = self
             .opening_region_keeper
-            .filter_opening_regions(datanode_id, closable);
+            .filter_opening_regions(datanode_id, closeable);
 
         grant(
             &mut granted_regions,
             &upgradeable,
-            &closable,
+            &closeable,
             &followers,
             RegionRole::Follower,
         );
-        if !closable.is_empty() {
+        if !closeable.is_empty() {
             info!(
-                "Granting region lease, found closable follower regions {:?} on datanode {}",
-                closable, datanode_id
+                "Granting region lease, found closeable follower regions {:?} on datanode {}",
+                closeable, datanode_id
             );
         }
-        inactive_regions.extend(closable);
+        inactive_regions.extend(closeable);
 
         acc.inactive_region_ids = inactive_regions;
         acc.region_lease = Some(RegionLease {
@@ -167,6 +167,7 @@ impl HeartbeatHandler for RegionLeaseHandler {
                 .collect::<Vec<_>>(),
             duration_since_epoch: req.duration_since_epoch,
             lease_seconds: self.region_lease_seconds,
+            closeable_region_ids: vec![],
         });
 
         Ok(HandleControl::Continue)
@@ -16,7 +16,7 @@ use std::any::Any;
 use std::time::Duration;
 
 use api::v1::meta::MailboxMessage;
-use common_meta::distributed_time_constants::REGION_LEASE_SECS;
+use common_meta::distributed_time_constants::{MAILBOX_RTT_SECS, REGION_LEASE_SECS};
 use common_meta::instruction::{
     DowngradeRegion, DowngradeRegionReply, Instruction, InstructionReply,
 };
@@ -31,7 +31,7 @@ use crate::handler::HeartbeatMailbox;
 use crate::procedure::region_migration::{Context, State};
 use crate::service::mailbox::Channel;
 
-const DOWNGRADE_LEADER_REGION_TIMEOUT: Duration = Duration::from_secs(1);
+const DOWNGRADE_LEADER_REGION_TIMEOUT: Duration = Duration::from_secs(MAILBOX_RTT_SECS);
 
 #[derive(Debug, Serialize, Deserialize)]
 pub struct DowngradeLeaderRegion {
@@ -64,7 +64,7 @@ impl State for DowngradeLeaderRegion {
             tokio::time::sleep_until(*deadline).await;
         }
 
-        Ok(Box::new(UpgradeCandidateRegion))
+        Ok(Box::<UpgradeCandidateRegion>::default())
     }
 
     fn as_any(&self) -> &dyn Any {
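[Editor's note: the switch from `Box::new(UpgradeCandidateRegion)` to `Box::<UpgradeCandidateRegion>::default()` follows from a later change in this diff: the unit struct gains fields, so `Default` becomes the constructor. A one-line sketch of the idiom (variable name illustrative):]

let next_state = Box::<UpgradeCandidateRegion>::default(); // same as Box::new(UpgradeCandidateRegion::default())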
@@ -159,7 +159,7 @@ impl DowngradeLeaderRegion {
             }
             Err(error::Error::MailboxTimeout { .. }) => {
                 let reason = format!(
-                    "Mailbox received timeout for downgrade leader region {region_id} on Datanode {:?}",
+                    "Mailbox received timeout for downgrade leader region {region_id} on datanode {:?}",
                     leader,
                 );
                 error::RetryLaterSnafu { reason }.fail()
@@ -18,6 +18,7 @@ use std::time::Duration;
 
 use api::v1::meta::MailboxMessage;
 use common_meta::ddl::utils::region_storage_path;
+use common_meta::distributed_time_constants::MAILBOX_RTT_SECS;
 use common_meta::instruction::{Instruction, InstructionReply, OpenRegion, SimpleReply};
 use common_meta::RegionIdent;
 use serde::{Deserialize, Serialize};
@@ -29,7 +30,7 @@ use crate::procedure::region_migration::downgrade_leader_region::DowngradeLeader
 use crate::procedure::region_migration::{Context, State};
 use crate::service::mailbox::Channel;
 
-const OPEN_CANDIDATE_REGION_TIMEOUT: Duration = Duration::from_secs(1);
+const OPEN_CANDIDATE_REGION_TIMEOUT: Duration = Duration::from_secs(MAILBOX_RTT_SECS);
 
 #[derive(Debug, Serialize, Deserialize)]
 pub struct OpenCandidateRegion;
@@ -152,7 +153,7 @@ impl OpenCandidateRegion {
         } else {
             error::RetryLaterSnafu {
                 reason: format!(
-                    "Region {region_id} is not opened by Datanode {:?}, error: {error:?}",
+                    "Region {region_id} is not opened by datanode {:?}, error: {error:?}",
                     candidate,
                 ),
             }
@@ -161,7 +162,7 @@ impl OpenCandidateRegion {
             }
             Err(error::Error::MailboxTimeout { .. }) => {
                 let reason = format!(
-                    "Mailbox received timeout for open candidate region {region_id} on Datanode {:?}",
+                    "Mailbox received timeout for open candidate region {region_id} on datanode {:?}",
                     candidate,
                 );
                 error::RetryLaterSnafu { reason }.fail()
@@ -32,9 +32,9 @@ use crate::procedure::region_migration::{Context, State};
 pub enum UpdateMetadata {
     /// Downgrades the leader region.
     Downgrade,
-    /// Upgrade the candidate region.
+    /// Upgrades the candidate region.
     Upgrade,
-    /// Rollback the downgraded leader region.
+    /// Rolls back the downgraded region.
     Rollback,
 }
 
@@ -13,19 +13,55 @@
 // limitations under the License.
 
 use std::any::Any;
+use std::time::Duration;
 
+use api::v1::meta::MailboxMessage;
+use common_meta::distributed_time_constants::MAILBOX_RTT_SECS;
+use common_meta::instruction::{Instruction, InstructionReply, UpgradeRegion, UpgradeRegionReply};
+use common_telemetry::warn;
 use serde::{Deserialize, Serialize};
+use snafu::{ensure, ResultExt};
+use tokio::time::sleep;
 
-use crate::error::Result;
+use super::update_metadata::UpdateMetadata;
+use crate::error::{self, Result};
+use crate::handler::HeartbeatMailbox;
 use crate::procedure::region_migration::{Context, State};
+use crate::service::mailbox::Channel;
 
 #[derive(Debug, Serialize, Deserialize)]
-pub struct UpgradeCandidateRegion;
+pub struct UpgradeCandidateRegion {
+    // The optimistic retry times.
+    optimistic_retry: usize,
+    // The retry initial interval.
+    retry_initial_interval: Duration,
+    // The replay timeout of a instruction.
+    replay_timeout: Duration,
+    // If it's true it requires the candidate region MUST replay the WAL to the latest entry id.
+    // Otherwise, it will rollback to the old leader region.
+    require_ready: bool,
+}
+
+impl Default for UpgradeCandidateRegion {
+    fn default() -> Self {
+        Self {
+            optimistic_retry: 3,
+            retry_initial_interval: Duration::from_millis(500),
+            replay_timeout: Duration::from_millis(1000),
+            require_ready: true,
+        }
+    }
+}
 
 #[async_trait::async_trait]
 #[typetag::serde]
 impl State for UpgradeCandidateRegion {
-    async fn next(&mut self, _ctx: &mut Context) -> Result<Box<dyn State>> {
-        todo!();
+    async fn next(&mut self, ctx: &mut Context) -> Result<Box<dyn State>> {
+        if self.upgrade_region_with_retry(ctx).await {
+            Ok(Box::new(UpdateMetadata::Upgrade))
+        } else {
+            Ok(Box::new(UpdateMetadata::Rollback))
+        }
     }
 
     fn as_any(&self) -> &dyn Any {
@@ -33,4 +69,494 @@ impl State for UpgradeCandidateRegion {
|
||||
}
|
||||
}
|
||||
|
||||
impl UpgradeCandidateRegion {}
|
||||
impl UpgradeCandidateRegion {
|
||||
const UPGRADE_CANDIDATE_REGION_RTT: Duration = Duration::from_secs(MAILBOX_RTT_SECS);
|
||||
|
||||
/// Returns the timeout of the upgrade candidate region.
|
||||
///
|
||||
/// Equals `replay_timeout` + RTT
|
||||
    fn send_upgrade_candidate_region_timeout(&self) -> Duration {
        self.replay_timeout + UpgradeCandidateRegion::UPGRADE_CANDIDATE_REGION_RTT
    }

    /// Builds the upgrade region instruction.
    fn build_upgrade_region_instruction(&self, ctx: &Context) -> Instruction {
        let pc = &ctx.persistent_ctx;
        let region_id = pc.region_id;
        let last_entry_id = ctx.volatile_ctx.leader_region_last_entry_id;

        Instruction::UpgradeRegion(UpgradeRegion {
            region_id,
            last_entry_id,
            wait_for_replay_secs: Some(self.replay_timeout.as_secs()),
        })
    }

    /// Tries to upgrade a candidate region.
    ///
    /// Retry:
    /// - If `require_ready` is true but the candidate region replies that `ready` is false.
    /// - [MailboxTimeout](error::Error::MailboxTimeout): the request timed out.
    ///
    /// Abort:
    /// - The candidate region doesn't exist.
    /// - [PusherNotFound](error::Error::PusherNotFound): the datanode is unreachable.
    /// - [PushMessage](error::Error::PushMessage): the receiver is dropped.
    /// - [MailboxReceiver](error::Error::MailboxReceiver): the sender is dropped without sending (impossible).
    /// - [UnexpectedInstructionReply](error::Error::UnexpectedInstructionReply) (impossible).
    /// - Invalid JSON (impossible).
    async fn upgrade_region(&self, ctx: &Context, upgrade_instruction: &Instruction) -> Result<()> {
        let pc = &ctx.persistent_ctx;
        let region_id = pc.region_id;
        let candidate = &pc.to_peer;

        let msg = MailboxMessage::json_message(
            &format!("Upgrade candidate region: {}", region_id),
            &format!("Meta@{}", ctx.server_addr()),
            &format!("Datanode-{}@{}", candidate.id, candidate.addr),
            common_time::util::current_time_millis(),
            upgrade_instruction,
        )
        .with_context(|_| error::SerializeToJsonSnafu {
            input: upgrade_instruction.to_string(),
        })?;

        let ch = Channel::Datanode(candidate.id);
        let receiver = ctx
            .mailbox
            .send(&ch, msg, self.send_upgrade_candidate_region_timeout())
            .await?;

        match receiver.await? {
            Ok(msg) => {
                let reply = HeartbeatMailbox::json_reply(&msg)?;
                let InstructionReply::UpgradeRegion(UpgradeRegionReply {
                    ready,
                    exists,
                    error,
                }) = reply
                else {
                    return error::UnexpectedInstructionReplySnafu {
                        mailbox_message: msg.to_string(),
                        reason: "Unexpected reply of the upgrade region instruction",
                    }
                    .fail();
                };

                // Note: the order of handling below is important.
                if error.is_some() {
                    return error::RetryLaterSnafu {
                        reason: format!(
                            "Failed to upgrade the region {} on datanode {:?}, error: {:?}",
                            region_id, candidate, error
                        ),
                    }
                    .fail();
                }

                ensure!(
                    exists,
                    error::UnexpectedSnafu {
                        violated: format!(
                            "Expected region {} doesn't exist on datanode {:?}",
                            region_id, candidate
                        )
                    }
                );

                if self.require_ready && !ready {
                    return error::RetryLaterSnafu {
                        reason: format!(
                            "Candidate region {} still replaying the wal on datanode {:?}",
                            region_id, candidate
                        ),
                    }
                    .fail();
                }

                Ok(())
            }
            Err(error::Error::MailboxTimeout { .. }) => {
                let reason = format!(
                    "Mailbox received timeout for upgrade candidate region {region_id} on datanode {:?}",
                    candidate,
                );
                error::RetryLaterSnafu { reason }.fail()
            }
            Err(err) => Err(err),
        }
    }

    /// Upgrades a candidate region.
    ///
    /// Returns true if the candidate region is upgraded successfully.
    async fn upgrade_region_with_retry(&self, ctx: &Context) -> bool {
        let upgrade_instruction = self.build_upgrade_region_instruction(ctx);

        let mut retry = 0;
        let mut upgraded = false;

        loop {
            if let Err(err) = self.upgrade_region(ctx, &upgrade_instruction).await {
                retry += 1;
                if err.is_retryable() && retry < self.optimistic_retry {
                    warn!("Failed to upgrade region, error: {err:?}, retry later");
                    sleep(self.retry_initial_interval).await;
                } else {
                    break;
                }
            } else {
                upgraded = true;
                break;
            }
        }

        upgraded
    }
}
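
// Editor's sketch (not part of the original file): the boolean returned by
// `upgrade_region_with_retry` is what the surrounding state machine branches
// on, as the tests below assert. `next_state` is an illustrative name here,
// not a real method of this module.
#[allow(dead_code)]
fn next_state(upgraded: bool) -> UpdateMetadata {
    if upgraded {
        UpdateMetadata::Upgrade
    } else {
        UpdateMetadata::Rollback
    }
}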

#[cfg(test)]
mod tests {
    use std::assert_matches::assert_matches;

    use api::v1::meta::mailbox_message::Payload;
    use common_meta::peer::Peer;
    use common_time::util::current_time_millis;
    use store_api::storage::RegionId;

    use super::*;
    use crate::error::Error;
    use crate::procedure::region_migration::test_util::{
        new_close_region_reply, send_mock_reply, TestingEnv,
    };
    use crate::procedure::region_migration::{ContextFactory, PersistentContext};

    fn new_persistent_context() -> PersistentContext {
        PersistentContext {
            from_peer: Peer::empty(1),
            to_peer: Peer::empty(2),
            region_id: RegionId::new(1024, 1),
            cluster_id: 0,
        }
    }

    fn new_upgrade_region_reply(
        id: u64,
        ready: bool,
        exists: bool,
        error: Option<String>,
    ) -> MailboxMessage {
        MailboxMessage {
            id,
            subject: "mock".to_string(),
            from: "datanode".to_string(),
            to: "meta".to_string(),
            timestamp_millis: current_time_millis(),
            payload: Some(Payload::Json(
                serde_json::to_string(&InstructionReply::UpgradeRegion(UpgradeRegionReply {
                    ready,
                    exists,
                    error,
                }))
                .unwrap(),
            )),
        }
    }

    #[tokio::test]
    async fn test_datanode_is_unreachable() {
        let state = UpgradeCandidateRegion::default();
        let persistent_context = new_persistent_context();
        let env = TestingEnv::new();
        let ctx = env.context_factory().new_context(persistent_context);

        let instruction = &state.build_upgrade_region_instruction(&ctx);
        let err = state.upgrade_region(&ctx, instruction).await.unwrap_err();

        assert_matches!(err, Error::PusherNotFound { .. });
        assert!(!err.is_retryable());
    }

    #[tokio::test]
    async fn test_pusher_dropped() {
        let state = UpgradeCandidateRegion::default();
        let persistent_context = new_persistent_context();
        let to_peer_id = persistent_context.to_peer.id;

        let mut env = TestingEnv::new();
        let ctx = env.context_factory().new_context(persistent_context);
        let mailbox_ctx = env.mailbox_context();

        let (tx, rx) = tokio::sync::mpsc::channel(1);

        mailbox_ctx
            .insert_heartbeat_response_receiver(Channel::Datanode(to_peer_id), tx)
            .await;

        drop(rx);

        let instruction = &state.build_upgrade_region_instruction(&ctx);
        let err = state.upgrade_region(&ctx, instruction).await.unwrap_err();

        assert_matches!(err, Error::PushMessage { .. });
        assert!(!err.is_retryable());
    }

    #[tokio::test]
    async fn test_unexpected_instruction_reply() {
        let state = UpgradeCandidateRegion::default();
        let persistent_context = new_persistent_context();
        let to_peer_id = persistent_context.to_peer.id;

        let mut env = TestingEnv::new();
        let ctx = env.context_factory().new_context(persistent_context);
        let mailbox_ctx = env.mailbox_context();
        let mailbox = mailbox_ctx.mailbox().clone();

        let (tx, rx) = tokio::sync::mpsc::channel(1);

        mailbox_ctx
            .insert_heartbeat_response_receiver(Channel::Datanode(to_peer_id), tx)
            .await;

        send_mock_reply(mailbox, rx, |id| Ok(new_close_region_reply(id)));

        let instruction = &state.build_upgrade_region_instruction(&ctx);
        let err = state.upgrade_region(&ctx, instruction).await.unwrap_err();
        assert_matches!(err, Error::UnexpectedInstructionReply { .. });
        assert!(!err.is_retryable());
    }

    #[tokio::test]
    async fn test_upgrade_region_failed() {
        let state = UpgradeCandidateRegion::default();
        let persistent_context = new_persistent_context();
        let to_peer_id = persistent_context.to_peer.id;

        let mut env = TestingEnv::new();
        let ctx = env.context_factory().new_context(persistent_context);
        let mailbox_ctx = env.mailbox_context();
        let mailbox = mailbox_ctx.mailbox().clone();

        let (tx, rx) = tokio::sync::mpsc::channel(1);

        mailbox_ctx
            .insert_heartbeat_response_receiver(Channel::Datanode(to_peer_id), tx)
            .await;

        // A reply that contains an error.
        send_mock_reply(mailbox, rx, |id| {
            Ok(new_upgrade_region_reply(
                id,
                true,
                true,
                Some("test mocked".to_string()),
            ))
        });

        let instruction = &state.build_upgrade_region_instruction(&ctx);
        let err = state.upgrade_region(&ctx, instruction).await.unwrap_err();

        assert_matches!(err, Error::RetryLater { .. });
        assert!(err.is_retryable());
        assert!(err.to_string().contains("test mocked"));
    }

    #[tokio::test]
    async fn test_upgrade_region_not_found() {
        let state = UpgradeCandidateRegion::default();
        let persistent_context = new_persistent_context();
        let to_peer_id = persistent_context.to_peer.id;

        let mut env = TestingEnv::new();
        let ctx = env.context_factory().new_context(persistent_context);
        let mailbox_ctx = env.mailbox_context();
        let mailbox = mailbox_ctx.mailbox().clone();

        let (tx, rx) = tokio::sync::mpsc::channel(1);

        mailbox_ctx
            .insert_heartbeat_response_receiver(Channel::Datanode(to_peer_id), tx)
            .await;

        send_mock_reply(mailbox, rx, |id| {
            Ok(new_upgrade_region_reply(id, true, false, None))
        });

        let instruction = &state.build_upgrade_region_instruction(&ctx);
        let err = state.upgrade_region(&ctx, instruction).await.unwrap_err();

        assert_matches!(err, Error::Unexpected { .. });
        assert!(!err.is_retryable());
        assert!(err.to_string().contains("doesn't exist"));
    }

    #[tokio::test]
    async fn test_upgrade_region_require_ready() {
        let mut state = UpgradeCandidateRegion {
            require_ready: true,
            ..Default::default()
        };

        let persistent_context = new_persistent_context();
        let to_peer_id = persistent_context.to_peer.id;

        let mut env = TestingEnv::new();
        let ctx = env.context_factory().new_context(persistent_context);
        let mailbox_ctx = env.mailbox_context();
        let mailbox = mailbox_ctx.mailbox().clone();

        let (tx, rx) = tokio::sync::mpsc::channel(1);

        mailbox_ctx
            .insert_heartbeat_response_receiver(Channel::Datanode(to_peer_id), tx)
            .await;

        send_mock_reply(mailbox, rx, |id| {
            Ok(new_upgrade_region_reply(id, false, true, None))
        });

        let instruction = &state.build_upgrade_region_instruction(&ctx);
        let err = state.upgrade_region(&ctx, instruction).await.unwrap_err();

        assert_matches!(err, Error::RetryLater { .. });
        assert!(err.is_retryable());
        assert!(err.to_string().contains("still replaying the wal"));

        // Sets `require_ready` to false.
        state.require_ready = false;

        let mailbox = mailbox_ctx.mailbox().clone();
        let (tx, rx) = tokio::sync::mpsc::channel(1);

        mailbox_ctx
            .insert_heartbeat_response_receiver(Channel::Datanode(to_peer_id), tx)
            .await;

        send_mock_reply(mailbox, rx, |id| {
            Ok(new_upgrade_region_reply(id, false, true, None))
        });

        let instruction = &state.build_upgrade_region_instruction(&ctx);
        state.upgrade_region(&ctx, instruction).await.unwrap();
    }

    #[tokio::test]
    async fn test_upgrade_region_with_retry_ok() {
        let mut state = Box::<UpgradeCandidateRegion>::default();
        state.retry_initial_interval = Duration::from_millis(100);
        let persistent_context = new_persistent_context();
        let to_peer_id = persistent_context.to_peer.id;

        let mut env = TestingEnv::new();
        let mut ctx = env.context_factory().new_context(persistent_context);
        let mailbox_ctx = env.mailbox_context();
        let mailbox = mailbox_ctx.mailbox().clone();

        let (tx, mut rx) = tokio::sync::mpsc::channel(1);

        mailbox_ctx
            .insert_heartbeat_response_receiver(Channel::Datanode(to_peer_id), tx)
            .await;

        common_runtime::spawn_bg(async move {
            let resp = rx.recv().await.unwrap().unwrap();
            let reply_id = resp.mailbox_message.unwrap().id;
            mailbox
                .on_recv(
                    reply_id,
                    Err(error::MailboxTimeoutSnafu { id: reply_id }.build()),
                )
                .await
                .unwrap();

            // retry: 1
            let resp = rx.recv().await.unwrap().unwrap();
            let reply_id = resp.mailbox_message.unwrap().id;
            mailbox
                .on_recv(
                    reply_id,
                    Ok(new_upgrade_region_reply(reply_id, false, true, None)),
                )
                .await
                .unwrap();

            // retry: 2
            let resp = rx.recv().await.unwrap().unwrap();
            let reply_id = resp.mailbox_message.unwrap().id;
            mailbox
                .on_recv(
                    reply_id,
                    Ok(new_upgrade_region_reply(reply_id, true, true, None)),
                )
                .await
                .unwrap();
        });

        let next = state.next(&mut ctx).await.unwrap();

        let update_metadata = next.as_any().downcast_ref::<UpdateMetadata>().unwrap();

        assert_matches!(update_metadata, UpdateMetadata::Upgrade);
    }

    #[tokio::test]
    async fn test_upgrade_region_with_retry_failed() {
        let mut state = Box::<UpgradeCandidateRegion>::default();
        state.retry_initial_interval = Duration::from_millis(100);
        let persistent_context = new_persistent_context();
        let to_peer_id = persistent_context.to_peer.id;

        let mut env = TestingEnv::new();
        let mut ctx = env.context_factory().new_context(persistent_context);
        let mailbox_ctx = env.mailbox_context();
        let mailbox = mailbox_ctx.mailbox().clone();

        let (tx, mut rx) = tokio::sync::mpsc::channel(1);

        mailbox_ctx
            .insert_heartbeat_response_receiver(Channel::Datanode(to_peer_id), tx)
            .await;

        common_runtime::spawn_bg(async move {
            let resp = rx.recv().await.unwrap().unwrap();
            let reply_id = resp.mailbox_message.unwrap().id;
            mailbox
                .on_recv(
                    reply_id,
                    Err(error::MailboxTimeoutSnafu { id: reply_id }.build()),
                )
                .await
                .unwrap();

            // retry: 1
            let resp = rx.recv().await.unwrap().unwrap();
            let reply_id = resp.mailbox_message.unwrap().id;
            mailbox
                .on_recv(
                    reply_id,
                    Ok(new_upgrade_region_reply(reply_id, false, true, None)),
                )
                .await
                .unwrap();

            // retry: 2
            let resp = rx.recv().await.unwrap().unwrap();
            let reply_id = resp.mailbox_message.unwrap().id;
            mailbox
                .on_recv(
                    reply_id,
                    Ok(new_upgrade_region_reply(reply_id, false, false, None)),
                )
                .await
                .unwrap();
        });

        let next = state.next(&mut ctx).await.unwrap();

        let update_metadata = next.as_any().downcast_ref::<UpdateMetadata>().unwrap();
        assert_matches!(update_metadata, UpdateMetadata::Rollback);
    }
}

@@ -235,6 +235,7 @@ async fn test_on_datanode_drop_regions() {
         schema: "my_schema".to_string(),
         table: "my_table".to_string(),
         table_id: 42,
+        drop_if_exists: false,
     };

     let (region_server, mut rx) = EchoRegionServer::new();
@@ -73,12 +73,12 @@ impl RegionLeaseKeeper {
         Ok(metadata_subset)
     }

-    /// Returns downgradable regions, and closable regions.
+    /// Returns downgradable regions, and closeable regions.
     ///
     /// - Downgradable regions:
    ///    Region's peer(`datanode_id`) is the corresponding downgraded leader peer in `region_routes`.
     ///
-    /// - Closable regions:
+    /// - Closeable regions:
     ///   - It returns a region if its peer(`datanode_id`) isn't the corresponding leader peer in `region_routes`.
     ///   - Expected as [RegionRole::Follower](store_api::region_engine::RegionRole::Follower) regions.
     ///   - Unexpected [RegionRole::Leader](store_api::region_engine::RegionRole::Leader) regions.
@@ -97,37 +97,37 @@ impl RegionLeaseKeeper {
             self.collect_tables_metadata(&table_ids).await?
         };

-        let mut closable_set = HashSet::new();
+        let mut closeable_set = HashSet::new();
         let mut downgradable_set = HashSet::new();

         for (table_id, regions) in tables {
             if let Some(metadata) = metadata_subset.get(&table_id) {
                 let region_routes = &metadata.region_routes;

-                let (downgradable, closable) =
+                let (downgradable, closeable) =
                     find_staled_leader_regions(datanode_id, &regions, region_routes);

                 downgradable_set.extend(downgradable);
-                closable_set.extend(closable);
+                closeable_set.extend(closeable);
             } else {
                 warn!(
-                    "The table {} metadata is not found, appends closable leader regions: {:?}",
+                    "The table {} metadata is not found, appends closeable leader regions: {:?}",
                     table_id, regions
                 );
                 // If table metadata is not found.
-                closable_set.extend(regions);
+                closeable_set.extend(regions);
             }
         }

-        Ok((downgradable_set, closable_set))
+        Ok((downgradable_set, closeable_set))
     }

-    /// Returns upgradable regions, and closable regions.
+    /// Returns upgradable regions, and closeable regions.
     ///
     /// Upgradable regions:
     /// - Region's peer(`datanode_id`) is the corresponding leader peer in `region_routes`.
     ///
-    /// Closable regions:
+    /// Closeable regions:
     /// - Region's peer(`datanode_id`) isn't the corresponding leader/follower peer in `region_routes`.
     /// - Region's table metadata is not found.
     pub async fn find_staled_follower_regions(
@@ -145,28 +145,28 @@ impl RegionLeaseKeeper {
         };

         let mut upgradable_set = HashSet::new();
-        let mut closable_set = HashSet::new();
+        let mut closeable_set = HashSet::new();

         for (table_id, regions) in tables {
             if let Some(metadata) = metadata_subset.get(&table_id) {
                 let region_routes = &metadata.region_routes;

-                let (upgradable, closable) =
+                let (upgradable, closeable) =
                     find_staled_follower_regions(datanode_id, &regions, region_routes);

                 upgradable_set.extend(upgradable);
-                closable_set.extend(closable);
+                closeable_set.extend(closeable);
             } else {
                 warn!(
-                    "The table {} metadata is not found, appends closable followers regions: {:?}",
+                    "The table {} metadata is not found, appends closeable follower regions: {:?}",
                     table_id, regions
                 );
                 // If table metadata is not found.
-                closable_set.extend(regions);
+                closeable_set.extend(regions);
             }
         }

-        Ok((upgradable_set, closable_set))
+        Ok((upgradable_set, closeable_set))
     }

 #[cfg(test)]
@@ -290,27 +290,27 @@ mod tests {

         let datanode_regions = vec![region_id];

-        let (downgradable, closable) = keeper
+        let (downgradable, closeable) = keeper
             .find_staled_leader_regions(0, datanode_id, &datanode_regions)
             .await
             .unwrap();

-        assert_eq!(closable.len(), 1);
-        assert!(closable.contains(&region_id));
+        assert_eq!(closeable.len(), 1);
+        assert!(closeable.contains(&region_id));
         assert!(downgradable.is_empty());

-        let (upgradable, closable) = keeper
+        let (upgradable, closeable) = keeper
             .find_staled_follower_regions(0, datanode_id, &datanode_regions)
             .await
             .unwrap();

         assert!(upgradable.is_empty());
-        assert_eq!(closable.len(), 1);
-        assert!(closable.contains(&region_id));
+        assert_eq!(closeable.len(), 1);
+        assert!(closeable.contains(&region_id));
     }

     #[tokio::test]
-    async fn test_find_closable_regions_simple() {
+    async fn test_find_closeable_regions_simple() {
         let datanode_id = 1;
         let region_number = 1u32;
         let table_id = 10;
@@ -332,31 +332,31 @@ mod tests {
             .await
             .unwrap();

-        // `closable` should be empty.
+        // `closeable` should be empty.
         let datanode_regions = vec![region_id];

-        let (downgradable, closable) = keeper
+        let (downgradable, closeable) = keeper
             .find_staled_leader_regions(0, datanode_id, &datanode_regions)
             .await
             .unwrap();

-        assert!(closable.is_empty());
+        assert!(closeable.is_empty());
         assert!(downgradable.is_empty());

-        // `closable` should be empty.
+        // `closeable` should be empty.
         let datanode_regions = vec![];

-        let (downgradable, closable) = keeper
+        let (downgradable, closeable) = keeper
             .find_staled_leader_regions(0, datanode_id, &datanode_regions)
             .await
             .unwrap();

-        assert!(closable.is_empty());
+        assert!(closeable.is_empty());
         assert!(downgradable.is_empty());
     }

     #[tokio::test]
-    async fn test_find_closable_regions_2() {
+    async fn test_find_closeable_regions_2() {
         let datanode_id = 1;
         let region_number = 1u32;
         let table_id = 10;
@@ -393,29 +393,29 @@ mod tests {
             .unwrap();

         // Unexpected Leader region.
-        // `closable` should be vec![unknown_region_id].
+        // `closeable` should be vec![unknown_region_id].
         let datanode_regions = vec![region_id, unknown_region_id];

-        let (downgradable, closable) = keeper
+        let (downgradable, closeable) = keeper
             .find_staled_leader_regions(0, datanode_id, &datanode_regions)
             .await
             .unwrap();

-        assert_eq!(closable.len(), 1);
-        assert!(closable.contains(&unknown_region_id));
+        assert_eq!(closeable.len(), 1);
+        assert!(closeable.contains(&unknown_region_id));
         assert!(downgradable.is_empty());

         // Expected as Follower region.
-        // `closable` should be vec![another_region_id], because `another_region_id` is an active region of `another_peer`.
+        // `closeable` should be vec![another_region_id], because `another_region_id` is an active region of `another_peer`.
         let datanode_regions = vec![another_region_id];

-        let (downgradable, closable) = keeper
+        let (downgradable, closeable) = keeper
             .find_staled_leader_regions(0, datanode_id, &datanode_regions)
             .await
             .unwrap();

-        assert_eq!(closable.len(), 1);
-        assert!(closable.contains(&another_region_id));
+        assert_eq!(closeable.len(), 1);
+        assert!(closeable.contains(&another_region_id));
         assert!(downgradable.is_empty());
     }

@@ -442,16 +442,16 @@ mod tests {
             .await
             .unwrap();

-        // `upgradable` should be empty, `closable` should be empty.
+        // `upgradable` should be empty, `closeable` should be empty.
         let datanode_regions = vec![region_id, another_region_id];

-        let (downgradable, closable) = keeper
+        let (downgradable, closeable) = keeper
             .find_staled_leader_regions(0, datanode_id, &datanode_regions)
             .await
             .unwrap();

-        assert_eq!(closable.len(), 1);
-        assert!(closable.contains(&another_region_id));
+        assert_eq!(closeable.len(), 1);
+        assert!(closeable.contains(&another_region_id));
         assert_eq!(downgradable.len(), 1);
         assert!(downgradable.contains(&region_id));
     }
@@ -479,29 +479,29 @@ mod tests {
             .await
             .unwrap();

-        // `upgradable` should be vec![region_id], `closable` should be empty.
+        // `upgradable` should be vec![region_id], `closeable` should be empty.
         let datanode_regions = vec![region_id];

-        let (upgradable, closable) = keeper
+        let (upgradable, closeable) = keeper
             .find_staled_follower_regions(0, datanode_id, &datanode_regions)
             .await
             .unwrap();

-        assert!(closable.is_empty());
+        assert!(closeable.is_empty());
         assert_eq!(upgradable.len(), 1);
         assert!(upgradable.contains(&region_id));

-        // `upgradable` should be empty, `closable` should be vec![region_id].
+        // `upgradable` should be empty, `closeable` should be vec![region_id].
         let datanode_regions = vec![region_id];

-        let (upgradable, closable) = keeper
+        let (upgradable, closeable) = keeper
             .find_staled_follower_regions(0, datanode_id + 1, &datanode_regions)
             .await
             .unwrap();

         assert!(upgradable.is_empty());
-        assert_eq!(closable.len(), 1);
-        assert!(closable.contains(&region_id));
+        assert_eq!(closeable.len(), 1);
+        assert!(closeable.contains(&region_id));
     }

     #[tokio::test]
@@ -528,12 +528,12 @@ mod tests {
             .await
             .unwrap();

-        let (upgradable, closable) = keeper
+        let (upgradable, closeable) = keeper
             .find_staled_follower_regions(0, datanode_id, &datanode_regions)
             .await
             .unwrap();
         assert!(upgradable.is_empty());
-        assert!(closable.is_empty());
+        assert!(closeable.is_empty());
     }

     #[test]
@@ -20,14 +20,14 @@ use common_meta::rpc::router::{
 use store_api::storage::RegionId;

 use super::utils::downgradable_leader_regions;
-use crate::region::lease_keeper::utils::closable_leader_region;
+use crate::region::lease_keeper::utils::closeable_leader_region;

-/// Returns Downgradable regions and Closable regions.
+/// Returns Downgradable regions and Closeable regions.
 ///
 /// - Downgradable regions:
 ///   Region's peer(`datanode_id`) is the corresponding downgraded leader peer in `region_routes`.
 ///
-/// - Closable regions:
+/// - Closeable regions:
 ///   Region's peer(`datanode_id`) isn't the corresponding leader peer in `region_routes`.
 ///   - Expected as [RegionRole::Follower](store_api::region_engine::RegionRole::Follower) regions.
 ///   - Unexpected [RegionRole::Leader](store_api::region_engine::RegionRole::Leader) regions.
@@ -39,7 +39,7 @@ pub fn find_staled_leader_regions(
     let region_leader_map = convert_to_region_leader_map(region_routes);
     let region_leader_status_map = convert_to_region_leader_status_map(region_routes);

-    let (downgradable, closable): (HashSet<_>, HashSet<_>) = datanode_regions
+    let (downgradable, closeable): (HashSet<_>, HashSet<_>) = datanode_regions
         .iter()
         .map(|region_id| {
             (
@@ -49,15 +49,15 @@ pub fn find_staled_leader_regions(
                     &region_leader_map,
                     &region_leader_status_map,
                 ),
-                closable_leader_region(datanode_id, *region_id, &region_leader_map),
+                closeable_leader_region(datanode_id, *region_id, &region_leader_map),
             )
         })
         .unzip();

     let downgradable = downgradable.into_iter().flatten().collect();
-    let closable = closable.into_iter().flatten().collect();
+    let closeable = closeable.into_iter().flatten().collect();

-    (downgradable, closable)
+    (downgradable, closeable)
 }

 #[cfg(test)]
@@ -85,20 +85,20 @@ mod tests {
         }];

         // Grants lease.
-        // `closable` should be empty, `region_id` is an active leader region of the `peer`.
-        let (downgradable, closable) =
+        // `closeable` should be empty, `region_id` is an active leader region of the `peer`.
+        let (downgradable, closeable) =
             find_staled_leader_regions(datanode_id, &datanode_regions, &region_routes);

-        assert!(closable.is_empty());
+        assert!(closeable.is_empty());
         assert!(downgradable.is_empty());

         // Unexpected Leader region.
-        // `closable` should be vec![`region_id`];
-        let (downgradable, closable) =
+        // `closeable` should be vec![`region_id`];
+        let (downgradable, closeable) =
             find_staled_leader_regions(datanode_id, &datanode_regions, &[]);

-        assert_eq!(closable.len(), 1);
-        assert!(closable.contains(&region_id));
+        assert_eq!(closeable.len(), 1);
+        assert!(closeable.contains(&region_id));
         assert!(downgradable.is_empty());

         let region_routes = vec![RegionRoute {
@@ -111,12 +111,12 @@ mod tests {
         let retained_active_regions = datanode_regions.clone();

         // Expected as Follower region.
-        // `closable` should be vec![`region_id`], `region_id` is RegionRole::Leader.
-        let (downgradable, closable) =
+        // `closeable` should be vec![`region_id`], `region_id` is RegionRole::Leader.
+        let (downgradable, closeable) =
             find_staled_leader_regions(datanode_id, &retained_active_regions, &region_routes);

         assert!(downgradable.is_empty());
-        assert_eq!(closable.len(), 1);
-        assert!(closable.contains(&region_id));
+        assert_eq!(closeable.len(), 1);
+        assert!(closeable.contains(&region_id));
     }
 }
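
// Editor's sketch (assumption, not part of this diff) of the invariant the
// renamed helpers encode: a region held on `node_id` is closeable exactly
// when the route table no longer lists `node_id` as a peer for it.
//
//     pub fn is_closeable(node_id: u64, peers_in_route: &[u64]) -> bool {
//         !peers_in_route.contains(&node_id)
//     }
//
//     assert!(is_closeable(4, &[1, 2, 3]));   // not routed -> closeable
//     assert!(!is_closeable(1, &[1, 2, 3]));  // still routed -> keep open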

@@ -24,7 +24,7 @@ use store_api::storage::{RegionId, RegionNumber};
 /// Returns Some(region_id) if it's not a leader region in `region_route`.
 ///
 /// It removes a leader region if its peer(`node_id`) isn't the corresponding leader peer in `region_routes`.
-pub fn closable_leader_region(
+pub fn closeable_leader_region(
     node_id: u64,
     region_id: RegionId,
     region_leader_map: &HashMap<RegionNumber, &Peer>,
@@ -63,12 +63,12 @@ pub fn downgradable_leader_regions(
     }
 }

-/// Returns upgradable regions, and closable regions.
+/// Returns upgradable regions, and closeable regions.
 ///
 /// Upgradable regions:
 /// - Region's peer(`datanode_id`) is the corresponding leader peer in `region_routes`.
 ///
-/// Closable regions:
+/// Closeable regions:
 /// - Region's peer(`datanode_id`) isn't the corresponding leader/follower peer in `region_routes`.
 pub fn find_staled_follower_regions(
     datanode_id: u64,
@@ -79,7 +79,7 @@ pub fn find_staled_follower_regions(
     let region_leader_status_map = convert_to_region_leader_status_map(region_routes);
     let region_peer_map = convert_to_region_peer_map(region_routes);

-    let (upgradable, closable): (HashSet<Option<RegionId>>, HashSet<Option<RegionId>>) =
+    let (upgradable, closeable): (HashSet<Option<RegionId>>, HashSet<Option<RegionId>>) =
         datanode_regions
             .iter()
             .map(|region_id| {
@@ -90,15 +90,15 @@ pub fn find_staled_follower_regions(
                     &region_leader_map,
                     &region_leader_status_map,
                 ),
-                closable_region(datanode_id, *region_id, &region_peer_map),
+                closeable_region(datanode_id, *region_id, &region_peer_map),
             )
         })
         .unzip();

     let upgradable = upgradable.into_iter().flatten().collect();
-    let closable = closable.into_iter().flatten().collect();
+    let closeable = closeable.into_iter().flatten().collect();

-    (upgradable, closable)
+    (upgradable, closeable)
 }

 /// Returns Some(region) if its peer(`node_id`) is a leader region peer in `region_routes`.
@@ -124,7 +124,7 @@ pub fn upgradable_follower_region(
 }

 /// Returns Some(region) if its peer(`node_id`) isn't a leader or follower region peer in `region_routes`.
-pub fn closable_region(
+pub fn closeable_region(
     node_id: u64,
     region_id: RegionId,
     region_peer_map: &HashMap<RegionNumber, HashSet<u64>>,
@@ -149,7 +149,7 @@ mod tests {
     use super::*;

     #[test]
-    fn test_closable_leader_region() {
+    fn test_closeable_leader_region() {
         let datanode_id = 1u64;
         let region_number = 1u32;
         let region_id = RegionId::from_u64(region_number as u64);
@@ -160,19 +160,19 @@ mod tests {
         // Should be None, `region_id` is an active region of `peer`.
         assert_eq!(
             None,
-            closable_leader_region(datanode_id, region_id, &region_leader_map,)
+            closeable_leader_region(datanode_id, region_id, &region_leader_map,)
         );

         // Should be Some(`region_id`), incorrect datanode_id.
         assert_eq!(
             Some(region_id),
-            closable_leader_region(datanode_id + 1, region_id, &region_leader_map,)
+            closeable_leader_region(datanode_id + 1, region_id, &region_leader_map,)
         );

         // Should be Some(`region_id`), the inactive_leader_regions is empty.
         assert_eq!(
             Some(region_id),
-            closable_leader_region(datanode_id, region_id, &Default::default(),)
+            closeable_leader_region(datanode_id, region_id, &Default::default(),)
         );

         let another_peer = Peer::empty(datanode_id + 1);
@@ -181,7 +181,7 @@ mod tests {
         // Should be Some(`region_id`), `region_id` is an active region of `another_peer`.
         assert_eq!(
             Some(region_id),
-            closable_leader_region(datanode_id, region_id, &region_leader_map,)
+            closeable_leader_region(datanode_id, region_id, &region_leader_map,)
         );
     }

@@ -241,31 +241,31 @@ mod tests {
     }

     #[test]
-    fn test_closable_follower_region() {
+    fn test_closeable_follower_region() {
         let region_number = 1u32;
         let region_id = RegionId::from_u64(region_number as u64);
         let another_region_id = RegionId::from_u64(region_number as u64 + 1);
         let region_peer_map = [(region_number, HashSet::from([1, 2, 3]))].into();

         // Should be None.
-        assert_eq!(None, closable_region(1, region_id, &region_peer_map));
+        assert_eq!(None, closeable_region(1, region_id, &region_peer_map));

         // Should be Some(`region_id`), incorrect `datanode_id`.
         assert_eq!(
             Some(region_id),
-            closable_region(4, region_id, &region_peer_map)
+            closeable_region(4, region_id, &region_peer_map)
         );

         // Should be Some(`another_region_id`), `another_region_id` doesn't exist.
         assert_eq!(
             Some(another_region_id),
-            closable_region(1, another_region_id, &region_peer_map)
+            closeable_region(1, another_region_id, &region_peer_map)
         );

         // Should be Some(`another_region_id`), `another_region_id` doesn't exist, incorrect `datanode_id`.
         assert_eq!(
             Some(another_region_id),
-            closable_region(4, another_region_id, &region_peer_map)
+            closeable_region(4, another_region_id, &region_peer_map)
         );
     }

@@ -304,7 +304,7 @@ async fn test_engine_truncate_during_flush() {
     let entry_id = version_data.last_entry_id;
     let sequence = version_data.committed_sequence;

-    // Flush reigon.
+    // Flush region.
     let engine_cloned = engine.clone();
    let flush_task = tokio::spawn(async move {
         info!("do flush task!!!!");

@@ -177,7 +177,7 @@ fn may_compat_primary_key(
         CompatReaderSnafu {
             region_id: expect.region_id,
             reason: format!(
-                "primary key has more columns {} than exepct {}",
+                "primary key has more columns {} than expect {}",
                 actual.primary_key.len(),
                 expect.primary_key.len()
             ),

@@ -50,7 +50,7 @@ use store_api::storage::{ColumnId, RegionId};

 use crate::config::MitoConfig;
 use crate::engine::listener::EventListenerRef;
-use crate::engine::MitoEngine;
+use crate::engine::{MitoEngine, MITO_ENGINE_NAME};
 use crate::error::Result;
 use crate::flush::{WriteBufferManager, WriteBufferManagerRef};
 use crate::manifest::manager::{RegionManifestManager, RegionManifestOptions};
@@ -278,6 +278,7 @@ pub struct CreateRequestBuilder {
     options: HashMap<String, String>,
     primary_key: Option<Vec<ColumnId>>,
     all_not_null: bool,
+    engine: String,
 }

 impl Default for CreateRequestBuilder {
@@ -289,6 +290,7 @@ impl Default for CreateRequestBuilder {
             options: HashMap::new(),
             primary_key: None,
             all_not_null: false,
+            engine: MITO_ENGINE_NAME.to_string(),
         }
     }
 }
@@ -378,7 +380,7 @@ impl CreateRequestBuilder {

         RegionCreateRequest {
             // We use empty engine name as we already locates the engine.
-            engine: String::new(),
+            engine: self.engine.to_string(),
             column_metadatas,
             primary_key: self.primary_key.clone().unwrap_or(primary_key),
             options: self.options.clone(),

@@ -17,7 +17,6 @@ md5 = "0.7"
 moka = { workspace = true, features = ["future"] }
 opendal = { version = "0.40", features = [
     "layers-tracing",
-    "layers-prometheus",
 ] }
 prometheus.workspace = true
 snafu.workspace = true

@@ -13,6 +13,8 @@
 // limitations under the License.

 mod lru_cache;
+mod prometheus;

 pub use lru_cache::*;
 pub use opendal::layers::*;
+pub use prometheus::PrometheusMetricsLayer;

@@ -114,7 +114,7 @@ impl<C: Accessor + Clone> ReadCache<C> {
         (self.mem_cache.entry_count(), self.mem_cache.weighted_size())
     }

-    /// Invalidte all cache items which key starts with `prefix`.
+    /// Invalidate all cache items whose key starts with `prefix`.
     pub(crate) async fn invalidate_entries_with_prefix(&self, prefix: String) {
         // Safety: always ok when building cache with `support_invalidation_closures`.
         self.mem_cache

src/object-store/src/layers/prometheus.rs (new file, 562 lines)
@@ -0,0 +1,562 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

//! Code originally from <https://github.com/apache/incubator-opendal/blob/main/core/src/layers/prometheus.rs>,
//! with a tiny change to avoid a crash in multi-threaded environments.

use std::fmt::{Debug, Formatter};
use std::io;
use std::task::{Context, Poll};

use async_trait::async_trait;
use bytes::Bytes;
use common_telemetry::debug;
use futures::{FutureExt, TryFutureExt};
use lazy_static::lazy_static;
use opendal::raw::*;
use opendal::ErrorKind;
use prometheus::{
    exponential_buckets, histogram_opts, register_histogram_vec, register_int_counter_vec,
    HistogramVec, IntCounterVec,
};

type Result<T> = std::result::Result<T, opendal::Error>;

lazy_static! {
    static ref REQUESTS_TOTAL: IntCounterVec = register_int_counter_vec!(
        "opendal_requests_total",
        "Total times of all kinds of operation being called",
        &["scheme", "operation"],
    )
    .unwrap();
    static ref REQUESTS_DURATION_SECONDS: HistogramVec = register_histogram_vec!(
        histogram_opts!(
            "opendal_requests_duration_seconds",
            "Histogram of the time spent on specific operation",
            exponential_buckets(0.01, 2.0, 16).unwrap()
        ),
        &["scheme", "operation"]
    )
    .unwrap();
    static ref BYTES_TOTAL: HistogramVec = register_histogram_vec!(
        histogram_opts!(
            "opendal_bytes_total",
            "Total size of sync or async Read/Write",
            exponential_buckets(0.01, 2.0, 16).unwrap()
        ),
        &["scheme", "operation"]
    )
    .unwrap();
}

#[inline]
fn increment_errors_total(op: Operation, kind: ErrorKind) {
    debug!(
        "Prometheus statistics metrics error, operation {} error {}",
        op.into_static(),
        kind.into_static()
    );
}

/// Please refer to [prometheus](https://docs.rs/prometheus) for every operation.
///
/// # Prometheus Metrics
///
/// In this section, we will introduce three metrics that are currently being exported by opendal. These metrics are essential for understanding the behavior and performance of opendal.
///
/// | Metric Name                       | Type      | Description                                        | Labels            |
/// |-----------------------------------|-----------|----------------------------------------------------|-------------------|
/// | opendal_requests_total            | Counter   | Total times of all kinds of operation being called | scheme, operation |
/// | opendal_requests_duration_seconds | Histogram | Histogram of the time spent on specific operation  | scheme, operation |
/// | opendal_bytes_total               | Histogram | Total size of sync or async Read/Write             | scheme, operation |
///
/// For a more detailed explanation of these metrics and how they are used, please refer to the [Prometheus documentation](https://prometheus.io/docs/introduction/overview/).
///
/// # Histogram Configuration
///
/// The metric buckets for these histograms are automatically generated based on the `exponential_buckets(0.01, 2.0, 16)` configuration.
#[derive(Default, Debug, Clone)]
pub struct PrometheusMetricsLayer;

impl<A: Accessor> Layer<A> for PrometheusMetricsLayer {
    type LayeredAccessor = PrometheusAccessor<A>;

    fn layer(&self, inner: A) -> Self::LayeredAccessor {
        let meta = inner.info();
        let scheme = meta.scheme();

        PrometheusAccessor {
            inner,
            scheme: scheme.to_string(),
        }
    }
}

#[derive(Clone)]
pub struct PrometheusAccessor<A: Accessor> {
    inner: A,
    scheme: String,
}

impl<A: Accessor> Debug for PrometheusAccessor<A> {
    fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
        f.debug_struct("PrometheusAccessor")
            .field("inner", &self.inner)
            .finish_non_exhaustive()
    }
}

#[async_trait]
impl<A: Accessor> LayeredAccessor for PrometheusAccessor<A> {
    type Inner = A;
    type Reader = PrometheusMetricWrapper<A::Reader>;
    type BlockingReader = PrometheusMetricWrapper<A::BlockingReader>;
    type Writer = PrometheusMetricWrapper<A::Writer>;
    type BlockingWriter = PrometheusMetricWrapper<A::BlockingWriter>;
    type Pager = A::Pager;
    type BlockingPager = A::BlockingPager;

    fn inner(&self) -> &Self::Inner {
        &self.inner
    }

    async fn create_dir(&self, path: &str, args: OpCreateDir) -> Result<RpCreateDir> {
        REQUESTS_TOTAL
            .with_label_values(&[&self.scheme, Operation::CreateDir.into_static()])
            .inc();

        let timer = REQUESTS_DURATION_SECONDS
            .with_label_values(&[&self.scheme, Operation::CreateDir.into_static()])
            .start_timer();
        let create_res = self.inner.create_dir(path, args).await;

        timer.observe_duration();
        create_res.map_err(|e| {
            increment_errors_total(Operation::CreateDir, e.kind());
            e
        })
    }

    async fn read(&self, path: &str, args: OpRead) -> Result<(RpRead, Self::Reader)> {
        REQUESTS_TOTAL
            .with_label_values(&[&self.scheme, Operation::Read.into_static()])
            .inc();

        let timer = REQUESTS_DURATION_SECONDS
            .with_label_values(&[&self.scheme, Operation::Read.into_static()])
            .start_timer();

        let read_res = self
            .inner
            .read(path, args)
            .map(|v| {
                v.map(|(rp, r)| {
                    (
                        rp,
                        PrometheusMetricWrapper::new(r, Operation::Read, &self.scheme),
                    )
                })
            })
            .await;
        timer.observe_duration();
        read_res.map_err(|e| {
            increment_errors_total(Operation::Read, e.kind());
            e
        })
    }

    async fn write(&self, path: &str, args: OpWrite) -> Result<(RpWrite, Self::Writer)> {
        REQUESTS_TOTAL
            .with_label_values(&[&self.scheme, Operation::Write.into_static()])
            .inc();

        let timer = REQUESTS_DURATION_SECONDS
            .with_label_values(&[&self.scheme, Operation::Write.into_static()])
            .start_timer();

        let write_res = self
            .inner
            .write(path, args)
            .map(|v| {
                v.map(|(rp, r)| {
                    (
                        rp,
                        PrometheusMetricWrapper::new(r, Operation::Write, &self.scheme),
                    )
                })
            })
            .await;
        timer.observe_duration();
        write_res.map_err(|e| {
            increment_errors_total(Operation::Write, e.kind());
            e
        })
    }

    async fn stat(&self, path: &str, args: OpStat) -> Result<RpStat> {
        REQUESTS_TOTAL
            .with_label_values(&[&self.scheme, Operation::Stat.into_static()])
            .inc();
        let timer = REQUESTS_DURATION_SECONDS
            .with_label_values(&[&self.scheme, Operation::Stat.into_static()])
            .start_timer();

        let stat_res = self
            .inner
            .stat(path, args)
            .inspect_err(|e| {
                // The error metric is recorded once here; recording it again
                // on the awaited result would double-count errors.
                increment_errors_total(Operation::Stat, e.kind());
            })
            .await;
        timer.observe_duration();
        stat_res
    }

    async fn delete(&self, path: &str, args: OpDelete) -> Result<RpDelete> {
        REQUESTS_TOTAL
            .with_label_values(&[&self.scheme, Operation::Delete.into_static()])
            .inc();

        let timer = REQUESTS_DURATION_SECONDS
            .with_label_values(&[&self.scheme, Operation::Delete.into_static()])
            .start_timer();

        let delete_res = self.inner.delete(path, args).await;
        timer.observe_duration();
        delete_res.map_err(|e| {
            increment_errors_total(Operation::Delete, e.kind());
            e
        })
    }

    async fn list(&self, path: &str, args: OpList) -> Result<(RpList, Self::Pager)> {
        REQUESTS_TOTAL
            .with_label_values(&[&self.scheme, Operation::List.into_static()])
            .inc();

        let timer = REQUESTS_DURATION_SECONDS
            .with_label_values(&[&self.scheme, Operation::List.into_static()])
            .start_timer();

        let list_res = self.inner.list(path, args).await;

        timer.observe_duration();
        list_res.map_err(|e| {
            increment_errors_total(Operation::List, e.kind());
            e
        })
    }

    async fn batch(&self, args: OpBatch) -> Result<RpBatch> {
        REQUESTS_TOTAL
            .with_label_values(&[&self.scheme, Operation::Batch.into_static()])
            .inc();

        let timer = REQUESTS_DURATION_SECONDS
            .with_label_values(&[&self.scheme, Operation::Batch.into_static()])
            .start_timer();
        let result = self.inner.batch(args).await;

        timer.observe_duration();
        result.map_err(|e| {
            increment_errors_total(Operation::Batch, e.kind());
            e
        })
    }

    async fn presign(&self, path: &str, args: OpPresign) -> Result<RpPresign> {
        REQUESTS_TOTAL
            .with_label_values(&[&self.scheme, Operation::Presign.into_static()])
            .inc();

        let timer = REQUESTS_DURATION_SECONDS
            .with_label_values(&[&self.scheme, Operation::Presign.into_static()])
            .start_timer();
        let result = self.inner.presign(path, args).await;
        timer.observe_duration();

        result.map_err(|e| {
            increment_errors_total(Operation::Presign, e.kind());
            e
        })
    }

    fn blocking_create_dir(&self, path: &str, args: OpCreateDir) -> Result<RpCreateDir> {
        REQUESTS_TOTAL
            .with_label_values(&[&self.scheme, Operation::BlockingCreateDir.into_static()])
            .inc();

        let timer = REQUESTS_DURATION_SECONDS
            .with_label_values(&[&self.scheme, Operation::BlockingCreateDir.into_static()])
            .start_timer();
        let result = self.inner.blocking_create_dir(path, args);

        timer.observe_duration();

        result.map_err(|e| {
            increment_errors_total(Operation::BlockingCreateDir, e.kind());
            e
        })
    }

    fn blocking_read(&self, path: &str, args: OpRead) -> Result<(RpRead, Self::BlockingReader)> {
        REQUESTS_TOTAL
            .with_label_values(&[&self.scheme, Operation::BlockingRead.into_static()])
            .inc();

        let timer = REQUESTS_DURATION_SECONDS
            // Both labels are required here; passing only the scheme would
            // panic at runtime because the metric declares two label names.
            .with_label_values(&[&self.scheme, Operation::BlockingRead.into_static()])
            .start_timer();
        let result = self.inner.blocking_read(path, args).map(|(rp, r)| {
            (
                rp,
                PrometheusMetricWrapper::new(r, Operation::BlockingRead, &self.scheme),
            )
        });
        timer.observe_duration();
        result.map_err(|e| {
            increment_errors_total(Operation::BlockingRead, e.kind());
            e
        })
    }

    fn blocking_write(&self, path: &str, args: OpWrite) -> Result<(RpWrite, Self::BlockingWriter)> {
        REQUESTS_TOTAL
            .with_label_values(&[&self.scheme, Operation::BlockingWrite.into_static()])
            .inc();

        let timer = REQUESTS_DURATION_SECONDS
            .with_label_values(&[&self.scheme, Operation::BlockingWrite.into_static()])
            .start_timer();
        let result = self.inner.blocking_write(path, args).map(|(rp, r)| {
            (
                rp,
                PrometheusMetricWrapper::new(r, Operation::BlockingWrite, &self.scheme),
            )
        });
        timer.observe_duration();
        result.map_err(|e| {
            increment_errors_total(Operation::BlockingWrite, e.kind());
            e
        })
    }

    fn blocking_stat(&self, path: &str, args: OpStat) -> Result<RpStat> {
        REQUESTS_TOTAL
            .with_label_values(&[&self.scheme, Operation::BlockingStat.into_static()])
            .inc();

        let timer = REQUESTS_DURATION_SECONDS
            .with_label_values(&[&self.scheme, Operation::BlockingStat.into_static()])
            .start_timer();
        let result = self.inner.blocking_stat(path, args);
        timer.observe_duration();
        result.map_err(|e| {
            increment_errors_total(Operation::BlockingStat, e.kind());
            e
        })
    }

    fn blocking_delete(&self, path: &str, args: OpDelete) -> Result<RpDelete> {
        REQUESTS_TOTAL
            .with_label_values(&[&self.scheme, Operation::BlockingDelete.into_static()])
            .inc();

        let timer = REQUESTS_DURATION_SECONDS
            .with_label_values(&[&self.scheme, Operation::BlockingDelete.into_static()])
            .start_timer();
        let result = self.inner.blocking_delete(path, args);
        timer.observe_duration();

        result.map_err(|e| {
            increment_errors_total(Operation::BlockingDelete, e.kind());
            e
        })
    }

    fn blocking_list(&self, path: &str, args: OpList) -> Result<(RpList, Self::BlockingPager)> {
        REQUESTS_TOTAL
            .with_label_values(&[&self.scheme, Operation::BlockingList.into_static()])
            .inc();

        let timer = REQUESTS_DURATION_SECONDS
            .with_label_values(&[&self.scheme, Operation::BlockingList.into_static()])
            .start_timer();
        let result = self.inner.blocking_list(path, args);
        timer.observe_duration();

        result.map_err(|e| {
            increment_errors_total(Operation::BlockingList, e.kind());
            e
        })
    }
}

pub struct PrometheusMetricWrapper<R> {
    inner: R,
    op: Operation,
    scheme: String,
}

impl<R> PrometheusMetricWrapper<R> {
    fn new(inner: R, op: Operation, scheme: &str) -> Self {
        Self {
            inner,
            op,
            scheme: scheme.to_string(),
        }
    }
}

impl<R: oio::Read> oio::Read for PrometheusMetricWrapper<R> {
    fn poll_read(&mut self, cx: &mut Context<'_>, buf: &mut [u8]) -> Poll<Result<usize>> {
        self.inner.poll_read(cx, buf).map(|res| match res {
            Ok(bytes) => {
                BYTES_TOTAL
                    .with_label_values(&[&self.scheme, Operation::Read.into_static()])
                    .observe(bytes as f64);
                Ok(bytes)
            }
            Err(e) => {
                increment_errors_total(self.op, e.kind());
                Err(e)
            }
        })
    }

    fn poll_seek(&mut self, cx: &mut Context<'_>, pos: io::SeekFrom) -> Poll<Result<u64>> {
        self.inner.poll_seek(cx, pos).map(|res| match res {
            Ok(n) => Ok(n),
            Err(e) => {
                increment_errors_total(self.op, e.kind());
                Err(e)
            }
        })
    }

    fn poll_next(&mut self, cx: &mut Context<'_>) -> Poll<Option<Result<Bytes>>> {
        self.inner.poll_next(cx).map(|res| match res {
            Some(Ok(bytes)) => {
                BYTES_TOTAL
                    .with_label_values(&[&self.scheme, Operation::Read.into_static()])
                    .observe(bytes.len() as f64);
                Some(Ok(bytes))
            }
            Some(Err(e)) => {
                increment_errors_total(self.op, e.kind());
                Some(Err(e))
            }
            None => None,
        })
    }
}

impl<R: oio::BlockingRead> oio::BlockingRead for PrometheusMetricWrapper<R> {
    fn read(&mut self, buf: &mut [u8]) -> Result<usize> {
        self.inner
            .read(buf)
            .map(|n| {
                BYTES_TOTAL
                    .with_label_values(&[&self.scheme, Operation::BlockingRead.into_static()])
                    .observe(n as f64);
                n
            })
            .map_err(|e| {
                increment_errors_total(self.op, e.kind());
                e
            })
    }

    fn seek(&mut self, pos: io::SeekFrom) -> Result<u64> {
        self.inner.seek(pos).map_err(|err| {
            increment_errors_total(self.op, err.kind());
            err
        })
    }

    fn next(&mut self) -> Option<Result<Bytes>> {
        self.inner.next().map(|res| match res {
            Ok(bytes) => {
                BYTES_TOTAL
                    .with_label_values(&[&self.scheme, Operation::BlockingRead.into_static()])
                    .observe(bytes.len() as f64);
                Ok(bytes)
            }
            Err(e) => {
                increment_errors_total(self.op, e.kind());
                Err(e)
            }
        })
    }
}

#[async_trait]
impl<R: oio::Write> oio::Write for PrometheusMetricWrapper<R> {
    fn poll_write(&mut self, cx: &mut Context<'_>, bs: &dyn oio::WriteBuf) -> Poll<Result<usize>> {
        self.inner
            .poll_write(cx, bs)
            .map_ok(|n| {
                BYTES_TOTAL
                    .with_label_values(&[&self.scheme, Operation::Write.into_static()])
                    .observe(n as f64);
                n
            })
            .map_err(|err| {
                increment_errors_total(self.op, err.kind());
                err
            })
    }

    fn poll_abort(&mut self, cx: &mut Context<'_>) -> Poll<Result<()>> {
        self.inner.poll_abort(cx).map_err(|err| {
            increment_errors_total(self.op, err.kind());
            err
        })
    }

    fn poll_close(&mut self, cx: &mut Context<'_>) -> Poll<Result<()>> {
        self.inner.poll_close(cx).map_err(|err| {
            increment_errors_total(self.op, err.kind());
            err
        })
    }
}

impl<R: oio::BlockingWrite> oio::BlockingWrite for PrometheusMetricWrapper<R> {
    fn write(&mut self, bs: &dyn oio::WriteBuf) -> Result<usize> {
        self.inner
            .write(bs)
            .map(|n| {
                BYTES_TOTAL
                    .with_label_values(&[&self.scheme, Operation::BlockingWrite.into_static()])
                    .observe(n as f64);
                n
            })
            .map_err(|err| {
                increment_errors_total(self.op, err.kind());
                err
            })
    }

    fn close(&mut self) -> Result<()> {
        self.inner.close().map_err(|err| {
            increment_errors_total(self.op, err.kind());
            err
        })
    }
}
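
// A minimal usage sketch (not part of this file): wiring the layer into an
// opendal `Operator`. `services::Memory` is a stand-in backend chosen for
// illustration; the builder calls follow the opendal 0.40 API this crate pins.
#[cfg(test)]
mod layer_usage_sketch {
    use opendal::{services, Operator};

    use super::PrometheusMetricsLayer;

    #[test]
    fn wrap_operator() -> opendal::Result<()> {
        // Every request through `_op` now updates opendal_requests_total,
        // opendal_requests_duration_seconds and opendal_bytes_total.
        let _op = Operator::new(services::Memory::default())?
            .layer(PrometheusMetricsLayer)
            .finish();
        Ok(())
    }
}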

@@ -151,7 +151,7 @@ impl StatementExecutor {
                     .map_err(BoxedError::new)
                     .context(error::ExternalSnafu)?;
                 let table_name = TableName::new(catalog, schema, table);
-                self.drop_table(table_name).await
+                self.drop_table(table_name, stmt.drop_if_exists()).await
             }
             Statement::TruncateTable(stmt) => {
                 let (catalog, schema, table) =

@@ -36,7 +36,7 @@ use lazy_static::lazy_static;
 use partition::partition::{PartitionBound, PartitionDef};
 use regex::Regex;
 use session::context::QueryContextRef;
-use snafu::{ensure, OptionExt, ResultExt};
+use snafu::{ensure, IntoError, OptionExt, ResultExt};
 use sql::ast::Value as SqlValue;
 use sql::statements::alter::AlterTable;
 use sql::statements::create::{CreateExternalTable, CreateTable, Partitions};
@@ -168,8 +168,8 @@ impl StatementExecutor {
     }

     #[tracing::instrument(skip_all)]
-    pub async fn drop_table(&self, table_name: TableName) -> Result<Output> {
-        let table = self
+    pub async fn drop_table(&self, table_name: TableName, drop_if_exists: bool) -> Result<Output> {
+        if let Some(table) = self
             .catalog_manager
             .table(
                 &table_name.catalog_name,
@@ -178,24 +178,32 @@ impl StatementExecutor {
             )
             .await
             .context(CatalogSnafu)?
-            .with_context(|| TableNotFoundSnafu {
+        {
+            let table_id = table.table_info().table_id();
+            self.drop_table_procedure(&table_name, table_id, drop_if_exists)
+                .await?;
+
+            // Invalidates local cache ASAP.
+            self.cache_invalidator
+                .invalidate_table_id(&Context::default(), table_id)
+                .await
+                .context(error::InvalidateTableCacheSnafu)?;
+
+            self.cache_invalidator
+                .invalidate_table_name(&Context::default(), table_name.clone())
+                .await
+                .context(error::InvalidateTableCacheSnafu)?;
+
+            Ok(Output::AffectedRows(0))
+        } else if drop_if_exists {
+            // DROP TABLE IF EXISTS on a missing table is ignored.
+            Ok(Output::AffectedRows(0))
+        } else {
+            Err(TableNotFoundSnafu {
                 table_name: table_name.to_string(),
-            })?;
-        let table_id = table.table_info().table_id();
-        self.drop_table_procedure(&table_name, table_id).await?;
-
-        // Invalidates local cache ASAP.
-        self.cache_invalidator
-            .invalidate_table_id(&Context::default(), table_id)
-            .await
-            .context(error::InvalidateTableCacheSnafu)?;
-
-        self.cache_invalidator
-            .invalidate_table_name(&Context::default(), table_name.clone())
-            .await
-            .context(error::InvalidateTableCacheSnafu)?;
-
-        Ok(Output::AffectedRows(0))
+            }
+            .into_error(snafu::NoneError))
+        }
     }
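
     // Editor's note (illustrative, not a real helper): the decision table the
     // new drop_table encodes is:
     //
     //   table found               -> run the drop procedure, invalidate caches
     //   missing + IF EXISTS       -> Ok(AffectedRows(0)), silently ignored
     //   missing, no IF EXISTS     -> Err(TableNotFound)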

     #[tracing::instrument(skip_all)]
@@ -343,6 +351,7 @@ impl StatementExecutor {
         &self,
         table_name: &TableName,
         table_id: TableId,
+        drop_if_exists: bool,
     ) -> Result<SubmitDdlTaskResponse> {
         let request = SubmitDdlTaskRequest {
             task: DdlTask::new_drop_table(
@@ -350,6 +359,7 @@ impl StatementExecutor {
                 table_name.schema_name.to_string(),
                 table_name.table_name.to_string(),
                 table_id,
+                drop_if_exists,
             ),
         };

@@ -7,7 +7,7 @@ license.workspace = true
[dependencies]
async-recursion = "1.0"
async-trait.workspace = true
bytemuck = "1.12"
bytemuck.workspace = true
catalog.workspace = true
common-catalog.workspace = true
common-error.workspace = true
@@ -19,6 +19,7 @@ use std::pin::Pin;
use std::sync::Arc;
use std::task::{Context, Poll};

use datafusion::arrow::array::ArrayRef;
use datafusion::arrow::datatypes::{DataType, Schema as ArrowSchema, TimeUnit};
use datafusion::common::{DFField, DFSchema, DFSchemaRef, Result as DataFusionResult, Statistics};
use datafusion::error::DataFusionError;

@@ -48,7 +49,7 @@ pub struct EmptyMetric {
    start: Millisecond,
    end: Millisecond,
    interval: Millisecond,
    expr: Expr,
    expr: Option<Expr>,
    /// Schema that only contains the time index column.
    /// This is for intermediate result only.
    time_index_schema: DFSchemaRef,

@@ -63,17 +64,20 @@ impl EmptyMetric {
        interval: Millisecond,
        time_index_column_name: String,
        field_column_name: String,
        field_expr: Expr,
        field_expr: Option<Expr>,
    ) -> DataFusionResult<Self> {
        let ts_only_schema = build_ts_only_schema(&time_index_column_name);
        let field_data_type = field_expr.get_type(&ts_only_schema)?;
        let schema = Arc::new(DFSchema::new_with_metadata(
            vec![
                ts_only_schema.field(0).clone(),
                DFField::new(Some(""), &field_column_name, field_data_type, true),
            ],
            HashMap::new(),
        )?);
        let mut fields = vec![ts_only_schema.field(0).clone()];
        if let Some(field_expr) = &field_expr {
            let field_data_type = field_expr.get_type(&ts_only_schema)?;
            fields.push(DFField::new(
                Some(""),
                &field_column_name,
                field_data_type,
                true,
            ));
        }
        let schema = Arc::new(DFSchema::new_with_metadata(fields, HashMap::new())?);

        Ok(Self {
            start,
@@ -94,12 +98,18 @@ impl EmptyMetric {
        session_state: &SessionState,
        physical_planner: &dyn PhysicalPlanner,
    ) -> DataFusionResult<Arc<dyn ExecutionPlan>> {
        let physical_expr = physical_planner.create_physical_expr(
            &self.expr,
            &self.result_schema,
            &ArrowSchema::from(self.result_schema.as_ref()),
            session_state,
        )?;
        let physical_expr = self
            .expr
            .as_ref()
            .map(|expr| {
                physical_planner.create_physical_expr(
                    expr,
                    &self.result_schema,
                    &ArrowSchema::from(self.result_schema.as_ref()),
                    session_state,
                )
            })
            .transpose()?;

        Ok(Arc::new(EmptyMetricExec {
            start: self.start,

@@ -153,7 +163,7 @@ pub struct EmptyMetricExec {
    time_index_schema: SchemaRef,
    /// Schema of the output record batch
    result_schema: SchemaRef,
    expr: PhysicalExprRef,
    expr: Option<PhysicalExprRef>,

    metric: ExecutionPlanMetricsSet,
}
@@ -241,7 +251,7 @@ pub struct EmptyMetricStream {
    start: Millisecond,
    end: Millisecond,
    interval: Millisecond,
    expr: PhysicalExprRef,
    expr: Option<PhysicalExprRef>,
    /// This stream only generate one record batch at the first poll
    is_first_poll: bool,
    /// Schema that only contains the time index column.

@@ -272,20 +282,24 @@ impl Stream for EmptyMetricStream {
                .step_by(self.interval as _)
                .collect::<Vec<_>>();
            let time_array = Arc::new(TimestampMillisecondArray::from(time_array));
            let num_rows = time_array.len();
            let input_record_batch =
                RecordBatch::try_new(self.time_index_schema.clone(), vec![time_array.clone()])
                    .map_err(DataFusionError::ArrowError)?;
            let mut result_arrays: Vec<ArrayRef> = vec![time_array];

            // evaluate the field expr and get the result
            let field_array = self
                .expr
                .evaluate(&input_record_batch)?
                .into_array(time_array.len());
            if let Some(field_expr) = &self.expr {
                result_arrays.push(
                    field_expr
                        .evaluate(&input_record_batch)?
                        .into_array(num_rows),
                );
            }

            // assemble the output record batch
            let batch =
                RecordBatch::try_new(self.result_schema.clone(), vec![time_array, field_array])
                    .map_err(DataFusionError::ArrowError);
            let batch = RecordBatch::try_new(self.result_schema.clone(), result_arrays)
                .map_err(DataFusionError::ArrowError);

            Poll::Ready(Some(batch))
        } else {
@@ -344,7 +358,7 @@ mod test {
            interval,
            time_column_name,
            field_column_name,
            time_expr,
            Some(time_expr),
        )
        .unwrap();
        let empty_metric_exec = empty_metric

@@ -455,4 +469,32 @@ mod test {
        )
        .await
    }

    #[tokio::test]
    async fn no_field_expr() {
        let session_context = SessionContext::default();
        let df_default_physical_planner = DefaultPhysicalPlanner::default();
        let empty_metric =
            EmptyMetric::new(0, 200, 1000, "time".to_string(), "value".to_string(), None).unwrap();
        let empty_metric_exec = empty_metric
            .to_execution_plan(&session_context.state(), &df_default_physical_planner)
            .unwrap();

        let result =
            datafusion::physical_plan::collect(empty_metric_exec, session_context.task_ctx())
                .await
                .unwrap();
        let result_literal = datatypes::arrow::util::pretty::pretty_format_batches(&result)
            .unwrap()
            .to_string();

        let expected = String::from(
            "+---------------------+\
            \n| time                |\
            \n+---------------------+\
            \n| 1970-01-01T00:00:00 |\
            \n+---------------------+",
        );
        assert_eq!(result_literal, expected);
    }
}
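With `expr` now optional, the stream always emits the time index array and only appends a value column when a field expression exists. A runnable sketch of that conditional batch assembly, assuming the `arrow` crate and hypothetical column names:

use std::sync::Arc;

use arrow::array::{ArrayRef, Float64Array, TimestampMillisecondArray};
use arrow::datatypes::{DataType, Field, Schema, TimeUnit};
use arrow::record_batch::RecordBatch;

// Build a batch with a mandatory "time" column and an optional "value" column.
fn build_batch(times: Vec<i64>, values: Option<Vec<f64>>) -> RecordBatch {
    let mut fields = vec![Field::new(
        "time",
        DataType::Timestamp(TimeUnit::Millisecond, None),
        false,
    )];
    let mut arrays: Vec<ArrayRef> = vec![Arc::new(TimestampMillisecondArray::from(times))];
    if let Some(v) = values {
        fields.push(Field::new("value", DataType::Float64, true));
        arrays.push(Arc::new(Float64Array::from(v)));
    }
    RecordBatch::try_new(Arc::new(Schema::new(fields)), arrays).unwrap()
}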
@@ -44,11 +44,10 @@ use table::table::adapter::DfTableProviderAdapter;

use crate::error::{
    CatalogSnafu, ColumnNotFoundSnafu, CombineTableColumnMismatchSnafu, DataFusionPlanningSnafu,
    ExpectExprSnafu, ExpectRangeSelectorSnafu, FunctionInvalidArgumentSnafu,
    MultipleMetricMatchersSnafu, MultipleVectorSnafu, NoMetricMatcherSnafu, Result,
    TableNameNotFoundSnafu, TimeIndexNotFoundSnafu, UnexpectedPlanExprSnafu, UnexpectedTokenSnafu,
    UnknownTableSnafu, UnsupportedExprSnafu, UnsupportedVectorMatchSnafu, ValueNotFoundSnafu,
    ZeroRangeSelectorSnafu,
    ExpectRangeSelectorSnafu, FunctionInvalidArgumentSnafu, MultipleMetricMatchersSnafu,
    MultipleVectorSnafu, NoMetricMatcherSnafu, Result, TableNameNotFoundSnafu,
    TimeIndexNotFoundSnafu, UnexpectedPlanExprSnafu, UnexpectedTokenSnafu, UnknownTableSnafu,
    UnsupportedExprSnafu, UnsupportedVectorMatchSnafu, ValueNotFoundSnafu, ZeroRangeSelectorSnafu,
};
use crate::extension_plan::{
    build_special_time_expr, EmptyMetric, HistogramFold, InstantManipulate, Millisecond,
@@ -203,7 +202,14 @@ impl PromPlanner {
                self.ctx.field_columns = vec![DEFAULT_FIELD_COLUMN.to_string()];
                self.ctx.table_name = Some(String::new());
                let field_expr_builder = Self::prom_token_to_binary_expr_builder(*op)?;
                let field_expr = field_expr_builder(lhs, rhs)?;
                let mut field_expr = field_expr_builder(lhs, rhs)?;

                if is_comparison_op && should_return_bool {
                    field_expr = DfExpr::Cast(Cast {
                        expr: Box::new(field_expr),
                        data_type: ArrowDataType::Float64,
                    });
                }

                LogicalPlan::Extension(Extension {
                    node: Arc::new(

@@ -213,15 +219,22 @@ impl PromPlanner {
                            self.ctx.interval,
                            SPECIAL_TIME_FUNCTION.to_string(),
                            DEFAULT_FIELD_COLUMN.to_string(),
                            field_expr,
                            Some(field_expr),
                        )
                        .context(DataFusionPlanningSnafu)?,
                    ),
                })
            }
            // lhs is a literal, rhs is a column
            (Some(expr), None) => {
            (Some(mut expr), None) => {
                let input = self.prom_expr_to_plan(*rhs.clone()).await?;
                // check if the literal is a special time expr
                if let Some(time_expr) = Self::try_build_special_time_expr(
                    lhs,
                    self.ctx.time_index_column.as_ref().unwrap(),
                ) {
                    expr = time_expr
                }
                let bin_expr_builder = |col: &String| {
                    let binary_expr_builder = Self::prom_token_to_binary_expr_builder(*op)?;
                    let mut binary_expr =

@@ -242,8 +255,15 @@ impl PromPlanner {
                }
            }
            // lhs is a column, rhs is a literal
            (None, Some(expr)) => {
            (None, Some(mut expr)) => {
                let input = self.prom_expr_to_plan(*lhs.clone()).await?;
                // check if the literal is a special time expr
                if let Some(time_expr) = Self::try_build_special_time_expr(
                    rhs,
                    self.ctx.time_index_column.as_ref().unwrap(),
                ) {
                    expr = time_expr
                }
                let bin_expr_builder = |col: &String| {
                    let binary_expr_builder = Self::prom_token_to_binary_expr_builder(*op)?;
                    let mut binary_expr =
@@ -353,7 +373,7 @@ impl PromPlanner {
                            self.ctx.interval,
                            SPECIAL_TIME_FUNCTION.to_string(),
                            DEFAULT_FIELD_COLUMN.to_string(),
                            literal_expr,
                            Some(literal_expr),
                        )
                        .context(DataFusionPlanningSnafu)?,
                    ),

@@ -373,7 +393,7 @@ impl PromPlanner {
                            self.ctx.interval,
                            SPECIAL_TIME_FUNCTION.to_string(),
                            DEFAULT_FIELD_COLUMN.to_string(),
                            literal_expr,
                            Some(literal_expr),
                        )
                        .context(DataFusionPlanningSnafu)?,
                    ),
@@ -443,28 +463,6 @@ impl PromPlanner {
                })
            }
            PromExpr::Call(Call { func, args }) => {
                // TODO(ruihang): refactor this, transform the AST in advance to include an empty metric table.
                if func.name == SPECIAL_TIME_FUNCTION {
                    self.ctx.time_index_column = Some(SPECIAL_TIME_FUNCTION.to_string());
                    self.ctx.field_columns = vec![DEFAULT_FIELD_COLUMN.to_string()];
                    self.ctx.table_name = Some(String::new());
                    let time_expr = build_special_time_expr(SPECIAL_TIME_FUNCTION);

                    return Ok(LogicalPlan::Extension(Extension {
                        node: Arc::new(
                            EmptyMetric::new(
                                self.ctx.start,
                                self.ctx.end,
                                self.ctx.interval,
                                SPECIAL_TIME_FUNCTION.to_string(),
                                DEFAULT_FIELD_COLUMN.to_string(),
                                time_expr,
                            )
                            .context(DataFusionPlanningSnafu)?,
                        ),
                    }));
                }

                if func.name == SPECIAL_HISTOGRAM_QUANTILE {
                    if args.args.len() != 2 {
                        return FunctionInvalidArgumentSnafu {

@@ -481,7 +479,6 @@ impl PromPlanner {
                    let input_plan = self.prom_expr_to_plan(input).await?;

                    if !self.ctx.has_le_tag() {
                        common_telemetry::info!("[DEBUG] valid tags: {:?}", self.ctx.tag_columns);
                        return ColumnNotFoundSnafu {
                            col: LE_COLUMN_NAME.to_string(),
                        }
@@ -518,11 +515,25 @@ impl PromPlanner {
                }

                let args = self.create_function_args(&args.args)?;
                let input = self
                    .prom_expr_to_plan(args.input.with_context(|| ExpectExprSnafu {
                        expr: prom_expr.clone(),
                    })?)
                    .await?;
                let input = if let Some(prom_expr) = args.input {
                    self.prom_expr_to_plan(prom_expr).await?
                } else {
                    self.ctx.time_index_column = Some(SPECIAL_TIME_FUNCTION.to_string());
                    self.ctx.table_name = Some(String::new());
                    LogicalPlan::Extension(Extension {
                        node: Arc::new(
                            EmptyMetric::new(
                                self.ctx.start,
                                self.ctx.end,
                                self.ctx.interval,
                                SPECIAL_TIME_FUNCTION.to_string(),
                                DEFAULT_FIELD_COLUMN.to_string(),
                                None,
                            )
                            .context(DataFusionPlanningSnafu)?,
                        ),
                    })
                };
                let mut func_exprs = self.create_function_expr(func, args.literals)?;
                func_exprs.insert(0, self.create_time_index_column_expr()?);
                func_exprs.extend_from_slice(&self.create_tag_column_exprs()?);
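When a PromQL call such as `time()` or `hour()` arrives without a vector argument, the planner above now substitutes a synthetic `EmptyMetric` input instead of failing. A stand-in sketch of that fallback, with simplified types:

enum Plan {
    Table(String),
    EmptyMetric, // synthetic time-only input
}

// `input` stands in for args.input from create_function_args().
fn plan_function_input(input: Option<String>) -> Plan {
    match input {
        Some(table) => Plan::Table(table),
        // e.g. a bare `time()` call in PromQL has no input vector
        None => Plan::EmptyMetric,
    }
}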
@@ -968,6 +979,7 @@ impl PromPlanner {

        // TODO(ruihang): set this according to in-param list
        let field_column_pos = 0;
        let mut exprs = Vec::with_capacity(self.ctx.field_columns.len());
        let scalar_func = match func.name {
            "increase" => ScalarFunc::ExtrapolateUdf(Increase::scalar_udf(
                self.ctx.range.context(ExpectRangeSelectorSnafu)?,

@@ -1033,6 +1045,87 @@ impl PromPlanner {
            };
                ScalarFunc::Udf(HoltWinters::scalar_udf(sf_exp, tf_exp))
            }
            "time" => {
                exprs.push(build_special_time_expr(
                    self.ctx.time_index_column.as_ref().unwrap(),
                ));
                ScalarFunc::GeneratedExpr
            }
            "minute" => {
                // date_part('minute', time_index)
                let expr = self.date_part_on_time_index("minute")?;
                exprs.push(expr);
                ScalarFunc::GeneratedExpr
            }
            "hour" => {
                // date_part('hour', time_index)
                let expr = self.date_part_on_time_index("hour")?;
                exprs.push(expr);
                ScalarFunc::GeneratedExpr
            }
            "month" => {
                // date_part('month', time_index)
                let expr = self.date_part_on_time_index("month")?;
                exprs.push(expr);
                ScalarFunc::GeneratedExpr
            }
            "year" => {
                // date_part('year', time_index)
                let expr = self.date_part_on_time_index("year")?;
                exprs.push(expr);
                ScalarFunc::GeneratedExpr
            }
            "day_of_month" => {
                // date_part('day', time_index)
                let expr = self.date_part_on_time_index("day")?;
                exprs.push(expr);
                ScalarFunc::GeneratedExpr
            }
            "day_of_week" => {
                // date_part('dow', time_index)
                let expr = self.date_part_on_time_index("dow")?;
                exprs.push(expr);
                ScalarFunc::GeneratedExpr
            }
            "day_of_year" => {
                // date_part('doy', time_index)
                let expr = self.date_part_on_time_index("doy")?;
                exprs.push(expr);
                ScalarFunc::GeneratedExpr
            }
            "days_in_month" => {
                // date_part(
                //     'days',
                //     (date_trunc('month', <TIME INDEX>::date) + interval '1 month - 1 day')
                // );
                let day_lit_expr = DfExpr::Literal(ScalarValue::Utf8(Some("day".to_string())));
                let month_lit_expr = DfExpr::Literal(ScalarValue::Utf8(Some("month".to_string())));
                let interval_1month_lit_expr =
                    DfExpr::Literal(ScalarValue::IntervalYearMonth(Some(1)));
                let interval_1day_lit_expr =
                    DfExpr::Literal(ScalarValue::IntervalDayTime(Some(1 << 32)));
                let the_1month_minus_1day_expr = DfExpr::BinaryExpr(BinaryExpr {
                    left: Box::new(interval_1month_lit_expr),
                    op: Operator::Minus,
                    right: Box::new(interval_1day_lit_expr),
                });
                let date_trunc_expr = DfExpr::ScalarFunction(ScalarFunction {
                    fun: BuiltinScalarFunction::DateTrunc,
                    args: vec![month_lit_expr, self.create_time_index_column_expr()?],
                });
                let date_trunc_plus_interval_expr = DfExpr::BinaryExpr(BinaryExpr {
                    left: Box::new(date_trunc_expr),
                    op: Operator::Plus,
                    right: Box::new(the_1month_minus_1day_expr),
                });
                let date_part_expr = DfExpr::ScalarFunction(ScalarFunction {
                    fun: BuiltinScalarFunction::DatePart,
                    args: vec![day_lit_expr, date_trunc_plus_interval_expr],
                });

                exprs.push(date_part_expr);
                ScalarFunc::GeneratedExpr
            }
            _ => ScalarFunc::DataFusionBuiltin(
                BuiltinScalarFunction::from_str(func.name).map_err(|_| {
                    UnsupportedExprSnafu {

@@ -1043,8 +1136,6 @@ impl PromPlanner {
                ),
            };

        // TODO(ruihang): handle those functions doesn't require input
        let mut exprs = Vec::with_capacity(self.ctx.field_columns.len());
        for value in &self.ctx.field_columns {
            let col_expr = DfExpr::Column(Column::from_name(value));

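The `days_in_month` arm above leans on a calendar identity: truncate the timestamp to the first of its month, add one month minus one day, and take the day-of-month of the result. A plain-Rust check of the same identity, assuming the `chrono` crate:

use chrono::{Datelike, NaiveDate};

// days_in_month(y, m) = day-of-month of (first of next month - 1 day)
fn days_in_month(year: i32, month: u32) -> u32 {
    let (next_y, next_m) = if month == 12 { (year + 1, 1) } else { (year, month + 1) };
    let first_of_next = NaiveDate::from_ymd_opt(next_y, next_m, 1).unwrap();
    first_of_next.pred_opt().unwrap().day()
}

fn main() {
    assert_eq!(days_in_month(2023, 2), 28);
    assert_eq!(days_in_month(2024, 2), 29); // leap year
    assert_eq!(days_in_month(2023, 12), 31);
}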
@@ -1093,6 +1184,7 @@ impl PromPlanner {
                    let _ = other_input_exprs.remove(field_column_pos + 1);
                    let _ = other_input_exprs.remove(field_column_pos);
                }
                ScalarFunc::GeneratedExpr => {}
            }
        }

@@ -1224,10 +1316,16 @@ impl PromPlanner {
            }
            PromExpr::VectorSelector(_)
            | PromExpr::MatrixSelector(_)
            | PromExpr::Call(_)
            | PromExpr::Extension(_)
            | PromExpr::Aggregate(_)
            | PromExpr::Subquery(_) => None,
            PromExpr::Call(Call { func, .. }) => {
                if func.name == SPECIAL_TIME_FUNCTION {
                    Some(build_special_time_expr(SPECIAL_TIME_FUNCTION))
                } else {
                    None
                }
            }
            PromExpr::Paren(ParenExpr { expr }) => Self::try_build_literal_expr(expr),
            // TODO(ruihang): support Unary operator
            PromExpr::Unary(UnaryExpr { expr, .. }) => Self::try_build_literal_expr(expr),

@@ -1260,6 +1358,19 @@ impl PromPlanner {
        }
    }

    fn try_build_special_time_expr(expr: &PromExpr, time_index_col: &str) -> Option<DfExpr> {
        match expr {
            PromExpr::Call(Call { func, .. }) => {
                if func.name == SPECIAL_TIME_FUNCTION {
                    Some(build_special_time_expr(time_index_col))
                } else {
                    None
                }
            }
            _ => None,
        }
    }

    /// Try to build a [f64] from [PromExpr].
    fn try_build_float_literal(expr: &PromExpr) -> Option<f64> {
        match expr {

@@ -1561,6 +1672,26 @@ impl PromPlanner {
            .build()
            .context(DataFusionPlanningSnafu)
    }

    /// Generate an expr like `date_part("hour", <TIME_INDEX>)`. Caller should ensure the
    /// time index column in context is set
    fn date_part_on_time_index(&self, date_part: &str) -> Result<DfExpr> {
        let lit_expr = DfExpr::Literal(ScalarValue::Utf8(Some(date_part.to_string())));
        let input_expr = datafusion::logical_expr::col(
            self.ctx
                .time_index_column
                .as_ref()
                // table name doesn't matter here
                .with_context(|| TimeIndexNotFoundSnafu {
                    table: "<doesn't matter>",
                })?,
        );
        let fn_expr = DfExpr::ScalarFunction(ScalarFunction {
            fun: BuiltinScalarFunction::DatePart,
            args: vec![lit_expr, input_expr],
        });
        Ok(fn_expr)
    }
}

#[derive(Default, Debug)]
@@ -1576,6 +1707,8 @@ enum ScalarFunc {
    // todo(ruihang): maybe merge with Udf later
    /// UDF that require extra information like range length to be evaluated.
    ExtrapolateUdf(ScalarUdfDef),
    /// Func that doesn't require input, like `time()`.
    GeneratedExpr,
}

#[cfg(test)]

@@ -2228,7 +2361,7 @@ mod test {
                    "some_metric.timestamp",
                ],
            ),
            // single not_eq mathcer
            // single not_eq matcher
            (
                r#"some_metric{__field__!="field_1"}"#,
                vec![

@@ -2240,7 +2373,7 @@ mod test {
                    "some_metric.timestamp",
                ],
            ),
            // two not_eq mathcers
            // two not_eq matchers
            (
                r#"some_metric{__field__!="field_1", __field__!="field_2"}"#,
                vec![

@@ -13,6 +13,7 @@
// limitations under the License.

#![feature(let_chains)]
#![feature(int_roundings)]

pub mod dataframe;
pub mod datafusion;

@@ -150,41 +150,39 @@ impl TypeConverter {
        }
    }

    fn convert_type<'b>(&self, mut left: &'b Expr, mut right: &'b Expr) -> Result<(Expr, Expr)> {
    fn convert_type<'b>(&self, left: &'b Expr, right: &'b Expr) -> Result<(Expr, Expr)> {
        let left_type = self.column_type(left);
        let right_type = self.column_type(right);

        let mut reverse = false;
        let left_type = match (&left_type, &right_type) {
        let target_type = match (&left_type, &right_type) {
            (Some(v), None) => v,
            (None, Some(v)) => {
                reverse = true;
                std::mem::swap(&mut left, &mut right);
                v
            }
            (None, Some(v)) => v,
            _ => return Ok((left.clone(), right.clone())),
        };

        // only try to convert timestamp or boolean types
        if !matches!(left_type, DataType::Timestamp(_, _))
            && !matches!(left_type, DataType::Boolean)
        {
        if !matches!(target_type, DataType::Timestamp(_, _) | DataType::Boolean) {
            return Ok((left.clone(), right.clone()));
        }

        match (left, right) {
            (Expr::Column(col), Expr::Literal(value)) => {
                let casted_right = Self::cast_scalar_value(value, left_type)?;
                let casted_right = Self::cast_scalar_value(value, target_type)?;
                if casted_right.is_null() {
                    return Err(DataFusionError::Plan(format!(
                        "column:{col:?}. Casting value:{value:?} to {left_type:?} is invalid",
                        "column:{col:?}. Casting value:{value:?} to {target_type:?} is invalid",
                    )));
                }
                if reverse {
                    Ok((Expr::Literal(casted_right), left.clone()))
                } else {
                    Ok((left.clone(), Expr::Literal(casted_right)))
                Ok((left.clone(), Expr::Literal(casted_right)))
            }
            (Expr::Literal(value), Expr::Column(col)) => {
                let casted_left = Self::cast_scalar_value(value, target_type)?;
                if casted_left.is_null() {
                    return Err(DataFusionError::Plan(format!(
                        "column:{col:?}. Casting value:{value:?} to {target_type:?} is invalid",
                    )));
                }
                Ok((Expr::Literal(casted_left), right.clone()))
            }
            _ => Ok((left.clone(), right.clone())),
        }
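The rewrite above drops the swap-operands-and-remember-`reverse` dance: each (column, literal) orientation now casts the literal in place and preserves operand order. A toy sketch of the symmetric handling, with hypothetical stand-in types:

#[derive(Clone, Debug, PartialEq)]
enum Expr {
    Column(String),
    IntLit(i64),
    StrLit(String),
}

// Cast a string literal to the column's (assumed) integer type when possible.
fn cast_literal(s: &str) -> Expr {
    s.parse::<i64>()
        .map(Expr::IntLit)
        .unwrap_or_else(|_| Expr::StrLit(s.to_string()))
}

fn convert(left: &Expr, right: &Expr) -> (Expr, Expr) {
    match (left, right) {
        // column <op> literal: cast the right-hand side
        (Expr::Column(_), Expr::StrLit(s)) => (left.clone(), cast_literal(s)),
        // literal <op> column: cast the left-hand side, keep the order
        (Expr::StrLit(s), Expr::Column(_)) => (cast_literal(s), right.clone()),
        _ => (left.clone(), right.clone()),
    }
}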
@@ -250,7 +248,6 @@ impl TreeNodeRewriter for TypeConverter {
            ScalarValue::TimestampMillisecond(Some(i), _) => {
                timestamp_to_timestamp_ms_expr(i, TimeUnit::Millisecond)
            }

            ScalarValue::TimestampMicrosecond(Some(i), _) => {
                timestamp_to_timestamp_ms_expr(i, TimeUnit::Microsecond)
            }
@@ -425,6 +422,13 @@ mod tests {
            ScalarValue::Utf8(Some("1970-01-01 00:00:00+08:00".to_string())),
        )))
        .unwrap()
        .filter(
            Expr::Literal(ScalarValue::Utf8(Some(
                "1970-01-01 00:00:00+08:00".to_string(),
            )))
            .lt_eq(Expr::Column(Column::from_name("column3"))),
        )
        .unwrap()
        .aggregate(
            Vec::<Expr>::new(),
            vec![Expr::AggregateFunction(AggrExpr {

@@ -444,8 +448,37 @@ mod tests {
            .unwrap();
        let expected = String::from(
            "Aggregate: groupBy=[[]], aggr=[[COUNT(column1)]]\
            \n  Filter: column3 > TimestampSecond(-28800, None)\
            \n    Values: (Int64(1), Float64(1), TimestampMillisecond(1, None))",
            \n  Filter: TimestampSecond(-28800, None) <= column3\
            \n    Filter: column3 > TimestampSecond(-28800, None)\
            \n      Values: (Int64(1), Float64(1), TimestampMillisecond(1, None))",
        );
        assert_eq!(format!("{}", transformed_plan.display_indent()), expected);
    }

    #[test]
    fn test_reverse_non_ts_type() {
        let plan =
            LogicalPlanBuilder::values(vec![vec![Expr::Literal(ScalarValue::Float64(Some(1.0)))]])
                .unwrap()
                .filter(
                    Expr::Column(Column::from_name("column1"))
                        .gt_eq(Expr::Literal(ScalarValue::Utf8(Some("1.2345".to_string())))),
                )
                .unwrap()
                .filter(
                    Expr::Literal(ScalarValue::Utf8(Some("1.2345".to_string())))
                        .lt(Expr::Column(Column::from_name("column1"))),
                )
                .unwrap()
                .build()
                .unwrap();
        let transformed_plan = TypeConversionRule
            .analyze(plan, &ConfigOptions::default())
            .unwrap();
        let expected = String::from(
            "Filter: Utf8(\"1.2345\") < column1\
            \n  Filter: column1 >= Utf8(\"1.2345\")\
            \n    Values: (Float64(1))",
        );
        assert_eq!(format!("{}", transformed_plan.display_indent()), expected);
    }

@@ -52,9 +52,9 @@ use datatypes::arrow::record_batch::RecordBatch;
use datatypes::arrow::row::{OwnedRow, RowConverter, SortField};
use futures::{ready, Stream};
use futures_util::StreamExt;
use snafu::ResultExt;
use snafu::{ensure, ResultExt};

use crate::error::{DataFusionSnafu, Result};
use crate::error::{DataFusionSnafu, RangeQuerySnafu, Result};

type Millisecond = <TimestampMillisecondType as ArrowPrimitiveType>::Native;

@@ -147,7 +147,7 @@ impl Fill {

#[derive(Eq, Clone, Debug)]
pub struct RangeFn {
    /// with format like `max(a) 300s null`
    /// with format like `max(a) RANGE 300s FILL NULL`
    pub name: String,
    pub data_type: DataType,
    pub expr: Expr,
@@ -197,6 +197,7 @@ pub struct RangeSelect {
    /// all range expressions
    pub range_expr: Vec<RangeFn>,
    pub align: Duration,
    pub align_to: i64,
    pub time_index: String,
    pub by: Vec<Expr>,
    pub schema: DFSchemaRef,

@@ -216,10 +217,28 @@ impl RangeSelect {
        input: Arc<LogicalPlan>,
        range_expr: Vec<RangeFn>,
        align: Duration,
        align_to: i64,
        time_index: Expr,
        by: Vec<Expr>,
        projection_expr: &[Expr],
    ) -> Result<Self> {
        ensure!(
            align.as_millis() != 0,
            RangeQuerySnafu {
                msg: "Can't use 0 as align in Range Query"
            }
        );
        for expr in &range_expr {
            ensure!(
                expr.range.as_millis() != 0,
                RangeQuerySnafu {
                    msg: format!(
                        "Invalid Range expr `{}`, Can't use 0 as range in Range Query",
                        expr.name
                    )
                }
            );
        }
        let mut fields = range_expr
            .iter()
            .map(
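`RangeSelect::try_new` now rejects zero durations up front, since a zero `align` or range would make the later slot arithmetic divide by zero. A minimal sketch of those guards, with a plain `String` error in place of `RangeQuerySnafu`:

use std::time::Duration;

// Reject zero align and zero per-expression ranges before planning.
fn validate(align: Duration, ranges: &[(String, Duration)]) -> Result<(), String> {
    if align.as_millis() == 0 {
        return Err("Can't use 0 as align in Range Query".to_string());
    }
    for (name, range) in ranges {
        if range.as_millis() == 0 {
            return Err(format!(
                "Invalid Range expr `{name}`, Can't use 0 as range in Range Query"
            ));
        }
    }
    Ok(())
}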
@@ -289,6 +308,7 @@ impl RangeSelect {
            input,
            range_expr,
            align,
            align_to,
            time_index: time_index_name,
            schema,
            by_schema,
@@ -322,13 +342,19 @@ impl UserDefinedLogicalNodeCore for RangeSelect {
    fn fmt_for_explain(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
        write!(
            f,
            "RangeSelect: range_exprs=[{}], align={}s time_index={}",
            "RangeSelect: range_exprs=[{}], align={}ms, align_to={}ms, align_by=[{}], time_index={}",
            self.range_expr
                .iter()
                .map(ToString::to_string)
                .collect::<Vec<_>>()
                .join(", "),
            self.align.as_secs(),
            self.align.as_millis(),
            self.align_to,
            self.by
                .iter()
                .map(ToString::to_string)
                .collect::<Vec<_>>()
                .join(", "),
            self.time_index
        )
    }
@@ -338,6 +364,7 @@ impl UserDefinedLogicalNodeCore for RangeSelect {

        Self {
            align: self.align,
            align_to: self.align_to,
            range_expr: self.range_expr.clone(),
            input: Arc::new(inputs[0].clone()),
            time_index: self.time_index.clone(),
@@ -463,6 +490,7 @@ impl RangeSelect {
            input: exec_input,
            range_exec,
            align: self.align.as_millis() as Millisecond,
            align_to: self.align_to,
            by: self.create_physical_expr_list(
                &self.by,
                input_dfschema,
@@ -493,6 +521,7 @@ pub struct RangeSelectExec {
    input: Arc<dyn ExecutionPlan>,
    range_exec: Vec<RangeFnExec>,
    align: Millisecond,
    align_to: i64,
    time_index: String,
    by: Vec<Arc<dyn PhysicalExpr>>,
    schema: SchemaRef,
@@ -510,16 +539,24 @@ impl DisplayAs for RangeSelectExec {
            let range_expr_strs: Vec<String> = self
                .range_exec
                .iter()
                .map(|e| format!("RangeFnExec{{ {}, range: {:?}}}", e.expr.name(), e.range))
                .map(|e| {
                    format!(
                        "{} RANGE {}s FILL {}",
                        e.expr.name(),
                        e.range / 1000,
                        e.fill
                    )
                })
                .collect();
            let by: Vec<String> = self.by.iter().map(|e| e.to_string()).collect();
            write!(
                f,
                "range_expr=[{}], align={}, time_index={}, by=[{}]",
                "range_expr=[{}], align={}ms, align_to={}ms, align_by=[{}], time_index={}",
                range_expr_strs.join(", "),
                self.align,
                self.align_to,
                by.join(", "),
                self.time_index,
                by.join(", ")
            )?;
        }
    }
@@ -563,6 +600,7 @@ impl ExecutionPlan for RangeSelectExec {
            time_index: self.time_index.clone(),
            by: self.by.clone(),
            align: self.align,
            align_to: self.align_to,
            schema: self.schema.clone(),
            by_schema: self.by_schema.clone(),
            metric: self.metric.clone(),

@@ -599,6 +637,7 @@ impl ExecutionPlan for RangeSelectExec {
            random_state: RandomState::new(),
            time_index,
            align: self.align,
            align_to: self.align_to,
            by: self.by.clone(),
            series_map: HashMap::new(),
            exec_state: ExecutionState::ReadingInput,
@@ -629,6 +668,7 @@ struct RangeSelectStream {
    time_index: usize,
    /// the unit of `align` is millisecond
    align: Millisecond,
    align_to: i64,
    by: Vec<Arc<dyn PhysicalExpr>>,
    exec_state: ExecutionState,
    /// Converter for the by values
@@ -657,11 +697,13 @@ struct SeriesState {
    align_ts_accumulator: HashMap<Millisecond, Vec<Box<dyn Accumulator>>>,
}

/// According to `align`, produces a calendar-based aligned time.
/// Use `align_to` as time origin.
/// According to `align` as the time interval, produces aligned times.
/// Combining the parameters related to the range query, this determines,
/// for each `Accumulator` identified by `(hash, align_ts)`,
/// which rows of data will be applied to it.
fn align_to_calendar(
fn produce_align_time(
    align_to: i64,
    range: Millisecond,
    align: Millisecond,
    ts_column: &TimestampMillisecondArray,

@@ -672,7 +714,8 @@ fn align_to_calendar(
    // make modify_map for range_fn[i]
    for (row, hash) in by_columns_hash.iter().enumerate() {
        let ts = ts_column.value(row);
        let mut align_ts = ((ts + align - 1) / align) * align;
        let ith_slot = (ts - align_to).div_ceil(align);
        let mut align_ts = ith_slot * align + align_to;
        while align_ts - range < ts && ts <= align_ts {
            modify_map
                .entry((*hash, align_ts))
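The new slot computation anchors every window at the `align_to` origin rather than at zero; `div_ceil` on signed integers is why the crate enables `#![feature(int_roundings)]` above. A worked, stable-Rust sketch of the same arithmetic, assuming all values are in milliseconds:

// Smallest aligned timestamp >= ts, measured from the align_to origin.
fn first_aligned_ts(ts: i64, align: i64, align_to: i64) -> i64 {
    let delta = ts - align_to;
    // div_ceil without the unstable int_roundings feature
    let ith_slot = delta.div_euclid(align) + if delta.rem_euclid(align) == 0 { 0 } else { 1 };
    ith_slot * align + align_to
}

fn main() {
    // align every minute, origin shifted by 30 s: 95 s lands in the slot ending at 150 s
    assert_eq!(first_aligned_ts(95_000, 60_000, 30_000), 150_000);
    // a timestamp exactly on a boundary is its own slot end
    assert_eq!(first_aligned_ts(30_000, 60_000, 30_000), 30_000);
}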
@@ -733,7 +776,8 @@ impl RangeSelectStream {
        for i in 0..self.range_exec.len() {
            let args = self.evaluate_many(&batch, &self.range_exec[i].args)?;
            // use self.modify_map record (hash, align_ts) => [row_nums]
            align_to_calendar(
            produce_align_time(
                self.align_to,
                self.range_exec[i].range,
                self.align,
                ts_column_ref,
@@ -1065,6 +1109,7 @@ mod test {
            },
        ],
        align,
        align_to: 0,
        by: vec![Arc::new(Column::new("host", 2))],
        time_index: TIME_INDEX_COLUMN.to_string(),
        schema: schema.clone(),

@@ -13,12 +13,16 @@
// limitations under the License.

use std::collections::BTreeSet;
use std::str::FromStr;
use std::sync::Arc;
use std::time::Duration;

use arrow_schema::DataType;
use async_recursion::async_recursion;
use catalog::table_source::DfTableSourceProvider;
use common_time::interval::NANOS_PER_MILLI;
use common_time::timestamp::TimeUnit;
use common_time::{Interval, Timestamp};
use datafusion::datasource::DefaultTableSource;
use datafusion::prelude::Column;
use datafusion::scalar::ScalarValue;
@@ -47,25 +51,13 @@ use crate::range_select::plan::{RangeFn, RangeSelect};
pub struct RangeExprRewriter<'a> {
    input_plan: &'a Arc<LogicalPlan>,
    align: Duration,
    align_to: i64,
    by: Vec<Expr>,
    /// Use `BTreeSet` to avoid cases like `avg(a) RANGE '5m' + avg(a) RANGE '5m'`, where the duplicate range expr `avg(a) RANGE '5m'` would be calculated twice
    range_fn: BTreeSet<RangeFn>,
    sub_aggr: &'a Aggregate,
}

#[inline]
fn dispose_parse_error(expr: Option<&Expr>) -> DataFusionError {
    DataFusionError::Plan(
        expr.map(|x| {
            format!(
                "Illegal argument `{}` in range select query",
                x.display_name().unwrap_or_default()
            )
        })
        .unwrap_or("Missing argument in range select query".into()),
    )
}

impl<'a> RangeExprRewriter<'a> {
    pub fn get_range_expr(&self, args: &[Expr], i: usize) -> DFResult<Expr> {
        match args.get(i) {

@@ -85,6 +77,19 @@ impl<'a> RangeExprRewriter<'a> {
        }
    }

#[inline]
fn dispose_parse_error(expr: Option<&Expr>) -> DataFusionError {
    DataFusionError::Plan(
        expr.map(|x| {
            format!(
                "Illegal argument `{}` in range select query",
                x.display_name().unwrap_or_default()
            )
        })
        .unwrap_or("Missing argument in range select query".into()),
    )
}

fn parse_str_expr(args: &[Expr], i: usize) -> DFResult<&str> {
    match args.get(i) {
        Some(Expr::Literal(ScalarValue::Utf8(Some(str)))) => Ok(str.as_str()),

@@ -92,6 +97,64 @@ fn parse_str_expr(args: &[Expr], i: usize) -> DFResult<&str> {
    }
}

fn parse_expr_to_string(args: &[Expr], i: usize) -> DFResult<String> {
    match args.get(i) {
        Some(Expr::Literal(ScalarValue::Utf8(Some(str)))) => Ok(str.to_string()),
        Some(expr) => Ok(expr.display_name().unwrap_or_default()),
        None => Err(dispose_parse_error(None)),
    }
}

/// Parse a duration expr:
/// 1. duration string (e.g. `'1h'`)
/// 2. Interval expr (e.g. `INTERVAL '1 year 3 hours 20 minutes'`)
fn parse_duration_expr(args: &[Expr], i: usize) -> DFResult<Duration> {
    let interval_to_duration = |interval: Interval| -> Duration {
        Duration::from_millis((interval.to_nanosecond() / NANOS_PER_MILLI as i128) as u64)
    };
    match args.get(i) {
        Some(Expr::Literal(ScalarValue::Utf8(Some(str)))) => {
            parse_duration(str).map_err(DataFusionError::Plan)
        }
        Some(Expr::Literal(ScalarValue::IntervalYearMonth(Some(i)))) => {
            Ok(interval_to_duration(Interval::from_i32(*i)))
        }
        Some(Expr::Literal(ScalarValue::IntervalDayTime(Some(i)))) => {
            Ok(interval_to_duration(Interval::from_i64(*i)))
        }
        Some(Expr::Literal(ScalarValue::IntervalMonthDayNano(Some(i)))) => {
            Ok(interval_to_duration(Interval::from_i128(*i)))
        }
        other => Err(dispose_parse_error(other)),
    }
}

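Both duration strings and SQL INTERVAL literals funnel into one `Duration` via a nanosecond count. A minimal sketch of that truncating conversion, with a plain nanosecond argument standing in for the `Interval` type:

use std::time::Duration;

const NANOS_PER_MILLI: i128 = 1_000_000;

// Truncate an interval's nanosecond count to whole milliseconds.
fn nanos_to_duration(nanos: i128) -> Duration {
    Duration::from_millis((nanos / NANOS_PER_MILLI) as u64)
}

fn main() {
    // INTERVAL '1 hour' carries 3_600_000_000_000 ns, i.e. 3600 s
    assert_eq!(nanos_to_duration(3_600_000_000_000), Duration::from_secs(3600));
}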
/// Parse the `align to` clause and return a UTC timestamp with unit of millisecond,
/// which is used as the basis for dividing time slot during the align operation.
/// 1. NOW: align to current execute time
/// 2. CALENDAR (as Default Option): align to timestamp `0`
/// 3. Timestamp string: align to specific timestamp
fn parse_align_to(args: &[Expr], i: usize) -> DFResult<i64> {
    let s = parse_str_expr(args, i)?;
    let upper = s.to_uppercase();
    match upper.as_str() {
        "NOW" => return Ok(Timestamp::current_millis().value()),
        "CALENDAR" | "" => return Ok(0),
        _ => (),
    }
    Timestamp::from_str(s)
        .map_err(|e| {
            DataFusionError::Plan(format!(
                "Illegal `align to` argument `{}` in range select query, can't be parse as NOW/CALENDAR/Timestamp, error: {}",
                s, e
            ))
        })?.convert_to(TimeUnit::Millisecond).map(|x|x.value()).ok_or(DataFusionError::Plan(format!(
            "Illegal `align to` argument `{}` in range select query, can't be convert to a valid Timestamp",
            s
        ))
    )
}

fn parse_expr_list(args: &[Expr], start: usize, len: usize) -> DFResult<Vec<Expr>> {
    let mut outs = Vec::with_capacity(len);
    for i in start..start + len {

@@ -111,21 +174,38 @@ fn parse_expr_list(args: &[Expr], start: usize, len: usize) -> DFResult<Vec<Expr
    Ok(outs)
}

macro_rules! inconsistent_check {
    ($self: ident.$name: ident, $cond: expr) => {
        if $cond && $self.$name != $name {
            return Err(DataFusionError::Plan(
                concat!(
                    "Inconsistent ",
                    stringify!($name),
                    " given in Range Function Rewrite"
                )
                .into(),
            ));
        } else {
            $self.$name = $name;
        }
    };
}

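The macro above folds the repeated guard-then-assign blocks into one place; `$name` is reused both as the struct field and as the call-site local holding the newly parsed value. A self-contained sketch of the same pattern, with the value passed explicitly and a plain `String` error:

macro_rules! inconsistent_check {
    ($self:ident.$name:ident, $cond:expr, $value:expr) => {
        if $cond && $self.$name != $value {
            return Err(format!(
                "Inconsistent {} given in Range Function Rewrite",
                stringify!($name)
            ));
        } else {
            $self.$name = $value;
        }
    };
}

#[derive(Default)]
struct Rewriter {
    align: u64, // milliseconds; 0 means "not set yet"
}

impl Rewriter {
    // every range_fn call must agree on the same align value
    fn check_align(&mut self, align: u64) -> Result<(), String> {
        inconsistent_check!(self.align, self.align != 0, align);
        Ok(())
    }
}

fn main() {
    let mut r = Rewriter::default();
    assert!(r.check_align(60_000).is_ok());
    assert!(r.check_align(60_000).is_ok());
    assert!(r.check_align(30_000).is_err());
}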
impl<'a> TreeNodeRewriter for RangeExprRewriter<'a> {
    type N = Expr;

    fn mutate(&mut self, node: Expr) -> DFResult<Expr> {
        if let Expr::ScalarUDF(func) = &node {
            if func.fun.name == "range_fn" {
                // `range_fn(func, range, fill, byc, [byv], align)`
                // `range_fn(func, range, fill, byc, [byv], align, to)`
                // `[byv]` are variadic arguments, byc indicate the length of arguments
                let range_expr = self.get_range_expr(&func.args, 0)?;
                let range_str = parse_str_expr(&func.args, 1)?;
                let range = parse_duration_expr(&func.args, 1)?;
                let byc = str::parse::<usize>(parse_str_expr(&func.args, 3)?)
                    .map_err(|e| DataFusionError::Plan(e.to_string()))?;
                let by = parse_expr_list(&func.args, 4, byc)?;
                let align = parse_duration(parse_str_expr(&func.args, byc + 4)?)
                    .map_err(DataFusionError::Plan)?;
                let align = parse_duration_expr(&func.args, byc + 4)?;
                let align_to = parse_align_to(&func.args, byc + 5)?;
                let mut data_type = range_expr.get_type(self.input_plan.schema())?;
                let mut need_cast = false;
                let fill = Fill::try_from_str(parse_str_expr(&func.args, 2)?, &data_type)?;

@@ -133,30 +213,19 @@ impl<'a> TreeNodeRewriter for RangeExprRewriter<'a> {
                    data_type = DataType::Float64;
                    need_cast = true;
                }
                if !self.by.is_empty() && self.by != by {
                    return Err(DataFusionError::Plan(
                        "Inconsistent by given in Range Function Rewrite".into(),
                    ));
                } else {
                    self.by = by;
                }
                if self.align != Duration::default() && self.align != align {
                    return Err(DataFusionError::Plan(
                        "Inconsistent align given in Range Function Rewrite".into(),
                    ));
                } else {
                    self.align = align;
                }
                inconsistent_check!(self.by, !self.by.is_empty());
                inconsistent_check!(self.align, self.align != Duration::default());
                inconsistent_check!(self.align_to, self.align_to != 0);
                let range_fn = RangeFn {
                    name: format!(
                        "{} RANGE {} FILL {}",
                        range_expr.display_name()?,
                        range_str,
                        parse_expr_to_string(&func.args, 1)?,
                        fill
                    ),
                    data_type,
                    expr: range_expr,
                    range: parse_duration(range_str).map_err(DataFusionError::Plan)?,
                    range,
                    fill,
                    need_cast,
                };
@@ -221,6 +290,7 @@ impl RangePlanRewriter {
        let mut range_rewriter = RangeExprRewriter {
            input_plan: &input,
            align: Duration::default(),
            align_to: 0,
            by: vec![],
            range_fn: BTreeSet::new(),
            sub_aggr: aggr_plan,

@@ -237,6 +307,7 @@ impl RangePlanRewriter {
            input.clone(),
            range_rewriter.range_fn.into_iter().collect(),
            range_rewriter.align,
            range_rewriter.align_to,
            time_index,
            range_rewriter.by,
            &new_expr,
@@ -468,7 +539,7 @@ mod test {
    async fn range_no_project() {
        let query = r#"SELECT timestamp, tag_0, tag_1, avg(field_0 + field_1) RANGE '5m' FROM test ALIGN '1h' by (tag_0,tag_1);"#;
        let expected = String::from(
            "RangeSelect: range_exprs=[AVG(test.field_0 + test.field_1) RANGE 5m FILL NULL], align=3600s time_index=timestamp [timestamp:Timestamp(Millisecond, None), tag_0:Utf8, tag_1:Utf8, AVG(test.field_0 + test.field_1) RANGE 5m FILL NULL:Float64;N]\
            "RangeSelect: range_exprs=[AVG(test.field_0 + test.field_1) RANGE 5m FILL NULL], align=3600000ms, align_to=0ms, align_by=[test.tag_0, test.tag_1], time_index=timestamp [timestamp:Timestamp(Millisecond, None), tag_0:Utf8, tag_1:Utf8, AVG(test.field_0 + test.field_1) RANGE 5m FILL NULL:Float64;N]\
            \n  TableScan: test [tag_0:Utf8, tag_1:Utf8, tag_2:Utf8, tag_3:Utf8, tag_4:Utf8, timestamp:Timestamp(Millisecond, None), field_0:Float64;N, field_1:Float64;N, field_2:Float64;N, field_3:Float64;N, field_4:Float64;N]"
        );
        query_plan_compare(query, expected).await;

@@ -479,7 +550,7 @@ mod test {
        let query = r#"SELECT (avg(field_0 + field_1)/4) RANGE '5m' FROM test ALIGN '1h' by (tag_0,tag_1);"#;
        let expected = String::from(
            "Projection: AVG(test.field_0 + test.field_1) RANGE 5m FILL NULL / Int64(4) [AVG(test.field_0 + test.field_1) RANGE 5m FILL NULL / Int64(4):Float64;N]\
            \n  RangeSelect: range_exprs=[AVG(test.field_0 + test.field_1) RANGE 5m FILL NULL], align=3600s time_index=timestamp [AVG(test.field_0 + test.field_1) RANGE 5m FILL NULL:Float64;N, timestamp:Timestamp(Millisecond, None), tag_0:Utf8, tag_1:Utf8]\
            \n  RangeSelect: range_exprs=[AVG(test.field_0 + test.field_1) RANGE 5m FILL NULL], align=3600000ms, align_to=0ms, align_by=[test.tag_0, test.tag_1], time_index=timestamp [AVG(test.field_0 + test.field_1) RANGE 5m FILL NULL:Float64;N, timestamp:Timestamp(Millisecond, None), tag_0:Utf8, tag_1:Utf8]\
            \n    TableScan: test [tag_0:Utf8, tag_1:Utf8, tag_2:Utf8, tag_3:Utf8, tag_4:Utf8, timestamp:Timestamp(Millisecond, None), field_0:Float64;N, field_1:Float64;N, field_2:Float64;N, field_3:Float64;N, field_4:Float64;N]"
        );
        query_plan_compare(query, expected).await;

@@ -491,7 +562,7 @@ mod test {
            r#"SELECT (covar(field_0 + field_1, field_1)/4) RANGE '5m' FROM test ALIGN '1h';"#;
        let expected = String::from(
            "Projection: COVARIANCE(test.field_0 + test.field_1,test.field_1) RANGE 5m FILL NULL / Int64(4) [COVARIANCE(test.field_0 + test.field_1,test.field_1) RANGE 5m FILL NULL / Int64(4):Float64;N]\
            \n  RangeSelect: range_exprs=[COVARIANCE(test.field_0 + test.field_1,test.field_1) RANGE 5m FILL NULL], align=3600s time_index=timestamp [COVARIANCE(test.field_0 + test.field_1,test.field_1) RANGE 5m FILL NULL:Float64;N, timestamp:Timestamp(Millisecond, None), tag_0:Utf8, tag_1:Utf8, tag_2:Utf8, tag_3:Utf8, tag_4:Utf8]\
            \n  RangeSelect: range_exprs=[COVARIANCE(test.field_0 + test.field_1,test.field_1) RANGE 5m FILL NULL], align=3600000ms, align_to=0ms, align_by=[test.tag_0, test.tag_1, test.tag_2, test.tag_3, test.tag_4], time_index=timestamp [COVARIANCE(test.field_0 + test.field_1,test.field_1) RANGE 5m FILL NULL:Float64;N, timestamp:Timestamp(Millisecond, None), tag_0:Utf8, tag_1:Utf8, tag_2:Utf8, tag_3:Utf8, tag_4:Utf8]\
            \n    TableScan: test [tag_0:Utf8, tag_1:Utf8, tag_2:Utf8, tag_3:Utf8, tag_4:Utf8, timestamp:Timestamp(Millisecond, None), field_0:Float64;N, field_1:Float64;N, field_2:Float64;N, field_3:Float64;N, field_4:Float64;N]"
        );
        query_plan_compare(query, expected).await;

@@ -502,7 +573,7 @@ mod test {
        let query = r#"SELECT ((avg(field_0)+sum(field_1))/4) RANGE '5m' FROM test ALIGN '1h' by (tag_0,tag_1) FILL NULL;"#;
        let expected = String::from(
            "Projection: (AVG(test.field_0) RANGE 5m FILL NULL + SUM(test.field_1) RANGE 5m FILL NULL) / Int64(4) [AVG(test.field_0) RANGE 5m FILL NULL + SUM(test.field_1) RANGE 5m FILL NULL / Int64(4):Float64;N]\
            \n  RangeSelect: range_exprs=[AVG(test.field_0) RANGE 5m FILL NULL, SUM(test.field_1) RANGE 5m FILL NULL], align=3600s time_index=timestamp [AVG(test.field_0) RANGE 5m FILL NULL:Float64;N, SUM(test.field_1) RANGE 5m FILL NULL:Float64;N, timestamp:Timestamp(Millisecond, None), tag_0:Utf8, tag_1:Utf8]\
            \n  RangeSelect: range_exprs=[AVG(test.field_0) RANGE 5m FILL NULL, SUM(test.field_1) RANGE 5m FILL NULL], align=3600000ms, align_to=0ms, align_by=[test.tag_0, test.tag_1], time_index=timestamp [AVG(test.field_0) RANGE 5m FILL NULL:Float64;N, SUM(test.field_1) RANGE 5m FILL NULL:Float64;N, timestamp:Timestamp(Millisecond, None), tag_0:Utf8, tag_1:Utf8]\
            \n    TableScan: test [tag_0:Utf8, tag_1:Utf8, tag_2:Utf8, tag_3:Utf8, tag_4:Utf8, timestamp:Timestamp(Millisecond, None), field_0:Float64;N, field_1:Float64;N, field_2:Float64;N, field_3:Float64;N, field_4:Float64;N]"
        );
        query_plan_compare(query, expected).await;

@@ -515,7 +586,7 @@ mod test {
            "Projection: foo + Int64(1) [foo + Int64(1):Float64;N]\
            \n  Filter: foo > Int64(1) [foo:Float64;N]\
            \n    Projection: (AVG(test.field_0) RANGE 5m FILL NULL + SUM(test.field_1) RANGE 5m FILL NULL) / Int64(4) AS foo [foo:Float64;N]\
            \n      RangeSelect: range_exprs=[AVG(test.field_0) RANGE 5m FILL NULL, SUM(test.field_1) RANGE 5m FILL NULL], align=3600s time_index=timestamp [AVG(test.field_0) RANGE 5m FILL NULL:Float64;N, SUM(test.field_1) RANGE 5m FILL NULL:Float64;N, timestamp:Timestamp(Millisecond, None), tag_0:Utf8, tag_1:Utf8]\
            \n      RangeSelect: range_exprs=[AVG(test.field_0) RANGE 5m FILL NULL, SUM(test.field_1) RANGE 5m FILL NULL], align=3600000ms, align_to=0ms, align_by=[test.tag_0, test.tag_1], time_index=timestamp [AVG(test.field_0) RANGE 5m FILL NULL:Float64;N, SUM(test.field_1) RANGE 5m FILL NULL:Float64;N, timestamp:Timestamp(Millisecond, None), tag_0:Utf8, tag_1:Utf8]\
            \n        TableScan: test [tag_0:Utf8, tag_1:Utf8, tag_2:Utf8, tag_3:Utf8, tag_4:Utf8, timestamp:Timestamp(Millisecond, None), field_0:Float64;N, field_1:Float64;N, field_2:Float64;N, field_3:Float64;N, field_4:Float64;N]"
        );
        query_plan_compare(query, expected).await;

@@ -526,7 +597,7 @@ mod test {
        let query = r#"SELECT ((avg(a)+sum(b))/4) RANGE '5m' FROM (SELECT field_0 as a, field_1 as b, tag_0 as c, tag_1 as d, timestamp from test where field_0 > 1.0) ALIGN '1h' by (c, d) FILL NULL;"#;
        let expected = String::from(
            "Projection: (AVG(a) RANGE 5m FILL NULL + SUM(b) RANGE 5m FILL NULL) / Int64(4) [AVG(a) RANGE 5m FILL NULL + SUM(b) RANGE 5m FILL NULL / Int64(4):Float64;N]\
            \n  RangeSelect: range_exprs=[AVG(a) RANGE 5m FILL NULL, SUM(b) RANGE 5m FILL NULL], align=3600s time_index=timestamp [AVG(a) RANGE 5m FILL NULL:Float64;N, SUM(b) RANGE 5m FILL NULL:Float64;N, timestamp:Timestamp(Millisecond, None), c:Utf8, d:Utf8]\
            \n  RangeSelect: range_exprs=[AVG(a) RANGE 5m FILL NULL, SUM(b) RANGE 5m FILL NULL], align=3600000ms, align_to=0ms, align_by=[c, d], time_index=timestamp [AVG(a) RANGE 5m FILL NULL:Float64;N, SUM(b) RANGE 5m FILL NULL:Float64;N, timestamp:Timestamp(Millisecond, None), c:Utf8, d:Utf8]\
            \n    Projection: test.field_0 AS a, test.field_1 AS b, test.tag_0 AS c, test.tag_1 AS d, test.timestamp [a:Float64;N, b:Float64;N, c:Utf8, d:Utf8, timestamp:Timestamp(Millisecond, None)]\
            \n      Filter: test.field_0 > Float64(1) [tag_0:Utf8, tag_1:Utf8, tag_2:Utf8, tag_3:Utf8, tag_4:Utf8, timestamp:Timestamp(Millisecond, None), field_0:Float64;N, field_1:Float64;N, field_2:Float64;N, field_3:Float64;N, field_4:Float64;N]\
            \n        TableScan: test [tag_0:Utf8, tag_1:Utf8, tag_2:Utf8, tag_3:Utf8, tag_4:Utf8, timestamp:Timestamp(Millisecond, None), field_0:Float64;N, field_1:Float64;N, field_2:Float64;N, field_3:Float64;N, field_4:Float64;N]"

@@ -539,7 +610,7 @@ mod test {
        let query = r#"SELECT sin(avg(field_0 + field_1) RANGE '5m' + 1) FROM test ALIGN '1h' by (tag_0,tag_1);"#;
        let expected = String::from(
            "Projection: sin(AVG(test.field_0 + test.field_1) RANGE 5m FILL NULL + Int64(1)) [sin(AVG(test.field_0 + test.field_1) RANGE 5m FILL NULL + Int64(1)):Float64;N]\
            \n  RangeSelect: range_exprs=[AVG(test.field_0 + test.field_1) RANGE 5m FILL NULL], align=3600s time_index=timestamp [AVG(test.field_0 + test.field_1) RANGE 5m FILL NULL:Float64;N, timestamp:Timestamp(Millisecond, None), tag_0:Utf8, tag_1:Utf8]\
            \n  RangeSelect: range_exprs=[AVG(test.field_0 + test.field_1) RANGE 5m FILL NULL], align=3600000ms, align_to=0ms, align_by=[test.tag_0, test.tag_1], time_index=timestamp [AVG(test.field_0 + test.field_1) RANGE 5m FILL NULL:Float64;N, timestamp:Timestamp(Millisecond, None), tag_0:Utf8, tag_1:Utf8]\
            \n    TableScan: test [tag_0:Utf8, tag_1:Utf8, tag_2:Utf8, tag_3:Utf8, tag_4:Utf8, timestamp:Timestamp(Millisecond, None), field_0:Float64;N, field_1:Float64;N, field_2:Float64;N, field_3:Float64;N, field_4:Float64;N]"
        );
        query_plan_compare(query, expected).await;

@@ -550,7 +621,7 @@ mod test {
        let query = r#"SELECT avg(field_0) RANGE '5m' FILL 6.0 + avg(field_0) RANGE '5m' FILL 6.0 FROM test ALIGN '1h' by (tag_0,tag_1);"#;
        let expected = String::from(
            "Projection: AVG(test.field_0) RANGE 5m FILL 6 + AVG(test.field_0) RANGE 5m FILL 6 [AVG(test.field_0) RANGE 5m FILL 6 + AVG(test.field_0) RANGE 5m FILL 6:Float64]\
            \n  RangeSelect: range_exprs=[AVG(test.field_0) RANGE 5m FILL 6], align=3600s time_index=timestamp [AVG(test.field_0) RANGE 5m FILL 6:Float64, timestamp:Timestamp(Millisecond, None), tag_0:Utf8, tag_1:Utf8]\
            \n  RangeSelect: range_exprs=[AVG(test.field_0) RANGE 5m FILL 6], align=3600000ms, align_to=0ms, align_by=[test.tag_0, test.tag_1], time_index=timestamp [AVG(test.field_0) RANGE 5m FILL 6:Float64, timestamp:Timestamp(Millisecond, None), tag_0:Utf8, tag_1:Utf8]\
            \n    TableScan: test [tag_0:Utf8, tag_1:Utf8, tag_2:Utf8, tag_3:Utf8, tag_4:Utf8, timestamp:Timestamp(Millisecond, None), field_0:Float64;N, field_1:Float64;N, field_2:Float64;N, field_3:Float64;N, field_4:Float64;N]"
        );
        query_plan_compare(query, expected).await;

@@ -561,7 +632,7 @@ mod test {
        let query = r#"SELECT round(sin(avg(field_0 + field_1) RANGE '5m' + 1)) FROM test ALIGN '1h' by (tag_0,tag_1);"#;
        let expected = String::from(
            "Projection: round(sin(AVG(test.field_0 + test.field_1) RANGE 5m FILL NULL + Int64(1))) [round(sin(AVG(test.field_0 + test.field_1) RANGE 5m FILL NULL + Int64(1))):Float64;N]\
            \n  RangeSelect: range_exprs=[AVG(test.field_0 + test.field_1) RANGE 5m FILL NULL], align=3600s time_index=timestamp [AVG(test.field_0 + test.field_1) RANGE 5m FILL NULL:Float64;N, timestamp:Timestamp(Millisecond, None), tag_0:Utf8, tag_1:Utf8]\
            \n  RangeSelect: range_exprs=[AVG(test.field_0 + test.field_1) RANGE 5m FILL NULL], align=3600000ms, align_to=0ms, align_by=[test.tag_0, test.tag_1], time_index=timestamp [AVG(test.field_0 + test.field_1) RANGE 5m FILL NULL:Float64;N, timestamp:Timestamp(Millisecond, None), tag_0:Utf8, tag_1:Utf8]\
            \n    TableScan: test [tag_0:Utf8, tag_1:Utf8, tag_2:Utf8, tag_3:Utf8, tag_4:Utf8, timestamp:Timestamp(Millisecond, None), field_0:Float64;N, field_1:Float64;N, field_2:Float64;N, field_3:Float64;N, field_4:Float64;N]"
        );
        query_plan_compare(query, expected).await;

@@ -572,7 +643,7 @@ mod test {
        let query = r#"SELECT gcd(CAST(max(field_0 + 1) Range '5m' FILL NULL AS Int64), CAST(tag_0 AS Int64)) + round(max(field_2+1) Range '6m' FILL NULL + 1) + max(field_2+3) Range '10m' FILL NULL * CAST(tag_1 AS Float64) + 1 FROM test ALIGN '1h' by (tag_0, tag_1);"#;
        let expected = String::from(
            "Projection: gcd(CAST(MAX(test.field_0 + Int64(1)) RANGE 5m FILL NULL AS Int64), CAST(test.tag_0 AS Int64)) + round(MAX(test.field_2 + Int64(1)) RANGE 6m FILL NULL + Int64(1)) + MAX(test.field_2 + Int64(3)) RANGE 10m FILL NULL * CAST(test.tag_1 AS Float64) + Int64(1) [gcd(MAX(test.field_0 + Int64(1)) RANGE 5m FILL NULL,test.tag_0) + round(MAX(test.field_2 + Int64(1)) RANGE 6m FILL NULL + Int64(1)) + MAX(test.field_2 + Int64(3)) RANGE 10m FILL NULL * test.tag_1 + Int64(1):Float64;N]\
            \n  RangeSelect: range_exprs=[MAX(test.field_0 + Int64(1)) RANGE 5m FILL NULL, MAX(test.field_2 + Int64(1)) RANGE 6m FILL NULL, MAX(test.field_2 + Int64(3)) RANGE 10m FILL NULL], align=3600s time_index=timestamp [MAX(test.field_0 + Int64(1)) RANGE 5m FILL NULL:Float64;N, MAX(test.field_2 + Int64(1)) RANGE 6m FILL NULL:Float64;N, MAX(test.field_2 + Int64(3)) RANGE 10m FILL NULL:Float64;N, timestamp:Timestamp(Millisecond, None), tag_0:Utf8, tag_1:Utf8]\
            \n  RangeSelect: range_exprs=[MAX(test.field_0 + Int64(1)) RANGE 5m FILL NULL, MAX(test.field_2 + Int64(1)) RANGE 6m FILL NULL, MAX(test.field_2 + Int64(3)) RANGE 10m FILL NULL], align=3600000ms, align_to=0ms, align_by=[test.tag_0, test.tag_1], time_index=timestamp [MAX(test.field_0 + Int64(1)) RANGE 5m FILL NULL:Float64;N, MAX(test.field_2 + Int64(1)) RANGE 6m FILL NULL:Float64;N, MAX(test.field_2 + Int64(3)) RANGE 10m FILL NULL:Float64;N, timestamp:Timestamp(Millisecond, None), tag_0:Utf8, tag_1:Utf8]\
            \n    TableScan: test [tag_0:Utf8, tag_1:Utf8, tag_2:Utf8, tag_3:Utf8, tag_4:Utf8, timestamp:Timestamp(Millisecond, None), field_0:Float64;N, field_1:Float64;N, field_2:Float64;N, field_3:Float64;N, field_4:Float64;N]"
        );
        query_plan_compare(query, expected).await;

@@ -582,7 +653,7 @@ mod test {
    async fn range_linear_on_integer() {
        let query = r#"SELECT min(CAST(field_0 AS Int64) + CAST(field_1 AS Int64)) RANGE '5m' FILL LINEAR FROM test ALIGN '1h' by (tag_0,tag_1);"#;
        let expected = String::from(
            "RangeSelect: range_exprs=[MIN(test.field_0 + test.field_1) RANGE 5m FILL LINEAR], align=3600s time_index=timestamp [MIN(test.field_0 + test.field_1) RANGE 5m FILL LINEAR:Float64;N]\
            "RangeSelect: range_exprs=[MIN(test.field_0 + test.field_1) RANGE 5m FILL LINEAR], align=3600000ms, align_to=0ms, align_by=[test.tag_0, test.tag_1], time_index=timestamp [MIN(test.field_0 + test.field_1) RANGE 5m FILL LINEAR:Float64;N]\
            \n  TableScan: test [tag_0:Utf8, tag_1:Utf8, tag_2:Utf8, tag_3:Utf8, tag_4:Utf8, timestamp:Timestamp(Millisecond, None), field_0:Float64;N, field_1:Float64;N, field_2:Float64;N, field_3:Float64;N, field_4:Float64;N]"
        );
        query_plan_compare(query, expected).await;

@@ -628,4 +699,68 @@ mod test {
            "Error during planning: Illegal argument `Int64(5)` in range select query"
        )
    }

    #[test]
    fn test_parse_duration_expr() {
        let interval_to_ms = |interval: Interval| -> u128 {
            (interval.to_nanosecond() / NANOS_PER_MILLI as i128) as u128
        };
        // test IntervalYearMonth
        let interval = Interval::from_year_month(10);
        let args = vec![Expr::Literal(ScalarValue::IntervalYearMonth(Some(
            interval.to_i32(),
        )))];
        assert_eq!(
            parse_duration_expr(&args, 0).unwrap().as_millis(),
            interval_to_ms(interval)
        );
        // test IntervalDayTime
        let interval = Interval::from_day_time(10, 10);
        let args = vec![Expr::Literal(ScalarValue::IntervalDayTime(Some(
            interval.to_i64(),
        )))];
        assert_eq!(
            parse_duration_expr(&args, 0).unwrap().as_millis(),
            interval_to_ms(interval)
        );
        // test IntervalMonthDayNano
        let interval = Interval::from_month_day_nano(10, 10, 10);
        let args = vec![Expr::Literal(ScalarValue::IntervalMonthDayNano(Some(
            interval.to_i128(),
        )))];
        assert_eq!(
            parse_duration_expr(&args, 0).unwrap().as_millis(),
            interval_to_ms(interval)
        );
        // test Duration
        let args = vec![Expr::Literal(ScalarValue::Utf8(Some("1y4w".into())))];
        assert_eq!(
            parse_duration_expr(&args, 0).unwrap(),
            parse_duration("1y4w").unwrap()
        );
        // test err
        assert!(parse_duration_expr(&args, 10).is_err());
    }

    #[test]
    fn test_parse_align_to() {
        // test NOW
        let args = vec![Expr::Literal(ScalarValue::Utf8(Some("NOW".into())))];
        let epsilon = parse_align_to(&args, 0).unwrap() - Timestamp::current_millis().value();
        assert!(epsilon.abs() < 100);
        // test CALENDAR
        let args = vec![
            Expr::Literal(ScalarValue::Utf8(Some("".into()))),
            Expr::Literal(ScalarValue::Utf8(Some("CALENDAR".into()))),
        ];
        assert!(
            parse_align_to(&args, 0).unwrap() == parse_align_to(&args, 1).unwrap()
                && parse_align_to(&args, 0).unwrap() == 0
        );
        // test Timestamp string
        let args = vec![Expr::Literal(ScalarValue::Utf8(Some(
            "1970-01-01T00:00:00+08:00".into(),
        )))];
        assert!(parse_align_to(&args, 0).unwrap() == -8 * 60 * 60 * 1000);
    }
}

@@ -200,7 +200,7 @@ mod tests {
}

#[test]
fn test_tls_option_verifiy_ca() {
fn test_tls_option_verify_ca() {
let s = r#"
{
"mode": "verify_ca",
@@ -219,7 +219,7 @@ mod tests {
}

#[test]
fn test_tls_option_verifiy_full() {
fn test_tls_option_verify_full() {
let s = r#"
{
"mode": "verify_full",

@@ -29,6 +29,7 @@ impl<'a> ParserContext<'a> {
}
let _ = self.parser.next_token();

let if_exists = self.parser.parse_keywords(&[Keyword::IF, Keyword::EXISTS]);
let raw_table_ident =
self.parser
.parse_object_name()
@@ -45,7 +46,7 @@ impl<'a> ParserContext<'a> {
}
);

Ok(Statement::DropTable(DropTable::new(table_ident)))
Ok(Statement::DropTable(DropTable::new(table_ident, if_exists)))
}
}

@@ -63,7 +64,15 @@ mod tests {
let mut stmts = result.unwrap();
assert_eq!(
stmts.pop().unwrap(),
Statement::DropTable(DropTable::new(ObjectName(vec![Ident::new("foo")])))
Statement::DropTable(DropTable::new(ObjectName(vec![Ident::new("foo")]), false))
);

let sql = "DROP TABLE IF EXISTS foo";
let result = ParserContext::create_with_dialect(sql, &GreptimeDbDialect {});
let mut stmts = result.unwrap();
assert_eq!(
stmts.pop().unwrap(),
Statement::DropTable(DropTable::new(ObjectName(vec![Ident::new("foo")]), true))
);

let sql = "DROP TABLE my_schema.foo";
@@ -71,10 +80,10 @@ mod tests {
let mut stmts = result.unwrap();
assert_eq!(
stmts.pop().unwrap(),
Statement::DropTable(DropTable::new(ObjectName(vec![
Ident::new("my_schema"),
Ident::new("foo")
])))
Statement::DropTable(DropTable::new(
ObjectName(vec![Ident::new("my_schema"), Ident::new("foo")]),
false
))
);

let sql = "DROP TABLE my_catalog.my_schema.foo";
@@ -82,11 +91,14 @@ mod tests {
let mut stmts = result.unwrap();
assert_eq!(
stmts.pop().unwrap(),
Statement::DropTable(DropTable::new(ObjectName(vec![
Ident::new("my_catalog"),
Ident::new("my_schema"),
Ident::new("foo")
])))
Statement::DropTable(DropTable::new(
ObjectName(vec![
Ident::new("my_catalog"),
Ident::new("my_schema"),
Ident::new("foo")
]),
false
))
)
}
}

@@ -383,13 +383,23 @@ pub fn sql_column_def_to_grpc_column_def(col: &ColumnDef) -> Result<api::v1::Col
.context(ConvertToGrpcDataTypeSnafu)?
.to_parts();

let is_primary_key = col
.options
.iter()
.any(|o| matches!(o.option, ColumnOption::Unique { is_primary: true }));

let semantic_type = if is_primary_key {
SemanticType::Tag
} else {
SemanticType::Field
};

Ok(api::v1::ColumnDef {
name,
data_type: datatype as i32,
is_nullable,
default_constraint: default_constraint.unwrap_or_default(),
// TODO(#1308): support adding new primary key columns
semantic_type: SemanticType::Field as _,
semantic_type: semantic_type as _,
comment: String::new(),
datatype_extension: datatype_ext,
})
@@ -826,6 +836,7 @@ mod tests {
assert!(grpc_column_def.is_nullable); // nullable when options are empty
assert_eq!(ColumnDataType::Float64 as i32, grpc_column_def.data_type);
assert!(grpc_column_def.default_constraint.is_empty());
assert_eq!(grpc_column_def.semantic_type, SemanticType::Field as i32);

// test not null
let column_def = ColumnDef {
@@ -840,6 +851,20 @@ mod tests {

let grpc_column_def = sql_column_def_to_grpc_column_def(&column_def).unwrap();
assert!(!grpc_column_def.is_nullable);

// test primary key
let column_def = ColumnDef {
name: "col".into(),
data_type: SqlDataType::Double,
collation: None,
options: vec![ColumnOptionDef {
name: None,
option: ColumnOption::Unique { is_primary: true },
}],
};

let grpc_column_def = sql_column_def_to_grpc_column_def(&column_def).unwrap();
assert_eq!(grpc_column_def.semantic_type, SemanticType::Tag as i32);
}

#[test]

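Taken together, the two hunks above change how a SQL `PRIMARY KEY` column surfaces over gRPC: it now maps to `SemanticType::Tag` rather than unconditionally to `SemanticType::Field`, which is also why the `DESC TABLE` expectations later in this change flip `host`, `idc`, and `IdC` from `FIELD` to `TAG`.
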
@@ -19,15 +19,24 @@ use sqlparser_derive::{Visit, VisitMut};
#[derive(Debug, Clone, PartialEq, Eq, Visit, VisitMut)]
pub struct DropTable {
table_name: ObjectName,
/// drop table if exists
drop_if_exists: bool,
}

impl DropTable {
/// Creates a statement for `DROP TABLE`
pub fn new(table_name: ObjectName) -> Self {
Self { table_name }
pub fn new(table_name: ObjectName, if_exists: bool) -> Self {
Self {
table_name,
drop_if_exists: if_exists,
}
}

pub fn table_name(&self) -> &ObjectName {
&self.table_name
}

pub fn drop_if_exists(&self) -> bool {
self.drop_if_exists
}
}

@@ -29,7 +29,7 @@ common-recordbatch.workspace = true
common-runtime.workspace = true
common-telemetry.workspace = true
common-test-util.workspace = true
datanode = { workspace = true, features = ["testing"] }
datanode = { workspace = true }
datatypes.workspace = true
dotenv = "0.15"
frontend = { workspace = true, features = ["testing"] }

@@ -270,7 +270,8 @@ impl GreptimeDbClusterBuilder {
Arc::new(handlers_executor),
);

let instance = FrontendBuilder::new(meta_backend, datanode_clients, meta_client)
let instance = FrontendBuilder::new(meta_backend.clone(), datanode_clients, meta_client)
.with_cache_invalidator(meta_backend)
.with_heartbeat_task(heartbeat_task)
.try_build()
.await

@@ -3,7 +3,9 @@
## Sqlness manual

### Case file
Sqlness has two types of file

Sqlness has two types of file:

- `.sql`: test input, SQL only
- `.result`: expected test output, SQL and its results

@@ -14,17 +16,21 @@ check change logs to solve the problem.
You only need to write the test SQL in the `.sql` file and run the test.

### Case organization
The root dir of input cases is `tests/cases`. It contains several sub-directories stand for different test

The root dir of input cases is `tests/cases`. It contains several subdirectories that stand for different test
modes. E.g., `standalone/` contains all the tests to run under `greptimedb standalone start` mode.

Under the first level of sub-directory (e.g. the `cases/standalone`), you can organize your cases as you like.
Under the first-level subdirectory (e.g., `cases/standalone`), you can organize your cases as you like.
Sqlness walks through every file recursively and runs them.

## Run the test
Unlike other tests, this harness is in a binary target form. You can run it with

Unlike other tests, this harness is in a binary target form. You can run it with:

```shell
cargo sqlness
```

It automatically finishes the following procedures: compile `GreptimeDB`, start it, grab tests and feed them to
the server, then collect and compare the results. You only need to check whether the `.result` files changed.
If not, congratulations, the test has passed 🥳!

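As a concrete sketch of the case format described above (the file name and output are illustrative, not taken from the repository), a case pair might look like:

```sql
-- select.sql: the input, SQL only
SELECT 1 + 1;
```

```
-- select.result: the same SQL followed by its captured output
SELECT 1 + 1;

+---------------------+
| Int64(1) + Int64(1) |
+---------------------+
| 2                   |
+---------------------+
```
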
@@ -59,7 +59,7 @@ DESC TABLE test;
| i | Int32 | | YES | | FIELD |
| j | TimestampMillisecond | PRI | NO | | TIMESTAMP |
| k | Int32 | | YES | | FIELD |
| host | String | | YES | | FIELD |
| host | String | PRI | YES | | TAG |
+--------+----------------------+-----+------+---------+---------------+

ALTER TABLE test ADD COLUMN idc STRING default 'idc' PRIMARY KEY;
@@ -83,8 +83,8 @@ DESC TABLE test;
| i | Int32 | | YES | | FIELD |
| j | TimestampMillisecond | PRI | NO | | TIMESTAMP |
| k | Int32 | | YES | | FIELD |
| host | String | | YES | | FIELD |
| idc | String | | YES | idc | FIELD |
| host | String | PRI | YES | | TAG |
| idc | String | PRI | YES | idc | TAG |
+--------+----------------------+-----+------+---------+---------------+

ALTER TABLE test ADD COLUMN "IdC" STRING default 'idc' PRIMARY KEY;
@@ -99,9 +99,9 @@ DESC TABLE test;
| i | Int32 | | YES | | FIELD |
| j | TimestampMillisecond | PRI | NO | | TIMESTAMP |
| k | Int32 | | YES | | FIELD |
| host | String | | YES | | FIELD |
| idc | String | | YES | idc | FIELD |
| IdC | String | | YES | idc | FIELD |
| host | String | PRI | YES | | TAG |
| idc | String | PRI | YES | idc | TAG |
| IdC | String | PRI | YES | idc | TAG |
+--------+----------------------+-----+------+---------+---------------+

DROP TABLE test;

22
tests/cases/standalone/common/drop/drop_table.result
Normal file
@@ -0,0 +1,22 @@
DROP TABLE IF EXISTS foo;

Affected Rows: 0

create table foo (
host string,
ts timestamp DEFAULT '2023-04-29 00:00:00+00:00',
cpu double default 0,
TIME INDEX (ts),
PRIMARY KEY(host)
) engine=mito with(regions=1);

Affected Rows: 0

DROP TABLE IF EXISTS foo;

Affected Rows: 0

DROP TABLE IF EXISTS foo;

Affected Rows: 0

13
tests/cases/standalone/common/drop/drop_table.sql
Normal file
@@ -0,0 +1,13 @@
DROP TABLE IF EXISTS foo;

create table foo (
host string,
ts timestamp DEFAULT '2023-04-29 00:00:00+00:00',
cpu double default 0,
TIME INDEX (ts),
PRIMARY KEY(host)
) engine=mito with(regions=1);

DROP TABLE IF EXISTS foo;

DROP TABLE IF EXISTS foo;
@@ -240,7 +240,7 @@ tql eval (3000, 3000, '1s') http_requests{g="canary"} unless ignoring(g) http_re
-- NOT SUPPORTED: `vector()`
tql eval (3000, 3000, '1s') http_requests AND ON (dummy) vector(1);

Error: 1004(InvalidArguments), Expect a PromQL expr but not found, input expr: Call(Call { func: Function { name: "vector", arg_types: [Scalar], variadic: false, return_type: Vector }, args: FunctionArgs { args: [NumberLiteral(NumberLiteral { val: 1.0 })] } })
Error: 1004(InvalidArguments), Unsupported expr type: vector

-- eval instant at 50m http_requests AND IGNORING (group, instance, job) vector(1)
-- http_requests{group="canary", instance="0", job="api-server"} 300
@@ -254,7 +254,7 @@ Error: 1004(InvalidArguments), Expect a PromQL expr but not found, input expr: C
-- NOT SUPPORTED: `vector()`
tql eval (3000, 3000, '1s') http_requests AND IGNORING (g, instance, job) vector(1);

Error: 1004(InvalidArguments), Expect a PromQL expr but not found, input expr: Call(Call { func: Function { name: "vector", arg_types: [Scalar], variadic: false, return_type: Vector }, args: FunctionArgs { args: [NumberLiteral(NumberLiteral { val: 1.0 })] } })
Error: 1004(InvalidArguments), Unsupported expr type: vector

drop table http_requests;

368
tests/cases/standalone/common/promql/time_fn.result
Normal file
@@ -0,0 +1,368 @@
-- Test `time()` and related functions.
-- Some cases are part of promql/testdata/functions.test, "Test time-related functions" section
-- And others are from compliance test
-- time() with itself or scalar
tql eval (3000, 3000, '1s') time();

+---------------------+----------------------+
| time | time / Float64(1000) |
+---------------------+----------------------+
| 1970-01-01T00:50:00 | 3000.0 |
+---------------------+----------------------+

tql eval (0, 0, '1s') time();

+---------------------+----------------------+
| time | time / Float64(1000) |
+---------------------+----------------------+
| 1970-01-01T00:00:00 | 0.0 |
+---------------------+----------------------+

tql eval (0.001, 1, '1s') time();

+-------------------------+----------------------+
| time | time / Float64(1000) |
+-------------------------+----------------------+
| 1970-01-01T00:00:00.001 | 0.001 |
+-------------------------+----------------------+

tql eval (0, 0, '1s') time() + 1;

+---------------------+-------+
| time | value |
+---------------------+-------+
| 1970-01-01T00:00:00 | 1.0 |
+---------------------+-------+

tql eval (0, 0, '1s') 1 + time();

+---------------------+-------+
| time | value |
+---------------------+-------+
| 1970-01-01T00:00:00 | 1.0 |
+---------------------+-------+

-- expect error: parse error: comparisons between scalars must use BOOL modifier
tql eval (0, 0, '1s') time() < 1;

Error: 2000(InvalidSyntax), comparisons between scalars must use BOOL modifier

tql eval (0, 0, '1s') time() < bool 1;

+---------------------+-------+
| time | value |
+---------------------+-------+
| 1970-01-01T00:00:00 | 1.0 |
+---------------------+-------+

tql eval (0, 0, '1s') time() > bool 1;

+---------------------+-------+
| time | value |
+---------------------+-------+
| 1970-01-01T00:00:00 | 0.0 |
+---------------------+-------+

tql eval (1000, 1000, '1s') time() + time();

+---------------------+--------+
| time | value |
+---------------------+--------+
| 1970-01-01T00:16:40 | 2000.0 |
+---------------------+--------+

-- expect error: parse error: comparisons between scalars must use BOOL modifier
tql eval (1000, 1000, '1s') time() == time();

Error: 2000(InvalidSyntax), comparisons between scalars must use BOOL modifier

tql eval (1000, 1000, '1s') time() == bool time();

+---------------------+-------+
| time | value |
+---------------------+-------+
| 1970-01-01T00:16:40 | 1.0 |
+---------------------+-------+

tql eval (1000, 1000, '1s') time() != bool time();

+---------------------+-------+
| time | value |
+---------------------+-------+
| 1970-01-01T00:16:40 | 0.0 |
+---------------------+-------+

-- time() with table
create table metrics (ts timestamp time index, val double);

Affected Rows: 0

insert into metrics values (0, 0), (1000, 1), (2000, 2), (3000, 3);

Affected Rows: 4

tql eval (1, 2, '1s') time() + metrics;

+---------------------+--------------------------+
| ts | ts / Float64(1000) + val |
+---------------------+--------------------------+
| 1970-01-01T00:00:01 | 2.0 |
| 1970-01-01T00:00:02 | 4.0 |
+---------------------+--------------------------+

tql eval (1, 2, '1s') time() == metrics;

+---------------------+-----+
| ts | val |
+---------------------+-----+
| 1970-01-01T00:00:01 | 1.0 |
| 1970-01-01T00:00:02 | 2.0 |
+---------------------+-----+

tql eval (1, 2, '1s') time() == bool metrics;

+---------------------+--------------------------+
| ts | ts / Float64(1000) = val |
+---------------------+--------------------------+
| 1970-01-01T00:00:01 | 1.0 |
| 1970-01-01T00:00:02 | 1.0 |
+---------------------+--------------------------+

tql eval (1, 2, '1s') metrics + time();

+---------------------+--------------------------+
| ts | val + ts / Float64(1000) |
+---------------------+--------------------------+
| 1970-01-01T00:00:01 | 2.0 |
| 1970-01-01T00:00:02 | 4.0 |
+---------------------+--------------------------+

tql eval (1, 2, '1s') metrics == time();

+---------------------+-----+
| ts | val |
+---------------------+-----+
| 1970-01-01T00:00:01 | 1.0 |
| 1970-01-01T00:00:02 | 2.0 |
+---------------------+-----+

tql eval (1, 2, '1s') metrics == bool time();

+---------------------+--------------------------+
| ts | val = ts / Float64(1000) |
+---------------------+--------------------------+
| 1970-01-01T00:00:01 | 1.0 |
| 1970-01-01T00:00:02 | 1.0 |
+---------------------+--------------------------+

-- other time-related functions
tql eval (1, 2, '1s') hour();

+---------------------+------------------------------+
| time | date_part(Utf8("hour"),time) |
+---------------------+------------------------------+
| 1970-01-01T00:00:01 | 0.0 |
| 1970-01-01T00:00:02 | 0.0 |
+---------------------+------------------------------+

tql eval (1, 2, '1s') hour(metrics);

+---------------------+----------------------------+
| ts | date_part(Utf8("hour"),ts) |
+---------------------+----------------------------+
| 1970-01-01T00:00:01 | 0.0 |
| 1970-01-01T00:00:02 | 0.0 |
+---------------------+----------------------------+

-- 2023-12-01T06:43:43Z
tql eval (1701413023, 1701413023, '1s') hour();

+---------------------+------------------------------+
| time | date_part(Utf8("hour"),time) |
+---------------------+------------------------------+
| 2023-12-01T06:43:43 | 6.0 |
+---------------------+------------------------------+

tql eval (1701413023, 1701413023, '1s') hour(metrics);

++
++

tql eval (1701413023, 1701413023, '1s') minute();

+---------------------+--------------------------------+
| time | date_part(Utf8("minute"),time) |
+---------------------+--------------------------------+
| 2023-12-01T06:43:43 | 43.0 |
+---------------------+--------------------------------+

tql eval (1701413023, 1701413023, '1s') month();

+---------------------+-------------------------------+
| time | date_part(Utf8("month"),time) |
+---------------------+-------------------------------+
| 2023-12-01T06:43:43 | 12.0 |
+---------------------+-------------------------------+

tql eval (1701413023, 1701413023, '1s') year();

+---------------------+------------------------------+
| time | date_part(Utf8("year"),time) |
+---------------------+------------------------------+
| 2023-12-01T06:43:43 | 2023.0 |
+---------------------+------------------------------+

tql eval (1701413023, 1701413023, '1s') day_of_month();

+---------------------+-----------------------------+
| time | date_part(Utf8("day"),time) |
+---------------------+-----------------------------+
| 2023-12-01T06:43:43 | 1.0 |
+---------------------+-----------------------------+

tql eval (1701413023, 1701413023, '1s') day_of_week();

+---------------------+-----------------------------+
| time | date_part(Utf8("dow"),time) |
+---------------------+-----------------------------+
| 2023-12-01T06:43:43 | 5.0 |
+---------------------+-----------------------------+

tql eval (1701413023, 1701413023, '1s') day_of_year();

+---------------------+-----------------------------+
| time | date_part(Utf8("doy"),time) |
+---------------------+-----------------------------+
| 2023-12-01T06:43:43 | 335.0 |
+---------------------+-----------------------------+

-- 2024-01-01T06:43:43Z leap year
tql eval (1704091423, 1704091423, '1s') day_of_year();

+---------------------+-----------------------------+
| time | date_part(Utf8("doy"),time) |
+---------------------+-----------------------------+
| 2024-01-01T06:43:43 | 1.0 |
+---------------------+-----------------------------+

-- 2023-01-01T06:43:43Z
tql eval (1672555423, 1672555423, '1s') days_in_month();

+---------------------+----------------------------------------------------------------------------------------------------------------+
| time | date_part(Utf8("day"),date_trunc(Utf8("month"),time) + IntervalYearMonth("1") - IntervalDayTime("4294967296")) |
+---------------------+----------------------------------------------------------------------------------------------------------------+
| 2023-01-01T06:43:43 | 31.0 |
+---------------------+----------------------------------------------------------------------------------------------------------------+

-- 2023-02-01T06:43:43Z
tql eval (1675233823, 1675233823, '1s') days_in_month();

+---------------------+----------------------------------------------------------------------------------------------------------------+
| time | date_part(Utf8("day"),date_trunc(Utf8("month"),time) + IntervalYearMonth("1") - IntervalDayTime("4294967296")) |
+---------------------+----------------------------------------------------------------------------------------------------------------+
| 2023-02-01T06:43:43 | 28.0 |
+---------------------+----------------------------------------------------------------------------------------------------------------+

-- 2024-02-01T06:43:43Z leap year
tql eval (1706769823, 1706769823, '1s') days_in_month();

+---------------------+----------------------------------------------------------------------------------------------------------------+
| time | date_part(Utf8("day"),date_trunc(Utf8("month"),time) + IntervalYearMonth("1") - IntervalDayTime("4294967296")) |
+---------------------+----------------------------------------------------------------------------------------------------------------+
| 2024-02-01T06:43:43 | 29.0 |
+---------------------+----------------------------------------------------------------------------------------------------------------+

-- 2023-03-01T06:43:43Z
tql eval (1677653023, 1677653023, '1s') days_in_month();

+---------------------+----------------------------------------------------------------------------------------------------------------+
| time | date_part(Utf8("day"),date_trunc(Utf8("month"),time) + IntervalYearMonth("1") - IntervalDayTime("4294967296")) |
+---------------------+----------------------------------------------------------------------------------------------------------------+
| 2023-03-01T06:43:43 | 31.0 |
+---------------------+----------------------------------------------------------------------------------------------------------------+

-- 2023-04-01T06:43:43Z
tql eval (1680331423, 1680331423, '1s') days_in_month();

+---------------------+----------------------------------------------------------------------------------------------------------------+
| time | date_part(Utf8("day"),date_trunc(Utf8("month"),time) + IntervalYearMonth("1") - IntervalDayTime("4294967296")) |
+---------------------+----------------------------------------------------------------------------------------------------------------+
| 2023-04-01T06:43:43 | 30.0 |
+---------------------+----------------------------------------------------------------------------------------------------------------+

-- 2023-05-01T06:43:43Z
tql eval (1682923423, 1682923423, '1s') days_in_month();

+---------------------+----------------------------------------------------------------------------------------------------------------+
| time | date_part(Utf8("day"),date_trunc(Utf8("month"),time) + IntervalYearMonth("1") - IntervalDayTime("4294967296")) |
+---------------------+----------------------------------------------------------------------------------------------------------------+
| 2023-05-01T06:43:43 | 31.0 |
+---------------------+----------------------------------------------------------------------------------------------------------------+

-- 2023-06-01T06:43:43Z
tql eval (1685601823, 1685601823, '1s') days_in_month();

+---------------------+----------------------------------------------------------------------------------------------------------------+
| time | date_part(Utf8("day"),date_trunc(Utf8("month"),time) + IntervalYearMonth("1") - IntervalDayTime("4294967296")) |
+---------------------+----------------------------------------------------------------------------------------------------------------+
| 2023-06-01T06:43:43 | 30.0 |
+---------------------+----------------------------------------------------------------------------------------------------------------+

-- 2023-07-01T06:43:43Z
tql eval (1688193823, 1688193823, '1s') days_in_month();

+---------------------+----------------------------------------------------------------------------------------------------------------+
| time | date_part(Utf8("day"),date_trunc(Utf8("month"),time) + IntervalYearMonth("1") - IntervalDayTime("4294967296")) |
+---------------------+----------------------------------------------------------------------------------------------------------------+
| 2023-07-01T06:43:43 | 31.0 |
+---------------------+----------------------------------------------------------------------------------------------------------------+

-- 2023-08-01T06:43:43Z
tql eval (1690872223, 1690872223, '1s') days_in_month();

+---------------------+----------------------------------------------------------------------------------------------------------------+
| time | date_part(Utf8("day"),date_trunc(Utf8("month"),time) + IntervalYearMonth("1") - IntervalDayTime("4294967296")) |
+---------------------+----------------------------------------------------------------------------------------------------------------+
| 2023-08-01T06:43:43 | 31.0 |
+---------------------+----------------------------------------------------------------------------------------------------------------+

-- 2023-09-01T06:43:43Z
tql eval (1693550623, 1693550623, '1s') days_in_month();

+---------------------+----------------------------------------------------------------------------------------------------------------+
| time | date_part(Utf8("day"),date_trunc(Utf8("month"),time) + IntervalYearMonth("1") - IntervalDayTime("4294967296")) |
+---------------------+----------------------------------------------------------------------------------------------------------------+
| 2023-09-01T06:43:43 | 30.0 |
+---------------------+----------------------------------------------------------------------------------------------------------------+

-- 2023-10-01T06:43:43Z
tql eval (1696142623, 1696142623, '1s') days_in_month();

+---------------------+----------------------------------------------------------------------------------------------------------------+
| time | date_part(Utf8("day"),date_trunc(Utf8("month"),time) + IntervalYearMonth("1") - IntervalDayTime("4294967296")) |
+---------------------+----------------------------------------------------------------------------------------------------------------+
| 2023-10-01T06:43:43 | 31.0 |
+---------------------+----------------------------------------------------------------------------------------------------------------+

-- 2023-11-01T06:43:43Z
tql eval (1698821023, 1698821023, '1s') days_in_month();

+---------------------+----------------------------------------------------------------------------------------------------------------+
| time | date_part(Utf8("day"),date_trunc(Utf8("month"),time) + IntervalYearMonth("1") - IntervalDayTime("4294967296")) |
+---------------------+----------------------------------------------------------------------------------------------------------------+
| 2023-11-01T06:43:43 | 30.0 |
+---------------------+----------------------------------------------------------------------------------------------------------------+

-- 2023-12-01T06:43:43Z
tql eval (1701413023, 1701413023, '1s') days_in_month();

+---------------------+----------------------------------------------------------------------------------------------------------------+
| time | date_part(Utf8("day"),date_trunc(Utf8("month"),time) + IntervalYearMonth("1") - IntervalDayTime("4294967296")) |
+---------------------+----------------------------------------------------------------------------------------------------------------+
| 2023-12-01T06:43:43 | 31.0 |
+---------------------+----------------------------------------------------------------------------------------------------------------+

drop table metrics;

Affected Rows: 0

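A note on the output above, inferred from the results themselves rather than from separate documentation: `time()` evaluates to the query timestamp in seconds while the time index is stored in milliseconds, hence the `time / Float64(1000)` column; the remaining functions (`hour()`, `minute()`, `month()`, `day_of_week()`, and so on) lower to DataFusion `date_part` expressions, as the generated column names show.
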
116
tests/cases/standalone/common/promql/time_fn.sql
Normal file
@@ -0,0 +1,116 @@
-- Test `time()` and related functions.
-- Some cases are part of promql/testdata/functions.test, "Test time-related functions" section
-- And others are from compliance test

-- time() with itself or scalar

tql eval (3000, 3000, '1s') time();

tql eval (0, 0, '1s') time();

tql eval (0.001, 1, '1s') time();

tql eval (0, 0, '1s') time() + 1;

tql eval (0, 0, '1s') 1 + time();

-- expect error: parse error: comparisons between scalars must use BOOL modifier
tql eval (0, 0, '1s') time() < 1;

tql eval (0, 0, '1s') time() < bool 1;

tql eval (0, 0, '1s') time() > bool 1;

tql eval (1000, 1000, '1s') time() + time();

-- expect error: parse error: comparisons between scalars must use BOOL modifier
tql eval (1000, 1000, '1s') time() == time();

tql eval (1000, 1000, '1s') time() == bool time();

tql eval (1000, 1000, '1s') time() != bool time();

-- time() with table

create table metrics (ts timestamp time index, val double);

insert into metrics values (0, 0), (1000, 1), (2000, 2), (3000, 3);

tql eval (1, 2, '1s') time() + metrics;

tql eval (1, 2, '1s') time() == metrics;

tql eval (1, 2, '1s') time() == bool metrics;

tql eval (1, 2, '1s') metrics + time();

tql eval (1, 2, '1s') metrics == time();

tql eval (1, 2, '1s') metrics == bool time();

-- other time-related functions

tql eval (1, 2, '1s') hour();

tql eval (1, 2, '1s') hour(metrics);

-- 2023-12-01T06:43:43Z
tql eval (1701413023, 1701413023, '1s') hour();

tql eval (1701413023, 1701413023, '1s') hour(metrics);

tql eval (1701413023, 1701413023, '1s') minute();

tql eval (1701413023, 1701413023, '1s') month();

tql eval (1701413023, 1701413023, '1s') year();

tql eval (1701413023, 1701413023, '1s') day_of_month();

tql eval (1701413023, 1701413023, '1s') day_of_week();

tql eval (1701413023, 1701413023, '1s') day_of_year();

-- 2024-01-01T06:43:43Z leap year
tql eval (1704091423, 1704091423, '1s') day_of_year();

-- 2023-01-01T06:43:43Z
tql eval (1672555423, 1672555423, '1s') days_in_month();

-- 2023-02-01T06:43:43Z
tql eval (1675233823, 1675233823, '1s') days_in_month();

-- 2024-02-01T06:43:43Z leap year
tql eval (1706769823, 1706769823, '1s') days_in_month();

-- 2023-03-01T06:43:43Z
tql eval (1677653023, 1677653023, '1s') days_in_month();

-- 2023-04-01T06:43:43Z
tql eval (1680331423, 1680331423, '1s') days_in_month();

-- 2023-05-01T06:43:43Z
tql eval (1682923423, 1682923423, '1s') days_in_month();

-- 2023-06-01T06:43:43Z
tql eval (1685601823, 1685601823, '1s') days_in_month();

-- 2023-07-01T06:43:43Z
tql eval (1688193823, 1688193823, '1s') days_in_month();

-- 2023-08-01T06:43:43Z
tql eval (1690872223, 1690872223, '1s') days_in_month();

-- 2023-09-01T06:43:43Z
tql eval (1693550623, 1693550623, '1s') days_in_month();

-- 2023-10-01T06:43:43Z
tql eval (1696142623, 1696142623, '1s') days_in_month();

-- 2023-11-01T06:43:43Z
tql eval (1698821023, 1698821023, '1s') days_in_month();

-- 2023-12-01T06:43:43Z
tql eval (1701413023, 1701413023, '1s') days_in_month();

drop table metrics;
@@ -81,6 +81,23 @@ SELECT min(val) RANGE '5s' FROM host ALIGN '5s' FILL 3.0;

Error: 3000(PlanQuery), DataFusion error: Error during planning: 3.0 is not a valid fill option, fail to convert to a const value. { Arrow error: Cast error: Cannot cast string '3.0' to value of Int64 type }

-- 2.7 zero align/range
SELECT min(val) RANGE '5s' FROM host ALIGN '0s';

Error: 3000(PlanQuery), DataFusion error: Error during planning: duration must be greater than 0

SELECT min(val) RANGE '0s' FROM host ALIGN '5s';

Error: 3000(PlanQuery), DataFusion error: Error during planning: duration must be greater than 0

SELECT min(val) RANGE '5s' FROM host ALIGN (INTERVAL '0' day);

Error: 2000(InvalidSyntax), Range Query: Can't use 0 as align in Range Query

SELECT min(val) RANGE (INTERVAL '0' day) FROM host ALIGN '5s';

Error: 2000(InvalidSyntax), Range Query: Invalid Range expr `MIN(host.val) RANGE IntervalMonthDayNano("0") FILL NULL`, Can't use 0 as range in Range Query

DROP TABLE host;

Affected Rows: 0

@@ -58,4 +58,14 @@ SELECT min(val) RANGE '5s', min(val) RANGE '5s' FILL NULL FROM host ALIGN '5s';

SELECT min(val) RANGE '5s' FROM host ALIGN '5s' FILL 3.0;

-- 2.7 zero align/range

SELECT min(val) RANGE '5s' FROM host ALIGN '0s';

SELECT min(val) RANGE '0s' FROM host ALIGN '5s';

SELECT min(val) RANGE '5s' FROM host ALIGN (INTERVAL '0' day);

SELECT min(val) RANGE (INTERVAL '0' day) FROM host ALIGN '5s';

DROP TABLE host;

46
tests/cases/standalone/common/range/interval.result
Normal file
@@ -0,0 +1,46 @@
CREATE TABLE host (
ts timestamp(3) time index,
host STRING PRIMARY KEY,
val BIGINT,
);

Affected Rows: 0

INSERT INTO TABLE host VALUES
("1970-01-01T01:00:00+08:00", 'host1', 0),
("1970-01-01T02:00:00+08:00", 'host1', 1),
("1971-01-02T03:00:00+08:00", 'host1', 2),
("1971-01-02T04:00:00+08:00", 'host1', 3),
("1970-01-01T01:00:00+08:00", 'host2', 4),
("1970-01-01T02:00:00+08:00", 'host2', 5),
("1971-01-02T03:00:00+08:00", 'host2', 6),
("1971-01-02T04:00:00+08:00", 'host2', 7);

Affected Rows: 8

SELECT ts, host, min(val) RANGE (INTERVAL '1 year') FROM host ALIGN (INTERVAL '1 year') ORDER BY host, ts;

+---------------------+-------+--------------------------------------------------------------------------------------+
| ts | host | MIN(host.val) RANGE IntervalMonthDayNano("950737950171172051122527404032") FILL NULL |
+---------------------+-------+--------------------------------------------------------------------------------------+
| 1970-01-01T00:00:00 | host1 | 0 |
| 1971-12-22T00:00:00 | host1 | 2 |
| 1970-01-01T00:00:00 | host2 | 4 |
| 1971-12-22T00:00:00 | host2 | 6 |
+---------------------+-------+--------------------------------------------------------------------------------------+

SELECT ts, host, min(val) RANGE (INTERVAL '1' year) FROM host ALIGN (INTERVAL '1' year) ORDER BY host, ts;

+---------------------+-------+--------------------------------------------------------------------------------------+
| ts | host | MIN(host.val) RANGE IntervalMonthDayNano("950737950171172051122527404032") FILL NULL |
+---------------------+-------+--------------------------------------------------------------------------------------+
| 1970-01-01T00:00:00 | host1 | 0 |
| 1971-12-22T00:00:00 | host1 | 2 |
| 1970-01-01T00:00:00 | host2 | 4 |
| 1971-12-22T00:00:00 | host2 | 6 |
+---------------------+-------+--------------------------------------------------------------------------------------+

DROP TABLE host;

Affected Rows: 0

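The long literal in the column names above is the raw Arrow `IntervalMonthDayNano` value, a 128-bit integer that packs months into the high 32 bits, days into the next 32, and nanoseconds into the low 64. `INTERVAL '1 year'` is 12 months, so the encoded value is 12 × 2^96 = 950737950171172051122527404032, matching the displayed literal; a plain one-day interval encodes as 2^64 = 18446744073709551616, which appears in the `to.result` file below.
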
21
tests/cases/standalone/common/range/interval.sql
Normal file
@@ -0,0 +1,21 @@
CREATE TABLE host (
ts timestamp(3) time index,
host STRING PRIMARY KEY,
val BIGINT,
);

INSERT INTO TABLE host VALUES
("1970-01-01T01:00:00+08:00", 'host1', 0),
("1970-01-01T02:00:00+08:00", 'host1', 1),
("1971-01-02T03:00:00+08:00", 'host1', 2),
("1971-01-02T04:00:00+08:00", 'host1', 3),
("1970-01-01T01:00:00+08:00", 'host2', 4),
("1970-01-01T02:00:00+08:00", 'host2', 5),
("1971-01-02T03:00:00+08:00", 'host2', 6),
("1971-01-02T04:00:00+08:00", 'host2', 7);

SELECT ts, host, min(val) RANGE (INTERVAL '1 year') FROM host ALIGN (INTERVAL '1 year') ORDER BY host, ts;

SELECT ts, host, min(val) RANGE (INTERVAL '1' year) FROM host ALIGN (INTERVAL '1' year) ORDER BY host, ts;

DROP TABLE host;
@@ -55,9 +55,9 @@ EXPLAIN SELECT ts, host, min(val) RANGE '5s' FROM host ALIGN '5s';
+-+-+
| plan_type_| plan_|
+-+-+
| logical_plan_| RangeSelect: range_exprs=[MIN(host.val) RANGE 5s FILL NULL], align=5s time_index=ts_|
| logical_plan_| RangeSelect: range_exprs=[MIN(host.val) RANGE 5s FILL NULL], align=5000ms, align_to=0ms, align_by=[host.host], time_index=ts |
|_|_MergeScan [is_placeholder=false]_|
| physical_plan | RangeSelectExec: range_expr=[RangeFnExec{ MIN(host.val), range: 5000}], align=5000, time_index=ts, by=[host@1] |
| physical_plan | RangeSelectExec: range_expr=[MIN(host.val) RANGE 5s FILL NULL], align=5000ms, align_to=0ms, align_by=[host@1], time_index=ts |
|_|_MergeScanExec: REDACTED
|_|_|
+-+-+
@@ -71,7 +71,7 @@ EXPLAIN ANALYZE SELECT ts, host, min(val) RANGE '5s' FROM host ALIGN '5s';
+-+-+
| plan_type_| plan_|
+-+-+
| Plan with Metrics | RangeSelectExec: range_expr=[RangeFnExec{ MIN(host.val), range: 5000}], align=5000, time_index=ts, by=[host@1], REDACTED
| Plan with Metrics | RangeSelectExec: range_expr=[MIN(host.val) RANGE 5s FILL NULL], align=5000ms, align_to=0ms, align_by=[host@1], time_index=ts, REDACTED
|_|_MergeScanExec: REDACTED
|_|_|
+-+-+

99
tests/cases/standalone/common/range/to.result
Normal file
@@ -0,0 +1,99 @@
CREATE TABLE host (
ts timestamp(3) time index,
host STRING PRIMARY KEY,
val BIGINT,
);

Affected Rows: 0

INSERT INTO TABLE host VALUES
("1970-01-01T23:30:00+00:00", 'host1', 0),
("1970-01-01T22:30:00+00:00", 'host1', 1),
("1970-01-02T23:30:00+00:00", 'host1', 2),
("1970-01-02T22:30:00+00:00", 'host1', 3),
("1970-01-01T23:30:00+00:00", 'host2', 4),
("1970-01-01T22:30:00+00:00", 'host2', 5),
("1970-01-02T23:30:00+00:00", 'host2', 6),
("1970-01-02T22:30:00+00:00", 'host2', 7);

Affected Rows: 8

SELECT ts, host, min(val) RANGE '1d' FROM host ALIGN '1d' ORDER BY host, ts;

+---------------------+-------+----------------------------------+
| ts | host | MIN(host.val) RANGE 1d FILL NULL |
+---------------------+-------+----------------------------------+
| 1970-01-02T00:00:00 | host1 | 0 |
| 1970-01-03T00:00:00 | host1 | 2 |
| 1970-01-02T00:00:00 | host2 | 4 |
| 1970-01-03T00:00:00 | host2 | 6 |
+---------------------+-------+----------------------------------+

SELECT ts, host, min(val) RANGE '1d' FROM host ALIGN '1d' TO CALENDAR ORDER BY host, ts;

+---------------------+-------+----------------------------------+
| ts | host | MIN(host.val) RANGE 1d FILL NULL |
+---------------------+-------+----------------------------------+
| 1970-01-02T00:00:00 | host1 | 0 |
| 1970-01-03T00:00:00 | host1 | 2 |
| 1970-01-02T00:00:00 | host2 | 4 |
| 1970-01-03T00:00:00 | host2 | 6 |
+---------------------+-------+----------------------------------+

SELECT ts, host, min(val) RANGE '1d' FROM host ALIGN '1d' TO UNKNOWN ORDER BY host, ts;

Error: 3000(PlanQuery), DataFusion error: Error during planning: Illegal `align to` argument `UNKNOWN` in range select query, can't be parse as NOW/CALENDAR/Timestamp, error: Failed to parse a string into Timestamp, raw string: UNKNOWN

SELECT ts, host, min(val) RANGE '1d' FROM host ALIGN '1d' TO '1900-01-01T00:00:00+01:00' ORDER BY host, ts;

+---------------------+-------+----------------------------------+
| ts | host | MIN(host.val) RANGE 1d FILL NULL |
+---------------------+-------+----------------------------------+
| 1970-01-01T23:00:00 | host1 | 1 |
| 1970-01-02T23:00:00 | host1 | 0 |
| 1970-01-03T23:00:00 | host1 | 2 |
| 1970-01-01T23:00:00 | host2 | 5 |
| 1970-01-02T23:00:00 | host2 | 4 |
| 1970-01-03T23:00:00 | host2 | 6 |
+---------------------+-------+----------------------------------+

SELECT ts, host, min(val) RANGE '1d' FROM host ALIGN '1d' TO '1970-01-01T00:00:00+01:00' ORDER BY host, ts;

+---------------------+-------+----------------------------------+
| ts | host | MIN(host.val) RANGE 1d FILL NULL |
+---------------------+-------+----------------------------------+
| 1970-01-01T23:00:00 | host1 | 1 |
| 1970-01-02T23:00:00 | host1 | 0 |
| 1970-01-03T23:00:00 | host1 | 2 |
| 1970-01-01T23:00:00 | host2 | 5 |
| 1970-01-02T23:00:00 | host2 | 4 |
| 1970-01-03T23:00:00 | host2 | 6 |
+---------------------+-------+----------------------------------+

SELECT ts, host, min(val) RANGE '1d' FROM host ALIGN '1d' TO '2023-01-01T00:00:00+01:00' ORDER BY host, ts;

+---------------------+-------+----------------------------------+
| ts | host | MIN(host.val) RANGE 1d FILL NULL |
+---------------------+-------+----------------------------------+
| 1970-01-01T23:00:00 | host1 | 1 |
| 1970-01-02T23:00:00 | host1 | 0 |
| 1970-01-03T23:00:00 | host1 | 2 |
| 1970-01-01T23:00:00 | host2 | 5 |
| 1970-01-02T23:00:00 | host2 | 4 |
| 1970-01-03T23:00:00 | host2 | 6 |
+---------------------+-------+----------------------------------+

SELECT ts, min(val) RANGE (INTERVAL '1' day) FROM host ALIGN (INTERVAL '1' day) TO '1900-01-01T00:00:00+01:00' by (1) ORDER BY ts;

+---------------------+----------------------------------------------------------------------------+
| ts | MIN(host.val) RANGE IntervalMonthDayNano("18446744073709551616") FILL NULL |
+---------------------+----------------------------------------------------------------------------+
| 1970-01-01T23:00:00 | 1 |
| 1970-01-02T23:00:00 | 0 |
| 1970-01-03T23:00:00 | 2 |
+---------------------+----------------------------------------------------------------------------+

DROP TABLE host;

Affected Rows: 0

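A worked check of the `TO` offsets above: `'1900-01-01T00:00:00+01:00'` is 1899-12-31T23:00:00 UTC, and aligned windows start at that origin plus whole multiples of the 1d step, so every bucket boundary lands at 23:00 UTC. The same holds for the 1970 and 2023 variants, which is why all three timestamp arguments produce identical 23:00-aligned rows.
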
31
tests/cases/standalone/common/range/to.sql
Normal file
@@ -0,0 +1,31 @@
CREATE TABLE host (
ts timestamp(3) time index,
host STRING PRIMARY KEY,
val BIGINT,
);

INSERT INTO TABLE host VALUES
("1970-01-01T23:30:00+00:00", 'host1', 0),
("1970-01-01T22:30:00+00:00", 'host1', 1),
("1970-01-02T23:30:00+00:00", 'host1', 2),
("1970-01-02T22:30:00+00:00", 'host1', 3),
("1970-01-01T23:30:00+00:00", 'host2', 4),
("1970-01-01T22:30:00+00:00", 'host2', 5),
("1970-01-02T23:30:00+00:00", 'host2', 6),
("1970-01-02T22:30:00+00:00", 'host2', 7);

SELECT ts, host, min(val) RANGE '1d' FROM host ALIGN '1d' ORDER BY host, ts;

SELECT ts, host, min(val) RANGE '1d' FROM host ALIGN '1d' TO CALENDAR ORDER BY host, ts;

SELECT ts, host, min(val) RANGE '1d' FROM host ALIGN '1d' TO UNKNOWN ORDER BY host, ts;

SELECT ts, host, min(val) RANGE '1d' FROM host ALIGN '1d' TO '1900-01-01T00:00:00+01:00' ORDER BY host, ts;

SELECT ts, host, min(val) RANGE '1d' FROM host ALIGN '1d' TO '1970-01-01T00:00:00+01:00' ORDER BY host, ts;

SELECT ts, host, min(val) RANGE '1d' FROM host ALIGN '1d' TO '2023-01-01T00:00:00+01:00' ORDER BY host, ts;

SELECT ts, min(val) RANGE (INTERVAL '1' day) FROM host ALIGN (INTERVAL '1' day) TO '1900-01-01T00:00:00+01:00' by (1) ORDER BY ts;

DROP TABLE host;
@@ -0,0 +1,104 @@
-- Test aggregation functions with decimal
-- Port from https://github.com/duckdb/duckdb/blob/main/test/sql/types/decimal/decimal_aggregates.test
SELECT arrow_typeof(FIRST_VALUE('0.1'::DECIMAL(4,1)));

+----------------------------------------+
| arrow_typeof(FIRST_VALUE(Utf8("0.1"))) |
+----------------------------------------+
| Decimal128(4, 1) |
+----------------------------------------+

-- first_value
SELECT FIRST_VALUE(NULL::DECIMAL),
FIRST_VALUE('0.1'::DECIMAL(4,1))::VARCHAR,
FIRST_VALUE('4938245.1'::DECIMAL(9,1))::VARCHAR,
FIRST_VALUE('45672564564938245.1'::DECIMAL(18,1))::VARCHAR,
FIRST_VALUE('4567645908450368043562342564564938245.1'::DECIMAL(38,1))::VARCHAR;

+-------------------+--------------------------+--------------------------------+------------------------------------------+--------------------------------------------------------------+
| FIRST_VALUE(NULL) | FIRST_VALUE(Utf8("0.1")) | FIRST_VALUE(Utf8("4938245.1")) | FIRST_VALUE(Utf8("45672564564938245.1")) | FIRST_VALUE(Utf8("4567645908450368043562342564564938245.1")) |
+-------------------+--------------------------+--------------------------------+------------------------------------------+--------------------------------------------------------------+
| | 0.1 | 4938245.1 | 45672564564938245.1 | 4567645908450368043562342564564938245.1 |
+-------------------+--------------------------+--------------------------------+------------------------------------------+--------------------------------------------------------------+

-- min
SELECT MIN(NULL::DECIMAL),
MIN('0.1'::DECIMAL(4,1))::VARCHAR,
MIN('4938245.1'::DECIMAL(9,1))::VARCHAR,
MIN('45672564564938245.1'::DECIMAL(18,1))::VARCHAR,
MIN('4567645908450368043562342564564938245.1'::DECIMAL(38,1))::VARCHAR;

+-----------+------------------+------------------------+----------------------------------+------------------------------------------------------+
| MIN(NULL) | MIN(Utf8("0.1")) | MIN(Utf8("4938245.1")) | MIN(Utf8("45672564564938245.1")) | MIN(Utf8("4567645908450368043562342564564938245.1")) |
+-----------+------------------+------------------------+----------------------------------+------------------------------------------------------+
| | 0.1 | 4938245.1 | 45672564564938245.1 | 4567645908450368043562342564564938245.1 |
+-----------+------------------+------------------------+----------------------------------+------------------------------------------------------+

-- max
SELECT MAX(NULL::DECIMAL),
MAX('0.1'::DECIMAL(4,1))::VARCHAR,
MAX('4938245.1'::DECIMAL(9,1))::VARCHAR,
MAX('45672564564938245.1'::DECIMAL(18,1))::VARCHAR,
MAX('4567645908450368043562342564564938245.1'::DECIMAL(38,1))::VARCHAR;

+-----------+------------------+------------------------+----------------------------------+------------------------------------------------------+
| MAX(NULL) | MAX(Utf8("0.1")) | MAX(Utf8("4938245.1")) | MAX(Utf8("45672564564938245.1")) | MAX(Utf8("4567645908450368043562342564564938245.1")) |
+-----------+------------------+------------------------+----------------------------------+------------------------------------------------------+
| | 0.1 | 4938245.1 | 45672564564938245.1 | 4567645908450368043562342564564938245.1 |
+-----------+------------------+------------------------+----------------------------------+------------------------------------------------------+

-- sum
SELECT SUM(NULL::DECIMAL),
SUM('0.1'::DECIMAL(4,1))::VARCHAR,
SUM('4938245.1'::DECIMAL(9,1))::VARCHAR,
SUM('45672564564938245.1'::DECIMAL(18,1))::VARCHAR,
SUM('4567645908450368043562342564564938245.1'::DECIMAL(38,1))::VARCHAR;

+-----------+------------------+------------------------+----------------------------------+------------------------------------------------------+
| SUM(NULL) | SUM(Utf8("0.1")) | SUM(Utf8("4938245.1")) | SUM(Utf8("45672564564938245.1")) | SUM(Utf8("4567645908450368043562342564564938245.1")) |
+-----------+------------------+------------------------+----------------------------------+------------------------------------------------------+
| | 0.1 | 4938245.1 | 45672564564938245.1 | 4567645908450368043562342564564938245.1 |
+-----------+------------------+------------------------+----------------------------------+------------------------------------------------------+

-- decimal aggregates over a table
CREATE TABLE decimals (
d1 DECIMAL(4,1),
d2 DECIMAL(9,1),
d3 DECIMAL(18,1),
d4 DECIMAL(38,1),
ts timestamp time index,
);

Affected Rows: 0

INSERT INTO decimals values
(123,123*123,123*123*123,123*123*123*123,1000),
(456,456*456,456*456*456,456*456*456*456,2000),
(789,789*789,789*789*789,789*789*789*789,3000);

Affected Rows: 3

SELECT SUM(d1)::VARCHAR, SUM(d2)::VARCHAR, SUM(d3)::VARCHAR, SUM(d4)::VARCHAR FROM decimals;

+------------------+------------------+------------------+------------------+
| SUM(decimals.d1) | SUM(decimals.d2) | SUM(decimals.d3) | SUM(decimals.d4) |
+------------------+------------------+------------------+------------------+
| 1368.0 | 845586.0 | 587848752.0 | 430998662178.0 |
+------------------+------------------+------------------+------------------+

INSERT INTO decimals VALUES ('0.1', '0.1', '0.1', '0.1', 4000), ('0.2', '0.2', '0.2', '0.2', 5000);

Affected Rows: 2

SELECT SUM(d1)::VARCHAR, SUM(d2)::VARCHAR, SUM(d3)::VARCHAR, SUM(d4)::VARCHAR FROM decimals;

+------------------+------------------+------------------+------------------+
| SUM(decimals.d1) | SUM(decimals.d2) | SUM(decimals.d3) | SUM(decimals.d4) |
+------------------+------------------+------------------+------------------+
| 1368.3 | 845586.3 | 587848752.3 | 430998662178.3 |
+------------------+------------------+------------------+------------------+

DROP TABLE decimals;

Affected Rows: 0

@@ -0,0 +1,59 @@
-- Test aggregation functions with decimal
-- Port from https://github.com/duckdb/duckdb/blob/main/test/sql/types/decimal/decimal_aggregates.test

SELECT arrow_typeof(FIRST_VALUE('0.1'::DECIMAL(4,1)));

-- first_value

SELECT FIRST_VALUE(NULL::DECIMAL),
FIRST_VALUE('0.1'::DECIMAL(4,1))::VARCHAR,
FIRST_VALUE('4938245.1'::DECIMAL(9,1))::VARCHAR,
FIRST_VALUE('45672564564938245.1'::DECIMAL(18,1))::VARCHAR,
FIRST_VALUE('4567645908450368043562342564564938245.1'::DECIMAL(38,1))::VARCHAR;

-- min

SELECT MIN(NULL::DECIMAL),
MIN('0.1'::DECIMAL(4,1))::VARCHAR,
MIN('4938245.1'::DECIMAL(9,1))::VARCHAR,
MIN('45672564564938245.1'::DECIMAL(18,1))::VARCHAR,
MIN('4567645908450368043562342564564938245.1'::DECIMAL(38,1))::VARCHAR;

-- max

SELECT MAX(NULL::DECIMAL),
MAX('0.1'::DECIMAL(4,1))::VARCHAR,
MAX('4938245.1'::DECIMAL(9,1))::VARCHAR,
MAX('45672564564938245.1'::DECIMAL(18,1))::VARCHAR,
MAX('4567645908450368043562342564564938245.1'::DECIMAL(38,1))::VARCHAR;

-- sum

SELECT SUM(NULL::DECIMAL),
SUM('0.1'::DECIMAL(4,1))::VARCHAR,
SUM('4938245.1'::DECIMAL(9,1))::VARCHAR,
SUM('45672564564938245.1'::DECIMAL(18,1))::VARCHAR,
SUM('4567645908450368043562342564564938245.1'::DECIMAL(38,1))::VARCHAR;

-- decimal aggregates over a table

CREATE TABLE decimals (
d1 DECIMAL(4,1),
d2 DECIMAL(9,1),
d3 DECIMAL(18,1),
d4 DECIMAL(38,1),
ts timestamp time index,
);

INSERT INTO decimals values
(123,123*123,123*123*123,123*123*123*123,1000),
(456,456*456,456*456*456,456*456*456*456,2000),
(789,789*789,789*789*789,789*789*789*789,3000);

SELECT SUM(d1)::VARCHAR, SUM(d2)::VARCHAR, SUM(d3)::VARCHAR, SUM(d4)::VARCHAR FROM decimals;

INSERT INTO decimals VALUES ('0.1', '0.1', '0.1', '0.1', 4000), ('0.2', '0.2', '0.2', '0.2', 5000);

SELECT SUM(d1)::VARCHAR, SUM(d2)::VARCHAR, SUM(d3)::VARCHAR, SUM(d4)::VARCHAR FROM decimals;

DROP TABLE decimals;
@@ -0,0 +1,267 @@
-- Port from https://github.com/duckdb/duckdb/blob/main/test/sql/types/decimal/decimal_arithmetic.test
-- negate
SELECT -('0.1'::DECIMAL), -('-0.1'::DECIMAL);

+-----------------+------------------+
| (- Utf8("0.1")) | (- Utf8("-0.1")) |
+-----------------+------------------+
| -0.1000000000 | 0.1000000000 |
+-----------------+------------------+

-- unary +
SELECT +('0.1'::DECIMAL), +('-0.1'::DECIMAL);

+--------------+---------------+
| Utf8("0.1") | Utf8("-0.1") |
+--------------+---------------+
| 0.1000000000 | -0.1000000000 |
+--------------+---------------+

-- addition
SELECT '0.1'::DECIMAL + '0.1'::DECIMAL;

+---------------------------+
| Utf8("0.1") + Utf8("0.1") |
+---------------------------+
| 0.2000000000 |
+---------------------------+

-- addition with non-decimal
SELECT '0.1'::DECIMAL + 1::INTEGER;

+------------------------+
| Utf8("0.1") + Int64(1) |
+------------------------+
| 1.1000000000 |
+------------------------+

SELECT '0.5'::DECIMAL(4,4) + '0.5'::DECIMAL(4,4);

+---------------------------+
| Utf8("0.5") + Utf8("0.5") |
+---------------------------+
| 1.0000 |
+---------------------------+

-- addition between different decimal types
SELECT '0.5'::DECIMAL(1,1) + '100.0'::DECIMAL(3,0);

+-----------------------------+
| Utf8("0.5") + Utf8("100.0") |
+-----------------------------+
| 100.5 |
+-----------------------------+

-- test decimals and integers with big decimals
SELECT ('0.5'::DECIMAL(1,1) + 10000)::VARCHAR,
('0.54321'::DECIMAL(5,5) + 10000)::VARCHAR,
('0.5432154321'::DECIMAL(10,10) + 10000)::VARCHAR,
('0.543215432154321'::DECIMAL(15,15) + 10000::DECIMAL(20,15))::VARCHAR,
('0.54321543215432154321'::DECIMAL(20,20) + 10000)::VARCHAR,
('0.5432154321543215432154321'::DECIMAL(25,25) + 10000)::VARCHAR;

+----------------------------+--------------------------------+-------------------------------------+------------------------------------------+-----------------------------------------------+----------------------------------------------------+
| Utf8("0.5") + Int64(10000) | Utf8("0.54321") + Int64(10000) | Utf8("0.5432154321") + Int64(10000) | Utf8("0.543215432154321") + Int64(10000) | Utf8("0.54321543215432154321") + Int64(10000) | Utf8("0.5432154321543215432154321") + Int64(10000) |
+----------------------------+--------------------------------+-------------------------------------+------------------------------------------+-----------------------------------------------+----------------------------------------------------+
| 10000.5 | 10000.54321 | 10000.5432154321 | 10000.543215432154321 | 10000.54321543215432154321 | 10000.5432154321543215432154321 |
+----------------------------+--------------------------------+-------------------------------------+------------------------------------------+-----------------------------------------------+----------------------------------------------------+

-- out of range
SELECT ('0.54321543215432154321543215432154321'::DECIMAL(35,35) + 10000)::VARCHAR;

Error: 3001(EngineExecuteQuery), DataFusion error: Compute error: Overflow happened on: 10000 * 100000000000000000000000000000000000

-- different types
|
||||
SELECT '0.5'::DECIMAL(1,1) + 1::TINYINT,
|
||||
'0.5'::DECIMAL(1,1) + 2::SMALLINT,
|
||||
'0.5'::DECIMAL(1,1) + 3::INTEGER,
|
||||
'0.5'::DECIMAL(1,1) + 4::BIGINT;
|
||||
|
||||
+------------------------+------------------------+------------------------+------------------------+
|
||||
| Utf8("0.5") + Int64(1) | Utf8("0.5") + Int64(2) | Utf8("0.5") + Int64(3) | Utf8("0.5") + Int64(4) |
|
||||
+------------------------+------------------------+------------------------+------------------------+
|
||||
| 1.5 | 2.5 | 3.5 | 4.5 |
|
||||
+------------------------+------------------------+------------------------+------------------------+
|
||||
|
||||
-- negative numbers
|
||||
SELECT '0.5'::DECIMAL(1,1) + -1::TINYINT,
|
||||
'0.5'::DECIMAL(1,1) + -2::SMALLINT,
|
||||
'0.5'::DECIMAL(1,1) + -3::INTEGER,
|
||||
'0.5'::DECIMAL(1,1) + -4::BIGINT;
|
||||
|
||||
+----------------------------+----------------------------+----------------------------+----------------------------+
|
||||
| Utf8("0.5") + (- Int64(1)) | Utf8("0.5") + (- Int64(2)) | Utf8("0.5") + (- Int64(3)) | Utf8("0.5") + (- Int64(4)) |
|
||||
+----------------------------+----------------------------+----------------------------+----------------------------+
|
||||
| -0.5 | -1.5 | -2.5 | -3.5 |
|
||||
+----------------------------+----------------------------+----------------------------+----------------------------+
|
||||
|
||||
-- subtract
|
||||
SELECT '0.5'::DECIMAL(1,1) - 1::TINYINT,
|
||||
'0.5'::DECIMAL(1,1) - 2::SMALLINT,
|
||||
'0.5'::DECIMAL(1,1) - 3::INTEGER,
|
||||
'0.5'::DECIMAL(1,1) - 4::BIGINT;
|
||||
|
||||
+------------------------+------------------------+------------------------+------------------------+
|
||||
| Utf8("0.5") - Int64(1) | Utf8("0.5") - Int64(2) | Utf8("0.5") - Int64(3) | Utf8("0.5") - Int64(4) |
|
||||
+------------------------+------------------------+------------------------+------------------------+
|
||||
| -0.5 | -1.5 | -2.5 | -3.5 |
|
||||
+------------------------+------------------------+------------------------+------------------------+
|
||||
|
||||
-- negative numbers
|
||||
SELECT '0.5'::DECIMAL(1,1) - -1::TINYINT,
|
||||
'0.5'::DECIMAL(1,1) - -2::SMALLINT,
|
||||
'0.5'::DECIMAL(1,1) - -3::INTEGER,
|
||||
'0.5'::DECIMAL(1,1) - -4::BIGINT;
|
||||
|
||||
+----------------------------+----------------------------+----------------------------+----------------------------+
|
||||
| Utf8("0.5") - (- Int64(1)) | Utf8("0.5") - (- Int64(2)) | Utf8("0.5") - (- Int64(3)) | Utf8("0.5") - (- Int64(4)) |
|
||||
+----------------------------+----------------------------+----------------------------+----------------------------+
|
||||
| 1.5 | 2.5 | 3.5 | 4.5 |
|
||||
+----------------------------+----------------------------+----------------------------+----------------------------+
|
||||
|
||||
-- now with a table
|
||||
CREATE TABLE decimals(d DECIMAL(3, 2), ts timestamp time index);
|
||||
|
||||
Affected Rows: 0
|
||||
|
||||
INSERT INTO decimals VALUES ('0.1',1000), ('0.2',1000);
|
||||
|
||||
Affected Rows: 2
|
||||
|
||||
SELECT * FROM decimals;
|
||||
|
||||
+------+---------------------+
|
||||
| d | ts |
|
||||
+------+---------------------+
|
||||
| 0.20 | 1970-01-01T00:00:01 |
|
||||
+------+---------------------+
|
||||
|
||||
SELECT d + 10000 FROM decimals;
|
||||
|
||||
+---------------------------+
|
||||
| decimals.d + Int64(10000) |
|
||||
+---------------------------+
|
||||
| 10000.20 |
|
||||
+---------------------------+
|
||||
|
||||
SELECT d + '0.1'::DECIMAL, d + 10000 FROM decimals;
|
||||
|
||||
+--------------------------+---------------------------+
|
||||
| decimals.d + Utf8("0.1") | decimals.d + Int64(10000) |
|
||||
+--------------------------+---------------------------+
|
||||
| 0.3000000000 | 10000.20 |
|
||||
+--------------------------+---------------------------+
|
||||
|
||||
DROP TABLE decimals;
|
||||
|
||||
Affected Rows: 0
|
||||
|
||||
-- multiplication
|
||||
SELECT '0.1'::DECIMAL * '10.0'::DECIMAL;
|
||||
|
||||
+----------------------------+
|
||||
| Utf8("0.1") * Utf8("10.0") |
|
||||
+----------------------------+
|
||||
| 1.00000000000000000000 |
|
||||
+----------------------------+
|
||||
|
||||
SELECT arrow_typeof('0.1'::DECIMAL(2,1) * '10.0'::DECIMAL(3,1));
|
||||
|
||||
+------------------------------------------+
|
||||
| arrow_typeof(Utf8("0.1") * Utf8("10.0")) |
|
||||
+------------------------------------------+
|
||||
| Decimal128(6, 2) |
|
||||
+------------------------------------------+
|
||||
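The Decimal128(6, 2) above is a worked instance of the Arrow/DataFusion result-type rule for decimal multiplication: precision = p1 + p2 + 1 and scale = s1 + s2, so DECIMAL(2,1) * DECIMAL(3,1) yields precision 2 + 3 + 1 = 6 and scale 1 + 1 = 2. The same arrow_typeof probe can be aimed at other operators; a hypothetical query, not part of the captured test output:

SELECT arrow_typeof('0.1'::DECIMAL(2,1) + '10.0'::DECIMAL(3,1));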
SELECT '0.1'::DECIMAL * '0.1'::DECIMAL;

+---------------------------+
| Utf8("0.1") * Utf8("0.1") |
+---------------------------+
| 0.01000000000000000000    |
+---------------------------+

-- multiplication with non-decimal
SELECT '0.1'::DECIMAL * 10::INTEGER;

+-------------------------+
| Utf8("0.1") * Int64(10) |
+-------------------------+
| 1.0000000000            |
+-------------------------+

SELECT '5.0'::DECIMAL(4,3) * '5.0'::DECIMAL(4,3);

+---------------------------+
| Utf8("5.0") * Utf8("5.0") |
+---------------------------+
| 25.000000                 |
+---------------------------+

-- negative multiplication
SELECT '-5.0'::DECIMAL(4,3) * '5.0'::DECIMAL(4,3);

+----------------------------+
| Utf8("-5.0") * Utf8("5.0") |
+----------------------------+
| -25.000000                 |
+----------------------------+

-- no precision is lost
SELECT ('18.25'::DECIMAL(4,2) * '17.25'::DECIMAL(4,2))::VARCHAR;

+-------------------------------+
| Utf8("18.25") * Utf8("17.25") |
+-------------------------------+
| 314.8125                      |
+-------------------------------+

-- different types
SELECT '0.001'::DECIMAL * 100::TINYINT,
       '0.001'::DECIMAL * 10000::SMALLINT,
       '0.001'::DECIMAL * 1000000::INTEGER,
       '0.001'::DECIMAL * 100000000::BIGINT;

+----------------------------+------------------------------+--------------------------------+----------------------------------+
| Utf8("0.001") * Int64(100) | Utf8("0.001") * Int64(10000) | Utf8("0.001") * Int64(1000000) | Utf8("0.001") * Int64(100000000) |
+----------------------------+------------------------------+--------------------------------+----------------------------------+
| 0.1000000000               | 10.0000000000                | 1000.0000000000                | 100000.0000000000                |
+----------------------------+------------------------------+--------------------------------+----------------------------------+

-- multiplication could not be performed exactly: throw error
SELECT '0.000000000000000000000000000001'::DECIMAL(38,30) * '0.000000000000000000000000000001'::DECIMAL(38,30);

Error: 3000(PlanQuery), Failed to plan SQL: Error during planning: Cannot get result type for decimal operation Decimal128(38, 30) * Decimal128(38, 30): Invalid argument error: Output scale of Decimal128(38, 30) * Decimal128(38, 30) would exceed max scale of 38
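This plan-time rejection is the same multiplication typing rule hitting its ceiling: the output scale would be s1 + s2 = 30 + 30 = 60, which exceeds Decimal128's maximum scale of 38, so planning fails before any value is computed.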
-- test addition, subtraction and multiplication with various scales and precisions
SELECT 2.0 + 1.0 as col1,
       2.0000 + 1.0000 as col2,
       2.000000000000 + 1.000000000000 as col3,
       2.00000000000000000000 + 1.00000000000000000000 as col4;

+------+------+------+------+
| col1 | col2 | col3 | col4 |
+------+------+------+------+
| 3.0  | 3.0  | 3.0  | 3.0  |
+------+------+------+------+

SELECT 2.0 - 1.0 as col1,
       2.0000 - 1.0000 as col2,
       2.000000000000 - 1.000000000000 as col3,
       2.00000000000000000000 - 1.00000000000000000000 as col4;

+------+------+------+------+
| col1 | col2 | col3 | col4 |
+------+------+------+------+
| 1.0  | 1.0  | 1.0  | 1.0  |
+------+------+------+------+

SELECT 2.0 * 1.0 as col1,
       2.0000 * 1.0000 as col2;

+------+------+
| col1 | col2 |
+------+------+
| 2.0  | 2.0  |
+------+------+
@@ -0,0 +1,127 @@
-- Port from https://github.com/duckdb/duckdb/blob/main/test/sql/types/decimal/decimal_arithmetic.test

-- negate

SELECT -('0.1'::DECIMAL), -('-0.1'::DECIMAL);

-- unary +

SELECT +('0.1'::DECIMAL), +('-0.1'::DECIMAL);

-- addition

SELECT '0.1'::DECIMAL + '0.1'::DECIMAL;

-- addition with non-decimal

SELECT '0.1'::DECIMAL + 1::INTEGER;

SELECT '0.5'::DECIMAL(4,4) + '0.5'::DECIMAL(4,4);

-- addition between different decimal types

SELECT '0.5'::DECIMAL(1,1) + '100.0'::DECIMAL(3,0);

-- test decimals and integers with big decimals

SELECT ('0.5'::DECIMAL(1,1) + 10000)::VARCHAR,
       ('0.54321'::DECIMAL(5,5) + 10000)::VARCHAR,
       ('0.5432154321'::DECIMAL(10,10) + 10000)::VARCHAR,
       ('0.543215432154321'::DECIMAL(15,15) + 10000::DECIMAL(20,15))::VARCHAR,
       ('0.54321543215432154321'::DECIMAL(20,20) + 10000)::VARCHAR,
       ('0.5432154321543215432154321'::DECIMAL(25,25) + 10000)::VARCHAR;

-- out of range

SELECT ('0.54321543215432154321543215432154321'::DECIMAL(35,35) + 10000)::VARCHAR;

-- different types

SELECT '0.5'::DECIMAL(1,1) + 1::TINYINT,
       '0.5'::DECIMAL(1,1) + 2::SMALLINT,
       '0.5'::DECIMAL(1,1) + 3::INTEGER,
       '0.5'::DECIMAL(1,1) + 4::BIGINT;

-- negative numbers

SELECT '0.5'::DECIMAL(1,1) + -1::TINYINT,
       '0.5'::DECIMAL(1,1) + -2::SMALLINT,
       '0.5'::DECIMAL(1,1) + -3::INTEGER,
       '0.5'::DECIMAL(1,1) + -4::BIGINT;

-- subtract

SELECT '0.5'::DECIMAL(1,1) - 1::TINYINT,
       '0.5'::DECIMAL(1,1) - 2::SMALLINT,
       '0.5'::DECIMAL(1,1) - 3::INTEGER,
       '0.5'::DECIMAL(1,1) - 4::BIGINT;

-- negative numbers

SELECT '0.5'::DECIMAL(1,1) - -1::TINYINT,
       '0.5'::DECIMAL(1,1) - -2::SMALLINT,
       '0.5'::DECIMAL(1,1) - -3::INTEGER,
       '0.5'::DECIMAL(1,1) - -4::BIGINT;

-- now with a table

CREATE TABLE decimals(d DECIMAL(3, 2), ts timestamp time index);

INSERT INTO decimals VALUES ('0.1',1000), ('0.2',1000);

SELECT * FROM decimals;

SELECT d + 10000 FROM decimals;

SELECT d + '0.1'::DECIMAL, d + 10000 FROM decimals;

DROP TABLE decimals;

-- multiplication

SELECT '0.1'::DECIMAL * '10.0'::DECIMAL;

SELECT arrow_typeof('0.1'::DECIMAL(2,1) * '10.0'::DECIMAL(3,1));

SELECT '0.1'::DECIMAL * '0.1'::DECIMAL;

-- multiplication with non-decimal

SELECT '0.1'::DECIMAL * 10::INTEGER;

SELECT '5.0'::DECIMAL(4,3) * '5.0'::DECIMAL(4,3);

-- negative multiplication

SELECT '-5.0'::DECIMAL(4,3) * '5.0'::DECIMAL(4,3);

-- no precision is lost

SELECT ('18.25'::DECIMAL(4,2) * '17.25'::DECIMAL(4,2))::VARCHAR;

-- different types

SELECT '0.001'::DECIMAL * 100::TINYINT,
       '0.001'::DECIMAL * 10000::SMALLINT,
       '0.001'::DECIMAL * 1000000::INTEGER,
       '0.001'::DECIMAL * 100000000::BIGINT;

-- multiplication could not be performed exactly: throw error

SELECT '0.000000000000000000000000000001'::DECIMAL(38,30) * '0.000000000000000000000000000001'::DECIMAL(38,30);

-- test addition, subtraction and multiplication with various scales and precisions

SELECT 2.0 + 1.0 as col1,
       2.0000 + 1.0000 as col2,
       2.000000000000 + 1.000000000000 as col3,
       2.00000000000000000000 + 1.00000000000000000000 as col4;

SELECT 2.0 - 1.0 as col1,
       2.0000 - 1.0000 as col2,
       2.000000000000 - 1.000000000000 as col3,
       2.00000000000000000000 - 1.00000000000000000000 as col4;

SELECT 2.0 * 1.0 as col1,
       2.0000 * 1.0000 as col2;
496 tests/cases/standalone/common/types/decimal/decimal_cast.result Normal file
@@ -0,0 +1,496 @@
-- Test casting from decimal to other types
-- Port from https://github.com/duckdb/duckdb/blob/main/test/sql/types/decimal/cast_from_decimal.test
-- and https://github.com/duckdb/duckdb/blob/main/test/sql/types/decimal/cast_to_decimal.test
-- tinyint
SELECT 127::DECIMAL(3,0)::TINYINT, -127::DECIMAL(3,0)::TINYINT, -7::DECIMAL(9,1)::TINYINT, 27::DECIMAL(18,1)::TINYINT, 33::DECIMAL(38,1)::TINYINT;

+------------+----------------+--------------+-----------+-----------+
| Int64(127) | (- Int64(127)) | (- Int64(7)) | Int64(27) | Int64(33) |
+------------+----------------+--------------+-----------+-----------+
| 127        | -127           | -7           | 27        | 33        |
+------------+----------------+--------------+-----------+-----------+

SELECT 128::DECIMAL(3,0)::TINYINT;

Error: 3001(EngineExecuteQuery), DataFusion error: Cast error: value of 128 is out of range Int8

SELECT -128::DECIMAL(9,0)::TINYINT;

Error: 3001(EngineExecuteQuery), DataFusion error: Cast error: value of 128 is out of range Int8

SELECT 128::DECIMAL(18,0)::TINYINT;

Error: 3001(EngineExecuteQuery), DataFusion error: Cast error: value of 128 is out of range Int8

SELECT 14751947891758972421513::DECIMAL(38,0)::TINYINT;

Error: 3001(EngineExecuteQuery), DataFusion error: Cast error: value of 14751947891758971486208 is out of range Int8

-- smallint
SELECT 127::DECIMAL(3,0)::SMALLINT, -32767::DECIMAL(5,0)::SMALLINT, -7::DECIMAL(9,1)::SMALLINT, 27::DECIMAL(18,1)::SMALLINT, 33::DECIMAL(38,1)::SMALLINT;

+------------+------------------+--------------+-----------+-----------+
| Int64(127) | (- Int64(32767)) | (- Int64(7)) | Int64(27) | Int64(33) |
+------------+------------------+--------------+-----------+-----------+
| 127        | -32767           | -7           | 27        | 33        |
+------------+------------------+--------------+-----------+-----------+

SELECT -32768::DECIMAL(9,0)::SMALLINT;

Error: 3001(EngineExecuteQuery), DataFusion error: Cast error: value of 32768 is out of range Int16

SELECT 32768::DECIMAL(18,0)::SMALLINT;

Error: 3001(EngineExecuteQuery), DataFusion error: Cast error: value of 32768 is out of range Int16

SELECT 14751947891758972421513::DECIMAL(38,0)::SMALLINT;

Error: 3001(EngineExecuteQuery), DataFusion error: Cast error: value of 14751947891758971486208 is out of range Int16

-- integer
SELECT 127::DECIMAL(3,0)::INTEGER, -2147483647::DECIMAL(10,0)::INTEGER, -7::DECIMAL(9,1)::INTEGER, 27::DECIMAL(18,1)::INTEGER, 33::DECIMAL(38,1)::INTEGER;

+------------+-----------------------+--------------+-----------+-----------+
| Int64(127) | (- Int64(2147483647)) | (- Int64(7)) | Int64(27) | Int64(33) |
+------------+-----------------------+--------------+-----------+-----------+
| 127        | -2147483647           | -7           | 27        | 33        |
+------------+-----------------------+--------------+-----------+-----------+

SELECT 2147483648::DECIMAL(18,0)::INTEGER;

Error: 3001(EngineExecuteQuery), DataFusion error: Cast error: value of 2147483648 is out of range Int32

SELECT 14751947891758972421513::DECIMAL(38,0)::INTEGER;

Error: 3001(EngineExecuteQuery), DataFusion error: Cast error: value of 14751947891758971486208 is out of range Int32

-- bigint
SELECT 127::DECIMAL(3,0)::BIGINT, -9223372036854775807::DECIMAL(19,0)::BIGINT, -7::DECIMAL(9,1)::BIGINT, 27::DECIMAL(18,1)::BIGINT, 33::DECIMAL(38,1)::BIGINT;

+------------+--------------------------------+--------------+-----------+-----------+
| Int64(127) | (- Int64(9223372036854775807)) | (- Int64(7)) | Int64(27) | Int64(33) |
+------------+--------------------------------+--------------+-----------+-----------+
| 127        | -9223372036854775807           | -7           | 27        | 33        |
+------------+--------------------------------+--------------+-----------+-----------+

SELECT 14751947891758972421513::DECIMAL(38,0)::BIGINT;

Error: 3001(EngineExecuteQuery), DataFusion error: Cast error: value of 14751947891758971486208 is out of range Int64

-- float
SELECT 127::DECIMAL(3,0)::FLOAT, -17014118346046923173168730371588410572::DECIMAL(38,0)::FLOAT, -7::DECIMAL(9,1)::FLOAT, 27::DECIMAL(18,1)::FLOAT, 33::DECIMAL(38,1)::FLOAT;

+------------+-----------------------------------------------------+--------------+-----------+-----------+
| Int64(127) | (- Float64(17014118346046924000000000000000000000)) | (- Int64(7)) | Int64(27) | Int64(33) |
+------------+-----------------------------------------------------+--------------+-----------+-----------+
| 127.0      | -1.7014119e37                                       | -7.0         | 27.0      | 33.0      |
+------------+-----------------------------------------------------+--------------+-----------+-----------+

-- double
SELECT 127::DECIMAL(3,0)::DOUBLE, -17014118346046923173168730371588410572::DECIMAL(38,0)::DOUBLE, -7::DECIMAL(9,1)::DOUBLE, 27::DECIMAL(18,1)::DOUBLE, 33::DECIMAL(38,1)::DOUBLE;

+------------+-----------------------------------------------------+--------------+-----------+-----------+
| Int64(127) | (- Float64(17014118346046924000000000000000000000)) | (- Int64(7)) | Int64(27) | Int64(33) |
+------------+-----------------------------------------------------+--------------+-----------+-----------+
| 127.0      | -1.7014118346046924e37                              | -7.0         | 27.0      | 33.0      |
+------------+-----------------------------------------------------+--------------+-----------+-----------+

-- Test casting from other types to decimal
-- tinyint
SELECT 100::TINYINT::DECIMAL(18,3), 200::TINYINT::DECIMAL(3,0), (-300)::TINYINT::DECIMAL(3,0), 0::TINYINT::DECIMAL(3,3);

Error: 3001(EngineExecuteQuery), DataFusion error: Cast error: Can't cast value 200 to type Int8

SELECT 100::TINYINT::DECIMAL(38,35), 200::TINYINT::DECIMAL(9,6);

Error: 3001(EngineExecuteQuery), DataFusion error: Cast error: Can't cast value 200 to type Int8
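Both failures above occur in the first leg of the cast chain, before any decimal conversion runs: TINYINT maps to Int8, whose range is -128..127, so the literal 200 (and likewise -300) is rejected as soon as it is cast to Int8.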
-- overflow
SELECT 100::TINYINT::DECIMAL(3,1);

Error: 3001(EngineExecuteQuery), DataFusion error: Invalid argument error: 1000 is too large to store in a Decimal128 of precision 3. Max is 999

SELECT 1::TINYINT::DECIMAL(3,3);

Error: 3001(EngineExecuteQuery), DataFusion error: Invalid argument error: 1000 is too large to store in a Decimal128 of precision 3. Max is 999

SELECT 100::TINYINT::DECIMAL(18,17);

Error: 3001(EngineExecuteQuery), DataFusion error: Invalid argument error: 10000000000000000000 is too large to store in a Decimal128 of precision 18. Max is 999999999999999999

SELECT 100::TINYINT::DECIMAL(9,7);

Error: 3001(EngineExecuteQuery), DataFusion error: Invalid argument error: 1000000000 is too large to store in a Decimal128 of precision 9. Max is 999999999

SELECT 100::TINYINT::DECIMAL(38,37);

Error: 3001(EngineExecuteQuery), DataFusion error: Compute error: Overflow happened on: 100 * 10000000000000000000000000000000000000

-- smallint
SELECT 100::SMALLINT::DECIMAL(18,3), 200::SMALLINT::DECIMAL(3,0), (-300)::SMALLINT::DECIMAL(3,0), 0::SMALLINT::DECIMAL(3,3);

+------------+------------+-------------+----------+
| Int64(100) | Int64(200) | Int64(-300) | Int64(0) |
+------------+------------+-------------+----------+
| 100.000    | 200        | -300        | 0.000    |
+------------+------------+-------------+----------+

SELECT 100::SMALLINT::DECIMAL(38,35), 200::SMALLINT::DECIMAL(9,6);

+-----------------------------------------+------------+
| Int64(100)                              | Int64(200) |
+-----------------------------------------+------------+
| 100.00000000000000000000000000000000000 | 200.000000 |
+-----------------------------------------+------------+

-- overflow
SELECT 100::SMALLINT::DECIMAL(3,1);

Error: 3001(EngineExecuteQuery), DataFusion error: Invalid argument error: 1000 is too large to store in a Decimal128 of precision 3. Max is 999

SELECT 1::SMALLINT::DECIMAL(3,3);

Error: 3001(EngineExecuteQuery), DataFusion error: Invalid argument error: 1000 is too large to store in a Decimal128 of precision 3. Max is 999

SELECT 100::SMALLINT::DECIMAL(18,17);

Error: 3001(EngineExecuteQuery), DataFusion error: Invalid argument error: 10000000000000000000 is too large to store in a Decimal128 of precision 18. Max is 999999999999999999

SELECT 100::SMALLINT::DECIMAL(9,7);

Error: 3001(EngineExecuteQuery), DataFusion error: Invalid argument error: 1000000000 is too large to store in a Decimal128 of precision 9. Max is 999999999

SELECT 100::SMALLINT::DECIMAL(38,37);

Error: 3001(EngineExecuteQuery), DataFusion error: Compute error: Overflow happened on: 100 * 10000000000000000000000000000000000000

-- integer
SELECT 100::INTEGER::DECIMAL(18,3), 200::INTEGER::DECIMAL(3,0), (-300)::INTEGER::DECIMAL(3,0), 0::INTEGER::DECIMAL(3,3);

+------------+------------+-------------+----------+
| Int64(100) | Int64(200) | Int64(-300) | Int64(0) |
+------------+------------+-------------+----------+
| 100.000    | 200        | -300        | 0.000    |
+------------+------------+-------------+----------+

SELECT 100::INTEGER::DECIMAL(38,35), 200::INTEGER::DECIMAL(9,6), 2147483647::INTEGER::DECIMAL(10,0), (-2147483647)::INTEGER::DECIMAL(10,0);

+-----------------------------------------+------------+-------------------+--------------------+
| Int64(100)                              | Int64(200) | Int64(2147483647) | Int64(-2147483647) |
+-----------------------------------------+------------+-------------------+--------------------+
| 100.00000000000000000000000000000000000 | 200.000000 | 2147483647        | -2147483647        |
+-----------------------------------------+------------+-------------------+--------------------+

-- overflow
SELECT 100::INTEGER::DECIMAL(3,1);

Error: 3001(EngineExecuteQuery), DataFusion error: Invalid argument error: 1000 is too large to store in a Decimal128 of precision 3. Max is 999

SELECT 10000000::INTEGER::DECIMAL(3,1);

Error: 3001(EngineExecuteQuery), DataFusion error: Invalid argument error: 100000000 is too large to store in a Decimal128 of precision 3. Max is 999

SELECT -10000000::INTEGER::DECIMAL(3,1);

Error: 3001(EngineExecuteQuery), DataFusion error: Invalid argument error: 100000000 is too large to store in a Decimal128 of precision 3. Max is 999

SELECT 1::INTEGER::DECIMAL(3,3);

Error: 3001(EngineExecuteQuery), DataFusion error: Invalid argument error: 1000 is too large to store in a Decimal128 of precision 3. Max is 999

SELECT 100::INTEGER::DECIMAL(18,17);

Error: 3001(EngineExecuteQuery), DataFusion error: Invalid argument error: 10000000000000000000 is too large to store in a Decimal128 of precision 18. Max is 999999999999999999

SELECT 100::INTEGER::DECIMAL(9,7);

Error: 3001(EngineExecuteQuery), DataFusion error: Invalid argument error: 1000000000 is too large to store in a Decimal128 of precision 9. Max is 999999999

SELECT 100::INTEGER::DECIMAL(38,37);

Error: 3001(EngineExecuteQuery), DataFusion error: Compute error: Overflow happened on: 100 * 10000000000000000000000000000000000000

-- bigint
SELECT 100::BIGINT::DECIMAL(18,3), 200::BIGINT::DECIMAL(3,0), (-100)::BIGINT::DECIMAL(3,0), 0::BIGINT::DECIMAL(3,3);

+------------+------------+-------------+----------+
| Int64(100) | Int64(200) | Int64(-100) | Int64(0) |
+------------+------------+-------------+----------+
| 100.000    | 200        | -100        | 0.000    |
+------------+------------+-------------+----------+

SELECT 100::BIGINT::DECIMAL(38,35), 200::BIGINT::DECIMAL(9,6), 9223372036854775807::BIGINT::DECIMAL(19,0), (-9223372036854775807)::BIGINT::DECIMAL(19,0);

+-----------------------------------------+------------+----------------------------+-----------------------------+
| Int64(100)                              | Int64(200) | Int64(9223372036854775807) | Int64(-9223372036854775807) |
+-----------------------------------------+------------+----------------------------+-----------------------------+
| 100.00000000000000000000000000000000000 | 200.000000 | 9223372036854775807        | -9223372036854775807        |
+-----------------------------------------+------------+----------------------------+-----------------------------+

SELECT 922337203685477580::BIGINT::DECIMAL(18,0), (-922337203685477580)::BIGINT::DECIMAL(18,0);

+---------------------------+----------------------------+
| Int64(922337203685477580) | Int64(-922337203685477580) |
+---------------------------+----------------------------+
| 922337203685477580        | -922337203685477580        |
+---------------------------+----------------------------+

-- overflow
SELECT 100::BIGINT::DECIMAL(3,1);

Error: 3001(EngineExecuteQuery), DataFusion error: Invalid argument error: 1000 is too large to store in a Decimal128 of precision 3. Max is 999

SELECT 10000000::BIGINT::DECIMAL(3,1);

Error: 3001(EngineExecuteQuery), DataFusion error: Invalid argument error: 100000000 is too large to store in a Decimal128 of precision 3. Max is 999

SELECT -10000000::BIGINT::DECIMAL(3,1);

Error: 3001(EngineExecuteQuery), DataFusion error: Invalid argument error: 100000000 is too large to store in a Decimal128 of precision 3. Max is 999

SELECT 1::BIGINT::DECIMAL(3,3);

Error: 3001(EngineExecuteQuery), DataFusion error: Invalid argument error: 1000 is too large to store in a Decimal128 of precision 3. Max is 999

SELECT 100::BIGINT::DECIMAL(18,17);

Error: 3001(EngineExecuteQuery), DataFusion error: Invalid argument error: 10000000000000000000 is too large to store in a Decimal128 of precision 18. Max is 999999999999999999

SELECT 100::BIGINT::DECIMAL(9,7);

Error: 3001(EngineExecuteQuery), DataFusion error: Invalid argument error: 1000000000 is too large to store in a Decimal128 of precision 9. Max is 999999999

SELECT 100::BIGINT::DECIMAL(38,37);

Error: 3001(EngineExecuteQuery), DataFusion error: Compute error: Overflow happened on: 100 * 10000000000000000000000000000000000000

-- float
SELECT 100::FLOAT::DECIMAL(18,3), 200::FLOAT::DECIMAL(3,0), (-300)::FLOAT::DECIMAL(3,0), 0::FLOAT::DECIMAL(3,3);

+------------+------------+-------------+----------+
| Int64(100) | Int64(200) | Int64(-300) | Int64(0) |
+------------+------------+-------------+----------+
| 100.000    | 200        | -300        | 0.000    |
+------------+------------+-------------+----------+

SELECT 100::FLOAT::DECIMAL(38,35)::FLOAT, 200::FLOAT::DECIMAL(9,6)::FLOAT, 17014118346046923173168730371588410572::FLOAT::DECIMAL(38,0)::FLOAT, (-17014118346046923173168730371588410572)::FLOAT::DECIMAL(38,0)::FLOAT;

+------------+------------+-------------------------------------------------+--------------------------------------------------+
| Int64(100) | Int64(200) | Float64(17014118346046924000000000000000000000) | Float64(-17014118346046924000000000000000000000) |
+------------+------------+-------------------------------------------------+--------------------------------------------------+
| 100.0      | 200.0      | 1.7014119e37                                    | -1.7014119e37                                    |
+------------+------------+-------------------------------------------------+--------------------------------------------------+

SELECT 1.25::FLOAT::DECIMAL(3,2);

+---------------+
| Float64(1.25) |
+---------------+
| 1.25          |
+---------------+

-- overflow
SELECT 100::FLOAT::DECIMAL(3,1);

+------------+
| Int64(100) |
+------------+
| 10.0       |
+------------+

SELECT 10000000::FLOAT::DECIMAL(3,1);

+-----------------+
| Int64(10000000) |
+-----------------+
| 10.0            |
+-----------------+

SELECT -10000000::FLOAT::DECIMAL(3,1);

+---------------------+
| (- Int64(10000000)) |
+---------------------+
| -10.0               |
+---------------------+

SELECT 1::FLOAT::DECIMAL(3,3);

+----------+
| Int64(1) |
+----------+
| .100     |
+----------+

SELECT 100::FLOAT::DECIMAL(18,17);

+---------------------+
| Int64(100)          |
+---------------------+
| 1.00000000000000000 |
+---------------------+

SELECT 100::FLOAT::DECIMAL(9,7);

+------------+
| Int64(100) |
+------------+
| 10.0000000 |
+------------+

SELECT 100::FLOAT::DECIMAL(38,37);

Error: 3001(EngineExecuteQuery), DataFusion error: Cast error: Cannot cast to Decimal128(38, 37). Overflowing on 100.0

-- Some controversial cases
SELECT 17014118346046923173168730371588410572::FLOAT::DECIMAL(38,1);

Error: 3001(EngineExecuteQuery), DataFusion error: Cast error: Cannot cast to Decimal128(38, 1). Overflowing on 1.7014119e37

SELECT 17014118346046923173168730371588410572::FLOAT::DECIMAL(37,0);

+-------------------------------------------------+
| Float64(17014118346046924000000000000000000000) |
+-------------------------------------------------+
| 1701411859957704321881461067092905164           |
+-------------------------------------------------+

SELECT 17014118346046923173168730371588410572::FLOAT::DECIMAL(18,0);

+-------------------------------------------------+
| Float64(17014118346046924000000000000000000000) |
+-------------------------------------------------+
| 170141185995770432                              |
+-------------------------------------------------+

SELECT 17014118346046923173168730371588410572::FLOAT::DECIMAL(9,0);

+-------------------------------------------------+
| Float64(17014118346046924000000000000000000000) |
+-------------------------------------------------+
| 170141185                                       |
+-------------------------------------------------+

SELECT 17014118346046923173168730371588410572::FLOAT::DECIMAL(4,0);

+-------------------------------------------------+
| Float64(17014118346046924000000000000000000000) |
+-------------------------------------------------+
| 1701                                            |
+-------------------------------------------------+
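These FLOAT cases are controversial because an f32 carries only about 7 significant decimal digits: the 38-digit literal survives only as the nearest float, and rendering that float back as a decimal materializes arbitrary digits (1701411859957704321881461067092905164) beyond the first few. Likewise, the DECIMAL(18,0), (9,0) and (4,0) results keep just the leading digits of the float value instead of rejecting a number whose magnitude cannot fit the target precision.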
-- double
SELECT 100::DOUBLE::DECIMAL(18,3), 200::DOUBLE::DECIMAL(3,0), (-300)::DOUBLE::DECIMAL(3,0), 0::DOUBLE::DECIMAL(3,3);

+------------+------------+-------------+----------+
| Int64(100) | Int64(200) | Int64(-300) | Int64(0) |
+------------+------------+-------------+----------+
| 100.000    | 200        | -300        | 0.000    |
+------------+------------+-------------+----------+

SELECT 100::DOUBLE::DECIMAL(38,35)::DOUBLE, 200::DOUBLE::DECIMAL(9,6)::DOUBLE, 17014118346046923173168730371588410572::DOUBLE::DECIMAL(38,0)::DOUBLE, (-17014118346046923173168730371588410572)::DOUBLE::DECIMAL(38,0)::DOUBLE;

+------------+------------+-------------------------------------------------+--------------------------------------------------+
| Int64(100) | Int64(200) | Float64(17014118346046924000000000000000000000) | Float64(-17014118346046924000000000000000000000) |
+------------+------------+-------------------------------------------------+--------------------------------------------------+
| 100.0      | 200.0      | 1.7014118346046924e37                           | -1.7014118346046924e37                           |
+------------+------------+-------------------------------------------------+--------------------------------------------------+

SELECT 1.25::DOUBLE::DECIMAL(3,2);

+---------------+
| Float64(1.25) |
+---------------+
| 1.25          |
+---------------+

-- overflow
SELECT 100::DOUBLE::DECIMAL(3,1);

+------------+
| Int64(100) |
+------------+
| 10.0       |
+------------+

SELECT 10000000::DOUBLE::DECIMAL(3,1);

+-----------------+
| Int64(10000000) |
+-----------------+
| 10.0            |
+-----------------+

SELECT -10000000::DOUBLE::DECIMAL(3,1);

+---------------------+
| (- Int64(10000000)) |
+---------------------+
| -10.0               |
+---------------------+

SELECT 1::DOUBLE::DECIMAL(3,3);

+----------+
| Int64(1) |
+----------+
| .100     |
+----------+

SELECT 100::DOUBLE::DECIMAL(18,17);

+---------------------+
| Int64(100)          |
+---------------------+
| 1.00000000000000000 |
+---------------------+

SELECT 100::DOUBLE::DECIMAL(9,7);

+------------+
| Int64(100) |
+------------+
| 10.0000000 |
+------------+

SELECT 100::DOUBLE::DECIMAL(38,37);

Error: 3001(EngineExecuteQuery), DataFusion error: Cast error: Cannot cast to Decimal128(38, 37). Overflowing on 100.0

-- Some controversial cases
SELECT 17014118346046923173168730371588410572::DOUBLE::DECIMAL(38,1);

Error: 3001(EngineExecuteQuery), DataFusion error: Cast error: Cannot cast to Decimal128(38, 1). Overflowing on 1.7014118346046924e37

SELECT 17014118346046923173168730371588410572::DOUBLE::DECIMAL(37,0);

+-------------------------------------------------+
| Float64(17014118346046924000000000000000000000) |
+-------------------------------------------------+
| 1701411834604692411764202694551745331           |
+-------------------------------------------------+

SELECT 17014118346046923173168730371588410572::DOUBLE::DECIMAL(18,0);

+-------------------------------------------------+
| Float64(17014118346046924000000000000000000000) |
+-------------------------------------------------+
| 170141183460469241                              |
+-------------------------------------------------+

SELECT 17014118346046923173168730371588410572::DOUBLE::DECIMAL(9,0);

+-------------------------------------------------+
| Float64(17014118346046924000000000000000000000) |
+-------------------------------------------------+
| 170141183                                       |
+-------------------------------------------------+

SELECT 17014118346046923173168730371588410572::DOUBLE::DECIMAL(4,0);

+-------------------------------------------------+
| Float64(17014118346046924000000000000000000000) |
+-------------------------------------------------+
| 1701                                            |
+-------------------------------------------------+
203 tests/cases/standalone/common/types/decimal/decimal_cast.sql Normal file
@@ -0,0 +1,203 @@
-- Test casting from decimal to other types
-- Port from https://github.com/duckdb/duckdb/blob/main/test/sql/types/decimal/cast_from_decimal.test
-- and https://github.com/duckdb/duckdb/blob/main/test/sql/types/decimal/cast_to_decimal.test

-- tinyint
SELECT 127::DECIMAL(3,0)::TINYINT, -127::DECIMAL(3,0)::TINYINT, -7::DECIMAL(9,1)::TINYINT, 27::DECIMAL(18,1)::TINYINT, 33::DECIMAL(38,1)::TINYINT;

SELECT 128::DECIMAL(3,0)::TINYINT;

SELECT -128::DECIMAL(9,0)::TINYINT;

SELECT 128::DECIMAL(18,0)::TINYINT;

SELECT 14751947891758972421513::DECIMAL(38,0)::TINYINT;

-- smallint

SELECT 127::DECIMAL(3,0)::SMALLINT, -32767::DECIMAL(5,0)::SMALLINT, -7::DECIMAL(9,1)::SMALLINT, 27::DECIMAL(18,1)::SMALLINT, 33::DECIMAL(38,1)::SMALLINT;

SELECT -32768::DECIMAL(9,0)::SMALLINT;

SELECT 32768::DECIMAL(18,0)::SMALLINT;

SELECT 14751947891758972421513::DECIMAL(38,0)::SMALLINT;

-- integer

SELECT 127::DECIMAL(3,0)::INTEGER, -2147483647::DECIMAL(10,0)::INTEGER, -7::DECIMAL(9,1)::INTEGER, 27::DECIMAL(18,1)::INTEGER, 33::DECIMAL(38,1)::INTEGER;

SELECT 2147483648::DECIMAL(18,0)::INTEGER;

SELECT 14751947891758972421513::DECIMAL(38,0)::INTEGER;

-- bigint

SELECT 127::DECIMAL(3,0)::BIGINT, -9223372036854775807::DECIMAL(19,0)::BIGINT, -7::DECIMAL(9,1)::BIGINT, 27::DECIMAL(18,1)::BIGINT, 33::DECIMAL(38,1)::BIGINT;

SELECT 14751947891758972421513::DECIMAL(38,0)::BIGINT;

-- float

SELECT 127::DECIMAL(3,0)::FLOAT, -17014118346046923173168730371588410572::DECIMAL(38,0)::FLOAT, -7::DECIMAL(9,1)::FLOAT, 27::DECIMAL(18,1)::FLOAT, 33::DECIMAL(38,1)::FLOAT;

-- double

SELECT 127::DECIMAL(3,0)::DOUBLE, -17014118346046923173168730371588410572::DECIMAL(38,0)::DOUBLE, -7::DECIMAL(9,1)::DOUBLE, 27::DECIMAL(18,1)::DOUBLE, 33::DECIMAL(38,1)::DOUBLE;

-- Test casting from other types to decimal

-- tinyint

SELECT 100::TINYINT::DECIMAL(18,3), 200::TINYINT::DECIMAL(3,0), (-300)::TINYINT::DECIMAL(3,0), 0::TINYINT::DECIMAL(3,3);

SELECT 100::TINYINT::DECIMAL(38,35), 200::TINYINT::DECIMAL(9,6);

-- overflow

SELECT 100::TINYINT::DECIMAL(3,1);

SELECT 1::TINYINT::DECIMAL(3,3);

SELECT 100::TINYINT::DECIMAL(18,17);

SELECT 100::TINYINT::DECIMAL(9,7);

SELECT 100::TINYINT::DECIMAL(38,37);

-- smallint

SELECT 100::SMALLINT::DECIMAL(18,3), 200::SMALLINT::DECIMAL(3,0), (-300)::SMALLINT::DECIMAL(3,0), 0::SMALLINT::DECIMAL(3,3);

SELECT 100::SMALLINT::DECIMAL(38,35), 200::SMALLINT::DECIMAL(9,6);

-- overflow

SELECT 100::SMALLINT::DECIMAL(3,1);

SELECT 1::SMALLINT::DECIMAL(3,3);

SELECT 100::SMALLINT::DECIMAL(18,17);

SELECT 100::SMALLINT::DECIMAL(9,7);

SELECT 100::SMALLINT::DECIMAL(38,37);

-- integer

SELECT 100::INTEGER::DECIMAL(18,3), 200::INTEGER::DECIMAL(3,0), (-300)::INTEGER::DECIMAL(3,0), 0::INTEGER::DECIMAL(3,3);

SELECT 100::INTEGER::DECIMAL(38,35), 200::INTEGER::DECIMAL(9,6), 2147483647::INTEGER::DECIMAL(10,0), (-2147483647)::INTEGER::DECIMAL(10,0);

-- overflow

SELECT 100::INTEGER::DECIMAL(3,1);

SELECT 10000000::INTEGER::DECIMAL(3,1);

SELECT -10000000::INTEGER::DECIMAL(3,1);

SELECT 1::INTEGER::DECIMAL(3,3);

SELECT 100::INTEGER::DECIMAL(18,17);

SELECT 100::INTEGER::DECIMAL(9,7);

SELECT 100::INTEGER::DECIMAL(38,37);

-- bigint

SELECT 100::BIGINT::DECIMAL(18,3), 200::BIGINT::DECIMAL(3,0), (-100)::BIGINT::DECIMAL(3,0), 0::BIGINT::DECIMAL(3,3);

SELECT 100::BIGINT::DECIMAL(38,35), 200::BIGINT::DECIMAL(9,6), 9223372036854775807::BIGINT::DECIMAL(19,0), (-9223372036854775807)::BIGINT::DECIMAL(19,0);

SELECT 922337203685477580::BIGINT::DECIMAL(18,0), (-922337203685477580)::BIGINT::DECIMAL(18,0);

-- overflow

SELECT 100::BIGINT::DECIMAL(3,1);

SELECT 10000000::BIGINT::DECIMAL(3,1);

SELECT -10000000::BIGINT::DECIMAL(3,1);

SELECT 1::BIGINT::DECIMAL(3,3);

SELECT 100::BIGINT::DECIMAL(18,17);

SELECT 100::BIGINT::DECIMAL(9,7);

SELECT 100::BIGINT::DECIMAL(38,37);

-- float

SELECT 100::FLOAT::DECIMAL(18,3), 200::FLOAT::DECIMAL(3,0), (-300)::FLOAT::DECIMAL(3,0), 0::FLOAT::DECIMAL(3,3);

SELECT 100::FLOAT::DECIMAL(38,35)::FLOAT, 200::FLOAT::DECIMAL(9,6)::FLOAT, 17014118346046923173168730371588410572::FLOAT::DECIMAL(38,0)::FLOAT, (-17014118346046923173168730371588410572)::FLOAT::DECIMAL(38,0)::FLOAT;

SELECT 1.25::FLOAT::DECIMAL(3,2);

-- overflow

SELECT 100::FLOAT::DECIMAL(3,1);

SELECT 10000000::FLOAT::DECIMAL(3,1);

SELECT -10000000::FLOAT::DECIMAL(3,1);

SELECT 1::FLOAT::DECIMAL(3,3);

SELECT 100::FLOAT::DECIMAL(18,17);

SELECT 100::FLOAT::DECIMAL(9,7);

SELECT 100::FLOAT::DECIMAL(38,37);

-- Some controversial cases

SELECT 17014118346046923173168730371588410572::FLOAT::DECIMAL(38,1);

SELECT 17014118346046923173168730371588410572::FLOAT::DECIMAL(37,0);

SELECT 17014118346046923173168730371588410572::FLOAT::DECIMAL(18,0);

SELECT 17014118346046923173168730371588410572::FLOAT::DECIMAL(9,0);

SELECT 17014118346046923173168730371588410572::FLOAT::DECIMAL(4,0);

-- double

SELECT 100::DOUBLE::DECIMAL(18,3), 200::DOUBLE::DECIMAL(3,0), (-300)::DOUBLE::DECIMAL(3,0), 0::DOUBLE::DECIMAL(3,3);

SELECT 100::DOUBLE::DECIMAL(38,35)::DOUBLE, 200::DOUBLE::DECIMAL(9,6)::DOUBLE, 17014118346046923173168730371588410572::DOUBLE::DECIMAL(38,0)::DOUBLE, (-17014118346046923173168730371588410572)::DOUBLE::DECIMAL(38,0)::DOUBLE;

SELECT 1.25::DOUBLE::DECIMAL(3,2);

-- overflow

SELECT 100::DOUBLE::DECIMAL(3,1);

SELECT 10000000::DOUBLE::DECIMAL(3,1);

SELECT -10000000::DOUBLE::DECIMAL(3,1);

SELECT 1::DOUBLE::DECIMAL(3,3);

SELECT 100::DOUBLE::DECIMAL(18,17);

SELECT 100::DOUBLE::DECIMAL(9,7);

SELECT 100::DOUBLE::DECIMAL(38,37);

-- Some controversial cases

SELECT 17014118346046923173168730371588410572::DOUBLE::DECIMAL(38,1);

SELECT 17014118346046923173168730371588410572::DOUBLE::DECIMAL(37,0);

SELECT 17014118346046923173168730371588410572::DOUBLE::DECIMAL(18,0);

SELECT 17014118346046923173168730371588410572::DOUBLE::DECIMAL(9,0);

SELECT 17014118346046923173168730371588410572::DOUBLE::DECIMAL(4,0);
440 tests/cases/standalone/common/types/decimal/decimal_ops.result Normal file
@@ -0,0 +1,440 @@
-- Some cases port from https://github.com/duckdb/duckdb/blob/main/test/sql/types/decimal/test_decimal_ops.test
CREATE TABLE decimals(d DECIMAL(3, 2), ts timestamp time index);

Affected Rows: 0

INSERT INTO decimals VALUES ('0.1',1000), ('0.2',2000);

Affected Rows: 2

-- SQLNESS SORT_RESULT 3 1
SELECT * FROM decimals;

+------+---------------------+
| d    | ts                  |
+------+---------------------+
| 0.10 | 1970-01-01T00:00:01 |
| 0.20 | 1970-01-01T00:00:02 |
+------+---------------------+

-- ORDER BY
SELECT * FROM decimals ORDER BY d DESC;

+------+---------------------+
| d    | ts                  |
+------+---------------------+
| 0.20 | 1970-01-01T00:00:02 |
| 0.10 | 1970-01-01T00:00:01 |
+------+---------------------+

-- equality
SELECT * FROM decimals WHERE d = '0.1'::DECIMAL(3,2);

+------+---------------------+
| d    | ts                  |
+------+---------------------+
| 0.10 | 1970-01-01T00:00:01 |
+------+---------------------+

-- greater than equals
-- SQLNESS SORT_RESULT 3 1
SELECT * FROM decimals WHERE d >= '0.1'::DECIMAL(3,2);

+------+---------------------+
| d    | ts                  |
+------+---------------------+
| 0.10 | 1970-01-01T00:00:01 |
| 0.20 | 1970-01-01T00:00:02 |
+------+---------------------+

-- what about if we use different decimal scales?
SELECT * FROM decimals WHERE d = '0.1'::DECIMAL(9,5);

+------+---------------------+
| d    | ts                  |
+------+---------------------+
| 0.10 | 1970-01-01T00:00:01 |
+------+---------------------+

SELECT * FROM decimals WHERE d >= '0.1'::DECIMAL(9,5) ORDER BY 1;

+------+---------------------+
| d    | ts                  |
+------+---------------------+
| 0.10 | 1970-01-01T00:00:01 |
| 0.20 | 1970-01-01T00:00:02 |
+------+---------------------+
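Cross-scale predicates like these work because the planner coerces both sides of the comparison to a common decimal type before evaluating it, so the DECIMAL(3,2) column can be matched against a DECIMAL(9,5) literal without an explicit cast: 0.10 rescales to 0.10000 and compares equal. (The exact common type chosen is an implementation detail of the DataFusion coercion rules.)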
-- what if we compare decimals with different scales and width (3,2) vs (9,1)
INSERT INTO decimals VALUES ('0.11',3000), ('0.21',4000);

Affected Rows: 2

SELECT * FROM decimals WHERE d = '0.1'::DECIMAL(9,1);

+------+---------------------+
| d    | ts                  |
+------+---------------------+
| 0.10 | 1970-01-01T00:00:01 |
+------+---------------------+

SELECT * FROM decimals WHERE d > '0.1'::DECIMAL(9,1) ORDER BY 1;

+------+---------------------+
| d    | ts                  |
+------+---------------------+
| 0.11 | 1970-01-01T00:00:03 |
| 0.20 | 1970-01-01T00:00:02 |
| 0.21 | 1970-01-01T00:00:04 |
+------+---------------------+

DELETE FROM decimals WHERE d <> d::DECIMAL(9,1);

Affected Rows: 2

SELECT * FROM decimals;

+------+---------------------+
| d    | ts                  |
+------+---------------------+
| 0.10 | 1970-01-01T00:00:01 |
| 0.20 | 1970-01-01T00:00:02 |
+------+---------------------+
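The DELETE above uses a cast round-trip as a rounding filter: DECIMAL(3,2) -> DECIMAL(9,1) keeps only one fractional digit, so

0.11::DECIMAL(9,1) = 0.1  =>  0.11 <> 0.1, row deleted
0.21::DECIMAL(9,1) = 0.2  =>  0.21 <> 0.2, row deleted
0.20::DECIMAL(9,1) = 0.2  =>  0.20 =  0.2, row kept

which is why exactly the two rows whose second fractional digit is non-zero disappear.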
-- test ABS function
SELECT ABS('-0.1'::DECIMAL), ABS('0.1'::DECIMAL), ABS(NULL::DECIMAL);

+-------------------+------------------+-----------+
| abs(Utf8("-0.1")) | abs(Utf8("0.1")) | abs(NULL) |
+-------------------+------------------+-----------+
| 0.1000000000      | 0.1000000000     |           |
+-------------------+------------------+-----------+

SELECT ABS('-0.1'::DECIMAL(4,3)) AS col1, ABS('-0.1'::DECIMAL(9,3)) AS col2, ABS('-0.1'::DECIMAL(18,3)) AS col3, ABS('-0.1'::DECIMAL(38,3)) AS col4;

+-------+-------+-------+-------+
| col1  | col2  | col3  | col4  |
+-------+-------+-------+-------+
| 0.100 | 0.100 | 0.100 | 0.100 |
+-------+-------+-------+-------+

-- test CEIL function
SELECT CEIL('0.1'::DECIMAL), CEIL('-0.1'::DECIMAL), CEIL(NULL::DECIMAL);

+-------------------+--------------------+------------+
| ceil(Utf8("0.1")) | ceil(Utf8("-0.1")) | ceil(NULL) |
+-------------------+--------------------+------------+
| 1.0               | 0.0                |            |
+-------------------+--------------------+------------+

SELECT CEIL('100.3'::DECIMAL), CEIL('-127012.3'::DECIMAL);

+---------------------+-------------------------+
| ceil(Utf8("100.3")) | ceil(Utf8("-127012.3")) |
+---------------------+-------------------------+
| 101.0               | -127012.0               |
+---------------------+-------------------------+

SELECT CEIL('10.5'::DECIMAL), CEIL('-10.5'::DECIMAL);

+--------------------+---------------------+
| ceil(Utf8("10.5")) | ceil(Utf8("-10.5")) |
+--------------------+---------------------+
| 11.0               | -10.0               |
+--------------------+---------------------+

-- ceil function on the boundaries
SELECT CEIL('999.9'::DECIMAL(4,1)), CEIL('99999999.9'::DECIMAL(9,1)), CEIL('99999999999999999.9'::DECIMAL(18,1)), CEIL('9999999999999999999999999999999999999.9'::DECIMAL(38,1));

+---------------------+--------------------------+-----------------------------------+-------------------------------------------------------+
| ceil(Utf8("999.9")) | ceil(Utf8("99999999.9")) | ceil(Utf8("99999999999999999.9")) | ceil(Utf8("9999999999999999999999999999999999999.9")) |
+---------------------+--------------------------+-----------------------------------+-------------------------------------------------------+
| 1000.0              | 100000000.0              | 1.0e17                            | 1.0e37                                                |
+---------------------+--------------------------+-----------------------------------+-------------------------------------------------------+

SELECT CEIL('-999.9'::DECIMAL(4,1)), CEIL('-99999999.9'::DECIMAL(9,1)), CEIL('-99999999999999999.9'::DECIMAL(18,1)), CEIL('-9999999999999999999999999999999999999.9'::DECIMAL(38,1));

+----------------------+---------------------------+------------------------------------+--------------------------------------------------------+
| ceil(Utf8("-999.9")) | ceil(Utf8("-99999999.9")) | ceil(Utf8("-99999999999999999.9")) | ceil(Utf8("-9999999999999999999999999999999999999.9")) |
+----------------------+---------------------------+------------------------------------+--------------------------------------------------------+
| -999.0               | -99999999.0               | -1.0e17                            | -1.0e37                                                |
+----------------------+---------------------------+------------------------------------+--------------------------------------------------------+
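A side observation on the CEIL output: the trailing .0 values and the 1.0e17 / 1.0e37 boundary results suggest the decimal argument is coerced to Float64 before the function is applied, which would explain why values beyond a float's exact range come back in scientific notation rather than as exact decimals.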
-- test FLOOR function
SELECT FLOOR('0.1'::DECIMAL), FLOOR('-0.1'::DECIMAL), FLOOR(NULL::DECIMAL);

+--------------------+---------------------+-------------+
| floor(Utf8("0.1")) | floor(Utf8("-0.1")) | floor(NULL) |
+--------------------+---------------------+-------------+
| 0.0                | -1.0                |             |
+--------------------+---------------------+-------------+

SELECT FLOOR('100.3'::DECIMAL), FLOOR('-127012.3'::DECIMAL);

+----------------------+--------------------------+
| floor(Utf8("100.3")) | floor(Utf8("-127012.3")) |
+----------------------+--------------------------+
| 100.0                | -127013.0                |
+----------------------+--------------------------+

SELECT FLOOR('10.5'::DECIMAL), FLOOR('-10.5'::DECIMAL);

+---------------------+----------------------+
| floor(Utf8("10.5")) | floor(Utf8("-10.5")) |
+---------------------+----------------------+
| 10.0                | -11.0                |
+---------------------+----------------------+

-- floor function on the boundaries
SELECT FLOOR('999.9'::DECIMAL(4,1)), FLOOR('99999999.9'::DECIMAL(9,1)), FLOOR('99999999999999999.9'::DECIMAL(18,1)), FLOOR('9999999999999999999999999999999999999.9'::DECIMAL(38,1));

+----------------------+---------------------------+------------------------------------+--------------------------------------------------------+
| floor(Utf8("999.9")) | floor(Utf8("99999999.9")) | floor(Utf8("99999999999999999.9")) | floor(Utf8("9999999999999999999999999999999999999.9")) |
+----------------------+---------------------------+------------------------------------+--------------------------------------------------------+
| 999.0                | 99999999.0                | 1.0e17                             | 1.0e37                                                 |
+----------------------+---------------------------+------------------------------------+--------------------------------------------------------+

SELECT FLOOR('-999.9'::DECIMAL(4,1)), FLOOR('-99999999.9'::DECIMAL(9,1)), FLOOR('-99999999999999999.9'::DECIMAL(18,1)), FLOOR('-9999999999999999999999999999999999999.9'::DECIMAL(38,1));

+-----------------------+----------------------------+-------------------------------------+---------------------------------------------------------+
| floor(Utf8("-999.9")) | floor(Utf8("-99999999.9")) | floor(Utf8("-99999999999999999.9")) | floor(Utf8("-9999999999999999999999999999999999999.9")) |
+-----------------------+----------------------------+-------------------------------------+---------------------------------------------------------+
| -1000.0               | -100000000.0               | -1.0e17                             | -1.0e37                                                 |
+-----------------------+----------------------------+-------------------------------------+---------------------------------------------------------+

-- test unary ROUND function
SELECT ROUND('0.1'::DECIMAL), ROUND('-0.1'::DECIMAL), ROUND(NULL::DECIMAL);

+--------------------+---------------------+-------------+
| round(Utf8("0.1")) | round(Utf8("-0.1")) | round(NULL) |
+--------------------+---------------------+-------------+
| 0.0                | 0.0                 |             |
+--------------------+---------------------+-------------+

SELECT ROUND('100.3'::DECIMAL), ROUND('-127012.3'::DECIMAL);

+----------------------+--------------------------+
| round(Utf8("100.3")) | round(Utf8("-127012.3")) |
+----------------------+--------------------------+
| 100.0                | -127012.0                |
+----------------------+--------------------------+

SELECT ROUND('10.5'::DECIMAL), ROUND('-10.5'::DECIMAL);

+---------------------+----------------------+
| round(Utf8("10.5")) | round(Utf8("-10.5")) |
+---------------------+----------------------+
| 11.0                | -11.0                |
+---------------------+----------------------+

-- round function on the boundaries
SELECT ROUND('999.9'::DECIMAL(4,1)), ROUND('99999999.9'::DECIMAL(9,1)), ROUND('99999999999999999.9'::DECIMAL(18,1)), ROUND('9999999999999999999999999999999999999.9'::DECIMAL(38,1));

+----------------------+---------------------------+------------------------------------+--------------------------------------------------------+
| round(Utf8("999.9")) | round(Utf8("99999999.9")) | round(Utf8("99999999999999999.9")) | round(Utf8("9999999999999999999999999999999999999.9")) |
+----------------------+---------------------------+------------------------------------+--------------------------------------------------------+
| 1000.0               | 100000000.0               | 1.0e17                             | 1.0e37                                                 |
+----------------------+---------------------------+------------------------------------+--------------------------------------------------------+

SELECT ROUND('-999.9'::DECIMAL(4,1)), ROUND('-99999999.9'::DECIMAL(9,1)), ROUND('-99999999999999999.9'::DECIMAL(18,1)), ROUND('-9999999999999999999999999999999999999.9'::DECIMAL(38,1));

+-----------------------+----------------------------+-------------------------------------+---------------------------------------------------------+
| round(Utf8("-999.9")) | round(Utf8("-99999999.9")) | round(Utf8("-99999999999999999.9")) | round(Utf8("-9999999999999999999999999999999999999.9")) |
+-----------------------+----------------------------+-------------------------------------+---------------------------------------------------------+
| -1000.0               | -100000000.0               | -1.0e17                             | -1.0e37                                                 |
+-----------------------+----------------------------+-------------------------------------+---------------------------------------------------------+

-- round with precision
SELECT ROUND('100.3908147521'::DECIMAL(18,10), 0)::VARCHAR,
       ROUND('100.3908147521'::DECIMAL(18,10), 1)::VARCHAR,
       ROUND('100.3908147521'::DECIMAL(18,10), 2)::VARCHAR,
       ROUND('100.3908147521'::DECIMAL(18,10), 3)::VARCHAR,
       ROUND('100.3908147521'::DECIMAL(18,10), 4)::VARCHAR,
       ROUND('100.3908147521'::DECIMAL(18,10), 5)::VARCHAR,
       ROUND('100.3908147521'::DECIMAL(18,10), 6)::VARCHAR,
       ROUND('100.3908147521'::DECIMAL(18,10), 7)::VARCHAR,
       ROUND('100.3908147521'::DECIMAL(18,10), 8)::VARCHAR,
       ROUND('100.3908147521'::DECIMAL(18,10), 9)::VARCHAR,
       ROUND('100.3908147521'::DECIMAL(18,10), 10)::VARCHAR,
       ROUND('100.3908147521'::DECIMAL(18,10), 100000)::VARCHAR,
       ROUND(NULL::DECIMAL, 0);

+----------------------------------------+----------------------------------------+----------------------------------------+----------------------------------------+----------------------------------------+----------------------------------------+----------------------------------------+----------------------------------------+----------------------------------------+----------------------------------------+-----------------------------------------+---------------------------------------------+----------------------+
| round(Utf8("100.3908147521"),Int64(0)) | round(Utf8("100.3908147521"),Int64(1)) | round(Utf8("100.3908147521"),Int64(2)) | round(Utf8("100.3908147521"),Int64(3)) | round(Utf8("100.3908147521"),Int64(4)) | round(Utf8("100.3908147521"),Int64(5)) | round(Utf8("100.3908147521"),Int64(6)) | round(Utf8("100.3908147521"),Int64(7)) | round(Utf8("100.3908147521"),Int64(8)) | round(Utf8("100.3908147521"),Int64(9)) | round(Utf8("100.3908147521"),Int64(10)) | round(Utf8("100.3908147521"),Int64(100000)) | round(NULL,Int64(0)) |
+----------------------------------------+----------------------------------------+----------------------------------------+----------------------------------------+----------------------------------------+----------------------------------------+----------------------------------------+----------------------------------------+----------------------------------------+----------------------------------------+-----------------------------------------+---------------------------------------------+----------------------+
| 100.0 | 100.4 | 100.39 | 100.391 | 100.3908 | 100.39081 | 100.390815 | 100.3908148 | 100.39081475 | 100.390814752 | 100.3908147521 | NaN | |
+----------------------------------------+----------------------------------------+----------------------------------------+----------------------------------------+----------------------------------------+----------------------------------------+----------------------------------------+----------------------------------------+----------------------------------------+----------------------------------------+-----------------------------------------+---------------------------------------------+----------------------+
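The 100000-digit column coming back NaN, rather than the value unchanged or an error, is consistent with ROUND being evaluated through Float64 instead of natively on Decimal128: scaling by 10^100000 overflows to infinity and the scale-back divide produces NaN. A minimal probe of (presumably) the same path, using a plain float literal; the NaN expectation is read off the table above, not a documented guarantee:

-- presumably the same float path: x * 10^p overflows to inf, and the divide back gives NaN
SELECT ROUND(100.3908147521, 100000);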
-- negative precision
SELECT ROUND('1049578239572094512.32415'::DECIMAL(30,10), 0)::VARCHAR,
       ROUND('1049578239572094512.32415'::DECIMAL(30,10), -1)::VARCHAR,
       ROUND('1049578239572094512.32415'::DECIMAL(30,10), -2)::VARCHAR,
       ROUND('1049578239572094512.32415'::DECIMAL(30,10), -3)::VARCHAR,
       ROUND('1049578239572094512.32415'::DECIMAL(30,10), -4)::VARCHAR,
       ROUND('1049578239572094512.32415'::DECIMAL(30,10), -5)::VARCHAR,
       ROUND('1049578239572094512.32415'::DECIMAL(30,10), -6)::VARCHAR,
       ROUND('1049578239572094512.32415'::DECIMAL(30,10), -7)::VARCHAR,
       ROUND('1049578239572094512.32415'::DECIMAL(30,10), -8)::VARCHAR,
       ROUND('1049578239572094512.32415'::DECIMAL(30,10), -9)::VARCHAR,
       ROUND('1049578239572094512.32415'::DECIMAL(30,10), -10)::VARCHAR,
       ROUND('1049578239572094512.32415'::DECIMAL(30,10), -11)::VARCHAR,
       ROUND('1049578239572094512.32415'::DECIMAL(30,10), -12)::VARCHAR,
       ROUND('1049578239572094512.32415'::DECIMAL(30,10), -13)::VARCHAR,
       ROUND('1049578239572094512.32415'::DECIMAL(30,10), -14)::VARCHAR,
       ROUND('1049578239572094512.32415'::DECIMAL(30,10), -15)::VARCHAR,
       ROUND('1049578239572094512.32415'::DECIMAL(30,10), -16)::VARCHAR,
       ROUND('1049578239572094512.32415'::DECIMAL(30,10), -18)::VARCHAR,
       ROUND('1049578239572094512.32415'::DECIMAL(30,10), -19)::VARCHAR,
       ROUND('1049578239572094512.32415'::DECIMAL(30,10), -20)::VARCHAR,
       ROUND('1049578239572094512.32415'::DECIMAL(30,10), -19842)::VARCHAR;
+---------------------------------------------------+----------------------------------------------------+----------------------------------------------------+----------------------------------------------------+----------------------------------------------------+----------------------------------------------------+----------------------------------------------------+----------------------------------------------------+----------------------------------------------------+----------------------------------------------------+-----------------------------------------------------+-----------------------------------------------------+-----------------------------------------------------+-----------------------------------------------------+-----------------------------------------------------+-----------------------------------------------------+-----------------------------------------------------+-----------------------------------------------------+-----------------------------------------------------+-----------------------------------------------------+--------------------------------------------------------+
| round(Utf8("1049578239572094512.32415"),Int64(0)) | round(Utf8("1049578239572094512.32415"),Int64(-1)) | round(Utf8("1049578239572094512.32415"),Int64(-2)) | round(Utf8("1049578239572094512.32415"),Int64(-3)) | round(Utf8("1049578239572094512.32415"),Int64(-4)) | round(Utf8("1049578239572094512.32415"),Int64(-5)) | round(Utf8("1049578239572094512.32415"),Int64(-6)) | round(Utf8("1049578239572094512.32415"),Int64(-7)) | round(Utf8("1049578239572094512.32415"),Int64(-8)) | round(Utf8("1049578239572094512.32415"),Int64(-9)) | round(Utf8("1049578239572094512.32415"),Int64(-10)) | round(Utf8("1049578239572094512.32415"),Int64(-11)) | round(Utf8("1049578239572094512.32415"),Int64(-12)) | round(Utf8("1049578239572094512.32415"),Int64(-13)) | round(Utf8("1049578239572094512.32415"),Int64(-14)) | round(Utf8("1049578239572094512.32415"),Int64(-15)) | round(Utf8("1049578239572094512.32415"),Int64(-16)) | round(Utf8("1049578239572094512.32415"),Int64(-18)) | round(Utf8("1049578239572094512.32415"),Int64(-19)) | round(Utf8("1049578239572094512.32415"),Int64(-20)) | round(Utf8("1049578239572094512.32415"),Int64(-19842)) |
+---------------------------------------------------+----------------------------------------------------+----------------------------------------------------+----------------------------------------------------+----------------------------------------------------+----------------------------------------------------+----------------------------------------------------+----------------------------------------------------+----------------------------------------------------+----------------------------------------------------+-----------------------------------------------------+-----------------------------------------------------+-----------------------------------------------------+-----------------------------------------------------+-----------------------------------------------------+-----------------------------------------------------+-----------------------------------------------------+-----------------------------------------------------+-----------------------------------------------------+-----------------------------------------------------+--------------------------------------------------------+
| 1.0495782395720946e18 | 1.0495782395720947e18 | 1.0495782395720946e18 | 1.049578239572095e18 | 1.04957823957209e18 | 1.0495782395721e18 | 1.049578239572e18 | 1.04957823957e18 | 1.0495782396e18 | 1.0495782399999999e18 | 1.04957824e18 | 1.0495782e18 | 1.049578e18 | 1.04958e18 | 1.0496e18 | 1.0499999999999999e18 | 1.05e18 | 9.999999999999999e17 | 0.0 | 0.0 | NaN |
+---------------------------------------------------+----------------------------------------------------+----------------------------------------------------+----------------------------------------------------+----------------------------------------------------+----------------------------------------------------+----------------------------------------------------+----------------------------------------------------+----------------------------------------------------+----------------------------------------------------+-----------------------------------------------------+-----------------------------------------------------+-----------------------------------------------------+-----------------------------------------------------+-----------------------------------------------------+-----------------------------------------------------+-----------------------------------------------------+-----------------------------------------------------+-----------------------------------------------------+-----------------------------------------------------+--------------------------------------------------------+
-- negative values
SELECT ROUND('-100.3908147521'::DECIMAL(18,10), 0)::VARCHAR,
       ROUND('-100.3908147521'::DECIMAL(18,10), 1)::VARCHAR,
       ROUND('-100.3908147521'::DECIMAL(18,10), 2)::VARCHAR,
       ROUND('-100.3908147521'::DECIMAL(18,10), 3)::VARCHAR,
       ROUND('-100.3908147521'::DECIMAL(18,10), 4)::VARCHAR,
       ROUND('-100.3908147521'::DECIMAL(18,10), 5)::VARCHAR,
       ROUND('-100.3908147521'::DECIMAL(18,10), 6)::VARCHAR,
       ROUND('-100.3908147521'::DECIMAL(18,10), 7)::VARCHAR,
       ROUND('-100.3908147521'::DECIMAL(18,10), 8)::VARCHAR,
       ROUND('-100.3908147521'::DECIMAL(18,10), 9)::VARCHAR,
       ROUND('-100.3908147521'::DECIMAL(18,10), 10)::VARCHAR,
       ROUND('-100.3908147521'::DECIMAL(18,10), 100000)::VARCHAR,
       ROUND(NULL::DECIMAL, 0);
+-----------------------------------------+-----------------------------------------+-----------------------------------------+-----------------------------------------+-----------------------------------------+-----------------------------------------+-----------------------------------------+-----------------------------------------+-----------------------------------------+-----------------------------------------+------------------------------------------+----------------------------------------------+----------------------+
| round(Utf8("-100.3908147521"),Int64(0)) | round(Utf8("-100.3908147521"),Int64(1)) | round(Utf8("-100.3908147521"),Int64(2)) | round(Utf8("-100.3908147521"),Int64(3)) | round(Utf8("-100.3908147521"),Int64(4)) | round(Utf8("-100.3908147521"),Int64(5)) | round(Utf8("-100.3908147521"),Int64(6)) | round(Utf8("-100.3908147521"),Int64(7)) | round(Utf8("-100.3908147521"),Int64(8)) | round(Utf8("-100.3908147521"),Int64(9)) | round(Utf8("-100.3908147521"),Int64(10)) | round(Utf8("-100.3908147521"),Int64(100000)) | round(NULL,Int64(0)) |
+-----------------------------------------+-----------------------------------------+-----------------------------------------+-----------------------------------------+-----------------------------------------+-----------------------------------------+-----------------------------------------+-----------------------------------------+-----------------------------------------+-----------------------------------------+------------------------------------------+----------------------------------------------+----------------------+
| -100.0 | -100.4 | -100.39 | -100.391 | -100.3908 | -100.39081 | -100.390815 | -100.3908148 | -100.39081475 | -100.390814752 | -100.3908147521 | NaN | |
+-----------------------------------------+-----------------------------------------+-----------------------------------------+-----------------------------------------+-----------------------------------------+-----------------------------------------+-----------------------------------------+-----------------------------------------+-----------------------------------------+-----------------------------------------+------------------------------------------+----------------------------------------------+----------------------+
SELECT ROUND('-1049578239572094512.32415'::DECIMAL(30,10), 0)::VARCHAR,
       ROUND('-1049578239572094512.32415'::DECIMAL(30,10), -1)::VARCHAR,
       ROUND('-1049578239572094512.32415'::DECIMAL(30,10), -2)::VARCHAR,
       ROUND('-1049578239572094512.32415'::DECIMAL(30,10), -3)::VARCHAR,
       ROUND('-1049578239572094512.32415'::DECIMAL(30,10), -4)::VARCHAR,
       ROUND('-1049578239572094512.32415'::DECIMAL(30,10), -5)::VARCHAR,
       ROUND('-1049578239572094512.32415'::DECIMAL(30,10), -6)::VARCHAR,
       ROUND('-1049578239572094512.32415'::DECIMAL(30,10), -7)::VARCHAR,
       ROUND('-1049578239572094512.32415'::DECIMAL(30,10), -8)::VARCHAR,
       ROUND('-1049578239572094512.32415'::DECIMAL(30,10), -9)::VARCHAR,
       ROUND('-1049578239572094512.32415'::DECIMAL(30,10), -10)::VARCHAR,
       ROUND('-1049578239572094512.32415'::DECIMAL(30,10), -11)::VARCHAR,
       ROUND('-1049578239572094512.32415'::DECIMAL(30,10), -12)::VARCHAR,
       ROUND('-1049578239572094512.32415'::DECIMAL(30,10), -13)::VARCHAR,
       ROUND('-1049578239572094512.32415'::DECIMAL(30,10), -14)::VARCHAR,
       ROUND('-1049578239572094512.32415'::DECIMAL(30,10), -15)::VARCHAR,
       ROUND('-1049578239572094512.32415'::DECIMAL(30,10), -16)::VARCHAR,
       ROUND('-1049578239572094512.32415'::DECIMAL(30,10), -18)::VARCHAR,
       ROUND('-1049578239572094512.32415'::DECIMAL(30,10), -19)::VARCHAR,
       ROUND('-1049578239572094512.32415'::DECIMAL(30,10), -20)::VARCHAR,
       ROUND('-1049578239572094512.32415'::DECIMAL(30,10), -19842)::VARCHAR;
+----------------------------------------------------+-----------------------------------------------------+-----------------------------------------------------+-----------------------------------------------------+-----------------------------------------------------+-----------------------------------------------------+-----------------------------------------------------+-----------------------------------------------------+-----------------------------------------------------+-----------------------------------------------------+------------------------------------------------------+------------------------------------------------------+------------------------------------------------------+------------------------------------------------------+------------------------------------------------------+------------------------------------------------------+------------------------------------------------------+------------------------------------------------------+------------------------------------------------------+------------------------------------------------------+---------------------------------------------------------+
| round(Utf8("-1049578239572094512.32415"),Int64(0)) | round(Utf8("-1049578239572094512.32415"),Int64(-1)) | round(Utf8("-1049578239572094512.32415"),Int64(-2)) | round(Utf8("-1049578239572094512.32415"),Int64(-3)) | round(Utf8("-1049578239572094512.32415"),Int64(-4)) | round(Utf8("-1049578239572094512.32415"),Int64(-5)) | round(Utf8("-1049578239572094512.32415"),Int64(-6)) | round(Utf8("-1049578239572094512.32415"),Int64(-7)) | round(Utf8("-1049578239572094512.32415"),Int64(-8)) | round(Utf8("-1049578239572094512.32415"),Int64(-9)) | round(Utf8("-1049578239572094512.32415"),Int64(-10)) | round(Utf8("-1049578239572094512.32415"),Int64(-11)) | round(Utf8("-1049578239572094512.32415"),Int64(-12)) | round(Utf8("-1049578239572094512.32415"),Int64(-13)) | round(Utf8("-1049578239572094512.32415"),Int64(-14)) | round(Utf8("-1049578239572094512.32415"),Int64(-15)) | round(Utf8("-1049578239572094512.32415"),Int64(-16)) | round(Utf8("-1049578239572094512.32415"),Int64(-18)) | round(Utf8("-1049578239572094512.32415"),Int64(-19)) | round(Utf8("-1049578239572094512.32415"),Int64(-20)) | round(Utf8("-1049578239572094512.32415"),Int64(-19842)) |
+----------------------------------------------------+-----------------------------------------------------+-----------------------------------------------------+-----------------------------------------------------+-----------------------------------------------------+-----------------------------------------------------+-----------------------------------------------------+-----------------------------------------------------+-----------------------------------------------------+-----------------------------------------------------+------------------------------------------------------+------------------------------------------------------+------------------------------------------------------+------------------------------------------------------+------------------------------------------------------+------------------------------------------------------+------------------------------------------------------+------------------------------------------------------+------------------------------------------------------+------------------------------------------------------+---------------------------------------------------------+
| -1.0495782395720946e18 | -1.0495782395720947e18 | -1.0495782395720946e18 | -1.049578239572095e18 | -1.04957823957209e18 | -1.0495782395721e18 | -1.049578239572e18 | -1.04957823957e18 | -1.0495782396e18 | -1.0495782399999999e18 | -1.04957824e18 | -1.0495782e18 | -1.049578e18 | -1.04958e18 | -1.0496e18 | -1.0499999999999999e18 | -1.05e18 | -9.999999999999999e17 | 0.0 | 0.0 | NaN |
+----------------------------------------------------+-----------------------------------------------------+-----------------------------------------------------+-----------------------------------------------------+-----------------------------------------------------+-----------------------------------------------------+-----------------------------------------------------+-----------------------------------------------------+-----------------------------------------------------+-----------------------------------------------------+------------------------------------------------------+------------------------------------------------------+------------------------------------------------------+------------------------------------------------------+------------------------------------------------------+------------------------------------------------------+------------------------------------------------------+------------------------------------------------------+------------------------------------------------------+------------------------------------------------------+---------------------------------------------------------+
SELECT ROUND(12::DECIMAL(3,0));

+------------------+
| round(Int64(12)) |
+------------------+
| 12.0             |
+------------------+

-- null precision becomes null (postgres behavior)
SELECT ROUND(12::DECIMAL(3,0), NULL);

+-----------------------+
| round(Int64(12),NULL) |
+-----------------------+
|                       |
+-----------------------+

-- different types for ROUND
SELECT ROUND('-100.3'::DECIMAL(4,1), 1)::VARCHAR,
       ROUND('104.3'::DECIMAL(4,1), 0)::VARCHAR,
       ROUND('104.3'::DECIMAL(4,1), -1)::VARCHAR;

+--------------------------------+-------------------------------+--------------------------------+
| round(Utf8("-100.3"),Int64(1)) | round(Utf8("104.3"),Int64(0)) | round(Utf8("104.3"),Int64(-1)) |
+--------------------------------+-------------------------------+--------------------------------+
| -100.3                         | 104.0                         | 100.0                          |
+--------------------------------+-------------------------------+--------------------------------+

SELECT ROUND('-100.3'::DECIMAL(9,1), 1)::VARCHAR,
       ROUND('104.3'::DECIMAL(9,1), 0)::VARCHAR,
       ROUND('104.3'::DECIMAL(9,1), -1)::VARCHAR;

+--------------------------------+-------------------------------+--------------------------------+
| round(Utf8("-100.3"),Int64(1)) | round(Utf8("104.3"),Int64(0)) | round(Utf8("104.3"),Int64(-1)) |
+--------------------------------+-------------------------------+--------------------------------+
| -100.3                         | 104.0                         | 100.0                          |
+--------------------------------+-------------------------------+--------------------------------+

SELECT ROUND('-100.3'::DECIMAL(18,1), 1)::VARCHAR,
       ROUND('104.3'::DECIMAL(18,1), 0)::VARCHAR,
       ROUND('104.3'::DECIMAL(18,1), -1)::VARCHAR;

+--------------------------------+-------------------------------+--------------------------------+
| round(Utf8("-100.3"),Int64(1)) | round(Utf8("104.3"),Int64(0)) | round(Utf8("104.3"),Int64(-1)) |
+--------------------------------+-------------------------------+--------------------------------+
| -100.3                         | 104.0                         | 100.0                          |
+--------------------------------+-------------------------------+--------------------------------+

-- use decimal in sub-query
SELECT (SELECT '1.0'::DECIMAL(2,1));

+-------------+
| Utf8("1.0") |
+-------------+
| 1.0         |
+-------------+

-- test join with decimals
CREATE TABLE tmp_table(i INTEGER, ts timestamp time index);

Affected Rows: 0

INSERT INTO tmp_table VALUES (1, 1000), (2, 2000), (3, 3000);

Affected Rows: 3

-- SQLNESS SORT_RESULT 3 1
SELECT * FROM tmp_table;

+---+---------------------+
| i | ts                  |
+---+---------------------+
| 1 | 1970-01-01T00:00:01 |
| 2 | 1970-01-01T00:00:02 |
| 3 | 1970-01-01T00:00:03 |
+---+---------------------+

-- SQLNESS SORT_RESULT 3 1
SELECT * FROM tmp_table JOIN decimals ON decimals.ts = tmp_table.ts;

+---+---------------------+------+---------------------+
| i | ts                  | d    | ts                  |
+---+---------------------+------+---------------------+
| 1 | 1970-01-01T00:00:01 | 0.10 | 1970-01-01T00:00:01 |
| 2 | 1970-01-01T00:00:02 | 0.20 | 1970-01-01T00:00:02 |
+---+---------------------+------+---------------------+

DROP TABLE decimals;

Affected Rows: 0

DROP TABLE tmp_table;

Affected Rows: 0
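Two behaviors are pinned down by the transcript above: unary ROUND breaks ties away from zero (10.5 gives 11.0, -10.5 gives -11.0), and at the DECIMAL(18,1) and DECIMAL(38,1) boundaries the results come back in Float64 scientific notation (1.0e17, 1.0e37), so FLOOR/CEIL/ROUND evidently pass through a float conversion rather than operating on Decimal128 digits. A minimal sketch isolating the tie-breaking rule; the table name round_demo is illustrative, and it assumes ROUND accepts a decimal column the same way it accepts the casted literals above:

CREATE TABLE round_demo(d DECIMAL(4,1), ts timestamp time index);
INSERT INTO round_demo VALUES ('10.5', 1000), ('-10.5', 2000);
-- expected per the results above: -11.0 and 11.0, i.e. ties round away from zero
SELECT d, ROUND(d) FROM round_demo ORDER BY d;
DROP TABLE round_demo;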
207
tests/cases/standalone/common/types/decimal/decimal_ops.sql
Normal file
@@ -0,0 +1,207 @@
-- Some cases port from https://github.com/duckdb/duckdb/blob/main/test/sql/types/decimal/test_decimal_ops.test

CREATE TABLE decimals(d DECIMAL(3, 2), ts timestamp time index);

INSERT INTO decimals VALUES ('0.1',1000), ('0.2',2000);

-- SQLNESS SORT_RESULT 3 1
SELECT * FROM decimals;

-- ORDER BY

SELECT * FROM decimals ORDER BY d DESC;

-- equality

SELECT * FROM decimals WHERE d = '0.1'::DECIMAL(3,2);

-- greater than equals

-- SQLNESS SORT_RESULT 3 1
SELECT * FROM decimals WHERE d >= '0.1'::DECIMAL(3,2);

-- what about if we use different decimal scales?

SELECT * FROM decimals WHERE d = '0.1'::DECIMAL(9,5);

SELECT * FROM decimals WHERE d >= '0.1'::DECIMAL(9,5) ORDER BY 1;

-- what if we compare decimals with different scales and width (3,2) vs (9,1)

INSERT INTO decimals VALUES ('0.11',3000), ('0.21',4000);

SELECT * FROM decimals WHERE d = '0.1'::DECIMAL(9,1);

SELECT * FROM decimals WHERE d > '0.1'::DECIMAL(9,1) ORDER BY 1;

DELETE FROM decimals WHERE d <> d::DECIMAL(9,1);

SELECT * FROM decimals;

-- test ABS function

SELECT ABS('-0.1'::DECIMAL), ABS('0.1'::DECIMAL), ABS(NULL::DECIMAL);

SELECT ABS('-0.1'::DECIMAL(4,3)) AS col1, ABS('-0.1'::DECIMAL(9,3)) AS col2, ABS('-0.1'::DECIMAL(18,3)) AS col3, ABS('-0.1'::DECIMAL(38,3)) AS col4;

-- test CEIL function

SELECT CEIL('0.1'::DECIMAL), CEIL('-0.1'::DECIMAL), CEIL(NULL::DECIMAL);

SELECT CEIL('100.3'::DECIMAL), CEIL('-127012.3'::DECIMAL);

SELECT CEIL('10.5'::DECIMAL), CEIL('-10.5'::DECIMAL);

-- ceil function on the boundaries

SELECT CEIL('999.9'::DECIMAL(4,1)), CEIL('99999999.9'::DECIMAL(9,1)), CEIL('99999999999999999.9'::DECIMAL(18,1)), CEIL('9999999999999999999999999999999999999.9'::DECIMAL(38,1));

SELECT CEIL('-999.9'::DECIMAL(4,1)), CEIL('-99999999.9'::DECIMAL(9,1)), CEIL('-99999999999999999.9'::DECIMAL(18,1)), CEIL('-9999999999999999999999999999999999999.9'::DECIMAL(38,1));

-- test FLOOR function

SELECT FLOOR('0.1'::DECIMAL), FLOOR('-0.1'::DECIMAL), FLOOR(NULL::DECIMAL);

SELECT FLOOR('100.3'::DECIMAL), FLOOR('-127012.3'::DECIMAL);

SELECT FLOOR('10.5'::DECIMAL), FLOOR('-10.5'::DECIMAL);

-- floor function on the boundaries

SELECT FLOOR('999.9'::DECIMAL(4,1)), FLOOR('99999999.9'::DECIMAL(9,1)), FLOOR('99999999999999999.9'::DECIMAL(18,1)), FLOOR('9999999999999999999999999999999999999.9'::DECIMAL(38,1));

SELECT FLOOR('-999.9'::DECIMAL(4,1)), FLOOR('-99999999.9'::DECIMAL(9,1)), FLOOR('-99999999999999999.9'::DECIMAL(18,1)), FLOOR('-9999999999999999999999999999999999999.9'::DECIMAL(38,1));

-- test unary ROUND function

SELECT ROUND('0.1'::DECIMAL), ROUND('-0.1'::DECIMAL), ROUND(NULL::DECIMAL);

SELECT ROUND('100.3'::DECIMAL), ROUND('-127012.3'::DECIMAL);

SELECT ROUND('10.5'::DECIMAL), ROUND('-10.5'::DECIMAL);

-- round function on the boundaries

SELECT ROUND('999.9'::DECIMAL(4,1)), ROUND('99999999.9'::DECIMAL(9,1)), ROUND('99999999999999999.9'::DECIMAL(18,1)), ROUND('9999999999999999999999999999999999999.9'::DECIMAL(38,1));

SELECT ROUND('-999.9'::DECIMAL(4,1)), ROUND('-99999999.9'::DECIMAL(9,1)), ROUND('-99999999999999999.9'::DECIMAL(18,1)), ROUND('-9999999999999999999999999999999999999.9'::DECIMAL(38,1));

-- round with precision

SELECT ROUND('100.3908147521'::DECIMAL(18,10), 0)::VARCHAR,
       ROUND('100.3908147521'::DECIMAL(18,10), 1)::VARCHAR,
       ROUND('100.3908147521'::DECIMAL(18,10), 2)::VARCHAR,
       ROUND('100.3908147521'::DECIMAL(18,10), 3)::VARCHAR,
       ROUND('100.3908147521'::DECIMAL(18,10), 4)::VARCHAR,
       ROUND('100.3908147521'::DECIMAL(18,10), 5)::VARCHAR,
       ROUND('100.3908147521'::DECIMAL(18,10), 6)::VARCHAR,
       ROUND('100.3908147521'::DECIMAL(18,10), 7)::VARCHAR,
       ROUND('100.3908147521'::DECIMAL(18,10), 8)::VARCHAR,
       ROUND('100.3908147521'::DECIMAL(18,10), 9)::VARCHAR,
       ROUND('100.3908147521'::DECIMAL(18,10), 10)::VARCHAR,
       ROUND('100.3908147521'::DECIMAL(18,10), 100000)::VARCHAR,
       ROUND(NULL::DECIMAL, 0);

-- negative precision

SELECT ROUND('1049578239572094512.32415'::DECIMAL(30,10), 0)::VARCHAR,
       ROUND('1049578239572094512.32415'::DECIMAL(30,10), -1)::VARCHAR,
       ROUND('1049578239572094512.32415'::DECIMAL(30,10), -2)::VARCHAR,
       ROUND('1049578239572094512.32415'::DECIMAL(30,10), -3)::VARCHAR,
       ROUND('1049578239572094512.32415'::DECIMAL(30,10), -4)::VARCHAR,
       ROUND('1049578239572094512.32415'::DECIMAL(30,10), -5)::VARCHAR,
       ROUND('1049578239572094512.32415'::DECIMAL(30,10), -6)::VARCHAR,
       ROUND('1049578239572094512.32415'::DECIMAL(30,10), -7)::VARCHAR,
       ROUND('1049578239572094512.32415'::DECIMAL(30,10), -8)::VARCHAR,
       ROUND('1049578239572094512.32415'::DECIMAL(30,10), -9)::VARCHAR,
       ROUND('1049578239572094512.32415'::DECIMAL(30,10), -10)::VARCHAR,
       ROUND('1049578239572094512.32415'::DECIMAL(30,10), -11)::VARCHAR,
       ROUND('1049578239572094512.32415'::DECIMAL(30,10), -12)::VARCHAR,
       ROUND('1049578239572094512.32415'::DECIMAL(30,10), -13)::VARCHAR,
       ROUND('1049578239572094512.32415'::DECIMAL(30,10), -14)::VARCHAR,
       ROUND('1049578239572094512.32415'::DECIMAL(30,10), -15)::VARCHAR,
       ROUND('1049578239572094512.32415'::DECIMAL(30,10), -16)::VARCHAR,
       ROUND('1049578239572094512.32415'::DECIMAL(30,10), -18)::VARCHAR,
       ROUND('1049578239572094512.32415'::DECIMAL(30,10), -19)::VARCHAR,
       ROUND('1049578239572094512.32415'::DECIMAL(30,10), -20)::VARCHAR,
       ROUND('1049578239572094512.32415'::DECIMAL(30,10), -19842)::VARCHAR;

-- negative values

SELECT ROUND('-100.3908147521'::DECIMAL(18,10), 0)::VARCHAR,
       ROUND('-100.3908147521'::DECIMAL(18,10), 1)::VARCHAR,
       ROUND('-100.3908147521'::DECIMAL(18,10), 2)::VARCHAR,
       ROUND('-100.3908147521'::DECIMAL(18,10), 3)::VARCHAR,
       ROUND('-100.3908147521'::DECIMAL(18,10), 4)::VARCHAR,
       ROUND('-100.3908147521'::DECIMAL(18,10), 5)::VARCHAR,
       ROUND('-100.3908147521'::DECIMAL(18,10), 6)::VARCHAR,
       ROUND('-100.3908147521'::DECIMAL(18,10), 7)::VARCHAR,
       ROUND('-100.3908147521'::DECIMAL(18,10), 8)::VARCHAR,
       ROUND('-100.3908147521'::DECIMAL(18,10), 9)::VARCHAR,
       ROUND('-100.3908147521'::DECIMAL(18,10), 10)::VARCHAR,
       ROUND('-100.3908147521'::DECIMAL(18,10), 100000)::VARCHAR,
       ROUND(NULL::DECIMAL, 0);

SELECT ROUND('-1049578239572094512.32415'::DECIMAL(30,10), 0)::VARCHAR,
       ROUND('-1049578239572094512.32415'::DECIMAL(30,10), -1)::VARCHAR,
       ROUND('-1049578239572094512.32415'::DECIMAL(30,10), -2)::VARCHAR,
       ROUND('-1049578239572094512.32415'::DECIMAL(30,10), -3)::VARCHAR,
       ROUND('-1049578239572094512.32415'::DECIMAL(30,10), -4)::VARCHAR,
       ROUND('-1049578239572094512.32415'::DECIMAL(30,10), -5)::VARCHAR,
       ROUND('-1049578239572094512.32415'::DECIMAL(30,10), -6)::VARCHAR,
       ROUND('-1049578239572094512.32415'::DECIMAL(30,10), -7)::VARCHAR,
       ROUND('-1049578239572094512.32415'::DECIMAL(30,10), -8)::VARCHAR,
       ROUND('-1049578239572094512.32415'::DECIMAL(30,10), -9)::VARCHAR,
       ROUND('-1049578239572094512.32415'::DECIMAL(30,10), -10)::VARCHAR,
       ROUND('-1049578239572094512.32415'::DECIMAL(30,10), -11)::VARCHAR,
       ROUND('-1049578239572094512.32415'::DECIMAL(30,10), -12)::VARCHAR,
       ROUND('-1049578239572094512.32415'::DECIMAL(30,10), -13)::VARCHAR,
       ROUND('-1049578239572094512.32415'::DECIMAL(30,10), -14)::VARCHAR,
       ROUND('-1049578239572094512.32415'::DECIMAL(30,10), -15)::VARCHAR,
       ROUND('-1049578239572094512.32415'::DECIMAL(30,10), -16)::VARCHAR,
       ROUND('-1049578239572094512.32415'::DECIMAL(30,10), -18)::VARCHAR,
       ROUND('-1049578239572094512.32415'::DECIMAL(30,10), -19)::VARCHAR,
       ROUND('-1049578239572094512.32415'::DECIMAL(30,10), -20)::VARCHAR,
       ROUND('-1049578239572094512.32415'::DECIMAL(30,10), -19842)::VARCHAR;

SELECT ROUND(12::DECIMAL(3,0));

-- null precision becomes null (postgres behavior)

SELECT ROUND(12::DECIMAL(3,0), NULL);

-- different types for ROUND

SELECT ROUND('-100.3'::DECIMAL(4,1), 1)::VARCHAR,
       ROUND('104.3'::DECIMAL(4,1), 0)::VARCHAR,
       ROUND('104.3'::DECIMAL(4,1), -1)::VARCHAR;

SELECT ROUND('-100.3'::DECIMAL(9,1), 1)::VARCHAR,
       ROUND('104.3'::DECIMAL(9,1), 0)::VARCHAR,
       ROUND('104.3'::DECIMAL(9,1), -1)::VARCHAR;

SELECT ROUND('-100.3'::DECIMAL(18,1), 1)::VARCHAR,
       ROUND('104.3'::DECIMAL(18,1), 0)::VARCHAR,
       ROUND('104.3'::DECIMAL(18,1), -1)::VARCHAR;

-- use decimal in sub-query

SELECT (SELECT '1.0'::DECIMAL(2,1));

-- test join with decimals

CREATE TABLE tmp_table(i INTEGER, ts timestamp time index);

INSERT INTO tmp_table VALUES (1, 1000), (2, 2000), (3, 3000);

-- SQLNESS SORT_RESULT 3 1
SELECT * FROM tmp_table;

-- SQLNESS SORT_RESULT 3 1
SELECT * FROM tmp_table JOIN decimals ON decimals.ts = tmp_table.ts;

DROP TABLE decimals;

DROP TABLE tmp_table;
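One statement in this file is easy to misread: DELETE FROM decimals WHERE d <> d::DECIMAL(9,1) deletes exactly the rows that are not representable at scale 1 (the '0.11' and '0.21' inserts), because the cast rounds to one fractional digit and the self-comparison then fails. A standalone illustration of the predicate, assuming the same cast and comparison semantics as the rest of the suite:

-- '0.11' rescaled to DECIMAL(9,1) becomes 0.1, so the inequality is true and the row is deleted
SELECT '0.11'::DECIMAL(3,2) <> '0.11'::DECIMAL(3,2)::DECIMAL(9,1);
-- '0.2' is exact at scale 1, so the inequality is false and the row survives
SELECT '0.2'::DECIMAL(3,2) <> '0.2'::DECIMAL(3,2)::DECIMAL(9,1);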
@@ -0,0 +1,66 @@
select '1.023450000001'::DECIMAL(5,4);

+------------------------+
| Utf8("1.023450000001") |
+------------------------+
| 1.0235                 |
+------------------------+

select '1.234499999'::DECIMAL(4,3);

+---------------------+
| Utf8("1.234499999") |
+---------------------+
| 1.234               |
+---------------------+

select '1.23499999'::DECIMAL(4,3);

+--------------------+
| Utf8("1.23499999") |
+--------------------+
| 1.235              |
+--------------------+

select '1.234499999'::DECIMAL(5,4);

+---------------------+
| Utf8("1.234499999") |
+---------------------+
| 1.2345              |
+---------------------+

-- arrow-rs is a little strange about the conversion behavior of negative numbers.
-- issue: https://github.com/apache/arrow-datafusion/issues/8326
select '-1.023450000001'::DECIMAL(5,4);

+-------------------------+
| Utf8("-1.023450000001") |
+-------------------------+
| -0.9765                 |
+-------------------------+

select '-1.234499999'::DECIMAL(4,3);

+----------------------+
| Utf8("-1.234499999") |
+----------------------+
| -0.766               |
+----------------------+

select '-1.23499999'::DECIMAL(4,3);

+---------------------+
| Utf8("-1.23499999") |
+---------------------+
| -0.765              |
+---------------------+

select '-1.234499999'::DECIMAL(5,4);

+----------------------+
| Utf8("-1.234499999") |
+----------------------+
| -0.7655              |
+----------------------+
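Note what the negative half of this transcript actually asserts: the expected values (-0.9765, -0.766, -0.765, -0.7655) are not the mathematically rounded results but whatever arrow-rs currently produces for negative strings, recorded verbatim with the upstream issue linked above. If that issue is fixed, these expectations would flip to the mirror images of the positive cases, along the lines of:

-- hypothetical expectations once negative casts mirror the positive ones:
select '-1.023450000001'::DECIMAL(5,4); -- would be -1.0235 (currently -0.9765)
select '-1.234499999'::DECIMAL(4,3);    -- would be -1.234 (currently -0.766)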
@@ -0,0 +1,17 @@
select '1.023450000001'::DECIMAL(5,4);

select '1.234499999'::DECIMAL(4,3);

select '1.23499999'::DECIMAL(4,3);

select '1.234499999'::DECIMAL(5,4);

-- arrow-rs is a little strange about the conversion behavior of negative numbers.
-- issue: https://github.com/apache/arrow-datafusion/issues/8326
select '-1.023450000001'::DECIMAL(5,4);

select '-1.234499999'::DECIMAL(4,3);

select '-1.23499999'::DECIMAL(4,3);

select '-1.234499999'::DECIMAL(5,4);
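This bare .sql file pairs with the transcript above: the sqlness runner replays each statement and diffs the captured output against the .result file, so adding a case means appending a statement here and regenerating the expected block. For instance, a scale-0 target is not covered yet; a hypothetical addition, with the expectation hedged on the positive half-up behavior generalizing:

-- would be expected to print 3 if the half-up rounding above generalizes to scale 0
select '2.5'::DECIMAL(2,0);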
@@ -0,0 +1,60 @@
CREATE TABLE decimals(d DECIMAL(18,1) , ts timestamp time index);

Affected Rows: 0

INSERT INTO decimals VALUES (99000000000000000.0, 1000);

Affected Rows: 1

SELECT d + 1 FROM decimals;

+-----------------------+
| decimals.d + Int64(1) |
+-----------------------+
| 99000000000000001.0   |
+-----------------------+

SELECT d + 1000000000000000.0 FROM decimals;

+----------------------------------------+
| decimals.d + Float64(1000000000000000) |
+----------------------------------------+
| 1.0e17                                 |
+----------------------------------------+

SELECT -1 - d FROM decimals;

+------------------------+
| Int64(-1) - decimals.d |
+------------------------+
| -99000000000000001.0   |
+------------------------+

SELECT -1000000000000000.0 - d FROM decimals;

+-----------------------------------------+
| Float64(-1000000000000000) - decimals.d |
+-----------------------------------------+
| -1.0e17                                 |
+-----------------------------------------+

SELECT 1 * d FROM decimals;

+-----------------------+
| Int64(1) * decimals.d |
+-----------------------+
| 99000000000000000.0   |
+-----------------------+

SELECT 2 * d FROM decimals;

+-----------------------+
| Int64(2) * decimals.d |
+-----------------------+
| 198000000000000000.0  |
+-----------------------+

DROP TABLE decimals;

Affected Rows: 0
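The expected outputs make the coercion rule visible: with an integer literal the expression stays exact (99000000000000001.0 keeps its final digit), while a Float64 literal pulls the whole expression into floating point and the sum collapses to 1.0e17. To keep a constant offset exact, the literal can be cast instead of written as a float; a hedged sketch, assuming decimal-plus-decimal addition takes the same exact path as the decimal-plus-integer case above:

-- stays in decimal arithmetic instead of round-tripping through Float64
SELECT d + '1000000000000000.0'::DECIMAL(18,1) FROM decimals;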
@@ -0,0 +1,17 @@
CREATE TABLE decimals(d DECIMAL(18,1) , ts timestamp time index);

INSERT INTO decimals VALUES (99000000000000000.0, 1000);

SELECT d + 1 FROM decimals;

SELECT d + 1000000000000000.0 FROM decimals;

SELECT -1 - d FROM decimals;

SELECT -1000000000000000.0 - d FROM decimals;

SELECT 1 * d FROM decimals;

SELECT 2 * d FROM decimals;

DROP TABLE decimals;
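A closing note on the DDL repeated through these files: every scratch table declares a ts timestamp time index column that the decimal assertions never read, because GreptimeDB requires a time index on each table; it is the fixed cost of a standalone test table. The minimal pattern, as used throughout (the table name scratch is illustrative):

CREATE TABLE scratch(d DECIMAL(3, 2), ts timestamp time index);
INSERT INTO scratch VALUES ('0.1', 1000);
DROP TABLE scratch;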