Mirror of https://github.com/GreptimeTeam/greptimedb.git (synced 2025-12-27 00:19:58 +00:00)

Compare commits (64 commits)
| SHA1 |
| --- |
| fe1cfbf2b3 |
| ded874da04 |
| fe2d29a2a0 |
| b388829a96 |
| 8e7c027bf5 |
| 9d5d7c1f9a |
| efe5eeef14 |
| ca54b05be3 |
| d67314789c |
| 6c4b8b63a5 |
| 62a0defd63 |
| 291d9d55a4 |
| 90301a6250 |
| c66d3090b6 |
| 656050722c |
| b741a7181b |
| dd23d47743 |
| 80aaa7725e |
| c24de8b908 |
| f382a7695f |
| 1ea43da9ea |
| 6113f46284 |
| 6d8a502430 |
| 2d992f4f12 |
| 7daf24c47f |
| 567f5105bf |
| 78962015dd |
| 1138f32af9 |
| 53fc14a50b |
| 1895a5478b |
| f0c953f84a |
| 1a38f36d2d |
| cb94bd45d3 |
| b298b35b3b |
| 164232e073 |
| 9a5fa49955 |
| 92d6d4e64a |
| 021ec7b6ac |
| 0710e6ff36 |
| db3a07804e |
| bdd3d2d9ce |
| b81d3a28e6 |
| 89b86c87a2 |
| 0b0ed03ee6 |
| ea4a71b387 |
| 4cd5ec7769 |
| c8f4a85720 |
| 024dac8171 |
| 918be099cd |
| 91dbac4141 |
| e935bf7574 |
| f7872654cc |
| 547730a467 |
| 49f22f0fc5 |
| 2ae2a6674e |
| c8cf3b1677 |
| 7aae19aa8b |
| b90267dd80 |
| 9fa9156bde |
| ce900e850a |
| 5274c5a407 |
| 0b13ac6e16 |
| 8ab6136d1c |
| e39f49fe56 |
.coderabbit.yaml (new file, 15 lines)
@@ -0,0 +1,15 @@
# yaml-language-server: $schema=https://coderabbit.ai/integrations/schema.v2.json
language: "en-US"
early_access: false
reviews:
  profile: "chill"
  request_changes_workflow: false
  high_level_summary: true
  poem: true
  review_status: true
  collapse_walkthrough: false
  auto_review:
    enabled: false
    drafts: false
chat:
  auto_reply: true
@@ -62,12 +62,13 @@ runs:
      # Get proper backtraces in mac Sonoma. Currently there's an issue with the new
      # linker that prevents backtraces from getting printed correctly.
      #
      # <https://github.com/rust-lang/rust/issues/113783>
    - name: Run integration tests
      if: ${{ inputs.disable-run-tests == 'false' }}
      shell: bash
      env:
        CARGO_BUILD_RUSTFLAGS: "-Clink-arg=-Wl,-ld_classic"
        SQLNESS_OPTS: "--preserve-state"
      run: |
        make test sqlness-test

@@ -81,7 +82,7 @@ runs:
    - name: Build greptime binary
      shell: bash
      env:
        CARGO_BUILD_RUSTFLAGS: "-Clink-arg=-Wl,-ld_classic"
      run: |
        make build \
@@ -40,7 +40,7 @@ runs:
    - name: Install Python
      uses: actions/setup-python@v5
      with:
        python-version: '3.10'
        python-version: "3.10"

    - name: Install PyArrow Package
      shell: pwsh

@@ -62,13 +62,14 @@ runs:
      env:
        RUSTUP_WINDOWS_PATH_ADD_BIN: 1 # Workaround for https://github.com/nextest-rs/nextest/issues/1493
        RUST_BACKTRACE: 1
        SQLNESS_OPTS: "--preserve-state"

    - name: Upload sqlness logs
      if: ${{ failure() }} # Only upload logs when the integration tests failed.
      uses: actions/upload-artifact@v4
      with:
        name: sqlness-logs
        path: /tmp/greptime-*.log
        path: C:\tmp\greptime-*.log
        retention-days: 3

    - name: Build greptime binary
@@ -2,7 +2,7 @@ name: Setup Etcd cluster
description: Deploy Etcd cluster on Kubernetes
inputs:
  etcd-replicas:
    default: 3
    default: 1
    description: "Etcd replicas"
  namespace:
    default: "etcd-cluster"
@@ -1,18 +1,13 @@
meta:
  config: |-
    [runtime]
    read_rt_size = 8
    write_rt_size = 8
    bg_rt_size = 8
    global_rt_size = 4
datanode:
  config: |-
    [runtime]
    read_rt_size = 8
    write_rt_size = 8
    bg_rt_size = 8
    global_rt_size = 4
    compact_rt_size = 2
frontend:
  config: |-
    [runtime]
    read_rt_size = 8
    write_rt_size = 8
    bg_rt_size = 8
    global_rt_size = 4
@@ -1,19 +1,16 @@
meta:
  config: |-
    [runtime]
    read_rt_size = 8
    write_rt_size = 8
    bg_rt_size = 8

    global_rt_size = 4

    [datanode]
    [datanode.client]
    timeout = "60s"
datanode:
  config: |-
    [runtime]
    read_rt_size = 8
    write_rt_size = 8
    bg_rt_size = 8
    global_rt_size = 4
    compact_rt_size = 2

    [storage]
    cache_path = "/data/greptimedb/s3cache"

@@ -21,9 +18,7 @@ datanode:
frontend:
  config: |-
    [runtime]
    read_rt_size = 8
    write_rt_size = 8
    bg_rt_size = 8
    global_rt_size = 4

    [meta_client]
    ddl_timeout = "60s"
@@ -1,9 +1,7 @@
meta:
  config: |-
    [runtime]
    read_rt_size = 8
    write_rt_size = 8
    bg_rt_size = 8
    global_rt_size = 4

    [datanode]
    [datanode.client]

@@ -11,15 +9,12 @@ meta:
datanode:
  config: |-
    [runtime]
    read_rt_size = 8
    write_rt_size = 8
    bg_rt_size = 8
    global_rt_size = 4
    compact_rt_size = 2
frontend:
  config: |-
    [runtime]
    read_rt_size = 8
    write_rt_size = 8
    bg_rt_size = 8
    global_rt_size = 4

    [meta_client]
    ddl_timeout = "60s"
@@ -1,9 +1,7 @@
meta:
  config: |-
    [runtime]
    read_rt_size = 8
    write_rt_size = 8
    bg_rt_size = 8
    global_rt_size = 4

    [wal]
    provider = "kafka"

@@ -17,9 +15,8 @@ meta:
datanode:
  config: |-
    [runtime]
    read_rt_size = 8
    write_rt_size = 8
    bg_rt_size = 8
    global_rt_size = 4
    compact_rt_size = 2

    [wal]
    provider = "kafka"

@@ -28,9 +25,7 @@ datanode:
frontend:
  config: |-
    [runtime]
    read_rt_size = 8
    write_rt_size = 8
    bg_rt_size = 8
    global_rt_size = 4

    [meta_client]
    ddl_timeout = "60s"

@@ -43,3 +38,8 @@ objectStorage:
  credentials:
    accessKeyId: rootuser
    secretAccessKey: rootpass123
remoteWal:
  enabled: true
  kafka:
    brokerEndpoints:
      - "kafka.kafka-cluster.svc.cluster.local:9092"
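These snippets read as Helm values overrides for a GreptimeDB cluster used in CI (runtime thread pools, remote WAL over Kafka, S3-backed object storage). A minimal sketch of applying such a values file; the chart name, release name, namespace, and file name are assumptions, not taken from this diff:

    # Hypothetical install using one of the values files above.
    helm repo add greptime https://greptimeteam.github.io/helm-charts/
    helm upgrade --install mydb greptime/greptimedb-cluster \
      --namespace greptimedb-cluster --create-namespace \
      -f values-with-remote-wal.yaml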
.github/workflows/develop.yml (1 changed line, vendored)

@@ -141,6 +141,7 @@ jobs:
    runs-on: ubuntu-latest
    timeout-minutes: 60
    strategy:
      fail-fast: false
      matrix:
        target: [ "fuzz_create_table", "fuzz_alter_table", "fuzz_create_database", "fuzz_create_logical_table", "fuzz_alter_logical_table", "fuzz_insert", "fuzz_insert_logical_table" ]
    steps:
.github/workflows/nightly-ci.yml (16 changed lines, vendored)

@@ -51,13 +51,15 @@ jobs:
      - name: Rust Cache
        uses: Swatinem/rust-cache@v2
      - name: Run sqlness
        run: cargo sqlness
        run: make sqlness-test
        env:
          SQLNESS_OPTS: "--preserve-state"
      - name: Upload sqlness logs
        if: always()
        uses: actions/upload-artifact@v4
        with:
          name: sqlness-logs
          path: /tmp/greptime-*.log
          path: C:\tmp\greptime-*.log
          retention-days: 3

  test-on-windows:

@@ -109,11 +111,7 @@ jobs:

  check-status:
    name: Check status
    needs: [
      sqlness-test,
      sqlness-windows,
      test-on-windows,
    ]
    needs: [sqlness-test, sqlness-windows, test-on-windows]
    if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
    runs-on: ubuntu-20.04
    outputs:

@@ -127,9 +125,7 @@ jobs:
  notification:
    if: ${{ github.repository == 'GreptimeTeam/greptimedb' && always() }} # Not requiring successful dependent jobs, always run.
    name: Send notification to Greptime team
    needs: [
      check-status
    ]
    needs: [check-status]
    runs-on: ubuntu-20.04
    env:
      SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL_DEVELOP_CHANNEL }}
@@ -14,7 +14,7 @@ Follow our [README](https://github.com/GreptimeTeam/greptimedb#readme) to get th

It can feel intimidating to contribute to a complex project, but it can also be exciting and fun. These general notes will help everyone participate in this communal activity.

- Follow the [Code of Conduct](https://github.com/GreptimeTeam/greptimedb/blob/main/CODE_OF_CONDUCT.md)
- Follow the [Code of Conduct](https://github.com/GreptimeTeam/.github/blob/main/.github/CODE_OF_CONDUCT.md)
- Small changes make huge differences. We will happily accept a PR making a single character change if it helps move forward. Don't wait to have everything working.
- Check the closed issues before opening your issue.
- Try to follow the existing style of the code.

@@ -30,7 +30,7 @@ Pull requests are great, but we accept all kinds of other help if you like. Such

## Code of Conduct

Also, there are things that we are not looking for because they don't match the goals of the product or benefit the community. Please read [Code of Conduct](https://github.com/GreptimeTeam/greptimedb/blob/main/CODE_OF_CONDUCT.md); we hope everyone can keep good manners and become an honored member.
Also, there are things that we are not looking for because they don't match the goals of the product or benefit the community. Please read [Code of Conduct](https://github.com/GreptimeTeam/.github/blob/main/.github/CODE_OF_CONDUCT.md); we hope everyone can keep good manners and become an honored member.

## License
Cargo.lock (146 changed lines, generated)
@@ -214,7 +214,7 @@ checksum = "d301b3b94cb4b2f23d7917810addbbaff90738e0ca2be692bd027e70d7e0330c"
|
||||
|
||||
[[package]]
|
||||
name = "api"
|
||||
version = "0.9.0"
|
||||
version = "0.9.1"
|
||||
dependencies = [
|
||||
"common-base",
|
||||
"common-decimal",
|
||||
@@ -762,7 +762,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "auth"
|
||||
version = "0.9.0"
|
||||
version = "0.9.1"
|
||||
dependencies = [
|
||||
"api",
|
||||
"async-trait",
|
||||
@@ -1277,7 +1277,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "cache"
|
||||
version = "0.9.0"
|
||||
version = "0.9.1"
|
||||
dependencies = [
|
||||
"catalog",
|
||||
"common-error",
|
||||
@@ -1285,7 +1285,7 @@ dependencies = [
|
||||
"common-meta",
|
||||
"moka",
|
||||
"snafu 0.8.3",
|
||||
"substrait 0.9.0",
|
||||
"substrait 0.9.1",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
@@ -1312,7 +1312,7 @@ checksum = "37b2a672a2cb129a2e41c10b1224bb368f9f37a2b16b612598138befd7b37eb5"
|
||||
|
||||
[[package]]
|
||||
name = "catalog"
|
||||
version = "0.9.0"
|
||||
version = "0.9.1"
|
||||
dependencies = [
|
||||
"api",
|
||||
"arrow",
|
||||
@@ -1637,7 +1637,7 @@ checksum = "4b82cf0babdbd58558212896d1a4272303a57bdb245c2bf1147185fb45640e70"
|
||||
|
||||
[[package]]
|
||||
name = "client"
|
||||
version = "0.9.0"
|
||||
version = "0.9.1"
|
||||
dependencies = [
|
||||
"api",
|
||||
"arc-swap",
|
||||
@@ -1667,7 +1667,7 @@ dependencies = [
|
||||
"serde_json",
|
||||
"snafu 0.8.3",
|
||||
"substrait 0.37.3",
|
||||
"substrait 0.9.0",
|
||||
"substrait 0.9.1",
|
||||
"tokio",
|
||||
"tokio-stream",
|
||||
"tonic 0.11.0",
|
||||
@@ -1697,7 +1697,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "cmd"
|
||||
version = "0.9.0"
|
||||
version = "0.9.1"
|
||||
dependencies = [
|
||||
"async-trait",
|
||||
"auth",
|
||||
@@ -1753,7 +1753,7 @@ dependencies = [
|
||||
"session",
|
||||
"snafu 0.8.3",
|
||||
"store-api",
|
||||
"substrait 0.9.0",
|
||||
"substrait 0.9.1",
|
||||
"table",
|
||||
"temp-env",
|
||||
"tempfile",
|
||||
@@ -1799,7 +1799,7 @@ checksum = "55b672471b4e9f9e95499ea597ff64941a309b2cdbffcc46f2cc5e2d971fd335"
|
||||
|
||||
[[package]]
|
||||
name = "common-base"
|
||||
version = "0.9.0"
|
||||
version = "0.9.1"
|
||||
dependencies = [
|
||||
"anymap",
|
||||
"bitvec",
|
||||
@@ -1815,7 +1815,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "common-catalog"
|
||||
version = "0.9.0"
|
||||
version = "0.9.1"
|
||||
dependencies = [
|
||||
"chrono",
|
||||
"common-error",
|
||||
@@ -1826,7 +1826,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "common-config"
|
||||
version = "0.9.0"
|
||||
version = "0.9.1"
|
||||
dependencies = [
|
||||
"common-base",
|
||||
"common-error",
|
||||
@@ -1849,7 +1849,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "common-datasource"
|
||||
version = "0.9.0"
|
||||
version = "0.9.1"
|
||||
dependencies = [
|
||||
"arrow",
|
||||
"arrow-schema",
|
||||
@@ -1886,7 +1886,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "common-decimal"
|
||||
version = "0.9.0"
|
||||
version = "0.9.1"
|
||||
dependencies = [
|
||||
"bigdecimal",
|
||||
"common-error",
|
||||
@@ -1899,7 +1899,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "common-error"
|
||||
version = "0.9.0"
|
||||
version = "0.9.1"
|
||||
dependencies = [
|
||||
"snafu 0.8.3",
|
||||
"strum 0.25.0",
|
||||
@@ -1908,7 +1908,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "common-frontend"
|
||||
version = "0.9.0"
|
||||
version = "0.9.1"
|
||||
dependencies = [
|
||||
"api",
|
||||
"async-trait",
|
||||
@@ -1923,7 +1923,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "common-function"
|
||||
version = "0.9.0"
|
||||
version = "0.9.1"
|
||||
dependencies = [
|
||||
"api",
|
||||
"arc-swap",
|
||||
@@ -1949,6 +1949,7 @@ dependencies = [
|
||||
"serde_json",
|
||||
"session",
|
||||
"snafu 0.8.3",
|
||||
"sql",
|
||||
"statrs",
|
||||
"store-api",
|
||||
"table",
|
||||
@@ -1956,7 +1957,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "common-greptimedb-telemetry"
|
||||
version = "0.9.0"
|
||||
version = "0.9.1"
|
||||
dependencies = [
|
||||
"async-trait",
|
||||
"common-runtime",
|
||||
@@ -1973,7 +1974,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "common-grpc"
|
||||
version = "0.9.0"
|
||||
version = "0.9.1"
|
||||
dependencies = [
|
||||
"api",
|
||||
"arrow-flight",
|
||||
@@ -1999,7 +2000,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "common-grpc-expr"
|
||||
version = "0.9.0"
|
||||
version = "0.9.1"
|
||||
dependencies = [
|
||||
"api",
|
||||
"common-base",
|
||||
@@ -2017,7 +2018,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "common-macro"
|
||||
version = "0.9.0"
|
||||
version = "0.9.1"
|
||||
dependencies = [
|
||||
"arc-swap",
|
||||
"common-query",
|
||||
@@ -2031,7 +2032,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "common-mem-prof"
|
||||
version = "0.9.0"
|
||||
version = "0.9.1"
|
||||
dependencies = [
|
||||
"common-error",
|
||||
"common-macro",
|
||||
@@ -2044,7 +2045,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "common-meta"
|
||||
version = "0.9.0"
|
||||
version = "0.9.1"
|
||||
dependencies = [
|
||||
"anymap2",
|
||||
"api",
|
||||
@@ -2099,11 +2100,11 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "common-plugins"
|
||||
version = "0.9.0"
|
||||
version = "0.9.1"
|
||||
|
||||
[[package]]
|
||||
name = "common-procedure"
|
||||
version = "0.9.0"
|
||||
version = "0.9.1"
|
||||
dependencies = [
|
||||
"async-stream",
|
||||
"async-trait",
|
||||
@@ -2129,7 +2130,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "common-procedure-test"
|
||||
version = "0.9.0"
|
||||
version = "0.9.1"
|
||||
dependencies = [
|
||||
"async-trait",
|
||||
"common-procedure",
|
||||
@@ -2137,7 +2138,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "common-query"
|
||||
version = "0.9.0"
|
||||
version = "0.9.1"
|
||||
dependencies = [
|
||||
"api",
|
||||
"async-trait",
|
||||
@@ -2162,7 +2163,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "common-recordbatch"
|
||||
version = "0.9.0"
|
||||
version = "0.9.1"
|
||||
dependencies = [
|
||||
"arc-swap",
|
||||
"common-error",
|
||||
@@ -2181,7 +2182,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "common-runtime"
|
||||
version = "0.9.0"
|
||||
version = "0.9.1"
|
||||
dependencies = [
|
||||
"async-trait",
|
||||
"common-error",
|
||||
@@ -2203,7 +2204,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "common-telemetry"
|
||||
version = "0.9.0"
|
||||
version = "0.9.1"
|
||||
dependencies = [
|
||||
"atty",
|
||||
"backtrace",
|
||||
@@ -2230,7 +2231,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "common-test-util"
|
||||
version = "0.9.0"
|
||||
version = "0.9.1"
|
||||
dependencies = [
|
||||
"client",
|
||||
"common-query",
|
||||
@@ -2242,7 +2243,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "common-time"
|
||||
version = "0.9.0"
|
||||
version = "0.9.1"
|
||||
dependencies = [
|
||||
"arrow",
|
||||
"chrono",
|
||||
@@ -2258,7 +2259,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "common-version"
|
||||
version = "0.9.0"
|
||||
version = "0.9.1"
|
||||
dependencies = [
|
||||
"build-data",
|
||||
"const_format",
|
||||
@@ -2269,7 +2270,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "common-wal"
|
||||
version = "0.9.0"
|
||||
version = "0.9.1"
|
||||
dependencies = [
|
||||
"common-base",
|
||||
"common-error",
|
||||
@@ -3065,7 +3066,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "datanode"
|
||||
version = "0.9.0"
|
||||
version = "0.9.1"
|
||||
dependencies = [
|
||||
"api",
|
||||
"arrow-flight",
|
||||
@@ -3114,7 +3115,7 @@ dependencies = [
|
||||
"session",
|
||||
"snafu 0.8.3",
|
||||
"store-api",
|
||||
"substrait 0.9.0",
|
||||
"substrait 0.9.1",
|
||||
"table",
|
||||
"tokio",
|
||||
"toml 0.8.14",
|
||||
@@ -3123,7 +3124,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "datatypes"
|
||||
version = "0.9.0"
|
||||
version = "0.9.1"
|
||||
dependencies = [
|
||||
"arrow",
|
||||
"arrow-array",
|
||||
@@ -3684,7 +3685,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "file-engine"
|
||||
version = "0.9.0"
|
||||
version = "0.9.1"
|
||||
dependencies = [
|
||||
"api",
|
||||
"async-trait",
|
||||
@@ -3780,7 +3781,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "flow"
|
||||
version = "0.9.0"
|
||||
version = "0.9.1"
|
||||
dependencies = [
|
||||
"api",
|
||||
"arrow-schema",
|
||||
@@ -3834,7 +3835,7 @@ dependencies = [
|
||||
"snafu 0.8.3",
|
||||
"store-api",
|
||||
"strum 0.25.0",
|
||||
"substrait 0.9.0",
|
||||
"substrait 0.9.1",
|
||||
"table",
|
||||
"tokio",
|
||||
"tonic 0.11.0",
|
||||
@@ -3881,7 +3882,7 @@ checksum = "6c2141d6d6c8512188a7891b4b01590a45f6dac67afb4f255c4124dbb86d4eaa"
|
||||
|
||||
[[package]]
|
||||
name = "frontend"
|
||||
version = "0.9.0"
|
||||
version = "0.9.1"
|
||||
dependencies = [
|
||||
"api",
|
||||
"arc-swap",
|
||||
@@ -3896,6 +3897,7 @@ dependencies = [
|
||||
"common-datasource",
|
||||
"common-error",
|
||||
"common-frontend",
|
||||
"common-function",
|
||||
"common-grpc",
|
||||
"common-macro",
|
||||
"common-meta",
|
||||
@@ -4232,7 +4234,7 @@ checksum = "d2fabcfbdc87f4758337ca535fb41a6d701b65693ce38287d856d1674551ec9b"
|
||||
[[package]]
|
||||
name = "greptime-proto"
|
||||
version = "0.1.0"
|
||||
source = "git+https://github.com/GreptimeTeam/greptime-proto.git?rev=5c801650435d464891114502539b701c77a1b914#5c801650435d464891114502539b701c77a1b914"
|
||||
source = "git+https://github.com/GreptimeTeam/greptime-proto.git?rev=c437b55725b7f5224fe9d46db21072b4a682ee4b#c437b55725b7f5224fe9d46db21072b4a682ee4b"
|
||||
dependencies = [
|
||||
"prost 0.12.6",
|
||||
"serde",
|
||||
@@ -4991,7 +4993,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "index"
|
||||
version = "0.9.0"
|
||||
version = "0.9.1"
|
||||
dependencies = [
|
||||
"async-trait",
|
||||
"asynchronous-codec",
|
||||
@@ -5761,7 +5763,7 @@ checksum = "90ed8c1e510134f979dbc4f070f87d4313098b704861a105fe34231c70a3901c"
|
||||
|
||||
[[package]]
|
||||
name = "log-store"
|
||||
version = "0.9.0"
|
||||
version = "0.9.1"
|
||||
dependencies = [
|
||||
"async-stream",
|
||||
"async-trait",
|
||||
@@ -6068,7 +6070,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "meta-client"
|
||||
version = "0.9.0"
|
||||
version = "0.9.1"
|
||||
dependencies = [
|
||||
"api",
|
||||
"async-trait",
|
||||
@@ -6094,7 +6096,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "meta-srv"
|
||||
version = "0.9.0"
|
||||
version = "0.9.1"
|
||||
dependencies = [
|
||||
"api",
|
||||
"async-trait",
|
||||
@@ -6153,7 +6155,7 @@ dependencies = [
|
||||
[[package]]
|
||||
name = "meter-core"
|
||||
version = "0.1.0"
|
||||
source = "git+https://github.com/GreptimeTeam/greptime-meter.git?rev=80b72716dcde47ec4161478416a5c6c21343364d#80b72716dcde47ec4161478416a5c6c21343364d"
|
||||
source = "git+https://github.com/GreptimeTeam/greptime-meter.git?rev=049171eb16cb4249d8099751a0c46750d1fe88e7#049171eb16cb4249d8099751a0c46750d1fe88e7"
|
||||
dependencies = [
|
||||
"anymap",
|
||||
"once_cell",
|
||||
@@ -6163,14 +6165,14 @@ dependencies = [
|
||||
[[package]]
|
||||
name = "meter-macros"
|
||||
version = "0.1.0"
|
||||
source = "git+https://github.com/GreptimeTeam/greptime-meter.git?rev=80b72716dcde47ec4161478416a5c6c21343364d#80b72716dcde47ec4161478416a5c6c21343364d"
|
||||
source = "git+https://github.com/GreptimeTeam/greptime-meter.git?rev=049171eb16cb4249d8099751a0c46750d1fe88e7#049171eb16cb4249d8099751a0c46750d1fe88e7"
|
||||
dependencies = [
|
||||
"meter-core",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "metric-engine"
|
||||
version = "0.9.0"
|
||||
version = "0.9.1"
|
||||
dependencies = [
|
||||
"api",
|
||||
"aquamarine",
|
||||
@@ -6261,7 +6263,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "mito2"
|
||||
version = "0.9.0"
|
||||
version = "0.9.1"
|
||||
dependencies = [
|
||||
"api",
|
||||
"aquamarine",
|
||||
@@ -6908,7 +6910,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "object-store"
|
||||
version = "0.9.0"
|
||||
version = "0.9.1"
|
||||
dependencies = [
|
||||
"anyhow",
|
||||
"bytes",
|
||||
@@ -7155,7 +7157,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "operator"
|
||||
version = "0.9.0"
|
||||
version = "0.9.1"
|
||||
dependencies = [
|
||||
"api",
|
||||
"async-trait",
|
||||
@@ -7200,7 +7202,7 @@ dependencies = [
|
||||
"sql",
|
||||
"sqlparser 0.45.0 (git+https://github.com/GreptimeTeam/sqlparser-rs.git?rev=54a267ac89c09b11c0c88934690530807185d3e7)",
|
||||
"store-api",
|
||||
"substrait 0.9.0",
|
||||
"substrait 0.9.1",
|
||||
"table",
|
||||
"tokio",
|
||||
"tokio-util",
|
||||
@@ -7450,7 +7452,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "partition"
|
||||
version = "0.9.0"
|
||||
version = "0.9.1"
|
||||
dependencies = [
|
||||
"api",
|
||||
"async-trait",
|
||||
@@ -7739,7 +7741,7 @@ checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184"
|
||||
|
||||
[[package]]
|
||||
name = "pipeline"
|
||||
version = "0.9.0"
|
||||
version = "0.9.1"
|
||||
dependencies = [
|
||||
"api",
|
||||
"arrow",
|
||||
@@ -7897,7 +7899,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "plugins"
|
||||
version = "0.9.0"
|
||||
version = "0.9.1"
|
||||
dependencies = [
|
||||
"auth",
|
||||
"common-base",
|
||||
@@ -8166,7 +8168,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "promql"
|
||||
version = "0.9.0"
|
||||
version = "0.9.1"
|
||||
dependencies = [
|
||||
"ahash 0.8.11",
|
||||
"async-trait",
|
||||
@@ -8401,7 +8403,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "puffin"
|
||||
version = "0.9.0"
|
||||
version = "0.9.1"
|
||||
dependencies = [
|
||||
"async-compression 0.4.11",
|
||||
"async-trait",
|
||||
@@ -8523,7 +8525,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "query"
|
||||
version = "0.9.0"
|
||||
version = "0.9.1"
|
||||
dependencies = [
|
||||
"ahash 0.8.11",
|
||||
"api",
|
||||
@@ -8586,7 +8588,7 @@ dependencies = [
|
||||
"stats-cli",
|
||||
"store-api",
|
||||
"streaming-stats",
|
||||
"substrait 0.9.0",
|
||||
"substrait 0.9.1",
|
||||
"table",
|
||||
"tokio",
|
||||
"tokio-stream",
|
||||
@@ -9914,7 +9916,7 @@ checksum = "94143f37725109f92c262ed2cf5e59bce7498c01bcc1502d7b9afe439a4e9f49"
|
||||
|
||||
[[package]]
|
||||
name = "script"
|
||||
version = "0.9.0"
|
||||
version = "0.9.1"
|
||||
dependencies = [
|
||||
"api",
|
||||
"arc-swap",
|
||||
@@ -10207,7 +10209,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "servers"
|
||||
version = "0.9.0"
|
||||
version = "0.9.1"
|
||||
dependencies = [
|
||||
"aide",
|
||||
"api",
|
||||
@@ -10313,7 +10315,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "session"
|
||||
version = "0.9.0"
|
||||
version = "0.9.1"
|
||||
dependencies = [
|
||||
"api",
|
||||
"arc-swap",
|
||||
@@ -10324,6 +10326,7 @@ dependencies = [
|
||||
"common-telemetry",
|
||||
"common-time",
|
||||
"derive_builder 0.12.0",
|
||||
"meter-core",
|
||||
"snafu 0.8.3",
|
||||
"sql",
|
||||
]
|
||||
@@ -10613,7 +10616,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "sql"
|
||||
version = "0.9.0"
|
||||
version = "0.9.1"
|
||||
dependencies = [
|
||||
"api",
|
||||
"chrono",
|
||||
@@ -10673,7 +10676,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "sqlness-runner"
|
||||
version = "0.9.0"
|
||||
version = "0.9.1"
|
||||
dependencies = [
|
||||
"async-trait",
|
||||
"clap 4.5.7",
|
||||
@@ -10890,7 +10893,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "store-api"
|
||||
version = "0.9.0"
|
||||
version = "0.9.1"
|
||||
dependencies = [
|
||||
"api",
|
||||
"aquamarine",
|
||||
@@ -10899,6 +10902,7 @@ dependencies = [
|
||||
"common-base",
|
||||
"common-error",
|
||||
"common-macro",
|
||||
"common-meta",
|
||||
"common-recordbatch",
|
||||
"common-time",
|
||||
"common-wal",
|
||||
@@ -11058,7 +11062,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "substrait"
|
||||
version = "0.9.0"
|
||||
version = "0.9.1"
|
||||
dependencies = [
|
||||
"async-trait",
|
||||
"bytes",
|
||||
@@ -11259,7 +11263,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "table"
|
||||
version = "0.9.0"
|
||||
version = "0.9.1"
|
||||
dependencies = [
|
||||
"api",
|
||||
"async-trait",
|
||||
@@ -11524,7 +11528,7 @@ checksum = "3369f5ac52d5eb6ab48c6b4ffdc8efbcad6b89c765749064ba298f2c68a16a76"
|
||||
|
||||
[[package]]
|
||||
name = "tests-fuzz"
|
||||
version = "0.9.0"
|
||||
version = "0.9.1"
|
||||
dependencies = [
|
||||
"arbitrary",
|
||||
"async-trait",
|
||||
@@ -11566,7 +11570,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "tests-integration"
|
||||
version = "0.9.0"
|
||||
version = "0.9.1"
|
||||
dependencies = [
|
||||
"api",
|
||||
"arrow-flight",
|
||||
@@ -11626,7 +11630,7 @@ dependencies = [
|
||||
"sql",
|
||||
"sqlx",
|
||||
"store-api",
|
||||
"substrait 0.9.0",
|
||||
"substrait 0.9.1",
|
||||
"table",
|
||||
"tempfile",
|
||||
"time",
|
||||
|
||||
Cargo.toml (12 changed lines)

@@ -64,7 +64,7 @@ members = [
resolver = "2"

[workspace.package]
version = "0.9.0"
version = "0.9.1"
edition = "2021"
license = "Apache-2.0"

@@ -119,12 +119,12 @@ etcd-client = { version = "0.13" }
fst = "0.4.7"
futures = "0.3"
futures-util = "0.3"
greptime-proto = { git = "https://github.com/GreptimeTeam/greptime-proto.git", rev = "5c801650435d464891114502539b701c77a1b914" }
greptime-proto = { git = "https://github.com/GreptimeTeam/greptime-proto.git", rev = "c437b55725b7f5224fe9d46db21072b4a682ee4b" }
humantime = "2.1"
humantime-serde = "1.1"
itertools = "0.10"
lazy_static = "1.4"
meter-core = { git = "https://github.com/GreptimeTeam/greptime-meter.git", rev = "80b72716dcde47ec4161478416a5c6c21343364d" }
meter-core = { git = "https://github.com/GreptimeTeam/greptime-meter.git", rev = "049171eb16cb4249d8099751a0c46750d1fe88e7" }
mockall = "0.11.4"
moka = "0.12"
notify = "6.1"

@@ -183,7 +183,7 @@ auth = { path = "src/auth" }
cache = { path = "src/cache" }
catalog = { path = "src/catalog" }
client = { path = "src/client" }
cmd = { path = "src/cmd" }
cmd = { path = "src/cmd", default-features = false }
common-base = { path = "src/common/base" }
common-catalog = { path = "src/common/catalog" }
common-config = { path = "src/common/config" }

@@ -213,7 +213,7 @@ datanode = { path = "src/datanode" }
datatypes = { path = "src/datatypes" }
file-engine = { path = "src/file-engine" }
flow = { path = "src/flow" }
frontend = { path = "src/frontend" }
frontend = { path = "src/frontend", default-features = false }
index = { path = "src/index" }
log-store = { path = "src/log-store" }
meta-client = { path = "src/meta-client" }

@@ -238,7 +238,7 @@ table = { path = "src/table" }

[workspace.dependencies.meter-macros]
git = "https://github.com/GreptimeTeam/greptime-meter.git"
rev = "80b72716dcde47ec4161478416a5c6c21343364d"
rev = "049171eb16cb4249d8099751a0c46750d1fe88e7"

[profile.release]
debug = 1
Makefile (5 changed lines)

@@ -15,6 +15,7 @@ RUST_TOOLCHAIN ?= $(shell cat rust-toolchain.toml | grep channel | cut -d'"' -f2
CARGO_REGISTRY_CACHE ?= ${HOME}/.cargo/registry
ARCH := $(shell uname -m | sed 's/x86_64/amd64/' | sed 's/aarch64/arm64/')
OUTPUT_DIR := $(shell if [ "$(RELEASE)" = "true" ]; then echo "release"; elif [ ! -z "$(CARGO_PROFILE)" ]; then echo "$(CARGO_PROFILE)" ; else echo "debug"; fi)
SQLNESS_OPTS ?=

# The arguments for running integration tests.
ETCD_VERSION ?= v3.5.9

@@ -161,7 +162,7 @@ nextest: ## Install nextest tools.

.PHONY: sqlness-test
sqlness-test: ## Run sqlness test.
	cargo sqlness
	cargo sqlness ${SQLNESS_OPTS}

# Run fuzz test ${FUZZ_TARGET}.
RUNS ?= 1

@@ -172,7 +173,7 @@ fuzz:

.PHONY: fuzz-ls
fuzz-ls:
	cargo fuzz list --fuzz-dir tests-fuzz

.PHONY: check
check: ## Cargo check all the targets.
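With `SQLNESS_OPTS` threaded through the Makefile, extra flags reach the sqlness runner without editing the recipe. A minimal sketch of the invocation the nightly workflow above relies on, assuming it is run from the repository root:

    # Keep sqlness state around for debugging, as the CI job sets it.
    SQLNESS_OPTS="--preserve-state" make sqlness-test

    # Equivalent expansion of the make recipe.
    cargo sqlness --preserve-state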
README.md (21 changed lines)

@@ -6,7 +6,7 @@
  </picture>
</p>

<h2 align="center">Unified Time Series Database for Metrics, Events, and Logs</h2>
<h2 align="center">Unified Time Series Database for Metrics, Logs, and Events</h2>

<div align="center">
<h3 align="center">

@@ -50,7 +50,7 @@

## Introduction

**GreptimeDB** is an open-source unified time-series database for **Metrics**, **Events**, and **Logs** (also **Traces** in plan). You can gain real-time insights from Edge to Cloud at any scale.
**GreptimeDB** is an open-source unified time-series database for **Metrics**, **Logs**, and **Events** (also **Traces** in plan). You can gain real-time insights from Edge to Cloud at any scale.

## Why GreptimeDB

@@ -58,7 +58,7 @@ Our core developers have been building time-series data platforms for years. Bas

* **Unified all kinds of time series**

  GreptimeDB treats all time series as contextual events with timestamp, and thus unifies the processing of metrics and events. It supports analyzing metrics and events with SQL and PromQL, and doing streaming with continuous aggregation.
  GreptimeDB treats all time series as contextual events with timestamp, and thus unifies the processing of metrics, logs, and events. It supports analyzing metrics, logs, and events with SQL and PromQL, and doing streaming with continuous aggregation.

* **Cloud-Edge collaboration**

@@ -104,10 +104,10 @@ Read more about [Installation](https://docs.greptime.com/getting-started/install

## Getting Started

* [Quickstart](https://docs.greptime.com/getting-started/quick-start/overview)
* [Write Data](https://docs.greptime.com/user-guide/clients/overview)
* [Query Data](https://docs.greptime.com/user-guide/query-data/overview)
* [Operations](https://docs.greptime.com/user-guide/operations/overview)
* [Quickstart](https://docs.greptime.com/getting-started/quick-start)
* [User Guide](https://docs.greptime.com/user-guide/overview)
* [Demos](https://github.com/GreptimeTeam/demo-scene)
* [FAQ](https://docs.greptime.com/faq-and-others/faq)

## Build

@@ -150,9 +150,10 @@ Our official Grafana dashboard is available at [grafana](grafana/README.md) dire

## Project Status

The current version has not yet reached General Availability version standards.
In line with our Greptime 2024 Roadmap, we plan to achieve a production-level
version with the update to v1.0 in August. [[Join Force]](https://github.com/GreptimeTeam/greptimedb/issues/3412)
The current version has not yet reached the standards for General Availability.
According to our Greptime 2024 Roadmap, we aim to achieve a production-level version with the release of v1.0 by the end of 2024. [Join Us](https://github.com/GreptimeTeam/greptimedb/issues/3412)

We welcome you to test and use GreptimeDB. Some users have already adopted it in their production environments. If you're interested in trying it out, please use the latest stable release available.

## Community
@@ -16,9 +16,8 @@
| `enable_telemetry` | Bool | `true` | Enable telemetry to collect anonymous usage data. |
| `default_timezone` | String | `None` | The default timezone of the server. |
| `runtime` | -- | -- | The runtime options. |
| `runtime.read_rt_size` | Integer | `8` | The number of threads to execute the runtime for global read operations. |
| `runtime.write_rt_size` | Integer | `8` | The number of threads to execute the runtime for global write operations. |
| `runtime.bg_rt_size` | Integer | `4` | The number of threads to execute the runtime for global background operations. |
| `runtime.global_rt_size` | Integer | `8` | The number of threads to execute the runtime for global read operations. |
| `runtime.compact_rt_size` | Integer | `4` | The number of threads to execute the runtime for global write operations. |
| `http` | -- | -- | The HTTP server options. |
| `http.addr` | String | `127.0.0.1:4000` | The address to bind the HTTP server. |
| `http.timeout` | String | `30s` | HTTP request timeout. Set to 0 to disable timeout. |

@@ -129,6 +128,8 @@
| `region_engine.mito.inverted_index.apply_on_query` | String | `auto` | Whether to apply the index on query<br/>- `auto`: automatically (default)<br/>- `disable`: never |
| `region_engine.mito.inverted_index.mem_threshold_on_create` | String | `auto` | Memory threshold for performing an external sort during index creation.<br/>- `auto`: automatically determine the threshold based on the system memory size (default)<br/>- `unlimited`: no memory limit<br/>- `[size]` e.g. `64MB`: fixed memory threshold |
| `region_engine.mito.inverted_index.intermediate_path` | String | `""` | Deprecated, use `region_engine.mito.index.aux_path` instead. |
| `region_engine.mito.inverted_index.metadata_cache_size` | String | `64MiB` | Cache size for inverted index metadata. |
| `region_engine.mito.inverted_index.content_cache_size` | String | `128MiB` | Cache size for inverted index content. |
| `region_engine.mito.fulltext_index` | -- | -- | The options for full-text index in Mito engine. |
| `region_engine.mito.fulltext_index.create_on_flush` | String | `auto` | Whether to create the index on flush.<br/>- `auto`: automatically (default)<br/>- `disable`: never |
| `region_engine.mito.fulltext_index.create_on_compaction` | String | `auto` | Whether to create the index on compaction.<br/>- `auto`: automatically (default)<br/>- `disable`: never |

@@ -166,12 +167,10 @@

| Key | Type | Default | Descriptions |
| --- | -----| ------- | ----------- |
| `mode` | String | `standalone` | The running mode of the datanode. It can be `standalone` or `distributed`. |
| `default_timezone` | String | `None` | The default timezone of the server. |
| `runtime` | -- | -- | The runtime options. |
| `runtime.read_rt_size` | Integer | `8` | The number of threads to execute the runtime for global read operations. |
| `runtime.write_rt_size` | Integer | `8` | The number of threads to execute the runtime for global write operations. |
| `runtime.bg_rt_size` | Integer | `4` | The number of threads to execute the runtime for global background operations. |
| `runtime.global_rt_size` | Integer | `8` | The number of threads to execute the runtime for global read operations. |
| `runtime.compact_rt_size` | Integer | `4` | The number of threads to execute the runtime for global write operations. |
| `heartbeat` | -- | -- | The heartbeat options. |
| `heartbeat.interval` | String | `18s` | Interval for sending heartbeat messages to the metasrv. |
| `heartbeat.retry_interval` | String | `3s` | Interval for retrying to send heartbeat messages to the metasrv. |

@@ -261,23 +260,22 @@
| `store_key_prefix` | String | `""` | If it's not empty, the metasrv will store all data with this key prefix. |
| `enable_region_failover` | Bool | `false` | Whether to enable region failover.<br/>This feature is only available on GreptimeDB running on cluster mode and<br/>- Using Remote WAL<br/>- Using shared storage (e.g., s3). |
| `runtime` | -- | -- | The runtime options. |
| `runtime.read_rt_size` | Integer | `8` | The number of threads to execute the runtime for global read operations. |
| `runtime.write_rt_size` | Integer | `8` | The number of threads to execute the runtime for global write operations. |
| `runtime.bg_rt_size` | Integer | `4` | The number of threads to execute the runtime for global background operations. |
| `runtime.global_rt_size` | Integer | `8` | The number of threads to execute the runtime for global read operations. |
| `runtime.compact_rt_size` | Integer | `4` | The number of threads to execute the runtime for global write operations. |
| `procedure` | -- | -- | Procedure storage options. |
| `procedure.max_retry_times` | Integer | `12` | Procedure max retry time. |
| `procedure.retry_delay` | String | `500ms` | Initial retry delay of procedures, increases exponentially |
| `procedure.max_metadata_value_size` | String | `1500KiB` | Auto split large value<br/>GreptimeDB procedure uses etcd as the default metadata storage backend.<br/>The etcd the maximum size of any request is 1.5 MiB<br/>1500KiB = 1536KiB (1.5MiB) - 36KiB (reserved size of key)<br/>Comments out the `max_metadata_value_size`, for don't split large value (no limit). |
| `failure_detector` | -- | -- | -- |
| `failure_detector.threshold` | Float | `8.0` | -- |
| `failure_detector.min_std_deviation` | String | `100ms` | -- |
| `failure_detector.acceptable_heartbeat_pause` | String | `10000ms` | -- |
| `failure_detector.first_heartbeat_estimate` | String | `1000ms` | -- |
| `failure_detector.threshold` | Float | `8.0` | The threshold value used by the failure detector to determine failure conditions. |
| `failure_detector.min_std_deviation` | String | `100ms` | The minimum standard deviation of the heartbeat intervals, used to calculate acceptable variations. |
| `failure_detector.acceptable_heartbeat_pause` | String | `10000ms` | The acceptable pause duration between heartbeats, used to determine if a heartbeat interval is acceptable. |
| `failure_detector.first_heartbeat_estimate` | String | `1000ms` | The initial estimate of the heartbeat interval used by the failure detector. |
| `datanode` | -- | -- | Datanode options. |
| `datanode.client` | -- | -- | Datanode client options. |
| `datanode.client.timeout` | String | `10s` | -- |
| `datanode.client.connect_timeout` | String | `10s` | -- |
| `datanode.client.tcp_nodelay` | Bool | `true` | -- |
| `datanode.client.timeout` | String | `10s` | Operation timeout. |
| `datanode.client.connect_timeout` | String | `10s` | Connect server timeout. |
| `datanode.client.tcp_nodelay` | Bool | `true` | `TCP_NODELAY` option for accepted connections. |
| `wal` | -- | -- | -- |
| `wal.provider` | String | `raft_engine` | -- |
| `wal.broker_endpoints` | Array | -- | The broker endpoints of the Kafka cluster. |

@@ -337,9 +335,8 @@
| `grpc.tls.key_path` | String | `None` | Private key file path. |
| `grpc.tls.watch` | Bool | `false` | Watch for Certificate and key file change and auto reload.<br/>For now, gRPC tls config does not support auto reload. |
| `runtime` | -- | -- | The runtime options. |
| `runtime.read_rt_size` | Integer | `8` | The number of threads to execute the runtime for global read operations. |
| `runtime.write_rt_size` | Integer | `8` | The number of threads to execute the runtime for global write operations. |
| `runtime.bg_rt_size` | Integer | `4` | The number of threads to execute the runtime for global background operations. |
| `runtime.global_rt_size` | Integer | `8` | The number of threads to execute the runtime for global read operations. |
| `runtime.compact_rt_size` | Integer | `4` | The number of threads to execute the runtime for global write operations. |
| `heartbeat` | -- | -- | The heartbeat options. |
| `heartbeat.interval` | String | `3s` | Interval for sending heartbeat messages to the metasrv. |
| `heartbeat.retry_interval` | String | `3s` | Interval for retrying to send heartbeat messages to the metasrv. |
@@ -73,11 +73,9 @@ watch = false
## The runtime options.
[runtime]
## The number of threads to execute the runtime for global read operations.
read_rt_size = 8
global_rt_size = 8
## The number of threads to execute the runtime for global write operations.
write_rt_size = 8
## The number of threads to execute the runtime for global background operations.
bg_rt_size = 4
compact_rt_size = 4

## The heartbeat options.
[heartbeat]

@@ -1,6 +1,3 @@
## The running mode of the datanode. It can be `standalone` or `distributed`.
mode = "standalone"

## The default timezone of the server.
## +toml2docs:none-default
default_timezone = "UTC"

@@ -8,11 +5,9 @@ default_timezone = "UTC"
## The runtime options.
[runtime]
## The number of threads to execute the runtime for global read operations.
read_rt_size = 8
global_rt_size = 8
## The number of threads to execute the runtime for global write operations.
write_rt_size = 8
## The number of threads to execute the runtime for global background operations.
bg_rt_size = 4
compact_rt_size = 4

## The heartbeat options.
[heartbeat]

@@ -34,11 +34,9 @@ enable_region_failover = false
## The runtime options.
[runtime]
## The number of threads to execute the runtime for global read operations.
read_rt_size = 8
global_rt_size = 8
## The number of threads to execute the runtime for global write operations.
write_rt_size = 8
## The number of threads to execute the runtime for global background operations.
bg_rt_size = 4
compact_rt_size = 4

## Procedure storage options.
[procedure]

@@ -58,17 +56,32 @@ max_metadata_value_size = "1500KiB"

# Failure detectors options.
[failure_detector]

## The threshold value used by the failure detector to determine failure conditions.
threshold = 8.0

## The minimum standard deviation of the heartbeat intervals, used to calculate acceptable variations.
min_std_deviation = "100ms"

## The acceptable pause duration between heartbeats, used to determine if a heartbeat interval is acceptable.
acceptable_heartbeat_pause = "10000ms"

## The initial estimate of the heartbeat interval used by the failure detector.
first_heartbeat_estimate = "1000ms"

## Datanode options.
[datanode]

## Datanode client options.
[datanode.client]

## Operation timeout.
timeout = "10s"

## Connect server timeout.
connect_timeout = "10s"

## `TCP_NODELAY` option for accepted connections.
tcp_nodelay = true

[wal]

@@ -11,11 +11,9 @@ default_timezone = "UTC"
## The runtime options.
[runtime]
## The number of threads to execute the runtime for global read operations.
read_rt_size = 8
global_rt_size = 8
## The number of threads to execute the runtime for global write operations.
write_rt_size = 8
## The number of threads to execute the runtime for global background operations.
bg_rt_size = 4
compact_rt_size = 4

## The HTTP server options.
[http]

@@ -459,6 +457,12 @@ mem_threshold_on_create = "auto"
## Deprecated, use `region_engine.mito.index.aux_path` instead.
intermediate_path = ""

## Cache size for inverted index metadata.
metadata_cache_size = "64MiB"

## Cache size for inverted index content.
content_cache_size = "128MiB"

## The options for full-text index in Mito engine.
[region_engine.mito.fulltext_index]
@@ -1,9 +1,9 @@
x-custom:
  etcd_initial_cluster_token: &etcd_initial_cluster_token "--initial-cluster-token=etcd-cluster"
  etcd_common_settings: &etcd_common_settings
    image: quay.io/coreos/etcd:v3.5.10
    image: "${ETCD_REGISTRY:-quay.io}/${ETCD_NAMESPACE:-coreos}/etcd:${ETCD_VERSION:-v3.5.10}"
    entrypoint: /usr/local/bin/etcd
  greptimedb_image: &greptimedb_image docker.io/greptimedb/greptimedb:latest
  greptimedb_image: &greptimedb_image "${GREPTIMEDB_REGISTRY:-docker.io}/${GREPTIMEDB_NAMESPACE:-greptime}/greptimedb:${GREPTIMEDB_VERSION:-latest}"

services:
  etcd0:
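Parameterizing the image references lets the same compose file pull from a different registry or pin a specific tag. A short sketch; the compose file name, registry host, and chosen tag are placeholders, not taken from this diff:

    # Override the ${VAR:-default} image settings declared above, then start the stack.
    export GREPTIMEDB_REGISTRY=registry.example.com   # placeholder registry
    export GREPTIMEDB_NAMESPACE=greptime
    export GREPTIMEDB_VERSION=v0.9.1                  # example tag
    export ETCD_VERSION=v3.5.10
    docker compose -f docker-compose.yml up -d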
@@ -105,7 +105,7 @@ use tests_fuzz::utils::{init_greptime_connections, Connections};

fuzz_target!(|input: FuzzInput| {
    common_telemetry::init_default_ut_logging();
    common_runtime::block_on_write(async {
    common_runtime::block_on_global(async {
        let Connections { mysql } = init_greptime_connections().await;
        let mut rng = ChaChaRng::seed_from_u64(input.seed);
        let columns = rng.gen_range(2..30);

(File diff suppressed because it is too large.)
@@ -12,7 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.

use common_error::ext::ErrorExt;
use common_error::ext::{BoxedError, ErrorExt};
use common_error::status_code::StatusCode;
use common_macro::stack_trace_debug;
use snafu::{Location, Snafu};

@@ -38,6 +38,14 @@ pub enum Error {
        location: Location,
    },

    #[snafu(display("Authentication source failure"))]
    AuthBackend {
        #[snafu(implicit)]
        location: Location,
        #[snafu(source)]
        source: BoxedError,
    },

    #[snafu(display("User not found, username: {}", username))]
    UserNotFound { username: String },

@@ -81,6 +89,7 @@ impl ErrorExt for Error {
            Error::FileWatch { .. } => StatusCode::InvalidArguments,
            Error::InternalState { .. } => StatusCode::Unexpected,
            Error::Io { .. } => StatusCode::StorageUnavailable,
            Error::AuthBackend { .. } => StatusCode::Internal,

            Error::UserNotFound { .. } => StatusCode::UserNotFound,
            Error::UnsupportedPasswordType { .. } => StatusCode::UnsupportedPasswordType,
@@ -57,6 +57,31 @@ pub enum Error {
        source: BoxedError,
    },

    #[snafu(display("Failed to list flows in catalog {catalog}"))]
    ListFlows {
        #[snafu(implicit)]
        location: Location,
        catalog: String,
        source: BoxedError,
    },

    #[snafu(display("Flow info not found: {flow_name} in catalog {catalog_name}"))]
    FlowInfoNotFound {
        flow_name: String,
        catalog_name: String,
        #[snafu(implicit)]
        location: Location,
    },

    #[snafu(display("Can't convert value to json, input={input}"))]
    Json {
        input: String,
        #[snafu(source)]
        error: serde_json::error::Error,
        #[snafu(implicit)]
        location: Location,
    },

    #[snafu(display("Failed to re-compile script due to internal error"))]
    CompileScriptInternal {
        #[snafu(implicit)]

@@ -254,12 +279,15 @@ impl ErrorExt for Error {
            | Error::FindPartitions { .. }
            | Error::FindRegionRoutes { .. }
            | Error::CacheNotFound { .. }
            | Error::CastManager { .. } => StatusCode::Unexpected,
            | Error::CastManager { .. }
            | Error::Json { .. } => StatusCode::Unexpected,

            Error::ViewPlanColumnsChanged { .. } => StatusCode::InvalidArguments,

            Error::ViewInfoNotFound { .. } => StatusCode::TableNotFound,

            Error::FlowInfoNotFound { .. } => StatusCode::FlowNotFound,

            Error::SystemCatalog { .. } => StatusCode::StorageUnavailable,

            Error::UpgradeWeakCatalogManagerRef { .. } => StatusCode::Internal,

@@ -270,7 +298,8 @@ impl ErrorExt for Error {
            Error::ListCatalogs { source, .. }
            | Error::ListNodes { source, .. }
            | Error::ListSchemas { source, .. }
            | Error::ListTables { source, .. } => source.status_code(),
            | Error::ListTables { source, .. }
            | Error::ListFlows { source, .. } => source.status_code(),

            Error::CreateTable { source, .. } => source.status_code(),
@@ -25,6 +25,7 @@ use common_config::Mode;
use common_error::ext::BoxedError;
use common_meta::cache::{LayeredCacheRegistryRef, ViewInfoCacheRef};
use common_meta::key::catalog_name::CatalogNameKey;
use common_meta::key::flow::FlowMetadataManager;
use common_meta::key::schema_name::SchemaNameKey;
use common_meta::key::table_info::TableInfoValue;
use common_meta::key::table_name::TableNameKey;

@@ -85,7 +86,7 @@ impl KvBackendCatalogManager {
                    .get()
                    .expect("Failed to get table_route_cache"),
            )),
            table_metadata_manager: Arc::new(TableMetadataManager::new(backend)),
            table_metadata_manager: Arc::new(TableMetadataManager::new(backend.clone())),
            system_catalog: SystemCatalog {
                catalog_manager: me.clone(),
                catalog_cache: Cache::new(CATALOG_CACHE_MAX_CAPACITY),

@@ -93,11 +94,13 @@ impl KvBackendCatalogManager {
                information_schema_provider: Arc::new(InformationSchemaProvider::new(
                    DEFAULT_CATALOG_NAME.to_string(),
                    me.clone(),
                    Arc::new(FlowMetadataManager::new(backend.clone())),
                )),
                pg_catalog_provider: Arc::new(PGCatalogProvider::new(
                    DEFAULT_CATALOG_NAME.to_string(),
                    me.clone(),
                )),
                backend,
            },
            cache_registry,
        })

@@ -313,6 +316,7 @@ struct SystemCatalog {
    // system_schema_provier for default catalog
    information_schema_provider: Arc<InformationSchemaProvider>,
    pg_catalog_provider: Arc<PGCatalogProvider>,
    backend: KvBackendRef,
}

impl SystemCatalog {

@@ -358,6 +362,7 @@ impl SystemCatalog {
            Arc::new(InformationSchemaProvider::new(
                catalog.to_string(),
                self.catalog_manager.clone(),
                Arc::new(FlowMetadataManager::new(self.backend.clone())),
            ))
        });
        information_schema_provider.table(table_name)
@@ -23,6 +23,8 @@ use common_catalog::consts::{
    DEFAULT_CATALOG_NAME, DEFAULT_PRIVATE_SCHEMA_NAME, DEFAULT_SCHEMA_NAME,
    INFORMATION_SCHEMA_NAME, PG_CATALOG_NAME,
};
use common_meta::key::flow::FlowMetadataManager;
use common_meta::kv_backend::memory::MemoryKvBackend;
use futures_util::stream::BoxStream;
use snafu::OptionExt;
use table::TableRef;

@@ -298,6 +300,7 @@ impl MemoryCatalogManager {
        let information_schema_provider = InformationSchemaProvider::new(
            catalog,
            Arc::downgrade(self) as Weak<dyn CatalogManager>,
            Arc::new(FlowMetadataManager::new(Arc::new(MemoryKvBackend::new()))),
        );
        let information_schema = information_schema_provider.tables().clone();

@@ -15,6 +15,8 @@
pub mod information_schema;
mod memory_table;
pub mod pg_catalog;
mod predicate;
mod utils;

use std::collections::HashMap;
use std::sync::Arc;
@@ -14,28 +14,27 @@

mod cluster_info;
pub mod columns;
pub mod flows;
mod information_memory_table;
pub mod key_column_usage;
mod partitions;
mod predicate;
mod region_peers;
mod runtime_metrics;
pub mod schemata;
mod table_constraints;
mod table_names;
pub mod tables;
pub(crate) mod utils;
mod views;

use std::collections::HashMap;
use std::sync::{Arc, Weak};

use common_catalog::consts::{self, DEFAULT_CATALOG_NAME, INFORMATION_SCHEMA_NAME};
use common_meta::key::flow::FlowMetadataManager;
use common_recordbatch::SendableRecordBatchStream;
use datatypes::schema::SchemaRef;
use lazy_static::lazy_static;
use paste::paste;
pub(crate) use predicate::Predicates;
use store_api::storage::{ScanRequest, TableId};
use table::metadata::TableType;
use table::TableRef;

@@ -46,6 +45,7 @@ use self::columns::InformationSchemaColumns;
use super::{SystemSchemaProviderInner, SystemTable, SystemTableRef};
use crate::error::Result;
use crate::system_schema::information_schema::cluster_info::InformationSchemaClusterInfo;
use crate::system_schema::information_schema::flows::InformationSchemaFlows;
use crate::system_schema::information_schema::information_memory_table::get_schema_columns;
use crate::system_schema::information_schema::key_column_usage::InformationSchemaKeyColumnUsage;
use crate::system_schema::information_schema::partitions::InformationSchemaPartitions;

@@ -55,6 +55,7 @@ use crate::system_schema::information_schema::schemata::InformationSchemaSchemat
use crate::system_schema::information_schema::table_constraints::InformationSchemaTableConstraints;
use crate::system_schema::information_schema::tables::InformationSchemaTables;
use crate::system_schema::memory_table::MemoryTable;
pub(crate) use crate::system_schema::predicate::Predicates;
use crate::system_schema::SystemSchemaProvider;
use crate::CatalogManager;

@@ -104,6 +105,7 @@ macro_rules! setup_memory_table {
pub struct InformationSchemaProvider {
    catalog_name: String,
    catalog_manager: Weak<dyn CatalogManager>,
    flow_metadata_manager: Arc<FlowMetadataManager>,
    tables: HashMap<String, TableRef>,
}

@@ -182,16 +184,25 @@ impl SystemSchemaProviderInner for InformationSchemaProvider {
                self.catalog_name.clone(),
                self.catalog_manager.clone(),
            )) as _),
            FLOWS => Some(Arc::new(InformationSchemaFlows::new(
                self.catalog_name.clone(),
                self.flow_metadata_manager.clone(),
            )) as _),
            _ => None,
        }
    }
}

impl InformationSchemaProvider {
    pub fn new(catalog_name: String, catalog_manager: Weak<dyn CatalogManager>) -> Self {
    pub fn new(
        catalog_name: String,
        catalog_manager: Weak<dyn CatalogManager>,
        flow_metadata_manager: Arc<FlowMetadataManager>,
    ) -> Self {
        let mut provider = Self {
            catalog_name,
            catalog_manager,
            flow_metadata_manager,
            tables: HashMap::new(),
        };

@@ -238,6 +249,7 @@ impl InformationSchemaProvider {
            TABLE_CONSTRAINTS.to_string(),
            self.build_table(TABLE_CONSTRAINTS).unwrap(),
        );
        tables.insert(FLOWS.to_string(), self.build_table(FLOWS).unwrap());

        // Add memory tables
        for name in MEMORY_TABLES.iter() {

@@ -41,7 +41,8 @@ use store_api::storage::{ScanRequest, TableId};

use super::CLUSTER_INFO;
use crate::error::{CreateRecordBatchSnafu, InternalSnafu, ListNodesSnafu, Result};
use crate::system_schema::information_schema::{utils, InformationTable, Predicates};
use crate::system_schema::information_schema::{InformationTable, Predicates};
use crate::system_schema::utils;
use crate::CatalogManager;

const PEER_ID: &str = "peer_id";
src/catalog/src/system_schema/information_schema/flows.rs (new file, 305 lines)
@@ -0,0 +1,305 @@
|
||||
// Copyright 2023 Greptime Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use std::sync::Arc;
|
||||
|
||||
use common_catalog::consts::INFORMATION_SCHEMA_FLOW_TABLE_ID;
|
||||
use common_error::ext::BoxedError;
|
||||
use common_meta::key::flow::flow_info::FlowInfoValue;
|
||||
use common_meta::key::flow::FlowMetadataManager;
|
||||
use common_meta::key::FlowId;
|
||||
use common_recordbatch::adapter::RecordBatchStreamAdapter;
|
||||
use common_recordbatch::{DfSendableRecordBatchStream, RecordBatch, SendableRecordBatchStream};
|
||||
use datafusion::execution::TaskContext;
|
||||
use datafusion::physical_plan::stream::RecordBatchStreamAdapter as DfRecordBatchStreamAdapter;
|
||||
use datafusion::physical_plan::streaming::PartitionStream as DfPartitionStream;
|
||||
use datatypes::prelude::ConcreteDataType as CDT;
|
||||
use datatypes::scalars::ScalarVectorBuilder;
|
||||
use datatypes::schema::{ColumnSchema, Schema, SchemaRef};
|
||||
use datatypes::value::Value;
|
||||
use datatypes::vectors::{Int64VectorBuilder, StringVectorBuilder, UInt32VectorBuilder, VectorRef};
|
||||
use futures::TryStreamExt;
|
||||
use snafu::{OptionExt, ResultExt};
|
||||
use store_api::storage::{ScanRequest, TableId};
|
||||
|
||||
use crate::error::{
|
||||
CreateRecordBatchSnafu, FlowInfoNotFoundSnafu, InternalSnafu, JsonSnafu, ListFlowsSnafu, Result,
|
||||
};
|
||||
use crate::information_schema::{Predicates, FLOWS};
|
||||
use crate::system_schema::information_schema::InformationTable;
|
||||
|
||||
const INIT_CAPACITY: usize = 42;
|
||||
|
||||
// Column names of `information_schema.flows`.
// The primary key is (flow_name, flow_id, table_catalog).
|
||||
pub const FLOW_NAME: &str = "flow_name";
|
||||
pub const FLOW_ID: &str = "flow_id";
|
||||
pub const TABLE_CATALOG: &str = "table_catalog";
|
||||
pub const FLOW_DEFINITION: &str = "flow_definition";
|
||||
pub const COMMENT: &str = "comment";
|
||||
pub const EXPIRE_AFTER: &str = "expire_after";
|
||||
pub const SOURCE_TABLE_IDS: &str = "source_table_ids";
|
||||
pub const SINK_TABLE_NAME: &str = "sink_table_name";
|
||||
pub const FLOWNODE_IDS: &str = "flownode_ids";
|
||||
pub const OPTIONS: &str = "options";
|
||||
|
||||
/// The `information_schema.flows` table provides information about flows in databases.
|
||||
pub(super) struct InformationSchemaFlows {
|
||||
schema: SchemaRef,
|
||||
catalog_name: String,
|
||||
flow_metadata_manager: Arc<FlowMetadataManager>,
|
||||
}
|
||||
|
||||
impl InformationSchemaFlows {
|
||||
pub(super) fn new(
|
||||
catalog_name: String,
|
||||
flow_metadata_manager: Arc<FlowMetadataManager>,
|
||||
) -> Self {
|
||||
Self {
|
||||
schema: Self::schema(),
|
||||
catalog_name,
|
||||
flow_metadata_manager,
|
||||
}
|
||||
}
|
||||
|
||||
/// For complex fields (including [`SOURCE_TABLE_IDS`], [`FLOWNODE_IDS`] and [`OPTIONS`]), the value is serialized to a JSON string for now.
/// TODO(discord9): use a better way to store complex fields, e.g. a native JSON type.
|
||||
pub(crate) fn schema() -> SchemaRef {
|
||||
Arc::new(Schema::new(
|
||||
vec![
|
||||
(FLOW_NAME, CDT::string_datatype(), false),
|
||||
(FLOW_ID, CDT::uint32_datatype(), false),
|
||||
(TABLE_CATALOG, CDT::string_datatype(), false),
|
||||
(FLOW_DEFINITION, CDT::string_datatype(), false),
|
||||
(COMMENT, CDT::string_datatype(), true),
|
||||
(EXPIRE_AFTER, CDT::int64_datatype(), true),
|
||||
(SOURCE_TABLE_IDS, CDT::string_datatype(), true),
|
||||
(SINK_TABLE_NAME, CDT::string_datatype(), false),
|
||||
(FLOWNODE_IDS, CDT::string_datatype(), true),
|
||||
(OPTIONS, CDT::string_datatype(), true),
|
||||
]
|
||||
.into_iter()
|
||||
.map(|(name, ty, nullable)| ColumnSchema::new(name, ty, nullable))
|
||||
.collect(),
|
||||
))
|
||||
}
|
||||
|
||||
fn builder(&self) -> InformationSchemaFlowsBuilder {
|
||||
InformationSchemaFlowsBuilder::new(
|
||||
self.schema.clone(),
|
||||
self.catalog_name.clone(),
|
||||
&self.flow_metadata_manager,
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
impl InformationTable for InformationSchemaFlows {
|
||||
fn table_id(&self) -> TableId {
|
||||
INFORMATION_SCHEMA_FLOW_TABLE_ID
|
||||
}
|
||||
|
||||
fn table_name(&self) -> &'static str {
|
||||
FLOWS
|
||||
}
|
||||
|
||||
fn schema(&self) -> SchemaRef {
|
||||
self.schema.clone()
|
||||
}
|
||||
|
||||
fn to_stream(&self, request: ScanRequest) -> Result<SendableRecordBatchStream> {
|
||||
let schema = self.schema.arrow_schema().clone();
|
||||
let mut builder = self.builder();
|
||||
let stream = Box::pin(DfRecordBatchStreamAdapter::new(
|
||||
schema,
|
||||
futures::stream::once(async move {
|
||||
builder
|
||||
.make_flows(Some(request))
|
||||
.await
|
||||
.map(|x| x.into_df_record_batch())
|
||||
.map_err(|err| datafusion::error::DataFusionError::External(Box::new(err)))
|
||||
}),
|
||||
));
|
||||
Ok(Box::pin(
|
||||
RecordBatchStreamAdapter::try_new(stream)
|
||||
.map_err(BoxedError::new)
|
||||
.context(InternalSnafu)?,
|
||||
))
|
||||
}
|
||||
}
|
||||
|
||||
/// Builds the `information_schema.FLOWS` table row by row
|
||||
///
|
||||
/// Columns are based on [`FlowInfoValue`].
|
||||
struct InformationSchemaFlowsBuilder {
|
||||
schema: SchemaRef,
|
||||
catalog_name: String,
|
||||
flow_metadata_manager: Arc<FlowMetadataManager>,
|
||||
|
||||
flow_names: StringVectorBuilder,
|
||||
flow_ids: UInt32VectorBuilder,
|
||||
table_catalogs: StringVectorBuilder,
|
||||
raw_sqls: StringVectorBuilder,
|
||||
comments: StringVectorBuilder,
|
||||
expire_afters: Int64VectorBuilder,
|
||||
source_table_id_groups: StringVectorBuilder,
|
||||
sink_table_names: StringVectorBuilder,
|
||||
flownode_id_groups: StringVectorBuilder,
|
||||
option_groups: StringVectorBuilder,
|
||||
}
|
||||
|
||||
impl InformationSchemaFlowsBuilder {
|
||||
fn new(
|
||||
schema: SchemaRef,
|
||||
catalog_name: String,
|
||||
flow_metadata_manager: &Arc<FlowMetadataManager>,
|
||||
) -> Self {
|
||||
Self {
|
||||
schema,
|
||||
catalog_name,
|
||||
flow_metadata_manager: flow_metadata_manager.clone(),
|
||||
|
||||
flow_names: StringVectorBuilder::with_capacity(INIT_CAPACITY),
|
||||
flow_ids: UInt32VectorBuilder::with_capacity(INIT_CAPACITY),
|
||||
table_catalogs: StringVectorBuilder::with_capacity(INIT_CAPACITY),
|
||||
raw_sqls: StringVectorBuilder::with_capacity(INIT_CAPACITY),
|
||||
comments: StringVectorBuilder::with_capacity(INIT_CAPACITY),
|
||||
expire_afters: Int64VectorBuilder::with_capacity(INIT_CAPACITY),
|
||||
source_table_id_groups: StringVectorBuilder::with_capacity(INIT_CAPACITY),
|
||||
sink_table_names: StringVectorBuilder::with_capacity(INIT_CAPACITY),
|
||||
flownode_id_groups: StringVectorBuilder::with_capacity(INIT_CAPACITY),
|
||||
option_groups: StringVectorBuilder::with_capacity(INIT_CAPACITY),
|
||||
}
|
||||
}
|
||||
|
||||
/// Construct the `information_schema.flows` virtual table
|
||||
async fn make_flows(&mut self, request: Option<ScanRequest>) -> Result<RecordBatch> {
|
||||
let catalog_name = self.catalog_name.clone();
|
||||
let predicates = Predicates::from_scan_request(&request);
|
||||
|
||||
let flow_info_manager = self.flow_metadata_manager.clone();
|
||||
|
||||
// TODO(discord9): use `AsyncIterator` once it's stable-ish
|
||||
let mut stream = flow_info_manager
|
||||
.flow_name_manager()
|
||||
.flow_names(&catalog_name)
|
||||
.await;
|
||||
|
||||
while let Some((flow_name, flow_id)) = stream
|
||||
.try_next()
|
||||
.await
|
||||
.map_err(BoxedError::new)
|
||||
.context(ListFlowsSnafu {
|
||||
catalog: &catalog_name,
|
||||
})?
|
||||
{
|
||||
let flow_info = flow_info_manager
|
||||
.flow_info_manager()
|
||||
.get(flow_id.flow_id())
|
||||
.await
|
||||
.map_err(BoxedError::new)
|
||||
.context(InternalSnafu)?
|
||||
.context(FlowInfoNotFoundSnafu {
|
||||
catalog_name: catalog_name.to_string(),
|
||||
flow_name: flow_name.to_string(),
|
||||
})?;
|
||||
self.add_flow(&predicates, flow_id.flow_id(), flow_info)?;
|
||||
}
|
||||
|
||||
self.finish()
|
||||
}
|
||||
|
||||
fn add_flow(
|
||||
&mut self,
|
||||
predicates: &Predicates,
|
||||
flow_id: FlowId,
|
||||
flow_info: FlowInfoValue,
|
||||
) -> Result<()> {
|
||||
let row = [
|
||||
(FLOW_NAME, &Value::from(flow_info.flow_name().to_string())),
|
||||
(FLOW_ID, &Value::from(flow_id)),
|
||||
(
|
||||
TABLE_CATALOG,
|
||||
&Value::from(flow_info.catalog_name().to_string()),
|
||||
),
|
||||
];
|
||||
if !predicates.eval(&row) {
|
||||
return Ok(());
|
||||
}
|
||||
self.flow_names.push(Some(flow_info.flow_name()));
|
||||
self.flow_ids.push(Some(flow_id));
|
||||
self.table_catalogs.push(Some(flow_info.catalog_name()));
|
||||
self.raw_sqls.push(Some(flow_info.raw_sql()));
|
||||
self.comments.push(Some(flow_info.comment()));
|
||||
self.expire_afters.push(flow_info.expire_after());
|
||||
self.source_table_id_groups.push(Some(
|
||||
&serde_json::to_string(flow_info.source_table_ids()).context(JsonSnafu {
|
||||
input: format!("{:?}", flow_info.source_table_ids()),
|
||||
})?,
|
||||
));
|
||||
self.sink_table_names
|
||||
.push(Some(&flow_info.sink_table_name().to_string()));
|
||||
self.flownode_id_groups.push(Some(
|
||||
&serde_json::to_string(flow_info.flownode_ids()).context({
|
||||
JsonSnafu {
|
||||
input: format!("{:?}", flow_info.flownode_ids()),
|
||||
}
|
||||
})?,
|
||||
));
|
||||
self.option_groups
|
||||
.push(Some(&serde_json::to_string(flow_info.options()).context(
|
||||
JsonSnafu {
|
||||
input: format!("{:?}", flow_info.options()),
|
||||
},
|
||||
)?));
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn finish(&mut self) -> Result<RecordBatch> {
|
||||
let columns: Vec<VectorRef> = vec![
|
||||
Arc::new(self.flow_names.finish()),
|
||||
Arc::new(self.flow_ids.finish()),
|
||||
Arc::new(self.table_catalogs.finish()),
|
||||
Arc::new(self.raw_sqls.finish()),
|
||||
Arc::new(self.comments.finish()),
|
||||
Arc::new(self.expire_afters.finish()),
|
||||
Arc::new(self.source_table_id_groups.finish()),
|
||||
Arc::new(self.sink_table_names.finish()),
|
||||
Arc::new(self.flownode_id_groups.finish()),
|
||||
Arc::new(self.option_groups.finish()),
|
||||
];
|
||||
RecordBatch::new(self.schema.clone(), columns).context(CreateRecordBatchSnafu)
|
||||
}
|
||||
}
|
||||
|
||||
impl DfPartitionStream for InformationSchemaFlows {
|
||||
fn schema(&self) -> &arrow_schema::SchemaRef {
|
||||
self.schema.arrow_schema()
|
||||
}
|
||||
|
||||
fn execute(&self, _: Arc<TaskContext>) -> DfSendableRecordBatchStream {
|
||||
let schema: Arc<arrow_schema::Schema> = self.schema.arrow_schema().clone();
|
||||
let mut builder = self.builder();
|
||||
Box::pin(DfRecordBatchStreamAdapter::new(
|
||||
schema,
|
||||
futures::stream::once(async move {
|
||||
builder
|
||||
.make_flows(None)
|
||||
.await
|
||||
.map(|x| x.into_df_record_batch())
|
||||
.map_err(Into::into)
|
||||
}),
|
||||
))
|
||||
}
|
||||
}
|
||||
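Illustrative note (not part of the diff): a minimal sketch of how the new flows table gets its metadata manager, using only the constructors visible in the hunks above. The catalog name, the weak catalog-manager handle, and the in-memory kv backend are assumptions for illustration (the backend mirrors the MemoryCatalogManager change shown earlier), and the import paths assume the sketch lives inside the catalog crate.

use std::sync::{Arc, Weak};

use common_meta::key::flow::FlowMetadataManager;
use common_meta::kv_backend::memory::MemoryKvBackend;

use crate::information_schema::InformationSchemaProvider;
use crate::CatalogManager;

// Build the flow metadata manager over an in-memory kv backend and hand it to the
// information_schema provider as its new third constructor argument; the provider
// then exposes `information_schema.flows` through its `tables()` map.
fn build_information_schema(
    catalog_name: String,
    catalog_manager: Weak<dyn CatalogManager>,
) -> InformationSchemaProvider {
    let flow_metadata_manager =
        Arc::new(FlowMetadataManager::new(Arc::new(MemoryKvBackend::new())));
    InformationSchemaProvider::new(catalog_name, catalog_manager, flow_metadata_manager)
}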
@@ -19,7 +19,7 @@ use datatypes::schema::{Schema, SchemaRef};
|
||||
use datatypes::vectors::{Int64Vector, StringVector, VectorRef};
|
||||
|
||||
use super::table_names::*;
|
||||
use crate::system_schema::memory_table::tables::{
|
||||
use crate::system_schema::utils::tables::{
|
||||
bigint_column, datetime_column, string_column, string_columns,
|
||||
};
|
||||
|
||||
|
||||
@@ -36,7 +36,8 @@ use crate::error::{
|
||||
CreateRecordBatchSnafu, InternalSnafu, Result, TableMetadataManagerSnafu,
|
||||
UpgradeWeakCatalogManagerRefSnafu,
|
||||
};
|
||||
use crate::system_schema::information_schema::{utils, InformationTable, Predicates};
|
||||
use crate::system_schema::information_schema::{InformationTable, Predicates};
|
||||
use crate::system_schema::utils;
|
||||
use crate::CatalogManager;
|
||||
|
||||
pub const CATALOG_NAME: &str = "catalog_name";
|
||||
|
||||
@@ -44,3 +44,4 @@ pub const REGION_PEERS: &str = "region_peers";
|
||||
pub const TABLE_CONSTRAINTS: &str = "table_constraints";
|
||||
pub const CLUSTER_INFO: &str = "cluster_info";
|
||||
pub const VIEWS: &str = "views";
|
||||
pub const FLOWS: &str = "flows";
|
||||
|
||||
@@ -298,7 +298,7 @@ impl InformationSchemaTablesBuilder {
|
||||
self.data_free.push(Some(0));
|
||||
self.auto_increment.push(Some(0));
|
||||
self.row_format.push(Some("Fixed"));
|
||||
self.table_collation.push(None);
|
||||
self.table_collation.push(Some("utf8_bin"));
|
||||
self.update_time.push(None);
|
||||
self.check_time.push(None);
|
||||
|
||||
|
||||
@@ -26,7 +26,7 @@ use datafusion::physical_plan::SendableRecordBatchStream as DfSendableRecordBatc
|
||||
use datatypes::prelude::{ConcreteDataType, ScalarVectorBuilder, VectorRef};
|
||||
use datatypes::schema::{ColumnSchema, Schema, SchemaRef};
|
||||
use datatypes::value::Value;
|
||||
use datatypes::vectors::{BooleanVectorBuilder, StringVectorBuilder};
|
||||
use datatypes::vectors::StringVectorBuilder;
|
||||
use futures::TryStreamExt;
|
||||
use snafu::{OptionExt, ResultExt};
|
||||
use store_api::storage::{ScanRequest, TableId};
|
||||
@@ -76,7 +76,7 @@ impl InformationSchemaViews {
|
||||
ColumnSchema::new(TABLE_NAME, ConcreteDataType::string_datatype(), false),
|
||||
ColumnSchema::new(VIEW_DEFINITION, ConcreteDataType::string_datatype(), false),
|
||||
ColumnSchema::new(CHECK_OPTION, ConcreteDataType::string_datatype(), true),
|
||||
ColumnSchema::new(IS_UPDATABLE, ConcreteDataType::boolean_datatype(), true),
|
||||
ColumnSchema::new(IS_UPDATABLE, ConcreteDataType::string_datatype(), true),
|
||||
ColumnSchema::new(DEFINER, ConcreteDataType::string_datatype(), true),
|
||||
ColumnSchema::new(SECURITY_TYPE, ConcreteDataType::string_datatype(), true),
|
||||
ColumnSchema::new(
|
||||
@@ -148,7 +148,7 @@ struct InformationSchemaViewsBuilder {
|
||||
table_names: StringVectorBuilder,
|
||||
view_definitions: StringVectorBuilder,
|
||||
check_options: StringVectorBuilder,
|
||||
is_updatable: BooleanVectorBuilder,
|
||||
is_updatable: StringVectorBuilder,
|
||||
definer: StringVectorBuilder,
|
||||
security_type: StringVectorBuilder,
|
||||
character_set_client: StringVectorBuilder,
|
||||
@@ -170,7 +170,7 @@ impl InformationSchemaViewsBuilder {
|
||||
table_names: StringVectorBuilder::with_capacity(INIT_CAPACITY),
|
||||
view_definitions: StringVectorBuilder::with_capacity(INIT_CAPACITY),
|
||||
check_options: StringVectorBuilder::with_capacity(INIT_CAPACITY),
|
||||
is_updatable: BooleanVectorBuilder::with_capacity(INIT_CAPACITY),
|
||||
is_updatable: StringVectorBuilder::with_capacity(INIT_CAPACITY),
|
||||
definer: StringVectorBuilder::with_capacity(INIT_CAPACITY),
|
||||
security_type: StringVectorBuilder::with_capacity(INIT_CAPACITY),
|
||||
character_set_client: StringVectorBuilder::with_capacity(INIT_CAPACITY),
|
||||
@@ -241,7 +241,8 @@ impl InformationSchemaViewsBuilder {
|
||||
self.table_names.push(Some(table_name));
|
||||
self.view_definitions.push(Some(definition));
|
||||
self.check_options.push(None);
|
||||
self.is_updatable.push(Some(true));
|
||||
// Views are not updatable: statements such as UPDATE, DELETE, and INSERT are illegal and are rejected.
|
||||
self.is_updatable.push(Some("NO"));
|
||||
self.definer.push(None);
|
||||
self.security_type.push(None);
|
||||
self.character_set_client.push(Some("utf8"));
|
||||
|
||||
@@ -13,7 +13,6 @@
|
||||
// limitations under the License.
|
||||
|
||||
mod table_columns;
|
||||
pub mod tables;
|
||||
|
||||
use std::sync::Arc;
|
||||
|
||||
|
||||
@@ -13,6 +13,8 @@
|
||||
// limitations under the License.
|
||||
|
||||
mod pg_catalog_memory_table;
|
||||
mod pg_class;
|
||||
mod pg_namespace;
|
||||
mod table_names;
|
||||
|
||||
use std::collections::HashMap;
|
||||
@@ -23,11 +25,13 @@ use datatypes::schema::ColumnSchema;
|
||||
use lazy_static::lazy_static;
|
||||
use paste::paste;
|
||||
use pg_catalog_memory_table::get_schema_columns;
|
||||
use pg_class::PGClass;
|
||||
use pg_namespace::PGNamespace;
|
||||
use table::TableRef;
|
||||
pub use table_names::*;
|
||||
|
||||
use super::memory_table::tables::u32_column;
|
||||
use super::memory_table::MemoryTable;
|
||||
use super::utils::tables::u32_column;
|
||||
use super::{SystemSchemaProvider, SystemSchemaProviderInner, SystemTableRef};
|
||||
use crate::CatalogManager;
|
||||
|
||||
@@ -46,7 +50,7 @@ fn oid_column() -> ColumnSchema {
|
||||
/// [`PGCatalogProvider`] is the provider for a schema named `pg_catalog`; it is not a catalog.
|
||||
pub struct PGCatalogProvider {
|
||||
catalog_name: String,
|
||||
_catalog_manager: Weak<dyn CatalogManager>,
|
||||
catalog_manager: Weak<dyn CatalogManager>,
|
||||
tables: HashMap<String, TableRef>,
|
||||
}
|
||||
|
||||
@@ -79,7 +83,7 @@ impl PGCatalogProvider {
|
||||
pub fn new(catalog_name: String, catalog_manager: Weak<dyn CatalogManager>) -> Self {
|
||||
let mut provider = Self {
|
||||
catalog_name,
|
||||
_catalog_manager: catalog_manager,
|
||||
catalog_manager,
|
||||
tables: HashMap::new(),
|
||||
};
|
||||
provider.build_tables();
|
||||
@@ -90,9 +94,19 @@ impl PGCatalogProvider {
|
||||
// SECURITY NOTE:
|
||||
// Must follow the same security rules as [`InformationSchemaProvider::build_tables`].
|
||||
let mut tables = HashMap::new();
|
||||
// TODO(J0HN50N133): model the table_name as an enum type to get rid of expect/unwrap here.
|
||||
// It's safe to unwrap here because we are sure that the constants have been handled correctly inside system_table.
|
||||
for name in MEMORY_TABLES.iter() {
|
||||
tables.insert(name.to_string(), self.build_table(name).expect(name));
|
||||
}
|
||||
tables.insert(
|
||||
PG_NAMESPACE.to_string(),
|
||||
self.build_table(PG_NAMESPACE).expect(PG_NAMESPACE),
|
||||
);
|
||||
tables.insert(
|
||||
PG_CLASS.to_string(),
|
||||
self.build_table(PG_CLASS).expect(PG_CLASS),
|
||||
);
|
||||
self.tables = tables;
|
||||
}
|
||||
}
|
||||
@@ -105,6 +119,14 @@ impl SystemSchemaProviderInner for PGCatalogProvider {
|
||||
fn system_table(&self, name: &str) -> Option<SystemTableRef> {
|
||||
match name {
|
||||
table_names::PG_TYPE => setup_memory_table!(PG_TYPE),
|
||||
table_names::PG_NAMESPACE => Some(Arc::new(PGNamespace::new(
|
||||
self.catalog_name.clone(),
|
||||
self.catalog_manager.clone(),
|
||||
))),
|
||||
table_names::PG_CLASS => Some(Arc::new(PGClass::new(
|
||||
self.catalog_name.clone(),
|
||||
self.catalog_manager.clone(),
|
||||
))),
|
||||
_ => None,
|
||||
}
|
||||
}
|
||||
|
||||
@@ -20,7 +20,7 @@ use datatypes::vectors::{Int16Vector, StringVector, UInt32Vector, VectorRef};
|
||||
use super::oid_column;
|
||||
use super::table_names::PG_TYPE;
|
||||
use crate::memory_table_cols;
|
||||
use crate::system_schema::memory_table::tables::{i16_column, string_column};
|
||||
use crate::system_schema::utils::tables::{i16_column, string_column};
|
||||
|
||||
fn pg_type_schema_columns() -> (Vec<ColumnSchema>, Vec<VectorRef>) {
|
||||
// TODO(j0hn50n133): acquire this information from `DataType` instead of hardcoding it to avoid regression.
|
||||
|
||||
src/catalog/src/system_schema/pg_catalog/pg_class.rs (new file, 249 lines)
@@ -0,0 +1,249 @@
|
||||
// Copyright 2023 Greptime Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use std::sync::{Arc, Weak};
|
||||
|
||||
use arrow_schema::SchemaRef as ArrowSchemaRef;
|
||||
use common_catalog::consts::PG_CATALOG_PG_CLASS_TABLE_ID;
|
||||
use common_error::ext::BoxedError;
|
||||
use common_recordbatch::adapter::RecordBatchStreamAdapter;
|
||||
use common_recordbatch::{DfSendableRecordBatchStream, RecordBatch};
|
||||
use datafusion::execution::TaskContext;
|
||||
use datafusion::physical_plan::stream::RecordBatchStreamAdapter as DfRecordBatchStreamAdapter;
|
||||
use datafusion::physical_plan::streaming::PartitionStream as DfPartitionStream;
|
||||
use datatypes::scalars::ScalarVectorBuilder;
|
||||
use datatypes::schema::{Schema, SchemaRef};
|
||||
use datatypes::value::Value;
|
||||
use datatypes::vectors::{StringVectorBuilder, UInt32VectorBuilder, VectorRef};
|
||||
use futures::TryStreamExt;
|
||||
use snafu::{OptionExt, ResultExt};
|
||||
use store_api::storage::ScanRequest;
|
||||
use table::metadata::TableType;
|
||||
|
||||
use super::{OID_COLUMN_NAME, PG_CLASS};
|
||||
use crate::error::{
|
||||
CreateRecordBatchSnafu, InternalSnafu, Result, UpgradeWeakCatalogManagerRefSnafu,
|
||||
};
|
||||
use crate::information_schema::Predicates;
|
||||
use crate::system_schema::utils::tables::{string_column, u32_column};
|
||||
use crate::system_schema::SystemTable;
|
||||
use crate::CatalogManager;
|
||||
|
||||
// === column name ===
|
||||
pub const RELNAME: &str = "relname";
|
||||
pub const RELNAMESPACE: &str = "relnamespace";
|
||||
pub const RELKIND: &str = "relkind";
|
||||
pub const RELOWNER: &str = "relowner";
|
||||
|
||||
// === enum value of relkind ===
|
||||
pub const RELKIND_TABLE: &str = "r";
|
||||
pub const RELKIND_VIEW: &str = "v";
|
||||
|
||||
/// The initial capacity of the vector builders.
|
||||
const INIT_CAPACITY: usize = 42;
|
||||
/// The dummy owner id for the namespace.
|
||||
const DUMMY_OWNER_ID: u32 = 0;
|
||||
|
||||
/// The `pg_catalog.pg_class` table implementation.
|
||||
pub(super) struct PGClass {
|
||||
schema: SchemaRef,
|
||||
catalog_name: String,
|
||||
catalog_manager: Weak<dyn CatalogManager>,
|
||||
}
|
||||
|
||||
impl PGClass {
|
||||
pub(super) fn new(catalog_name: String, catalog_manager: Weak<dyn CatalogManager>) -> Self {
|
||||
Self {
|
||||
schema: Self::schema(),
|
||||
catalog_name,
|
||||
catalog_manager,
|
||||
}
|
||||
}
|
||||
|
||||
fn schema() -> SchemaRef {
|
||||
Arc::new(Schema::new(vec![
|
||||
u32_column(OID_COLUMN_NAME),
|
||||
string_column(RELNAME),
|
||||
string_column(RELNAMESPACE),
|
||||
string_column(RELKIND),
|
||||
u32_column(RELOWNER),
|
||||
]))
|
||||
}
|
||||
|
||||
fn builder(&self) -> PGClassBuilder {
|
||||
PGClassBuilder::new(
|
||||
self.schema.clone(),
|
||||
self.catalog_name.clone(),
|
||||
self.catalog_manager.clone(),
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
impl SystemTable for PGClass {
|
||||
fn table_id(&self) -> table::metadata::TableId {
|
||||
PG_CATALOG_PG_CLASS_TABLE_ID
|
||||
}
|
||||
|
||||
fn table_name(&self) -> &'static str {
|
||||
PG_CLASS
|
||||
}
|
||||
|
||||
fn schema(&self) -> SchemaRef {
|
||||
self.schema.clone()
|
||||
}
|
||||
|
||||
fn to_stream(
|
||||
&self,
|
||||
request: ScanRequest,
|
||||
) -> Result<common_recordbatch::SendableRecordBatchStream> {
|
||||
let schema = self.schema.arrow_schema().clone();
|
||||
let mut builder = self.builder();
|
||||
let stream = Box::pin(DfRecordBatchStreamAdapter::new(
|
||||
schema,
|
||||
futures::stream::once(async move {
|
||||
builder
|
||||
.make_class(Some(request))
|
||||
.await
|
||||
.map(|x| x.into_df_record_batch())
|
||||
.map_err(Into::into)
|
||||
}),
|
||||
));
|
||||
Ok(Box::pin(
|
||||
RecordBatchStreamAdapter::try_new(stream)
|
||||
.map_err(BoxedError::new)
|
||||
.context(InternalSnafu)?,
|
||||
))
|
||||
}
|
||||
}
|
||||
|
||||
impl DfPartitionStream for PGClass {
|
||||
fn schema(&self) -> &ArrowSchemaRef {
|
||||
self.schema.arrow_schema()
|
||||
}
|
||||
|
||||
fn execute(&self, _: Arc<TaskContext>) -> DfSendableRecordBatchStream {
|
||||
let schema = self.schema.arrow_schema().clone();
|
||||
let mut builder = self.builder();
|
||||
Box::pin(DfRecordBatchStreamAdapter::new(
|
||||
schema,
|
||||
futures::stream::once(async move {
|
||||
builder
|
||||
.make_class(None)
|
||||
.await
|
||||
.map(|x| x.into_df_record_batch())
|
||||
.map_err(Into::into)
|
||||
}),
|
||||
))
|
||||
}
|
||||
}
|
||||
|
||||
/// Builds the `pg_catalog.pg_class` table row by row
|
||||
/// TODO(J0HN50N133): `relowner` is always the [`DUMMY_OWNER_ID`] because we don't have a user system yet.
/// Once we have a user system, make it the actual owner of the table.
|
||||
struct PGClassBuilder {
|
||||
schema: SchemaRef,
|
||||
catalog_name: String,
|
||||
catalog_manager: Weak<dyn CatalogManager>,
|
||||
|
||||
oid: UInt32VectorBuilder,
|
||||
relname: StringVectorBuilder,
|
||||
relnamespace: StringVectorBuilder,
|
||||
relkind: StringVectorBuilder,
|
||||
relowner: UInt32VectorBuilder,
|
||||
}
|
||||
|
||||
impl PGClassBuilder {
|
||||
fn new(
|
||||
schema: SchemaRef,
|
||||
catalog_name: String,
|
||||
catalog_manager: Weak<dyn CatalogManager>,
|
||||
) -> Self {
|
||||
Self {
|
||||
schema,
|
||||
catalog_name,
|
||||
catalog_manager,
|
||||
|
||||
oid: UInt32VectorBuilder::with_capacity(INIT_CAPACITY),
|
||||
relname: StringVectorBuilder::with_capacity(INIT_CAPACITY),
|
||||
relnamespace: StringVectorBuilder::with_capacity(INIT_CAPACITY),
|
||||
relkind: StringVectorBuilder::with_capacity(INIT_CAPACITY),
|
||||
relowner: UInt32VectorBuilder::with_capacity(INIT_CAPACITY),
|
||||
}
|
||||
}
|
||||
|
||||
async fn make_class(&mut self, request: Option<ScanRequest>) -> Result<RecordBatch> {
|
||||
let catalog_name = self.catalog_name.clone();
|
||||
let catalog_manager = self
|
||||
.catalog_manager
|
||||
.upgrade()
|
||||
.context(UpgradeWeakCatalogManagerRefSnafu)?;
|
||||
let predicates = Predicates::from_scan_request(&request);
|
||||
for schema_name in catalog_manager.schema_names(&catalog_name).await? {
|
||||
let mut stream = catalog_manager.tables(&catalog_name, &schema_name);
|
||||
while let Some(table) = stream.try_next().await? {
|
||||
let table_info = table.table_info();
|
||||
self.add_class(
|
||||
&predicates,
|
||||
table_info.table_id(),
|
||||
&schema_name,
|
||||
&table_info.name,
|
||||
if table_info.table_type == TableType::View {
|
||||
RELKIND_VIEW
|
||||
} else {
|
||||
RELKIND_TABLE
|
||||
},
|
||||
);
|
||||
}
|
||||
}
|
||||
self.finish()
|
||||
}
|
||||
|
||||
fn add_class(
|
||||
&mut self,
|
||||
predicates: &Predicates,
|
||||
oid: u32,
|
||||
schema: &str,
|
||||
table: &str,
|
||||
kind: &str,
|
||||
) {
|
||||
let row = [
|
||||
(OID_COLUMN_NAME, &Value::from(oid)),
|
||||
(RELNAMESPACE, &Value::from(schema)),
|
||||
(RELNAME, &Value::from(table)),
|
||||
(RELKIND, &Value::from(kind)),
|
||||
(RELOWNER, &Value::from(DUMMY_OWNER_ID)),
|
||||
];
|
||||
|
||||
if !predicates.eval(&row) {
|
||||
return;
|
||||
}
|
||||
|
||||
self.oid.push(Some(oid));
|
||||
self.relnamespace.push(Some(schema));
|
||||
self.relname.push(Some(table));
|
||||
self.relkind.push(Some(kind));
|
||||
self.relowner.push(Some(DUMMY_OWNER_ID));
|
||||
}
|
||||
|
||||
fn finish(&mut self) -> Result<RecordBatch> {
|
||||
let columns: Vec<VectorRef> = vec![
|
||||
Arc::new(self.oid.finish()),
|
||||
Arc::new(self.relname.finish()),
|
||||
Arc::new(self.relnamespace.finish()),
|
||||
Arc::new(self.relkind.finish()),
|
||||
Arc::new(self.relowner.finish()),
|
||||
];
|
||||
RecordBatch::new(self.schema.clone(), columns).context(CreateRecordBatchSnafu)
|
||||
}
|
||||
}
|
||||
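Illustrative note (not part of the diff): a hedged sketch of scanning the new table, constructed the same way PGCatalogProvider::system_table does above. It assumes it lives inside this module (PGClass is pub(super)) and that ScanRequest::default() is available for an unrestricted scan.

use std::sync::Weak;

use store_api::storage::ScanRequest;

use crate::error::Result;
use crate::CatalogManager;

// Construct the table exactly as PGCatalogProvider does, then turn it into a
// record batch stream; an empty ScanRequest is assumed to mean "no projection,
// no filters".
fn scan_pg_class(
    catalog_name: String,
    catalog_manager: Weak<dyn CatalogManager>,
) -> Result<common_recordbatch::SendableRecordBatchStream> {
    let pg_class = PGClass::new(catalog_name, catalog_manager);
    pg_class.to_stream(ScanRequest::default())
}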
src/catalog/src/system_schema/pg_catalog/pg_namespace.rs (new file, 191 lines)
@@ -0,0 +1,191 @@
|
||||
// Copyright 2023 Greptime Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use std::sync::{Arc, Weak};
|
||||
|
||||
use arrow_schema::SchemaRef as ArrowSchemaRef;
|
||||
use common_catalog::consts::PG_CATALOG_PG_NAMESPACE_TABLE_ID;
|
||||
use common_error::ext::BoxedError;
|
||||
use common_recordbatch::adapter::RecordBatchStreamAdapter;
|
||||
use common_recordbatch::{DfSendableRecordBatchStream, RecordBatch, SendableRecordBatchStream};
|
||||
use datafusion::execution::TaskContext;
|
||||
use datafusion::physical_plan::stream::RecordBatchStreamAdapter as DfRecordBatchStreamAdapter;
|
||||
use datafusion::physical_plan::streaming::PartitionStream as DfPartitionStream;
|
||||
use datatypes::scalars::ScalarVectorBuilder;
|
||||
use datatypes::schema::{Schema, SchemaRef};
|
||||
use datatypes::value::Value;
|
||||
use datatypes::vectors::{StringVectorBuilder, VectorRef};
|
||||
use snafu::{OptionExt, ResultExt};
|
||||
use store_api::storage::ScanRequest;
|
||||
|
||||
use super::{OID_COLUMN_NAME, PG_NAMESPACE};
|
||||
use crate::error::{
|
||||
CreateRecordBatchSnafu, InternalSnafu, Result, UpgradeWeakCatalogManagerRefSnafu,
|
||||
};
|
||||
use crate::information_schema::Predicates;
|
||||
use crate::system_schema::utils::tables::string_column;
|
||||
use crate::system_schema::SystemTable;
|
||||
use crate::CatalogManager;
|
||||
|
||||
/// The `pg_catalog.pg_namespace` table implementation.
|
||||
/// A namespace is a schema in GreptimeDB.
|
||||
|
||||
const NSPNAME: &str = "nspname";
|
||||
const INIT_CAPACITY: usize = 42;
|
||||
|
||||
pub(super) struct PGNamespace {
|
||||
schema: SchemaRef,
|
||||
catalog_name: String,
|
||||
catalog_manager: Weak<dyn CatalogManager>,
|
||||
}
|
||||
|
||||
impl PGNamespace {
|
||||
pub(super) fn new(catalog_name: String, catalog_manager: Weak<dyn CatalogManager>) -> Self {
|
||||
Self {
|
||||
schema: Self::schema(),
|
||||
catalog_name,
|
||||
catalog_manager,
|
||||
}
|
||||
}
|
||||
|
||||
fn schema() -> SchemaRef {
|
||||
Arc::new(Schema::new(vec![
|
||||
// TODO(J0HN50N133): we do not have a numeric schema id, use schema name as a workaround. Use a proper schema id once we have it.
|
||||
string_column(OID_COLUMN_NAME),
|
||||
string_column(NSPNAME),
|
||||
]))
|
||||
}
|
||||
|
||||
fn builder(&self) -> PGNamespaceBuilder {
|
||||
PGNamespaceBuilder::new(
|
||||
self.schema.clone(),
|
||||
self.catalog_name.clone(),
|
||||
self.catalog_manager.clone(),
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
impl SystemTable for PGNamespace {
|
||||
fn schema(&self) -> SchemaRef {
|
||||
self.schema.clone()
|
||||
}
|
||||
|
||||
fn table_id(&self) -> table::metadata::TableId {
|
||||
PG_CATALOG_PG_NAMESPACE_TABLE_ID
|
||||
}
|
||||
|
||||
fn table_name(&self) -> &'static str {
|
||||
PG_NAMESPACE
|
||||
}
|
||||
|
||||
fn to_stream(&self, request: ScanRequest) -> Result<SendableRecordBatchStream> {
|
||||
let schema = self.schema.arrow_schema().clone();
|
||||
let mut builder = self.builder();
|
||||
let stream = Box::pin(DfRecordBatchStreamAdapter::new(
|
||||
schema,
|
||||
futures::stream::once(async move {
|
||||
builder
|
||||
.make_namespace(Some(request))
|
||||
.await
|
||||
.map(|x| x.into_df_record_batch())
|
||||
.map_err(Into::into)
|
||||
}),
|
||||
));
|
||||
Ok(Box::pin(
|
||||
RecordBatchStreamAdapter::try_new(stream)
|
||||
.map_err(BoxedError::new)
|
||||
.context(InternalSnafu)?,
|
||||
))
|
||||
}
|
||||
}
|
||||
|
||||
impl DfPartitionStream for PGNamespace {
|
||||
fn schema(&self) -> &ArrowSchemaRef {
|
||||
self.schema.arrow_schema()
|
||||
}
|
||||
|
||||
fn execute(&self, _: Arc<TaskContext>) -> DfSendableRecordBatchStream {
|
||||
let schema = self.schema.arrow_schema().clone();
|
||||
let mut builder = self.builder();
|
||||
Box::pin(DfRecordBatchStreamAdapter::new(
|
||||
schema,
|
||||
futures::stream::once(async move {
|
||||
builder
|
||||
.make_namespace(None)
|
||||
.await
|
||||
.map(|x| x.into_df_record_batch())
|
||||
.map_err(Into::into)
|
||||
}),
|
||||
))
|
||||
}
|
||||
}
|
||||
|
||||
/// Builds the `pg_catalog.pg_namespace` table row by row
|
||||
/// `oid` uses the schema name as a workaround since we don't have a numeric schema id.
|
||||
/// `nspname` is the schema name.
|
||||
struct PGNamespaceBuilder {
|
||||
schema: SchemaRef,
|
||||
catalog_name: String,
|
||||
catalog_manager: Weak<dyn CatalogManager>,
|
||||
|
||||
oid: StringVectorBuilder,
|
||||
nspname: StringVectorBuilder,
|
||||
}
|
||||
|
||||
impl PGNamespaceBuilder {
|
||||
fn new(
|
||||
schema: SchemaRef,
|
||||
catalog_name: String,
|
||||
catalog_manager: Weak<dyn CatalogManager>,
|
||||
) -> Self {
|
||||
Self {
|
||||
schema,
|
||||
catalog_name,
|
||||
catalog_manager,
|
||||
oid: StringVectorBuilder::with_capacity(INIT_CAPACITY),
|
||||
nspname: StringVectorBuilder::with_capacity(INIT_CAPACITY),
|
||||
}
|
||||
}
|
||||
|
||||
/// Construct the `pg_catalog.pg_namespace` virtual table
|
||||
async fn make_namespace(&mut self, request: Option<ScanRequest>) -> Result<RecordBatch> {
|
||||
let catalog_name = self.catalog_name.clone();
|
||||
let catalog_manager = self
|
||||
.catalog_manager
|
||||
.upgrade()
|
||||
.context(UpgradeWeakCatalogManagerRefSnafu)?;
|
||||
let predicates = Predicates::from_scan_request(&request);
|
||||
for schema_name in catalog_manager.schema_names(&catalog_name).await? {
|
||||
self.add_namespace(&predicates, &schema_name);
|
||||
}
|
||||
self.finish()
|
||||
}
|
||||
fn finish(&mut self) -> Result<RecordBatch> {
|
||||
let columns: Vec<VectorRef> =
|
||||
vec![Arc::new(self.oid.finish()), Arc::new(self.nspname.finish())];
|
||||
RecordBatch::new(self.schema.clone(), columns).context(CreateRecordBatchSnafu)
|
||||
}
|
||||
|
||||
fn add_namespace(&mut self, predicates: &Predicates, schema_name: &str) {
|
||||
let row = [
|
||||
(OID_COLUMN_NAME, &Value::from(schema_name)),
|
||||
(NSPNAME, &Value::from(schema_name)),
|
||||
];
|
||||
if !predicates.eval(&row) {
|
||||
return;
|
||||
}
|
||||
self.oid.push(Some(schema_name));
|
||||
self.nspname.push(Some(schema_name));
|
||||
}
|
||||
}
|
||||
@@ -25,7 +25,7 @@ type ColumnName = String;
|
||||
/// we only support these simple predicates currently.
|
||||
/// TODO(dennis): supports more predicate types.
|
||||
#[derive(Clone, PartialEq, Eq, Debug)]
|
||||
enum Predicate {
|
||||
pub(crate) enum Predicate {
|
||||
Eq(ColumnName, Value),
|
||||
Like(ColumnName, String, bool),
|
||||
NotEq(ColumnName, Value),
|
||||
@@ -12,6 +12,8 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
pub mod tables;
|
||||
|
||||
use std::sync::{Arc, Weak};
|
||||
|
||||
use common_config::Mode;
|
||||
@@ -33,9 +33,12 @@ use common_telemetry::tracing_context::W3cTrace;
|
||||
use futures_util::StreamExt;
|
||||
use prost::Message;
|
||||
use snafu::{ensure, ResultExt};
|
||||
use tonic::metadata::AsciiMetadataKey;
|
||||
use tonic::transport::Channel;
|
||||
|
||||
use crate::error::{ConvertFlightDataSnafu, Error, IllegalFlightMessagesSnafu, ServerSnafu};
|
||||
use crate::error::{
|
||||
ConvertFlightDataSnafu, Error, IllegalFlightMessagesSnafu, InvalidAsciiSnafu, ServerSnafu,
|
||||
};
|
||||
use crate::{from_grpc_response, Client, Result};
|
||||
|
||||
#[derive(Clone, Debug, Default)]
|
||||
@@ -130,6 +133,36 @@ impl Database {
|
||||
self.handle(Request::Inserts(requests)).await
|
||||
}
|
||||
|
||||
pub async fn insert_with_hints(
|
||||
&self,
|
||||
requests: InsertRequests,
|
||||
hints: &[(&str, &str)],
|
||||
) -> Result<u32> {
|
||||
let mut client = make_database_client(&self.client)?.inner;
|
||||
let request = self.to_rpc_request(Request::Inserts(requests));
|
||||
|
||||
let mut request = tonic::Request::new(request);
|
||||
let metadata = request.metadata_mut();
|
||||
for (key, value) in hints {
|
||||
let key = AsciiMetadataKey::from_bytes(format!("x-greptime-hint-{}", key).as_bytes())
|
||||
.map_err(|_| {
|
||||
InvalidAsciiSnafu {
|
||||
value: key.to_string(),
|
||||
}
|
||||
.build()
|
||||
})?;
|
||||
let value = value.parse().map_err(|_| {
|
||||
InvalidAsciiSnafu {
|
||||
value: value.to_string(),
|
||||
}
|
||||
.build()
|
||||
})?;
|
||||
metadata.insert(key, value);
|
||||
}
|
||||
let response = client.handle(request).await?.into_inner();
|
||||
from_grpc_response(response)
|
||||
}
|
||||
|
||||
async fn handle(&self, request: Request) -> Result<u32> {
|
||||
let mut client = make_database_client(&self.client)?.inner;
|
||||
let request = self.to_rpc_request(request);
|
||||
|
||||
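Illustrative note (not part of the diff): a hedged usage sketch of the new method. The hint key and value below are made up for illustration and are not keys defined by this change; each pair is sent as an `x-greptime-hint-<key>` ASCII gRPC metadata header, as the loop above shows.

// Assumes a `Database` handle and prepared `InsertRequests`; "ttl" / "7d" are
// hypothetical hint values used only to show the call shape.
async fn insert_with_ttl_hint(db: &Database, requests: InsertRequests) -> Result<u32> {
    db.insert_with_hints(requests, &[("ttl", "7d")]).await
}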
@@ -122,6 +122,13 @@ pub enum Error {
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to parse ascii string: {}", value))]
|
||||
InvalidAscii {
|
||||
value: String,
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
},
|
||||
}
|
||||
|
||||
pub type Result<T> = std::result::Result<T, Error>;
|
||||
@@ -143,6 +150,8 @@ impl ErrorExt for Error {
|
||||
| Error::ConvertFlightData { source, .. }
|
||||
| Error::CreateTlsChannel { source, .. } => source.status_code(),
|
||||
Error::IllegalGrpcClientState { .. } => StatusCode::Unexpected,
|
||||
|
||||
Error::InvalidAscii { .. } => StatusCode::InvalidArguments,
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -10,7 +10,9 @@ name = "greptime"
|
||||
path = "src/bin/greptime.rs"
|
||||
|
||||
[features]
|
||||
default = ["python"]
|
||||
tokio-console = ["common-telemetry/tokio-console"]
|
||||
python = ["frontend/python"]
|
||||
|
||||
[lints]
|
||||
workspace = true
|
||||
@@ -47,7 +49,7 @@ either = "1.8"
|
||||
etcd-client.workspace = true
|
||||
file-engine.workspace = true
|
||||
flow.workspace = true
|
||||
frontend.workspace = true
|
||||
frontend = { workspace = true, default-features = false }
|
||||
futures.workspace = true
|
||||
human-panic = "1.2.2"
|
||||
lazy_static.workspace = true
|
||||
|
||||
@@ -21,11 +21,12 @@ use base64::engine::general_purpose;
|
||||
use base64::Engine;
|
||||
use clap::{Parser, ValueEnum};
|
||||
use client::DEFAULT_SCHEMA_NAME;
|
||||
use common_telemetry::{debug, error, info, warn};
|
||||
use common_catalog::consts::DEFAULT_CATALOG_NAME;
|
||||
use common_telemetry::{debug, error, info};
|
||||
use serde_json::Value;
|
||||
use servers::http::greptime_result_v1::GreptimedbV1Response;
|
||||
use servers::http::GreptimeQueryOutput;
|
||||
use snafu::{OptionExt, ResultExt};
|
||||
use snafu::ResultExt;
|
||||
use tokio::fs::File;
|
||||
use tokio::io::{AsyncWriteExt, BufWriter};
|
||||
use tokio::sync::Semaphore;
|
||||
@@ -34,19 +35,20 @@ use tracing_appender::non_blocking::WorkerGuard;
|
||||
|
||||
use crate::cli::{Instance, Tool};
|
||||
use crate::error::{
|
||||
EmptyResultSnafu, Error, FileIoSnafu, HttpQuerySqlSnafu, InvalidDatabaseNameSnafu, Result,
|
||||
SerdeJsonSnafu,
|
||||
EmptyResultSnafu, Error, FileIoSnafu, HttpQuerySqlSnafu, Result, SerdeJsonSnafu,
|
||||
};
|
||||
|
||||
type TableReference = (String, String, String);
|
||||
|
||||
#[derive(Debug, Default, Clone, ValueEnum)]
|
||||
enum ExportTarget {
|
||||
/// Corresponding to `SHOW CREATE TABLE`
|
||||
/// Export all table schemas, corresponding to `SHOW CREATE TABLE`.
|
||||
Schema,
|
||||
/// Export all table data, corresponding to `COPY DATABASE TO`.
|
||||
Data,
|
||||
/// Export all table schemas and data at once.
|
||||
#[default]
|
||||
CreateTable,
|
||||
/// Corresponding to `EXPORT TABLE`
|
||||
TableData,
|
||||
All,
|
||||
}
|
||||
|
||||
#[derive(Debug, Default, Parser)]
|
||||
@@ -72,10 +74,20 @@ pub struct ExportCommand {
|
||||
max_retry: usize,
|
||||
|
||||
/// Things to export
|
||||
#[clap(long, short = 't', value_enum)]
|
||||
#[clap(long, short = 't', value_enum, default_value = "all")]
|
||||
target: ExportTarget,
|
||||
|
||||
/// basic authentication for connecting to the server
|
||||
/// A half-open time range: [start_time, end_time).
|
||||
/// The start of the time range (time-index column) for data export.
|
||||
#[clap(long)]
|
||||
start_time: Option<String>,
|
||||
|
||||
/// A half-open time range: [start_time, end_time).
|
||||
/// The end of the time range (time-index column) for data export.
|
||||
#[clap(long)]
|
||||
end_time: Option<String>,
|
||||
|
||||
/// The basic authentication for connecting to the server
|
||||
#[clap(long)]
|
||||
auth_basic: Option<String>,
|
||||
}
|
||||
@@ -99,6 +111,8 @@ impl ExportCommand {
|
||||
output_dir: self.output_dir.clone(),
|
||||
parallelism: self.export_jobs,
|
||||
target: self.target.clone(),
|
||||
start_time: self.start_time.clone(),
|
||||
end_time: self.end_time.clone(),
|
||||
auth_header,
|
||||
}),
|
||||
guard,
|
||||
@@ -113,6 +127,8 @@ pub struct Export {
|
||||
output_dir: String,
|
||||
parallelism: usize,
|
||||
target: ExportTarget,
|
||||
start_time: Option<String>,
|
||||
end_time: Option<String>,
|
||||
auth_header: Option<String>,
|
||||
}
|
||||
|
||||
@@ -161,13 +177,13 @@ impl Export {
|
||||
if let Some(schema) = &self.schema {
|
||||
Ok(vec![(self.catalog.clone(), schema.clone())])
|
||||
} else {
|
||||
let result = self.sql("show databases").await?;
|
||||
let result = self.sql("SHOW DATABASES").await?;
|
||||
let Some(records) = result else {
|
||||
EmptyResultSnafu.fail()?
|
||||
};
|
||||
let mut result = Vec::with_capacity(records.len());
|
||||
for value in records {
|
||||
let serde_json::Value::String(schema) = &value[0] else {
|
||||
let Value::String(schema) = &value[0] else {
|
||||
unreachable!()
|
||||
};
|
||||
if schema == common_catalog::consts::INFORMATION_SCHEMA_NAME {
|
||||
@@ -188,9 +204,11 @@ impl Export {
|
||||
) -> Result<(Vec<TableReference>, Vec<TableReference>)> {
|
||||
// Put all metric physical tables first.
|
||||
let sql = format!(
|
||||
"select table_catalog, table_schema, table_name from \
|
||||
information_schema.columns where column_name = '__tsid' \
|
||||
and table_catalog = \'{catalog}\' and table_schema = \'{schema}\'"
|
||||
"SELECT table_catalog, table_schema, table_name \
|
||||
FROM information_schema.columns \
|
||||
WHERE column_name = '__tsid' \
|
||||
and table_catalog = \'{catalog}\' \
|
||||
and table_schema = \'{schema}\'"
|
||||
);
|
||||
let result = self.sql(&sql).await?;
|
||||
let Some(records) = result else {
|
||||
@@ -210,9 +228,11 @@ impl Export {
|
||||
|
||||
// TODO: SQL injection hurts
|
||||
let sql = format!(
|
||||
"select table_catalog, table_schema, table_name from \
|
||||
information_schema.tables where table_type = \'BASE TABLE\' \
|
||||
and table_catalog = \'{catalog}\' and table_schema = \'{schema}\'",
|
||||
"SELECT table_catalog, table_schema, table_name \
|
||||
FROM information_schema.tables \
|
||||
WHERE table_type = \'BASE TABLE\' \
|
||||
and table_catalog = \'{catalog}\' \
|
||||
and table_schema = \'{schema}\'",
|
||||
);
|
||||
let result = self.sql(&sql).await?;
|
||||
let Some(records) = result else {
|
||||
@@ -249,14 +269,14 @@ impl Export {
|
||||
|
||||
async fn show_create_table(&self, catalog: &str, schema: &str, table: &str) -> Result<String> {
|
||||
let sql = format!(
|
||||
r#"show create table "{}"."{}"."{}""#,
|
||||
r#"SHOW CREATE TABLE "{}"."{}"."{}""#,
|
||||
catalog, schema, table
|
||||
);
|
||||
let result = self.sql(&sql).await?;
|
||||
let Some(records) = result else {
|
||||
EmptyResultSnafu.fail()?
|
||||
};
|
||||
let serde_json::Value::String(create_table) = &records[0][1] else {
|
||||
let Value::String(create_table) = &records[0][1] else {
|
||||
unreachable!()
|
||||
};
|
||||
|
||||
@@ -276,11 +296,13 @@ impl Export {
|
||||
let (metric_physical_tables, remaining_tables) =
|
||||
self.get_table_list(&catalog, &schema).await?;
|
||||
let table_count = metric_physical_tables.len() + remaining_tables.len();
|
||||
tokio::fs::create_dir_all(&self.output_dir)
|
||||
let output_dir = Path::new(&self.output_dir)
|
||||
.join(&catalog)
|
||||
.join(format!("{schema}/"));
|
||||
tokio::fs::create_dir_all(&output_dir)
|
||||
.await
|
||||
.context(FileIoSnafu)?;
|
||||
let output_file =
|
||||
Path::new(&self.output_dir).join(format!("{catalog}-{schema}.sql"));
|
||||
let output_file = Path::new(&output_dir).join("create_tables.sql");
|
||||
let mut file = File::create(output_file).await.context(FileIoSnafu)?;
|
||||
for (c, s, t) in metric_physical_tables.into_iter().chain(remaining_tables) {
|
||||
match self.show_create_table(&c, &s, &t).await {
|
||||
@@ -294,7 +316,12 @@ impl Export {
|
||||
}
|
||||
}
|
||||
}
|
||||
info!("finished exporting {catalog}.{schema} with {table_count} tables",);
|
||||
|
||||
info!(
|
||||
"Finished exporting {catalog}.{schema} with {table_count} table schemas to path: {}",
|
||||
output_dir.to_string_lossy()
|
||||
);
|
||||
|
||||
Ok::<(), Error>(())
|
||||
});
|
||||
}
|
||||
@@ -317,7 +344,7 @@ impl Export {
|
||||
Ok(())
|
||||
}
|
||||
|
||||
async fn export_table_data(&self) -> Result<()> {
|
||||
async fn export_database_data(&self) -> Result<()> {
|
||||
let timer = Instant::now();
|
||||
let semaphore = Arc::new(Semaphore::new(self.parallelism));
|
||||
let db_names = self.iter_db_names().await?;
|
||||
@@ -327,70 +354,66 @@ impl Export {
|
||||
let semaphore_moved = semaphore.clone();
|
||||
tasks.push(async move {
|
||||
let _permit = semaphore_moved.acquire().await.unwrap();
|
||||
tokio::fs::create_dir_all(&self.output_dir)
|
||||
let output_dir = Path::new(&self.output_dir)
|
||||
.join(&catalog)
|
||||
.join(format!("{schema}/"));
|
||||
tokio::fs::create_dir_all(&output_dir)
|
||||
.await
|
||||
.context(FileIoSnafu)?;
|
||||
let output_dir = Path::new(&self.output_dir).join(format!("{catalog}-{schema}/"));
|
||||
// Ignores metric physical tables
|
||||
let (metrics_tables, table_list) = self.get_table_list(&catalog, &schema).await?;
|
||||
for (_, _, table_name) in metrics_tables {
|
||||
warn!("Ignores metric physical table: {table_name}");
|
||||
}
|
||||
for (catalog_name, schema_name, table_name) in table_list {
|
||||
// copy table to
|
||||
let sql = format!(
|
||||
r#"Copy "{}"."{}"."{}" TO '{}{}.parquet' WITH (format='parquet');"#,
|
||||
catalog_name,
|
||||
schema_name,
|
||||
table_name,
|
||||
output_dir.to_str().unwrap(),
|
||||
table_name,
|
||||
);
|
||||
info!("Executing sql: {sql}");
|
||||
self.sql(&sql).await?;
|
||||
}
|
||||
info!("Finished exporting {catalog}.{schema} data");
|
||||
|
||||
// export copy from sql
|
||||
let dir_filenames = match output_dir.read_dir() {
|
||||
Ok(dir) => dir,
|
||||
Err(_) => {
|
||||
warn!("empty database {catalog}.{schema}");
|
||||
return Ok(());
|
||||
let with_options = match (&self.start_time, &self.end_time) {
|
||||
(Some(start_time), Some(end_time)) => {
|
||||
format!(
|
||||
"WITH (FORMAT='parquet', start_time='{}', end_time='{}')",
|
||||
start_time, end_time
|
||||
)
|
||||
}
|
||||
(Some(start_time), None) => {
|
||||
format!("WITH (FORMAT='parquet', start_time='{}')", start_time)
|
||||
}
|
||||
(None, Some(end_time)) => {
|
||||
format!("WITH (FORMAT='parquet', end_time='{}')", end_time)
|
||||
}
|
||||
(None, None) => "WITH (FORMAT='parquet')".to_string(),
|
||||
};
|
||||
|
||||
let copy_from_file =
|
||||
Path::new(&self.output_dir).join(format!("{catalog}-{schema}_copy_from.sql"));
|
||||
let sql = format!(
|
||||
r#"COPY DATABASE "{}"."{}" TO '{}' {};"#,
|
||||
catalog,
|
||||
schema,
|
||||
output_dir.to_str().unwrap(),
|
||||
with_options
|
||||
);
|
||||
|
||||
info!("Executing sql: {sql}");
|
||||
|
||||
self.sql(&sql).await?;
|
||||
|
||||
info!(
|
||||
"Finished exporting {catalog}.{schema} data into path: {}",
|
||||
output_dir.to_string_lossy()
|
||||
);
|
||||
|
||||
// Write the `copy_from.sql` script used to re-import the exported data.
|
||||
let copy_from_file = output_dir.join("copy_from.sql");
|
||||
let mut writer =
|
||||
BufWriter::new(File::create(copy_from_file).await.context(FileIoSnafu)?);
|
||||
|
||||
for table_file in dir_filenames {
|
||||
let table_file = table_file.unwrap();
|
||||
let table_name = table_file
|
||||
.file_name()
|
||||
.into_string()
|
||||
.unwrap()
|
||||
.replace(".parquet", "");
|
||||
|
||||
writer
|
||||
.write(
|
||||
format!(
|
||||
"copy {} from '{}' with (format='parquet');\n",
|
||||
table_name,
|
||||
table_file.path().to_str().unwrap()
|
||||
)
|
||||
.as_bytes(),
|
||||
)
|
||||
.await
|
||||
.context(FileIoSnafu)?;
|
||||
}
|
||||
let copy_database_from_sql = format!(
|
||||
r#"COPY DATABASE "{}"."{}" FROM '{}' WITH (FORMAT='parquet');"#,
|
||||
catalog,
|
||||
schema,
|
||||
output_dir.to_str().unwrap()
|
||||
);
|
||||
writer
|
||||
.write(copy_database_from_sql.as_bytes())
|
||||
.await
|
||||
.context(FileIoSnafu)?;
|
||||
writer.flush().await.context(FileIoSnafu)?;
|
||||
|
||||
info!("finished exporting {catalog}.{schema} copy_from.sql");
|
||||
info!("Finished exporting {catalog}.{schema} copy_from.sql");
|
||||
|
||||
Ok::<(), Error>(())
|
||||
});
|
||||
})
|
||||
}
|
||||
|
||||
let success = futures::future::join_all(tasks)
|
||||
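Illustrative note (not part of the diff): what the generated COPY DATABASE statement looks like when only a start bound is given, following the format strings in export_database_data above; the catalog, schema, output path, and timestamp are made-up values.

#[test]
fn copy_database_statement_example() {
    let catalog = "greptime";
    let schema = "public";
    let output_dir = "/tmp/export/greptime/public/";
    // Same WITH-options shape as the (Some(start_time), None) arm above.
    let with_options = format!("WITH (FORMAT='parquet', start_time='{}')", "2024-01-01 00:00:00");
    // Same format string as the export task uses.
    let sql = format!(
        r#"COPY DATABASE "{}"."{}" TO '{}' {};"#,
        catalog, schema, output_dir, with_options
    );
    assert_eq!(
        sql,
        r#"COPY DATABASE "greptime"."public" TO '/tmp/export/greptime/public/' WITH (FORMAT='parquet', start_time='2024-01-01 00:00:00');"#
    );
}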
@@ -399,35 +422,41 @@ impl Export {
|
||||
.filter(|r| match r {
|
||||
Ok(_) => true,
|
||||
Err(e) => {
|
||||
error!(e; "export job failed");
|
||||
error!(e; "export database job failed");
|
||||
false
|
||||
}
|
||||
})
|
||||
.count();
|
||||
let elapsed = timer.elapsed();
|
||||
|
||||
info!("Success {success}/{db_count} jobs, costs: {:?}", elapsed);
|
||||
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
#[allow(deprecated)]
|
||||
#[async_trait]
|
||||
impl Tool for Export {
|
||||
async fn do_work(&self) -> Result<()> {
|
||||
match self.target {
|
||||
ExportTarget::CreateTable => self.export_create_table().await,
|
||||
ExportTarget::TableData => self.export_table_data().await,
|
||||
ExportTarget::Schema => self.export_create_table().await,
|
||||
ExportTarget::Data => self.export_database_data().await,
|
||||
ExportTarget::All => {
|
||||
self.export_create_table().await?;
|
||||
self.export_database_data().await
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Split the database string at `-` into catalog and schema; fall back to the default catalog when no `-` is present.
|
||||
fn split_database(database: &str) -> Result<(String, Option<String>)> {
|
||||
let (catalog, schema) = database
|
||||
.split_once('-')
|
||||
.with_context(|| InvalidDatabaseNameSnafu {
|
||||
database: database.to_string(),
|
||||
})?;
|
||||
let (catalog, schema) = match database.split_once('-') {
|
||||
Some((catalog, schema)) => (catalog, schema),
|
||||
None => (DEFAULT_CATALOG_NAME, database),
|
||||
};
|
||||
|
||||
if schema == "*" {
|
||||
Ok((catalog.to_string(), None))
|
||||
} else {
|
||||
@@ -442,10 +471,26 @@ mod tests {
|
||||
use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
|
||||
use common_telemetry::logging::LoggingOptions;
|
||||
|
||||
use crate::cli::export::split_database;
|
||||
use crate::error::Result as CmdResult;
|
||||
use crate::options::GlobalOptions;
|
||||
use crate::{cli, standalone, App};
|
||||
|
||||
#[test]
|
||||
fn test_split_database() {
|
||||
let result = split_database("catalog-schema").unwrap();
|
||||
assert_eq!(result, ("catalog".to_string(), Some("schema".to_string())));
|
||||
|
||||
let result = split_database("schema").unwrap();
|
||||
assert_eq!(result, ("greptime".to_string(), Some("schema".to_string())));
|
||||
|
||||
let result = split_database("catalog-*").unwrap();
|
||||
assert_eq!(result, ("catalog".to_string(), None));
|
||||
|
||||
let result = split_database("*").unwrap();
|
||||
assert_eq!(result, ("greptime".to_string(), None));
|
||||
}
|
||||
|
||||
#[tokio::test(flavor = "multi_thread")]
|
||||
async fn test_export_create_table_with_quoted_names() -> CmdResult<()> {
|
||||
let output_dir = tempfile::tempdir().unwrap();
|
||||
@@ -487,7 +532,7 @@ mod tests {
|
||||
"--output-dir",
|
||||
&*output_dir.path().to_string_lossy(),
|
||||
"--target",
|
||||
"create-table",
|
||||
"schema",
|
||||
]);
|
||||
let mut cli_app = cli.build(LoggingOptions::default()).await?;
|
||||
cli_app.start().await?;
|
||||
@@ -496,7 +541,9 @@ mod tests {
|
||||
|
||||
let output_file = output_dir
|
||||
.path()
|
||||
.join("greptime-cli.export.create_table.sql");
|
||||
.join("greptime")
|
||||
.join("cli.export.create_table")
|
||||
.join("create_tables.sql");
|
||||
let res = std::fs::read_to_string(output_file).unwrap();
|
||||
let expect = r#"CREATE TABLE IF NOT EXISTS "a.b.c" (
|
||||
"ts" TIMESTAMP(3) NOT NULL,
|
||||
|
||||
@@ -34,7 +34,6 @@ use common_telemetry::debug;
|
||||
use either::Either;
|
||||
use meta_client::client::MetaClientBuilder;
|
||||
use query::datafusion::DatafusionQueryEngine;
|
||||
use query::logical_optimizer::LogicalOptimizer;
|
||||
use query::parser::QueryLanguageParser;
|
||||
use query::plan::LogicalPlan;
|
||||
use query::query_engine::{DefaultSerializer, QueryEngineState};
|
||||
@@ -289,6 +288,7 @@ async fn create_query_engine(meta_addr: &str) -> Result<DatafusionQueryEngine> {
|
||||
None,
|
||||
None,
|
||||
None,
|
||||
None,
|
||||
false,
|
||||
plugins.clone(),
|
||||
));
|
||||
|
||||
@@ -18,6 +18,7 @@ use std::time::Duration;
|
||||
use async_trait::async_trait;
|
||||
use catalog::kvbackend::MetaKvBackend;
|
||||
use clap::Parser;
|
||||
use common_base::Plugins;
|
||||
use common_config::Configurable;
|
||||
use common_telemetry::logging::TracingOptions;
|
||||
use common_telemetry::{info, warn};
|
||||
@@ -271,8 +272,9 @@ impl StartCommand {
|
||||
info!("Datanode start command: {:#?}", self);
|
||||
info!("Datanode options: {:#?}", opts);
|
||||
|
||||
let mut opts = opts.component;
|
||||
let plugins = plugins::setup_datanode_plugins(&mut opts)
|
||||
let opts = opts.component;
|
||||
let mut plugins = Plugins::new();
|
||||
plugins::setup_datanode_plugins(&mut plugins, &opts)
|
||||
.await
|
||||
.context(StartDatanodeSnafu)?;
|
||||
|
||||
|
||||
@@ -298,13 +298,6 @@ pub enum Error {
|
||||
error: std::io::Error,
|
||||
},
|
||||
|
||||
#[snafu(display("Invalid database name: {}", database))]
|
||||
InvalidDatabaseName {
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
database: String,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to create directory {}", dir))]
|
||||
CreateDir {
|
||||
dir: String,
|
||||
@@ -384,8 +377,7 @@ impl ErrorExt for Error {
|
||||
| Error::ConnectEtcd { .. }
|
||||
| Error::NotDataFromOutput { .. }
|
||||
| Error::CreateDir { .. }
|
||||
| Error::EmptyResult { .. }
|
||||
| Error::InvalidDatabaseName { .. } => StatusCode::InvalidArguments,
|
||||
| Error::EmptyResult { .. } => StatusCode::InvalidArguments,
|
||||
|
||||
Error::StartProcedureManager { source, .. }
|
||||
| Error::StopProcedureManager { source, .. } => source.status_code(),
|
||||
|
||||
@@ -20,6 +20,7 @@ use cache::{build_fundamental_cache_registry, with_default_composite_cache_regis
|
||||
use catalog::kvbackend::{CachedMetaKvBackendBuilder, KvBackendCatalogManager, MetaKvBackend};
|
||||
use clap::Parser;
|
||||
use client::client_manager::NodeClients;
|
||||
use common_base::Plugins;
|
||||
use common_config::Configurable;
|
||||
use common_grpc::channel_manager::ChannelConfig;
|
||||
use common_meta::cache::{CacheRegistryBuilder, LayeredCacheRegistryBuilder};
|
||||
@@ -242,10 +243,11 @@ impl StartCommand {
|
||||
.get_or_insert_with(MetaClientOptions::default)
|
||||
.metasrv_addrs
|
||||
.clone_from(metasrv_addrs);
|
||||
opts.mode = Mode::Distributed;
|
||||
}
|
||||
|
||||
opts.user_provider.clone_from(&self.user_provider);
|
||||
if let Some(user_provider) = &self.user_provider {
|
||||
opts.user_provider = Some(user_provider.clone());
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
@@ -264,9 +266,9 @@ impl StartCommand {
|
||||
info!("Frontend start command: {:#?}", self);
|
||||
info!("Frontend options: {:#?}", opts);
|
||||
|
||||
let mut opts = opts.component;
|
||||
#[allow(clippy::unnecessary_mut_passed)]
|
||||
let plugins = plugins::setup_frontend_plugins(&mut opts)
|
||||
let opts = opts.component;
|
||||
let mut plugins = Plugins::new();
|
||||
plugins::setup_frontend_plugins(&mut plugins, &opts)
|
||||
.await
|
||||
.context(StartFrontendSnafu)?;
|
||||
|
||||
@@ -314,7 +316,7 @@ impl StartCommand {
|
||||
);
|
||||
|
||||
let catalog_manager = KvBackendCatalogManager::new(
|
||||
opts.mode,
|
||||
Mode::Distributed,
|
||||
Some(meta_client.clone()),
|
||||
cached_meta_backend.clone(),
|
||||
layered_cache_registry.clone(),
|
||||
@@ -445,7 +447,6 @@ mod tests {
|
||||
|
||||
let fe_opts = command.load_options(&Default::default()).unwrap().component;
|
||||
|
||||
assert_eq!(Mode::Distributed, fe_opts.mode);
|
||||
assert_eq!("127.0.0.1:4000".to_string(), fe_opts.http.addr);
|
||||
assert_eq!(Duration::from_secs(30), fe_opts.http.timeout);
|
||||
|
||||
@@ -458,7 +459,7 @@ mod tests {
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_try_from_start_command_to_anymap() {
|
||||
let mut fe_opts = frontend::frontend::FrontendOptions {
|
||||
let fe_opts = frontend::frontend::FrontendOptions {
|
||||
http: HttpOptions {
|
||||
disable_dashboard: false,
|
||||
..Default::default()
|
||||
@@ -467,8 +468,10 @@ mod tests {
|
||||
..Default::default()
|
||||
};
|
||||
|
||||
#[allow(clippy::unnecessary_mut_passed)]
|
||||
let plugins = plugins::setup_frontend_plugins(&mut fe_opts).await.unwrap();
|
||||
let mut plugins = Plugins::new();
|
||||
plugins::setup_frontend_plugins(&mut plugins, &fe_opts)
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
let provider = plugins.get::<UserProviderRef>().unwrap();
|
||||
let result = provider
|
||||
|
||||
@@ -16,6 +16,7 @@ use std::time::Duration;
|
||||
|
||||
use async_trait::async_trait;
|
||||
use clap::Parser;
|
||||
use common_base::Plugins;
|
||||
use common_config::Configurable;
|
||||
use common_telemetry::info;
|
||||
use common_telemetry::logging::TracingOptions;
|
||||
@@ -238,8 +239,9 @@ impl StartCommand {
|
||||
info!("Metasrv start command: {:#?}", self);
|
||||
info!("Metasrv options: {:#?}", opts);
|
||||
|
||||
let mut opts = opts.component;
|
||||
let plugins = plugins::setup_metasrv_plugins(&mut opts)
|
||||
let opts = opts.component;
|
||||
let mut plugins = Plugins::new();
|
||||
plugins::setup_metasrv_plugins(&mut plugins, &opts)
|
||||
.await
|
||||
.context(StartMetaServerSnafu)?;
|
||||
|
||||
|
||||
@@ -19,6 +19,7 @@ use async_trait::async_trait;
|
||||
use cache::{build_fundamental_cache_registry, with_default_composite_cache_registry};
|
||||
use catalog::kvbackend::KvBackendCatalogManager;
|
||||
use clap::Parser;
|
||||
use common_base::Plugins;
|
||||
use common_catalog::consts::{MIN_USER_FLOW_ID, MIN_USER_TABLE_ID};
|
||||
use common_config::{metadata_store_dir, Configurable, KvBackendConfig};
|
||||
use common_error::ext::BoxedError;
|
||||
@@ -181,7 +182,6 @@ impl StandaloneOptions {
|
||||
pub fn frontend_options(&self) -> FrontendOptions {
|
||||
let cloned_opts = self.clone();
|
||||
FrontendOptions {
|
||||
mode: cloned_opts.mode,
|
||||
default_timezone: cloned_opts.default_timezone,
|
||||
http: cloned_opts.http,
|
||||
grpc: cloned_opts.grpc,
|
||||
@@ -396,7 +396,9 @@ impl StartCommand {
|
||||
opts.influxdb.enable = self.influxdb_enable;
|
||||
}
|
||||
|
||||
opts.user_provider.clone_from(&self.user_provider);
|
||||
if let Some(user_provider) = &self.user_provider {
|
||||
opts.user_provider = Some(user_provider.clone());
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
@@ -418,14 +420,18 @@ impl StartCommand {
|
||||
info!("Standalone start command: {:#?}", self);
|
||||
info!("Standalone options: {opts:#?}");
|
||||
|
||||
let mut plugins = Plugins::new();
|
||||
let opts = opts.component;
|
||||
let mut fe_opts = opts.frontend_options();
|
||||
#[allow(clippy::unnecessary_mut_passed)]
|
||||
let fe_plugins = plugins::setup_frontend_plugins(&mut fe_opts) // mut ref is MUST, DO NOT change it
|
||||
let fe_opts = opts.frontend_options();
|
||||
let dn_opts = opts.datanode_options();
|
||||
|
||||
plugins::setup_frontend_plugins(&mut plugins, &fe_opts)
|
||||
.await
|
||||
.context(StartFrontendSnafu)?;
|
||||
|
||||
let dn_opts = opts.datanode_options();
|
||||
plugins::setup_datanode_plugins(&mut plugins, &dn_opts)
|
||||
.await
|
||||
.context(StartDatanodeSnafu)?;
|
||||
|
||||
set_default_timezone(fe_opts.default_timezone.as_deref()).context(InitTimezoneSnafu)?;
|
||||
|
||||
@@ -464,7 +470,7 @@ impl StartCommand {
|
||||
let table_metadata_manager =
|
||||
Self::create_table_metadata_manager(kv_backend.clone()).await?;
|
||||
|
||||
let datanode = DatanodeBuilder::new(dn_opts, fe_plugins.clone())
|
||||
let datanode = DatanodeBuilder::new(dn_opts, plugins.clone())
|
||||
.with_kv_backend(kv_backend.clone())
|
||||
.build()
|
||||
.await
|
||||
@@ -472,7 +478,7 @@ impl StartCommand {
|
||||
|
||||
let flow_builder = FlownodeBuilder::new(
|
||||
Default::default(),
|
||||
fe_plugins.clone(),
|
||||
plugins.clone(),
|
||||
table_metadata_manager.clone(),
|
||||
catalog_manager.clone(),
|
||||
);
|
||||
@@ -533,7 +539,7 @@ impl StartCommand {
|
||||
node_manager.clone(),
|
||||
ddl_task_executor.clone(),
|
||||
)
|
||||
.with_plugin(fe_plugins.clone())
|
||||
.with_plugin(plugins.clone())
|
||||
.try_build()
|
||||
.await
|
||||
.context(StartFrontendSnafu)?;
|
||||
@@ -554,7 +560,7 @@ impl StartCommand {
|
||||
|
||||
let (tx, _rx) = broadcast::channel(1);
|
||||
|
||||
let servers = Services::new(fe_opts, Arc::new(frontend.clone()), fe_plugins)
|
||||
let servers = Services::new(fe_opts, Arc::new(frontend.clone()), plugins)
|
||||
.build()
|
||||
.await
|
||||
.context(StartFrontendSnafu)?;
|
||||
@@ -629,20 +635,21 @@ mod tests {
|
||||
use common_test_util::temp_dir::create_named_temp_file;
|
||||
use common_wal::config::DatanodeWalConfig;
|
||||
use datanode::config::{FileConfig, GcsConfig};
|
||||
use servers::Mode;
|
||||
|
||||
use super::*;
|
||||
use crate::options::GlobalOptions;
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_try_from_start_command_to_anymap() {
|
||||
let mut fe_opts = FrontendOptions {
|
||||
let fe_opts = FrontendOptions {
|
||||
user_provider: Some("static_user_provider:cmd:test=test".to_string()),
|
||||
..Default::default()
|
||||
};
|
||||
|
||||
#[allow(clippy::unnecessary_mut_passed)]
|
||||
let plugins = plugins::setup_frontend_plugins(&mut fe_opts).await.unwrap();
|
||||
let mut plugins = Plugins::new();
|
||||
plugins::setup_frontend_plugins(&mut plugins, &fe_opts)
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
let provider = plugins.get::<UserProviderRef>().unwrap();
|
||||
let result = provider
|
||||
@@ -727,7 +734,6 @@ mod tests {
|
||||
let fe_opts = options.frontend_options();
|
||||
let dn_opts = options.datanode_options();
|
||||
let logging_opts = options.logging;
|
||||
assert_eq!(Mode::Standalone, fe_opts.mode);
|
||||
assert_eq!("127.0.0.1:4000".to_string(), fe_opts.http.addr);
|
||||
assert_eq!(Duration::from_secs(33), fe_opts.http.timeout);
|
||||
assert_eq!(ReadableSize::mb(128), fe_opts.http.body_limit);
|
||||
|
||||
@@ -46,9 +46,8 @@ fn test_load_datanode_example_config() {
|
||||
|
||||
let expected = GreptimeOptions::<DatanodeOptions> {
|
||||
runtime: RuntimeOptions {
|
||||
read_rt_size: 8,
|
||||
write_rt_size: 8,
|
||||
bg_rt_size: 4,
|
||||
global_rt_size: 8,
|
||||
compact_rt_size: 4,
|
||||
},
|
||||
component: DatanodeOptions {
|
||||
node_id: Some(42),
|
||||
@@ -119,9 +118,8 @@ fn test_load_frontend_example_config() {
|
||||
.unwrap();
|
||||
let expected = GreptimeOptions::<FrontendOptions> {
|
||||
runtime: RuntimeOptions {
|
||||
read_rt_size: 8,
|
||||
write_rt_size: 8,
|
||||
bg_rt_size: 4,
|
||||
global_rt_size: 8,
|
||||
compact_rt_size: 4,
|
||||
},
|
||||
component: FrontendOptions {
|
||||
default_timezone: Some("UTC".to_string()),
|
||||
@@ -167,9 +165,8 @@ fn test_load_metasrv_example_config() {
|
||||
.unwrap();
|
||||
let expected = GreptimeOptions::<MetasrvOptions> {
|
||||
runtime: RuntimeOptions {
|
||||
read_rt_size: 8,
|
||||
write_rt_size: 8,
|
||||
bg_rt_size: 4,
|
||||
global_rt_size: 8,
|
||||
compact_rt_size: 4,
|
||||
},
|
||||
component: MetasrvOptions {
|
||||
selector: SelectorType::LeaseBased,
|
||||
@@ -200,9 +197,8 @@ fn test_load_standalone_example_config() {
|
||||
.unwrap();
|
||||
let expected = GreptimeOptions::<StandaloneOptions> {
|
||||
runtime: RuntimeOptions {
|
||||
read_rt_size: 8,
|
||||
write_rt_size: 8,
|
||||
bg_rt_size: 4,
|
||||
global_rt_size: 8,
|
||||
compact_rt_size: 4,
|
||||
},
|
||||
component: StandaloneOptions {
|
||||
default_timezone: Some("UTC".to_string()),
|
||||
|
@@ -96,11 +96,14 @@ pub const INFORMATION_SCHEMA_TABLE_CONSTRAINTS_TABLE_ID: u32 = 30;
pub const INFORMATION_SCHEMA_CLUSTER_INFO_TABLE_ID: u32 = 31;
/// id for information_schema.VIEWS
pub const INFORMATION_SCHEMA_VIEW_TABLE_ID: u32 = 32;
/// id for information_schema.FLOWS
pub const INFORMATION_SCHEMA_FLOW_TABLE_ID: u32 = 33;
/// ----- End of information_schema tables -----

/// ----- Begin of pg_catalog tables -----
pub const PG_CATALOG_PG_CLASS_TABLE_ID: u32 = 256;
pub const PG_CATALOG_PG_TYPE_TABLE_ID: u32 = 257;
pub const PG_CATALOG_PG_NAMESPACE_TABLE_ID: u32 = 258;

/// ----- End of pg_catalog tables -----
pub const MITO_ENGINE: &str = "mito";

@@ -185,7 +185,7 @@ impl FileFormat for CsvFormat {
        let schema_infer_max_record = self.schema_infer_max_record;
        let has_header = self.has_header;

        common_runtime::spawn_blocking_read(move || {
        common_runtime::spawn_blocking_global(move || {
            let reader = SyncIoBridge::new(decoded);

            let (schema, _records_read) =

@@ -101,7 +101,7 @@ impl FileFormat for JsonFormat {

        let schema_infer_max_record = self.schema_infer_max_record;

        common_runtime::spawn_blocking_read(move || {
        common_runtime::spawn_blocking_global(move || {
            let mut reader = BufReader::new(SyncIoBridge::new(decoded));

            let iter = ValueIter::new(&mut reader, schema_infer_max_record);

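These two hunks are part of a broader runtime API consolidation that recurs later in this compare view: spawn_blocking_read becomes spawn_blocking_global here, while spawn_bg, bg_runtime and block_on_read become spawn_global, global_runtime and block_on_global in the channel manager, telemetry task and admin_fn macro hunks further down.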
@@ -53,6 +53,20 @@ pub trait ErrorExt: StackError {
            }
        }
    }

    /// Find out root level error for nested error
    fn root_cause(&self) -> Option<&dyn std::error::Error>
    where
        Self: Sized,
    {
        let error = self.last();
        if let Some(external_error) = error.source() {
            let external_root = external_error.sources().last().unwrap();
            Some(external_root)
        } else {
            None
        }
    }
}

pub trait StackError: std::error::Error {

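For orientation, a minimal caller-side sketch of the new helper, assuming the usual common_error::ext::ErrorExt import path; the function name and message format are illustrative only:

use common_error::ext::ErrorExt;

// Render an error together with its deepest external cause, if any.
// `root_cause` walks to the last error in the stack and then to its last external source.
fn describe<E: ErrorExt>(err: &E) -> String {
    match err.root_cause() {
        Some(root) => format!("{err}, root cause: {root}"),
        None => err.to_string(),
    }
}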
@@ -36,7 +36,7 @@ pub enum StatusCode {
|
||||
InvalidArguments = 1004,
|
||||
/// The task is cancelled.
|
||||
Cancelled = 1005,
|
||||
/// Illegal state, can be exposed to users
|
||||
/// Illegal state, can be exposed to users.
|
||||
IllegalState = 1006,
|
||||
// ====== End of common status code ================
|
||||
|
||||
@@ -55,33 +55,34 @@ pub enum StatusCode {
|
||||
// ====== Begin of catalog related status code =====
|
||||
/// Table already exists.
|
||||
TableAlreadyExists = 4000,
|
||||
/// Table not found
|
||||
/// Table not found.
|
||||
TableNotFound = 4001,
|
||||
/// Table column not found
|
||||
/// Table column not found.
|
||||
TableColumnNotFound = 4002,
|
||||
/// Table column already exists
|
||||
/// Table column already exists.
|
||||
TableColumnExists = 4003,
|
||||
/// Database not found
|
||||
/// Database not found.
|
||||
DatabaseNotFound = 4004,
|
||||
/// Region not found
|
||||
/// Region not found.
|
||||
RegionNotFound = 4005,
|
||||
/// Region already exists
|
||||
/// Region already exists.
|
||||
RegionAlreadyExists = 4006,
|
||||
/// Region is read-only in current state.
|
||||
RegionReadonly = 4007,
|
||||
/// Region is not in a proper state to handle specific request.
|
||||
RegionNotReady = 4008,
|
||||
/// Region is temporarily in busy state
|
||||
/// Region is temporarily in busy state.
|
||||
RegionBusy = 4009,
|
||||
/// Table is temporarily unable to handle the request
|
||||
/// Table is temporarily unable to handle the request.
|
||||
TableUnavailable = 4010,
|
||||
/// Database not found
|
||||
/// Database already exists.
|
||||
DatabaseAlreadyExists = 4011,
|
||||
// ====== End of catalog related status code =======
|
||||
|
||||
// ====== Begin of storage related status code =====
|
||||
/// Storage is temporarily unable to handle the request
|
||||
/// Storage is temporarily unable to handle the request.
|
||||
StorageUnavailable = 5000,
|
||||
/// Request is outdated, e.g., version mismatch
|
||||
/// Request is outdated, e.g., version mismatch.
|
||||
RequestOutdated = 5001,
|
||||
// ====== End of storage related status code =======
|
||||
|
||||
@@ -89,24 +90,24 @@ pub enum StatusCode {
|
||||
/// Runtime resources exhausted, like creating threads failed.
|
||||
RuntimeResourcesExhausted = 6000,
|
||||
|
||||
/// Rate limit exceeded
|
||||
/// Rate limit exceeded.
|
||||
RateLimited = 6001,
|
||||
// ====== End of server related status code =======
|
||||
|
||||
// ====== Begin of auth related status code =====
|
||||
/// User not exist
|
||||
/// User not exist.
|
||||
UserNotFound = 7000,
|
||||
/// Unsupported password type
|
||||
/// Unsupported password type.
|
||||
UnsupportedPasswordType = 7001,
|
||||
/// Username and password does not match
|
||||
/// Username and password does not match.
|
||||
UserPasswordMismatch = 7002,
|
||||
/// Not found http authorization header
|
||||
/// Not found http authorization header.
|
||||
AuthHeaderNotFound = 7003,
|
||||
/// Invalid http authorization header
|
||||
/// Invalid http authorization header.
|
||||
InvalidAuthHeader = 7004,
|
||||
/// Illegal request to connect catalog-schema
|
||||
/// Illegal request to connect catalog-schema.
|
||||
AccessDenied = 7005,
|
||||
/// User is not authorized to perform the operation
|
||||
/// User is not authorized to perform the operation.
|
||||
PermissionDenied = 7006,
|
||||
// ====== End of auth related status code =====
|
||||
|
||||
|
||||
@@ -31,6 +31,7 @@ serde.workspace = true
serde_json.workspace = true
session.workspace = true
snafu.workspace = true
sql.workspace = true
statrs = "0.16"
store-api.workspace = true
table.workspace = true

164
src/common/function/src/flush_flow.rs
Normal file
@@ -0,0 +1,164 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use common_error::ext::BoxedError;
use common_macro::admin_fn;
use common_query::error::{
    ExecuteSnafu, InvalidFuncArgsSnafu, MissingFlowServiceHandlerSnafu, Result,
    UnsupportedInputDataTypeSnafu,
};
use common_query::prelude::Signature;
use datafusion::logical_expr::Volatility;
use datatypes::value::{Value, ValueRef};
use session::context::QueryContextRef;
use snafu::{ensure, ResultExt};
use sql::parser::ParserContext;
use store_api::storage::ConcreteDataType;

use crate::handlers::FlowServiceHandlerRef;

fn flush_signature() -> Signature {
    Signature::uniform(
        1,
        vec![ConcreteDataType::string_datatype()],
        Volatility::Immutable,
    )
}

#[admin_fn(
    name = FlushFlowFunction,
    display_name = flush_flow,
    sig_fn = flush_signature,
    ret = uint64
)]
pub(crate) async fn flush_flow(
    flow_service_handler: &FlowServiceHandlerRef,
    query_ctx: &QueryContextRef,
    params: &[ValueRef<'_>],
) -> Result<Value> {
    let (catalog_name, flow_name) = parse_flush_flow(params, query_ctx)?;

    let res = flow_service_handler
        .flush(&catalog_name, &flow_name, query_ctx.clone())
        .await?;
    let affected_rows = res.affected_rows;

    Ok(Value::from(affected_rows))
}

fn parse_flush_flow(
    params: &[ValueRef<'_>],
    query_ctx: &QueryContextRef,
) -> Result<(String, String)> {
    ensure!(
        params.len() == 1,
        InvalidFuncArgsSnafu {
            err_msg: format!(
                "The length of the args is not correct, expect 1, have: {}",
                params.len()
            ),
        }
    );

    let ValueRef::String(flow_name) = params[0] else {
        return UnsupportedInputDataTypeSnafu {
            function: "flush_flow",
            datatypes: params.iter().map(|v| v.data_type()).collect::<Vec<_>>(),
        }
        .fail();
    };
    let obj_name = ParserContext::parse_table_name(flow_name, query_ctx.sql_dialect())
        .map_err(BoxedError::new)
        .context(ExecuteSnafu)?;

    let (catalog_name, flow_name) = match &obj_name.0[..] {
        [flow_name] => (
            query_ctx.current_catalog().to_string(),
            flow_name.value.clone(),
        ),
        [catalog, flow_name] => (catalog.value.clone(), flow_name.value.clone()),
        _ => {
            return InvalidFuncArgsSnafu {
                err_msg: format!(
                    "expect flow name to be <catalog>.<flow-name> or <flow-name>, actual: {}",
                    obj_name
                ),
            }
            .fail()
        }
    };
    Ok((catalog_name, flow_name))
}

#[cfg(test)]
mod test {
    use std::sync::Arc;

    use datatypes::scalars::ScalarVector;
    use datatypes::vectors::StringVector;
    use session::context::QueryContext;

    use super::*;
    use crate::function::{Function, FunctionContext};

    #[test]
    fn test_flush_flow_metadata() {
        let f = FlushFlowFunction;
        assert_eq!("flush_flow", f.name());
        assert_eq!(
            ConcreteDataType::uint64_datatype(),
            f.return_type(&[]).unwrap()
        );
        assert_eq!(
            f.signature(),
            Signature::uniform(
                1,
                vec![ConcreteDataType::string_datatype()],
                Volatility::Immutable,
            )
        );
    }

    #[test]
    fn test_missing_flow_service() {
        let f = FlushFlowFunction;

        let args = vec!["flow_name"];
        let args = args
            .into_iter()
            .map(|arg| Arc::new(StringVector::from_slice(&[arg])) as _)
            .collect::<Vec<_>>();

        let result = f.eval(FunctionContext::default(), &args).unwrap_err();
        assert_eq!(
            "Missing FlowServiceHandler, not expected",
            result.to_string()
        );
    }

    #[test]
    fn test_parse_flow_args() {
        let testcases = [
            ("flow_name", ("greptime", "flow_name")),
            ("catalog.flow_name", ("catalog", "flow_name")),
        ];
        for (input, expected) in testcases.iter() {
            let args = vec![*input];
            let args = args.into_iter().map(ValueRef::String).collect::<Vec<_>>();

            let result = parse_flush_flow(&args, &QueryContext::arc()).unwrap();
            assert_eq!(*expected, (result.0.as_str(), result.1.as_str()));
        }
    }
}

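FlushFlowFunction is registered alongside the other administration table functions in the TableFunction::register hunk further down; once a FlowServiceHandler is installed, a flow can then be flushed from SQL with a call of the form flush_flow('my_flow') or flush_flow('my_catalog.my_flow'). That invocation shape is an illustration consistent with parse_flush_flow above rather than something quoted from this diff.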
@@ -65,6 +65,19 @@ pub trait ProcedureServiceHandler: Send + Sync {
    async fn query_procedure_state(&self, pid: &str) -> Result<ProcedureStateResponse>;
}

/// This flow service handler is only used for flushing flows for now.
#[async_trait]
pub trait FlowServiceHandler: Send + Sync {
    async fn flush(
        &self,
        catalog: &str,
        flow: &str,
        ctx: QueryContextRef,
    ) -> Result<api::v1::flow::FlowResponse>;
}

pub type TableMutationHandlerRef = Arc<dyn TableMutationHandler>;

pub type ProcedureServiceHandlerRef = Arc<dyn ProcedureServiceHandler>;

pub type FlowServiceHandlerRef = Arc<dyn FlowServiceHandler>;

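A minimal sketch of what an implementation of the new trait can look like, following the same shape as the mock handler that appears later in this diff; the struct name is illustrative, the Result alias is the one imported in handlers.rs, and FlowResponse is assumed to derive Default as a prost-generated message:

use std::sync::Arc;

use async_trait::async_trait;
use session::context::QueryContextRef;

struct NoopFlowServiceHandler;

#[async_trait]
impl FlowServiceHandler for NoopFlowServiceHandler {
    async fn flush(
        &self,
        _catalog: &str,
        _flow: &str,
        _ctx: QueryContextRef,
    ) -> Result<api::v1::flow::FlowResponse> {
        // Report an empty response (zero affected rows); a real handler
        // would forward the flush request to the flownode.
        Ok(api::v1::flow::FlowResponse::default())
    }
}

// The handler is then shared behind the new alias:
// let handler: FlowServiceHandlerRef = Arc::new(NoopFlowServiceHandler);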
@@ -15,6 +15,7 @@

#![feature(let_chains)]
#![feature(try_blocks)]

mod flush_flow;
mod macros;
pub mod scalars;
mod system;

@@ -19,8 +19,6 @@ use snafu::ResultExt;

use crate::scalars::expression::ctx::EvalContext;

/// TODO: remove the allow_unused when it's used.
#[allow(unused)]
pub fn scalar_unary_op<L: Scalar, O: Scalar, F>(
    l: &VectorRef,
    f: F,

@@ -14,11 +14,9 @@

use std::sync::Arc;
mod greatest;
mod to_timezone;
mod to_unixtime;

use greatest::GreatestFunction;
use to_timezone::ToTimezoneFunction;
use to_unixtime::ToUnixtimeFunction;

use crate::function_registry::FunctionRegistry;
@@ -27,7 +25,6 @@ pub(crate) struct TimestampFunction;

impl TimestampFunction {
    pub fn register(registry: &FunctionRegistry) {
        registry.register(Arc::new(ToTimezoneFunction));
        registry.register(Arc::new(ToUnixtimeFunction));
        registry.register(Arc::new(GreatestFunction));
    }

||||
@@ -1,313 +0,0 @@
|
||||
// Copyright 2023 Greptime Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use std::fmt;
|
||||
use std::sync::Arc;
|
||||
|
||||
use common_query::error::{InvalidFuncArgsSnafu, Result, UnsupportedInputDataTypeSnafu};
|
||||
use common_query::prelude::Signature;
|
||||
use common_time::{Timestamp, Timezone};
|
||||
use datatypes::data_type::ConcreteDataType;
|
||||
use datatypes::prelude::VectorRef;
|
||||
use datatypes::types::TimestampType;
|
||||
use datatypes::value::Value;
|
||||
use datatypes::vectors::{
|
||||
Int64Vector, StringVector, TimestampMicrosecondVector, TimestampMillisecondVector,
|
||||
TimestampNanosecondVector, TimestampSecondVector, Vector,
|
||||
};
|
||||
use snafu::{ensure, OptionExt};
|
||||
|
||||
use crate::function::{Function, FunctionContext};
|
||||
use crate::helper;
|
||||
|
||||
#[derive(Clone, Debug, Default)]
|
||||
pub struct ToTimezoneFunction;
|
||||
|
||||
const NAME: &str = "to_timezone";
|
||||
|
||||
fn convert_to_timezone(arg: &str) -> Option<Timezone> {
|
||||
Timezone::from_tz_string(arg).ok()
|
||||
}
|
||||
|
||||
fn convert_to_timestamp(arg: &Value) -> Option<Timestamp> {
|
||||
match arg {
|
||||
Value::Timestamp(ts) => Some(*ts),
|
||||
Value::Int64(i) => Some(Timestamp::new_millisecond(*i)),
|
||||
_ => None,
|
||||
}
|
||||
}
|
||||
|
||||
impl fmt::Display for ToTimezoneFunction {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
write!(f, "TO_TIMEZONE")
|
||||
}
|
||||
}
|
||||
|
||||
impl Function for ToTimezoneFunction {
|
||||
fn name(&self) -> &str {
|
||||
NAME
|
||||
}
|
||||
|
||||
fn return_type(&self, input_types: &[ConcreteDataType]) -> Result<ConcreteDataType> {
|
||||
// type checked by signature - MUST BE timestamp
|
||||
Ok(input_types[0].clone())
|
||||
}
|
||||
|
||||
fn signature(&self) -> Signature {
|
||||
helper::one_of_sigs2(
|
||||
vec![
|
||||
ConcreteDataType::int32_datatype(),
|
||||
ConcreteDataType::int64_datatype(),
|
||||
ConcreteDataType::timestamp_second_datatype(),
|
||||
ConcreteDataType::timestamp_millisecond_datatype(),
|
||||
ConcreteDataType::timestamp_microsecond_datatype(),
|
||||
ConcreteDataType::timestamp_nanosecond_datatype(),
|
||||
],
|
||||
vec![ConcreteDataType::string_datatype()],
|
||||
)
|
||||
}
|
||||
|
||||
fn eval(&self, _ctx: FunctionContext, columns: &[VectorRef]) -> Result<VectorRef> {
|
||||
ensure!(
|
||||
columns.len() == 2,
|
||||
InvalidFuncArgsSnafu {
|
||||
err_msg: format!(
|
||||
"The length of the args is not correct, expect exactly 2, have: {}",
|
||||
columns.len()
|
||||
),
|
||||
}
|
||||
);
|
||||
|
||||
let array = columns[0].to_arrow_array();
|
||||
let times = match columns[0].data_type() {
|
||||
ConcreteDataType::Int64(_) | ConcreteDataType::Int32(_) => {
|
||||
let vector = Int64Vector::try_from_arrow_array(array).unwrap();
|
||||
(0..vector.len())
|
||||
.map(|i| convert_to_timestamp(&vector.get(i)))
|
||||
.collect::<Vec<_>>()
|
||||
}
|
||||
ConcreteDataType::Timestamp(ts) => match ts {
|
||||
TimestampType::Second(_) => {
|
||||
let vector = TimestampSecondVector::try_from_arrow_array(array).unwrap();
|
||||
(0..vector.len())
|
||||
.map(|i| convert_to_timestamp(&vector.get(i)))
|
||||
.collect::<Vec<_>>()
|
||||
}
|
||||
TimestampType::Millisecond(_) => {
|
||||
let vector = TimestampMillisecondVector::try_from_arrow_array(array).unwrap();
|
||||
(0..vector.len())
|
||||
.map(|i| convert_to_timestamp(&vector.get(i)))
|
||||
.collect::<Vec<_>>()
|
||||
}
|
||||
TimestampType::Microsecond(_) => {
|
||||
let vector = TimestampMicrosecondVector::try_from_arrow_array(array).unwrap();
|
||||
(0..vector.len())
|
||||
.map(|i| convert_to_timestamp(&vector.get(i)))
|
||||
.collect::<Vec<_>>()
|
||||
}
|
||||
TimestampType::Nanosecond(_) => {
|
||||
let vector = TimestampNanosecondVector::try_from_arrow_array(array).unwrap();
|
||||
(0..vector.len())
|
||||
.map(|i| convert_to_timestamp(&vector.get(i)))
|
||||
.collect::<Vec<_>>()
|
||||
}
|
||||
},
|
||||
_ => UnsupportedInputDataTypeSnafu {
|
||||
function: NAME,
|
||||
datatypes: columns.iter().map(|c| c.data_type()).collect::<Vec<_>>(),
|
||||
}
|
||||
.fail()?,
|
||||
};
|
||||
|
||||
let tzs = {
|
||||
let array = columns[1].to_arrow_array();
|
||||
let vector = StringVector::try_from_arrow_array(&array)
|
||||
.ok()
|
||||
.with_context(|| UnsupportedInputDataTypeSnafu {
|
||||
function: NAME,
|
||||
datatypes: columns.iter().map(|c| c.data_type()).collect::<Vec<_>>(),
|
||||
})?;
|
||||
(0..vector.len())
|
||||
.map(|i| convert_to_timezone(&vector.get(i).to_string()))
|
||||
.collect::<Vec<_>>()
|
||||
};
|
||||
|
||||
let result = times
|
||||
.iter()
|
||||
.zip(tzs.iter())
|
||||
.map(|(time, tz)| match (time, tz) {
|
||||
(Some(time), _) => Some(time.to_timezone_aware_string(tz.as_ref())),
|
||||
_ => None,
|
||||
})
|
||||
.collect::<Vec<Option<String>>>();
|
||||
Ok(Arc::new(StringVector::from(result)))
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
|
||||
use datatypes::scalars::ScalarVector;
|
||||
use datatypes::timestamp::{
|
||||
TimestampMicrosecond, TimestampMillisecond, TimestampNanosecond, TimestampSecond,
|
||||
};
|
||||
use datatypes::vectors::{Int64Vector, StringVector};
|
||||
|
||||
use super::*;
|
||||
|
||||
#[test]
|
||||
fn test_timestamp_to_timezone() {
|
||||
let f = ToTimezoneFunction;
|
||||
assert_eq!("to_timezone", f.name());
|
||||
|
||||
let results = vec![
|
||||
Some("1969-12-31 19:00:01"),
|
||||
None,
|
||||
Some("1970-01-01 03:00:01"),
|
||||
None,
|
||||
];
|
||||
let times: Vec<Option<TimestampSecond>> = vec![
|
||||
Some(TimestampSecond::new(1)),
|
||||
None,
|
||||
Some(TimestampSecond::new(1)),
|
||||
None,
|
||||
];
|
||||
let ts_vector: TimestampSecondVector =
|
||||
TimestampSecondVector::from_owned_iterator(times.into_iter());
|
||||
let tzs = vec![Some("America/New_York"), None, Some("Europe/Moscow"), None];
|
||||
let args: Vec<VectorRef> = vec![
|
||||
Arc::new(ts_vector),
|
||||
Arc::new(StringVector::from(tzs.clone())),
|
||||
];
|
||||
let vector = f.eval(FunctionContext::default(), &args).unwrap();
|
||||
assert_eq!(4, vector.len());
|
||||
let expect_times: VectorRef = Arc::new(StringVector::from(results));
|
||||
assert_eq!(expect_times, vector);
|
||||
|
||||
let results = vec![
|
||||
Some("1969-12-31 19:00:00.001"),
|
||||
None,
|
||||
Some("1970-01-01 03:00:00.001"),
|
||||
None,
|
||||
];
|
||||
let times: Vec<Option<TimestampMillisecond>> = vec![
|
||||
Some(TimestampMillisecond::new(1)),
|
||||
None,
|
||||
Some(TimestampMillisecond::new(1)),
|
||||
None,
|
||||
];
|
||||
let ts_vector: TimestampMillisecondVector =
|
||||
TimestampMillisecondVector::from_owned_iterator(times.into_iter());
|
||||
let args: Vec<VectorRef> = vec![
|
||||
Arc::new(ts_vector),
|
||||
Arc::new(StringVector::from(tzs.clone())),
|
||||
];
|
||||
let vector = f.eval(FunctionContext::default(), &args).unwrap();
|
||||
assert_eq!(4, vector.len());
|
||||
let expect_times: VectorRef = Arc::new(StringVector::from(results));
|
||||
assert_eq!(expect_times, vector);
|
||||
|
||||
let results = vec![
|
||||
Some("1969-12-31 19:00:00.000001"),
|
||||
None,
|
||||
Some("1970-01-01 03:00:00.000001"),
|
||||
None,
|
||||
];
|
||||
let times: Vec<Option<TimestampMicrosecond>> = vec![
|
||||
Some(TimestampMicrosecond::new(1)),
|
||||
None,
|
||||
Some(TimestampMicrosecond::new(1)),
|
||||
None,
|
||||
];
|
||||
let ts_vector: TimestampMicrosecondVector =
|
||||
TimestampMicrosecondVector::from_owned_iterator(times.into_iter());
|
||||
|
||||
let args: Vec<VectorRef> = vec![
|
||||
Arc::new(ts_vector),
|
||||
Arc::new(StringVector::from(tzs.clone())),
|
||||
];
|
||||
let vector = f.eval(FunctionContext::default(), &args).unwrap();
|
||||
assert_eq!(4, vector.len());
|
||||
let expect_times: VectorRef = Arc::new(StringVector::from(results));
|
||||
assert_eq!(expect_times, vector);
|
||||
|
||||
let results = vec![
|
||||
Some("1969-12-31 19:00:00.000000001"),
|
||||
None,
|
||||
Some("1970-01-01 03:00:00.000000001"),
|
||||
None,
|
||||
];
|
||||
let times: Vec<Option<TimestampNanosecond>> = vec![
|
||||
Some(TimestampNanosecond::new(1)),
|
||||
None,
|
||||
Some(TimestampNanosecond::new(1)),
|
||||
None,
|
||||
];
|
||||
let ts_vector: TimestampNanosecondVector =
|
||||
TimestampNanosecondVector::from_owned_iterator(times.into_iter());
|
||||
|
||||
let args: Vec<VectorRef> = vec![
|
||||
Arc::new(ts_vector),
|
||||
Arc::new(StringVector::from(tzs.clone())),
|
||||
];
|
||||
let vector = f.eval(FunctionContext::default(), &args).unwrap();
|
||||
assert_eq!(4, vector.len());
|
||||
let expect_times: VectorRef = Arc::new(StringVector::from(results));
|
||||
assert_eq!(expect_times, vector);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_numerical_to_timezone() {
|
||||
let f = ToTimezoneFunction;
|
||||
let results = vec![
|
||||
Some("1969-12-31 19:00:00.001"),
|
||||
None,
|
||||
Some("1970-01-01 03:00:00.001"),
|
||||
None,
|
||||
Some("2024-03-26 23:01:50"),
|
||||
None,
|
||||
Some("2024-03-27 06:02:00"),
|
||||
None,
|
||||
];
|
||||
let times: Vec<Option<i64>> = vec![
|
||||
Some(1),
|
||||
None,
|
||||
Some(1),
|
||||
None,
|
||||
Some(1711508510000),
|
||||
None,
|
||||
Some(1711508520000),
|
||||
None,
|
||||
];
|
||||
let ts_vector: Int64Vector = Int64Vector::from_owned_iterator(times.into_iter());
|
||||
let tzs = vec![
|
||||
Some("America/New_York"),
|
||||
None,
|
||||
Some("Europe/Moscow"),
|
||||
None,
|
||||
Some("America/New_York"),
|
||||
None,
|
||||
Some("Europe/Moscow"),
|
||||
None,
|
||||
];
|
||||
let args: Vec<VectorRef> = vec![
|
||||
Arc::new(ts_vector),
|
||||
Arc::new(StringVector::from(tzs.clone())),
|
||||
];
|
||||
let vector = f.eval(FunctionContext::default(), &args).unwrap();
|
||||
assert_eq!(8, vector.len());
|
||||
let expect_times: VectorRef = Arc::new(StringVector::from(results));
|
||||
assert_eq!(expect_times, vector);
|
||||
}
|
||||
}
|
||||
@@ -12,7 +12,7 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use crate::handlers::{ProcedureServiceHandlerRef, TableMutationHandlerRef};
|
||||
use crate::handlers::{FlowServiceHandlerRef, ProcedureServiceHandlerRef, TableMutationHandlerRef};
|
||||
|
||||
/// Shared state for SQL functions.
|
||||
/// The handlers in state may be `None` in cli command-line or test cases.
|
||||
@@ -22,6 +22,8 @@ pub struct FunctionState {
|
||||
pub table_mutation_handler: Option<TableMutationHandlerRef>,
|
||||
// The procedure service handler
|
||||
pub procedure_service_handler: Option<ProcedureServiceHandlerRef>,
|
||||
// The flownode handler
|
||||
pub flow_service_handler: Option<FlowServiceHandlerRef>,
|
||||
}
|
||||
|
||||
impl FunctionState {
|
||||
@@ -42,9 +44,10 @@ impl FunctionState {
|
||||
CompactTableRequest, DeleteRequest, FlushTableRequest, InsertRequest,
|
||||
};
|
||||
|
||||
use crate::handlers::{ProcedureServiceHandler, TableMutationHandler};
|
||||
use crate::handlers::{FlowServiceHandler, ProcedureServiceHandler, TableMutationHandler};
|
||||
struct MockProcedureServiceHandler;
|
||||
struct MockTableMutationHandler;
|
||||
struct MockFlowServiceHandler;
|
||||
const ROWS: usize = 42;
|
||||
|
||||
#[async_trait]
|
||||
@@ -116,9 +119,22 @@ impl FunctionState {
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
impl FlowServiceHandler for MockFlowServiceHandler {
|
||||
async fn flush(
|
||||
&self,
|
||||
_catalog: &str,
|
||||
_flow: &str,
|
||||
_ctx: QueryContextRef,
|
||||
) -> Result<api::v1::flow::FlowResponse> {
|
||||
todo!()
|
||||
}
|
||||
}
|
||||
|
||||
Self {
|
||||
table_mutation_handler: Some(Arc::new(MockTableMutationHandler)),
|
||||
procedure_service_handler: Some(Arc::new(MockProcedureServiceHandler)),
|
||||
flow_service_handler: Some(Arc::new(MockFlowServiceHandler)),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -14,6 +14,7 @@
|
||||
|
||||
mod build;
|
||||
mod database;
|
||||
mod pg_catalog;
|
||||
mod procedure_state;
|
||||
mod timezone;
|
||||
mod version;
|
||||
@@ -22,6 +23,7 @@ use std::sync::Arc;
|
||||
|
||||
use build::BuildFunction;
|
||||
use database::DatabaseFunction;
|
||||
use pg_catalog::PGCatalogFunction;
|
||||
use procedure_state::ProcedureStateFunction;
|
||||
use timezone::TimezoneFunction;
|
||||
use version::VersionFunction;
|
||||
@@ -37,5 +39,6 @@ impl SystemFunction {
|
||||
registry.register(Arc::new(DatabaseFunction));
|
||||
registry.register(Arc::new(TimezoneFunction));
|
||||
registry.register(Arc::new(ProcedureStateFunction));
|
||||
PGCatalogFunction::register(registry);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -12,17 +12,28 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
mod pg_get_userbyid;
|
||||
mod table_is_visible;
|
||||
|
||||
use std::sync::Arc;
|
||||
|
||||
use datafusion::physical_plan::ExecutionPlan;
|
||||
use pg_get_userbyid::PGGetUserByIdFunction;
|
||||
use table_is_visible::PGTableIsVisibleFunction;
|
||||
|
||||
use crate::error::Result;
|
||||
use crate::query_engine::QueryEngineContext;
|
||||
use crate::function_registry::FunctionRegistry;
|
||||
|
||||
pub trait PhysicalOptimizer {
|
||||
fn optimize_physical_plan(
|
||||
&self,
|
||||
ctx: &mut QueryEngineContext,
|
||||
plan: Arc<dyn ExecutionPlan>,
|
||||
) -> Result<Arc<dyn ExecutionPlan>>;
|
||||
#[macro_export]
|
||||
macro_rules! pg_catalog_func_fullname {
|
||||
($name:literal) => {
|
||||
concat!("pg_catalog.", $name)
|
||||
};
|
||||
}
|
||||
|
||||
pub(super) struct PGCatalogFunction;
|
||||
|
||||
impl PGCatalogFunction {
|
||||
pub fn register(registry: &FunctionRegistry) {
|
||||
registry.register(Arc::new(PGTableIsVisibleFunction));
|
||||
registry.register(Arc::new(PGGetUserByIdFunction));
|
||||
}
|
||||
}
|
||||
72
src/common/function/src/system/pg_catalog/pg_get_userbyid.rs
Normal file
72
src/common/function/src/system/pg_catalog/pg_get_userbyid.rs
Normal file
@@ -0,0 +1,72 @@
|
||||
// Copyright 2023 Greptime Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use std::fmt::{self};
|
||||
use std::sync::Arc;
|
||||
|
||||
use common_query::error::Result;
|
||||
use common_query::prelude::{Signature, Volatility};
|
||||
use datatypes::prelude::{ConcreteDataType, DataType, VectorRef};
|
||||
use datatypes::types::LogicalPrimitiveType;
|
||||
use datatypes::with_match_primitive_type_id;
|
||||
use num_traits::AsPrimitive;
|
||||
|
||||
use crate::function::{Function, FunctionContext};
|
||||
use crate::scalars::expression::{scalar_unary_op, EvalContext};
|
||||
|
||||
#[derive(Clone, Debug, Default)]
|
||||
pub struct PGGetUserByIdFunction;
|
||||
|
||||
const NAME: &str = crate::pg_catalog_func_fullname!("pg_get_userbyid");
|
||||
|
||||
impl fmt::Display for PGGetUserByIdFunction {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
write!(f, crate::pg_catalog_func_fullname!("PG_GET_USERBYID"))
|
||||
}
|
||||
}
|
||||
|
||||
impl Function for PGGetUserByIdFunction {
|
||||
fn name(&self) -> &str {
|
||||
NAME
|
||||
}
|
||||
|
||||
fn return_type(&self, _input_types: &[ConcreteDataType]) -> Result<ConcreteDataType> {
|
||||
Ok(ConcreteDataType::string_datatype())
|
||||
}
|
||||
|
||||
fn signature(&self) -> Signature {
|
||||
Signature::uniform(
|
||||
1,
|
||||
vec![ConcreteDataType::uint32_datatype()],
|
||||
Volatility::Immutable,
|
||||
)
|
||||
}
|
||||
|
||||
fn eval(&self, _func_ctx: FunctionContext, columns: &[VectorRef]) -> Result<VectorRef> {
|
||||
with_match_primitive_type_id!(columns[0].data_type().logical_type_id(), |$T| {
|
||||
let col = scalar_unary_op::<<$T as LogicalPrimitiveType>::Native, String, _>(&columns[0], pg_get_user_by_id, &mut EvalContext::default())?;
|
||||
Ok(Arc::new(col))
|
||||
}, {
|
||||
unreachable!()
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
fn pg_get_user_by_id<I>(table_oid: Option<I>, _ctx: &mut EvalContext) -> Option<String>
|
||||
where
|
||||
I: AsPrimitive<u32>,
|
||||
{
|
||||
// TODO(J0HN50N133): We lack a way to get the user_info by a numeric value. Once we have it, we can implement this function.
|
||||
table_oid.map(|_| "".to_string())
|
||||
}
|
||||
@@ -0,0 +1,72 @@
|
||||
// Copyright 2023 Greptime Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use std::fmt::{self};
|
||||
use std::sync::Arc;
|
||||
|
||||
use common_query::error::Result;
|
||||
use common_query::prelude::{Signature, Volatility};
|
||||
use datatypes::prelude::{ConcreteDataType, DataType, VectorRef};
|
||||
use datatypes::types::LogicalPrimitiveType;
|
||||
use datatypes::with_match_primitive_type_id;
|
||||
use num_traits::AsPrimitive;
|
||||
|
||||
use crate::function::{Function, FunctionContext};
|
||||
use crate::scalars::expression::{scalar_unary_op, EvalContext};
|
||||
|
||||
#[derive(Clone, Debug, Default)]
|
||||
pub struct PGTableIsVisibleFunction;
|
||||
|
||||
const NAME: &str = crate::pg_catalog_func_fullname!("pg_table_is_visible");
|
||||
|
||||
impl fmt::Display for PGTableIsVisibleFunction {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
write!(f, crate::pg_catalog_func_fullname!("PG_TABLE_IS_VISIBLE"))
|
||||
}
|
||||
}
|
||||
|
||||
impl Function for PGTableIsVisibleFunction {
|
||||
fn name(&self) -> &str {
|
||||
NAME
|
||||
}
|
||||
|
||||
fn return_type(&self, _input_types: &[ConcreteDataType]) -> Result<ConcreteDataType> {
|
||||
Ok(ConcreteDataType::boolean_datatype())
|
||||
}
|
||||
|
||||
fn signature(&self) -> Signature {
|
||||
Signature::uniform(
|
||||
1,
|
||||
vec![ConcreteDataType::uint32_datatype()],
|
||||
Volatility::Immutable,
|
||||
)
|
||||
}
|
||||
|
||||
fn eval(&self, _func_ctx: FunctionContext, columns: &[VectorRef]) -> Result<VectorRef> {
|
||||
with_match_primitive_type_id!(columns[0].data_type().logical_type_id(), |$T| {
|
||||
let col = scalar_unary_op::<<$T as LogicalPrimitiveType>::Native, bool, _>(&columns[0], pg_table_is_visible, &mut EvalContext::default())?;
|
||||
Ok(Arc::new(col))
|
||||
}, {
|
||||
unreachable!()
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
fn pg_table_is_visible<I>(table_oid: Option<I>, _ctx: &mut EvalContext) -> Option<bool>
|
||||
where
|
||||
I: AsPrimitive<u32>,
|
||||
{
|
||||
// There is no table visibility in greptime, so we always return true
|
||||
table_oid.map(|_| true)
|
||||
}
|
||||
@@ -12,26 +12,19 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use std::fmt;
|
||||
|
||||
use api::v1::meta::ProcedureStatus;
|
||||
use common_macro::admin_fn;
|
||||
use common_meta::rpc::procedure::ProcedureStateResponse;
|
||||
use common_query::error::Error::ThreadJoin;
|
||||
use common_query::error::{
|
||||
InvalidFuncArgsSnafu, MissingProcedureServiceHandlerSnafu, Result,
|
||||
UnsupportedInputDataTypeSnafu,
|
||||
};
|
||||
use common_query::prelude::{Signature, Volatility};
|
||||
use common_telemetry::error;
|
||||
use datatypes::prelude::*;
|
||||
use datatypes::vectors::VectorRef;
|
||||
use serde::Serialize;
|
||||
use session::context::QueryContextRef;
|
||||
use snafu::{ensure, Location, OptionExt};
|
||||
use snafu::ensure;
|
||||
|
||||
use crate::ensure_greptime;
|
||||
use crate::function::{Function, FunctionContext};
|
||||
use crate::handlers::ProcedureServiceHandlerRef;
|
||||
|
||||
#[derive(Serialize)]
|
||||
@@ -103,6 +96,7 @@ mod tests {
|
||||
use datatypes::vectors::StringVector;
|
||||
|
||||
use super::*;
|
||||
use crate::function::{Function, FunctionContext};
|
||||
|
||||
#[test]
|
||||
fn test_procedure_state_misc() {
|
||||
|
||||
@@ -22,6 +22,7 @@ use flush_compact_region::{CompactRegionFunction, FlushRegionFunction};
|
||||
use flush_compact_table::{CompactTableFunction, FlushTableFunction};
|
||||
use migrate_region::MigrateRegionFunction;
|
||||
|
||||
use crate::flush_flow::FlushFlowFunction;
|
||||
use crate::function_registry::FunctionRegistry;
|
||||
|
||||
/// Table functions
|
||||
@@ -35,5 +36,6 @@ impl TableFunction {
|
||||
registry.register(Arc::new(CompactRegionFunction));
|
||||
registry.register(Arc::new(FlushTableFunction));
|
||||
registry.register(Arc::new(CompactTableFunction));
|
||||
registry.register(Arc::new(FlushFlowFunction));
|
||||
}
|
||||
}
|
||||
|
||||
@@ -12,23 +12,16 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use std::fmt;
|
||||
|
||||
use common_macro::admin_fn;
|
||||
use common_query::error::Error::ThreadJoin;
|
||||
use common_query::error::{
|
||||
InvalidFuncArgsSnafu, MissingTableMutationHandlerSnafu, Result, UnsupportedInputDataTypeSnafu,
|
||||
};
|
||||
use common_query::prelude::{Signature, Volatility};
|
||||
use common_telemetry::error;
|
||||
use datatypes::prelude::*;
|
||||
use datatypes::vectors::VectorRef;
|
||||
use session::context::QueryContextRef;
|
||||
use snafu::{ensure, Location, OptionExt};
|
||||
use snafu::ensure;
|
||||
use store_api::storage::RegionId;
|
||||
|
||||
use crate::ensure_greptime;
|
||||
use crate::function::{Function, FunctionContext};
|
||||
use crate::handlers::TableMutationHandlerRef;
|
||||
use crate::helper::cast_u64;
|
||||
|
||||
@@ -84,6 +77,7 @@ mod tests {
|
||||
use datatypes::vectors::UInt64Vector;
|
||||
|
||||
use super::*;
|
||||
use crate::function::{Function, FunctionContext};
|
||||
|
||||
macro_rules! define_region_function_test {
|
||||
($name: ident, $func: ident) => {
|
||||
|
||||
@@ -12,28 +12,23 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use std::fmt;
|
||||
use std::str::FromStr;
|
||||
|
||||
use api::v1::region::{compact_request, StrictWindow};
|
||||
use common_error::ext::BoxedError;
|
||||
use common_macro::admin_fn;
|
||||
use common_query::error::Error::ThreadJoin;
|
||||
use common_query::error::{
|
||||
InvalidFuncArgsSnafu, MissingTableMutationHandlerSnafu, Result, TableMutationSnafu,
|
||||
UnsupportedInputDataTypeSnafu,
|
||||
};
|
||||
use common_query::prelude::{Signature, Volatility};
|
||||
use common_telemetry::{error, info};
|
||||
use common_telemetry::info;
|
||||
use datatypes::prelude::*;
|
||||
use datatypes::vectors::VectorRef;
|
||||
use session::context::QueryContextRef;
|
||||
use session::table_name::table_name_to_full_name;
|
||||
use snafu::{ensure, Location, OptionExt, ResultExt};
|
||||
use snafu::{ensure, ResultExt};
|
||||
use table::requests::{CompactTableRequest, FlushTableRequest};
|
||||
|
||||
use crate::ensure_greptime;
|
||||
use crate::function::{Function, FunctionContext};
|
||||
use crate::handlers::TableMutationHandlerRef;
|
||||
|
||||
/// Compact type: strict window.
|
||||
@@ -209,6 +204,7 @@ mod tests {
|
||||
use session::context::QueryContext;
|
||||
|
||||
use super::*;
|
||||
use crate::function::{Function, FunctionContext};
|
||||
|
||||
macro_rules! define_table_function_test {
|
||||
($name: ident, $func: ident) => {
|
||||
|
||||
@@ -12,24 +12,16 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use std::fmt::{self};
|
||||
use std::time::Duration;
|
||||
|
||||
use common_macro::admin_fn;
|
||||
use common_meta::rpc::procedure::MigrateRegionRequest;
|
||||
use common_query::error::Error::ThreadJoin;
|
||||
use common_query::error::{InvalidFuncArgsSnafu, MissingProcedureServiceHandlerSnafu, Result};
|
||||
use common_query::prelude::{Signature, TypeSignature, Volatility};
|
||||
use common_telemetry::error;
|
||||
use datatypes::data_type::DataType;
|
||||
use datatypes::prelude::ConcreteDataType;
|
||||
use datatypes::value::{Value, ValueRef};
|
||||
use datatypes::vectors::VectorRef;
|
||||
use session::context::QueryContextRef;
|
||||
use snafu::{Location, OptionExt};
|
||||
|
||||
use crate::ensure_greptime;
|
||||
use crate::function::{Function, FunctionContext};
|
||||
use crate::handlers::ProcedureServiceHandlerRef;
|
||||
use crate::helper::cast_u64;
|
||||
|
||||
@@ -128,9 +120,10 @@ mod tests {
|
||||
use std::sync::Arc;
|
||||
|
||||
use common_query::prelude::TypeSignature;
|
||||
use datatypes::vectors::{StringVector, UInt64Vector};
|
||||
use datatypes::vectors::{StringVector, UInt64Vector, VectorRef};
|
||||
|
||||
use super::*;
|
||||
use crate::function::{Function, FunctionContext};
|
||||
|
||||
#[test]
|
||||
fn test_migrate_region_misc() {
|
||||
|
||||
@@ -72,7 +72,7 @@ impl GreptimeDBTelemetryTask {
        match self {
            GreptimeDBTelemetryTask::Enable((task, _)) => {
                print_anonymous_usage_data_disclaimer();
                task.start(common_runtime::bg_runtime())
                task.start(common_runtime::global_runtime())
            }
            GreptimeDBTelemetryTask::Disable => Ok(()),
        }

@@ -225,7 +225,7 @@ impl ChannelManager {
        }

        let pool = self.pool.clone();
        let _handle = common_runtime::spawn_bg(async {
        let _handle = common_runtime::spawn_global(async {
            recycle_channel_in_loop(pool, RECYCLE_CHANNEL_INTERVAL_SECS).await;
        });
        info!(

||||
@@ -153,6 +153,7 @@ fn build_struct(
|
||||
let ret = Ident::new(&format!("{ret}_datatype"), ret.span());
|
||||
let uppcase_display_name = display_name.to_uppercase();
|
||||
// Get the handler name in function state by the argument ident
|
||||
// TODO(discord9): consider simple depend injection if more handlers are needed
|
||||
let (handler, snafu_type) = match handler_type.to_string().as_str() {
|
||||
"ProcedureServiceHandlerRef" => (
|
||||
Ident::new("procedure_service_handler", handler_type.span()),
|
||||
@@ -163,6 +164,11 @@ fn build_struct(
|
||||
Ident::new("table_mutation_handler", handler_type.span()),
|
||||
Ident::new("MissingTableMutationHandlerSnafu", handler_type.span()),
|
||||
),
|
||||
|
||||
"FlowServiceHandlerRef" => (
|
||||
Ident::new("flow_service_handler", handler_type.span()),
|
||||
Ident::new("MissingFlowServiceHandlerSnafu", handler_type.span()),
|
||||
),
|
||||
handler => ok!(error!(
|
||||
handler_type.span(),
|
||||
format!("Unknown handler type: {handler}")
|
||||
@@ -174,29 +180,29 @@ fn build_struct(
|
||||
#[derive(Debug)]
|
||||
#vis struct #name;
|
||||
|
||||
impl fmt::Display for #name {
|
||||
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
|
||||
impl std::fmt::Display for #name {
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
|
||||
write!(f, #uppcase_display_name)
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
impl Function for #name {
|
||||
impl crate::function::Function for #name {
|
||||
fn name(&self) -> &'static str {
|
||||
#display_name
|
||||
}
|
||||
|
||||
fn return_type(&self, _input_types: &[ConcreteDataType]) -> Result<ConcreteDataType> {
|
||||
Ok(ConcreteDataType::#ret())
|
||||
fn return_type(&self, _input_types: &[store_api::storage::ConcreteDataType]) -> common_query::error::Result<store_api::storage::ConcreteDataType> {
|
||||
Ok(store_api::storage::ConcreteDataType::#ret())
|
||||
}
|
||||
|
||||
fn signature(&self) -> Signature {
|
||||
#sig_fn()
|
||||
}
|
||||
|
||||
fn eval(&self, func_ctx: FunctionContext, columns: &[VectorRef]) -> Result<VectorRef> {
|
||||
fn eval(&self, func_ctx: crate::function::FunctionContext, columns: &[datatypes::vectors::VectorRef]) -> common_query::error::Result<datatypes::vectors::VectorRef> {
|
||||
// Ensure under the `greptime` catalog for security
|
||||
ensure_greptime!(func_ctx);
|
||||
crate::ensure_greptime!(func_ctx);
|
||||
|
||||
let columns_num = columns.len();
|
||||
let rows_num = if columns.is_empty() {
|
||||
@@ -208,6 +214,9 @@ fn build_struct(
|
||||
|
||||
// TODO(dennis): DataFusion doesn't support async UDF currently
|
||||
std::thread::spawn(move || {
|
||||
use snafu::OptionExt;
|
||||
use datatypes::data_type::DataType;
|
||||
|
||||
let query_ctx = &func_ctx.query_ctx;
|
||||
let handler = func_ctx
|
||||
.state
|
||||
@@ -215,11 +224,11 @@ fn build_struct(
|
||||
.as_ref()
|
||||
.context(#snafu_type)?;
|
||||
|
||||
let mut builder = ConcreteDataType::#ret()
|
||||
let mut builder = store_api::storage::ConcreteDataType::#ret()
|
||||
.create_mutable_vector(rows_num);
|
||||
|
||||
if columns_num == 0 {
|
||||
let result = common_runtime::block_on_read(async move {
|
||||
let result = common_runtime::block_on_global(async move {
|
||||
#fn_name(handler, query_ctx, &[]).await
|
||||
})?;
|
||||
|
||||
@@ -230,7 +239,7 @@ fn build_struct(
|
||||
.map(|vector| vector.get_ref(i))
|
||||
.collect();
|
||||
|
||||
let result = common_runtime::block_on_read(async move {
|
||||
let result = common_runtime::block_on_global(async move {
|
||||
#fn_name(handler, query_ctx, &args).await
|
||||
})?;
|
||||
|
||||
@@ -242,9 +251,9 @@ fn build_struct(
|
||||
})
|
||||
.join()
|
||||
.map_err(|e| {
|
||||
error!(e; "Join thread error");
|
||||
ThreadJoin {
|
||||
location: Location::default(),
|
||||
common_telemetry::error!(e; "Join thread error");
|
||||
common_query::error::Error::ThreadJoin {
|
||||
location: snafu::Location::default(),
|
||||
}
|
||||
})?
|
||||
|
||||
|
||||
@@ -73,7 +73,7 @@ pub fn range_fn(args: TokenStream, input: TokenStream) -> TokenStream {
|
||||
|
||||
/// Attribute macro to convert a normal function to SQL administration function. The annotated function
|
||||
/// should accept:
|
||||
/// - `&ProcedureServiceHandlerRef` or `&TableMutationHandlerRef` as the first argument,
|
||||
/// - `&ProcedureServiceHandlerRef` or `&TableMutationHandlerRef` or `FlowServiceHandlerRef` as the first argument,
|
||||
/// - `&QueryContextRef` as the second argument, and
|
||||
/// - `&[ValueRef<'_>]` as the third argument which is SQL function input values in each row.
|
||||
/// Return type must be `common_query::error::Result<Value>`.
|
||||
@@ -85,6 +85,8 @@ pub fn range_fn(args: TokenStream, input: TokenStream) -> TokenStream {
|
||||
/// - `ret`: The return type of the generated SQL function, it will be transformed into `ConcreteDataType::{ret}_datatype()` result.
|
||||
/// - `display_name`: The display name of the generated SQL function.
|
||||
/// - `sig_fn`: the function to returns `Signature` of generated `Function`.
|
||||
///
|
||||
/// Note that this macro should only be used in the `common-function` crate for now.
|
||||
#[proc_macro_attribute]
|
||||
pub fn admin_fn(args: TokenStream, input: TokenStream) -> TokenStream {
|
||||
process_admin_fn(args, input)
|
||||
|
||||
@@ -55,6 +55,12 @@ pub fn region_storage_path(catalog: &str, schema: &str) -> String {
    format!("{}/{}", catalog, schema)
}

/// Extracts catalog and schema from the path that was created by [region_storage_path].
pub fn get_catalog_and_schema(path: &str) -> Option<(String, String)> {
    let mut split = path.split('/');
    Some((split.next()?.to_string(), split.next()?.to_string()))
}

pub async fn check_and_get_physical_table_id(
    table_metadata_manager: &TableMetadataManagerRef,
    tasks: &[CreateTableTask],
@@ -145,3 +151,18 @@ pub fn convert_region_routes_to_detecting_regions(
        })
        .collect::<Vec<_>>()
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_get_catalog_and_schema() {
        let test_catalog = "my_catalog";
        let test_schema = "my_schema";
        let path = region_storage_path(test_catalog, test_schema);
        let (catalog, schema) = get_catalog_and_schema(&path).unwrap();
        assert_eq!(catalog, test_catalog);
        assert_eq!(schema, test_schema);
    }
}

@@ -33,7 +33,7 @@ pub const DATANODE_LEASE_SECS: u64 = REGION_LEASE_SECS;
pub const FLOWNODE_LEASE_SECS: u64 = DATANODE_LEASE_SECS;

/// The lease seconds of metasrv leader.
pub const META_LEASE_SECS: u64 = 3;
pub const META_LEASE_SECS: u64 = 5;

/// In a lease, there are two opportunities for renewal.
pub const META_KEEP_ALIVE_INTERVAL_SECS: u64 = META_LEASE_SECS / 2;

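Since META_KEEP_ALIVE_INTERVAL_SECS is derived as META_LEASE_SECS / 2 with integer division, raising the lease from 3 s to 5 s moves the keep-alive interval from 1 s to 2 s, which still gives the metasrv leader the two renewal opportunities per lease that the comment above describes.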
@@ -142,6 +142,10 @@ impl FlowInfoValue {
        &self.source_table_ids
    }

    pub fn catalog_name(&self) -> &String {
        &self.catalog_name
    }

    pub fn flow_name(&self) -> &String {
        &self.flow_name
    }
@@ -161,6 +165,10 @@ impl FlowInfoValue {
    pub fn comment(&self) -> &String {
        &self.comment
    }

    pub fn options(&self) -> &HashMap<String, String> {
        &self.options
    }
}

pub type FlowInfoManagerRef = Arc<FlowInfoManager>;

||||
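With the leader lease raised from 3 to 5 seconds in the hunk above, the keep-alive interval follows from the integer division META_LEASE_SECS / 2: previously 3 / 2 = 1 second, now 5 / 2 = 2 seconds, so the documented two renewal opportunities still fit inside each lease window.
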
@@ -12,6 +12,9 @@
// See the License for the specific language governing permissions and
// limitations under the License.

use std::sync::Arc;

use futures::stream::BoxStream;
use lazy_static::lazy_static;
use regex::Regex;
use serde::{Deserialize, Serialize};
@@ -20,9 +23,14 @@ use snafu::OptionExt;
use crate::error::{self, Result};
use crate::key::flow::FlowScoped;
use crate::key::txn_helper::TxnOpGetResponseSet;
use crate::key::{DeserializedValueWithBytes, FlowId, MetaKey, TableMetaValue, NAME_PATTERN};
use crate::key::{
BytesAdapter, DeserializedValueWithBytes, FlowId, MetaKey, TableMetaValue, NAME_PATTERN,
};
use crate::kv_backend::txn::Txn;
use crate::kv_backend::KvBackendRef;
use crate::range_stream::{PaginationStream, DEFAULT_PAGE_SIZE};
use crate::rpc::store::RangeRequest;
use crate::rpc::KeyValue;

const FLOW_NAME_KEY_PREFIX: &str = "name";

@@ -38,6 +46,7 @@ lazy_static! {
/// The layout: `__flow/name/{catalog_name}/{flow_name}`.
pub struct FlowNameKey<'a>(FlowScoped<FlowNameKeyInner<'a>>);

#[allow(dead_code)]
impl<'a> FlowNameKey<'a> {
/// Returns the [FlowNameKey]
pub fn new(catalog: &'a str, flow_name: &'a str) -> FlowNameKey<'a> {
@@ -45,13 +54,22 @@ impl<'a> FlowNameKey<'a> {
FlowNameKey(FlowScoped::new(inner))
}

#[cfg(test)]
pub fn range_start_key(catalog: &str) -> Vec<u8> {
let inner = BytesAdapter::from(Self::prefix(catalog).into_bytes());

FlowScoped::new(inner).to_bytes()
}

/// Return `name/{catalog}/` as prefix
pub fn prefix(catalog: &str) -> String {
format!("{}/{}/", FLOW_NAME_KEY_PREFIX, catalog)
}

/// Returns the catalog.
pub fn catalog(&self) -> &str {
self.0.catalog_name
}

#[cfg(test)]
/// Return the `flow_name`
pub fn flow_name(&self) -> &str {
self.0.flow_name
@@ -140,6 +158,12 @@ impl FlowNameValue {
}
}

pub fn flow_name_decoder(kv: KeyValue) -> Result<(String, FlowNameValue)> {
let flow_name = FlowNameKey::from_bytes(&kv.key)?;
let flow_id = FlowNameValue::try_from_raw_value(&kv.value)?;
Ok((flow_name.flow_name().to_string(), flow_id))
}

/// The manager of [FlowNameKey].
pub struct FlowNameManager {
kv_backend: KvBackendRef,
@@ -162,6 +186,25 @@ impl FlowNameManager {
.transpose()
}

/// Return all flows' names and ids
pub async fn flow_names(
&self,
catalog: &str,
) -> BoxStream<'static, Result<(String, FlowNameValue)>> {
let start_key = FlowNameKey::range_start_key(catalog);
common_telemetry::debug!("flow_names: start_key: {:?}", start_key);
let req = RangeRequest::new().with_prefix(start_key);

let stream = PaginationStream::new(
self.kv_backend.clone(),
req,
DEFAULT_PAGE_SIZE,
Arc::new(flow_name_decoder),
);

Box::pin(stream)
}

/// Returns true if the `flow` exists.
pub async fn exists(&self, catalog: &str, flow: &str) -> Result<bool> {
let key = FlowNameKey::new(catalog, flow);
@@ -212,4 +255,11 @@ mod tests {
assert_eq!(key.catalog(), "my_catalog");
assert_eq!(key.flow_name(), "my_task");
}
#[test]
fn test_key_start_range() {
assert_eq!(
b"__flow/name/greptime/".to_vec(),
FlowNameKey::range_start_key("greptime")
);
}
}
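A minimal consumption sketch for the `flow_names` stream added above, assuming only what the hunk shows (`FlowNameManager::flow_names` is async and yields `Result<(String, FlowNameValue)>` items); the surrounding helper and variable names are illustrative:

use futures::StreamExt;

// Hypothetical helper: collect the names of all flows in one catalog.
async fn collect_flow_names(manager: &FlowNameManager, catalog: &str) -> Result<Vec<String>> {
    let mut stream = manager.flow_names(catalog).await;
    let mut names = Vec::new();
    // Each item is a Result<(flow_name, FlowNameValue)>; propagate errors with `?`.
    while let Some(item) = stream.next().await {
        let (name, _value) = item?;
        names.push(name);
    }
    Ok(names)
}
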
@@ -1079,6 +1079,7 @@ pub struct QueryContext {
current_schema: String,
timezone: String,
extensions: HashMap<String, String>,
channel: u8,
}

impl From<QueryContextRef> for QueryContext {
@@ -1088,6 +1089,7 @@ impl From<QueryContextRef> for QueryContext {
current_schema: query_context.current_schema().to_string(),
timezone: query_context.timezone().to_string(),
extensions: query_context.extensions(),
channel: query_context.channel() as u8,
}
}
}
@@ -1099,6 +1101,7 @@ impl From<QueryContext> for PbQueryContext {
current_schema,
timezone,
extensions,
channel,
}: QueryContext,
) -> Self {
PbQueryContext {
@@ -1106,6 +1109,7 @@ impl From<QueryContext> for PbQueryContext {
current_schema,
timezone,
extensions,
channel: channel as u32,
}
}
}
@@ -461,7 +461,7 @@ impl LocalManager {

let tracing_context = TracingContext::from_current_span();

let _handle = common_runtime::spawn_bg(async move {
let _handle = common_runtime::spawn_global(async move {
// Run the root procedure.
// The task was moved to another runtime for execution.
// In order not to interrupt tracing, a span needs to be created to continue tracing the current task.
@@ -593,7 +593,7 @@ impl ProcedureManager for LocalManager {
let task_inner = self.build_remove_outdated_meta_task();

task_inner
.start(common_runtime::bg_runtime())
.start(common_runtime::global_runtime())
.context(StartRemoveOutdatedMetaTaskSnafu)?;

*task = Some(task_inner);

@@ -393,7 +393,7 @@ impl Runner {
// Add the id of the subprocedure to the metadata.
self.meta.push_child(procedure_id);

let _handle = common_runtime::spawn_bg(async move {
let _handle = common_runtime::spawn_global(async move {
// Run the root procedure.
runner.run().await
});
@@ -239,6 +239,12 @@ pub enum Error {
location: Location,
},

#[snafu(display("Missing FlowServiceHandler, not expected"))]
MissingFlowServiceHandler {
#[snafu(implicit)]
location: Location,
},

#[snafu(display("Invalid function args: {}", err_msg))]
InvalidFuncArgs {
err_msg: String,
@@ -252,6 +258,12 @@ pub enum Error {
#[snafu(implicit)]
location: Location,
},

#[snafu(display("Can't find alive flownode"))]
FlownodeNotFound {
#[snafu(implicit)]
location: Location,
},
}

pub type Result<T> = std::result::Result<T, Error>;
@@ -269,7 +281,8 @@ impl ErrorExt for Error {
| Error::BadAccumulatorImpl { .. }
| Error::ToScalarValue { .. }
| Error::GetScalarVector { .. }
| Error::ArrowCompute { .. } => StatusCode::EngineExecuteQuery,
| Error::ArrowCompute { .. }
| Error::FlownodeNotFound { .. } => StatusCode::EngineExecuteQuery,

Error::ExecuteFunction { error, .. } | Error::GeneralDataFusion { error, .. } => {
datafusion_status_code::<Self>(error, None)
@@ -283,6 +296,7 @@ impl ErrorExt for Error {

Error::MissingTableMutationHandler { .. }
| Error::MissingProcedureServiceHandler { .. }
| Error::MissingFlowServiceHandler { .. }
| Error::ExecuteRepeatedly { .. }
| Error::ThreadJoin { .. } => StatusCode::Unexpected,
@@ -25,7 +25,7 @@ use datafusion::logical_expr::{LogicalPlan, LogicalPlanBuilder};
use datafusion_common::Column;
use datafusion_expr::col;
use datatypes::prelude::ConcreteDataType;
pub use expr::build_filter_from_timestamp;
pub use expr::{build_filter_from_timestamp, build_same_type_ts_filter};

pub use self::accumulator::{Accumulator, AggregateFunctionCreator, AggregateFunctionCreatorRef};
pub use self::udaf::AggregateFunction;

@@ -18,6 +18,35 @@ use common_time::Timestamp;
use datafusion_common::{Column, ScalarValue};
use datafusion_expr::expr::Expr;
use datafusion_expr::{and, binary_expr, Operator};
use datatypes::data_type::DataType;
use datatypes::schema::ColumnSchema;
use datatypes::value::Value;

/// Builds a filter for a timestamp column, casting the time range bounds to the column's own timestamp type.
/// Returns [None] if the time range is [None] or covers the full range.
pub fn build_same_type_ts_filter(
ts_schema: &ColumnSchema,
time_range: Option<TimestampRange>,
) -> Option<Expr> {
let ts_type = ts_schema.data_type.clone();
let time_range = time_range?;
let start = time_range
.start()
.and_then(|start| ts_type.try_cast(Value::Timestamp(start)));
let end = time_range
.end()
.and_then(|end| ts_type.try_cast(Value::Timestamp(end)));

let time_range = match (start, end) {
(Some(Value::Timestamp(start)), Some(Value::Timestamp(end))) => {
TimestampRange::new(start, end)
}
(Some(Value::Timestamp(start)), None) => Some(TimestampRange::from_start(start)),
(None, Some(Value::Timestamp(end))) => Some(TimestampRange::until_end(end, false)),
_ => return None,
};
build_filter_from_timestamp(&ts_schema.name, time_range.as_ref())
}

/// Builds an `Expr` that filters a timestamp column by the given timestamp range.
/// Returns [None] if the time range is [None] or covers the full range.
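A hedged usage sketch for `build_same_type_ts_filter` above; the `ColumnSchema::new`, `ConcreteDataType::timestamp_millisecond_datatype`, and `Timestamp::new_millisecond` constructors are assumed here for illustration and may differ from the crate's actual API:

use common_time::{Timestamp, TimestampRange};
use datatypes::prelude::ConcreteDataType;
use datatypes::schema::ColumnSchema;

// Hypothetical example: build a filter over a millisecond timestamp column `ts`
// for the range [0, 1000).
fn example_ts_filter() -> Option<datafusion_expr::expr::Expr> {
    let ts_schema = ColumnSchema::new(
        "ts",
        ConcreteDataType::timestamp_millisecond_datatype(),
        false,
    );
    // Per the match arms above, `TimestampRange::new` already yields an `Option`,
    // which matches the `Option<TimestampRange>` parameter.
    let range = TimestampRange::new(
        Timestamp::new_millisecond(0),
        Timestamp::new_millisecond(1000),
    );
    build_same_type_ts_filter(&ts_schema, range)
}
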
@@ -22,19 +22,25 @@ use std::task::{Context, Poll};
use datafusion::arrow::compute::cast;
use datafusion::arrow::datatypes::SchemaRef as DfSchemaRef;
use datafusion::error::Result as DfResult;
use datafusion::execution::context::ExecutionProps;
use datafusion::logical_expr::utils::conjunction;
use datafusion::logical_expr::Expr;
use datafusion::physical_expr::create_physical_expr;
use datafusion::physical_plan::metrics::{BaselineMetrics, MetricValue};
use datafusion::physical_plan::{
accept, displayable, ExecutionPlan, ExecutionPlanVisitor,
accept, displayable, ExecutionPlan, ExecutionPlanVisitor, PhysicalExpr,
RecordBatchStream as DfRecordBatchStream,
};
use datafusion_common::arrow::error::ArrowError;
use datafusion_common::DataFusionError;
use datafusion_common::{DataFusionError, ToDFSchema};
use datatypes::arrow::array::Array;
use datatypes::schema::{Schema, SchemaRef};
use futures::ready;
use pin_project::pin_project;
use snafu::ResultExt;

use crate::error::{self, Result};
use crate::filter::batch_filter;
use crate::{
DfRecordBatch, DfSendableRecordBatchStream, OrderOption, RecordBatch, RecordBatchStream,
SendableRecordBatchStream, Stream,
@@ -50,6 +56,7 @@ pub struct RecordBatchStreamTypeAdapter<T, E> {
stream: T,
projected_schema: DfSchemaRef,
projection: Vec<usize>,
predicate: Option<Arc<dyn PhysicalExpr>>,
phantom: PhantomData<E>,
}

@@ -69,9 +76,28 @@ where
stream,
projected_schema,
projection,
predicate: None,
phantom: Default::default(),
}
}

pub fn with_filter(mut self, filters: Vec<Expr>) -> Result<Self> {
let filters = if let Some(expr) = conjunction(filters) {
let df_schema = self
.projected_schema
.clone()
.to_dfschema_ref()
.context(error::PhysicalExprSnafu)?;

let filters = create_physical_expr(&expr, &df_schema, &ExecutionProps::new())
.context(error::PhysicalExprSnafu)?;
Some(filters)
} else {
None
};
self.predicate = filters;
Ok(self)
}
}

impl<T, E> DfRecordBatchStream for RecordBatchStreamTypeAdapter<T, E>
@@ -99,6 +125,8 @@ where

let projected_schema = this.projected_schema.clone();
let projection = this.projection.clone();
let predicate = this.predicate.clone();

let batch = batch.map(|b| {
b.and_then(|b| {
let projected_column = b.project(&projection)?;
@@ -121,6 +149,11 @@ where
}
}
let record_batch = DfRecordBatch::try_new(projected_schema, columns)?;
let record_batch = if let Some(predicate) = predicate {
batch_filter(&record_batch, &predicate)?
} else {
record_batch
};
Ok(record_batch)
})
});
@@ -73,6 +73,14 @@ pub enum Error {
location: Location,
},

#[snafu(display("Create physical expr error"))]
PhysicalExpr {
#[snafu(source)]
error: datafusion::error::DataFusionError,
#[snafu(implicit)]
location: Location,
},

#[snafu(display("Fail to format record batch"))]
Format {
#[snafu(source)]
@@ -167,7 +175,8 @@ impl ErrorExt for Error {
| Error::PollStream { .. }
| Error::Format { .. }
| Error::ToArrowScalar { .. }
| Error::ProjectArrowRecordBatch { .. } => StatusCode::Internal,
| Error::ProjectArrowRecordBatch { .. }
| Error::PhysicalExpr { .. } => StatusCode::Internal,

Error::ArrowCompute { .. } => StatusCode::IllegalState,
@@ -14,11 +14,18 @@

//! Util record batch stream wrapper that can perform precise filter.

use std::sync::Arc;

use datafusion::error::Result as DfResult;
use datafusion::logical_expr::{Expr, Literal, Operator};
use datafusion::physical_plan::PhysicalExpr;
use datafusion_common::arrow::array::{ArrayRef, Datum, Scalar};
use datafusion_common::arrow::buffer::BooleanBuffer;
use datafusion_common::arrow::compute::kernels::cmp;
use datafusion_common::ScalarValue;
use datafusion_common::cast::{as_boolean_array, as_null_array};
use datafusion_common::{internal_err, DataFusionError, ScalarValue};
use datatypes::arrow::array::{Array, BooleanArray, RecordBatch};
use datatypes::arrow::compute::filter_record_batch;
use datatypes::vectors::VectorRef;
use snafu::ResultExt;

@@ -144,13 +151,43 @@ impl SimpleFilterEvaluator {
}
}

/// Evaluates the predicate on the input [RecordBatch], and returns a new [RecordBatch].
/// Copied from DataFusion's `physical_plan/src/filter.rs`.
pub fn batch_filter(
batch: &RecordBatch,
predicate: &Arc<dyn PhysicalExpr>,
) -> DfResult<RecordBatch> {
predicate
.evaluate(batch)
.and_then(|v| v.into_array(batch.num_rows()))
.and_then(|array| {
let filter_array = match as_boolean_array(&array) {
Ok(boolean_array) => Ok(boolean_array.clone()),
Err(_) => {
let Ok(null_array) = as_null_array(&array) else {
return internal_err!(
"Cannot create filter_array from non-boolean predicates"
);
};

// If the predicate is null, then the result is also null.
Ok::<BooleanArray, DataFusionError>(BooleanArray::new_null(null_array.len()))
}
}?;
Ok(filter_record_batch(batch, &filter_array)?)
})
}

#[cfg(test)]
mod test {

use std::sync::Arc;

use datafusion::logical_expr::BinaryExpr;
use datafusion_common::Column;
use datafusion::execution::context::ExecutionProps;
use datafusion::logical_expr::{col, lit, BinaryExpr};
use datafusion::physical_expr::create_physical_expr;
use datafusion_common::{Column, DFSchema};
use datatypes::arrow::datatypes::{DataType, Field, Schema};

use super::*;

@@ -281,4 +318,35 @@ mod test {
let result = evaluator.evaluate_scalar(&input_3).unwrap();
assert!(!result);
}

#[test]
fn batch_filter_test() {
let expr = col("ts").gt(lit(123456u64));
let schema = Schema::new(vec![
Field::new("a", DataType::Int32, true),
Field::new("ts", DataType::UInt64, false),
]);
let df_schema = DFSchema::try_from(schema.clone()).unwrap();
let props = ExecutionProps::new();
let physical_expr = create_physical_expr(&expr, &df_schema, &props).unwrap();
let batch = RecordBatch::try_new(
Arc::new(schema),
vec![
Arc::new(datatypes::arrow::array::Int32Array::from(vec![4, 5, 6])),
Arc::new(datatypes::arrow::array::UInt64Array::from(vec![
123456, 123457, 123458,
])),
],
)
.unwrap();
let new_batch = batch_filter(&batch, &physical_expr).unwrap();
assert_eq!(new_batch.num_rows(), 2);
let first_column_values = new_batch
.column(0)
.as_any()
.downcast_ref::<datatypes::arrow::array::Int32Array>()
.unwrap();
let expected = datatypes::arrow::array::Int32Array::from(vec![5, 6]);
assert_eq!(first_column_values, &expected);
}
}
@@ -23,29 +23,25 @@ use serde::{Deserialize, Serialize};

use crate::{Builder, JoinHandle, Runtime};

const READ_WORKERS: usize = 8;
const WRITE_WORKERS: usize = 8;
const BG_WORKERS: usize = 4;
const GLOBAL_WORKERS: usize = 8;
const COMPACT_WORKERS: usize = 4;
const HB_WORKERS: usize = 2;

/// The options for the global runtimes.
#[derive(Clone, Debug, Serialize, Deserialize, PartialEq)]
pub struct RuntimeOptions {
/// The number of threads to execute the runtime for global read operations.
pub read_rt_size: usize,
/// The number of threads to execute the runtime for global write operations.
pub write_rt_size: usize,
/// The number of threads to execute the runtime for global background operations.
pub bg_rt_size: usize,
/// The number of threads for the global default runtime.
pub global_rt_size: usize,
/// The number of threads to execute the runtime for compact operations.
pub compact_rt_size: usize,
}

impl Default for RuntimeOptions {
fn default() -> Self {
let cpus = num_cpus::get();
Self {
read_rt_size: cpus,
write_rt_size: cpus,
bg_rt_size: usize::max(cpus / 2, 1),
global_rt_size: cpus,
compact_rt_size: usize::max(cpus / 2, 1),
}
}
}
@@ -61,9 +57,8 @@ pub fn create_runtime(runtime_name: &str, thread_name: &str, worker_threads: usi
}

struct GlobalRuntimes {
read_runtime: Runtime,
write_runtime: Runtime,
bg_runtime: Runtime,
global_runtime: Runtime,
compact_runtime: Runtime,
hb_runtime: Runtime,
}

@@ -95,48 +90,38 @@ macro_rules! define_spawn {
}

impl GlobalRuntimes {
define_spawn!(read);
define_spawn!(write);
define_spawn!(bg);
define_spawn!(global);
define_spawn!(compact);
define_spawn!(hb);

fn new(
read: Option<Runtime>,
write: Option<Runtime>,
background: Option<Runtime>,
heartbeat: Option<Runtime>,
) -> Self {
fn new(global: Option<Runtime>, compact: Option<Runtime>, heartbeat: Option<Runtime>) -> Self {
Self {
read_runtime: read
.unwrap_or_else(|| create_runtime("global-read", "read-worker", READ_WORKERS)),
write_runtime: write
.unwrap_or_else(|| create_runtime("global-write", "write-worker", WRITE_WORKERS)),
bg_runtime: background
.unwrap_or_else(|| create_runtime("global-bg", "bg-worker", BG_WORKERS)),
global_runtime: global
.unwrap_or_else(|| create_runtime("global", "global-worker", GLOBAL_WORKERS)),
compact_runtime: compact
.unwrap_or_else(|| create_runtime("compact", "compact-worker", COMPACT_WORKERS)),
hb_runtime: heartbeat
.unwrap_or_else(|| create_runtime("global-hb", "hb-worker", HB_WORKERS)),
.unwrap_or_else(|| create_runtime("heartbeat", "hb-worker", HB_WORKERS)),
}
}
}

#[derive(Default)]
struct ConfigRuntimes {
read_runtime: Option<Runtime>,
write_runtime: Option<Runtime>,
bg_runtime: Option<Runtime>,
global_runtime: Option<Runtime>,
compact_runtime: Option<Runtime>,
hb_runtime: Option<Runtime>,
already_init: bool,
}

static GLOBAL_RUNTIMES: Lazy<GlobalRuntimes> = Lazy::new(|| {
let mut c = CONFIG_RUNTIMES.lock().unwrap();
let read = c.read_runtime.take();
let write = c.write_runtime.take();
let background = c.bg_runtime.take();
let global = c.global_runtime.take();
let compact = c.compact_runtime.take();
let heartbeat = c.hb_runtime.take();
c.already_init = true;

GlobalRuntimes::new(read, write, background, heartbeat)
GlobalRuntimes::new(global, compact, heartbeat)
});

static CONFIG_RUNTIMES: Lazy<Mutex<ConfigRuntimes>> =
@@ -152,22 +137,17 @@ pub fn init_global_runtimes(options: &RuntimeOptions) {
START.call_once(move || {
let mut c = CONFIG_RUNTIMES.lock().unwrap();
assert!(!c.already_init, "Global runtimes already initialized");
c.read_runtime = Some(create_runtime(
"global-read",
"global-read-worker",
options.read_rt_size,
c.global_runtime = Some(create_runtime(
"global",
"global-worker",
options.global_rt_size,
));
c.write_runtime = Some(create_runtime(
"global-write",
"global-write-worker",
options.write_rt_size,
c.compact_runtime = Some(create_runtime(
"compact",
"compact-worker",
options.compact_rt_size,
));
c.bg_runtime = Some(create_runtime(
"global-bg",
"global-bg-worker",
options.bg_rt_size,
));
c.hb_runtime = Some(create_runtime("global-hb", "global-hb-worker", HB_WORKERS));
c.hb_runtime = Some(create_runtime("heartbeat", "hb-worker", HB_WORKERS));
});
}

@@ -205,9 +185,8 @@ macro_rules! define_global_runtime_spawn {
};
}

define_global_runtime_spawn!(read);
define_global_runtime_spawn!(write);
define_global_runtime_spawn!(bg);
define_global_runtime_spawn!(global);
define_global_runtime_spawn!(compact);
define_global_runtime_spawn!(hb);

#[cfg(test)]
@@ -218,16 +197,13 @@ mod tests {

#[test]
fn test_spawn_block_on() {
let handle = spawn_read(async { 1 + 1 });
assert_eq!(2, block_on_read(handle).unwrap());
let handle = spawn_global(async { 1 + 1 });
assert_eq!(2, block_on_global(handle).unwrap());

let handle = spawn_write(async { 2 + 2 });
assert_eq!(4, block_on_write(handle).unwrap());
let handle = spawn_compact(async { 2 + 2 });
assert_eq!(4, block_on_compact(handle).unwrap());

let handle = spawn_bg(async { 3 + 3 });
assert_eq!(6, block_on_bg(handle).unwrap());

let handle = spawn_bg(async { 4 + 4 });
let handle = spawn_hb(async { 4 + 4 });
assert_eq!(8, block_on_hb(handle).unwrap());
}

@@ -253,8 +229,7 @@ mod tests {
};
}

define_spawn_blocking_test!(read);
define_spawn_blocking_test!(write);
define_spawn_blocking_test!(bg);
define_spawn_blocking_test!(global);
define_spawn_blocking_test!(compact);
define_spawn_blocking_test!(hb);
}
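A brief migration sketch based only on the renames visible in this refactor: the per-purpose read/write/bg runtimes collapse into a single global runtime, with dedicated compact and heartbeat runtimes kept alongside it. The closure bodies and helper name below are placeholders.

// Hypothetical helper showing where each kind of work now lands.
fn spawn_examples() {
    // Former spawn_read / spawn_write / spawn_bg call sites move to the global runtime.
    let _io = common_runtime::spawn_global(async { /* query or background work */ });
    // Compaction jobs get their own runtime.
    let _compaction = common_runtime::spawn_compact(async { /* compaction work */ });
    // Heartbeat loops stay on the dedicated heartbeat runtime.
    let _heartbeat = common_runtime::spawn_hb(async { /* heartbeat loop */ });
}
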
@@ -19,9 +19,9 @@ mod repeated_task;
pub mod runtime;

pub use global::{
bg_runtime, block_on_bg, block_on_read, block_on_write, create_runtime, init_global_runtimes,
read_runtime, spawn_bg, spawn_blocking_bg, spawn_blocking_hb, spawn_blocking_read,
spawn_blocking_write, spawn_hb, spawn_read, spawn_write, write_runtime,
block_on_compact, block_on_global, compact_runtime, create_runtime, global_runtime,
init_global_runtimes, spawn_blocking_compact, spawn_blocking_global, spawn_blocking_hb,
spawn_compact, spawn_global, spawn_hb,
};

pub use crate::repeated_task::{BoxedTaskFunction, RepeatedTask, TaskFunction};

@@ -200,7 +200,7 @@ mod tests {

let task = RepeatedTask::new(Duration::from_millis(100), Box::new(task_fn));

task.start(crate::bg_runtime()).unwrap();
task.start(crate::global_runtime()).unwrap();
tokio::time::sleep(Duration::from_millis(550)).await;
task.stop().await.unwrap();

@@ -217,7 +217,7 @@ mod tests {
let task = RepeatedTask::new(Duration::from_millis(100), Box::new(task_fn))
.with_initial_delay(Some(Duration::ZERO));

task.start(crate::bg_runtime()).unwrap();
task.start(crate::global_runtime()).unwrap();
tokio::time::sleep(Duration::from_millis(550)).await;
task.stop().await.unwrap();
@@ -187,7 +187,7 @@ impl RegionAliveKeeper {
let running = self.started.clone();

// Watches changes
common_runtime::spawn_bg(async move {
common_runtime::spawn_global(async move {
loop {
if !running.load(Ordering::Relaxed) {
info!("RegionAliveKeeper stopped! Quits the watch loop!");
@@ -286,7 +286,7 @@ impl CountdownTaskHandle {
region_id,
rx,
};
let handler = common_runtime::spawn_bg(async move {
let handler = common_runtime::spawn_hb(async move {
countdown_task.run().await;
});

@@ -25,7 +25,6 @@ use common_meta::key::datanode_table::{DatanodeTableManager, DatanodeTableValue}
use common_meta::kv_backend::KvBackendRef;
use common_meta::wal_options_allocator::prepare_wal_options;
pub use common_procedure::options::ProcedureConfig;
use common_runtime::Runtime;
use common_telemetry::{error, info, warn};
use common_wal::config::kafka::DatanodeKafkaConfig;
use common_wal::config::raft_engine::RaftEngineConfig;
@@ -55,8 +54,8 @@ use tokio::sync::Notify;
use crate::config::{DatanodeOptions, RegionEngineConfig, StorageConfig};
use crate::error::{
self, BuildMitoEngineSnafu, CreateDirSnafu, GetMetadataSnafu, MissingKvBackendSnafu,
MissingNodeIdSnafu, OpenLogStoreSnafu, Result, RuntimeResourceSnafu, ShutdownInstanceSnafu,
ShutdownServerSnafu, StartServerSnafu,
MissingNodeIdSnafu, OpenLogStoreSnafu, Result, ShutdownInstanceSnafu, ShutdownServerSnafu,
StartServerSnafu,
};
use crate::event_listener::{
new_region_server_event_channel, NoopRegionServerEventListener, RegionServerEventListenerRef,
@@ -224,7 +223,7 @@ impl DatanodeBuilder {

if self.opts.init_regions_in_background {
// Opens regions in background.
common_runtime::spawn_bg(async move {
common_runtime::spawn_global(async move {
if let Err(err) = open_all_regions.await {
error!(err; "Failed to open regions during the startup.");
}
@@ -321,23 +320,16 @@ impl DatanodeBuilder {
None,
None,
None,
None,
false,
self.plugins.clone(),
);
let query_engine = query_engine_factory.query_engine();

let runtime = Arc::new(
Runtime::builder()
.worker_threads(opts.grpc.runtime_size)
.thread_name("io-handlers")
.build()
.context(RuntimeResourceSnafu)?,
);

let table_provider_factory = Arc::new(DummyTableProviderFactory);
let mut region_server = RegionServer::with_table_provider(
query_engine,
runtime,
common_runtime::global_runtime(),
event_listener,
table_provider_factory,
);

@@ -108,7 +108,7 @@ impl HeartbeatResponseHandler for RegionHeartbeatResponseHandler {
let region_server = self.region_server.clone();
let catchup_tasks = self.catchup_tasks.clone();
let handler = Self::build_handler(instruction)?;
let _handle = common_runtime::spawn_bg(async move {
let _handle = common_runtime::spawn_global(async move {
let reply = handler(HandlerContext {
region_server,
catchup_tasks,
Some files were not shown because too many files have changed in this diff.