## The working home directory.
data_home = "/tmp/metasrv/"

## The bind address of metasrv.
bind_addr = "127.0.0.1:3002"

## The communication server address for the frontend and datanode to connect to metasrv; "127.0.0.1:3002" by default for localhost.
server_addr = "127.0.0.1:3002"

## The store server address, which defaults to the etcd store.
store_addr = "127.0.0.1:2379"
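
## A minimal sketch for pointing metasrv at a multi-node etcd cluster. The addresses are
## placeholders, and listing several endpoints in one comma-separated string is an assumption
## here; verify the expected format against the GreptimeDB version you run.
# store_addr = "10.0.0.1:2379,10.0.0.2:2379,10.0.0.3:2379"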

## Datanode selector type.
## - `round_robin` (default value)
## - `lease_based`
## - `load_based`
## For details, please see "https://docs.greptime.com/developer-guide/metasrv/selector".
selector = "round_robin"

## Store data in memory.
use_memory_store = false

## Whether to enable greptimedb telemetry.
enable_telemetry = true

## If it's not empty, the metasrv will store all data with this key prefix.
store_key_prefix = ""
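
## A sketch of using a key prefix so that, for example, several clusters can share one etcd
## deployment without their metadata keys colliding. The prefix value is purely illustrative.
# store_key_prefix = "cluster-a/"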

## Whether to enable region failover.
## This feature is only available on GreptimeDB running in cluster mode and
## - Using Remote WAL
## - Using shared storage (e.g., s3).
enable_region_failover = false

## The datastore for the meta server.
backend = "EtcdStore"

## The runtime options.
[runtime]
## The number of threads to execute the runtime for global read operations.
global_rt_size = 8
## The number of threads to execute the runtime for global write operations.
compact_rt_size = 4

## Procedure storage options.
[procedure]

## The maximum number of times a procedure will be retried.
max_retry_times = 12

## The initial retry delay of procedures; it increases exponentially.
retry_delay = "500ms"
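
## Illustration only: with retry_delay = "500ms" and an exponential backoff that doubles on
## each attempt (the factor of 2 is an assumption, not taken from this file), the waits would
## look like 500ms, 1s, 2s, 4s, ... until the max_retry_times budget is exhausted.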

## Auto split large value.
## GreptimeDB procedure uses etcd as the default metadata storage backend.
## The maximum size of any etcd request is 1.5 MiB:
## 1500KiB = 1536KiB (1.5MiB) - 36KiB (reserved size of key)
## Comment out `max_metadata_value_size` to disable splitting of large values (no limit).
max_metadata_value_size = "1500KiB"

# The failure detector options.
[failure_detector]

## The threshold value used by the failure detector to determine failure conditions.
threshold = 8.0

## The minimum standard deviation of the heartbeat intervals, used to calculate acceptable variations.
min_std_deviation = "100ms"

## The acceptable pause duration between heartbeats, used to determine if a heartbeat interval is acceptable.
acceptable_heartbeat_pause = "10000ms"

## The initial estimate of the heartbeat interval used by the failure detector.
first_heartbeat_estimate = "1000ms"
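
## Tuning sketch (the value is illustrative, not a recommendation): on links with long pauses
## or frequent jitter, raising the acceptable pause makes the detector more tolerant of late
## heartbeats, at the cost of slower detection of real failures.
# acceptable_heartbeat_pause = "20000ms"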

## Datanode options.
[datanode]

## Datanode client options.
[datanode.client]

## Operation timeout.
timeout = "10s"

## The timeout for connecting to the server.
connect_timeout = "10s"

## `TCP_NODELAY` option for accepted connections.
tcp_nodelay = true

[wal]
# Available wal providers:
# - `raft_engine` (default): there is no raft-engine WAL config here, since metasrv is only involved in remote WAL currently.
# - `kafka`: metasrv **has to be** configured with the Kafka WAL config when the datanode uses the Kafka WAL provider.
provider = "raft_engine"
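
# A sketch of switching to the Kafka provider (kept commented out); the Kafka-specific options
# below only apply in that case:
# provider = "kafka"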

# The Kafka WAL config.

## The broker endpoints of the Kafka cluster.
broker_endpoints = ["127.0.0.1:9092"]

## Automatically create topics for WAL.
## Set to `true` to automatically create topics for WAL;
## otherwise, use existing topics named `topic_name_prefix_[0..num_topics)`.
auto_create_topics = true

## Number of topics.
num_topics = 64

## Topic selector type.
## Available selector types:
## - `round_robin` (default)
selector_type = "round_robin"

## A Kafka topic is constructed by concatenating `topic_name_prefix` and `topic_id`,
## e.g., greptimedb_wal_topic_0, greptimedb_wal_topic_1.
topic_name_prefix = "greptimedb_wal_topic"
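
## With the defaults above (num_topics = 64 and this prefix), WAL data is distributed across
## the topics greptimedb_wal_topic_0 through greptimedb_wal_topic_63.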

## Expected number of replicas of each partition.
replication_factor = 1

## The timeout above which a topic creation operation will be cancelled.
create_topic_timeout = "30s"

## The initial backoff for kafka clients.
backoff_init = "500ms"

## The maximum backoff for kafka clients.
backoff_max = "10s"

## Exponential backoff rate, i.e. next backoff = base * current backoff.
backoff_base = 2

## Stop reconnecting if the total wait time reaches the deadline. If this config is missing, reconnecting won't terminate.
backoff_deadline = "5mins"
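
## Worked example with the values above: the first reconnect waits 500ms, then 1s, 2s, 4s, 8s,
## after which the backoff stays capped at backoff_max = 10s; reconnecting stops once the total
## wait reaches the 5 minute backoff_deadline.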

# The Kafka SASL configuration.
# **It's only used when the provider is `kafka`**.
# Available SASL mechanisms:
# - `PLAIN`
# - `SCRAM-SHA-256`
# - `SCRAM-SHA-512`
# [wal.sasl]
# type = "SCRAM-SHA-512"
# username = "user_kafka"
# password = "secret"

# The Kafka TLS configuration.
# **It's only used when the provider is `kafka`**.
# [wal.tls]
# server_ca_cert_path = "/path/to/server_cert"
# client_cert_path = "/path/to/client_cert"
# client_key_path = "/path/to/key"

## The logging options.
[logging]
## The directory to store the log files.
dir = "/tmp/greptimedb/logs"

## The log level. Can be `info`/`debug`/`warn`/`error`.
## +toml2docs:none-default
level = "info"

## Enable OTLP tracing.
enable_otlp_tracing = false

## The OTLP tracing endpoint.
otlp_endpoint = "http://localhost:4317"

## Whether to append logs to stdout.
append_stdout = true

## The percentage of traces that will be sampled and exported.
## Valid range `[0, 1]`: 1 means all traces are sampled, 0 means no traces are sampled; the default value is 1.
## Ratios > 1 are treated as 1, and ratios < 0 are treated as 0.
[logging.tracing_sample_ratio]
default_ratio = 1.0
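
## For example, to sample about 10% of traces instead of all of them (illustrative value):
# default_ratio = 0.1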

## The metasrv can export its metrics and send them to a Prometheus-compatible service (e.g. `greptimedb` itself) via the remote-write API.
## This is only used for `greptimedb` to export its own metrics internally. It's different from prometheus scrape.
[export_metrics]

## Whether to enable the metrics export.
enable = false

## The interval at which metrics are exported.
write_interval = "30s"

## For `standalone` mode, `self_import` is recommended to collect metrics generated by itself.
## You must create the database before enabling it.
[export_metrics.self_import]
## +toml2docs:none-default
db = "greptime_metrics"

[export_metrics.remote_write]
## The URL to send the metrics to. For example: `http://127.0.0.1:4000/v1/prometheus/write?db=greptime_metrics`.
url = ""

## The HTTP headers carried by the Prometheus remote-write requests.
headers = { }
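
## A sketch of attaching custom headers to the remote-write requests; the header name and
## token are placeholders, not values expected by any particular backend:
# headers = { Authorization = "Bearer <token>" }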

## The tracing options. Only effective when compiled with the `tokio-console` feature.
[tracing]
## The tokio console address.
## +toml2docs:none-default
tokio_console_addr = "127.0.0.1"