# greptimedb/config/metasrv.example.toml
## The working home directory.
data_home = "/tmp/metasrv/"
## The bind address of metasrv.
bind_addr = "127.0.0.1:3002"
## The server address that frontends and datanodes use to connect to metasrv; "127.0.0.1:3002" by default for localhost.
server_addr = "127.0.0.1:3002"
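# When metasrv runs on a dedicated host, `server_addr` usually has to be an address reachable by
# frontends and datanodes, while `bind_addr` can listen on all interfaces. A hypothetical example
# (the IP below is purely illustrative):
# bind_addr = "0.0.0.0:3002"
# server_addr = "192.168.1.10:3002"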
## Etcd server address.
store_addr = "127.0.0.1:2379"
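# Depending on the GreptimeDB version, multiple etcd endpoints may be accepted here as a
# comma-separated list; verify against your version before relying on it (addresses illustrative):
# store_addr = "127.0.0.1:2379,127.0.0.1:2380"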
## Datanode selector type.
## - `lease_based` (default value).
## - `load_based`
## For details, please see "https://docs.greptime.com/developer-guide/metasrv/selector".
selector = "lease_based"
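# For example, to pick datanodes by load rather than by lease, the alternative listed above can be used:
# selector = "load_based"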
## Store data in memory.
use_memory_store = false
## Whether to enable greptimedb telemetry.
enable_telemetry = true
## If it's not empty, the metasrv will store all data with this key prefix.
store_key_prefix = ""
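# For example, when several GreptimeDB clusters share one etcd cluster, each metasrv can be given
# its own prefix (the value below is purely illustrative):
# store_key_prefix = "cluster-a/"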
## Procedure storage options.
[procedure]
## Procedure max retry time.
max_retry_times = 12
## The initial retry delay of procedures; it increases exponentially.
retry_delay = "500ms"
## Auto split large value.
## GreptimeDB procedure uses etcd as the default metadata storage backend.
## The maximum size of any etcd request is 1.5 MiB.
## 1500KiB = 1536KiB (1.5MiB) - 36KiB (reserved size of the key).
## Comment out `max_metadata_value_size` to disable splitting of large values (no limit).
max_metadata_value_size = "1500KiB"
## Failure detector options.
[failure_detector]
## The failure-detection threshold; larger values tolerate longer heartbeat gaps before a datanode is regarded as failed.
threshold = 8.0
## The minimum standard deviation of heartbeat intervals, used to avoid over-sensitivity to small variations.
min_std_deviation = "100ms"
## The acceptable pause duration between heartbeats.
acceptable_heartbeat_pause = "3000ms"
## The estimated heartbeat interval used before actual heartbeat statistics are available.
first_heartbeat_estimate = "1000ms"
## Datanode options.
[datanode]
## Datanode client options.
[datanode.client]
timeout = "10s"
connect_timeout = "10s"
tcp_nodelay = true
[wal]
# Available wal providers:
# - `raft_engine` (default): there is no raft-engine wal config here, since metasrv is currently only involved in remote wal.
# - `kafka`: metasrv **has to be** configured with the kafka wal config when the kafka wal provider is used in the datanode.
provider = "raft_engine"
# Kafka wal config.
## The broker endpoints of the Kafka cluster.
broker_endpoints = ["127.0.0.1:9092"]
## Number of topics to be created upon start.
num_topics = 64
## Topic selector type.
## Available selector types:
## - `round_robin` (default)
selector_type = "round_robin"
## A Kafka topic is constructed by concatenating `topic_name_prefix` and `topic_id`.
topic_name_prefix = "greptimedb_wal_topic"
## Expected number of replicas of each partition.
replication_factor = 1
## The timeout of topic creation, above which the operation will be cancelled.
create_topic_timeout = "30s"
## The initial backoff for kafka clients.
backoff_init = "500ms"
## The maximum backoff for kafka clients.
backoff_max = "10s"
## Exponential backoff rate, i.e. next backoff = base * current backoff.
backoff_base = 2
## Stop reconnecting if the total wait time reaches the deadline. If this config is missing, reconnecting won't terminate.
backoff_deadline = "5mins"
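# A sketch of switching metasrv to the kafka remote wal, assuming the datanode is configured with
# the kafka wal provider as well (broker addresses are illustrative):
# [wal]
# provider = "kafka"
# broker_endpoints = ["kafka-0:9092", "kafka-1:9092"]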
## The logging options.
[logging]
## The directory to store the log files.
dir = "/tmp/greptimedb/logs"
## The log level. Can be `info`/`debug`/`warn`/`error`.
## +toml2docs:none-default
level = "info"
## Enable OTLP tracing.
enable_otlp_tracing = false
## The OTLP tracing endpoint.
## +toml2docs:none-default
otlp_endpoint = ""
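# A hypothetical endpoint, assuming an OpenTelemetry collector listening on the conventional
# OTLP/gRPC port 4317:
# otlp_endpoint = "localhost:4317"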
## Whether to append logs to stdout.
append_stdout = true
## The ratio of traces that will be sampled and exported.
## Valid range is `[0, 1]`: 1 means all traces are sampled, 0 means no traces are sampled; the default value is 1.
## Ratios > 1 are treated as 1, and ratios < 0 are treated as 0.
[logging.tracing_sample_ratio]
default_ratio = 1.0
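# For example, to sample roughly 10% of traces (illustrative value):
# default_ratio = 0.1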
## The metasrv can export its metrics and send them to a Prometheus-compatible service (e.g. `greptimedb` itself) via the remote-write API.
## This is only used for `greptimedb` to export its own metrics internally. It's different from a Prometheus scrape.
[export_metrics]
## Whether to enable exporting of metrics.
enable = false
## The interval of exporting metrics.
write_interval = "30s"
## For `standalone` mode, `self_import` is recommended to collect metrics generated by the instance itself.
[export_metrics.self_import]
## +toml2docs:none-default
db = "information_schema"
[export_metrics.remote_write]
## The URL to send the metrics to. An example URL: `http://127.0.0.1:4000/v1/prometheus/write?db=information_schema`.
url = ""
## HTTP headers carried by the Prometheus remote-write requests.
headers = { }
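# A sketch of exporting metrics back into this GreptimeDB instance itself, reusing the example
# URL above:
# [export_metrics]
# enable = true
# [export_metrics.remote_write]
# url = "http://127.0.0.1:4000/v1/prometheus/write?db=information_schema"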
## The tracing options. Only take effect when compiled with the `tokio-console` feature.
[tracing]
## The tokio console address.
## +toml2docs:none-default
tokio_console_addr = "127.0.0.1"
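# The address usually includes a port; the tokio-console default is 6669 (value illustrative):
# tokio_console_addr = "127.0.0.1:6669"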