# The working home directory.
data_home = "/tmp/metasrv/"
# The bind address of metasrv, "127.0.0.1:3002" by default.
bind_addr = "127.0.0.1:3002"
# The communication server address for frontend and datanode to connect to metasrv, "127.0.0.1:3002" by default for localhost.
server_addr = "127.0.0.1:3002"
# Etcd server address, "127.0.0.1:2379" by default.
store_addr = "127.0.0.1:2379"
# Datanode selector type.
# - "lease_based" (default value).
# - "load_based"
# For details, please see "https://docs.greptime.com/developer-guide/metasrv/selector".
selector = "lease_based"
# Store data in memory, false by default.
use_memory_store = false
# Whether to enable greptimedb telemetry, true by default.
enable_telemetry = true
# If it's not empty, the metasrv will store all data with this key prefix.
store_key_prefix = ""

# Log options, see `standalone.example.toml`.
# [logging]
# dir = "/tmp/greptimedb/logs"
# level = "info"

# Procedure storage options.
[procedure]
# Max number of times a procedure will be retried.
max_retry_times = 12
# Initial retry delay of procedures; it increases exponentially.
retry_delay = "500ms"

# Failure detector options.
[failure_detector]
threshold = 8.0
min_std_deviation = "100ms"
acceptable_heartbeat_pause = "3000ms"
first_heartbeat_estimate = "1000ms"

# # Datanode options.
# [datanode]
# # Datanode client options.
# [datanode.client_options]
# timeout = "10s"
# connect_timeout = "10s"
# tcp_nodelay = true

[wal]
# Available WAL providers:
# - "raft_engine" (default)
# - "kafka"
provider = "raft_engine"

# There is no raft-engine WAL config here, since metasrv is only involved in the remote WAL currently.

# Kafka WAL config.
# The broker endpoints of the Kafka cluster. ["127.0.0.1:9092"] by default.
# broker_endpoints = ["127.0.0.1:9092"]
# Number of topics to be created upon start.
# num_topics = 64
# Topic selector type.
# Available selector types:
# - "round_robin" (default)
# selector_type = "round_robin"
# A Kafka topic is constructed by concatenating `topic_name_prefix` and `topic_id`.
# topic_name_prefix = "greptimedb_wal_topic"
# Number of partitions per topic.
# num_partitions = 1
# Expected number of replicas of each partition.
# replication_factor = 1
# The timeout beyond which a topic creation operation is cancelled.
# create_topic_timeout = "30s"
# The initial backoff for kafka clients.
# backoff_init = "500ms"
# The maximum backoff for kafka clients.
# backoff_max = "10s"
# Exponential backoff rate, i.e. next backoff = base * current backoff.
# backoff_base = 2
# Stop reconnecting if the total wait time reaches the deadline. If this config is missing, reconnecting will not terminate.
# backoff_deadline = "5mins"

# Metasrv exports the metrics it generates,
# encoded in the Prometheus remote-write format,
# and sends them to a Prometheus remote-write compatible receiver (e.g. `greptimedb` itself).
# This is only used by `greptimedb` to export its own metrics internally; it's different from a Prometheus scrape.
# [export_metrics]
# Whether to enable exporting metrics, false by default.
# enable = false
# The URL of the metrics export endpoint; defaults to the frontend's default HTTP endpoint.
# endpoint = "127.0.0.1:4000"
# The database that exported metrics are stored in; the user needs to specify a valid database.
# db = ""
# The interval at which metrics are exported.
# write_interval = "30s"
# HTTP headers carried by the Prometheus remote-write requests.
# headers = {}
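
# A minimal usage sketch (an illustration, not part of the original config): start metasrv with this
# file via the `greptime` CLI. The file path below is an assumption, and the exact flags may differ
# between releases, so check `greptime metasrv start --help` for your version.
#
#   greptime metasrv start -c metasrv.example.toml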