# Node running mode, see `standalone.example.toml`.
mode = "distributed"
# The datanode identifier, should be unique.
node_id = 42
# gRPC server address, "127.0.0.1:3001" by default.
rpc_addr = "127.0.0.1:3001"
# Hostname of this node.
rpc_hostname = "127.0.0.1"
# The number of gRPC server worker threads, 8 by default.
rpc_runtime_size = 8
# Start services only after regions have obtained leases.
# It will block the datanode startup if it can't receive leases through the heartbeat from metasrv.
require_lease_before_startup = false
# Initialize all regions in the background during startup.
# By default, the datanode provides services only after all regions have been initialized.
initialize_region_in_background = false

[heartbeat]
# Interval for sending heartbeat messages to the Metasrv, 3 seconds by default.
interval = "3s"

# Metasrv client options.
[meta_client]
# Metasrv address list.
metasrv_addrs = ["127.0.0.1:3002"]
# Heartbeat timeout, 500 milliseconds by default.
heartbeat_timeout = "500ms"
# Operation timeout, 3 seconds by default.
timeout = "3s"
# Connect server timeout, 1 second by default.
connect_timeout = "1s"
# `TCP_NODELAY` option for accepted connections, true by default.
tcp_nodelay = true

# WAL options.
# Currently, users are expected to choose the WAL through the `provider` field.
# When a WAL provider is chosen, the user should comment out all other WAL configs
# except those corresponding to the chosen one (see the example after the Kafka options below).
[wal]
provider = "raft_engine"

# Raft-engine WAL options, see `standalone.example.toml`.
# The WAL data directory.
# dir = "/tmp/greptimedb/wal"
file_size = "256MB"
purge_threshold = "4GB"
purge_interval = "10m"
read_batch_size = 128
sync_write = false

# Kafka WAL options, see `standalone.example.toml`.
# broker_endpoints = ["127.0.0.1:9092"]
# max_batch_size = "4MB"
# linger = "200ms"
# produce_record_timeout = "100ms"
# backoff_init = "500ms"
# backoff_max = "10s"
# backoff_base = 2
# backoff_deadline = "5mins"
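# For example, switching to the Kafka-backed WAL amounts to changing the provider,
# commenting out the raft-engine keys above, and uncommenting the Kafka keys. This is
# only a sketch: it assumes the Kafka provider is named "kafka" and reuses only keys
# already listed in this file.
#
# [wal]
# provider = "kafka"
# broker_endpoints = ["127.0.0.1:9092"]
# max_batch_size = "4MB"
# linger = "200ms"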
# Storage options, see `standalone.example.toml`.
[storage]
# The working home directory.
data_home = "/tmp/greptimedb/"
# Storage type.
type = "File"
# TTL for all tables. Disabled by default.
# global_ttl = "7d"
# Cache configuration for object storage such as "S3" etc.
# The local file cache directory.
# cache_path = "/path/local_cache"
# The local file cache capacity in bytes.
# cache_capacity = "256MB"

# Custom storage options.
# [[storage.providers]]
# type = "S3"
# [[storage.providers]]
# type = "Gcs"

# Mito engine options.
[[region_engine]]
[region_engine.mito]
# Number of region workers.
num_workers = 8
# Request channel size of each worker.
worker_channel_size = 128
# Max batch size for a worker to handle requests.
worker_request_batch_size = 64
# Number of meta actions updated to trigger a new checkpoint for the manifest.
manifest_checkpoint_distance = 10
# Whether to compress manifest and checkpoint files by gzip (default false).
compress_manifest = false
# Max number of running background jobs.
max_background_jobs = 4
# Interval to auto flush a region if it has not flushed yet.
auto_flush_interval = "1h"
# Global write buffer size for all regions.
global_write_buffer_size = "1GB"
# Global write buffer size threshold to reject write requests (default 2GB).
global_write_buffer_reject_size = "2GB"
# Cache size for SST metadata (default 128MB). Set it to 0 to disable the cache.
sst_meta_cache_size = "128MB"
# Cache size for vectors and arrow arrays (default 512MB). Set it to 0 to disable the cache.
vector_cache_size = "512MB"
# Cache size for pages of SST row groups (default 512MB). Set it to 0 to disable the cache.
page_cache_size = "512MB"
# Buffer size for SST writing.
sst_write_buffer_size = "8MB"
# Parallelism to scan a region (default: 1/4 of CPU cores).
# - 0: use the default value (1/4 of CPU cores).
# - 1: scan in the current thread.
# - n: scan with parallelism n.
scan_parallelism = 0
# Capacity of the channel that sends data from parallel scan tasks to the main task (default 32).
parallel_scan_channel_size = 32

# Log options, see `standalone.example.toml`.
# [logging]
# dir = "/tmp/greptimedb/logs"
# level = "info"

# The datanode exports the metrics it generates,
# encoded in the Prometheus remote-write format,
# to a Prometheus remote-write compatible receiver (e.g. `greptimedb` itself).
# This is only used for `greptimedb` to export its own metrics internally; it is different from a Prometheus scrape.
# [export_metrics]
# Whether to enable export metrics, false by default.
# enable = false
# The URL of the metrics export endpoint; defaults to the frontend's default HTTP endpoint.
# endpoint = "127.0.0.1:4000"
# The database to store the exported metrics; the user needs to specify a valid database.
# db = ""
# The interval of exporting metrics.
# write_interval = "30s"
# HTTP headers carried by the Prometheus remote-write requests.
# headers = {}
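# For example, to let the datanode write its own metrics to a GreptimeDB HTTP endpoint
# every 30 seconds, the section above could be uncommented along these lines. This is
# only a sketch: the database name "greptime_metrics" is illustrative and must already
# exist on the receiving instance.
#
# [export_metrics]
# enable = true
# endpoint = "127.0.0.1:4000"
# db = "greptime_metrics"
# write_interval = "30s"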