# greptimedb/docs/benchmarks/log/structured_vector.toml
# Please note: we use a patched branch to build Vector:
# https://github.com/shuiyisong/vector/tree/chore/greptime_log_ingester_logitem
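#
# To run this benchmark config (assuming the patched `vector` binary is on your PATH
# and this file is in the current directory):
#   vector --config structured_vector.toml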
[sources.demo_logs]
type = "demo_logs"
format = "apache_common"
# interval = 1 / target rows per second;
# e.g. to insert at 20k rows/s, set 1 / 20000 = 0.00005.
# Set to 0 to run as fast as possible.
interval = 0
# total rows to insert
count = 100000000
lines = [ "line1" ]
[transforms.parse_logs]
type = "remap"
inputs = ["demo_logs"]
source = '''
. = parse_regex!(.message, r'^(?P<ip>\S+) - (?P<user>\S+) \[(?P<timestamp>[^\]]+)\] "(?P<method>\S+) (?P<path>\S+) (?P<http_version>\S+)" (?P<status>\d+) (?P<bytes>\d+)$')
# Convert timestamp to a standard format
.timestamp = parse_timestamp!(.timestamp, format: "%d/%b/%Y:%H:%M:%S %z")
# Convert status and bytes to integers
.status = to_int!(.status)
.bytes = to_int!(.bytes)
'''
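# For reference, an apache_common line such as
#   127.0.0.1 - frank [10/Oct/2000:13:55:36 -0700] "GET /apache_pb.gif HTTP/1.0" 200 2326
# is parsed into the fields ip, user, timestamp, method, path, http_version, status and bytes,
# with status/bytes converted to integers and timestamp parsed into a timestamp value.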
[sinks.sink_greptime_logs]
type = "greptimedb_logs"
# The table to insert into
table = "test_table"
pipeline_name = "demo_pipeline"
compression = "none"
inputs = [ "parse_logs" ]
endpoint = "http://127.0.0.1:4000"
# Batch size for each insertion
batch.max_events = 4000
[sinks.clickhouse]
type = "clickhouse"
inputs = [ "parse_logs" ]
database = "default"
endpoint = "http://127.0.0.1:8123"
format = "json_each_row"
# The table to insert into
table = "test_table"
[sinks.sink_elasticsearch]
type = "elasticsearch"
inputs = [ "parse_logs" ]
api_version = "auto"
compression = "none"
doc_type = "_doc"
endpoints = [ "http://127.0.0.1:9200" ]
id_key = "id"
mode = "bulk"