Compare commits


95 Commits

Author SHA1 Message Date
Christian Schwarz
9d0405b798 Merge remote-tracking branch 'origin/main' into problame/batching-sidecar-task 2024-11-30 00:03:30 +01:00
Christian Schwarz
60b9238383 switch back to serial mode by default 2024-11-29 20:55:22 +01:00
Christian Schwarz
ce83418aa4 spsc_fold: fix missing wakeup on receiver or sender drop while other side is waiting 2024-11-29 20:54:28 +01:00
Christian Schwarz
1cd333ce38 read_messages => batcher 2024-11-29 20:40:51 +01:00
Christian Schwarz
1cf5b1463f benchmark results on hetzner box
--------------------------------------------------------------------------- Benchmark results ---------------------------------------------------------------------------
test_throughput[release-pg16-50-pipelining_config0-30-1-128-not batchable {'mode': 'serial'}].tablesize_mib: 50 MiB
test_throughput[release-pg16-50-pipelining_config0-30-1-128-not batchable {'mode': 'serial'}].effective_io_concurrency: 1
test_throughput[release-pg16-50-pipelining_config0-30-1-128-not batchable {'mode': 'serial'}].readhead_buffer_size: 128
test_throughput[release-pg16-50-pipelining_config0-30-1-128-not batchable {'mode': 'serial'}].pipelining_config.mode: serial
test_throughput[release-pg16-50-pipelining_config0-30-1-128-not batchable {'mode': 'serial'}].counters.time: 0.9880
test_throughput[release-pg16-50-pipelining_config0-30-1-128-not batchable {'mode': 'serial'}].counters.pageserver_getpage_count: 6,403.0000
test_throughput[release-pg16-50-pipelining_config0-30-1-128-not batchable {'mode': 'serial'}].counters.pageserver_vectored_get_count: 6,403.0000
test_throughput[release-pg16-50-pipelining_config0-30-1-128-not batchable {'mode': 'serial'}].counters.compute_getpage_count: 6,403.0000
test_throughput[release-pg16-50-pipelining_config0-30-1-128-not batchable {'mode': 'serial'}].counters.pageserver_cpu_seconds_total: 0.8227
test_throughput[release-pg16-50-pipelining_config0-30-1-128-not batchable {'mode': 'serial'}].perfmetric.batching_factor: 1.0000
test_throughput[release-pg16-50-pipelining_config1-30-1-128-not batchable {'max_batch_size': 1, 'execution': 'concurrent-futures', 'mode': 'pipelined'}].tablesize_mib: 50 MiB
test_throughput[release-pg16-50-pipelining_config1-30-1-128-not batchable {'max_batch_size': 1, 'execution': 'concurrent-futures', 'mode': 'pipelined'}].effective_io_concurrency: 1
test_throughput[release-pg16-50-pipelining_config1-30-1-128-not batchable {'max_batch_size': 1, 'execution': 'concurrent-futures', 'mode': 'pipelined'}].readhead_buffer_size: 128
test_throughput[release-pg16-50-pipelining_config1-30-1-128-not batchable {'max_batch_size': 1, 'execution': 'concurrent-futures', 'mode': 'pipelined'}].pipelining_config.max_batch_size: 1
test_throughput[release-pg16-50-pipelining_config1-30-1-128-not batchable {'max_batch_size': 1, 'execution': 'concurrent-futures', 'mode': 'pipelined'}].pipelining_config.execution: concurrent-futures
test_throughput[release-pg16-50-pipelining_config1-30-1-128-not batchable {'max_batch_size': 1, 'execution': 'concurrent-futures', 'mode': 'pipelined'}].pipelining_config.mode: pipelined
test_throughput[release-pg16-50-pipelining_config1-30-1-128-not batchable {'max_batch_size': 1, 'execution': 'concurrent-futures', 'mode': 'pipelined'}].counters.time: 1.0653
test_throughput[release-pg16-50-pipelining_config1-30-1-128-not batchable {'max_batch_size': 1, 'execution': 'concurrent-futures', 'mode': 'pipelined'}].counters.pageserver_getpage_count: 6,403.0000
test_throughput[release-pg16-50-pipelining_config1-30-1-128-not batchable {'max_batch_size': 1, 'execution': 'concurrent-futures', 'mode': 'pipelined'}].counters.pageserver_vectored_get_count: 6,403.0000
test_throughput[release-pg16-50-pipelining_config1-30-1-128-not batchable {'max_batch_size': 1, 'execution': 'concurrent-futures', 'mode': 'pipelined'}].counters.compute_getpage_count: 6,403.0000
test_throughput[release-pg16-50-pipelining_config1-30-1-128-not batchable {'max_batch_size': 1, 'execution': 'concurrent-futures', 'mode': 'pipelined'}].counters.pageserver_cpu_seconds_total: 0.9743
test_throughput[release-pg16-50-pipelining_config1-30-1-128-not batchable {'max_batch_size': 1, 'execution': 'concurrent-futures', 'mode': 'pipelined'}].perfmetric.batching_factor: 1.0000
test_throughput[release-pg16-50-pipelining_config2-30-1-128-not batchable {'max_batch_size': 1, 'execution': 'tasks', 'mode': 'pipelined'}].tablesize_mib: 50 MiB
test_throughput[release-pg16-50-pipelining_config2-30-1-128-not batchable {'max_batch_size': 1, 'execution': 'tasks', 'mode': 'pipelined'}].effective_io_concurrency: 1
test_throughput[release-pg16-50-pipelining_config2-30-1-128-not batchable {'max_batch_size': 1, 'execution': 'tasks', 'mode': 'pipelined'}].readhead_buffer_size: 128
test_throughput[release-pg16-50-pipelining_config2-30-1-128-not batchable {'max_batch_size': 1, 'execution': 'tasks', 'mode': 'pipelined'}].pipelining_config.max_batch_size: 1
test_throughput[release-pg16-50-pipelining_config2-30-1-128-not batchable {'max_batch_size': 1, 'execution': 'tasks', 'mode': 'pipelined'}].pipelining_config.execution: tasks
test_throughput[release-pg16-50-pipelining_config2-30-1-128-not batchable {'max_batch_size': 1, 'execution': 'tasks', 'mode': 'pipelined'}].pipelining_config.mode: pipelined
test_throughput[release-pg16-50-pipelining_config2-30-1-128-not batchable {'max_batch_size': 1, 'execution': 'tasks', 'mode': 'pipelined'}].counters.time: 1.0309
test_throughput[release-pg16-50-pipelining_config2-30-1-128-not batchable {'max_batch_size': 1, 'execution': 'tasks', 'mode': 'pipelined'}].counters.pageserver_getpage_count: 6,403.0000
test_throughput[release-pg16-50-pipelining_config2-30-1-128-not batchable {'max_batch_size': 1, 'execution': 'tasks', 'mode': 'pipelined'}].counters.pageserver_vectored_get_count: 6,403.0000
test_throughput[release-pg16-50-pipelining_config2-30-1-128-not batchable {'max_batch_size': 1, 'execution': 'tasks', 'mode': 'pipelined'}].counters.compute_getpage_count: 6,403.0000
test_throughput[release-pg16-50-pipelining_config2-30-1-128-not batchable {'max_batch_size': 1, 'execution': 'tasks', 'mode': 'pipelined'}].counters.pageserver_cpu_seconds_total: 0.8490
test_throughput[release-pg16-50-pipelining_config2-30-1-128-not batchable {'max_batch_size': 1, 'execution': 'tasks', 'mode': 'pipelined'}].perfmetric.batching_factor: 1.0000
test_throughput[release-pg16-50-pipelining_config3-30-1-128-not batchable {'max_batch_size': 32, 'execution': 'concurrent-futures', 'mode': 'pipelined'}].tablesize_mib: 50 MiB
test_throughput[release-pg16-50-pipelining_config3-30-1-128-not batchable {'max_batch_size': 32, 'execution': 'concurrent-futures', 'mode': 'pipelined'}].effective_io_concurrency: 1
test_throughput[release-pg16-50-pipelining_config3-30-1-128-not batchable {'max_batch_size': 32, 'execution': 'concurrent-futures', 'mode': 'pipelined'}].readhead_buffer_size: 128
test_throughput[release-pg16-50-pipelining_config3-30-1-128-not batchable {'max_batch_size': 32, 'execution': 'concurrent-futures', 'mode': 'pipelined'}].pipelining_config.max_batch_size: 32
test_throughput[release-pg16-50-pipelining_config3-30-1-128-not batchable {'max_batch_size': 32, 'execution': 'concurrent-futures', 'mode': 'pipelined'}].pipelining_config.execution: concurrent-futures
test_throughput[release-pg16-50-pipelining_config3-30-1-128-not batchable {'max_batch_size': 32, 'execution': 'concurrent-futures', 'mode': 'pipelined'}].pipelining_config.mode: pipelined
test_throughput[release-pg16-50-pipelining_config3-30-1-128-not batchable {'max_batch_size': 32, 'execution': 'concurrent-futures', 'mode': 'pipelined'}].counters.time: 1.0774
test_throughput[release-pg16-50-pipelining_config3-30-1-128-not batchable {'max_batch_size': 32, 'execution': 'concurrent-futures', 'mode': 'pipelined'}].counters.pageserver_getpage_count: 6,403.0000
test_throughput[release-pg16-50-pipelining_config3-30-1-128-not batchable {'max_batch_size': 32, 'execution': 'concurrent-futures', 'mode': 'pipelined'}].counters.pageserver_vectored_get_count: 6,403.0000
test_throughput[release-pg16-50-pipelining_config3-30-1-128-not batchable {'max_batch_size': 32, 'execution': 'concurrent-futures', 'mode': 'pipelined'}].counters.compute_getpage_count: 6,403.0000
test_throughput[release-pg16-50-pipelining_config3-30-1-128-not batchable {'max_batch_size': 32, 'execution': 'concurrent-futures', 'mode': 'pipelined'}].counters.pageserver_cpu_seconds_total: 0.9896
test_throughput[release-pg16-50-pipelining_config3-30-1-128-not batchable {'max_batch_size': 32, 'execution': 'concurrent-futures', 'mode': 'pipelined'}].perfmetric.batching_factor: 1.0000
test_throughput[release-pg16-50-pipelining_config4-30-1-128-not batchable {'max_batch_size': 32, 'execution': 'tasks', 'mode': 'pipelined'}].tablesize_mib: 50 MiB
test_throughput[release-pg16-50-pipelining_config4-30-1-128-not batchable {'max_batch_size': 32, 'execution': 'tasks', 'mode': 'pipelined'}].effective_io_concurrency: 1
test_throughput[release-pg16-50-pipelining_config4-30-1-128-not batchable {'max_batch_size': 32, 'execution': 'tasks', 'mode': 'pipelined'}].readhead_buffer_size: 128
test_throughput[release-pg16-50-pipelining_config4-30-1-128-not batchable {'max_batch_size': 32, 'execution': 'tasks', 'mode': 'pipelined'}].pipelining_config.max_batch_size: 32
test_throughput[release-pg16-50-pipelining_config4-30-1-128-not batchable {'max_batch_size': 32, 'execution': 'tasks', 'mode': 'pipelined'}].pipelining_config.execution: tasks
test_throughput[release-pg16-50-pipelining_config4-30-1-128-not batchable {'max_batch_size': 32, 'execution': 'tasks', 'mode': 'pipelined'}].pipelining_config.mode: pipelined
test_throughput[release-pg16-50-pipelining_config4-30-1-128-not batchable {'max_batch_size': 32, 'execution': 'tasks', 'mode': 'pipelined'}].counters.time: 1.0229
test_throughput[release-pg16-50-pipelining_config4-30-1-128-not batchable {'max_batch_size': 32, 'execution': 'tasks', 'mode': 'pipelined'}].counters.pageserver_getpage_count: 6,403.0000
test_throughput[release-pg16-50-pipelining_config4-30-1-128-not batchable {'max_batch_size': 32, 'execution': 'tasks', 'mode': 'pipelined'}].counters.pageserver_vectored_get_count: 6,403.0000
test_throughput[release-pg16-50-pipelining_config4-30-1-128-not batchable {'max_batch_size': 32, 'execution': 'tasks', 'mode': 'pipelined'}].counters.compute_getpage_count: 6,403.0000
test_throughput[release-pg16-50-pipelining_config4-30-1-128-not batchable {'max_batch_size': 32, 'execution': 'tasks', 'mode': 'pipelined'}].counters.pageserver_cpu_seconds_total: 0.8428
test_throughput[release-pg16-50-pipelining_config4-30-1-128-not batchable {'max_batch_size': 32, 'execution': 'tasks', 'mode': 'pipelined'}].perfmetric.batching_factor: 1.0000
test_throughput[release-pg16-50-pipelining_config5-30-100-128-batchable {'mode': 'serial'}].tablesize_mib: 50 MiB
test_throughput[release-pg16-50-pipelining_config5-30-100-128-batchable {'mode': 'serial'}].effective_io_concurrency: 100
test_throughput[release-pg16-50-pipelining_config5-30-100-128-batchable {'mode': 'serial'}].readhead_buffer_size: 128
test_throughput[release-pg16-50-pipelining_config5-30-100-128-batchable {'mode': 'serial'}].pipelining_config.mode: serial
test_throughput[release-pg16-50-pipelining_config5-30-100-128-batchable {'mode': 'serial'}].counters.time: 0.7338
test_throughput[release-pg16-50-pipelining_config5-30-100-128-batchable {'mode': 'serial'}].counters.pageserver_getpage_count: 6,403.0000
test_throughput[release-pg16-50-pipelining_config5-30-100-128-batchable {'mode': 'serial'}].counters.pageserver_vectored_get_count: 6,403.0000
test_throughput[release-pg16-50-pipelining_config5-30-100-128-batchable {'mode': 'serial'}].counters.compute_getpage_count: 6,403.0000
test_throughput[release-pg16-50-pipelining_config5-30-100-128-batchable {'mode': 'serial'}].counters.pageserver_cpu_seconds_total: 0.7370
test_throughput[release-pg16-50-pipelining_config5-30-100-128-batchable {'mode': 'serial'}].perfmetric.batching_factor: 1.0000
test_throughput[release-pg16-50-pipelining_config6-30-100-128-batchable {'max_batch_size': 1, 'execution': 'concurrent-futures', 'mode': 'pipelined'}].tablesize_mib: 50 MiB
test_throughput[release-pg16-50-pipelining_config6-30-100-128-batchable {'max_batch_size': 1, 'execution': 'concurrent-futures', 'mode': 'pipelined'}].effective_io_concurrency: 100
test_throughput[release-pg16-50-pipelining_config6-30-100-128-batchable {'max_batch_size': 1, 'execution': 'concurrent-futures', 'mode': 'pipelined'}].readhead_buffer_size: 128
test_throughput[release-pg16-50-pipelining_config6-30-100-128-batchable {'max_batch_size': 1, 'execution': 'concurrent-futures', 'mode': 'pipelined'}].pipelining_config.max_batch_size: 1
test_throughput[release-pg16-50-pipelining_config6-30-100-128-batchable {'max_batch_size': 1, 'execution': 'concurrent-futures', 'mode': 'pipelined'}].pipelining_config.execution: concurrent-futures
test_throughput[release-pg16-50-pipelining_config6-30-100-128-batchable {'max_batch_size': 1, 'execution': 'concurrent-futures', 'mode': 'pipelined'}].pipelining_config.mode: pipelined
test_throughput[release-pg16-50-pipelining_config6-30-100-128-batchable {'max_batch_size': 1, 'execution': 'concurrent-futures', 'mode': 'pipelined'}].counters.time: 0.6314
test_throughput[release-pg16-50-pipelining_config6-30-100-128-batchable {'max_batch_size': 1, 'execution': 'concurrent-futures', 'mode': 'pipelined'}].counters.pageserver_getpage_count: 6,403.0000
test_throughput[release-pg16-50-pipelining_config6-30-100-128-batchable {'max_batch_size': 1, 'execution': 'concurrent-futures', 'mode': 'pipelined'}].counters.pageserver_vectored_get_count: 6,403.0000
test_throughput[release-pg16-50-pipelining_config6-30-100-128-batchable {'max_batch_size': 1, 'execution': 'concurrent-futures', 'mode': 'pipelined'}].counters.compute_getpage_count: 6,403.0000
test_throughput[release-pg16-50-pipelining_config6-30-100-128-batchable {'max_batch_size': 1, 'execution': 'concurrent-futures', 'mode': 'pipelined'}].counters.pageserver_cpu_seconds_total: 0.7970
test_throughput[release-pg16-50-pipelining_config6-30-100-128-batchable {'max_batch_size': 1, 'execution': 'concurrent-futures', 'mode': 'pipelined'}].perfmetric.batching_factor: 1.0000
test_throughput[release-pg16-50-pipelining_config7-30-100-128-batchable {'max_batch_size': 1, 'execution': 'tasks', 'mode': 'pipelined'}].tablesize_mib: 50 MiB
test_throughput[release-pg16-50-pipelining_config7-30-100-128-batchable {'max_batch_size': 1, 'execution': 'tasks', 'mode': 'pipelined'}].effective_io_concurrency: 100
test_throughput[release-pg16-50-pipelining_config7-30-100-128-batchable {'max_batch_size': 1, 'execution': 'tasks', 'mode': 'pipelined'}].readhead_buffer_size: 128
test_throughput[release-pg16-50-pipelining_config7-30-100-128-batchable {'max_batch_size': 1, 'execution': 'tasks', 'mode': 'pipelined'}].pipelining_config.max_batch_size: 1
test_throughput[release-pg16-50-pipelining_config7-30-100-128-batchable {'max_batch_size': 1, 'execution': 'tasks', 'mode': 'pipelined'}].pipelining_config.execution: tasks
test_throughput[release-pg16-50-pipelining_config7-30-100-128-batchable {'max_batch_size': 1, 'execution': 'tasks', 'mode': 'pipelined'}].pipelining_config.mode: pipelined
test_throughput[release-pg16-50-pipelining_config7-30-100-128-batchable {'max_batch_size': 1, 'execution': 'tasks', 'mode': 'pipelined'}].counters.time: 0.7640
test_throughput[release-pg16-50-pipelining_config7-30-100-128-batchable {'max_batch_size': 1, 'execution': 'tasks', 'mode': 'pipelined'}].counters.pageserver_getpage_count: 6,403.0000
test_throughput[release-pg16-50-pipelining_config7-30-100-128-batchable {'max_batch_size': 1, 'execution': 'tasks', 'mode': 'pipelined'}].counters.pageserver_vectored_get_count: 6,403.0000
test_throughput[release-pg16-50-pipelining_config7-30-100-128-batchable {'max_batch_size': 1, 'execution': 'tasks', 'mode': 'pipelined'}].counters.compute_getpage_count: 6,403.0000
test_throughput[release-pg16-50-pipelining_config7-30-100-128-batchable {'max_batch_size': 1, 'execution': 'tasks', 'mode': 'pipelined'}].counters.pageserver_cpu_seconds_total: 0.7692
test_throughput[release-pg16-50-pipelining_config7-30-100-128-batchable {'max_batch_size': 1, 'execution': 'tasks', 'mode': 'pipelined'}].perfmetric.batching_factor: 1.0000
test_throughput[release-pg16-50-pipelining_config8-30-100-128-batchable {'max_batch_size': 2, 'execution': 'concurrent-futures', 'mode': 'pipelined'}].tablesize_mib: 50 MiB
test_throughput[release-pg16-50-pipelining_config8-30-100-128-batchable {'max_batch_size': 2, 'execution': 'concurrent-futures', 'mode': 'pipelined'}].effective_io_concurrency: 100
test_throughput[release-pg16-50-pipelining_config8-30-100-128-batchable {'max_batch_size': 2, 'execution': 'concurrent-futures', 'mode': 'pipelined'}].readhead_buffer_size: 128
test_throughput[release-pg16-50-pipelining_config8-30-100-128-batchable {'max_batch_size': 2, 'execution': 'concurrent-futures', 'mode': 'pipelined'}].pipelining_config.max_batch_size: 2
test_throughput[release-pg16-50-pipelining_config8-30-100-128-batchable {'max_batch_size': 2, 'execution': 'concurrent-futures', 'mode': 'pipelined'}].pipelining_config.execution: concurrent-futures
test_throughput[release-pg16-50-pipelining_config8-30-100-128-batchable {'max_batch_size': 2, 'execution': 'concurrent-futures', 'mode': 'pipelined'}].pipelining_config.mode: pipelined
test_throughput[release-pg16-50-pipelining_config8-30-100-128-batchable {'max_batch_size': 2, 'execution': 'concurrent-futures', 'mode': 'pipelined'}].counters.time: 0.4609
test_throughput[release-pg16-50-pipelining_config8-30-100-128-batchable {'max_batch_size': 2, 'execution': 'concurrent-futures', 'mode': 'pipelined'}].counters.pageserver_getpage_count: 6,402.7188
test_throughput[release-pg16-50-pipelining_config8-30-100-128-batchable {'max_batch_size': 2, 'execution': 'concurrent-futures', 'mode': 'pipelined'}].counters.pageserver_vectored_get_count: 3,206.7344
test_throughput[release-pg16-50-pipelining_config8-30-100-128-batchable {'max_batch_size': 2, 'execution': 'concurrent-futures', 'mode': 'pipelined'}].counters.compute_getpage_count: 6,402.7188
test_throughput[release-pg16-50-pipelining_config8-30-100-128-batchable {'max_batch_size': 2, 'execution': 'concurrent-futures', 'mode': 'pipelined'}].counters.pageserver_cpu_seconds_total: 0.5242
test_throughput[release-pg16-50-pipelining_config8-30-100-128-batchable {'max_batch_size': 2, 'execution': 'concurrent-futures', 'mode': 'pipelined'}].perfmetric.batching_factor: 1.9966
test_throughput[release-pg16-50-pipelining_config9-30-100-128-batchable {'max_batch_size': 2, 'execution': 'tasks', 'mode': 'pipelined'}].tablesize_mib: 50 MiB
test_throughput[release-pg16-50-pipelining_config9-30-100-128-batchable {'max_batch_size': 2, 'execution': 'tasks', 'mode': 'pipelined'}].effective_io_concurrency: 100
test_throughput[release-pg16-50-pipelining_config9-30-100-128-batchable {'max_batch_size': 2, 'execution': 'tasks', 'mode': 'pipelined'}].readhead_buffer_size: 128
test_throughput[release-pg16-50-pipelining_config9-30-100-128-batchable {'max_batch_size': 2, 'execution': 'tasks', 'mode': 'pipelined'}].pipelining_config.max_batch_size: 2
test_throughput[release-pg16-50-pipelining_config9-30-100-128-batchable {'max_batch_size': 2, 'execution': 'tasks', 'mode': 'pipelined'}].pipelining_config.execution: tasks
test_throughput[release-pg16-50-pipelining_config9-30-100-128-batchable {'max_batch_size': 2, 'execution': 'tasks', 'mode': 'pipelined'}].pipelining_config.mode: pipelined
test_throughput[release-pg16-50-pipelining_config9-30-100-128-batchable {'max_batch_size': 2, 'execution': 'tasks', 'mode': 'pipelined'}].counters.time: 0.4988
test_throughput[release-pg16-50-pipelining_config9-30-100-128-batchable {'max_batch_size': 2, 'execution': 'tasks', 'mode': 'pipelined'}].counters.pageserver_getpage_count: 6,402.9500
test_throughput[release-pg16-50-pipelining_config9-30-100-128-batchable {'max_batch_size': 2, 'execution': 'tasks', 'mode': 'pipelined'}].counters.pageserver_vectored_get_count: 3,206.9667
test_throughput[release-pg16-50-pipelining_config9-30-100-128-batchable {'max_batch_size': 2, 'execution': 'tasks', 'mode': 'pipelined'}].counters.compute_getpage_count: 6,402.9500
test_throughput[release-pg16-50-pipelining_config9-30-100-128-batchable {'max_batch_size': 2, 'execution': 'tasks', 'mode': 'pipelined'}].counters.pageserver_cpu_seconds_total: 0.4975
test_throughput[release-pg16-50-pipelining_config9-30-100-128-batchable {'max_batch_size': 2, 'execution': 'tasks', 'mode': 'pipelined'}].perfmetric.batching_factor: 1.9966
test_throughput[release-pg16-50-pipelining_config10-30-100-128-batchable {'max_batch_size': 4, 'execution': 'concurrent-futures', 'mode': 'pipelined'}].tablesize_mib: 50 MiB
test_throughput[release-pg16-50-pipelining_config10-30-100-128-batchable {'max_batch_size': 4, 'execution': 'concurrent-futures', 'mode': 'pipelined'}].effective_io_concurrency: 100
test_throughput[release-pg16-50-pipelining_config10-30-100-128-batchable {'max_batch_size': 4, 'execution': 'concurrent-futures', 'mode': 'pipelined'}].readhead_buffer_size: 128
test_throughput[release-pg16-50-pipelining_config10-30-100-128-batchable {'max_batch_size': 4, 'execution': 'concurrent-futures', 'mode': 'pipelined'}].pipelining_config.max_batch_size: 4
test_throughput[release-pg16-50-pipelining_config10-30-100-128-batchable {'max_batch_size': 4, 'execution': 'concurrent-futures', 'mode': 'pipelined'}].pipelining_config.execution: concurrent-futures
test_throughput[release-pg16-50-pipelining_config10-30-100-128-batchable {'max_batch_size': 4, 'execution': 'concurrent-futures', 'mode': 'pipelined'}].pipelining_config.mode: pipelined
test_throughput[release-pg16-50-pipelining_config10-30-100-128-batchable {'max_batch_size': 4, 'execution': 'concurrent-futures', 'mode': 'pipelined'}].counters.time: 0.3504
test_throughput[release-pg16-50-pipelining_config10-30-100-128-batchable {'max_batch_size': 4, 'execution': 'concurrent-futures', 'mode': 'pipelined'}].counters.pageserver_getpage_count: 6,402.0824
test_throughput[release-pg16-50-pipelining_config10-30-100-128-batchable {'max_batch_size': 4, 'execution': 'concurrent-futures', 'mode': 'pipelined'}].counters.pageserver_vectored_get_count: 1,651.0941
test_throughput[release-pg16-50-pipelining_config10-30-100-128-batchable {'max_batch_size': 4, 'execution': 'concurrent-futures', 'mode': 'pipelined'}].counters.compute_getpage_count: 6,402.0824
test_throughput[release-pg16-50-pipelining_config10-30-100-128-batchable {'max_batch_size': 4, 'execution': 'concurrent-futures', 'mode': 'pipelined'}].counters.pageserver_cpu_seconds_total: 0.3765
test_throughput[release-pg16-50-pipelining_config10-30-100-128-batchable {'max_batch_size': 4, 'execution': 'concurrent-futures', 'mode': 'pipelined'}].perfmetric.batching_factor: 3.8775
test_throughput[release-pg16-50-pipelining_config11-30-100-128-batchable {'max_batch_size': 4, 'execution': 'tasks', 'mode': 'pipelined'}].tablesize_mib: 50 MiB
test_throughput[release-pg16-50-pipelining_config11-30-100-128-batchable {'max_batch_size': 4, 'execution': 'tasks', 'mode': 'pipelined'}].effective_io_concurrency: 100
test_throughput[release-pg16-50-pipelining_config11-30-100-128-batchable {'max_batch_size': 4, 'execution': 'tasks', 'mode': 'pipelined'}].readhead_buffer_size: 128
test_throughput[release-pg16-50-pipelining_config11-30-100-128-batchable {'max_batch_size': 4, 'execution': 'tasks', 'mode': 'pipelined'}].pipelining_config.max_batch_size: 4
test_throughput[release-pg16-50-pipelining_config11-30-100-128-batchable {'max_batch_size': 4, 'execution': 'tasks', 'mode': 'pipelined'}].pipelining_config.execution: tasks
test_throughput[release-pg16-50-pipelining_config11-30-100-128-batchable {'max_batch_size': 4, 'execution': 'tasks', 'mode': 'pipelined'}].pipelining_config.mode: pipelined
test_throughput[release-pg16-50-pipelining_config11-30-100-128-batchable {'max_batch_size': 4, 'execution': 'tasks', 'mode': 'pipelined'}].counters.time: 0.3973
test_throughput[release-pg16-50-pipelining_config11-30-100-128-batchable {'max_batch_size': 4, 'execution': 'tasks', 'mode': 'pipelined'}].counters.pageserver_getpage_count: 6,402.3600
test_throughput[release-pg16-50-pipelining_config11-30-100-128-batchable {'max_batch_size': 4, 'execution': 'tasks', 'mode': 'pipelined'}].counters.pageserver_vectored_get_count: 1,651.3733
test_throughput[release-pg16-50-pipelining_config11-30-100-128-batchable {'max_batch_size': 4, 'execution': 'tasks', 'mode': 'pipelined'}].counters.compute_getpage_count: 6,402.3600
test_throughput[release-pg16-50-pipelining_config11-30-100-128-batchable {'max_batch_size': 4, 'execution': 'tasks', 'mode': 'pipelined'}].counters.pageserver_cpu_seconds_total: 0.3847
test_throughput[release-pg16-50-pipelining_config11-30-100-128-batchable {'max_batch_size': 4, 'execution': 'tasks', 'mode': 'pipelined'}].perfmetric.batching_factor: 3.8770
test_throughput[release-pg16-50-pipelining_config12-30-100-128-batchable {'max_batch_size': 8, 'execution': 'concurrent-futures', 'mode': 'pipelined'}].tablesize_mib: 50 MiB
test_throughput[release-pg16-50-pipelining_config12-30-100-128-batchable {'max_batch_size': 8, 'execution': 'concurrent-futures', 'mode': 'pipelined'}].effective_io_concurrency: 100
test_throughput[release-pg16-50-pipelining_config12-30-100-128-batchable {'max_batch_size': 8, 'execution': 'concurrent-futures', 'mode': 'pipelined'}].readhead_buffer_size: 128
test_throughput[release-pg16-50-pipelining_config12-30-100-128-batchable {'max_batch_size': 8, 'execution': 'concurrent-futures', 'mode': 'pipelined'}].pipelining_config.max_batch_size: 8
test_throughput[release-pg16-50-pipelining_config12-30-100-128-batchable {'max_batch_size': 8, 'execution': 'concurrent-futures', 'mode': 'pipelined'}].pipelining_config.execution: concurrent-futures
test_throughput[release-pg16-50-pipelining_config12-30-100-128-batchable {'max_batch_size': 8, 'execution': 'concurrent-futures', 'mode': 'pipelined'}].pipelining_config.mode: pipelined
test_throughput[release-pg16-50-pipelining_config12-30-100-128-batchable {'max_batch_size': 8, 'execution': 'concurrent-futures', 'mode': 'pipelined'}].counters.time: 0.3140
test_throughput[release-pg16-50-pipelining_config12-30-100-128-batchable {'max_batch_size': 8, 'execution': 'concurrent-futures', 'mode': 'pipelined'}].counters.pageserver_getpage_count: 6,401.8632
test_throughput[release-pg16-50-pipelining_config12-30-100-128-batchable {'max_batch_size': 8, 'execution': 'concurrent-futures', 'mode': 'pipelined'}].counters.pageserver_vectored_get_count: 876.8737
test_throughput[release-pg16-50-pipelining_config12-30-100-128-batchable {'max_batch_size': 8, 'execution': 'concurrent-futures', 'mode': 'pipelined'}].counters.compute_getpage_count: 6,401.8632
test_throughput[release-pg16-50-pipelining_config12-30-100-128-batchable {'max_batch_size': 8, 'execution': 'concurrent-futures', 'mode': 'pipelined'}].counters.pageserver_cpu_seconds_total: 0.3249
test_throughput[release-pg16-50-pipelining_config12-30-100-128-batchable {'max_batch_size': 8, 'execution': 'concurrent-futures', 'mode': 'pipelined'}].perfmetric.batching_factor: 7.3008
test_throughput[release-pg16-50-pipelining_config13-30-100-128-batchable {'max_batch_size': 8, 'execution': 'tasks', 'mode': 'pipelined'}].tablesize_mib: 50 MiB
test_throughput[release-pg16-50-pipelining_config13-30-100-128-batchable {'max_batch_size': 8, 'execution': 'tasks', 'mode': 'pipelined'}].effective_io_concurrency: 100
test_throughput[release-pg16-50-pipelining_config13-30-100-128-batchable {'max_batch_size': 8, 'execution': 'tasks', 'mode': 'pipelined'}].readhead_buffer_size: 128
test_throughput[release-pg16-50-pipelining_config13-30-100-128-batchable {'max_batch_size': 8, 'execution': 'tasks', 'mode': 'pipelined'}].pipelining_config.max_batch_size: 8
test_throughput[release-pg16-50-pipelining_config13-30-100-128-batchable {'max_batch_size': 8, 'execution': 'tasks', 'mode': 'pipelined'}].pipelining_config.execution: tasks
test_throughput[release-pg16-50-pipelining_config13-30-100-128-batchable {'max_batch_size': 8, 'execution': 'tasks', 'mode': 'pipelined'}].pipelining_config.mode: pipelined
test_throughput[release-pg16-50-pipelining_config13-30-100-128-batchable {'max_batch_size': 8, 'execution': 'tasks', 'mode': 'pipelined'}].counters.time: 0.3275
test_throughput[release-pg16-50-pipelining_config13-30-100-128-batchable {'max_batch_size': 8, 'execution': 'tasks', 'mode': 'pipelined'}].counters.pageserver_getpage_count: 6,401.9451
test_throughput[release-pg16-50-pipelining_config13-30-100-128-batchable {'max_batch_size': 8, 'execution': 'tasks', 'mode': 'pipelined'}].counters.pageserver_vectored_get_count: 876.9560
test_throughput[release-pg16-50-pipelining_config13-30-100-128-batchable {'max_batch_size': 8, 'execution': 'tasks', 'mode': 'pipelined'}].counters.compute_getpage_count: 6,401.9451
test_throughput[release-pg16-50-pipelining_config13-30-100-128-batchable {'max_batch_size': 8, 'execution': 'tasks', 'mode': 'pipelined'}].counters.pageserver_cpu_seconds_total: 0.3145
test_throughput[release-pg16-50-pipelining_config13-30-100-128-batchable {'max_batch_size': 8, 'execution': 'tasks', 'mode': 'pipelined'}].perfmetric.batching_factor: 7.3002
test_throughput[release-pg16-50-pipelining_config14-30-100-128-batchable {'max_batch_size': 16, 'execution': 'concurrent-futures', 'mode': 'pipelined'}].tablesize_mib: 50 MiB
test_throughput[release-pg16-50-pipelining_config14-30-100-128-batchable {'max_batch_size': 16, 'execution': 'concurrent-futures', 'mode': 'pipelined'}].effective_io_concurrency: 100
test_throughput[release-pg16-50-pipelining_config14-30-100-128-batchable {'max_batch_size': 16, 'execution': 'concurrent-futures', 'mode': 'pipelined'}].readhead_buffer_size: 128
test_throughput[release-pg16-50-pipelining_config14-30-100-128-batchable {'max_batch_size': 16, 'execution': 'concurrent-futures', 'mode': 'pipelined'}].pipelining_config.max_batch_size: 16
test_throughput[release-pg16-50-pipelining_config14-30-100-128-batchable {'max_batch_size': 16, 'execution': 'concurrent-futures', 'mode': 'pipelined'}].pipelining_config.execution: concurrent-futures
test_throughput[release-pg16-50-pipelining_config14-30-100-128-batchable {'max_batch_size': 16, 'execution': 'concurrent-futures', 'mode': 'pipelined'}].pipelining_config.mode: pipelined
test_throughput[release-pg16-50-pipelining_config14-30-100-128-batchable {'max_batch_size': 16, 'execution': 'concurrent-futures', 'mode': 'pipelined'}].counters.time: 0.2905
test_throughput[release-pg16-50-pipelining_config14-30-100-128-batchable {'max_batch_size': 16, 'execution': 'concurrent-futures', 'mode': 'pipelined'}].counters.pageserver_getpage_count: 6,401.7184
test_throughput[release-pg16-50-pipelining_config14-30-100-128-batchable {'max_batch_size': 16, 'execution': 'concurrent-futures', 'mode': 'pipelined'}].counters.pageserver_vectored_get_count: 490.7282
test_throughput[release-pg16-50-pipelining_config14-30-100-128-batchable {'max_batch_size': 16, 'execution': 'concurrent-futures', 'mode': 'pipelined'}].counters.compute_getpage_count: 6,401.7184
test_throughput[release-pg16-50-pipelining_config14-30-100-128-batchable {'max_batch_size': 16, 'execution': 'concurrent-futures', 'mode': 'pipelined'}].counters.pageserver_cpu_seconds_total: 0.2897
test_throughput[release-pg16-50-pipelining_config14-30-100-128-batchable {'max_batch_size': 16, 'execution': 'concurrent-futures', 'mode': 'pipelined'}].perfmetric.batching_factor: 13.0453
test_throughput[release-pg16-50-pipelining_config15-30-100-128-batchable {'max_batch_size': 16, 'execution': 'tasks', 'mode': 'pipelined'}].tablesize_mib: 50 MiB
test_throughput[release-pg16-50-pipelining_config15-30-100-128-batchable {'max_batch_size': 16, 'execution': 'tasks', 'mode': 'pipelined'}].effective_io_concurrency: 100
test_throughput[release-pg16-50-pipelining_config15-30-100-128-batchable {'max_batch_size': 16, 'execution': 'tasks', 'mode': 'pipelined'}].readhead_buffer_size: 128
test_throughput[release-pg16-50-pipelining_config15-30-100-128-batchable {'max_batch_size': 16, 'execution': 'tasks', 'mode': 'pipelined'}].pipelining_config.max_batch_size: 16
test_throughput[release-pg16-50-pipelining_config15-30-100-128-batchable {'max_batch_size': 16, 'execution': 'tasks', 'mode': 'pipelined'}].pipelining_config.execution: tasks
test_throughput[release-pg16-50-pipelining_config15-30-100-128-batchable {'max_batch_size': 16, 'execution': 'tasks', 'mode': 'pipelined'}].pipelining_config.mode: pipelined
test_throughput[release-pg16-50-pipelining_config15-30-100-128-batchable {'max_batch_size': 16, 'execution': 'tasks', 'mode': 'pipelined'}].counters.time: 0.3148
test_throughput[release-pg16-50-pipelining_config15-30-100-128-batchable {'max_batch_size': 16, 'execution': 'tasks', 'mode': 'pipelined'}].counters.pageserver_getpage_count: 6,401.8632
test_throughput[release-pg16-50-pipelining_config15-30-100-128-batchable {'max_batch_size': 16, 'execution': 'tasks', 'mode': 'pipelined'}].counters.pageserver_vectored_get_count: 490.8737
test_throughput[release-pg16-50-pipelining_config15-30-100-128-batchable {'max_batch_size': 16, 'execution': 'tasks', 'mode': 'pipelined'}].counters.compute_getpage_count: 6,401.8632
test_throughput[release-pg16-50-pipelining_config15-30-100-128-batchable {'max_batch_size': 16, 'execution': 'tasks', 'mode': 'pipelined'}].counters.pageserver_cpu_seconds_total: 0.3002
test_throughput[release-pg16-50-pipelining_config15-30-100-128-batchable {'max_batch_size': 16, 'execution': 'tasks', 'mode': 'pipelined'}].perfmetric.batching_factor: 13.0418
test_throughput[release-pg16-50-pipelining_config16-30-100-128-batchable {'max_batch_size': 32, 'execution': 'concurrent-futures', 'mode': 'pipelined'}].tablesize_mib: 50 MiB
test_throughput[release-pg16-50-pipelining_config16-30-100-128-batchable {'max_batch_size': 32, 'execution': 'concurrent-futures', 'mode': 'pipelined'}].effective_io_concurrency: 100
test_throughput[release-pg16-50-pipelining_config16-30-100-128-batchable {'max_batch_size': 32, 'execution': 'concurrent-futures', 'mode': 'pipelined'}].readhead_buffer_size: 128
test_throughput[release-pg16-50-pipelining_config16-30-100-128-batchable {'max_batch_size': 32, 'execution': 'concurrent-futures', 'mode': 'pipelined'}].pipelining_config.max_batch_size: 32
test_throughput[release-pg16-50-pipelining_config16-30-100-128-batchable {'max_batch_size': 32, 'execution': 'concurrent-futures', 'mode': 'pipelined'}].pipelining_config.execution: concurrent-futures
test_throughput[release-pg16-50-pipelining_config16-30-100-128-batchable {'max_batch_size': 32, 'execution': 'concurrent-futures', 'mode': 'pipelined'}].pipelining_config.mode: pipelined
test_throughput[release-pg16-50-pipelining_config16-30-100-128-batchable {'max_batch_size': 32, 'execution': 'concurrent-futures', 'mode': 'pipelined'}].counters.time: 0.2587
test_throughput[release-pg16-50-pipelining_config16-30-100-128-batchable {'max_batch_size': 32, 'execution': 'concurrent-futures', 'mode': 'pipelined'}].counters.pageserver_getpage_count: 6,401.5259
test_throughput[release-pg16-50-pipelining_config16-30-100-128-batchable {'max_batch_size': 32, 'execution': 'concurrent-futures', 'mode': 'pipelined'}].counters.pageserver_vectored_get_count: 297.5345
test_throughput[release-pg16-50-pipelining_config16-30-100-128-batchable {'max_batch_size': 32, 'execution': 'concurrent-futures', 'mode': 'pipelined'}].counters.compute_getpage_count: 6,401.5259
test_throughput[release-pg16-50-pipelining_config16-30-100-128-batchable {'max_batch_size': 32, 'execution': 'concurrent-futures', 'mode': 'pipelined'}].counters.pageserver_cpu_seconds_total: 0.2539
test_throughput[release-pg16-50-pipelining_config16-30-100-128-batchable {'max_batch_size': 32, 'execution': 'concurrent-futures', 'mode': 'pipelined'}].perfmetric.batching_factor: 21.5152
test_throughput[release-pg16-50-pipelining_config17-30-100-128-batchable {'max_batch_size': 32, 'execution': 'tasks', 'mode': 'pipelined'}].tablesize_mib: 50 MiB
test_throughput[release-pg16-50-pipelining_config17-30-100-128-batchable {'max_batch_size': 32, 'execution': 'tasks', 'mode': 'pipelined'}].effective_io_concurrency: 100
test_throughput[release-pg16-50-pipelining_config17-30-100-128-batchable {'max_batch_size': 32, 'execution': 'tasks', 'mode': 'pipelined'}].readhead_buffer_size: 128
test_throughput[release-pg16-50-pipelining_config17-30-100-128-batchable {'max_batch_size': 32, 'execution': 'tasks', 'mode': 'pipelined'}].pipelining_config.max_batch_size: 32
test_throughput[release-pg16-50-pipelining_config17-30-100-128-batchable {'max_batch_size': 32, 'execution': 'tasks', 'mode': 'pipelined'}].pipelining_config.execution: tasks
test_throughput[release-pg16-50-pipelining_config17-30-100-128-batchable {'max_batch_size': 32, 'execution': 'tasks', 'mode': 'pipelined'}].pipelining_config.mode: pipelined
test_throughput[release-pg16-50-pipelining_config17-30-100-128-batchable {'max_batch_size': 32, 'execution': 'tasks', 'mode': 'pipelined'}].counters.time: 0.2804
test_throughput[release-pg16-50-pipelining_config17-30-100-128-batchable {'max_batch_size': 32, 'execution': 'tasks', 'mode': 'pipelined'}].counters.pageserver_getpage_count: 6,401.6698
test_throughput[release-pg16-50-pipelining_config17-30-100-128-batchable {'max_batch_size': 32, 'execution': 'tasks', 'mode': 'pipelined'}].counters.pageserver_vectored_get_count: 297.6792
test_throughput[release-pg16-50-pipelining_config17-30-100-128-batchable {'max_batch_size': 32, 'execution': 'tasks', 'mode': 'pipelined'}].counters.compute_getpage_count: 6,401.6698
test_throughput[release-pg16-50-pipelining_config17-30-100-128-batchable {'max_batch_size': 32, 'execution': 'tasks', 'mode': 'pipelined'}].counters.pageserver_cpu_seconds_total: 0.2674
test_throughput[release-pg16-50-pipelining_config17-30-100-128-batchable {'max_batch_size': 32, 'execution': 'tasks', 'mode': 'pipelined'}].perfmetric.batching_factor: 21.5053
test_latency[release-pg16-pipelining_config0-{'mode': 'serial'}].latency_mean: 0.128 ms
test_latency[release-pg16-pipelining_config0-{'mode': 'serial'}].latency_percentiles.p95: 0.153 ms
test_latency[release-pg16-pipelining_config0-{'mode': 'serial'}].latency_percentiles.p99: 0.169 ms
test_latency[release-pg16-pipelining_config0-{'mode': 'serial'}].latency_percentiles.p99.9: 0.257 ms
test_latency[release-pg16-pipelining_config0-{'mode': 'serial'}].latency_percentiles.p99.99: 0.588 ms
test_latency[release-pg16-pipelining_config1-{'max_batch_size': 1, 'execution': 'concurrent-futures', 'mode': 'pipelined'}].latency_mean: 0.128 ms
test_latency[release-pg16-pipelining_config1-{'max_batch_size': 1, 'execution': 'concurrent-futures', 'mode': 'pipelined'}].latency_percentiles.p95: 0.153 ms
test_latency[release-pg16-pipelining_config1-{'max_batch_size': 1, 'execution': 'concurrent-futures', 'mode': 'pipelined'}].latency_percentiles.p99: 0.169 ms
test_latency[release-pg16-pipelining_config1-{'max_batch_size': 1, 'execution': 'concurrent-futures', 'mode': 'pipelined'}].latency_percentiles.p99.9: 0.283 ms
test_latency[release-pg16-pipelining_config1-{'max_batch_size': 1, 'execution': 'concurrent-futures', 'mode': 'pipelined'}].latency_percentiles.p99.99: 0.556 ms
test_latency[release-pg16-pipelining_config2-{'max_batch_size': 1, 'execution': 'tasks', 'mode': 'pipelined'}].latency_mean: 0.148 ms
test_latency[release-pg16-pipelining_config2-{'max_batch_size': 1, 'execution': 'tasks', 'mode': 'pipelined'}].latency_percentiles.p95: 0.178 ms
test_latency[release-pg16-pipelining_config2-{'max_batch_size': 1, 'execution': 'tasks', 'mode': 'pipelined'}].latency_percentiles.p99: 0.193 ms
test_latency[release-pg16-pipelining_config2-{'max_batch_size': 1, 'execution': 'tasks', 'mode': 'pipelined'}].latency_percentiles.p99.9: 0.308 ms
test_latency[release-pg16-pipelining_config2-{'max_batch_size': 1, 'execution': 'tasks', 'mode': 'pipelined'}].latency_percentiles.p99.99: 0.632 ms
test_latency[release-pg16-pipelining_config3-{'max_batch_size': 32, 'execution': 'concurrent-futures', 'mode': 'pipelined'}].latency_mean: 0.126 ms
test_latency[release-pg16-pipelining_config3-{'max_batch_size': 32, 'execution': 'concurrent-futures', 'mode': 'pipelined'}].latency_percentiles.p95: 0.150 ms
test_latency[release-pg16-pipelining_config3-{'max_batch_size': 32, 'execution': 'concurrent-futures', 'mode': 'pipelined'}].latency_percentiles.p99: 0.173 ms
test_latency[release-pg16-pipelining_config3-{'max_batch_size': 32, 'execution': 'concurrent-futures', 'mode': 'pipelined'}].latency_percentiles.p99.9: 0.268 ms
test_latency[release-pg16-pipelining_config3-{'max_batch_size': 32, 'execution': 'concurrent-futures', 'mode': 'pipelined'}].latency_percentiles.p99.99: 0.631 ms
test_latency[release-pg16-pipelining_config4-{'max_batch_size': 32, 'execution': 'tasks', 'mode': 'pipelined'}].latency_mean: 0.130 ms
test_latency[release-pg16-pipelining_config4-{'max_batch_size': 32, 'execution': 'tasks', 'mode': 'pipelined'}].latency_percentiles.p95: 0.150 ms
test_latency[release-pg16-pipelining_config4-{'max_batch_size': 32, 'execution': 'tasks', 'mode': 'pipelined'}].latency_percentiles.p99: 0.165 ms
test_latency[release-pg16-pipelining_config4-{'max_batch_size': 32, 'execution': 'tasks', 'mode': 'pipelined'}].latency_percentiles.p99.9: 0.267 ms
test_latency[release-pg16-pipelining_config4-{'max_batch_size': 32, 'execution': 'tasks', 'mode': 'pipelined'}].latency_percentiles.p99.99: 0.530 ms
2024-11-29 17:42:50 +01:00
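The perfmetric.batching_factor values in the run above appear to track the ratio of getpage requests to vectored get calls on the pageserver. A minimal sketch of that relationship, assuming the metric is simply counters.pageserver_getpage_count divided by counters.pageserver_vectored_get_count (the helper below is illustrative only, not part of the test suite):

def batching_factor(getpage_count: float, vectored_get_count: float) -> float:
    # Assumed definition: pages served per vectored get, consistent with the counters above.
    return getpage_count / vectored_get_count

# max_batch_size=2, concurrent-futures run above: 6402.7188 getpage vs 3206.7344 vectored gets
print(round(batching_factor(6402.7188, 3206.7344), 4))  # ~1.9966, matching the reported value

The same check holds for the larger batch sizes above (e.g. 6401.8632 / 876.8737 is roughly the reported 7.3008 for max_batch_size=8), which is why the factor approaches, but does not reach, max_batch_size.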
Christian Schwarz
2cab051921 fix escaping of lfc path (exposed by the benchmark) 2024-11-29 17:25:54 +01:00
Christian Schwarz
9b65b268ed stop Box'ing stuff & clean up the passing-through of errors (remove enum Batch) 2024-11-29 16:14:03 +01:00
Christian Schwarz
53e18b24fc less repetitive match arms; https://github.com/neondatabase/neon/pull/9851#discussion_r1860535860 2024-11-29 16:12:19 +01:00
Christian Schwarz
0d28084985 merge brought back test_pageserver_getpage_merge.py 2024-11-29 15:23:47 +01:00
Christian Schwarz
199a4bd6c1 Merge remote-tracking branch 'origin/main' into problame/batching-sidecar-task 2024-11-29 14:26:37 +01:00
Christian Schwarz
bab3dd009d Merge remote-tracking branch 'origin/main' into problame/batching-sidecar-task 2024-11-29 14:19:06 +01:00
Christian Schwarz
90ef03c3b5 benchmarks on hetzner box
--------------------------------------------------------------------------- Benchmark results ---------------------------------------------------------------------------
test_throughput[release-pg16-50-pipelining_config0-30-1-128-not batchable {'mode': 'serial'}].tablesize_mib: 50 MiB
test_throughput[release-pg16-50-pipelining_config0-30-1-128-not batchable {'mode': 'serial'}].effective_io_concurrency: 1
test_throughput[release-pg16-50-pipelining_config0-30-1-128-not batchable {'mode': 'serial'}].readhead_buffer_size: 128
test_throughput[release-pg16-50-pipelining_config0-30-1-128-not batchable {'mode': 'serial'}].pipelining_config.mode: serial
test_throughput[release-pg16-50-pipelining_config0-30-1-128-not batchable {'mode': 'serial'}].counters.time: 0.8920
test_throughput[release-pg16-50-pipelining_config0-30-1-128-not batchable {'mode': 'serial'}].counters.pageserver_getpage_count: 6,403.0000
test_throughput[release-pg16-50-pipelining_config0-30-1-128-not batchable {'mode': 'serial'}].counters.pageserver_vectored_get_count: 6,403.0000
test_throughput[release-pg16-50-pipelining_config0-30-1-128-not batchable {'mode': 'serial'}].counters.compute_getpage_count: 6,403.0000
test_throughput[release-pg16-50-pipelining_config0-30-1-128-not batchable {'mode': 'serial'}].counters.pageserver_cpu_seconds_total: 0.7482
test_throughput[release-pg16-50-pipelining_config0-30-1-128-not batchable {'mode': 'serial'}].perfmetric.batching_factor: 1.0000
test_throughput[release-pg16-50-pipelining_config1-30-1-128-not batchable {'max_batch_size': 1, 'execution': 'concurrent-futures', 'mode': 'pipelined'}].tablesize_mib: 50 MiB
test_throughput[release-pg16-50-pipelining_config1-30-1-128-not batchable {'max_batch_size': 1, 'execution': 'concurrent-futures', 'mode': 'pipelined'}].effective_io_concurrency: 1
test_throughput[release-pg16-50-pipelining_config1-30-1-128-not batchable {'max_batch_size': 1, 'execution': 'concurrent-futures', 'mode': 'pipelined'}].readhead_buffer_size: 128
test_throughput[release-pg16-50-pipelining_config1-30-1-128-not batchable {'max_batch_size': 1, 'execution': 'concurrent-futures', 'mode': 'pipelined'}].pipelining_config.max_batch_size: 1
test_throughput[release-pg16-50-pipelining_config1-30-1-128-not batchable {'max_batch_size': 1, 'execution': 'concurrent-futures', 'mode': 'pipelined'}].pipelining_config.execution: concurrent-futures
test_throughput[release-pg16-50-pipelining_config1-30-1-128-not batchable {'max_batch_size': 1, 'execution': 'concurrent-futures', 'mode': 'pipelined'}].pipelining_config.mode: pipelined
test_throughput[release-pg16-50-pipelining_config1-30-1-128-not batchable {'max_batch_size': 1, 'execution': 'concurrent-futures', 'mode': 'pipelined'}].counters.time: 0.9038
test_throughput[release-pg16-50-pipelining_config1-30-1-128-not batchable {'max_batch_size': 1, 'execution': 'concurrent-futures', 'mode': 'pipelined'}].counters.pageserver_getpage_count: 6,403.0000
test_throughput[release-pg16-50-pipelining_config1-30-1-128-not batchable {'max_batch_size': 1, 'execution': 'concurrent-futures', 'mode': 'pipelined'}].counters.pageserver_vectored_get_count: 6,403.0000
test_throughput[release-pg16-50-pipelining_config1-30-1-128-not batchable {'max_batch_size': 1, 'execution': 'concurrent-futures', 'mode': 'pipelined'}].counters.compute_getpage_count: 6,403.0000
test_throughput[release-pg16-50-pipelining_config1-30-1-128-not batchable {'max_batch_size': 1, 'execution': 'concurrent-futures', 'mode': 'pipelined'}].counters.pageserver_cpu_seconds_total: 0.8488
test_throughput[release-pg16-50-pipelining_config1-30-1-128-not batchable {'max_batch_size': 1, 'execution': 'concurrent-futures', 'mode': 'pipelined'}].perfmetric.batching_factor: 1.0000
test_throughput[release-pg16-50-pipelining_config2-30-1-128-not batchable {'max_batch_size': 1, 'execution': 'tasks', 'mode': 'pipelined'}].tablesize_mib: 50 MiB
test_throughput[release-pg16-50-pipelining_config2-30-1-128-not batchable {'max_batch_size': 1, 'execution': 'tasks', 'mode': 'pipelined'}].effective_io_concurrency: 1
test_throughput[release-pg16-50-pipelining_config2-30-1-128-not batchable {'max_batch_size': 1, 'execution': 'tasks', 'mode': 'pipelined'}].readhead_buffer_size: 128
test_throughput[release-pg16-50-pipelining_config2-30-1-128-not batchable {'max_batch_size': 1, 'execution': 'tasks', 'mode': 'pipelined'}].pipelining_config.max_batch_size: 1
test_throughput[release-pg16-50-pipelining_config2-30-1-128-not batchable {'max_batch_size': 1, 'execution': 'tasks', 'mode': 'pipelined'}].pipelining_config.execution: tasks
test_throughput[release-pg16-50-pipelining_config2-30-1-128-not batchable {'max_batch_size': 1, 'execution': 'tasks', 'mode': 'pipelined'}].pipelining_config.mode: pipelined
test_throughput[release-pg16-50-pipelining_config2-30-1-128-not batchable {'max_batch_size': 1, 'execution': 'tasks', 'mode': 'pipelined'}].counters.time: 0.9325
test_throughput[release-pg16-50-pipelining_config2-30-1-128-not batchable {'max_batch_size': 1, 'execution': 'tasks', 'mode': 'pipelined'}].counters.pageserver_getpage_count: 6,403.0000
test_throughput[release-pg16-50-pipelining_config2-30-1-128-not batchable {'max_batch_size': 1, 'execution': 'tasks', 'mode': 'pipelined'}].counters.pageserver_vectored_get_count: 6,403.0000
test_throughput[release-pg16-50-pipelining_config2-30-1-128-not batchable {'max_batch_size': 1, 'execution': 'tasks', 'mode': 'pipelined'}].counters.compute_getpage_count: 6,403.0000
test_throughput[release-pg16-50-pipelining_config2-30-1-128-not batchable {'max_batch_size': 1, 'execution': 'tasks', 'mode': 'pipelined'}].counters.pageserver_cpu_seconds_total: 0.7834
test_throughput[release-pg16-50-pipelining_config2-30-1-128-not batchable {'max_batch_size': 1, 'execution': 'tasks', 'mode': 'pipelined'}].perfmetric.batching_factor: 1.0000
test_throughput[release-pg16-50-pipelining_config3-30-1-128-not batchable {'max_batch_size': 32, 'execution': 'concurrent-futures', 'mode': 'pipelined'}].tablesize_mib: 50 MiB
test_throughput[release-pg16-50-pipelining_config3-30-1-128-not batchable {'max_batch_size': 32, 'execution': 'concurrent-futures', 'mode': 'pipelined'}].effective_io_concurrency: 1
test_throughput[release-pg16-50-pipelining_config3-30-1-128-not batchable {'max_batch_size': 32, 'execution': 'concurrent-futures', 'mode': 'pipelined'}].readhead_buffer_size: 128
test_throughput[release-pg16-50-pipelining_config3-30-1-128-not batchable {'max_batch_size': 32, 'execution': 'concurrent-futures', 'mode': 'pipelined'}].pipelining_config.max_batch_size: 32
test_throughput[release-pg16-50-pipelining_config3-30-1-128-not batchable {'max_batch_size': 32, 'execution': 'concurrent-futures', 'mode': 'pipelined'}].pipelining_config.execution: concurrent-futures
test_throughput[release-pg16-50-pipelining_config3-30-1-128-not batchable {'max_batch_size': 32, 'execution': 'concurrent-futures', 'mode': 'pipelined'}].pipelining_config.mode: pipelined
test_throughput[release-pg16-50-pipelining_config3-30-1-128-not batchable {'max_batch_size': 32, 'execution': 'concurrent-futures', 'mode': 'pipelined'}].counters.time: 0.9282
test_throughput[release-pg16-50-pipelining_config3-30-1-128-not batchable {'max_batch_size': 32, 'execution': 'concurrent-futures', 'mode': 'pipelined'}].counters.pageserver_getpage_count: 6,403.0000
test_throughput[release-pg16-50-pipelining_config3-30-1-128-not batchable {'max_batch_size': 32, 'execution': 'concurrent-futures', 'mode': 'pipelined'}].counters.pageserver_vectored_get_count: 6,403.0000
test_throughput[release-pg16-50-pipelining_config3-30-1-128-not batchable {'max_batch_size': 32, 'execution': 'concurrent-futures', 'mode': 'pipelined'}].counters.compute_getpage_count: 6,403.0000
test_throughput[release-pg16-50-pipelining_config3-30-1-128-not batchable {'max_batch_size': 32, 'execution': 'concurrent-futures', 'mode': 'pipelined'}].counters.pageserver_cpu_seconds_total: 0.8647
test_throughput[release-pg16-50-pipelining_config3-30-1-128-not batchable {'max_batch_size': 32, 'execution': 'concurrent-futures', 'mode': 'pipelined'}].perfmetric.batching_factor: 1.0000
test_throughput[release-pg16-50-pipelining_config4-30-1-128-not batchable {'max_batch_size': 32, 'execution': 'tasks', 'mode': 'pipelined'}].tablesize_mib: 50 MiB
test_throughput[release-pg16-50-pipelining_config4-30-1-128-not batchable {'max_batch_size': 32, 'execution': 'tasks', 'mode': 'pipelined'}].effective_io_concurrency: 1
test_throughput[release-pg16-50-pipelining_config4-30-1-128-not batchable {'max_batch_size': 32, 'execution': 'tasks', 'mode': 'pipelined'}].readhead_buffer_size: 128
test_throughput[release-pg16-50-pipelining_config4-30-1-128-not batchable {'max_batch_size': 32, 'execution': 'tasks', 'mode': 'pipelined'}].pipelining_config.max_batch_size: 32
test_throughput[release-pg16-50-pipelining_config4-30-1-128-not batchable {'max_batch_size': 32, 'execution': 'tasks', 'mode': 'pipelined'}].pipelining_config.execution: tasks
test_throughput[release-pg16-50-pipelining_config4-30-1-128-not batchable {'max_batch_size': 32, 'execution': 'tasks', 'mode': 'pipelined'}].pipelining_config.mode: pipelined
test_throughput[release-pg16-50-pipelining_config4-30-1-128-not batchable {'max_batch_size': 32, 'execution': 'tasks', 'mode': 'pipelined'}].counters.time: 0.9148
test_throughput[release-pg16-50-pipelining_config4-30-1-128-not batchable {'max_batch_size': 32, 'execution': 'tasks', 'mode': 'pipelined'}].counters.pageserver_getpage_count: 6,403.0000
test_throughput[release-pg16-50-pipelining_config4-30-1-128-not batchable {'max_batch_size': 32, 'execution': 'tasks', 'mode': 'pipelined'}].counters.pageserver_vectored_get_count: 6,403.0000
test_throughput[release-pg16-50-pipelining_config4-30-1-128-not batchable {'max_batch_size': 32, 'execution': 'tasks', 'mode': 'pipelined'}].counters.compute_getpage_count: 6,403.0000
test_throughput[release-pg16-50-pipelining_config4-30-1-128-not batchable {'max_batch_size': 32, 'execution': 'tasks', 'mode': 'pipelined'}].counters.pageserver_cpu_seconds_total: 0.7734
test_throughput[release-pg16-50-pipelining_config4-30-1-128-not batchable {'max_batch_size': 32, 'execution': 'tasks', 'mode': 'pipelined'}].perfmetric.batching_factor: 1.0000
test_throughput[release-pg16-50-pipelining_config5-30-100-128-batchable {'mode': 'serial'}].tablesize_mib: 50 MiB
test_throughput[release-pg16-50-pipelining_config5-30-100-128-batchable {'mode': 'serial'}].effective_io_concurrency: 100
test_throughput[release-pg16-50-pipelining_config5-30-100-128-batchable {'mode': 'serial'}].readhead_buffer_size: 128
test_throughput[release-pg16-50-pipelining_config5-30-100-128-batchable {'mode': 'serial'}].pipelining_config.mode: serial
test_throughput[release-pg16-50-pipelining_config5-30-100-128-batchable {'mode': 'serial'}].counters.time: 0.6814
test_throughput[release-pg16-50-pipelining_config5-30-100-128-batchable {'mode': 'serial'}].counters.pageserver_getpage_count: 6,403.0000
test_throughput[release-pg16-50-pipelining_config5-30-100-128-batchable {'mode': 'serial'}].counters.pageserver_vectored_get_count: 6,403.0000
test_throughput[release-pg16-50-pipelining_config5-30-100-128-batchable {'mode': 'serial'}].counters.compute_getpage_count: 6,403.0000
test_throughput[release-pg16-50-pipelining_config5-30-100-128-batchable {'mode': 'serial'}].counters.pageserver_cpu_seconds_total: 0.6663
test_throughput[release-pg16-50-pipelining_config5-30-100-128-batchable {'mode': 'serial'}].perfmetric.batching_factor: 1.0000
test_throughput[release-pg16-50-pipelining_config6-30-100-128-batchable {'max_batch_size': 1, 'execution': 'concurrent-futures', 'mode': 'pipelined'}].tablesize_mib: 50 MiB
test_throughput[release-pg16-50-pipelining_config6-30-100-128-batchable {'max_batch_size': 1, 'execution': 'concurrent-futures', 'mode': 'pipelined'}].effective_io_concurrency: 100
test_throughput[release-pg16-50-pipelining_config6-30-100-128-batchable {'max_batch_size': 1, 'execution': 'concurrent-futures', 'mode': 'pipelined'}].readhead_buffer_size: 128
test_throughput[release-pg16-50-pipelining_config6-30-100-128-batchable {'max_batch_size': 1, 'execution': 'concurrent-futures', 'mode': 'pipelined'}].pipelining_config.max_batch_size: 1
test_throughput[release-pg16-50-pipelining_config6-30-100-128-batchable {'max_batch_size': 1, 'execution': 'concurrent-futures', 'mode': 'pipelined'}].pipelining_config.execution: concurrent-futures
test_throughput[release-pg16-50-pipelining_config6-30-100-128-batchable {'max_batch_size': 1, 'execution': 'concurrent-futures', 'mode': 'pipelined'}].pipelining_config.mode: pipelined
test_throughput[release-pg16-50-pipelining_config6-30-100-128-batchable {'max_batch_size': 1, 'execution': 'concurrent-futures', 'mode': 'pipelined'}].counters.time: 0.6394
test_throughput[release-pg16-50-pipelining_config6-30-100-128-batchable {'max_batch_size': 1, 'execution': 'concurrent-futures', 'mode': 'pipelined'}].counters.pageserver_getpage_count: 6,403.0000
test_throughput[release-pg16-50-pipelining_config6-30-100-128-batchable {'max_batch_size': 1, 'execution': 'concurrent-futures', 'mode': 'pipelined'}].counters.pageserver_vectored_get_count: 6,403.0000
test_throughput[release-pg16-50-pipelining_config6-30-100-128-batchable {'max_batch_size': 1, 'execution': 'concurrent-futures', 'mode': 'pipelined'}].counters.compute_getpage_count: 6,403.0000
test_throughput[release-pg16-50-pipelining_config6-30-100-128-batchable {'max_batch_size': 1, 'execution': 'concurrent-futures', 'mode': 'pipelined'}].counters.pageserver_cpu_seconds_total: 0.7907
test_throughput[release-pg16-50-pipelining_config6-30-100-128-batchable {'max_batch_size': 1, 'execution': 'concurrent-futures', 'mode': 'pipelined'}].perfmetric.batching_factor: 1.0000
test_throughput[release-pg16-50-pipelining_config7-30-100-128-batchable {'max_batch_size': 1, 'execution': 'tasks', 'mode': 'pipelined'}].tablesize_mib: 50 MiB
test_throughput[release-pg16-50-pipelining_config7-30-100-128-batchable {'max_batch_size': 1, 'execution': 'tasks', 'mode': 'pipelined'}].effective_io_concurrency: 100
test_throughput[release-pg16-50-pipelining_config7-30-100-128-batchable {'max_batch_size': 1, 'execution': 'tasks', 'mode': 'pipelined'}].readhead_buffer_size: 128
test_throughput[release-pg16-50-pipelining_config7-30-100-128-batchable {'max_batch_size': 1, 'execution': 'tasks', 'mode': 'pipelined'}].pipelining_config.max_batch_size: 1
test_throughput[release-pg16-50-pipelining_config7-30-100-128-batchable {'max_batch_size': 1, 'execution': 'tasks', 'mode': 'pipelined'}].pipelining_config.execution: tasks
test_throughput[release-pg16-50-pipelining_config7-30-100-128-batchable {'max_batch_size': 1, 'execution': 'tasks', 'mode': 'pipelined'}].pipelining_config.mode: pipelined
test_throughput[release-pg16-50-pipelining_config7-30-100-128-batchable {'max_batch_size': 1, 'execution': 'tasks', 'mode': 'pipelined'}].counters.time: 0.7021
test_throughput[release-pg16-50-pipelining_config7-30-100-128-batchable {'max_batch_size': 1, 'execution': 'tasks', 'mode': 'pipelined'}].counters.pageserver_getpage_count: 6,403.0000
test_throughput[release-pg16-50-pipelining_config7-30-100-128-batchable {'max_batch_size': 1, 'execution': 'tasks', 'mode': 'pipelined'}].counters.pageserver_vectored_get_count: 6,403.0000
test_throughput[release-pg16-50-pipelining_config7-30-100-128-batchable {'max_batch_size': 1, 'execution': 'tasks', 'mode': 'pipelined'}].counters.compute_getpage_count: 6,403.0000
test_throughput[release-pg16-50-pipelining_config7-30-100-128-batchable {'max_batch_size': 1, 'execution': 'tasks', 'mode': 'pipelined'}].counters.pageserver_cpu_seconds_total: 0.6926
test_throughput[release-pg16-50-pipelining_config7-30-100-128-batchable {'max_batch_size': 1, 'execution': 'tasks', 'mode': 'pipelined'}].perfmetric.batching_factor: 1.0000
test_throughput[release-pg16-50-pipelining_config8-30-100-128-batchable {'max_batch_size': 2, 'execution': 'concurrent-futures', 'mode': 'pipelined'}].tablesize_mib: 50 MiB
test_throughput[release-pg16-50-pipelining_config8-30-100-128-batchable {'max_batch_size': 2, 'execution': 'concurrent-futures', 'mode': 'pipelined'}].effective_io_concurrency: 100
test_throughput[release-pg16-50-pipelining_config8-30-100-128-batchable {'max_batch_size': 2, 'execution': 'concurrent-futures', 'mode': 'pipelined'}].readhead_buffer_size: 128
test_throughput[release-pg16-50-pipelining_config8-30-100-128-batchable {'max_batch_size': 2, 'execution': 'concurrent-futures', 'mode': 'pipelined'}].pipelining_config.max_batch_size: 2
test_throughput[release-pg16-50-pipelining_config8-30-100-128-batchable {'max_batch_size': 2, 'execution': 'concurrent-futures', 'mode': 'pipelined'}].pipelining_config.execution: concurrent-futures
test_throughput[release-pg16-50-pipelining_config8-30-100-128-batchable {'max_batch_size': 2, 'execution': 'concurrent-futures', 'mode': 'pipelined'}].pipelining_config.mode: pipelined
test_throughput[release-pg16-50-pipelining_config8-30-100-128-batchable {'max_batch_size': 2, 'execution': 'concurrent-futures', 'mode': 'pipelined'}].counters.time: 0.4521
test_throughput[release-pg16-50-pipelining_config8-30-100-128-batchable {'max_batch_size': 2, 'execution': 'concurrent-futures', 'mode': 'pipelined'}].counters.pageserver_getpage_count: 6,402.6818
test_throughput[release-pg16-50-pipelining_config8-30-100-128-batchable {'max_batch_size': 2, 'execution': 'concurrent-futures', 'mode': 'pipelined'}].counters.pageserver_vectored_get_count: 3,206.6970
test_throughput[release-pg16-50-pipelining_config8-30-100-128-batchable {'max_batch_size': 2, 'execution': 'concurrent-futures', 'mode': 'pipelined'}].counters.compute_getpage_count: 6,402.6818
test_throughput[release-pg16-50-pipelining_config8-30-100-128-batchable {'max_batch_size': 2, 'execution': 'concurrent-futures', 'mode': 'pipelined'}].counters.pageserver_cpu_seconds_total: 0.4967
test_throughput[release-pg16-50-pipelining_config8-30-100-128-batchable {'max_batch_size': 2, 'execution': 'concurrent-futures', 'mode': 'pipelined'}].perfmetric.batching_factor: 1.9967
test_throughput[release-pg16-50-pipelining_config9-30-100-128-batchable {'max_batch_size': 2, 'execution': 'tasks', 'mode': 'pipelined'}].tablesize_mib: 50 MiB
test_throughput[release-pg16-50-pipelining_config9-30-100-128-batchable {'max_batch_size': 2, 'execution': 'tasks', 'mode': 'pipelined'}].effective_io_concurrency: 100
test_throughput[release-pg16-50-pipelining_config9-30-100-128-batchable {'max_batch_size': 2, 'execution': 'tasks', 'mode': 'pipelined'}].readhead_buffer_size: 128
test_throughput[release-pg16-50-pipelining_config9-30-100-128-batchable {'max_batch_size': 2, 'execution': 'tasks', 'mode': 'pipelined'}].pipelining_config.max_batch_size: 2
test_throughput[release-pg16-50-pipelining_config9-30-100-128-batchable {'max_batch_size': 2, 'execution': 'tasks', 'mode': 'pipelined'}].pipelining_config.execution: tasks
test_throughput[release-pg16-50-pipelining_config9-30-100-128-batchable {'max_batch_size': 2, 'execution': 'tasks', 'mode': 'pipelined'}].pipelining_config.mode: pipelined
test_throughput[release-pg16-50-pipelining_config9-30-100-128-batchable {'max_batch_size': 2, 'execution': 'tasks', 'mode': 'pipelined'}].counters.time: 0.4691
test_throughput[release-pg16-50-pipelining_config9-30-100-128-batchable {'max_batch_size': 2, 'execution': 'tasks', 'mode': 'pipelined'}].counters.pageserver_getpage_count: 6,402.7619
test_throughput[release-pg16-50-pipelining_config9-30-100-128-batchable {'max_batch_size': 2, 'execution': 'tasks', 'mode': 'pipelined'}].counters.pageserver_vectored_get_count: 3,206.7778
test_throughput[release-pg16-50-pipelining_config9-30-100-128-batchable {'max_batch_size': 2, 'execution': 'tasks', 'mode': 'pipelined'}].counters.compute_getpage_count: 6,402.7619
test_throughput[release-pg16-50-pipelining_config9-30-100-128-batchable {'max_batch_size': 2, 'execution': 'tasks', 'mode': 'pipelined'}].counters.pageserver_cpu_seconds_total: 0.4438
test_throughput[release-pg16-50-pipelining_config9-30-100-128-batchable {'max_batch_size': 2, 'execution': 'tasks', 'mode': 'pipelined'}].perfmetric.batching_factor: 1.9966
test_throughput[release-pg16-50-pipelining_config10-30-100-128-batchable {'max_batch_size': 4, 'execution': 'concurrent-futures', 'mode': 'pipelined'}].tablesize_mib: 50 MiB
test_throughput[release-pg16-50-pipelining_config10-30-100-128-batchable {'max_batch_size': 4, 'execution': 'concurrent-futures', 'mode': 'pipelined'}].effective_io_concurrency: 100
test_throughput[release-pg16-50-pipelining_config10-30-100-128-batchable {'max_batch_size': 4, 'execution': 'concurrent-futures', 'mode': 'pipelined'}].readhead_buffer_size: 128
test_throughput[release-pg16-50-pipelining_config10-30-100-128-batchable {'max_batch_size': 4, 'execution': 'concurrent-futures', 'mode': 'pipelined'}].pipelining_config.max_batch_size: 4
test_throughput[release-pg16-50-pipelining_config10-30-100-128-batchable {'max_batch_size': 4, 'execution': 'concurrent-futures', 'mode': 'pipelined'}].pipelining_config.execution: concurrent-futures
test_throughput[release-pg16-50-pipelining_config10-30-100-128-batchable {'max_batch_size': 4, 'execution': 'concurrent-futures', 'mode': 'pipelined'}].pipelining_config.mode: pipelined
test_throughput[release-pg16-50-pipelining_config10-30-100-128-batchable {'max_batch_size': 4, 'execution': 'concurrent-futures', 'mode': 'pipelined'}].counters.time: 0.3613
test_throughput[release-pg16-50-pipelining_config10-30-100-128-batchable {'max_batch_size': 4, 'execution': 'concurrent-futures', 'mode': 'pipelined'}].counters.pageserver_getpage_count: 6,402.1585
test_throughput[release-pg16-50-pipelining_config10-30-100-128-batchable {'max_batch_size': 4, 'execution': 'concurrent-futures', 'mode': 'pipelined'}].counters.pageserver_vectored_get_count: 1,651.1707
test_throughput[release-pg16-50-pipelining_config10-30-100-128-batchable {'max_batch_size': 4, 'execution': 'concurrent-futures', 'mode': 'pipelined'}].counters.compute_getpage_count: 6,402.1585
test_throughput[release-pg16-50-pipelining_config10-30-100-128-batchable {'max_batch_size': 4, 'execution': 'concurrent-futures', 'mode': 'pipelined'}].counters.pageserver_cpu_seconds_total: 0.3716
test_throughput[release-pg16-50-pipelining_config10-30-100-128-batchable {'max_batch_size': 4, 'execution': 'concurrent-futures', 'mode': 'pipelined'}].perfmetric.batching_factor: 3.8773
test_throughput[release-pg16-50-pipelining_config11-30-100-128-batchable {'max_batch_size': 4, 'execution': 'tasks', 'mode': 'pipelined'}].tablesize_mib: 50 MiB
test_throughput[release-pg16-50-pipelining_config11-30-100-128-batchable {'max_batch_size': 4, 'execution': 'tasks', 'mode': 'pipelined'}].effective_io_concurrency: 100
test_throughput[release-pg16-50-pipelining_config11-30-100-128-batchable {'max_batch_size': 4, 'execution': 'tasks', 'mode': 'pipelined'}].readhead_buffer_size: 128
test_throughput[release-pg16-50-pipelining_config11-30-100-128-batchable {'max_batch_size': 4, 'execution': 'tasks', 'mode': 'pipelined'}].pipelining_config.max_batch_size: 4
test_throughput[release-pg16-50-pipelining_config11-30-100-128-batchable {'max_batch_size': 4, 'execution': 'tasks', 'mode': 'pipelined'}].pipelining_config.execution: tasks
test_throughput[release-pg16-50-pipelining_config11-30-100-128-batchable {'max_batch_size': 4, 'execution': 'tasks', 'mode': 'pipelined'}].pipelining_config.mode: pipelined
test_throughput[release-pg16-50-pipelining_config11-30-100-128-batchable {'max_batch_size': 4, 'execution': 'tasks', 'mode': 'pipelined'}].counters.time: 0.3925
test_throughput[release-pg16-50-pipelining_config11-30-100-128-batchable {'max_batch_size': 4, 'execution': 'tasks', 'mode': 'pipelined'}].counters.pageserver_getpage_count: 6,402.3289
test_throughput[release-pg16-50-pipelining_config11-30-100-128-batchable {'max_batch_size': 4, 'execution': 'tasks', 'mode': 'pipelined'}].counters.pageserver_vectored_get_count: 1,651.3421
test_throughput[release-pg16-50-pipelining_config11-30-100-128-batchable {'max_batch_size': 4, 'execution': 'tasks', 'mode': 'pipelined'}].counters.compute_getpage_count: 6,402.3289
test_throughput[release-pg16-50-pipelining_config11-30-100-128-batchable {'max_batch_size': 4, 'execution': 'tasks', 'mode': 'pipelined'}].counters.pageserver_cpu_seconds_total: 0.3608
test_throughput[release-pg16-50-pipelining_config11-30-100-128-batchable {'max_batch_size': 4, 'execution': 'tasks', 'mode': 'pipelined'}].perfmetric.batching_factor: 3.8770
test_throughput[release-pg16-50-pipelining_config12-30-100-128-batchable {'max_batch_size': 8, 'execution': 'concurrent-futures', 'mode': 'pipelined'}].tablesize_mib: 50 MiB
test_throughput[release-pg16-50-pipelining_config12-30-100-128-batchable {'max_batch_size': 8, 'execution': 'concurrent-futures', 'mode': 'pipelined'}].effective_io_concurrency: 100
test_throughput[release-pg16-50-pipelining_config12-30-100-128-batchable {'max_batch_size': 8, 'execution': 'concurrent-futures', 'mode': 'pipelined'}].readhead_buffer_size: 128
test_throughput[release-pg16-50-pipelining_config12-30-100-128-batchable {'max_batch_size': 8, 'execution': 'concurrent-futures', 'mode': 'pipelined'}].pipelining_config.max_batch_size: 8
test_throughput[release-pg16-50-pipelining_config12-30-100-128-batchable {'max_batch_size': 8, 'execution': 'concurrent-futures', 'mode': 'pipelined'}].pipelining_config.execution: concurrent-futures
test_throughput[release-pg16-50-pipelining_config12-30-100-128-batchable {'max_batch_size': 8, 'execution': 'concurrent-futures', 'mode': 'pipelined'}].pipelining_config.mode: pipelined
test_throughput[release-pg16-50-pipelining_config12-30-100-128-batchable {'max_batch_size': 8, 'execution': 'concurrent-futures', 'mode': 'pipelined'}].counters.time: 0.3002
test_throughput[release-pg16-50-pipelining_config12-30-100-128-batchable {'max_batch_size': 8, 'execution': 'concurrent-futures', 'mode': 'pipelined'}].counters.pageserver_getpage_count: 6,401.7879
test_throughput[release-pg16-50-pipelining_config12-30-100-128-batchable {'max_batch_size': 8, 'execution': 'concurrent-futures', 'mode': 'pipelined'}].counters.pageserver_vectored_get_count: 876.7980
test_throughput[release-pg16-50-pipelining_config12-30-100-128-batchable {'max_batch_size': 8, 'execution': 'concurrent-futures', 'mode': 'pipelined'}].counters.compute_getpage_count: 6,401.7879
test_throughput[release-pg16-50-pipelining_config12-30-100-128-batchable {'max_batch_size': 8, 'execution': 'concurrent-futures', 'mode': 'pipelined'}].counters.pageserver_cpu_seconds_total: 0.2960
test_throughput[release-pg16-50-pipelining_config12-30-100-128-batchable {'max_batch_size': 8, 'execution': 'concurrent-futures', 'mode': 'pipelined'}].perfmetric.batching_factor: 7.3013
test_throughput[release-pg16-50-pipelining_config13-30-100-128-batchable {'max_batch_size': 8, 'execution': 'tasks', 'mode': 'pipelined'}].tablesize_mib: 50 MiB
test_throughput[release-pg16-50-pipelining_config13-30-100-128-batchable {'max_batch_size': 8, 'execution': 'tasks', 'mode': 'pipelined'}].effective_io_concurrency: 100
test_throughput[release-pg16-50-pipelining_config13-30-100-128-batchable {'max_batch_size': 8, 'execution': 'tasks', 'mode': 'pipelined'}].readhead_buffer_size: 128
test_throughput[release-pg16-50-pipelining_config13-30-100-128-batchable {'max_batch_size': 8, 'execution': 'tasks', 'mode': 'pipelined'}].pipelining_config.max_batch_size: 8
test_throughput[release-pg16-50-pipelining_config13-30-100-128-batchable {'max_batch_size': 8, 'execution': 'tasks', 'mode': 'pipelined'}].pipelining_config.execution: tasks
test_throughput[release-pg16-50-pipelining_config13-30-100-128-batchable {'max_batch_size': 8, 'execution': 'tasks', 'mode': 'pipelined'}].pipelining_config.mode: pipelined
test_throughput[release-pg16-50-pipelining_config13-30-100-128-batchable {'max_batch_size': 8, 'execution': 'tasks', 'mode': 'pipelined'}].counters.time: 0.3337
test_throughput[release-pg16-50-pipelining_config13-30-100-128-batchable {'max_batch_size': 8, 'execution': 'tasks', 'mode': 'pipelined'}].counters.pageserver_getpage_count: 6,401.9888
test_throughput[release-pg16-50-pipelining_config13-30-100-128-batchable {'max_batch_size': 8, 'execution': 'tasks', 'mode': 'pipelined'}].counters.pageserver_vectored_get_count: 877.0112
test_throughput[release-pg16-50-pipelining_config13-30-100-128-batchable {'max_batch_size': 8, 'execution': 'tasks', 'mode': 'pipelined'}].counters.compute_getpage_count: 6,401.9888
test_throughput[release-pg16-50-pipelining_config13-30-100-128-batchable {'max_batch_size': 8, 'execution': 'tasks', 'mode': 'pipelined'}].counters.pageserver_cpu_seconds_total: 0.3044
test_throughput[release-pg16-50-pipelining_config13-30-100-128-batchable {'max_batch_size': 8, 'execution': 'tasks', 'mode': 'pipelined'}].perfmetric.batching_factor: 7.2998
test_throughput[release-pg16-50-pipelining_config14-30-100-128-batchable {'max_batch_size': 16, 'execution': 'concurrent-futures', 'mode': 'pipelined'}].tablesize_mib: 50 MiB
test_throughput[release-pg16-50-pipelining_config14-30-100-128-batchable {'max_batch_size': 16, 'execution': 'concurrent-futures', 'mode': 'pipelined'}].effective_io_concurrency: 100
test_throughput[release-pg16-50-pipelining_config14-30-100-128-batchable {'max_batch_size': 16, 'execution': 'concurrent-futures', 'mode': 'pipelined'}].readhead_buffer_size: 128
test_throughput[release-pg16-50-pipelining_config14-30-100-128-batchable {'max_batch_size': 16, 'execution': 'concurrent-futures', 'mode': 'pipelined'}].pipelining_config.max_batch_size: 16
test_throughput[release-pg16-50-pipelining_config14-30-100-128-batchable {'max_batch_size': 16, 'execution': 'concurrent-futures', 'mode': 'pipelined'}].pipelining_config.execution: concurrent-futures
test_throughput[release-pg16-50-pipelining_config14-30-100-128-batchable {'max_batch_size': 16, 'execution': 'concurrent-futures', 'mode': 'pipelined'}].pipelining_config.mode: pipelined
test_throughput[release-pg16-50-pipelining_config14-30-100-128-batchable {'max_batch_size': 16, 'execution': 'concurrent-futures', 'mode': 'pipelined'}].counters.time: 0.2844
test_throughput[release-pg16-50-pipelining_config14-30-100-128-batchable {'max_batch_size': 16, 'execution': 'concurrent-futures', 'mode': 'pipelined'}].counters.pageserver_getpage_count: 6,401.6857
test_throughput[release-pg16-50-pipelining_config14-30-100-128-batchable {'max_batch_size': 16, 'execution': 'concurrent-futures', 'mode': 'pipelined'}].counters.pageserver_vectored_get_count: 490.6952
test_throughput[release-pg16-50-pipelining_config14-30-100-128-batchable {'max_batch_size': 16, 'execution': 'concurrent-futures', 'mode': 'pipelined'}].counters.compute_getpage_count: 6,401.6857
test_throughput[release-pg16-50-pipelining_config14-30-100-128-batchable {'max_batch_size': 16, 'execution': 'concurrent-futures', 'mode': 'pipelined'}].counters.pageserver_cpu_seconds_total: 0.2677
test_throughput[release-pg16-50-pipelining_config14-30-100-128-batchable {'max_batch_size': 16, 'execution': 'concurrent-futures', 'mode': 'pipelined'}].perfmetric.batching_factor: 13.0462
test_throughput[release-pg16-50-pipelining_config15-30-100-128-batchable {'max_batch_size': 16, 'execution': 'tasks', 'mode': 'pipelined'}].tablesize_mib: 50 MiB
test_throughput[release-pg16-50-pipelining_config15-30-100-128-batchable {'max_batch_size': 16, 'execution': 'tasks', 'mode': 'pipelined'}].effective_io_concurrency: 100
test_throughput[release-pg16-50-pipelining_config15-30-100-128-batchable {'max_batch_size': 16, 'execution': 'tasks', 'mode': 'pipelined'}].readhead_buffer_size: 128
test_throughput[release-pg16-50-pipelining_config15-30-100-128-batchable {'max_batch_size': 16, 'execution': 'tasks', 'mode': 'pipelined'}].pipelining_config.max_batch_size: 16
test_throughput[release-pg16-50-pipelining_config15-30-100-128-batchable {'max_batch_size': 16, 'execution': 'tasks', 'mode': 'pipelined'}].pipelining_config.execution: tasks
test_throughput[release-pg16-50-pipelining_config15-30-100-128-batchable {'max_batch_size': 16, 'execution': 'tasks', 'mode': 'pipelined'}].pipelining_config.mode: pipelined
test_throughput[release-pg16-50-pipelining_config15-30-100-128-batchable {'max_batch_size': 16, 'execution': 'tasks', 'mode': 'pipelined'}].counters.time: 0.2959
test_throughput[release-pg16-50-pipelining_config15-30-100-128-batchable {'max_batch_size': 16, 'execution': 'tasks', 'mode': 'pipelined'}].counters.pageserver_getpage_count: 6,401.7525
test_throughput[release-pg16-50-pipelining_config15-30-100-128-batchable {'max_batch_size': 16, 'execution': 'tasks', 'mode': 'pipelined'}].counters.pageserver_vectored_get_count: 490.7624
test_throughput[release-pg16-50-pipelining_config15-30-100-128-batchable {'max_batch_size': 16, 'execution': 'tasks', 'mode': 'pipelined'}].counters.compute_getpage_count: 6,401.7525
test_throughput[release-pg16-50-pipelining_config15-30-100-128-batchable {'max_batch_size': 16, 'execution': 'tasks', 'mode': 'pipelined'}].counters.pageserver_cpu_seconds_total: 0.2654
test_throughput[release-pg16-50-pipelining_config15-30-100-128-batchable {'max_batch_size': 16, 'execution': 'tasks', 'mode': 'pipelined'}].perfmetric.batching_factor: 13.0445
test_throughput[release-pg16-50-pipelining_config16-30-100-128-batchable {'max_batch_size': 32, 'execution': 'concurrent-futures', 'mode': 'pipelined'}].tablesize_mib: 50 MiB
test_throughput[release-pg16-50-pipelining_config16-30-100-128-batchable {'max_batch_size': 32, 'execution': 'concurrent-futures', 'mode': 'pipelined'}].effective_io_concurrency: 100
test_throughput[release-pg16-50-pipelining_config16-30-100-128-batchable {'max_batch_size': 32, 'execution': 'concurrent-futures', 'mode': 'pipelined'}].readhead_buffer_size: 128
test_throughput[release-pg16-50-pipelining_config16-30-100-128-batchable {'max_batch_size': 32, 'execution': 'concurrent-futures', 'mode': 'pipelined'}].pipelining_config.max_batch_size: 32
test_throughput[release-pg16-50-pipelining_config16-30-100-128-batchable {'max_batch_size': 32, 'execution': 'concurrent-futures', 'mode': 'pipelined'}].pipelining_config.execution: concurrent-futures
test_throughput[release-pg16-50-pipelining_config16-30-100-128-batchable {'max_batch_size': 32, 'execution': 'concurrent-futures', 'mode': 'pipelined'}].pipelining_config.mode: pipelined
test_throughput[release-pg16-50-pipelining_config16-30-100-128-batchable {'max_batch_size': 32, 'execution': 'concurrent-futures', 'mode': 'pipelined'}].counters.time: 0.2662
test_throughput[release-pg16-50-pipelining_config16-30-100-128-batchable {'max_batch_size': 32, 'execution': 'concurrent-futures', 'mode': 'pipelined'}].counters.pageserver_getpage_count: 6,401.5804
test_throughput[release-pg16-50-pipelining_config16-30-100-128-batchable {'max_batch_size': 32, 'execution': 'concurrent-futures', 'mode': 'pipelined'}].counters.pageserver_vectored_get_count: 297.5893
test_throughput[release-pg16-50-pipelining_config16-30-100-128-batchable {'max_batch_size': 32, 'execution': 'concurrent-futures', 'mode': 'pipelined'}].counters.compute_getpage_count: 6,401.5804
test_throughput[release-pg16-50-pipelining_config16-30-100-128-batchable {'max_batch_size': 32, 'execution': 'concurrent-futures', 'mode': 'pipelined'}].counters.pageserver_cpu_seconds_total: 0.2445
test_throughput[release-pg16-50-pipelining_config16-30-100-128-batchable {'max_batch_size': 32, 'execution': 'concurrent-futures', 'mode': 'pipelined'}].perfmetric.batching_factor: 21.5115
test_throughput[release-pg16-50-pipelining_config17-30-100-128-batchable {'max_batch_size': 32, 'execution': 'tasks', 'mode': 'pipelined'}].tablesize_mib: 50 MiB
test_throughput[release-pg16-50-pipelining_config17-30-100-128-batchable {'max_batch_size': 32, 'execution': 'tasks', 'mode': 'pipelined'}].effective_io_concurrency: 100
test_throughput[release-pg16-50-pipelining_config17-30-100-128-batchable {'max_batch_size': 32, 'execution': 'tasks', 'mode': 'pipelined'}].readhead_buffer_size: 128
test_throughput[release-pg16-50-pipelining_config17-30-100-128-batchable {'max_batch_size': 32, 'execution': 'tasks', 'mode': 'pipelined'}].pipelining_config.max_batch_size: 32
test_throughput[release-pg16-50-pipelining_config17-30-100-128-batchable {'max_batch_size': 32, 'execution': 'tasks', 'mode': 'pipelined'}].pipelining_config.execution: tasks
test_throughput[release-pg16-50-pipelining_config17-30-100-128-batchable {'max_batch_size': 32, 'execution': 'tasks', 'mode': 'pipelined'}].pipelining_config.mode: pipelined
test_throughput[release-pg16-50-pipelining_config17-30-100-128-batchable {'max_batch_size': 32, 'execution': 'tasks', 'mode': 'pipelined'}].counters.time: 0.2798
test_throughput[release-pg16-50-pipelining_config17-30-100-128-batchable {'max_batch_size': 32, 'execution': 'tasks', 'mode': 'pipelined'}].counters.pageserver_getpage_count: 6,401.6542
test_throughput[release-pg16-50-pipelining_config17-30-100-128-batchable {'max_batch_size': 32, 'execution': 'tasks', 'mode': 'pipelined'}].counters.pageserver_vectored_get_count: 297.6636
test_throughput[release-pg16-50-pipelining_config17-30-100-128-batchable {'max_batch_size': 32, 'execution': 'tasks', 'mode': 'pipelined'}].counters.compute_getpage_count: 6,401.6542
test_throughput[release-pg16-50-pipelining_config17-30-100-128-batchable {'max_batch_size': 32, 'execution': 'tasks', 'mode': 'pipelined'}].counters.pageserver_cpu_seconds_total: 0.2504
test_throughput[release-pg16-50-pipelining_config17-30-100-128-batchable {'max_batch_size': 32, 'execution': 'tasks', 'mode': 'pipelined'}].perfmetric.batching_factor: 21.5063
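Note: the perfmetric.batching_factor values above line up with the ratio of pageserver_getpage_count to pageserver_vectored_get_count (e.g. 6,401.5804 / 297.5893 ≈ 21.51 for the max_batch_size=32, concurrent-futures run, matching the reported 21.5115). A throwaway sanity check of that relationship; the helper below is my own shorthand, not part of the repository:

    # Sanity check only: recompute the batching factor as the ratio of getpage
    # requests to vectored get calls, using counters copied from the run above.
    def batching_factor(getpage_count: float, vectored_get_count: float) -> float:
        return getpage_count / vectored_get_count

    # max_batch_size=32, execution=concurrent-futures run:
    print(round(batching_factor(6401.5804, 297.5893), 4))  # -> 21.5115 (matches the reported value)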
test_latency[release-pg16-pipelining_config0-{'mode': 'serial'}].latency_mean: 0.123 ms
test_latency[release-pg16-pipelining_config0-{'mode': 'serial'}].latency_percentiles.p95: 0.159 ms
test_latency[release-pg16-pipelining_config0-{'mode': 'serial'}].latency_percentiles.p99: 0.170 ms
test_latency[release-pg16-pipelining_config0-{'mode': 'serial'}].latency_percentiles.p99.9: 0.306 ms
test_latency[release-pg16-pipelining_config0-{'mode': 'serial'}].latency_percentiles.p99.99: 0.579 ms
test_latency[release-pg16-pipelining_config1-{'max_batch_size': 1, 'execution': 'concurrent-futures', 'mode': 'pipelined'}].latency_mean: 0.122 ms
test_latency[release-pg16-pipelining_config1-{'max_batch_size': 1, 'execution': 'concurrent-futures', 'mode': 'pipelined'}].latency_percentiles.p95: 0.163 ms
test_latency[release-pg16-pipelining_config1-{'max_batch_size': 1, 'execution': 'concurrent-futures', 'mode': 'pipelined'}].latency_percentiles.p99: 0.181 ms
test_latency[release-pg16-pipelining_config1-{'max_batch_size': 1, 'execution': 'concurrent-futures', 'mode': 'pipelined'}].latency_percentiles.p99.9: 0.290 ms
test_latency[release-pg16-pipelining_config1-{'max_batch_size': 1, 'execution': 'concurrent-futures', 'mode': 'pipelined'}].latency_percentiles.p99.99: 0.623 ms
test_latency[release-pg16-pipelining_config2-{'max_batch_size': 1, 'execution': 'tasks', 'mode': 'pipelined'}].latency_mean: 0.143 ms
test_latency[release-pg16-pipelining_config2-{'max_batch_size': 1, 'execution': 'tasks', 'mode': 'pipelined'}].latency_percentiles.p95: 0.175 ms
test_latency[release-pg16-pipelining_config2-{'max_batch_size': 1, 'execution': 'tasks', 'mode': 'pipelined'}].latency_percentiles.p99: 0.188 ms
test_latency[release-pg16-pipelining_config2-{'max_batch_size': 1, 'execution': 'tasks', 'mode': 'pipelined'}].latency_percentiles.p99.9: 0.322 ms
test_latency[release-pg16-pipelining_config2-{'max_batch_size': 1, 'execution': 'tasks', 'mode': 'pipelined'}].latency_percentiles.p99.99: 0.636 ms
test_latency[release-pg16-pipelining_config3-{'max_batch_size': 32, 'execution': 'concurrent-futures', 'mode': 'pipelined'}].latency_mean: 0.122 ms
test_latency[release-pg16-pipelining_config3-{'max_batch_size': 32, 'execution': 'concurrent-futures', 'mode': 'pipelined'}].latency_percentiles.p95: 0.161 ms
test_latency[release-pg16-pipelining_config3-{'max_batch_size': 32, 'execution': 'concurrent-futures', 'mode': 'pipelined'}].latency_percentiles.p99: 0.175 ms
test_latency[release-pg16-pipelining_config3-{'max_batch_size': 32, 'execution': 'concurrent-futures', 'mode': 'pipelined'}].latency_percentiles.p99.9: 0.281 ms
test_latency[release-pg16-pipelining_config3-{'max_batch_size': 32, 'execution': 'concurrent-futures', 'mode': 'pipelined'}].latency_percentiles.p99.99: 0.605 ms
test_latency[release-pg16-pipelining_config4-{'max_batch_size': 32, 'execution': 'tasks', 'mode': 'pipelined'}].latency_mean: 0.117 ms
test_latency[release-pg16-pipelining_config4-{'max_batch_size': 32, 'execution': 'tasks', 'mode': 'pipelined'}].latency_percentiles.p95: 0.132 ms
test_latency[release-pg16-pipelining_config4-{'max_batch_size': 32, 'execution': 'tasks', 'mode': 'pipelined'}].latency_percentiles.p99: 0.154 ms
test_latency[release-pg16-pipelining_config4-{'max_batch_size': 32, 'execution': 'tasks', 'mode': 'pipelined'}].latency_percentiles.p99.9: 0.402 ms
test_latency[release-pg16-pipelining_config4-{'max_batch_size': 32, 'execution': 'tasks', 'mode': 'pipelined'}].latency_percentiles.p99.99: 0.569 ms
2024-11-29 13:50:28 +01:00
Christian Schwarz
dfcbb139fb the None configuration in the benchmark would use the default instead of the serial configuration; fix that (see sketch below)
2024-11-29 13:35:24 +01:00
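A minimal, hypothetical sketch of the pitfall the commit above describes (names and shapes are illustrative, not the repository's actual benchmark code): when the parametrized pipelining config is None, an empty override leaves the pageserver on its compiled-in default rather than on the intended serial baseline, so the "None" rows do not measure what they were meant to.

    # Hypothetical illustration only -- not the actual benchmark fixture.
    from typing import Optional

    def overrides_buggy(pipelining_config: Optional[dict]) -> dict:
        # Bug: None yields no override, so whatever default the pageserver
        # ships with is measured instead of the serial baseline.
        if pipelining_config is None:
            return {}
        return {"page_service_pipelining": pipelining_config}

    def overrides_fixed(pipelining_config: Optional[dict]) -> dict:
        # Fix: None is pinned to an explicit serial configuration.
        effective = pipelining_config if pipelining_config is not None else {"mode": "serial"}
        return {"page_service_pipelining": effective}

    assert overrides_buggy(None) == {}
    assert overrides_fixed(None) == {"page_service_pipelining": {"mode": "serial"}}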
Christian Schwarz
27c72e4ff3 benchmark on hetzner box
--------------------------------------------------------------------------- Benchmark results ---------------------------------------------------------------------------
test_throughput[release-pg16-50-None-30-1-128-not batchable None].tablesize_mib: 50 MiB
test_throughput[release-pg16-50-None-30-1-128-not batchable None].pipelining_enabled: 0
test_throughput[release-pg16-50-None-30-1-128-not batchable None].effective_io_concurrency: 1
test_throughput[release-pg16-50-None-30-1-128-not batchable None].readhead_buffer_size: 128
test_throughput[release-pg16-50-None-30-1-128-not batchable None].counters.time: 0.8864
test_throughput[release-pg16-50-None-30-1-128-not batchable None].counters.pageserver_getpage_count: 6,403.0000
test_throughput[release-pg16-50-None-30-1-128-not batchable None].counters.pageserver_vectored_get_count: 6,403.0000
test_throughput[release-pg16-50-None-30-1-128-not batchable None].counters.compute_getpage_count: 6,403.0000
test_throughput[release-pg16-50-None-30-1-128-not batchable None].counters.pageserver_cpu_seconds_total: 0.8297
test_throughput[release-pg16-50-None-30-1-128-not batchable None].perfmetric.batching_factor: 1.0000
test_throughput[release-pg16-50-pipelining_config1-30-1-128-not batchable {'max_batch_size': 1, 'protocol_pipelining_mode': 'concurrent-futures'}].tablesize_mib: 50 MiB
test_throughput[release-pg16-50-pipelining_config1-30-1-128-not batchable {'max_batch_size': 1, 'protocol_pipelining_mode': 'concurrent-futures'}].pipelining_enabled: 1
test_throughput[release-pg16-50-pipelining_config1-30-1-128-not batchable {'max_batch_size': 1, 'protocol_pipelining_mode': 'concurrent-futures'}].effective_io_concurrency: 1
test_throughput[release-pg16-50-pipelining_config1-30-1-128-not batchable {'max_batch_size': 1, 'protocol_pipelining_mode': 'concurrent-futures'}].readhead_buffer_size: 128
test_throughput[release-pg16-50-pipelining_config1-30-1-128-not batchable {'max_batch_size': 1, 'protocol_pipelining_mode': 'concurrent-futures'}].pipelining_config.max_batch_size: 1
test_throughput[release-pg16-50-pipelining_config1-30-1-128-not batchable {'max_batch_size': 1, 'protocol_pipelining_mode': 'concurrent-futures'}].pipelining_config.protocol_pipelining_mode: concurrent-futures
test_throughput[release-pg16-50-pipelining_config1-30-1-128-not batchable {'max_batch_size': 1, 'protocol_pipelining_mode': 'concurrent-futures'}].counters.time: 0.9974
test_throughput[release-pg16-50-pipelining_config1-30-1-128-not batchable {'max_batch_size': 1, 'protocol_pipelining_mode': 'concurrent-futures'}].counters.pageserver_getpage_count: 6,403.0000
test_throughput[release-pg16-50-pipelining_config1-30-1-128-not batchable {'max_batch_size': 1, 'protocol_pipelining_mode': 'concurrent-futures'}].counters.pageserver_vectored_get_count: 6,403.0000
test_throughput[release-pg16-50-pipelining_config1-30-1-128-not batchable {'max_batch_size': 1, 'protocol_pipelining_mode': 'concurrent-futures'}].counters.compute_getpage_count: 6,403.0000
test_throughput[release-pg16-50-pipelining_config1-30-1-128-not batchable {'max_batch_size': 1, 'protocol_pipelining_mode': 'concurrent-futures'}].counters.pageserver_cpu_seconds_total: 0.9223
test_throughput[release-pg16-50-pipelining_config1-30-1-128-not batchable {'max_batch_size': 1, 'protocol_pipelining_mode': 'concurrent-futures'}].perfmetric.batching_factor: 1.0000
test_throughput[release-pg16-50-pipelining_config2-30-1-128-not batchable {'max_batch_size': 1, 'protocol_pipelining_mode': 'tasks'}].tablesize_mib: 50 MiB
test_throughput[release-pg16-50-pipelining_config2-30-1-128-not batchable {'max_batch_size': 1, 'protocol_pipelining_mode': 'tasks'}].pipelining_enabled: 1
test_throughput[release-pg16-50-pipelining_config2-30-1-128-not batchable {'max_batch_size': 1, 'protocol_pipelining_mode': 'tasks'}].effective_io_concurrency: 1
test_throughput[release-pg16-50-pipelining_config2-30-1-128-not batchable {'max_batch_size': 1, 'protocol_pipelining_mode': 'tasks'}].readhead_buffer_size: 128
test_throughput[release-pg16-50-pipelining_config2-30-1-128-not batchable {'max_batch_size': 1, 'protocol_pipelining_mode': 'tasks'}].pipelining_config.max_batch_size: 1
test_throughput[release-pg16-50-pipelining_config2-30-1-128-not batchable {'max_batch_size': 1, 'protocol_pipelining_mode': 'tasks'}].pipelining_config.protocol_pipelining_mode: tasks
test_throughput[release-pg16-50-pipelining_config2-30-1-128-not batchable {'max_batch_size': 1, 'protocol_pipelining_mode': 'tasks'}].counters.time: 0.9171
test_throughput[release-pg16-50-pipelining_config2-30-1-128-not batchable {'max_batch_size': 1, 'protocol_pipelining_mode': 'tasks'}].counters.pageserver_getpage_count: 6,403.0000
test_throughput[release-pg16-50-pipelining_config2-30-1-128-not batchable {'max_batch_size': 1, 'protocol_pipelining_mode': 'tasks'}].counters.pageserver_vectored_get_count: 6,403.0000
test_throughput[release-pg16-50-pipelining_config2-30-1-128-not batchable {'max_batch_size': 1, 'protocol_pipelining_mode': 'tasks'}].counters.compute_getpage_count: 6,403.0000
test_throughput[release-pg16-50-pipelining_config2-30-1-128-not batchable {'max_batch_size': 1, 'protocol_pipelining_mode': 'tasks'}].counters.pageserver_cpu_seconds_total: 0.7762
test_throughput[release-pg16-50-pipelining_config2-30-1-128-not batchable {'max_batch_size': 1, 'protocol_pipelining_mode': 'tasks'}].perfmetric.batching_factor: 1.0000
test_throughput[release-pg16-50-pipelining_config3-30-1-128-not batchable {'max_batch_size': 32, 'protocol_pipelining_mode': 'concurrent-futures'}].tablesize_mib: 50 MiB
test_throughput[release-pg16-50-pipelining_config3-30-1-128-not batchable {'max_batch_size': 32, 'protocol_pipelining_mode': 'concurrent-futures'}].pipelining_enabled: 1
test_throughput[release-pg16-50-pipelining_config3-30-1-128-not batchable {'max_batch_size': 32, 'protocol_pipelining_mode': 'concurrent-futures'}].effective_io_concurrency: 1
test_throughput[release-pg16-50-pipelining_config3-30-1-128-not batchable {'max_batch_size': 32, 'protocol_pipelining_mode': 'concurrent-futures'}].readhead_buffer_size: 128
test_throughput[release-pg16-50-pipelining_config3-30-1-128-not batchable {'max_batch_size': 32, 'protocol_pipelining_mode': 'concurrent-futures'}].pipelining_config.max_batch_size: 32
test_throughput[release-pg16-50-pipelining_config3-30-1-128-not batchable {'max_batch_size': 32, 'protocol_pipelining_mode': 'concurrent-futures'}].pipelining_config.protocol_pipelining_mode: concurrent-futures
test_throughput[release-pg16-50-pipelining_config3-30-1-128-not batchable {'max_batch_size': 32, 'protocol_pipelining_mode': 'concurrent-futures'}].counters.time: 0.8903
test_throughput[release-pg16-50-pipelining_config3-30-1-128-not batchable {'max_batch_size': 32, 'protocol_pipelining_mode': 'concurrent-futures'}].counters.pageserver_getpage_count: 6,403.0000
test_throughput[release-pg16-50-pipelining_config3-30-1-128-not batchable {'max_batch_size': 32, 'protocol_pipelining_mode': 'concurrent-futures'}].counters.pageserver_vectored_get_count: 6,403.0000
test_throughput[release-pg16-50-pipelining_config3-30-1-128-not batchable {'max_batch_size': 32, 'protocol_pipelining_mode': 'concurrent-futures'}].counters.compute_getpage_count: 6,403.0000
test_throughput[release-pg16-50-pipelining_config3-30-1-128-not batchable {'max_batch_size': 32, 'protocol_pipelining_mode': 'concurrent-futures'}].counters.pageserver_cpu_seconds_total: 0.8303
test_throughput[release-pg16-50-pipelining_config3-30-1-128-not batchable {'max_batch_size': 32, 'protocol_pipelining_mode': 'concurrent-futures'}].perfmetric.batching_factor: 1.0000
test_throughput[release-pg16-50-pipelining_config4-30-1-128-not batchable {'max_batch_size': 32, 'protocol_pipelining_mode': 'tasks'}].tablesize_mib: 50 MiB
test_throughput[release-pg16-50-pipelining_config4-30-1-128-not batchable {'max_batch_size': 32, 'protocol_pipelining_mode': 'tasks'}].pipelining_enabled: 1
test_throughput[release-pg16-50-pipelining_config4-30-1-128-not batchable {'max_batch_size': 32, 'protocol_pipelining_mode': 'tasks'}].effective_io_concurrency: 1
test_throughput[release-pg16-50-pipelining_config4-30-1-128-not batchable {'max_batch_size': 32, 'protocol_pipelining_mode': 'tasks'}].readhead_buffer_size: 128
test_throughput[release-pg16-50-pipelining_config4-30-1-128-not batchable {'max_batch_size': 32, 'protocol_pipelining_mode': 'tasks'}].pipelining_config.max_batch_size: 32
test_throughput[release-pg16-50-pipelining_config4-30-1-128-not batchable {'max_batch_size': 32, 'protocol_pipelining_mode': 'tasks'}].pipelining_config.protocol_pipelining_mode: tasks
test_throughput[release-pg16-50-pipelining_config4-30-1-128-not batchable {'max_batch_size': 32, 'protocol_pipelining_mode': 'tasks'}].counters.time: 0.9611
test_throughput[release-pg16-50-pipelining_config4-30-1-128-not batchable {'max_batch_size': 32, 'protocol_pipelining_mode': 'tasks'}].counters.pageserver_getpage_count: 6,403.0000
test_throughput[release-pg16-50-pipelining_config4-30-1-128-not batchable {'max_batch_size': 32, 'protocol_pipelining_mode': 'tasks'}].counters.pageserver_vectored_get_count: 6,403.0000
test_throughput[release-pg16-50-pipelining_config4-30-1-128-not batchable {'max_batch_size': 32, 'protocol_pipelining_mode': 'tasks'}].counters.compute_getpage_count: 6,403.0000
test_throughput[release-pg16-50-pipelining_config4-30-1-128-not batchable {'max_batch_size': 32, 'protocol_pipelining_mode': 'tasks'}].counters.pageserver_cpu_seconds_total: 0.8074
test_throughput[release-pg16-50-pipelining_config4-30-1-128-not batchable {'max_batch_size': 32, 'protocol_pipelining_mode': 'tasks'}].perfmetric.batching_factor: 1.0000
test_throughput[release-pg16-50-None-30-100-128-batchable None].tablesize_mib: 50 MiB
test_throughput[release-pg16-50-None-30-100-128-batchable None].pipelining_enabled: 0
test_throughput[release-pg16-50-None-30-100-128-batchable None].effective_io_concurrency: 100
test_throughput[release-pg16-50-None-30-100-128-batchable None].readhead_buffer_size: 128
test_throughput[release-pg16-50-None-30-100-128-batchable None].counters.time: 0.2695
test_throughput[release-pg16-50-None-30-100-128-batchable None].counters.pageserver_getpage_count: 6,401.5946
test_throughput[release-pg16-50-None-30-100-128-batchable None].counters.pageserver_vectored_get_count: 297.5946
test_throughput[release-pg16-50-None-30-100-128-batchable None].counters.compute_getpage_count: 6,401.5946
test_throughput[release-pg16-50-None-30-100-128-batchable None].counters.pageserver_cpu_seconds_total: 0.2469
test_throughput[release-pg16-50-None-30-100-128-batchable None].perfmetric.batching_factor: 21.5111
test_throughput[release-pg16-50-pipelining_config6-30-100-128-batchable {'max_batch_size': 1, 'protocol_pipelining_mode': 'concurrent-futures'}].tablesize_mib: 50 MiB
test_throughput[release-pg16-50-pipelining_config6-30-100-128-batchable {'max_batch_size': 1, 'protocol_pipelining_mode': 'concurrent-futures'}].pipelining_enabled: 1
test_throughput[release-pg16-50-pipelining_config6-30-100-128-batchable {'max_batch_size': 1, 'protocol_pipelining_mode': 'concurrent-futures'}].effective_io_concurrency: 100
test_throughput[release-pg16-50-pipelining_config6-30-100-128-batchable {'max_batch_size': 1, 'protocol_pipelining_mode': 'concurrent-futures'}].readhead_buffer_size: 128
test_throughput[release-pg16-50-pipelining_config6-30-100-128-batchable {'max_batch_size': 1, 'protocol_pipelining_mode': 'concurrent-futures'}].pipelining_config.max_batch_size: 1
test_throughput[release-pg16-50-pipelining_config6-30-100-128-batchable {'max_batch_size': 1, 'protocol_pipelining_mode': 'concurrent-futures'}].pipelining_config.protocol_pipelining_mode: concurrent-futures
test_throughput[release-pg16-50-pipelining_config6-30-100-128-batchable {'max_batch_size': 1, 'protocol_pipelining_mode': 'concurrent-futures'}].counters.time: 0.6611
test_throughput[release-pg16-50-pipelining_config6-30-100-128-batchable {'max_batch_size': 1, 'protocol_pipelining_mode': 'concurrent-futures'}].counters.pageserver_getpage_count: 6,403.0000
test_throughput[release-pg16-50-pipelining_config6-30-100-128-batchable {'max_batch_size': 1, 'protocol_pipelining_mode': 'concurrent-futures'}].counters.pageserver_vectored_get_count: 6,403.0000
test_throughput[release-pg16-50-pipelining_config6-30-100-128-batchable {'max_batch_size': 1, 'protocol_pipelining_mode': 'concurrent-futures'}].counters.compute_getpage_count: 6,403.0000
test_throughput[release-pg16-50-pipelining_config6-30-100-128-batchable {'max_batch_size': 1, 'protocol_pipelining_mode': 'concurrent-futures'}].counters.pageserver_cpu_seconds_total: 0.8180
test_throughput[release-pg16-50-pipelining_config6-30-100-128-batchable {'max_batch_size': 1, 'protocol_pipelining_mode': 'concurrent-futures'}].perfmetric.batching_factor: 1.0000
test_throughput[release-pg16-50-pipelining_config7-30-100-128-batchable {'max_batch_size': 1, 'protocol_pipelining_mode': 'tasks'}].tablesize_mib: 50 MiB
test_throughput[release-pg16-50-pipelining_config7-30-100-128-batchable {'max_batch_size': 1, 'protocol_pipelining_mode': 'tasks'}].pipelining_enabled: 1
test_throughput[release-pg16-50-pipelining_config7-30-100-128-batchable {'max_batch_size': 1, 'protocol_pipelining_mode': 'tasks'}].effective_io_concurrency: 100
test_throughput[release-pg16-50-pipelining_config7-30-100-128-batchable {'max_batch_size': 1, 'protocol_pipelining_mode': 'tasks'}].readhead_buffer_size: 128
test_throughput[release-pg16-50-pipelining_config7-30-100-128-batchable {'max_batch_size': 1, 'protocol_pipelining_mode': 'tasks'}].pipelining_config.max_batch_size: 1
test_throughput[release-pg16-50-pipelining_config7-30-100-128-batchable {'max_batch_size': 1, 'protocol_pipelining_mode': 'tasks'}].pipelining_config.protocol_pipelining_mode: tasks
test_throughput[release-pg16-50-pipelining_config7-30-100-128-batchable {'max_batch_size': 1, 'protocol_pipelining_mode': 'tasks'}].counters.time: 0.7554
test_throughput[release-pg16-50-pipelining_config7-30-100-128-batchable {'max_batch_size': 1, 'protocol_pipelining_mode': 'tasks'}].counters.pageserver_getpage_count: 6,403.0000
test_throughput[release-pg16-50-pipelining_config7-30-100-128-batchable {'max_batch_size': 1, 'protocol_pipelining_mode': 'tasks'}].counters.pageserver_vectored_get_count: 6,403.0000
test_throughput[release-pg16-50-pipelining_config7-30-100-128-batchable {'max_batch_size': 1, 'protocol_pipelining_mode': 'tasks'}].counters.compute_getpage_count: 6,403.0000
test_throughput[release-pg16-50-pipelining_config7-30-100-128-batchable {'max_batch_size': 1, 'protocol_pipelining_mode': 'tasks'}].counters.pageserver_cpu_seconds_total: 0.7308
test_throughput[release-pg16-50-pipelining_config7-30-100-128-batchable {'max_batch_size': 1, 'protocol_pipelining_mode': 'tasks'}].perfmetric.batching_factor: 1.0000
test_throughput[release-pg16-50-pipelining_config8-30-100-128-batchable {'max_batch_size': 2, 'protocol_pipelining_mode': 'concurrent-futures'}].tablesize_mib: 50 MiB
test_throughput[release-pg16-50-pipelining_config8-30-100-128-batchable {'max_batch_size': 2, 'protocol_pipelining_mode': 'concurrent-futures'}].pipelining_enabled: 1
test_throughput[release-pg16-50-pipelining_config8-30-100-128-batchable {'max_batch_size': 2, 'protocol_pipelining_mode': 'concurrent-futures'}].effective_io_concurrency: 100
test_throughput[release-pg16-50-pipelining_config8-30-100-128-batchable {'max_batch_size': 2, 'protocol_pipelining_mode': 'concurrent-futures'}].readhead_buffer_size: 128
test_throughput[release-pg16-50-pipelining_config8-30-100-128-batchable {'max_batch_size': 2, 'protocol_pipelining_mode': 'concurrent-futures'}].pipelining_config.max_batch_size: 2
test_throughput[release-pg16-50-pipelining_config8-30-100-128-batchable {'max_batch_size': 2, 'protocol_pipelining_mode': 'concurrent-futures'}].pipelining_config.protocol_pipelining_mode: concurrent-futures
test_throughput[release-pg16-50-pipelining_config8-30-100-128-batchable {'max_batch_size': 2, 'protocol_pipelining_mode': 'concurrent-futures'}].counters.time: 0.4535
test_throughput[release-pg16-50-pipelining_config8-30-100-128-batchable {'max_batch_size': 2, 'protocol_pipelining_mode': 'concurrent-futures'}].counters.pageserver_getpage_count: 6,402.6364
test_throughput[release-pg16-50-pipelining_config8-30-100-128-batchable {'max_batch_size': 2, 'protocol_pipelining_mode': 'concurrent-futures'}].counters.pageserver_vectored_get_count: 3,206.6515
test_throughput[release-pg16-50-pipelining_config8-30-100-128-batchable {'max_batch_size': 2, 'protocol_pipelining_mode': 'concurrent-futures'}].counters.compute_getpage_count: 6,402.6364
test_throughput[release-pg16-50-pipelining_config8-30-100-128-batchable {'max_batch_size': 2, 'protocol_pipelining_mode': 'concurrent-futures'}].counters.pageserver_cpu_seconds_total: 0.4974
test_throughput[release-pg16-50-pipelining_config8-30-100-128-batchable {'max_batch_size': 2, 'protocol_pipelining_mode': 'concurrent-futures'}].perfmetric.batching_factor: 1.9967
test_throughput[release-pg16-50-pipelining_config9-30-100-128-batchable {'max_batch_size': 2, 'protocol_pipelining_mode': 'tasks'}].tablesize_mib: 50 MiB
test_throughput[release-pg16-50-pipelining_config9-30-100-128-batchable {'max_batch_size': 2, 'protocol_pipelining_mode': 'tasks'}].pipelining_enabled: 1
test_throughput[release-pg16-50-pipelining_config9-30-100-128-batchable {'max_batch_size': 2, 'protocol_pipelining_mode': 'tasks'}].effective_io_concurrency: 100
test_throughput[release-pg16-50-pipelining_config9-30-100-128-batchable {'max_batch_size': 2, 'protocol_pipelining_mode': 'tasks'}].readhead_buffer_size: 128
test_throughput[release-pg16-50-pipelining_config9-30-100-128-batchable {'max_batch_size': 2, 'protocol_pipelining_mode': 'tasks'}].pipelining_config.max_batch_size: 2
test_throughput[release-pg16-50-pipelining_config9-30-100-128-batchable {'max_batch_size': 2, 'protocol_pipelining_mode': 'tasks'}].pipelining_config.protocol_pipelining_mode: tasks
test_throughput[release-pg16-50-pipelining_config9-30-100-128-batchable {'max_batch_size': 2, 'protocol_pipelining_mode': 'tasks'}].counters.time: 0.4630
test_throughput[release-pg16-50-pipelining_config9-30-100-128-batchable {'max_batch_size': 2, 'protocol_pipelining_mode': 'tasks'}].counters.pageserver_getpage_count: 6,402.7656
test_throughput[release-pg16-50-pipelining_config9-30-100-128-batchable {'max_batch_size': 2, 'protocol_pipelining_mode': 'tasks'}].counters.pageserver_vectored_get_count: 3,206.7812
test_throughput[release-pg16-50-pipelining_config9-30-100-128-batchable {'max_batch_size': 2, 'protocol_pipelining_mode': 'tasks'}].counters.compute_getpage_count: 6,402.7656
test_throughput[release-pg16-50-pipelining_config9-30-100-128-batchable {'max_batch_size': 2, 'protocol_pipelining_mode': 'tasks'}].counters.pageserver_cpu_seconds_total: 0.4397
test_throughput[release-pg16-50-pipelining_config9-30-100-128-batchable {'max_batch_size': 2, 'protocol_pipelining_mode': 'tasks'}].perfmetric.batching_factor: 1.9966
test_throughput[release-pg16-50-pipelining_config10-30-100-128-batchable {'max_batch_size': 4, 'protocol_pipelining_mode': 'concurrent-futures'}].tablesize_mib: 50 MiB
test_throughput[release-pg16-50-pipelining_config10-30-100-128-batchable {'max_batch_size': 4, 'protocol_pipelining_mode': 'concurrent-futures'}].pipelining_enabled: 1
test_throughput[release-pg16-50-pipelining_config10-30-100-128-batchable {'max_batch_size': 4, 'protocol_pipelining_mode': 'concurrent-futures'}].effective_io_concurrency: 100
test_throughput[release-pg16-50-pipelining_config10-30-100-128-batchable {'max_batch_size': 4, 'protocol_pipelining_mode': 'concurrent-futures'}].readhead_buffer_size: 128
test_throughput[release-pg16-50-pipelining_config10-30-100-128-batchable {'max_batch_size': 4, 'protocol_pipelining_mode': 'concurrent-futures'}].pipelining_config.max_batch_size: 4
test_throughput[release-pg16-50-pipelining_config10-30-100-128-batchable {'max_batch_size': 4, 'protocol_pipelining_mode': 'concurrent-futures'}].pipelining_config.protocol_pipelining_mode: concurrent-futures
test_throughput[release-pg16-50-pipelining_config10-30-100-128-batchable {'max_batch_size': 4, 'protocol_pipelining_mode': 'concurrent-futures'}].counters.time: 0.3465
test_throughput[release-pg16-50-pipelining_config10-30-100-128-batchable {'max_batch_size': 4, 'protocol_pipelining_mode': 'concurrent-futures'}].counters.pageserver_getpage_count: 6,402.0581
test_throughput[release-pg16-50-pipelining_config10-30-100-128-batchable {'max_batch_size': 4, 'protocol_pipelining_mode': 'concurrent-futures'}].counters.pageserver_vectored_get_count: 1,651.0698
test_throughput[release-pg16-50-pipelining_config10-30-100-128-batchable {'max_batch_size': 4, 'protocol_pipelining_mode': 'concurrent-futures'}].counters.compute_getpage_count: 6,402.0581
test_throughput[release-pg16-50-pipelining_config10-30-100-128-batchable {'max_batch_size': 4, 'protocol_pipelining_mode': 'concurrent-futures'}].counters.pageserver_cpu_seconds_total: 0.3615
test_throughput[release-pg16-50-pipelining_config10-30-100-128-batchable {'max_batch_size': 4, 'protocol_pipelining_mode': 'concurrent-futures'}].perfmetric.batching_factor: 3.8775
test_throughput[release-pg16-50-pipelining_config11-30-100-128-batchable {'max_batch_size': 4, 'protocol_pipelining_mode': 'tasks'}].tablesize_mib: 50 MiB
test_throughput[release-pg16-50-pipelining_config11-30-100-128-batchable {'max_batch_size': 4, 'protocol_pipelining_mode': 'tasks'}].pipelining_enabled: 1
test_throughput[release-pg16-50-pipelining_config11-30-100-128-batchable {'max_batch_size': 4, 'protocol_pipelining_mode': 'tasks'}].effective_io_concurrency: 100
test_throughput[release-pg16-50-pipelining_config11-30-100-128-batchable {'max_batch_size': 4, 'protocol_pipelining_mode': 'tasks'}].readhead_buffer_size: 128
test_throughput[release-pg16-50-pipelining_config11-30-100-128-batchable {'max_batch_size': 4, 'protocol_pipelining_mode': 'tasks'}].pipelining_config.max_batch_size: 4
test_throughput[release-pg16-50-pipelining_config11-30-100-128-batchable {'max_batch_size': 4, 'protocol_pipelining_mode': 'tasks'}].pipelining_config.protocol_pipelining_mode: tasks
test_throughput[release-pg16-50-pipelining_config11-30-100-128-batchable {'max_batch_size': 4, 'protocol_pipelining_mode': 'tasks'}].counters.time: 0.3628
test_throughput[release-pg16-50-pipelining_config11-30-100-128-batchable {'max_batch_size': 4, 'protocol_pipelining_mode': 'tasks'}].counters.pageserver_getpage_count: 6,402.1585
test_throughput[release-pg16-50-pipelining_config11-30-100-128-batchable {'max_batch_size': 4, 'protocol_pipelining_mode': 'tasks'}].counters.pageserver_vectored_get_count: 1,651.1707
test_throughput[release-pg16-50-pipelining_config11-30-100-128-batchable {'max_batch_size': 4, 'protocol_pipelining_mode': 'tasks'}].counters.compute_getpage_count: 6,402.1585
test_throughput[release-pg16-50-pipelining_config11-30-100-128-batchable {'max_batch_size': 4, 'protocol_pipelining_mode': 'tasks'}].counters.pageserver_cpu_seconds_total: 0.3394
test_throughput[release-pg16-50-pipelining_config11-30-100-128-batchable {'max_batch_size': 4, 'protocol_pipelining_mode': 'tasks'}].perfmetric.batching_factor: 3.8773
test_throughput[release-pg16-50-pipelining_config12-30-100-128-batchable {'max_batch_size': 8, 'protocol_pipelining_mode': 'concurrent-futures'}].tablesize_mib: 50 MiB
test_throughput[release-pg16-50-pipelining_config12-30-100-128-batchable {'max_batch_size': 8, 'protocol_pipelining_mode': 'concurrent-futures'}].pipelining_enabled: 1
test_throughput[release-pg16-50-pipelining_config12-30-100-128-batchable {'max_batch_size': 8, 'protocol_pipelining_mode': 'concurrent-futures'}].effective_io_concurrency: 100
test_throughput[release-pg16-50-pipelining_config12-30-100-128-batchable {'max_batch_size': 8, 'protocol_pipelining_mode': 'concurrent-futures'}].readhead_buffer_size: 128
test_throughput[release-pg16-50-pipelining_config12-30-100-128-batchable {'max_batch_size': 8, 'protocol_pipelining_mode': 'concurrent-futures'}].pipelining_config.max_batch_size: 8
test_throughput[release-pg16-50-pipelining_config12-30-100-128-batchable {'max_batch_size': 8, 'protocol_pipelining_mode': 'concurrent-futures'}].pipelining_config.protocol_pipelining_mode: concurrent-futures
test_throughput[release-pg16-50-pipelining_config12-30-100-128-batchable {'max_batch_size': 8, 'protocol_pipelining_mode': 'concurrent-futures'}].counters.time: 0.2961
test_throughput[release-pg16-50-pipelining_config12-30-100-128-batchable {'max_batch_size': 8, 'protocol_pipelining_mode': 'concurrent-futures'}].counters.pageserver_getpage_count: 6,401.7525
test_throughput[release-pg16-50-pipelining_config12-30-100-128-batchable {'max_batch_size': 8, 'protocol_pipelining_mode': 'concurrent-futures'}].counters.pageserver_vectored_get_count: 876.7525
test_throughput[release-pg16-50-pipelining_config12-30-100-128-batchable {'max_batch_size': 8, 'protocol_pipelining_mode': 'concurrent-futures'}].counters.compute_getpage_count: 6,401.7525
test_throughput[release-pg16-50-pipelining_config12-30-100-128-batchable {'max_batch_size': 8, 'protocol_pipelining_mode': 'concurrent-futures'}].counters.pageserver_cpu_seconds_total: 0.2923
test_throughput[release-pg16-50-pipelining_config12-30-100-128-batchable {'max_batch_size': 8, 'protocol_pipelining_mode': 'concurrent-futures'}].perfmetric.batching_factor: 7.3017
test_throughput[release-pg16-50-pipelining_config13-30-100-128-batchable {'max_batch_size': 8, 'protocol_pipelining_mode': 'tasks'}].tablesize_mib: 50 MiB
test_throughput[release-pg16-50-pipelining_config13-30-100-128-batchable {'max_batch_size': 8, 'protocol_pipelining_mode': 'tasks'}].pipelining_enabled: 1
test_throughput[release-pg16-50-pipelining_config13-30-100-128-batchable {'max_batch_size': 8, 'protocol_pipelining_mode': 'tasks'}].effective_io_concurrency: 100
test_throughput[release-pg16-50-pipelining_config13-30-100-128-batchable {'max_batch_size': 8, 'protocol_pipelining_mode': 'tasks'}].readhead_buffer_size: 128
test_throughput[release-pg16-50-pipelining_config13-30-100-128-batchable {'max_batch_size': 8, 'protocol_pipelining_mode': 'tasks'}].pipelining_config.max_batch_size: 8
test_throughput[release-pg16-50-pipelining_config13-30-100-128-batchable {'max_batch_size': 8, 'protocol_pipelining_mode': 'tasks'}].pipelining_config.protocol_pipelining_mode: tasks
test_throughput[release-pg16-50-pipelining_config13-30-100-128-batchable {'max_batch_size': 8, 'protocol_pipelining_mode': 'tasks'}].counters.time: 0.3317
test_throughput[release-pg16-50-pipelining_config13-30-100-128-batchable {'max_batch_size': 8, 'protocol_pipelining_mode': 'tasks'}].counters.pageserver_getpage_count: 6,401.9667
test_throughput[release-pg16-50-pipelining_config13-30-100-128-batchable {'max_batch_size': 8, 'protocol_pipelining_mode': 'tasks'}].counters.pageserver_vectored_get_count: 876.9778
test_throughput[release-pg16-50-pipelining_config13-30-100-128-batchable {'max_batch_size': 8, 'protocol_pipelining_mode': 'tasks'}].counters.compute_getpage_count: 6,401.9667
test_throughput[release-pg16-50-pipelining_config13-30-100-128-batchable {'max_batch_size': 8, 'protocol_pipelining_mode': 'tasks'}].counters.pageserver_cpu_seconds_total: 0.3008
test_throughput[release-pg16-50-pipelining_config13-30-100-128-batchable {'max_batch_size': 8, 'protocol_pipelining_mode': 'tasks'}].perfmetric.batching_factor: 7.3000
test_throughput[release-pg16-50-pipelining_config14-30-100-128-batchable {'max_batch_size': 16, 'protocol_pipelining_mode': 'concurrent-futures'}].tablesize_mib: 50 MiB
test_throughput[release-pg16-50-pipelining_config14-30-100-128-batchable {'max_batch_size': 16, 'protocol_pipelining_mode': 'concurrent-futures'}].pipelining_enabled: 1
test_throughput[release-pg16-50-pipelining_config14-30-100-128-batchable {'max_batch_size': 16, 'protocol_pipelining_mode': 'concurrent-futures'}].effective_io_concurrency: 100
test_throughput[release-pg16-50-pipelining_config14-30-100-128-batchable {'max_batch_size': 16, 'protocol_pipelining_mode': 'concurrent-futures'}].readhead_buffer_size: 128
test_throughput[release-pg16-50-pipelining_config14-30-100-128-batchable {'max_batch_size': 16, 'protocol_pipelining_mode': 'concurrent-futures'}].pipelining_config.max_batch_size: 16
test_throughput[release-pg16-50-pipelining_config14-30-100-128-batchable {'max_batch_size': 16, 'protocol_pipelining_mode': 'concurrent-futures'}].pipelining_config.protocol_pipelining_mode: concurrent-futures
test_throughput[release-pg16-50-pipelining_config14-30-100-128-batchable {'max_batch_size': 16, 'protocol_pipelining_mode': 'concurrent-futures'}].counters.time: 0.2885
test_throughput[release-pg16-50-pipelining_config14-30-100-128-batchable {'max_batch_size': 16, 'protocol_pipelining_mode': 'concurrent-futures'}].counters.pageserver_getpage_count: 6,401.6893
test_throughput[release-pg16-50-pipelining_config14-30-100-128-batchable {'max_batch_size': 16, 'protocol_pipelining_mode': 'concurrent-futures'}].counters.pageserver_vectored_get_count: 490.7087
test_throughput[release-pg16-50-pipelining_config14-30-100-128-batchable {'max_batch_size': 16, 'protocol_pipelining_mode': 'concurrent-futures'}].counters.compute_getpage_count: 6,401.6893
test_throughput[release-pg16-50-pipelining_config14-30-100-128-batchable {'max_batch_size': 16, 'protocol_pipelining_mode': 'concurrent-futures'}].counters.pageserver_cpu_seconds_total: 0.2701
test_throughput[release-pg16-50-pipelining_config14-30-100-128-batchable {'max_batch_size': 16, 'protocol_pipelining_mode': 'concurrent-futures'}].perfmetric.batching_factor: 13.0458
test_throughput[release-pg16-50-pipelining_config15-30-100-128-batchable {'max_batch_size': 16, 'protocol_pipelining_mode': 'tasks'}].tablesize_mib: 50 MiB
test_throughput[release-pg16-50-pipelining_config15-30-100-128-batchable {'max_batch_size': 16, 'protocol_pipelining_mode': 'tasks'}].pipelining_enabled: 1
test_throughput[release-pg16-50-pipelining_config15-30-100-128-batchable {'max_batch_size': 16, 'protocol_pipelining_mode': 'tasks'}].effective_io_concurrency: 100
test_throughput[release-pg16-50-pipelining_config15-30-100-128-batchable {'max_batch_size': 16, 'protocol_pipelining_mode': 'tasks'}].readhead_buffer_size: 128
test_throughput[release-pg16-50-pipelining_config15-30-100-128-batchable {'max_batch_size': 16, 'protocol_pipelining_mode': 'tasks'}].pipelining_config.max_batch_size: 16
test_throughput[release-pg16-50-pipelining_config15-30-100-128-batchable {'max_batch_size': 16, 'protocol_pipelining_mode': 'tasks'}].pipelining_config.protocol_pipelining_mode: tasks
test_throughput[release-pg16-50-pipelining_config15-30-100-128-batchable {'max_batch_size': 16, 'protocol_pipelining_mode': 'tasks'}].counters.time: 0.3042
test_throughput[release-pg16-50-pipelining_config15-30-100-128-batchable {'max_batch_size': 16, 'protocol_pipelining_mode': 'tasks'}].counters.pageserver_getpage_count: 6,401.8061
test_throughput[release-pg16-50-pipelining_config15-30-100-128-batchable {'max_batch_size': 16, 'protocol_pipelining_mode': 'tasks'}].counters.pageserver_vectored_get_count: 490.8163
test_throughput[release-pg16-50-pipelining_config15-30-100-128-batchable {'max_batch_size': 16, 'protocol_pipelining_mode': 'tasks'}].counters.compute_getpage_count: 6,401.8061
test_throughput[release-pg16-50-pipelining_config15-30-100-128-batchable {'max_batch_size': 16, 'protocol_pipelining_mode': 'tasks'}].counters.pageserver_cpu_seconds_total: 0.2699
test_throughput[release-pg16-50-pipelining_config15-30-100-128-batchable {'max_batch_size': 16, 'protocol_pipelining_mode': 'tasks'}].perfmetric.batching_factor: 13.0432
test_throughput[release-pg16-50-pipelining_config16-30-100-128-batchable {'max_batch_size': 32, 'protocol_pipelining_mode': 'concurrent-futures'}].tablesize_mib: 50 MiB
test_throughput[release-pg16-50-pipelining_config16-30-100-128-batchable {'max_batch_size': 32, 'protocol_pipelining_mode': 'concurrent-futures'}].pipelining_enabled: 1
test_throughput[release-pg16-50-pipelining_config16-30-100-128-batchable {'max_batch_size': 32, 'protocol_pipelining_mode': 'concurrent-futures'}].effective_io_concurrency: 100
test_throughput[release-pg16-50-pipelining_config16-30-100-128-batchable {'max_batch_size': 32, 'protocol_pipelining_mode': 'concurrent-futures'}].readhead_buffer_size: 128
test_throughput[release-pg16-50-pipelining_config16-30-100-128-batchable {'max_batch_size': 32, 'protocol_pipelining_mode': 'concurrent-futures'}].pipelining_config.max_batch_size: 32
test_throughput[release-pg16-50-pipelining_config16-30-100-128-batchable {'max_batch_size': 32, 'protocol_pipelining_mode': 'concurrent-futures'}].pipelining_config.protocol_pipelining_mode: concurrent-futures
test_throughput[release-pg16-50-pipelining_config16-30-100-128-batchable {'max_batch_size': 32, 'protocol_pipelining_mode': 'concurrent-futures'}].counters.time: 0.2704
test_throughput[release-pg16-50-pipelining_config16-30-100-128-batchable {'max_batch_size': 32, 'protocol_pipelining_mode': 'concurrent-futures'}].counters.pageserver_getpage_count: 6,401.6091
test_throughput[release-pg16-50-pipelining_config16-30-100-128-batchable {'max_batch_size': 32, 'protocol_pipelining_mode': 'concurrent-futures'}].counters.pageserver_vectored_get_count: 297.6182
test_throughput[release-pg16-50-pipelining_config16-30-100-128-batchable {'max_batch_size': 32, 'protocol_pipelining_mode': 'concurrent-futures'}].counters.compute_getpage_count: 6,401.6091
test_throughput[release-pg16-50-pipelining_config16-30-100-128-batchable {'max_batch_size': 32, 'protocol_pipelining_mode': 'concurrent-futures'}].counters.pageserver_cpu_seconds_total: 0.2476
test_throughput[release-pg16-50-pipelining_config16-30-100-128-batchable {'max_batch_size': 32, 'protocol_pipelining_mode': 'concurrent-futures'}].perfmetric.batching_factor: 21.5095
test_throughput[release-pg16-50-pipelining_config17-30-100-128-batchable {'max_batch_size': 32, 'protocol_pipelining_mode': 'tasks'}].tablesize_mib: 50 MiB
test_throughput[release-pg16-50-pipelining_config17-30-100-128-batchable {'max_batch_size': 32, 'protocol_pipelining_mode': 'tasks'}].pipelining_enabled: 1
test_throughput[release-pg16-50-pipelining_config17-30-100-128-batchable {'max_batch_size': 32, 'protocol_pipelining_mode': 'tasks'}].effective_io_concurrency: 100
test_throughput[release-pg16-50-pipelining_config17-30-100-128-batchable {'max_batch_size': 32, 'protocol_pipelining_mode': 'tasks'}].readhead_buffer_size: 128
test_throughput[release-pg16-50-pipelining_config17-30-100-128-batchable {'max_batch_size': 32, 'protocol_pipelining_mode': 'tasks'}].pipelining_config.max_batch_size: 32
test_throughput[release-pg16-50-pipelining_config17-30-100-128-batchable {'max_batch_size': 32, 'protocol_pipelining_mode': 'tasks'}].pipelining_config.protocol_pipelining_mode: tasks
test_throughput[release-pg16-50-pipelining_config17-30-100-128-batchable {'max_batch_size': 32, 'protocol_pipelining_mode': 'tasks'}].counters.time: 0.2706
test_throughput[release-pg16-50-pipelining_config17-30-100-128-batchable {'max_batch_size': 32, 'protocol_pipelining_mode': 'tasks'}].counters.pageserver_getpage_count: 6,401.5946
test_throughput[release-pg16-50-pipelining_config17-30-100-128-batchable {'max_batch_size': 32, 'protocol_pipelining_mode': 'tasks'}].counters.pageserver_vectored_get_count: 297.5946
test_throughput[release-pg16-50-pipelining_config17-30-100-128-batchable {'max_batch_size': 32, 'protocol_pipelining_mode': 'tasks'}].counters.compute_getpage_count: 6,401.5946
test_throughput[release-pg16-50-pipelining_config17-30-100-128-batchable {'max_batch_size': 32, 'protocol_pipelining_mode': 'tasks'}].counters.pageserver_cpu_seconds_total: 0.2425
test_throughput[release-pg16-50-pipelining_config17-30-100-128-batchable {'max_batch_size': 32, 'protocol_pipelining_mode': 'tasks'}].perfmetric.batching_factor: 21.5111
test_latency[release-pg16-None-None].latency_mean: 0.136 ms
test_latency[release-pg16-None-None].latency_percentiles.p95: 0.172 ms
test_latency[release-pg16-None-None].latency_percentiles.p99: 0.194 ms
test_latency[release-pg16-None-None].latency_percentiles.p99.9: 0.319 ms
test_latency[release-pg16-None-None].latency_percentiles.p99.99: 0.637 ms
test_latency[release-pg16-pipelining_config1-{'max_batch_size': 1, 'protocol_pipelining_mode': 'concurrent-futures'}].latency_mean: 0.121 ms
test_latency[release-pg16-pipelining_config1-{'max_batch_size': 1, 'protocol_pipelining_mode': 'concurrent-futures'}].latency_percentiles.p95: 0.150 ms
test_latency[release-pg16-pipelining_config1-{'max_batch_size': 1, 'protocol_pipelining_mode': 'concurrent-futures'}].latency_percentiles.p99: 0.168 ms
test_latency[release-pg16-pipelining_config1-{'max_batch_size': 1, 'protocol_pipelining_mode': 'concurrent-futures'}].latency_percentiles.p99.9: 0.317 ms
test_latency[release-pg16-pipelining_config1-{'max_batch_size': 1, 'protocol_pipelining_mode': 'concurrent-futures'}].latency_percentiles.p99.99: 0.607 ms
test_latency[release-pg16-pipelining_config2-{'max_batch_size': 1, 'protocol_pipelining_mode': 'tasks'}].latency_mean: 0.124 ms
test_latency[release-pg16-pipelining_config2-{'max_batch_size': 1, 'protocol_pipelining_mode': 'tasks'}].latency_percentiles.p95: 0.161 ms
test_latency[release-pg16-pipelining_config2-{'max_batch_size': 1, 'protocol_pipelining_mode': 'tasks'}].latency_percentiles.p99: 0.170 ms
test_latency[release-pg16-pipelining_config2-{'max_batch_size': 1, 'protocol_pipelining_mode': 'tasks'}].latency_percentiles.p99.9: 0.294 ms
test_latency[release-pg16-pipelining_config2-{'max_batch_size': 1, 'protocol_pipelining_mode': 'tasks'}].latency_percentiles.p99.99: 0.592 ms
test_latency[release-pg16-pipelining_config3-{'max_batch_size': 32, 'protocol_pipelining_mode': 'concurrent-futures'}].latency_mean: 0.122 ms
test_latency[release-pg16-pipelining_config3-{'max_batch_size': 32, 'protocol_pipelining_mode': 'concurrent-futures'}].latency_percentiles.p95: 0.157 ms
test_latency[release-pg16-pipelining_config3-{'max_batch_size': 32, 'protocol_pipelining_mode': 'concurrent-futures'}].latency_percentiles.p99: 0.170 ms
test_latency[release-pg16-pipelining_config3-{'max_batch_size': 32, 'protocol_pipelining_mode': 'concurrent-futures'}].latency_percentiles.p99.9: 0.267 ms
test_latency[release-pg16-pipelining_config3-{'max_batch_size': 32, 'protocol_pipelining_mode': 'concurrent-futures'}].latency_percentiles.p99.99: 0.606 ms
test_latency[release-pg16-pipelining_config4-{'max_batch_size': 32, 'protocol_pipelining_mode': 'tasks'}].latency_mean: 0.125 ms
test_latency[release-pg16-pipelining_config4-{'max_batch_size': 32, 'protocol_pipelining_mode': 'tasks'}].latency_percentiles.p95: 0.161 ms
test_latency[release-pg16-pipelining_config4-{'max_batch_size': 32, 'protocol_pipelining_mode': 'tasks'}].latency_percentiles.p99: 0.170 ms
test_latency[release-pg16-pipelining_config4-{'max_batch_size': 32, 'protocol_pipelining_mode': 'tasks'}].latency_percentiles.p99.9: 0.287 ms
test_latency[release-pg16-pipelining_config4-{'max_batch_size': 32, 'protocol_pipelining_mode': 'tasks'}].latency_percentiles.p99.99: 0.610 ms
2024-11-29 12:08:05 +01:00
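For reference, the perfmetric.batching_factor values reported above appear to be the ratio of pageserver_getpage_count to pageserver_vectored_get_count, i.e. the average number of getpage requests folded into one vectored get. A minimal Python sketch (a hypothetical helper, not part of the benchmark harness) that reproduces the reported value for the pipelining_config14 run above:

def batching_factor(getpage_count: float, vectored_get_count: float) -> float:
    # Average number of compute getpage requests served per vectored get
    # on the pageserver (higher means more effective batching).
    return getpage_count / vectored_get_count

# Counters taken from the max_batch_size=16 / concurrent-futures run above.
print(round(batching_factor(6401.6893, 490.7087), 4))  # prints 13.0458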
Christian Schwarz
9a5611a5ef merge reader&batcher stages, update docs 2024-11-29 11:39:16 +01:00
Christian Schwarz
f44bfcc7f4 benchmark on hetzner runner
-------------------------------------------------------------------------------------------------------------------- Benchmark results ---------------------------------------------------------------------------------------------------------------------
test_throughput[release-pg16-50-None-30-1-128-not batchable None].tablesize_mib: 50 MiB
test_throughput[release-pg16-50-None-30-1-128-not batchable None].pipelining_enabled: 0
test_throughput[release-pg16-50-None-30-1-128-not batchable None].effective_io_concurrency: 1
test_throughput[release-pg16-50-None-30-1-128-not batchable None].readhead_buffer_size: 128
test_throughput[release-pg16-50-None-30-1-128-not batchable None].counters.time: 0.8905
test_throughput[release-pg16-50-None-30-1-128-not batchable None].counters.pageserver_getpage_count: 6,403.0000
test_throughput[release-pg16-50-None-30-1-128-not batchable None].counters.pageserver_vectored_get_count: 6,403.0000
test_throughput[release-pg16-50-None-30-1-128-not batchable None].counters.compute_getpage_count: 6,403.0000
test_throughput[release-pg16-50-None-30-1-128-not batchable None].counters.pageserver_cpu_seconds_total: 0.8585
test_throughput[release-pg16-50-None-30-1-128-not batchable None].perfmetric.batching_factor: 1.0000
test_throughput[release-pg16-50-pipelining_config1-30-1-128-not batchable {'max_batch_size': 1, 'protocol_pipelining_mode': 'concurrent-futures'}].tablesize_mib: 50 MiB
test_throughput[release-pg16-50-pipelining_config1-30-1-128-not batchable {'max_batch_size': 1, 'protocol_pipelining_mode': 'concurrent-futures'}].pipelining_enabled: 1
test_throughput[release-pg16-50-pipelining_config1-30-1-128-not batchable {'max_batch_size': 1, 'protocol_pipelining_mode': 'concurrent-futures'}].effective_io_concurrency: 1
test_throughput[release-pg16-50-pipelining_config1-30-1-128-not batchable {'max_batch_size': 1, 'protocol_pipelining_mode': 'concurrent-futures'}].readhead_buffer_size: 128
test_throughput[release-pg16-50-pipelining_config1-30-1-128-not batchable {'max_batch_size': 1, 'protocol_pipelining_mode': 'concurrent-futures'}].pipelining_config.max_batch_size: 1
test_throughput[release-pg16-50-pipelining_config1-30-1-128-not batchable {'max_batch_size': 1, 'protocol_pipelining_mode': 'concurrent-futures'}].pipelining_config.protocol_pipelining_mode: concurrent-futures
test_throughput[release-pg16-50-pipelining_config1-30-1-128-not batchable {'max_batch_size': 1, 'protocol_pipelining_mode': 'concurrent-futures'}].counters.time: 0.8965
test_throughput[release-pg16-50-pipelining_config1-30-1-128-not batchable {'max_batch_size': 1, 'protocol_pipelining_mode': 'concurrent-futures'}].counters.pageserver_getpage_count: 6,403.0000
test_throughput[release-pg16-50-pipelining_config1-30-1-128-not batchable {'max_batch_size': 1, 'protocol_pipelining_mode': 'concurrent-futures'}].counters.pageserver_vectored_get_count: 6,403.0000
test_throughput[release-pg16-50-pipelining_config1-30-1-128-not batchable {'max_batch_size': 1, 'protocol_pipelining_mode': 'concurrent-futures'}].counters.compute_getpage_count: 6,403.0000
test_throughput[release-pg16-50-pipelining_config1-30-1-128-not batchable {'max_batch_size': 1, 'protocol_pipelining_mode': 'concurrent-futures'}].counters.pageserver_cpu_seconds_total: 0.8694
test_throughput[release-pg16-50-pipelining_config1-30-1-128-not batchable {'max_batch_size': 1, 'protocol_pipelining_mode': 'concurrent-futures'}].perfmetric.batching_factor: 1.0000
test_throughput[release-pg16-50-pipelining_config2-30-1-128-not batchable {'max_batch_size': 1, 'protocol_pipelining_mode': 'tasks'}].tablesize_mib: 50 MiB
test_throughput[release-pg16-50-pipelining_config2-30-1-128-not batchable {'max_batch_size': 1, 'protocol_pipelining_mode': 'tasks'}].pipelining_enabled: 1
test_throughput[release-pg16-50-pipelining_config2-30-1-128-not batchable {'max_batch_size': 1, 'protocol_pipelining_mode': 'tasks'}].effective_io_concurrency: 1
test_throughput[release-pg16-50-pipelining_config2-30-1-128-not batchable {'max_batch_size': 1, 'protocol_pipelining_mode': 'tasks'}].readhead_buffer_size: 128
test_throughput[release-pg16-50-pipelining_config2-30-1-128-not batchable {'max_batch_size': 1, 'protocol_pipelining_mode': 'tasks'}].pipelining_config.max_batch_size: 1
test_throughput[release-pg16-50-pipelining_config2-30-1-128-not batchable {'max_batch_size': 1, 'protocol_pipelining_mode': 'tasks'}].pipelining_config.protocol_pipelining_mode: tasks
test_throughput[release-pg16-50-pipelining_config2-30-1-128-not batchable {'max_batch_size': 1, 'protocol_pipelining_mode': 'tasks'}].counters.time: 0.9287
test_throughput[release-pg16-50-pipelining_config2-30-1-128-not batchable {'max_batch_size': 1, 'protocol_pipelining_mode': 'tasks'}].counters.pageserver_getpage_count: 6,403.0000
test_throughput[release-pg16-50-pipelining_config2-30-1-128-not batchable {'max_batch_size': 1, 'protocol_pipelining_mode': 'tasks'}].counters.pageserver_vectored_get_count: 6,403.0000
test_throughput[release-pg16-50-pipelining_config2-30-1-128-not batchable {'max_batch_size': 1, 'protocol_pipelining_mode': 'tasks'}].counters.compute_getpage_count: 6,403.0000
test_throughput[release-pg16-50-pipelining_config2-30-1-128-not batchable {'max_batch_size': 1, 'protocol_pipelining_mode': 'tasks'}].counters.pageserver_cpu_seconds_total: 0.7891
test_throughput[release-pg16-50-pipelining_config2-30-1-128-not batchable {'max_batch_size': 1, 'protocol_pipelining_mode': 'tasks'}].perfmetric.batching_factor: 1.0000
test_throughput[release-pg16-50-pipelining_config3-30-1-128-not batchable {'max_batch_size': 32, 'protocol_pipelining_mode': 'concurrent-futures'}].tablesize_mib: 50 MiB
test_throughput[release-pg16-50-pipelining_config3-30-1-128-not batchable {'max_batch_size': 32, 'protocol_pipelining_mode': 'concurrent-futures'}].pipelining_enabled: 1
test_throughput[release-pg16-50-pipelining_config3-30-1-128-not batchable {'max_batch_size': 32, 'protocol_pipelining_mode': 'concurrent-futures'}].effective_io_concurrency: 1
test_throughput[release-pg16-50-pipelining_config3-30-1-128-not batchable {'max_batch_size': 32, 'protocol_pipelining_mode': 'concurrent-futures'}].readhead_buffer_size: 128
test_throughput[release-pg16-50-pipelining_config3-30-1-128-not batchable {'max_batch_size': 32, 'protocol_pipelining_mode': 'concurrent-futures'}].pipelining_config.max_batch_size: 32
test_throughput[release-pg16-50-pipelining_config3-30-1-128-not batchable {'max_batch_size': 32, 'protocol_pipelining_mode': 'concurrent-futures'}].pipelining_config.protocol_pipelining_mode: concurrent-futures
test_throughput[release-pg16-50-pipelining_config3-30-1-128-not batchable {'max_batch_size': 32, 'protocol_pipelining_mode': 'concurrent-futures'}].counters.time: 0.8859
test_throughput[release-pg16-50-pipelining_config3-30-1-128-not batchable {'max_batch_size': 32, 'protocol_pipelining_mode': 'concurrent-futures'}].counters.pageserver_getpage_count: 6,403.0000
test_throughput[release-pg16-50-pipelining_config3-30-1-128-not batchable {'max_batch_size': 32, 'protocol_pipelining_mode': 'concurrent-futures'}].counters.pageserver_vectored_get_count: 6,403.0000
test_throughput[release-pg16-50-pipelining_config3-30-1-128-not batchable {'max_batch_size': 32, 'protocol_pipelining_mode': 'concurrent-futures'}].counters.compute_getpage_count: 6,403.0000
test_throughput[release-pg16-50-pipelining_config3-30-1-128-not batchable {'max_batch_size': 32, 'protocol_pipelining_mode': 'concurrent-futures'}].counters.pageserver_cpu_seconds_total: 0.8582
test_throughput[release-pg16-50-pipelining_config3-30-1-128-not batchable {'max_batch_size': 32, 'protocol_pipelining_mode': 'concurrent-futures'}].perfmetric.batching_factor: 1.0000
test_throughput[release-pg16-50-pipelining_config4-30-1-128-not batchable {'max_batch_size': 32, 'protocol_pipelining_mode': 'tasks'}].tablesize_mib: 50 MiB
test_throughput[release-pg16-50-pipelining_config4-30-1-128-not batchable {'max_batch_size': 32, 'protocol_pipelining_mode': 'tasks'}].pipelining_enabled: 1
test_throughput[release-pg16-50-pipelining_config4-30-1-128-not batchable {'max_batch_size': 32, 'protocol_pipelining_mode': 'tasks'}].effective_io_concurrency: 1
test_throughput[release-pg16-50-pipelining_config4-30-1-128-not batchable {'max_batch_size': 32, 'protocol_pipelining_mode': 'tasks'}].readhead_buffer_size: 128
test_throughput[release-pg16-50-pipelining_config4-30-1-128-not batchable {'max_batch_size': 32, 'protocol_pipelining_mode': 'tasks'}].pipelining_config.max_batch_size: 32
test_throughput[release-pg16-50-pipelining_config4-30-1-128-not batchable {'max_batch_size': 32, 'protocol_pipelining_mode': 'tasks'}].pipelining_config.protocol_pipelining_mode: tasks
test_throughput[release-pg16-50-pipelining_config4-30-1-128-not batchable {'max_batch_size': 32, 'protocol_pipelining_mode': 'tasks'}].counters.time: 0.9158
test_throughput[release-pg16-50-pipelining_config4-30-1-128-not batchable {'max_batch_size': 32, 'protocol_pipelining_mode': 'tasks'}].counters.pageserver_getpage_count: 6,403.0000
test_throughput[release-pg16-50-pipelining_config4-30-1-128-not batchable {'max_batch_size': 32, 'protocol_pipelining_mode': 'tasks'}].counters.pageserver_vectored_get_count: 6,403.0000
test_throughput[release-pg16-50-pipelining_config4-30-1-128-not batchable {'max_batch_size': 32, 'protocol_pipelining_mode': 'tasks'}].counters.compute_getpage_count: 6,403.0000
test_throughput[release-pg16-50-pipelining_config4-30-1-128-not batchable {'max_batch_size': 32, 'protocol_pipelining_mode': 'tasks'}].counters.pageserver_cpu_seconds_total: 0.7703
test_throughput[release-pg16-50-pipelining_config4-30-1-128-not batchable {'max_batch_size': 32, 'protocol_pipelining_mode': 'tasks'}].perfmetric.batching_factor: 1.0000
test_throughput[release-pg16-50-None-30-100-128-batchable None].tablesize_mib: 50 MiB
test_throughput[release-pg16-50-None-30-100-128-batchable None].pipelining_enabled: 0
test_throughput[release-pg16-50-None-30-100-128-batchable None].effective_io_concurrency: 100
test_throughput[release-pg16-50-None-30-100-128-batchable None].readhead_buffer_size: 128
test_throughput[release-pg16-50-None-30-100-128-batchable None].counters.time: 0.2526
test_throughput[release-pg16-50-None-30-100-128-batchable None].counters.pageserver_getpage_count: 6,401.5000
test_throughput[release-pg16-50-None-30-100-128-batchable None].counters.pageserver_vectored_get_count: 307.8475
test_throughput[release-pg16-50-None-30-100-128-batchable None].counters.compute_getpage_count: 6,401.5000
test_throughput[release-pg16-50-None-30-100-128-batchable None].counters.pageserver_cpu_seconds_total: 0.2999
test_throughput[release-pg16-50-None-30-100-128-batchable None].perfmetric.batching_factor: 20.7944
test_throughput[release-pg16-50-pipelining_config6-30-100-128-batchable {'max_batch_size': 1, 'protocol_pipelining_mode': 'concurrent-futures'}].tablesize_mib: 50 MiB
test_throughput[release-pg16-50-pipelining_config6-30-100-128-batchable {'max_batch_size': 1, 'protocol_pipelining_mode': 'concurrent-futures'}].pipelining_enabled: 1
test_throughput[release-pg16-50-pipelining_config6-30-100-128-batchable {'max_batch_size': 1, 'protocol_pipelining_mode': 'concurrent-futures'}].effective_io_concurrency: 100
test_throughput[release-pg16-50-pipelining_config6-30-100-128-batchable {'max_batch_size': 1, 'protocol_pipelining_mode': 'concurrent-futures'}].readhead_buffer_size: 128
test_throughput[release-pg16-50-pipelining_config6-30-100-128-batchable {'max_batch_size': 1, 'protocol_pipelining_mode': 'concurrent-futures'}].pipelining_config.max_batch_size: 1
test_throughput[release-pg16-50-pipelining_config6-30-100-128-batchable {'max_batch_size': 1, 'protocol_pipelining_mode': 'concurrent-futures'}].pipelining_config.protocol_pipelining_mode: concurrent-futures
test_throughput[release-pg16-50-pipelining_config6-30-100-128-batchable {'max_batch_size': 1, 'protocol_pipelining_mode': 'concurrent-futures'}].counters.time: 0.6182
test_throughput[release-pg16-50-pipelining_config6-30-100-128-batchable {'max_batch_size': 1, 'protocol_pipelining_mode': 'concurrent-futures'}].counters.pageserver_getpage_count: 6,403.0000
test_throughput[release-pg16-50-pipelining_config6-30-100-128-batchable {'max_batch_size': 1, 'protocol_pipelining_mode': 'concurrent-futures'}].counters.pageserver_vectored_get_count: 6,403.0000
test_throughput[release-pg16-50-pipelining_config6-30-100-128-batchable {'max_batch_size': 1, 'protocol_pipelining_mode': 'concurrent-futures'}].counters.compute_getpage_count: 6,403.0000
test_throughput[release-pg16-50-pipelining_config6-30-100-128-batchable {'max_batch_size': 1, 'protocol_pipelining_mode': 'concurrent-futures'}].counters.pageserver_cpu_seconds_total: 0.7483
test_throughput[release-pg16-50-pipelining_config6-30-100-128-batchable {'max_batch_size': 1, 'protocol_pipelining_mode': 'concurrent-futures'}].perfmetric.batching_factor: 1.0000
test_throughput[release-pg16-50-pipelining_config7-30-100-128-batchable {'max_batch_size': 1, 'protocol_pipelining_mode': 'tasks'}].tablesize_mib: 50 MiB
test_throughput[release-pg16-50-pipelining_config7-30-100-128-batchable {'max_batch_size': 1, 'protocol_pipelining_mode': 'tasks'}].pipelining_enabled: 1
test_throughput[release-pg16-50-pipelining_config7-30-100-128-batchable {'max_batch_size': 1, 'protocol_pipelining_mode': 'tasks'}].effective_io_concurrency: 100
test_throughput[release-pg16-50-pipelining_config7-30-100-128-batchable {'max_batch_size': 1, 'protocol_pipelining_mode': 'tasks'}].readhead_buffer_size: 128
test_throughput[release-pg16-50-pipelining_config7-30-100-128-batchable {'max_batch_size': 1, 'protocol_pipelining_mode': 'tasks'}].pipelining_config.max_batch_size: 1
test_throughput[release-pg16-50-pipelining_config7-30-100-128-batchable {'max_batch_size': 1, 'protocol_pipelining_mode': 'tasks'}].pipelining_config.protocol_pipelining_mode: tasks
test_throughput[release-pg16-50-pipelining_config7-30-100-128-batchable {'max_batch_size': 1, 'protocol_pipelining_mode': 'tasks'}].counters.time: 0.6925
test_throughput[release-pg16-50-pipelining_config7-30-100-128-batchable {'max_batch_size': 1, 'protocol_pipelining_mode': 'tasks'}].counters.pageserver_getpage_count: 6,403.0000
test_throughput[release-pg16-50-pipelining_config7-30-100-128-batchable {'max_batch_size': 1, 'protocol_pipelining_mode': 'tasks'}].counters.pageserver_vectored_get_count: 6,403.0000
test_throughput[release-pg16-50-pipelining_config7-30-100-128-batchable {'max_batch_size': 1, 'protocol_pipelining_mode': 'tasks'}].counters.compute_getpage_count: 6,403.0000
test_throughput[release-pg16-50-pipelining_config7-30-100-128-batchable {'max_batch_size': 1, 'protocol_pipelining_mode': 'tasks'}].counters.pageserver_cpu_seconds_total: 0.6863
test_throughput[release-pg16-50-pipelining_config7-30-100-128-batchable {'max_batch_size': 1, 'protocol_pipelining_mode': 'tasks'}].perfmetric.batching_factor: 1.0000
test_throughput[release-pg16-50-pipelining_config8-30-100-128-batchable {'max_batch_size': 2, 'protocol_pipelining_mode': 'concurrent-futures'}].tablesize_mib: 50 MiB
test_throughput[release-pg16-50-pipelining_config8-30-100-128-batchable {'max_batch_size': 2, 'protocol_pipelining_mode': 'concurrent-futures'}].pipelining_enabled: 1
test_throughput[release-pg16-50-pipelining_config8-30-100-128-batchable {'max_batch_size': 2, 'protocol_pipelining_mode': 'concurrent-futures'}].effective_io_concurrency: 100
test_throughput[release-pg16-50-pipelining_config8-30-100-128-batchable {'max_batch_size': 2, 'protocol_pipelining_mode': 'concurrent-futures'}].readhead_buffer_size: 128
test_throughput[release-pg16-50-pipelining_config8-30-100-128-batchable {'max_batch_size': 2, 'protocol_pipelining_mode': 'concurrent-futures'}].pipelining_config.max_batch_size: 2
test_throughput[release-pg16-50-pipelining_config8-30-100-128-batchable {'max_batch_size': 2, 'protocol_pipelining_mode': 'concurrent-futures'}].pipelining_config.protocol_pipelining_mode: concurrent-futures
test_throughput[release-pg16-50-pipelining_config8-30-100-128-batchable {'max_batch_size': 2, 'protocol_pipelining_mode': 'concurrent-futures'}].counters.time: 0.4250
test_throughput[release-pg16-50-pipelining_config8-30-100-128-batchable {'max_batch_size': 2, 'protocol_pipelining_mode': 'concurrent-futures'}].counters.pageserver_getpage_count: 6,402.5286
test_throughput[release-pg16-50-pipelining_config8-30-100-128-batchable {'max_batch_size': 2, 'protocol_pipelining_mode': 'concurrent-futures'}].counters.pageserver_vectored_get_count: 3,207.5714
test_throughput[release-pg16-50-pipelining_config8-30-100-128-batchable {'max_batch_size': 2, 'protocol_pipelining_mode': 'concurrent-futures'}].counters.compute_getpage_count: 6,402.5286
test_throughput[release-pg16-50-pipelining_config8-30-100-128-batchable {'max_batch_size': 2, 'protocol_pipelining_mode': 'concurrent-futures'}].counters.pageserver_cpu_seconds_total: 0.5241
test_throughput[release-pg16-50-pipelining_config8-30-100-128-batchable {'max_batch_size': 2, 'protocol_pipelining_mode': 'concurrent-futures'}].perfmetric.batching_factor: 1.9961
test_throughput[release-pg16-50-pipelining_config9-30-100-128-batchable {'max_batch_size': 2, 'protocol_pipelining_mode': 'tasks'}].tablesize_mib: 50 MiB
test_throughput[release-pg16-50-pipelining_config9-30-100-128-batchable {'max_batch_size': 2, 'protocol_pipelining_mode': 'tasks'}].pipelining_enabled: 1
test_throughput[release-pg16-50-pipelining_config9-30-100-128-batchable {'max_batch_size': 2, 'protocol_pipelining_mode': 'tasks'}].effective_io_concurrency: 100
test_throughput[release-pg16-50-pipelining_config9-30-100-128-batchable {'max_batch_size': 2, 'protocol_pipelining_mode': 'tasks'}].readhead_buffer_size: 128
test_throughput[release-pg16-50-pipelining_config9-30-100-128-batchable {'max_batch_size': 2, 'protocol_pipelining_mode': 'tasks'}].pipelining_config.max_batch_size: 2
test_throughput[release-pg16-50-pipelining_config9-30-100-128-batchable {'max_batch_size': 2, 'protocol_pipelining_mode': 'tasks'}].pipelining_config.protocol_pipelining_mode: tasks
test_throughput[release-pg16-50-pipelining_config9-30-100-128-batchable {'max_batch_size': 2, 'protocol_pipelining_mode': 'tasks'}].counters.time: 0.4981
test_throughput[release-pg16-50-pipelining_config9-30-100-128-batchable {'max_batch_size': 2, 'protocol_pipelining_mode': 'tasks'}].counters.pageserver_getpage_count: 6,402.7500
test_throughput[release-pg16-50-pipelining_config9-30-100-128-batchable {'max_batch_size': 2, 'protocol_pipelining_mode': 'tasks'}].counters.pageserver_vectored_get_count: 3,300.7500
test_throughput[release-pg16-50-pipelining_config9-30-100-128-batchable {'max_batch_size': 2, 'protocol_pipelining_mode': 'tasks'}].counters.compute_getpage_count: 6,402.7500
test_throughput[release-pg16-50-pipelining_config9-30-100-128-batchable {'max_batch_size': 2, 'protocol_pipelining_mode': 'tasks'}].counters.pageserver_cpu_seconds_total: 0.4903
test_throughput[release-pg16-50-pipelining_config9-30-100-128-batchable {'max_batch_size': 2, 'protocol_pipelining_mode': 'tasks'}].perfmetric.batching_factor: 1.9398
test_throughput[release-pg16-50-pipelining_config10-30-100-128-batchable {'max_batch_size': 4, 'protocol_pipelining_mode': 'concurrent-futures'}].tablesize_mib: 50 MiB
test_throughput[release-pg16-50-pipelining_config10-30-100-128-batchable {'max_batch_size': 4, 'protocol_pipelining_mode': 'concurrent-futures'}].pipelining_enabled: 1
test_throughput[release-pg16-50-pipelining_config10-30-100-128-batchable {'max_batch_size': 4, 'protocol_pipelining_mode': 'concurrent-futures'}].effective_io_concurrency: 100
test_throughput[release-pg16-50-pipelining_config10-30-100-128-batchable {'max_batch_size': 4, 'protocol_pipelining_mode': 'concurrent-futures'}].readhead_buffer_size: 128
test_throughput[release-pg16-50-pipelining_config10-30-100-128-batchable {'max_batch_size': 4, 'protocol_pipelining_mode': 'concurrent-futures'}].pipelining_config.max_batch_size: 4
test_throughput[release-pg16-50-pipelining_config10-30-100-128-batchable {'max_batch_size': 4, 'protocol_pipelining_mode': 'concurrent-futures'}].pipelining_config.protocol_pipelining_mode: concurrent-futures
test_throughput[release-pg16-50-pipelining_config10-30-100-128-batchable {'max_batch_size': 4, 'protocol_pipelining_mode': 'concurrent-futures'}].counters.time: 0.3438
test_throughput[release-pg16-50-pipelining_config10-30-100-128-batchable {'max_batch_size': 4, 'protocol_pipelining_mode': 'concurrent-futures'}].counters.pageserver_getpage_count: 6,402.0345
test_throughput[release-pg16-50-pipelining_config10-30-100-128-batchable {'max_batch_size': 4, 'protocol_pipelining_mode': 'concurrent-futures'}].counters.pageserver_vectored_get_count: 1,660.0230
test_throughput[release-pg16-50-pipelining_config10-30-100-128-batchable {'max_batch_size': 4, 'protocol_pipelining_mode': 'concurrent-futures'}].counters.compute_getpage_count: 6,402.0345
test_throughput[release-pg16-50-pipelining_config10-30-100-128-batchable {'max_batch_size': 4, 'protocol_pipelining_mode': 'concurrent-futures'}].counters.pageserver_cpu_seconds_total: 0.4123
test_throughput[release-pg16-50-pipelining_config10-30-100-128-batchable {'max_batch_size': 4, 'protocol_pipelining_mode': 'concurrent-futures'}].perfmetric.batching_factor: 3.8566
test_throughput[release-pg16-50-pipelining_config11-30-100-128-batchable {'max_batch_size': 4, 'protocol_pipelining_mode': 'tasks'}].tablesize_mib: 50 MiB
test_throughput[release-pg16-50-pipelining_config11-30-100-128-batchable {'max_batch_size': 4, 'protocol_pipelining_mode': 'tasks'}].pipelining_enabled: 1
test_throughput[release-pg16-50-pipelining_config11-30-100-128-batchable {'max_batch_size': 4, 'protocol_pipelining_mode': 'tasks'}].effective_io_concurrency: 100
test_throughput[release-pg16-50-pipelining_config11-30-100-128-batchable {'max_batch_size': 4, 'protocol_pipelining_mode': 'tasks'}].readhead_buffer_size: 128
test_throughput[release-pg16-50-pipelining_config11-30-100-128-batchable {'max_batch_size': 4, 'protocol_pipelining_mode': 'tasks'}].pipelining_config.max_batch_size: 4
test_throughput[release-pg16-50-pipelining_config11-30-100-128-batchable {'max_batch_size': 4, 'protocol_pipelining_mode': 'tasks'}].pipelining_config.protocol_pipelining_mode: tasks
test_throughput[release-pg16-50-pipelining_config11-30-100-128-batchable {'max_batch_size': 4, 'protocol_pipelining_mode': 'tasks'}].counters.time: 0.3766
test_throughput[release-pg16-50-pipelining_config11-30-100-128-batchable {'max_batch_size': 4, 'protocol_pipelining_mode': 'tasks'}].counters.pageserver_getpage_count: 6,402.2405
test_throughput[release-pg16-50-pipelining_config11-30-100-128-batchable {'max_batch_size': 4, 'protocol_pipelining_mode': 'tasks'}].counters.pageserver_vectored_get_count: 1,752.2405
test_throughput[release-pg16-50-pipelining_config11-30-100-128-batchable {'max_batch_size': 4, 'protocol_pipelining_mode': 'tasks'}].counters.compute_getpage_count: 6,402.2405
test_throughput[release-pg16-50-pipelining_config11-30-100-128-batchable {'max_batch_size': 4, 'protocol_pipelining_mode': 'tasks'}].counters.pageserver_cpu_seconds_total: 0.3699
test_throughput[release-pg16-50-pipelining_config11-30-100-128-batchable {'max_batch_size': 4, 'protocol_pipelining_mode': 'tasks'}].perfmetric.batching_factor: 3.6537
test_throughput[release-pg16-50-pipelining_config12-30-100-128-batchable {'max_batch_size': 8, 'protocol_pipelining_mode': 'concurrent-futures'}].tablesize_mib: 50 MiB
test_throughput[release-pg16-50-pipelining_config12-30-100-128-batchable {'max_batch_size': 8, 'protocol_pipelining_mode': 'concurrent-futures'}].pipelining_enabled: 1
test_throughput[release-pg16-50-pipelining_config12-30-100-128-batchable {'max_batch_size': 8, 'protocol_pipelining_mode': 'concurrent-futures'}].effective_io_concurrency: 100
test_throughput[release-pg16-50-pipelining_config12-30-100-128-batchable {'max_batch_size': 8, 'protocol_pipelining_mode': 'concurrent-futures'}].readhead_buffer_size: 128
test_throughput[release-pg16-50-pipelining_config12-30-100-128-batchable {'max_batch_size': 8, 'protocol_pipelining_mode': 'concurrent-futures'}].pipelining_config.max_batch_size: 8
test_throughput[release-pg16-50-pipelining_config12-30-100-128-batchable {'max_batch_size': 8, 'protocol_pipelining_mode': 'concurrent-futures'}].pipelining_config.protocol_pipelining_mode: concurrent-futures
test_throughput[release-pg16-50-pipelining_config12-30-100-128-batchable {'max_batch_size': 8, 'protocol_pipelining_mode': 'concurrent-futures'}].counters.time: 0.3048
test_throughput[release-pg16-50-pipelining_config12-30-100-128-batchable {'max_batch_size': 8, 'protocol_pipelining_mode': 'concurrent-futures'}].counters.pageserver_getpage_count: 6,401.8061
test_throughput[release-pg16-50-pipelining_config12-30-100-128-batchable {'max_batch_size': 8, 'protocol_pipelining_mode': 'concurrent-futures'}].counters.pageserver_vectored_get_count: 886.7755
test_throughput[release-pg16-50-pipelining_config12-30-100-128-batchable {'max_batch_size': 8, 'protocol_pipelining_mode': 'concurrent-futures'}].counters.compute_getpage_count: 6,401.8061
test_throughput[release-pg16-50-pipelining_config12-30-100-128-batchable {'max_batch_size': 8, 'protocol_pipelining_mode': 'concurrent-futures'}].counters.pageserver_cpu_seconds_total: 0.3583
test_throughput[release-pg16-50-pipelining_config12-30-100-128-batchable {'max_batch_size': 8, 'protocol_pipelining_mode': 'concurrent-futures'}].perfmetric.batching_factor: 7.2192
test_throughput[release-pg16-50-pipelining_config13-30-100-128-batchable {'max_batch_size': 8, 'protocol_pipelining_mode': 'tasks'}].tablesize_mib: 50 MiB
test_throughput[release-pg16-50-pipelining_config13-30-100-128-batchable {'max_batch_size': 8, 'protocol_pipelining_mode': 'tasks'}].pipelining_enabled: 1
test_throughput[release-pg16-50-pipelining_config13-30-100-128-batchable {'max_batch_size': 8, 'protocol_pipelining_mode': 'tasks'}].effective_io_concurrency: 100
test_throughput[release-pg16-50-pipelining_config13-30-100-128-batchable {'max_batch_size': 8, 'protocol_pipelining_mode': 'tasks'}].readhead_buffer_size: 128
test_throughput[release-pg16-50-pipelining_config13-30-100-128-batchable {'max_batch_size': 8, 'protocol_pipelining_mode': 'tasks'}].pipelining_config.max_batch_size: 8
test_throughput[release-pg16-50-pipelining_config13-30-100-128-batchable {'max_batch_size': 8, 'protocol_pipelining_mode': 'tasks'}].pipelining_config.protocol_pipelining_mode: tasks
test_throughput[release-pg16-50-pipelining_config13-30-100-128-batchable {'max_batch_size': 8, 'protocol_pipelining_mode': 'tasks'}].counters.time: 0.3517
test_throughput[release-pg16-50-pipelining_config13-30-100-128-batchable {'max_batch_size': 8, 'protocol_pipelining_mode': 'tasks'}].counters.pageserver_getpage_count: 6,402.0824
test_throughput[release-pg16-50-pipelining_config13-30-100-128-batchable {'max_batch_size': 8, 'protocol_pipelining_mode': 'tasks'}].counters.pageserver_vectored_get_count: 978.0941
test_throughput[release-pg16-50-pipelining_config13-30-100-128-batchable {'max_batch_size': 8, 'protocol_pipelining_mode': 'tasks'}].counters.compute_getpage_count: 6,402.0824
test_throughput[release-pg16-50-pipelining_config13-30-100-128-batchable {'max_batch_size': 8, 'protocol_pipelining_mode': 'tasks'}].counters.pageserver_cpu_seconds_total: 0.3421
test_throughput[release-pg16-50-pipelining_config13-30-100-128-batchable {'max_batch_size': 8, 'protocol_pipelining_mode': 'tasks'}].perfmetric.batching_factor: 6.5455
test_throughput[release-pg16-50-pipelining_config14-30-100-128-batchable {'max_batch_size': 16, 'protocol_pipelining_mode': 'concurrent-futures'}].tablesize_mib: 50 MiB
test_throughput[release-pg16-50-pipelining_config14-30-100-128-batchable {'max_batch_size': 16, 'protocol_pipelining_mode': 'concurrent-futures'}].pipelining_enabled: 1
test_throughput[release-pg16-50-pipelining_config14-30-100-128-batchable {'max_batch_size': 16, 'protocol_pipelining_mode': 'concurrent-futures'}].effective_io_concurrency: 100
test_throughput[release-pg16-50-pipelining_config14-30-100-128-batchable {'max_batch_size': 16, 'protocol_pipelining_mode': 'concurrent-futures'}].readhead_buffer_size: 128
test_throughput[release-pg16-50-pipelining_config14-30-100-128-batchable {'max_batch_size': 16, 'protocol_pipelining_mode': 'concurrent-futures'}].pipelining_config.max_batch_size: 16
test_throughput[release-pg16-50-pipelining_config14-30-100-128-batchable {'max_batch_size': 16, 'protocol_pipelining_mode': 'concurrent-futures'}].pipelining_config.protocol_pipelining_mode: concurrent-futures
test_throughput[release-pg16-50-pipelining_config14-30-100-128-batchable {'max_batch_size': 16, 'protocol_pipelining_mode': 'concurrent-futures'}].counters.time: 0.2682
test_throughput[release-pg16-50-pipelining_config14-30-100-128-batchable {'max_batch_size': 16, 'protocol_pipelining_mode': 'concurrent-futures'}].counters.pageserver_getpage_count: 6,401.5946
test_throughput[release-pg16-50-pipelining_config14-30-100-128-batchable {'max_batch_size': 16, 'protocol_pipelining_mode': 'concurrent-futures'}].counters.pageserver_vectored_get_count: 500.5495
test_throughput[release-pg16-50-pipelining_config14-30-100-128-batchable {'max_batch_size': 16, 'protocol_pipelining_mode': 'concurrent-futures'}].counters.compute_getpage_count: 6,401.5946
test_throughput[release-pg16-50-pipelining_config14-30-100-128-batchable {'max_batch_size': 16, 'protocol_pipelining_mode': 'concurrent-futures'}].counters.pageserver_cpu_seconds_total: 0.3187
test_throughput[release-pg16-50-pipelining_config14-30-100-128-batchable {'max_batch_size': 16, 'protocol_pipelining_mode': 'concurrent-futures'}].perfmetric.batching_factor: 12.7891
test_throughput[release-pg16-50-pipelining_config15-30-100-128-batchable {'max_batch_size': 16, 'protocol_pipelining_mode': 'tasks'}].tablesize_mib: 50 MiB
test_throughput[release-pg16-50-pipelining_config15-30-100-128-batchable {'max_batch_size': 16, 'protocol_pipelining_mode': 'tasks'}].pipelining_enabled: 1
test_throughput[release-pg16-50-pipelining_config15-30-100-128-batchable {'max_batch_size': 16, 'protocol_pipelining_mode': 'tasks'}].effective_io_concurrency: 100
test_throughput[release-pg16-50-pipelining_config15-30-100-128-batchable {'max_batch_size': 16, 'protocol_pipelining_mode': 'tasks'}].readhead_buffer_size: 128
test_throughput[release-pg16-50-pipelining_config15-30-100-128-batchable {'max_batch_size': 16, 'protocol_pipelining_mode': 'tasks'}].pipelining_config.max_batch_size: 16
test_throughput[release-pg16-50-pipelining_config15-30-100-128-batchable {'max_batch_size': 16, 'protocol_pipelining_mode': 'tasks'}].pipelining_config.protocol_pipelining_mode: tasks
test_throughput[release-pg16-50-pipelining_config15-30-100-128-batchable {'max_batch_size': 16, 'protocol_pipelining_mode': 'tasks'}].counters.time: 0.3163
test_throughput[release-pg16-50-pipelining_config15-30-100-128-batchable {'max_batch_size': 16, 'protocol_pipelining_mode': 'tasks'}].counters.pageserver_getpage_count: 6,401.8830
test_throughput[release-pg16-50-pipelining_config15-30-100-128-batchable {'max_batch_size': 16, 'protocol_pipelining_mode': 'tasks'}].counters.pageserver_vectored_get_count: 591.5851
test_throughput[release-pg16-50-pipelining_config15-30-100-128-batchable {'max_batch_size': 16, 'protocol_pipelining_mode': 'tasks'}].counters.compute_getpage_count: 6,401.8830
test_throughput[release-pg16-50-pipelining_config15-30-100-128-batchable {'max_batch_size': 16, 'protocol_pipelining_mode': 'tasks'}].counters.pageserver_cpu_seconds_total: 0.3117
test_throughput[release-pg16-50-pipelining_config15-30-100-128-batchable {'max_batch_size': 16, 'protocol_pipelining_mode': 'tasks'}].perfmetric.batching_factor: 10.8216
test_throughput[release-pg16-50-pipelining_config16-30-100-128-batchable {'max_batch_size': 32, 'protocol_pipelining_mode': 'concurrent-futures'}].tablesize_mib: 50 MiB
test_throughput[release-pg16-50-pipelining_config16-30-100-128-batchable {'max_batch_size': 32, 'protocol_pipelining_mode': 'concurrent-futures'}].pipelining_enabled: 1
test_throughput[release-pg16-50-pipelining_config16-30-100-128-batchable {'max_batch_size': 32, 'protocol_pipelining_mode': 'concurrent-futures'}].effective_io_concurrency: 100
test_throughput[release-pg16-50-pipelining_config16-30-100-128-batchable {'max_batch_size': 32, 'protocol_pipelining_mode': 'concurrent-futures'}].readhead_buffer_size: 128
test_throughput[release-pg16-50-pipelining_config16-30-100-128-batchable {'max_batch_size': 32, 'protocol_pipelining_mode': 'concurrent-futures'}].pipelining_config.max_batch_size: 32
test_throughput[release-pg16-50-pipelining_config16-30-100-128-batchable {'max_batch_size': 32, 'protocol_pipelining_mode': 'concurrent-futures'}].pipelining_config.protocol_pipelining_mode: concurrent-futures
test_throughput[release-pg16-50-pipelining_config16-30-100-128-batchable {'max_batch_size': 32, 'protocol_pipelining_mode': 'concurrent-futures'}].counters.time: 0.2504
test_throughput[release-pg16-50-pipelining_config16-30-100-128-batchable {'max_batch_size': 32, 'protocol_pipelining_mode': 'concurrent-futures'}].counters.pageserver_getpage_count: 6,401.4874
test_throughput[release-pg16-50-pipelining_config16-30-100-128-batchable {'max_batch_size': 32, 'protocol_pipelining_mode': 'concurrent-futures'}].counters.pageserver_vectored_get_count: 307.8067
test_throughput[release-pg16-50-pipelining_config16-30-100-128-batchable {'max_batch_size': 32, 'protocol_pipelining_mode': 'concurrent-futures'}].counters.compute_getpage_count: 6,401.4874
test_throughput[release-pg16-50-pipelining_config16-30-100-128-batchable {'max_batch_size': 32, 'protocol_pipelining_mode': 'concurrent-futures'}].counters.pageserver_cpu_seconds_total: 0.2972
test_throughput[release-pg16-50-pipelining_config16-30-100-128-batchable {'max_batch_size': 32, 'protocol_pipelining_mode': 'concurrent-futures'}].perfmetric.batching_factor: 20.7971
test_throughput[release-pg16-50-pipelining_config17-30-100-128-batchable {'max_batch_size': 32, 'protocol_pipelining_mode': 'tasks'}].tablesize_mib: 50 MiB
test_throughput[release-pg16-50-pipelining_config17-30-100-128-batchable {'max_batch_size': 32, 'protocol_pipelining_mode': 'tasks'}].pipelining_enabled: 1
test_throughput[release-pg16-50-pipelining_config17-30-100-128-batchable {'max_batch_size': 32, 'protocol_pipelining_mode': 'tasks'}].effective_io_concurrency: 100
test_throughput[release-pg16-50-pipelining_config17-30-100-128-batchable {'max_batch_size': 32, 'protocol_pipelining_mode': 'tasks'}].readhead_buffer_size: 128
test_throughput[release-pg16-50-pipelining_config17-30-100-128-batchable {'max_batch_size': 32, 'protocol_pipelining_mode': 'tasks'}].pipelining_config.max_batch_size: 32
test_throughput[release-pg16-50-pipelining_config17-30-100-128-batchable {'max_batch_size': 32, 'protocol_pipelining_mode': 'tasks'}].pipelining_config.protocol_pipelining_mode: tasks
test_throughput[release-pg16-50-pipelining_config17-30-100-128-batchable {'max_batch_size': 32, 'protocol_pipelining_mode': 'tasks'}].counters.time: 0.2899
test_throughput[release-pg16-50-pipelining_config17-30-100-128-batchable {'max_batch_size': 32, 'protocol_pipelining_mode': 'tasks'}].counters.pageserver_getpage_count: 6,401.7184
test_throughput[release-pg16-50-pipelining_config17-30-100-128-batchable {'max_batch_size': 32, 'protocol_pipelining_mode': 'tasks'}].counters.pageserver_vectored_get_count: 398.4466
test_throughput[release-pg16-50-pipelining_config17-30-100-128-batchable {'max_batch_size': 32, 'protocol_pipelining_mode': 'tasks'}].counters.compute_getpage_count: 6,401.7184
test_throughput[release-pg16-50-pipelining_config17-30-100-128-batchable {'max_batch_size': 32, 'protocol_pipelining_mode': 'tasks'}].counters.pageserver_cpu_seconds_total: 0.2901
test_throughput[release-pg16-50-pipelining_config17-30-100-128-batchable {'max_batch_size': 32, 'protocol_pipelining_mode': 'tasks'}].perfmetric.batching_factor: 16.0667
test_latency[release-pg16-None-None].latency_mean: 0.138 ms
test_latency[release-pg16-None-None].latency_percentiles.p95: 0.173 ms
test_latency[release-pg16-None-None].latency_percentiles.p99: 0.193 ms
test_latency[release-pg16-None-None].latency_percentiles.p99.9: 0.302 ms
test_latency[release-pg16-None-None].latency_percentiles.p99.99: 0.667 ms
test_latency[release-pg16-pipelining_config1-{'max_batch_size': 1, 'protocol_pipelining_mode': 'concurrent-futures'}].latency_mean: 0.116 ms
test_latency[release-pg16-pipelining_config1-{'max_batch_size': 1, 'protocol_pipelining_mode': 'concurrent-futures'}].latency_percentiles.p95: 0.137 ms
test_latency[release-pg16-pipelining_config1-{'max_batch_size': 1, 'protocol_pipelining_mode': 'concurrent-futures'}].latency_percentiles.p99: 0.165 ms
test_latency[release-pg16-pipelining_config1-{'max_batch_size': 1, 'protocol_pipelining_mode': 'concurrent-futures'}].latency_percentiles.p99.9: 0.406 ms
test_latency[release-pg16-pipelining_config1-{'max_batch_size': 1, 'protocol_pipelining_mode': 'concurrent-futures'}].latency_percentiles.p99.99: 0.560 ms
test_latency[release-pg16-pipelining_config2-{'max_batch_size': 1, 'protocol_pipelining_mode': 'tasks'}].latency_mean: 0.140 ms
test_latency[release-pg16-pipelining_config2-{'max_batch_size': 1, 'protocol_pipelining_mode': 'tasks'}].latency_percentiles.p95: 0.172 ms
test_latency[release-pg16-pipelining_config2-{'max_batch_size': 1, 'protocol_pipelining_mode': 'tasks'}].latency_percentiles.p99: 0.189 ms
test_latency[release-pg16-pipelining_config2-{'max_batch_size': 1, 'protocol_pipelining_mode': 'tasks'}].latency_percentiles.p99.9: 0.315 ms
test_latency[release-pg16-pipelining_config2-{'max_batch_size': 1, 'protocol_pipelining_mode': 'tasks'}].latency_percentiles.p99.99: 0.705 ms
test_latency[release-pg16-pipelining_config3-{'max_batch_size': 32, 'protocol_pipelining_mode': 'concurrent-futures'}].latency_mean: 0.133 ms
test_latency[release-pg16-pipelining_config3-{'max_batch_size': 32, 'protocol_pipelining_mode': 'concurrent-futures'}].latency_percentiles.p95: 0.170 ms
test_latency[release-pg16-pipelining_config3-{'max_batch_size': 32, 'protocol_pipelining_mode': 'concurrent-futures'}].latency_percentiles.p99: 0.192 ms
test_latency[release-pg16-pipelining_config3-{'max_batch_size': 32, 'protocol_pipelining_mode': 'concurrent-futures'}].latency_percentiles.p99.9: 0.337 ms
test_latency[release-pg16-pipelining_config3-{'max_batch_size': 32, 'protocol_pipelining_mode': 'concurrent-futures'}].latency_percentiles.p99.99: 0.653 ms
test_latency[release-pg16-pipelining_config4-{'max_batch_size': 32, 'protocol_pipelining_mode': 'tasks'}].latency_mean: 0.128 ms
test_latency[release-pg16-pipelining_config4-{'max_batch_size': 32, 'protocol_pipelining_mode': 'tasks'}].latency_percentiles.p95: 0.166 ms
test_latency[release-pg16-pipelining_config4-{'max_batch_size': 32, 'protocol_pipelining_mode': 'tasks'}].latency_percentiles.p99: 0.176 ms
test_latency[release-pg16-pipelining_config4-{'max_batch_size': 32, 'protocol_pipelining_mode': 'tasks'}].latency_percentiles.p99.9: 0.284 ms
test_latency[release-pg16-pipelining_config4-{'max_batch_size': 32, 'protocol_pipelining_mode': 'tasks'}].latency_percentiles.p99.99: 0.616 ms
2024-11-28 22:40:53 +01:00
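Reading the latency block above, pipelining at these batch sizes does not appear to regress mean unbatched latency relative to the non-pipelined baseline. A small, hypothetical Python snippet (not part of the test suite) that computes the relative change of each pipelined latency_mean against the None baseline from this run:

baseline_mean_ms = 0.138  # test_latency[release-pg16-None-None].latency_mean

pipelined_means_ms = {
    "max_batch_size=1, concurrent-futures": 0.116,
    "max_batch_size=1, tasks": 0.140,
    "max_batch_size=32, concurrent-futures": 0.133,
    "max_batch_size=32, tasks": 0.128,
}

for config, mean_ms in pipelined_means_ms.items():
    delta = (mean_ms - baseline_mean_ms) / baseline_mean_ms
    print(f"{config}: {delta:+.1%}")
# e.g. max_batch_size=1, concurrent-futures: -15.9%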
Christian Schwarz
a2a3613185 reintroduce task-based execution 2024-11-28 20:50:06 +01:00
Christian Schwarz
6bd39f95f5 run benchmark on hetzner runner
-------------------------------------------------------------------------------------------------------------------- Benchmark results ---------------------------------------------------------------------------------------------------------------------
test_throughput[release-pg16-50-None-30-1-128-not batchable None].tablesize_mib: 50 MiB
test_throughput[release-pg16-50-None-30-1-128-not batchable None].pipelining_enabled: 0
test_throughput[release-pg16-50-None-30-1-128-not batchable None].effective_io_concurrency: 1
test_throughput[release-pg16-50-None-30-1-128-not batchable None].readhead_buffer_size: 128
test_throughput[release-pg16-50-None-30-1-128-not batchable None].counters.time: 0.8905
test_throughput[release-pg16-50-None-30-1-128-not batchable None].counters.pageserver_getpage_count: 6,403.0000
test_throughput[release-pg16-50-None-30-1-128-not batchable None].counters.pageserver_vectored_get_count: 6,403.0000
test_throughput[release-pg16-50-None-30-1-128-not batchable None].counters.compute_getpage_count: 6,403.0000
test_throughput[release-pg16-50-None-30-1-128-not batchable None].counters.pageserver_cpu_seconds_total: 0.8633
test_throughput[release-pg16-50-None-30-1-128-not batchable None].perfmetric.batching_factor: 1.0000
test_throughput[release-pg16-50-pipelining_config1-30-1-128-not batchable {'max_batch_size': 1}].tablesize_mib: 50 MiB
test_throughput[release-pg16-50-pipelining_config1-30-1-128-not batchable {'max_batch_size': 1}].pipelining_enabled: 1
test_throughput[release-pg16-50-pipelining_config1-30-1-128-not batchable {'max_batch_size': 1}].effective_io_concurrency: 1
test_throughput[release-pg16-50-pipelining_config1-30-1-128-not batchable {'max_batch_size': 1}].readhead_buffer_size: 128
test_throughput[release-pg16-50-pipelining_config1-30-1-128-not batchable {'max_batch_size': 1}].pipelining_config.max_batch_size: 1
test_throughput[release-pg16-50-pipelining_config1-30-1-128-not batchable {'max_batch_size': 1}].counters.time: 0.9195
test_throughput[release-pg16-50-pipelining_config1-30-1-128-not batchable {'max_batch_size': 1}].counters.pageserver_getpage_count: 6,403.0000
test_throughput[release-pg16-50-pipelining_config1-30-1-128-not batchable {'max_batch_size': 1}].counters.pageserver_vectored_get_count: 6,403.0000
test_throughput[release-pg16-50-pipelining_config1-30-1-128-not batchable {'max_batch_size': 1}].counters.compute_getpage_count: 6,403.0000
test_throughput[release-pg16-50-pipelining_config1-30-1-128-not batchable {'max_batch_size': 1}].counters.pageserver_cpu_seconds_total: 0.8925
test_throughput[release-pg16-50-pipelining_config1-30-1-128-not batchable {'max_batch_size': 1}].perfmetric.batching_factor: 1.0000
test_throughput[release-pg16-50-pipelining_config2-30-1-128-not batchable {'max_batch_size': 32}].tablesize_mib: 50 MiB
test_throughput[release-pg16-50-pipelining_config2-30-1-128-not batchable {'max_batch_size': 32}].pipelining_enabled: 1
test_throughput[release-pg16-50-pipelining_config2-30-1-128-not batchable {'max_batch_size': 32}].effective_io_concurrency: 1
test_throughput[release-pg16-50-pipelining_config2-30-1-128-not batchable {'max_batch_size': 32}].readhead_buffer_size: 128
test_throughput[release-pg16-50-pipelining_config2-30-1-128-not batchable {'max_batch_size': 32}].pipelining_config.max_batch_size: 32
test_throughput[release-pg16-50-pipelining_config2-30-1-128-not batchable {'max_batch_size': 32}].counters.time: 0.8724
test_throughput[release-pg16-50-pipelining_config2-30-1-128-not batchable {'max_batch_size': 32}].counters.pageserver_getpage_count: 6,403.0000
test_throughput[release-pg16-50-pipelining_config2-30-1-128-not batchable {'max_batch_size': 32}].counters.pageserver_vectored_get_count: 6,403.0000
test_throughput[release-pg16-50-pipelining_config2-30-1-128-not batchable {'max_batch_size': 32}].counters.compute_getpage_count: 6,403.0000
test_throughput[release-pg16-50-pipelining_config2-30-1-128-not batchable {'max_batch_size': 32}].counters.pageserver_cpu_seconds_total: 0.8406
test_throughput[release-pg16-50-pipelining_config2-30-1-128-not batchable {'max_batch_size': 32}].perfmetric.batching_factor: 1.0000
test_throughput[release-pg16-50-None-30-100-128-batchable None].tablesize_mib: 50 MiB
test_throughput[release-pg16-50-None-30-100-128-batchable None].pipelining_enabled: 0
test_throughput[release-pg16-50-None-30-100-128-batchable None].effective_io_concurrency: 100
test_throughput[release-pg16-50-None-30-100-128-batchable None].readhead_buffer_size: 128
test_throughput[release-pg16-50-None-30-100-128-batchable None].counters.time: 0.2576
test_throughput[release-pg16-50-None-30-100-128-batchable None].counters.pageserver_getpage_count: 6,401.5259
test_throughput[release-pg16-50-None-30-100-128-batchable None].counters.pageserver_vectored_get_count: 307.8534
test_throughput[release-pg16-50-None-30-100-128-batchable None].counters.compute_getpage_count: 6,401.5259
test_throughput[release-pg16-50-None-30-100-128-batchable None].counters.pageserver_cpu_seconds_total: 0.3043
test_throughput[release-pg16-50-None-30-100-128-batchable None].perfmetric.batching_factor: 20.7941
test_throughput[release-pg16-50-pipelining_config4-30-100-128-batchable {'max_batch_size': 1}].tablesize_mib: 50 MiB
test_throughput[release-pg16-50-pipelining_config4-30-100-128-batchable {'max_batch_size': 1}].pipelining_enabled: 1
test_throughput[release-pg16-50-pipelining_config4-30-100-128-batchable {'max_batch_size': 1}].effective_io_concurrency: 100
test_throughput[release-pg16-50-pipelining_config4-30-100-128-batchable {'max_batch_size': 1}].readhead_buffer_size: 128
test_throughput[release-pg16-50-pipelining_config4-30-100-128-batchable {'max_batch_size': 1}].pipelining_config.max_batch_size: 1
test_throughput[release-pg16-50-pipelining_config4-30-100-128-batchable {'max_batch_size': 1}].counters.time: 0.6187
test_throughput[release-pg16-50-pipelining_config4-30-100-128-batchable {'max_batch_size': 1}].counters.pageserver_getpage_count: 6,403.0000
test_throughput[release-pg16-50-pipelining_config4-30-100-128-batchable {'max_batch_size': 1}].counters.pageserver_vectored_get_count: 6,403.0000
test_throughput[release-pg16-50-pipelining_config4-30-100-128-batchable {'max_batch_size': 1}].counters.compute_getpage_count: 6,403.0000
test_throughput[release-pg16-50-pipelining_config4-30-100-128-batchable {'max_batch_size': 1}].counters.pageserver_cpu_seconds_total: 0.7473
test_throughput[release-pg16-50-pipelining_config4-30-100-128-batchable {'max_batch_size': 1}].perfmetric.batching_factor: 1.0000
test_throughput[release-pg16-50-pipelining_config5-30-100-128-batchable {'max_batch_size': 2}].tablesize_mib: 50 MiB
test_throughput[release-pg16-50-pipelining_config5-30-100-128-batchable {'max_batch_size': 2}].pipelining_enabled: 1
test_throughput[release-pg16-50-pipelining_config5-30-100-128-batchable {'max_batch_size': 2}].effective_io_concurrency: 100
test_throughput[release-pg16-50-pipelining_config5-30-100-128-batchable {'max_batch_size': 2}].readhead_buffer_size: 128
test_throughput[release-pg16-50-pipelining_config5-30-100-128-batchable {'max_batch_size': 2}].pipelining_config.max_batch_size: 2
test_throughput[release-pg16-50-pipelining_config5-30-100-128-batchable {'max_batch_size': 2}].counters.time: 0.4419
test_throughput[release-pg16-50-pipelining_config5-30-100-128-batchable {'max_batch_size': 2}].counters.pageserver_getpage_count: 6,402.6418
test_throughput[release-pg16-50-pipelining_config5-30-100-128-batchable {'max_batch_size': 2}].counters.pageserver_vectored_get_count: 3,207.7015
test_throughput[release-pg16-50-pipelining_config5-30-100-128-batchable {'max_batch_size': 2}].counters.compute_getpage_count: 6,402.6418
test_throughput[release-pg16-50-pipelining_config5-30-100-128-batchable {'max_batch_size': 2}].counters.pageserver_cpu_seconds_total: 0.5391
test_throughput[release-pg16-50-pipelining_config5-30-100-128-batchable {'max_batch_size': 2}].perfmetric.batching_factor: 1.9960
test_throughput[release-pg16-50-pipelining_config6-30-100-128-batchable {'max_batch_size': 4}].tablesize_mib: 50 MiB
test_throughput[release-pg16-50-pipelining_config6-30-100-128-batchable {'max_batch_size': 4}].pipelining_enabled: 1
test_throughput[release-pg16-50-pipelining_config6-30-100-128-batchable {'max_batch_size': 4}].effective_io_concurrency: 100
test_throughput[release-pg16-50-pipelining_config6-30-100-128-batchable {'max_batch_size': 4}].readhead_buffer_size: 128
test_throughput[release-pg16-50-pipelining_config6-30-100-128-batchable {'max_batch_size': 4}].pipelining_config.max_batch_size: 4
test_throughput[release-pg16-50-pipelining_config6-30-100-128-batchable {'max_batch_size': 4}].counters.time: 0.3569
test_throughput[release-pg16-50-pipelining_config6-30-100-128-batchable {'max_batch_size': 4}].counters.pageserver_getpage_count: 6,402.1071
test_throughput[release-pg16-50-pipelining_config6-30-100-128-batchable {'max_batch_size': 4}].counters.pageserver_vectored_get_count: 1,660.0952
test_throughput[release-pg16-50-pipelining_config6-30-100-128-batchable {'max_batch_size': 4}].counters.compute_getpage_count: 6,402.1071
test_throughput[release-pg16-50-pipelining_config6-30-100-128-batchable {'max_batch_size': 4}].counters.pageserver_cpu_seconds_total: 0.4244
test_throughput[release-pg16-50-pipelining_config6-30-100-128-batchable {'max_batch_size': 4}].perfmetric.batching_factor: 3.8565
test_throughput[release-pg16-50-pipelining_config7-30-100-128-batchable {'max_batch_size': 8}].tablesize_mib: 50 MiB
test_throughput[release-pg16-50-pipelining_config7-30-100-128-batchable {'max_batch_size': 8}].pipelining_enabled: 1
test_throughput[release-pg16-50-pipelining_config7-30-100-128-batchable {'max_batch_size': 8}].effective_io_concurrency: 100
test_throughput[release-pg16-50-pipelining_config7-30-100-128-batchable {'max_batch_size': 8}].readhead_buffer_size: 128
test_throughput[release-pg16-50-pipelining_config7-30-100-128-batchable {'max_batch_size': 8}].pipelining_config.max_batch_size: 8
test_throughput[release-pg16-50-pipelining_config7-30-100-128-batchable {'max_batch_size': 8}].counters.time: 0.2977
test_throughput[release-pg16-50-pipelining_config7-30-100-128-batchable {'max_batch_size': 8}].counters.pageserver_getpage_count: 6,401.7700
test_throughput[release-pg16-50-pipelining_config7-30-100-128-batchable {'max_batch_size': 8}].counters.pageserver_vectored_get_count: 886.6900
test_throughput[release-pg16-50-pipelining_config7-30-100-128-batchable {'max_batch_size': 8}].counters.compute_getpage_count: 6,401.7700
test_throughput[release-pg16-50-pipelining_config7-30-100-128-batchable {'max_batch_size': 8}].counters.pageserver_cpu_seconds_total: 0.3511
test_throughput[release-pg16-50-pipelining_config7-30-100-128-batchable {'max_batch_size': 8}].perfmetric.batching_factor: 7.2199
test_throughput[release-pg16-50-pipelining_config8-30-100-128-batchable {'max_batch_size': 16}].tablesize_mib: 50 MiB
test_throughput[release-pg16-50-pipelining_config8-30-100-128-batchable {'max_batch_size': 16}].pipelining_enabled: 1
test_throughput[release-pg16-50-pipelining_config8-30-100-128-batchable {'max_batch_size': 16}].effective_io_concurrency: 100
test_throughput[release-pg16-50-pipelining_config8-30-100-128-batchable {'max_batch_size': 16}].readhead_buffer_size: 128
test_throughput[release-pg16-50-pipelining_config8-30-100-128-batchable {'max_batch_size': 16}].pipelining_config.max_batch_size: 16
test_throughput[release-pg16-50-pipelining_config8-30-100-128-batchable {'max_batch_size': 16}].counters.time: 0.2697
test_throughput[release-pg16-50-pipelining_config8-30-100-128-batchable {'max_batch_size': 16}].counters.pageserver_getpage_count: 6,401.5946
test_throughput[release-pg16-50-pipelining_config8-30-100-128-batchable {'max_batch_size': 16}].counters.pageserver_vectored_get_count: 500.5766
test_throughput[release-pg16-50-pipelining_config8-30-100-128-batchable {'max_batch_size': 16}].counters.compute_getpage_count: 6,401.5946
test_throughput[release-pg16-50-pipelining_config8-30-100-128-batchable {'max_batch_size': 16}].counters.pageserver_cpu_seconds_total: 0.3195
test_throughput[release-pg16-50-pipelining_config8-30-100-128-batchable {'max_batch_size': 16}].perfmetric.batching_factor: 12.7884
test_throughput[release-pg16-50-pipelining_config9-30-100-128-batchable {'max_batch_size': 32}].tablesize_mib: 50 MiB
test_throughput[release-pg16-50-pipelining_config9-30-100-128-batchable {'max_batch_size': 32}].pipelining_enabled: 1
test_throughput[release-pg16-50-pipelining_config9-30-100-128-batchable {'max_batch_size': 32}].effective_io_concurrency: 100
test_throughput[release-pg16-50-pipelining_config9-30-100-128-batchable {'max_batch_size': 32}].readhead_buffer_size: 128
test_throughput[release-pg16-50-pipelining_config9-30-100-128-batchable {'max_batch_size': 32}].pipelining_config.max_batch_size: 32
test_throughput[release-pg16-50-pipelining_config9-30-100-128-batchable {'max_batch_size': 32}].counters.time: 0.2548
test_throughput[release-pg16-50-pipelining_config9-30-100-128-batchable {'max_batch_size': 32}].counters.pageserver_getpage_count: 6,401.5128
test_throughput[release-pg16-50-pipelining_config9-30-100-128-batchable {'max_batch_size': 32}].counters.pageserver_vectored_get_count: 307.7692
test_throughput[release-pg16-50-pipelining_config9-30-100-128-batchable {'max_batch_size': 32}].counters.compute_getpage_count: 6,401.5128
test_throughput[release-pg16-50-pipelining_config9-30-100-128-batchable {'max_batch_size': 32}].counters.pageserver_cpu_seconds_total: 0.3015
test_throughput[release-pg16-50-pipelining_config9-30-100-128-batchable {'max_batch_size': 32}].perfmetric.batching_factor: 20.7997
test_latency[release-pg16-None-None].latency_mean: 0.127 ms
test_latency[release-pg16-None-None].latency_percentiles.p95: 0.166 ms
test_latency[release-pg16-None-None].latency_percentiles.p99: 0.187 ms
test_latency[release-pg16-None-None].latency_percentiles.p99.9: 0.292 ms
test_latency[release-pg16-None-None].latency_percentiles.p99.99: 0.624 ms
test_latency[release-pg16-pipelining_config1-{'max_batch_size': 1}].latency_mean: 0.139 ms
test_latency[release-pg16-pipelining_config1-{'max_batch_size': 1}].latency_percentiles.p95: 0.175 ms
test_latency[release-pg16-pipelining_config1-{'max_batch_size': 1}].latency_percentiles.p99: 0.200 ms
test_latency[release-pg16-pipelining_config1-{'max_batch_size': 1}].latency_percentiles.p99.9: 0.444 ms
test_latency[release-pg16-pipelining_config1-{'max_batch_size': 1}].latency_percentiles.p99.99: 0.658 ms
test_latency[release-pg16-pipelining_config2-{'max_batch_size': 32}].latency_mean: 0.119 ms
test_latency[release-pg16-pipelining_config2-{'max_batch_size': 32}].latency_percentiles.p95: 0.155 ms
test_latency[release-pg16-pipelining_config2-{'max_batch_size': 32}].latency_percentiles.p99: 0.172 ms
test_latency[release-pg16-pipelining_config2-{'max_batch_size': 32}].latency_percentiles.p99.9: 0.267 ms
test_latency[release-pg16-pipelining_config2-{'max_batch_size': 32}].latency_percentiles.p99.99: 0.587 ms
2024-11-28 20:24:01 +01:00
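
A note on reading the numbers above: perfmetric.batching_factor appears to be the ratio of getpage requests served to vectored get calls issued by the pageserver, e.g. 6,401.5259 / 307.8534 ≈ 20.7941 for the batchable run with pipelining disabled. A minimal sketch of that relationship, assuming the counters are as reported (the variable names are invented for the example):

    // Sketch only: how batching_factor seems to follow from the counters above.
    fn main() {
        let pageserver_getpage_count = 6401.5259_f64;      // getpage requests served
        let pageserver_vectored_get_count = 307.8534_f64;  // vectored reads issued
        let batching_factor = pageserver_getpage_count / pageserver_vectored_get_count;
        println!("batching_factor ≈ {batching_factor:.4}"); // prints ≈ 20.7941
    }
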
Christian Schwarz
07358dea89 converge on approach that pushes read Result through pipeline 2024-11-28 20:06:15 +01:00
Christian Schwarz
82e1fa3f83 WIP 2024-11-27 12:31:56 +01:00
Christian Schwarz
7fb3d95596 review & identified a cast that isn't handled, document that 2024-11-27 10:33:53 +01:00
Christian Schwarz
e0123c8a80 explain the pipeline cancellation story 2024-11-27 10:13:51 +01:00
Christian Schwarz
18ffaba975 fix pipeline cancellation 2024-11-26 20:44:36 +01:00
Christian Schwarz
41ddc6772c benchmark
non-package-mode-py3.10christian@neon-hetzner-dev-christian:[~/src/neon]: DEFAULT_PG_VERSION=16 BUILD_TYPE=release poetry run pytest --alluredir ~/tmp/alluredir --clean-alluredir 'test_runner/performance/pageserver/test_page_service_batching.py' --maxfail=1

-------------------------------------------------------------------------------------------------------------------------------------------------------------------------- Benchmark results ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------
test_throughput[release-pg16-50-None-30-1-128-not batchable None].tablesize_mib: 50 MiB
test_throughput[release-pg16-50-None-30-1-128-not batchable None].pipelining_enabled: 0
test_throughput[release-pg16-50-None-30-1-128-not batchable None].effective_io_concurrency: 1
test_throughput[release-pg16-50-None-30-1-128-not batchable None].readhead_buffer_size: 128
test_throughput[release-pg16-50-None-30-1-128-not batchable None].counters.time: 0.9443
test_throughput[release-pg16-50-None-30-1-128-not batchable None].counters.pageserver_getpage_count: 6,403.0000
test_throughput[release-pg16-50-None-30-1-128-not batchable None].counters.pageserver_vectored_get_count: 6,403.0000
test_throughput[release-pg16-50-None-30-1-128-not batchable None].counters.compute_getpage_count: 6,403.0000
test_throughput[release-pg16-50-None-30-1-128-not batchable None].counters.pageserver_cpu_seconds_total: 0.9010
test_throughput[release-pg16-50-None-30-1-128-not batchable None].perfmetric.batching_factor: 1.0000
test_throughput[release-pg16-50-pipelining_config1-30-1-128-not batchable {'max_batch_size': 1, 'protocol_pipelining_mode': 'concurrent-futures'}].tablesize_mib: 50 MiB
test_throughput[release-pg16-50-pipelining_config1-30-1-128-not batchable {'max_batch_size': 1, 'protocol_pipelining_mode': 'concurrent-futures'}].pipelining_enabled: 1
test_throughput[release-pg16-50-pipelining_config1-30-1-128-not batchable {'max_batch_size': 1, 'protocol_pipelining_mode': 'concurrent-futures'}].effective_io_concurrency: 1
test_throughput[release-pg16-50-pipelining_config1-30-1-128-not batchable {'max_batch_size': 1, 'protocol_pipelining_mode': 'concurrent-futures'}].readhead_buffer_size: 128
test_throughput[release-pg16-50-pipelining_config1-30-1-128-not batchable {'max_batch_size': 1, 'protocol_pipelining_mode': 'concurrent-futures'}].pipelining_config.max_batch_size: 1
test_throughput[release-pg16-50-pipelining_config1-30-1-128-not batchable {'max_batch_size': 1, 'protocol_pipelining_mode': 'concurrent-futures'}].pipelining_config.protocol_pipelining_mode: concurrent-futures
test_throughput[release-pg16-50-pipelining_config1-30-1-128-not batchable {'max_batch_size': 1, 'protocol_pipelining_mode': 'concurrent-futures'}].counters.time: 0.9273
test_throughput[release-pg16-50-pipelining_config1-30-1-128-not batchable {'max_batch_size': 1, 'protocol_pipelining_mode': 'concurrent-futures'}].counters.pageserver_getpage_count: 6,403.0000
test_throughput[release-pg16-50-pipelining_config1-30-1-128-not batchable {'max_batch_size': 1, 'protocol_pipelining_mode': 'concurrent-futures'}].counters.pageserver_vectored_get_count: 6,403.0000
test_throughput[release-pg16-50-pipelining_config1-30-1-128-not batchable {'max_batch_size': 1, 'protocol_pipelining_mode': 'concurrent-futures'}].counters.compute_getpage_count: 6,403.0000
test_throughput[release-pg16-50-pipelining_config1-30-1-128-not batchable {'max_batch_size': 1, 'protocol_pipelining_mode': 'concurrent-futures'}].counters.pageserver_cpu_seconds_total: 0.8844
test_throughput[release-pg16-50-pipelining_config1-30-1-128-not batchable {'max_batch_size': 1, 'protocol_pipelining_mode': 'concurrent-futures'}].perfmetric.batching_factor: 1.0000
test_throughput[release-pg16-50-pipelining_config2-30-1-128-not batchable {'max_batch_size': 1, 'protocol_pipelining_mode': 'tasks'}].tablesize_mib: 50 MiB
test_throughput[release-pg16-50-pipelining_config2-30-1-128-not batchable {'max_batch_size': 1, 'protocol_pipelining_mode': 'tasks'}].pipelining_enabled: 1
test_throughput[release-pg16-50-pipelining_config2-30-1-128-not batchable {'max_batch_size': 1, 'protocol_pipelining_mode': 'tasks'}].effective_io_concurrency: 1
test_throughput[release-pg16-50-pipelining_config2-30-1-128-not batchable {'max_batch_size': 1, 'protocol_pipelining_mode': 'tasks'}].readhead_buffer_size: 128
test_throughput[release-pg16-50-pipelining_config2-30-1-128-not batchable {'max_batch_size': 1, 'protocol_pipelining_mode': 'tasks'}].pipelining_config.max_batch_size: 1
test_throughput[release-pg16-50-pipelining_config2-30-1-128-not batchable {'max_batch_size': 1, 'protocol_pipelining_mode': 'tasks'}].pipelining_config.protocol_pipelining_mode: tasks
test_throughput[release-pg16-50-pipelining_config2-30-1-128-not batchable {'max_batch_size': 1, 'protocol_pipelining_mode': 'tasks'}].counters.time: 0.9105
test_throughput[release-pg16-50-pipelining_config2-30-1-128-not batchable {'max_batch_size': 1, 'protocol_pipelining_mode': 'tasks'}].counters.pageserver_getpage_count: 6,403.0000
test_throughput[release-pg16-50-pipelining_config2-30-1-128-not batchable {'max_batch_size': 1, 'protocol_pipelining_mode': 'tasks'}].counters.pageserver_vectored_get_count: 6,403.0000
test_throughput[release-pg16-50-pipelining_config2-30-1-128-not batchable {'max_batch_size': 1, 'protocol_pipelining_mode': 'tasks'}].counters.compute_getpage_count: 6,403.0000
test_throughput[release-pg16-50-pipelining_config2-30-1-128-not batchable {'max_batch_size': 1, 'protocol_pipelining_mode': 'tasks'}].counters.pageserver_cpu_seconds_total: 0.7669
test_throughput[release-pg16-50-pipelining_config2-30-1-128-not batchable {'max_batch_size': 1, 'protocol_pipelining_mode': 'tasks'}].perfmetric.batching_factor: 1.0000
test_throughput[release-pg16-50-pipelining_config3-30-1-128-not batchable {'max_batch_size': 32, 'protocol_pipelining_mode': 'concurrent-futures'}].tablesize_mib: 50 MiB
test_throughput[release-pg16-50-pipelining_config3-30-1-128-not batchable {'max_batch_size': 32, 'protocol_pipelining_mode': 'concurrent-futures'}].pipelining_enabled: 1
test_throughput[release-pg16-50-pipelining_config3-30-1-128-not batchable {'max_batch_size': 32, 'protocol_pipelining_mode': 'concurrent-futures'}].effective_io_concurrency: 1
test_throughput[release-pg16-50-pipelining_config3-30-1-128-not batchable {'max_batch_size': 32, 'protocol_pipelining_mode': 'concurrent-futures'}].readhead_buffer_size: 128
test_throughput[release-pg16-50-pipelining_config3-30-1-128-not batchable {'max_batch_size': 32, 'protocol_pipelining_mode': 'concurrent-futures'}].pipelining_config.max_batch_size: 32
test_throughput[release-pg16-50-pipelining_config3-30-1-128-not batchable {'max_batch_size': 32, 'protocol_pipelining_mode': 'concurrent-futures'}].pipelining_config.protocol_pipelining_mode: concurrent-futures
test_throughput[release-pg16-50-pipelining_config3-30-1-128-not batchable {'max_batch_size': 32, 'protocol_pipelining_mode': 'concurrent-futures'}].counters.time: 0.8828
test_throughput[release-pg16-50-pipelining_config3-30-1-128-not batchable {'max_batch_size': 32, 'protocol_pipelining_mode': 'concurrent-futures'}].counters.pageserver_getpage_count: 6,403.0000
test_throughput[release-pg16-50-pipelining_config3-30-1-128-not batchable {'max_batch_size': 32, 'protocol_pipelining_mode': 'concurrent-futures'}].counters.pageserver_vectored_get_count: 6,403.0000
test_throughput[release-pg16-50-pipelining_config3-30-1-128-not batchable {'max_batch_size': 32, 'protocol_pipelining_mode': 'concurrent-futures'}].counters.compute_getpage_count: 6,403.0000
test_throughput[release-pg16-50-pipelining_config3-30-1-128-not batchable {'max_batch_size': 32, 'protocol_pipelining_mode': 'concurrent-futures'}].counters.pageserver_cpu_seconds_total: 0.8512
test_throughput[release-pg16-50-pipelining_config3-30-1-128-not batchable {'max_batch_size': 32, 'protocol_pipelining_mode': 'concurrent-futures'}].perfmetric.batching_factor: 1.0000
test_throughput[release-pg16-50-pipelining_config4-30-1-128-not batchable {'max_batch_size': 32, 'protocol_pipelining_mode': 'tasks'}].tablesize_mib: 50 MiB
test_throughput[release-pg16-50-pipelining_config4-30-1-128-not batchable {'max_batch_size': 32, 'protocol_pipelining_mode': 'tasks'}].pipelining_enabled: 1
test_throughput[release-pg16-50-pipelining_config4-30-1-128-not batchable {'max_batch_size': 32, 'protocol_pipelining_mode': 'tasks'}].effective_io_concurrency: 1
test_throughput[release-pg16-50-pipelining_config4-30-1-128-not batchable {'max_batch_size': 32, 'protocol_pipelining_mode': 'tasks'}].readhead_buffer_size: 128
test_throughput[release-pg16-50-pipelining_config4-30-1-128-not batchable {'max_batch_size': 32, 'protocol_pipelining_mode': 'tasks'}].pipelining_config.max_batch_size: 32
test_throughput[release-pg16-50-pipelining_config4-30-1-128-not batchable {'max_batch_size': 32, 'protocol_pipelining_mode': 'tasks'}].pipelining_config.protocol_pipelining_mode: tasks
test_throughput[release-pg16-50-pipelining_config4-30-1-128-not batchable {'max_batch_size': 32, 'protocol_pipelining_mode': 'tasks'}].counters.time: 0.9431
test_throughput[release-pg16-50-pipelining_config4-30-1-128-not batchable {'max_batch_size': 32, 'protocol_pipelining_mode': 'tasks'}].counters.pageserver_getpage_count: 6,403.0000
test_throughput[release-pg16-50-pipelining_config4-30-1-128-not batchable {'max_batch_size': 32, 'protocol_pipelining_mode': 'tasks'}].counters.pageserver_vectored_get_count: 6,403.0000
test_throughput[release-pg16-50-pipelining_config4-30-1-128-not batchable {'max_batch_size': 32, 'protocol_pipelining_mode': 'tasks'}].counters.compute_getpage_count: 6,403.0000
test_throughput[release-pg16-50-pipelining_config4-30-1-128-not batchable {'max_batch_size': 32, 'protocol_pipelining_mode': 'tasks'}].counters.pageserver_cpu_seconds_total: 0.7971
test_throughput[release-pg16-50-pipelining_config4-30-1-128-not batchable {'max_batch_size': 32, 'protocol_pipelining_mode': 'tasks'}].perfmetric.batching_factor: 1.0000
test_throughput[release-pg16-50-None-30-100-128-batchable None].tablesize_mib: 50 MiB
test_throughput[release-pg16-50-None-30-100-128-batchable None].pipelining_enabled: 0
test_throughput[release-pg16-50-None-30-100-128-batchable None].effective_io_concurrency: 100
test_throughput[release-pg16-50-None-30-100-128-batchable None].readhead_buffer_size: 128
test_throughput[release-pg16-50-None-30-100-128-batchable None].counters.time: 0.2604
test_throughput[release-pg16-50-None-30-100-128-batchable None].counters.pageserver_getpage_count: 6,401.5391
test_throughput[release-pg16-50-None-30-100-128-batchable None].counters.pageserver_vectored_get_count: 307.7217
test_throughput[release-pg16-50-None-30-100-128-batchable None].counters.compute_getpage_count: 6,401.5391
test_throughput[release-pg16-50-None-30-100-128-batchable None].counters.pageserver_cpu_seconds_total: 0.3023
test_throughput[release-pg16-50-None-30-100-128-batchable None].perfmetric.batching_factor: 20.8030
test_throughput[release-pg16-50-pipelining_config6-30-100-128-batchable {'max_batch_size': 1, 'protocol_pipelining_mode': 'concurrent-futures'}].tablesize_mib: 50 MiB
test_throughput[release-pg16-50-pipelining_config6-30-100-128-batchable {'max_batch_size': 1, 'protocol_pipelining_mode': 'concurrent-futures'}].pipelining_enabled: 1
test_throughput[release-pg16-50-pipelining_config6-30-100-128-batchable {'max_batch_size': 1, 'protocol_pipelining_mode': 'concurrent-futures'}].effective_io_concurrency: 100
test_throughput[release-pg16-50-pipelining_config6-30-100-128-batchable {'max_batch_size': 1, 'protocol_pipelining_mode': 'concurrent-futures'}].readhead_buffer_size: 128
test_throughput[release-pg16-50-pipelining_config6-30-100-128-batchable {'max_batch_size': 1, 'protocol_pipelining_mode': 'concurrent-futures'}].pipelining_config.max_batch_size: 1
test_throughput[release-pg16-50-pipelining_config6-30-100-128-batchable {'max_batch_size': 1, 'protocol_pipelining_mode': 'concurrent-futures'}].pipelining_config.protocol_pipelining_mode: concurrent-futures
test_throughput[release-pg16-50-pipelining_config6-30-100-128-batchable {'max_batch_size': 1, 'protocol_pipelining_mode': 'concurrent-futures'}].counters.time: 0.6268
test_throughput[release-pg16-50-pipelining_config6-30-100-128-batchable {'max_batch_size': 1, 'protocol_pipelining_mode': 'concurrent-futures'}].counters.pageserver_getpage_count: 6,403.0000
test_throughput[release-pg16-50-pipelining_config6-30-100-128-batchable {'max_batch_size': 1, 'protocol_pipelining_mode': 'concurrent-futures'}].counters.pageserver_vectored_get_count: 6,403.0000
test_throughput[release-pg16-50-pipelining_config6-30-100-128-batchable {'max_batch_size': 1, 'protocol_pipelining_mode': 'concurrent-futures'}].counters.compute_getpage_count: 6,403.0000
test_throughput[release-pg16-50-pipelining_config6-30-100-128-batchable {'max_batch_size': 1, 'protocol_pipelining_mode': 'concurrent-futures'}].counters.pageserver_cpu_seconds_total: 0.7596
test_throughput[release-pg16-50-pipelining_config6-30-100-128-batchable {'max_batch_size': 1, 'protocol_pipelining_mode': 'concurrent-futures'}].perfmetric.batching_factor: 1.0000
test_throughput[release-pg16-50-pipelining_config7-30-100-128-batchable {'max_batch_size': 1, 'protocol_pipelining_mode': 'tasks'}].tablesize_mib: 50 MiB
test_throughput[release-pg16-50-pipelining_config7-30-100-128-batchable {'max_batch_size': 1, 'protocol_pipelining_mode': 'tasks'}].pipelining_enabled: 1
test_throughput[release-pg16-50-pipelining_config7-30-100-128-batchable {'max_batch_size': 1, 'protocol_pipelining_mode': 'tasks'}].effective_io_concurrency: 100
test_throughput[release-pg16-50-pipelining_config7-30-100-128-batchable {'max_batch_size': 1, 'protocol_pipelining_mode': 'tasks'}].readhead_buffer_size: 128
test_throughput[release-pg16-50-pipelining_config7-30-100-128-batchable {'max_batch_size': 1, 'protocol_pipelining_mode': 'tasks'}].pipelining_config.max_batch_size: 1
test_throughput[release-pg16-50-pipelining_config7-30-100-128-batchable {'max_batch_size': 1, 'protocol_pipelining_mode': 'tasks'}].pipelining_config.protocol_pipelining_mode: tasks
test_throughput[release-pg16-50-pipelining_config7-30-100-128-batchable {'max_batch_size': 1, 'protocol_pipelining_mode': 'tasks'}].counters.time: 0.6696
test_throughput[release-pg16-50-pipelining_config7-30-100-128-batchable {'max_batch_size': 1, 'protocol_pipelining_mode': 'tasks'}].counters.pageserver_getpage_count: 6,403.0000
test_throughput[release-pg16-50-pipelining_config7-30-100-128-batchable {'max_batch_size': 1, 'protocol_pipelining_mode': 'tasks'}].counters.pageserver_vectored_get_count: 6,403.0000
test_throughput[release-pg16-50-pipelining_config7-30-100-128-batchable {'max_batch_size': 1, 'protocol_pipelining_mode': 'tasks'}].counters.compute_getpage_count: 6,403.0000
test_throughput[release-pg16-50-pipelining_config7-30-100-128-batchable {'max_batch_size': 1, 'protocol_pipelining_mode': 'tasks'}].counters.pageserver_cpu_seconds_total: 0.6684
test_throughput[release-pg16-50-pipelining_config7-30-100-128-batchable {'max_batch_size': 1, 'protocol_pipelining_mode': 'tasks'}].perfmetric.batching_factor: 1.0000
test_throughput[release-pg16-50-pipelining_config8-30-100-128-batchable {'max_batch_size': 2, 'protocol_pipelining_mode': 'concurrent-futures'}].tablesize_mib: 50 MiB
test_throughput[release-pg16-50-pipelining_config8-30-100-128-batchable {'max_batch_size': 2, 'protocol_pipelining_mode': 'concurrent-futures'}].pipelining_enabled: 1
test_throughput[release-pg16-50-pipelining_config8-30-100-128-batchable {'max_batch_size': 2, 'protocol_pipelining_mode': 'concurrent-futures'}].effective_io_concurrency: 100
test_throughput[release-pg16-50-pipelining_config8-30-100-128-batchable {'max_batch_size': 2, 'protocol_pipelining_mode': 'concurrent-futures'}].readhead_buffer_size: 128
test_throughput[release-pg16-50-pipelining_config8-30-100-128-batchable {'max_batch_size': 2, 'protocol_pipelining_mode': 'concurrent-futures'}].pipelining_config.max_batch_size: 2
test_throughput[release-pg16-50-pipelining_config8-30-100-128-batchable {'max_batch_size': 2, 'protocol_pipelining_mode': 'concurrent-futures'}].pipelining_config.protocol_pipelining_mode: concurrent-futures
test_throughput[release-pg16-50-pipelining_config8-30-100-128-batchable {'max_batch_size': 2, 'protocol_pipelining_mode': 'concurrent-futures'}].counters.time: 0.4530
test_throughput[release-pg16-50-pipelining_config8-30-100-128-batchable {'max_batch_size': 2, 'protocol_pipelining_mode': 'concurrent-futures'}].counters.pageserver_getpage_count: 6,402.6515
test_throughput[release-pg16-50-pipelining_config8-30-100-128-batchable {'max_batch_size': 2, 'protocol_pipelining_mode': 'concurrent-futures'}].counters.pageserver_vectored_get_count: 3,207.7121
test_throughput[release-pg16-50-pipelining_config8-30-100-128-batchable {'max_batch_size': 2, 'protocol_pipelining_mode': 'concurrent-futures'}].counters.compute_getpage_count: 6,402.6515
test_throughput[release-pg16-50-pipelining_config8-30-100-128-batchable {'max_batch_size': 2, 'protocol_pipelining_mode': 'concurrent-futures'}].counters.pageserver_cpu_seconds_total: 0.5427
test_throughput[release-pg16-50-pipelining_config8-30-100-128-batchable {'max_batch_size': 2, 'protocol_pipelining_mode': 'concurrent-futures'}].perfmetric.batching_factor: 1.9960
test_throughput[release-pg16-50-pipelining_config9-30-100-128-batchable {'max_batch_size': 2, 'protocol_pipelining_mode': 'tasks'}].tablesize_mib: 50 MiB
test_throughput[release-pg16-50-pipelining_config9-30-100-128-batchable {'max_batch_size': 2, 'protocol_pipelining_mode': 'tasks'}].pipelining_enabled: 1
test_throughput[release-pg16-50-pipelining_config9-30-100-128-batchable {'max_batch_size': 2, 'protocol_pipelining_mode': 'tasks'}].effective_io_concurrency: 100
test_throughput[release-pg16-50-pipelining_config9-30-100-128-batchable {'max_batch_size': 2, 'protocol_pipelining_mode': 'tasks'}].readhead_buffer_size: 128
test_throughput[release-pg16-50-pipelining_config9-30-100-128-batchable {'max_batch_size': 2, 'protocol_pipelining_mode': 'tasks'}].pipelining_config.max_batch_size: 2
test_throughput[release-pg16-50-pipelining_config9-30-100-128-batchable {'max_batch_size': 2, 'protocol_pipelining_mode': 'tasks'}].pipelining_config.protocol_pipelining_mode: tasks
test_throughput[release-pg16-50-pipelining_config9-30-100-128-batchable {'max_batch_size': 2, 'protocol_pipelining_mode': 'tasks'}].counters.time: 0.5434
test_throughput[release-pg16-50-pipelining_config9-30-100-128-batchable {'max_batch_size': 2, 'protocol_pipelining_mode': 'tasks'}].counters.pageserver_getpage_count: 6,403.0000
test_throughput[release-pg16-50-pipelining_config9-30-100-128-batchable {'max_batch_size': 2, 'protocol_pipelining_mode': 'tasks'}].counters.pageserver_vectored_get_count: 3,301.0000
test_throughput[release-pg16-50-pipelining_config9-30-100-128-batchable {'max_batch_size': 2, 'protocol_pipelining_mode': 'tasks'}].counters.compute_getpage_count: 6,403.0000
test_throughput[release-pg16-50-pipelining_config9-30-100-128-batchable {'max_batch_size': 2, 'protocol_pipelining_mode': 'tasks'}].counters.pageserver_cpu_seconds_total: 0.5318
test_throughput[release-pg16-50-pipelining_config9-30-100-128-batchable {'max_batch_size': 2, 'protocol_pipelining_mode': 'tasks'}].perfmetric.batching_factor: 1.9397
test_throughput[release-pg16-50-pipelining_config10-30-100-128-batchable {'max_batch_size': 4, 'protocol_pipelining_mode': 'concurrent-futures'}].tablesize_mib: 50 MiB
test_throughput[release-pg16-50-pipelining_config10-30-100-128-batchable {'max_batch_size': 4, 'protocol_pipelining_mode': 'concurrent-futures'}].pipelining_enabled: 1
test_throughput[release-pg16-50-pipelining_config10-30-100-128-batchable {'max_batch_size': 4, 'protocol_pipelining_mode': 'concurrent-futures'}].effective_io_concurrency: 100
test_throughput[release-pg16-50-pipelining_config10-30-100-128-batchable {'max_batch_size': 4, 'protocol_pipelining_mode': 'concurrent-futures'}].readhead_buffer_size: 128
test_throughput[release-pg16-50-pipelining_config10-30-100-128-batchable {'max_batch_size': 4, 'protocol_pipelining_mode': 'concurrent-futures'}].pipelining_config.max_batch_size: 4
test_throughput[release-pg16-50-pipelining_config10-30-100-128-batchable {'max_batch_size': 4, 'protocol_pipelining_mode': 'concurrent-futures'}].pipelining_config.protocol_pipelining_mode: concurrent-futures
test_throughput[release-pg16-50-pipelining_config10-30-100-128-batchable {'max_batch_size': 4, 'protocol_pipelining_mode': 'concurrent-futures'}].counters.time: 0.3455
test_throughput[release-pg16-50-pipelining_config10-30-100-128-batchable {'max_batch_size': 4, 'protocol_pipelining_mode': 'concurrent-futures'}].counters.pageserver_getpage_count: 6,402.0581
test_throughput[release-pg16-50-pipelining_config10-30-100-128-batchable {'max_batch_size': 4, 'protocol_pipelining_mode': 'concurrent-futures'}].counters.pageserver_vectored_get_count: 1,660.0349
test_throughput[release-pg16-50-pipelining_config10-30-100-128-batchable {'max_batch_size': 4, 'protocol_pipelining_mode': 'concurrent-futures'}].counters.compute_getpage_count: 6,402.0581
test_throughput[release-pg16-50-pipelining_config10-30-100-128-batchable {'max_batch_size': 4, 'protocol_pipelining_mode': 'concurrent-futures'}].counters.pageserver_cpu_seconds_total: 0.4078
test_throughput[release-pg16-50-pipelining_config10-30-100-128-batchable {'max_batch_size': 4, 'protocol_pipelining_mode': 'concurrent-futures'}].perfmetric.batching_factor: 3.8566
test_throughput[release-pg16-50-pipelining_config11-30-100-128-batchable {'max_batch_size': 4, 'protocol_pipelining_mode': 'tasks'}].tablesize_mib: 50 MiB
test_throughput[release-pg16-50-pipelining_config11-30-100-128-batchable {'max_batch_size': 4, 'protocol_pipelining_mode': 'tasks'}].pipelining_enabled: 1
test_throughput[release-pg16-50-pipelining_config11-30-100-128-batchable {'max_batch_size': 4, 'protocol_pipelining_mode': 'tasks'}].effective_io_concurrency: 100
test_throughput[release-pg16-50-pipelining_config11-30-100-128-batchable {'max_batch_size': 4, 'protocol_pipelining_mode': 'tasks'}].readhead_buffer_size: 128
test_throughput[release-pg16-50-pipelining_config11-30-100-128-batchable {'max_batch_size': 4, 'protocol_pipelining_mode': 'tasks'}].pipelining_config.max_batch_size: 4
test_throughput[release-pg16-50-pipelining_config11-30-100-128-batchable {'max_batch_size': 4, 'protocol_pipelining_mode': 'tasks'}].pipelining_config.protocol_pipelining_mode: tasks
test_throughput[release-pg16-50-pipelining_config11-30-100-128-batchable {'max_batch_size': 4, 'protocol_pipelining_mode': 'tasks'}].counters.time: 0.3785
test_throughput[release-pg16-50-pipelining_config11-30-100-128-batchable {'max_batch_size': 4, 'protocol_pipelining_mode': 'tasks'}].counters.pageserver_getpage_count: 6,402.2785
test_throughput[release-pg16-50-pipelining_config11-30-100-128-batchable {'max_batch_size': 4, 'protocol_pipelining_mode': 'tasks'}].counters.pageserver_vectored_get_count: 1,752.2785
test_throughput[release-pg16-50-pipelining_config11-30-100-128-batchable {'max_batch_size': 4, 'protocol_pipelining_mode': 'tasks'}].counters.compute_getpage_count: 6,402.2785
test_throughput[release-pg16-50-pipelining_config11-30-100-128-batchable {'max_batch_size': 4, 'protocol_pipelining_mode': 'tasks'}].counters.pageserver_cpu_seconds_total: 0.3705
test_throughput[release-pg16-50-pipelining_config11-30-100-128-batchable {'max_batch_size': 4, 'protocol_pipelining_mode': 'tasks'}].perfmetric.batching_factor: 3.6537
test_throughput[release-pg16-50-pipelining_config12-30-100-128-batchable {'max_batch_size': 8, 'protocol_pipelining_mode': 'concurrent-futures'}].tablesize_mib: 50 MiB
test_throughput[release-pg16-50-pipelining_config12-30-100-128-batchable {'max_batch_size': 8, 'protocol_pipelining_mode': 'concurrent-futures'}].pipelining_enabled: 1
test_throughput[release-pg16-50-pipelining_config12-30-100-128-batchable {'max_batch_size': 8, 'protocol_pipelining_mode': 'concurrent-futures'}].effective_io_concurrency: 100
test_throughput[release-pg16-50-pipelining_config12-30-100-128-batchable {'max_batch_size': 8, 'protocol_pipelining_mode': 'concurrent-futures'}].readhead_buffer_size: 128
test_throughput[release-pg16-50-pipelining_config12-30-100-128-batchable {'max_batch_size': 8, 'protocol_pipelining_mode': 'concurrent-futures'}].pipelining_config.max_batch_size: 8
test_throughput[release-pg16-50-pipelining_config12-30-100-128-batchable {'max_batch_size': 8, 'protocol_pipelining_mode': 'concurrent-futures'}].pipelining_config.protocol_pipelining_mode: concurrent-futures
test_throughput[release-pg16-50-pipelining_config12-30-100-128-batchable {'max_batch_size': 8, 'protocol_pipelining_mode': 'concurrent-futures'}].counters.time: 0.3063
test_throughput[release-pg16-50-pipelining_config12-30-100-128-batchable {'max_batch_size': 8, 'protocol_pipelining_mode': 'concurrent-futures'}].counters.pageserver_getpage_count: 6,401.8247
test_throughput[release-pg16-50-pipelining_config12-30-100-128-batchable {'max_batch_size': 8, 'protocol_pipelining_mode': 'concurrent-futures'}].counters.pageserver_vectored_get_count: 886.7629
test_throughput[release-pg16-50-pipelining_config12-30-100-128-batchable {'max_batch_size': 8, 'protocol_pipelining_mode': 'concurrent-futures'}].counters.compute_getpage_count: 6,401.8247
test_throughput[release-pg16-50-pipelining_config12-30-100-128-batchable {'max_batch_size': 8, 'protocol_pipelining_mode': 'concurrent-futures'}].counters.pageserver_cpu_seconds_total: 0.3537
test_throughput[release-pg16-50-pipelining_config12-30-100-128-batchable {'max_batch_size': 8, 'protocol_pipelining_mode': 'concurrent-futures'}].perfmetric.batching_factor: 7.2193
test_throughput[release-pg16-50-pipelining_config13-30-100-128-batchable {'max_batch_size': 8, 'protocol_pipelining_mode': 'tasks'}].tablesize_mib: 50 MiB
test_throughput[release-pg16-50-pipelining_config13-30-100-128-batchable {'max_batch_size': 8, 'protocol_pipelining_mode': 'tasks'}].pipelining_enabled: 1
test_throughput[release-pg16-50-pipelining_config13-30-100-128-batchable {'max_batch_size': 8, 'protocol_pipelining_mode': 'tasks'}].effective_io_concurrency: 100
test_throughput[release-pg16-50-pipelining_config13-30-100-128-batchable {'max_batch_size': 8, 'protocol_pipelining_mode': 'tasks'}].readhead_buffer_size: 128
test_throughput[release-pg16-50-pipelining_config13-30-100-128-batchable {'max_batch_size': 8, 'protocol_pipelining_mode': 'tasks'}].pipelining_config.max_batch_size: 8
test_throughput[release-pg16-50-pipelining_config13-30-100-128-batchable {'max_batch_size': 8, 'protocol_pipelining_mode': 'tasks'}].pipelining_config.protocol_pipelining_mode: tasks
test_throughput[release-pg16-50-pipelining_config13-30-100-128-batchable {'max_batch_size': 8, 'protocol_pipelining_mode': 'tasks'}].counters.time: 0.3365
test_throughput[release-pg16-50-pipelining_config13-30-100-128-batchable {'max_batch_size': 8, 'protocol_pipelining_mode': 'tasks'}].counters.pageserver_getpage_count: 6,401.9888
test_throughput[release-pg16-50-pipelining_config13-30-100-128-batchable {'max_batch_size': 8, 'protocol_pipelining_mode': 'tasks'}].counters.pageserver_vectored_get_count: 978.0000
test_throughput[release-pg16-50-pipelining_config13-30-100-128-batchable {'max_batch_size': 8, 'protocol_pipelining_mode': 'tasks'}].counters.compute_getpage_count: 6,401.9888
test_throughput[release-pg16-50-pipelining_config13-30-100-128-batchable {'max_batch_size': 8, 'protocol_pipelining_mode': 'tasks'}].counters.pageserver_cpu_seconds_total: 0.3256
test_throughput[release-pg16-50-pipelining_config13-30-100-128-batchable {'max_batch_size': 8, 'protocol_pipelining_mode': 'tasks'}].perfmetric.batching_factor: 6.5460
test_throughput[release-pg16-50-pipelining_config14-30-100-128-batchable {'max_batch_size': 16, 'protocol_pipelining_mode': 'concurrent-futures'}].tablesize_mib: 50 MiB
test_throughput[release-pg16-50-pipelining_config14-30-100-128-batchable {'max_batch_size': 16, 'protocol_pipelining_mode': 'concurrent-futures'}].pipelining_enabled: 1
test_throughput[release-pg16-50-pipelining_config14-30-100-128-batchable {'max_batch_size': 16, 'protocol_pipelining_mode': 'concurrent-futures'}].effective_io_concurrency: 100
test_throughput[release-pg16-50-pipelining_config14-30-100-128-batchable {'max_batch_size': 16, 'protocol_pipelining_mode': 'concurrent-futures'}].readhead_buffer_size: 128
test_throughput[release-pg16-50-pipelining_config14-30-100-128-batchable {'max_batch_size': 16, 'protocol_pipelining_mode': 'concurrent-futures'}].pipelining_config.max_batch_size: 16
test_throughput[release-pg16-50-pipelining_config14-30-100-128-batchable {'max_batch_size': 16, 'protocol_pipelining_mode': 'concurrent-futures'}].pipelining_config.protocol_pipelining_mode: concurrent-futures
test_throughput[release-pg16-50-pipelining_config14-30-100-128-batchable {'max_batch_size': 16, 'protocol_pipelining_mode': 'concurrent-futures'}].counters.time: 0.2730
test_throughput[release-pg16-50-pipelining_config14-30-100-128-batchable {'max_batch_size': 16, 'protocol_pipelining_mode': 'concurrent-futures'}].counters.pageserver_getpage_count: 6,401.6239
test_throughput[release-pg16-50-pipelining_config14-30-100-128-batchable {'max_batch_size': 16, 'protocol_pipelining_mode': 'concurrent-futures'}].counters.pageserver_vectored_get_count: 500.2936
test_throughput[release-pg16-50-pipelining_config14-30-100-128-batchable {'max_batch_size': 16, 'protocol_pipelining_mode': 'concurrent-futures'}].counters.compute_getpage_count: 6,401.6239
test_throughput[release-pg16-50-pipelining_config14-30-100-128-batchable {'max_batch_size': 16, 'protocol_pipelining_mode': 'concurrent-futures'}].counters.pageserver_cpu_seconds_total: 0.3162
test_throughput[release-pg16-50-pipelining_config14-30-100-128-batchable {'max_batch_size': 16, 'protocol_pipelining_mode': 'concurrent-futures'}].perfmetric.batching_factor: 12.7957
test_throughput[release-pg16-50-pipelining_config15-30-100-128-batchable {'max_batch_size': 16, 'protocol_pipelining_mode': 'tasks'}].tablesize_mib: 50 MiB
test_throughput[release-pg16-50-pipelining_config15-30-100-128-batchable {'max_batch_size': 16, 'protocol_pipelining_mode': 'tasks'}].pipelining_enabled: 1
test_throughput[release-pg16-50-pipelining_config15-30-100-128-batchable {'max_batch_size': 16, 'protocol_pipelining_mode': 'tasks'}].effective_io_concurrency: 100
test_throughput[release-pg16-50-pipelining_config15-30-100-128-batchable {'max_batch_size': 16, 'protocol_pipelining_mode': 'tasks'}].readhead_buffer_size: 128
test_throughput[release-pg16-50-pipelining_config15-30-100-128-batchable {'max_batch_size': 16, 'protocol_pipelining_mode': 'tasks'}].pipelining_config.max_batch_size: 16
test_throughput[release-pg16-50-pipelining_config15-30-100-128-batchable {'max_batch_size': 16, 'protocol_pipelining_mode': 'tasks'}].pipelining_config.protocol_pipelining_mode: tasks
test_throughput[release-pg16-50-pipelining_config15-30-100-128-batchable {'max_batch_size': 16, 'protocol_pipelining_mode': 'tasks'}].counters.time: 0.3091
test_throughput[release-pg16-50-pipelining_config15-30-100-128-batchable {'max_batch_size': 16, 'protocol_pipelining_mode': 'tasks'}].counters.pageserver_getpage_count: 6,401.8438
test_throughput[release-pg16-50-pipelining_config15-30-100-128-batchable {'max_batch_size': 16, 'protocol_pipelining_mode': 'tasks'}].counters.pageserver_vectored_get_count: 591.5312
test_throughput[release-pg16-50-pipelining_config15-30-100-128-batchable {'max_batch_size': 16, 'protocol_pipelining_mode': 'tasks'}].counters.compute_getpage_count: 6,401.8438
test_throughput[release-pg16-50-pipelining_config15-30-100-128-batchable {'max_batch_size': 16, 'protocol_pipelining_mode': 'tasks'}].counters.pageserver_cpu_seconds_total: 0.3022
test_throughput[release-pg16-50-pipelining_config15-30-100-128-batchable {'max_batch_size': 16, 'protocol_pipelining_mode': 'tasks'}].perfmetric.batching_factor: 10.8225
test_throughput[release-pg16-50-pipelining_config16-30-100-128-batchable {'max_batch_size': 32, 'protocol_pipelining_mode': 'concurrent-futures'}].tablesize_mib: 50 MiB
test_throughput[release-pg16-50-pipelining_config16-30-100-128-batchable {'max_batch_size': 32, 'protocol_pipelining_mode': 'concurrent-futures'}].pipelining_enabled: 1
test_throughput[release-pg16-50-pipelining_config16-30-100-128-batchable {'max_batch_size': 32, 'protocol_pipelining_mode': 'concurrent-futures'}].effective_io_concurrency: 100
test_throughput[release-pg16-50-pipelining_config16-30-100-128-batchable {'max_batch_size': 32, 'protocol_pipelining_mode': 'concurrent-futures'}].readhead_buffer_size: 128
test_throughput[release-pg16-50-pipelining_config16-30-100-128-batchable {'max_batch_size': 32, 'protocol_pipelining_mode': 'concurrent-futures'}].pipelining_config.max_batch_size: 32
test_throughput[release-pg16-50-pipelining_config16-30-100-128-batchable {'max_batch_size': 32, 'protocol_pipelining_mode': 'concurrent-futures'}].pipelining_config.protocol_pipelining_mode: concurrent-futures
test_throughput[release-pg16-50-pipelining_config16-30-100-128-batchable {'max_batch_size': 32, 'protocol_pipelining_mode': 'concurrent-futures'}].counters.time: 0.2609
test_throughput[release-pg16-50-pipelining_config16-30-100-128-batchable {'max_batch_size': 32, 'protocol_pipelining_mode': 'concurrent-futures'}].counters.pageserver_getpage_count: 6,401.5391
test_throughput[release-pg16-50-pipelining_config16-30-100-128-batchable {'max_batch_size': 32, 'protocol_pipelining_mode': 'concurrent-futures'}].counters.pageserver_vectored_get_count: 307.6174
test_throughput[release-pg16-50-pipelining_config16-30-100-128-batchable {'max_batch_size': 32, 'protocol_pipelining_mode': 'concurrent-futures'}].counters.compute_getpage_count: 6,401.5391
test_throughput[release-pg16-50-pipelining_config16-30-100-128-batchable {'max_batch_size': 32, 'protocol_pipelining_mode': 'concurrent-futures'}].counters.pageserver_cpu_seconds_total: 0.3014
test_throughput[release-pg16-50-pipelining_config16-30-100-128-batchable {'max_batch_size': 32, 'protocol_pipelining_mode': 'concurrent-futures'}].perfmetric.batching_factor: 20.8101
test_throughput[release-pg16-50-pipelining_config17-30-100-128-batchable {'max_batch_size': 32, 'protocol_pipelining_mode': 'tasks'}].tablesize_mib: 50 MiB
test_throughput[release-pg16-50-pipelining_config17-30-100-128-batchable {'max_batch_size': 32, 'protocol_pipelining_mode': 'tasks'}].pipelining_enabled: 1
test_throughput[release-pg16-50-pipelining_config17-30-100-128-batchable {'max_batch_size': 32, 'protocol_pipelining_mode': 'tasks'}].effective_io_concurrency: 100
test_throughput[release-pg16-50-pipelining_config17-30-100-128-batchable {'max_batch_size': 32, 'protocol_pipelining_mode': 'tasks'}].readhead_buffer_size: 128
test_throughput[release-pg16-50-pipelining_config17-30-100-128-batchable {'max_batch_size': 32, 'protocol_pipelining_mode': 'tasks'}].pipelining_config.max_batch_size: 32
test_throughput[release-pg16-50-pipelining_config17-30-100-128-batchable {'max_batch_size': 32, 'protocol_pipelining_mode': 'tasks'}].pipelining_config.protocol_pipelining_mode: tasks
test_throughput[release-pg16-50-pipelining_config17-30-100-128-batchable {'max_batch_size': 32, 'protocol_pipelining_mode': 'tasks'}].counters.time: 0.2910
test_throughput[release-pg16-50-pipelining_config17-30-100-128-batchable {'max_batch_size': 32, 'protocol_pipelining_mode': 'tasks'}].counters.pageserver_getpage_count: 6,401.7184
test_throughput[release-pg16-50-pipelining_config17-30-100-128-batchable {'max_batch_size': 32, 'protocol_pipelining_mode': 'tasks'}].counters.pageserver_vectored_get_count: 398.4660
test_throughput[release-pg16-50-pipelining_config17-30-100-128-batchable {'max_batch_size': 32, 'protocol_pipelining_mode': 'tasks'}].counters.compute_getpage_count: 6,401.7184
test_throughput[release-pg16-50-pipelining_config17-30-100-128-batchable {'max_batch_size': 32, 'protocol_pipelining_mode': 'tasks'}].counters.pageserver_cpu_seconds_total: 0.2903
test_throughput[release-pg16-50-pipelining_config17-30-100-128-batchable {'max_batch_size': 32, 'protocol_pipelining_mode': 'tasks'}].perfmetric.batching_factor: 16.0659
test_latency[release-pg16-None-None].latency_mean: 0.120 ms
test_latency[release-pg16-None-None].latency_percentiles.p95: 0.151 ms
test_latency[release-pg16-None-None].latency_percentiles.p99: 0.172 ms
test_latency[release-pg16-None-None].latency_percentiles.p99.9: 0.276 ms
test_latency[release-pg16-None-None].latency_percentiles.p99.99: 0.609 ms
test_latency[release-pg16-pipelining_config1-{'max_batch_size': 1, 'protocol_pipelining_mode': 'concurrent-futures'}].latency_mean: 0.128 ms
test_latency[release-pg16-pipelining_config1-{'max_batch_size': 1, 'protocol_pipelining_mode': 'concurrent-futures'}].latency_percentiles.p95: 0.167 ms
test_latency[release-pg16-pipelining_config1-{'max_batch_size': 1, 'protocol_pipelining_mode': 'concurrent-futures'}].latency_percentiles.p99: 0.186 ms
test_latency[release-pg16-pipelining_config1-{'max_batch_size': 1, 'protocol_pipelining_mode': 'concurrent-futures'}].latency_percentiles.p99.9: 0.294 ms
test_latency[release-pg16-pipelining_config1-{'max_batch_size': 1, 'protocol_pipelining_mode': 'concurrent-futures'}].latency_percentiles.p99.99: 0.642 ms
test_latency[release-pg16-pipelining_config2-{'max_batch_size': 1, 'protocol_pipelining_mode': 'tasks'}].latency_mean: 0.136 ms
test_latency[release-pg16-pipelining_config2-{'max_batch_size': 1, 'protocol_pipelining_mode': 'tasks'}].latency_percentiles.p95: 0.170 ms
test_latency[release-pg16-pipelining_config2-{'max_batch_size': 1, 'protocol_pipelining_mode': 'tasks'}].latency_percentiles.p99: 0.185 ms
test_latency[release-pg16-pipelining_config2-{'max_batch_size': 1, 'protocol_pipelining_mode': 'tasks'}].latency_percentiles.p99.9: 0.294 ms
test_latency[release-pg16-pipelining_config2-{'max_batch_size': 1, 'protocol_pipelining_mode': 'tasks'}].latency_percentiles.p99.99: 0.623 ms
test_latency[release-pg16-pipelining_config3-{'max_batch_size': 32, 'protocol_pipelining_mode': 'concurrent-futures'}].latency_mean: 0.117 ms
test_latency[release-pg16-pipelining_config3-{'max_batch_size': 32, 'protocol_pipelining_mode': 'concurrent-futures'}].latency_percentiles.p95: 0.156 ms
test_latency[release-pg16-pipelining_config3-{'max_batch_size': 32, 'protocol_pipelining_mode': 'concurrent-futures'}].latency_percentiles.p99: 0.174 ms
test_latency[release-pg16-pipelining_config3-{'max_batch_size': 32, 'protocol_pipelining_mode': 'concurrent-futures'}].latency_percentiles.p99.9: 0.279 ms
test_latency[release-pg16-pipelining_config3-{'max_batch_size': 32, 'protocol_pipelining_mode': 'concurrent-futures'}].latency_percentiles.p99.99: 0.598 ms
test_latency[release-pg16-pipelining_config4-{'max_batch_size': 32, 'protocol_pipelining_mode': 'tasks'}].latency_mean: 0.121 ms
test_latency[release-pg16-pipelining_config4-{'max_batch_size': 32, 'protocol_pipelining_mode': 'tasks'}].latency_percentiles.p95: 0.141 ms
test_latency[release-pg16-pipelining_config4-{'max_batch_size': 32, 'protocol_pipelining_mode': 'tasks'}].latency_percentiles.p99: 0.156 ms
test_latency[release-pg16-pipelining_config4-{'max_batch_size': 32, 'protocol_pipelining_mode': 'tasks'}].latency_percentiles.p99.9: 0.256 ms
test_latency[release-pg16-pipelining_config4-{'max_batch_size': 32, 'protocol_pipelining_mode': 'tasks'}].latency_percentiles.p99.99: 0.518 ms
2024-11-26 13:56:46 +01:00
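
The throughput matrix in this run compares protocol_pipelining_mode = 'concurrent-futures' against 'tasks' at each max_batch_size. As a rough illustration only of what those two modes mean in general — this is a minimal tokio sketch, not the pageserver implementation (which, per the commit messages, wires a batcher and an executor together via spsc_fold), and every name below is invented for the example (assumes tokio = { version = "1", features = ["full"] }):

    use tokio::sync::mpsc;

    // Stand-in for the stage that reads requests off the wire and forms batches.
    async fn batcher(tx: mpsc::Sender<Vec<u32>>) {
        for batch in (0..4u32).map(|i| vec![i, i + 1]) {
            if tx.send(batch).await.is_err() {
                break; // downstream stage is gone
            }
        }
    }

    // Stand-in for the stage that executes batches and writes responses.
    async fn executor(mut rx: mpsc::Receiver<Vec<u32>>) {
        while let Some(batch) = rx.recv().await {
            println!("executing batch of {} requests", batch.len());
        }
    }

    #[tokio::main]
    async fn main() {
        // 'concurrent-futures' style: both stages polled concurrently on one task.
        let (tx, rx) = mpsc::channel(1);
        tokio::join!(batcher(tx), executor(rx));

        // 'tasks' style: each stage spawned as its own task.
        let (tx, rx) = mpsc::channel(1);
        let b = tokio::spawn(batcher(tx));
        let e = tokio::spawn(executor(rx));
        let _ = tokio::join!(b, e);
    }

The sketch shows only the structural difference in how the two pipeline stages are driven; it says nothing about their relative performance, which is what the counters above measure.
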
Christian Schwarz
a23abb2cc0 adopt spsc_fold 2024-11-26 13:30:40 +01:00
Christian Schwarz
9bf2618c45 implement spsc_fold 2024-11-26 13:27:28 +01:00
Christian Schwarz
99b664c9ed expand fix to tasks mode; add some comments 2024-11-25 11:51:58 +01:00
Christian Schwarz
b9477aa945 fix: batcher wouldn't shut down after executor exits 2024-11-25 11:28:30 +01:00
Christian Schwarz
0bb037240d logging to debug test_pageserver_restarts_under_worload 2024-11-25 10:36:48 +01:00
Christian Schwarz
6ec5ac1ac8 DO NOT MERGE: enable pipelining (32,concurrent-futures) by default so we get test suite coverage 2024-11-25 09:16:09 +01:00
Christian Schwarz
bd31f42f52 run benchmarks 2024-11-22 15:06:11 +01:00
Christian Schwarz
990e44dda4 longer target runtime 2024-11-22 14:37:01 +01:00
Christian Schwarz
cbe18393d3 Benchmark results (metal node, AMD Ryzen 9 7950X3D 16-Core Processor)
------------------------------------------ Benchmark results -------------------------------------------
test_throughput[release-pg16-50-None-5-1-128-not batchable None].tablesize_mib: 50 MiB
test_throughput[release-pg16-50-None-5-1-128-not batchable None].pipelining_enabled: 0
test_throughput[release-pg16-50-None-5-1-128-not batchable None].effective_io_concurrency: 1
test_throughput[release-pg16-50-None-5-1-128-not batchable None].readhead_buffer_size: 128
test_throughput[release-pg16-50-None-5-1-128-not batchable None].counters.time: 0.8347
test_throughput[release-pg16-50-None-5-1-128-not batchable None].counters.pageserver_getpage_count: 6,403.0000
test_throughput[release-pg16-50-None-5-1-128-not batchable None].counters.pageserver_vectored_get_count: 6,403.0000
test_throughput[release-pg16-50-None-5-1-128-not batchable None].counters.compute_getpage_count: 6,403.0000
test_throughput[release-pg16-50-None-5-1-128-not batchable None].counters.pageserver_cpu_seconds_total: 0.7400
test_throughput[release-pg16-50-None-5-1-128-not batchable None].perfmetric.batching_factor: 1.0000
test_throughput[release-pg16-50-pipelining_config1-5-1-128-not batchable {'max_batch_size': 1, 'protocol_pipelining_mode': 'concurrent-futures'}].tablesize_mib: 50 MiB
test_throughput[release-pg16-50-pipelining_config1-5-1-128-not batchable {'max_batch_size': 1, 'protocol_pipelining_mode': 'concurrent-futures'}].pipelining_enabled: 1
test_throughput[release-pg16-50-pipelining_config1-5-1-128-not batchable {'max_batch_size': 1, 'protocol_pipelining_mode': 'concurrent-futures'}].effective_io_concurrency: 1
test_throughput[release-pg16-50-pipelining_config1-5-1-128-not batchable {'max_batch_size': 1, 'protocol_pipelining_mode': 'concurrent-futures'}].readhead_buffer_size: 128
test_throughput[release-pg16-50-pipelining_config1-5-1-128-not batchable {'max_batch_size': 1, 'protocol_pipelining_mode': 'concurrent-futures'}].pipelining_config.max_batch_size: 1
test_throughput[release-pg16-50-pipelining_config1-5-1-128-not batchable {'max_batch_size': 1, 'protocol_pipelining_mode': 'concurrent-futures'}].pipelining_config.protocol_pipelining_mode: concurrent-futures
test_throughput[release-pg16-50-pipelining_config1-5-1-128-not batchable {'max_batch_size': 1, 'protocol_pipelining_mode': 'concurrent-futures'}].counters.time: 0.8589
test_throughput[release-pg16-50-pipelining_config1-5-1-128-not batchable {'max_batch_size': 1, 'protocol_pipelining_mode': 'concurrent-futures'}].counters.pageserver_getpage_count: 6,403.0000
test_throughput[release-pg16-50-pipelining_config1-5-1-128-not batchable {'max_batch_size': 1, 'protocol_pipelining_mode': 'concurrent-futures'}].counters.pageserver_vectored_get_count: 6,403.0000
test_throughput[release-pg16-50-pipelining_config1-5-1-128-not batchable {'max_batch_size': 1, 'protocol_pipelining_mode': 'concurrent-futures'}].counters.compute_getpage_count: 6,403.0000
test_throughput[release-pg16-50-pipelining_config1-5-1-128-not batchable {'max_batch_size': 1, 'protocol_pipelining_mode': 'concurrent-futures'}].counters.pageserver_cpu_seconds_total: 0.8740
test_throughput[release-pg16-50-pipelining_config1-5-1-128-not batchable {'max_batch_size': 1, 'protocol_pipelining_mode': 'concurrent-futures'}].perfmetric.batching_factor: 1.0000
test_throughput[release-pg16-50-pipelining_config2-5-1-128-not batchable {'max_batch_size': 1, 'protocol_pipelining_mode': 'tasks'}].tablesize_mib: 50 MiB
test_throughput[release-pg16-50-pipelining_config2-5-1-128-not batchable {'max_batch_size': 1, 'protocol_pipelining_mode': 'tasks'}].pipelining_enabled: 1
test_throughput[release-pg16-50-pipelining_config2-5-1-128-not batchable {'max_batch_size': 1, 'protocol_pipelining_mode': 'tasks'}].effective_io_concurrency: 1
test_throughput[release-pg16-50-pipelining_config2-5-1-128-not batchable {'max_batch_size': 1, 'protocol_pipelining_mode': 'tasks'}].readhead_buffer_size: 128
test_throughput[release-pg16-50-pipelining_config2-5-1-128-not batchable {'max_batch_size': 1, 'protocol_pipelining_mode': 'tasks'}].pipelining_config.max_batch_size: 1
test_throughput[release-pg16-50-pipelining_config2-5-1-128-not batchable {'max_batch_size': 1, 'protocol_pipelining_mode': 'tasks'}].pipelining_config.protocol_pipelining_mode: tasks
test_throughput[release-pg16-50-pipelining_config2-5-1-128-not batchable {'max_batch_size': 1, 'protocol_pipelining_mode': 'tasks'}].counters.time: 0.7986
test_throughput[release-pg16-50-pipelining_config2-5-1-128-not batchable {'max_batch_size': 1, 'protocol_pipelining_mode': 'tasks'}].counters.pageserver_getpage_count: 6,403.0000
test_throughput[release-pg16-50-pipelining_config2-5-1-128-not batchable {'max_batch_size': 1, 'protocol_pipelining_mode': 'tasks'}].counters.pageserver_vectored_get_count: 6,403.0000
test_throughput[release-pg16-50-pipelining_config2-5-1-128-not batchable {'max_batch_size': 1, 'protocol_pipelining_mode': 'tasks'}].counters.compute_getpage_count: 6,403.0000
test_throughput[release-pg16-50-pipelining_config2-5-1-128-not batchable {'max_batch_size': 1, 'protocol_pipelining_mode': 'tasks'}].counters.pageserver_cpu_seconds_total: 0.7067
test_throughput[release-pg16-50-pipelining_config2-5-1-128-not batchable {'max_batch_size': 1, 'protocol_pipelining_mode': 'tasks'}].perfmetric.batching_factor: 1.0000
test_throughput[release-pg16-50-pipelining_config3-5-1-128-not batchable {'max_batch_size': 32, 'protocol_pipelining_mode': 'concurrent-futures'}].tablesize_mib: 50 MiB
test_throughput[release-pg16-50-pipelining_config3-5-1-128-not batchable {'max_batch_size': 32, 'protocol_pipelining_mode': 'concurrent-futures'}].pipelining_enabled: 1
test_throughput[release-pg16-50-pipelining_config3-5-1-128-not batchable {'max_batch_size': 32, 'protocol_pipelining_mode': 'concurrent-futures'}].effective_io_concurrency: 1
test_throughput[release-pg16-50-pipelining_config3-5-1-128-not batchable {'max_batch_size': 32, 'protocol_pipelining_mode': 'concurrent-futures'}].readhead_buffer_size: 128
test_throughput[release-pg16-50-pipelining_config3-5-1-128-not batchable {'max_batch_size': 32, 'protocol_pipelining_mode': 'concurrent-futures'}].pipelining_config.max_batch_size: 32
test_throughput[release-pg16-50-pipelining_config3-5-1-128-not batchable {'max_batch_size': 32, 'protocol_pipelining_mode': 'concurrent-futures'}].pipelining_config.protocol_pipelining_mode: concurrent-futures
test_throughput[release-pg16-50-pipelining_config3-5-1-128-not batchable {'max_batch_size': 32, 'protocol_pipelining_mode': 'concurrent-futures'}].counters.time: 0.7606
test_throughput[release-pg16-50-pipelining_config3-5-1-128-not batchable {'max_batch_size': 32, 'protocol_pipelining_mode': 'concurrent-futures'}].counters.pageserver_getpage_count: 6,403.0000
test_throughput[release-pg16-50-pipelining_config3-5-1-128-not batchable {'max_batch_size': 32, 'protocol_pipelining_mode': 'concurrent-futures'}].counters.pageserver_vectored_get_count: 6,403.0000
test_throughput[release-pg16-50-pipelining_config3-5-1-128-not batchable {'max_batch_size': 32, 'protocol_pipelining_mode': 'concurrent-futures'}].counters.compute_getpage_count: 6,403.0000
test_throughput[release-pg16-50-pipelining_config3-5-1-128-not batchable {'max_batch_size': 32, 'protocol_pipelining_mode': 'concurrent-futures'}].counters.pageserver_cpu_seconds_total: 0.7700
test_throughput[release-pg16-50-pipelining_config3-5-1-128-not batchable {'max_batch_size': 32, 'protocol_pipelining_mode': 'concurrent-futures'}].perfmetric.batching_factor: 1.0000
test_throughput[release-pg16-50-pipelining_config4-5-1-128-not batchable {'max_batch_size': 32, 'protocol_pipelining_mode': 'tasks'}].tablesize_mib: 50 MiB
test_throughput[release-pg16-50-pipelining_config4-5-1-128-not batchable {'max_batch_size': 32, 'protocol_pipelining_mode': 'tasks'}].pipelining_enabled: 1
test_throughput[release-pg16-50-pipelining_config4-5-1-128-not batchable {'max_batch_size': 32, 'protocol_pipelining_mode': 'tasks'}].effective_io_concurrency: 1
test_throughput[release-pg16-50-pipelining_config4-5-1-128-not batchable {'max_batch_size': 32, 'protocol_pipelining_mode': 'tasks'}].readhead_buffer_size: 128
test_throughput[release-pg16-50-pipelining_config4-5-1-128-not batchable {'max_batch_size': 32, 'protocol_pipelining_mode': 'tasks'}].pipelining_config.max_batch_size: 32
test_throughput[release-pg16-50-pipelining_config4-5-1-128-not batchable {'max_batch_size': 32, 'protocol_pipelining_mode': 'tasks'}].pipelining_config.protocol_pipelining_mode: tasks
test_throughput[release-pg16-50-pipelining_config4-5-1-128-not batchable {'max_batch_size': 32, 'protocol_pipelining_mode': 'tasks'}].counters.time: 0.7943
test_throughput[release-pg16-50-pipelining_config4-5-1-128-not batchable {'max_batch_size': 32, 'protocol_pipelining_mode': 'tasks'}].counters.pageserver_getpage_count: 6,403.0000
test_throughput[release-pg16-50-pipelining_config4-5-1-128-not batchable {'max_batch_size': 32, 'protocol_pipelining_mode': 'tasks'}].counters.pageserver_vectored_get_count: 6,403.0000
test_throughput[release-pg16-50-pipelining_config4-5-1-128-not batchable {'max_batch_size': 32, 'protocol_pipelining_mode': 'tasks'}].counters.compute_getpage_count: 6,403.0000
test_throughput[release-pg16-50-pipelining_config4-5-1-128-not batchable {'max_batch_size': 32, 'protocol_pipelining_mode': 'tasks'}].counters.pageserver_cpu_seconds_total: 0.7083
test_throughput[release-pg16-50-pipelining_config4-5-1-128-not batchable {'max_batch_size': 32, 'protocol_pipelining_mode': 'tasks'}].perfmetric.batching_factor: 1.0000
test_throughput[release-pg16-50-None-5-100-128-batchable None].tablesize_mib: 50 MiB
test_throughput[release-pg16-50-None-5-100-128-batchable None].pipelining_enabled: 0
test_throughput[release-pg16-50-None-5-100-128-batchable None].effective_io_concurrency: 100
test_throughput[release-pg16-50-None-5-100-128-batchable None].readhead_buffer_size: 128
test_throughput[release-pg16-50-None-5-100-128-batchable None].counters.time: 0.5889
test_throughput[release-pg16-50-None-5-100-128-batchable None].counters.pageserver_getpage_count: 6,403.0000
test_throughput[release-pg16-50-None-5-100-128-batchable None].counters.pageserver_vectored_get_count: 6,403.0000
test_throughput[release-pg16-50-None-5-100-128-batchable None].counters.compute_getpage_count: 6,403.0000
test_throughput[release-pg16-50-None-5-100-128-batchable None].counters.pageserver_cpu_seconds_total: 0.5875
test_throughput[release-pg16-50-None-5-100-128-batchable None].perfmetric.batching_factor: 1.0000
test_throughput[release-pg16-50-pipelining_config6-5-100-128-batchable {'max_batch_size': 1, 'protocol_pipelining_mode': 'concurrent-futures'}].tablesize_mib: 50 MiB
test_throughput[release-pg16-50-pipelining_config6-5-100-128-batchable {'max_batch_size': 1, 'protocol_pipelining_mode': 'concurrent-futures'}].pipelining_enabled: 1
test_throughput[release-pg16-50-pipelining_config6-5-100-128-batchable {'max_batch_size': 1, 'protocol_pipelining_mode': 'concurrent-futures'}].effective_io_concurrency: 100
test_throughput[release-pg16-50-pipelining_config6-5-100-128-batchable {'max_batch_size': 1, 'protocol_pipelining_mode': 'concurrent-futures'}].readhead_buffer_size: 128
test_throughput[release-pg16-50-pipelining_config6-5-100-128-batchable {'max_batch_size': 1, 'protocol_pipelining_mode': 'concurrent-futures'}].pipelining_config.max_batch_size: 1
test_throughput[release-pg16-50-pipelining_config6-5-100-128-batchable {'max_batch_size': 1, 'protocol_pipelining_mode': 'concurrent-futures'}].pipelining_config.protocol_pipelining_mode: concurrent-futures
test_throughput[release-pg16-50-pipelining_config6-5-100-128-batchable {'max_batch_size': 1, 'protocol_pipelining_mode': 'concurrent-futures'}].counters.time: 0.5360
test_throughput[release-pg16-50-pipelining_config6-5-100-128-batchable {'max_batch_size': 1, 'protocol_pipelining_mode': 'concurrent-futures'}].counters.pageserver_getpage_count: 6,403.0000
test_throughput[release-pg16-50-pipelining_config6-5-100-128-batchable {'max_batch_size': 1, 'protocol_pipelining_mode': 'concurrent-futures'}].counters.pageserver_vectored_get_count: 6,403.0000
test_throughput[release-pg16-50-pipelining_config6-5-100-128-batchable {'max_batch_size': 1, 'protocol_pipelining_mode': 'concurrent-futures'}].counters.compute_getpage_count: 6,403.0000
test_throughput[release-pg16-50-pipelining_config6-5-100-128-batchable {'max_batch_size': 1, 'protocol_pipelining_mode': 'concurrent-futures'}].counters.pageserver_cpu_seconds_total: 0.6700
test_throughput[release-pg16-50-pipelining_config6-5-100-128-batchable {'max_batch_size': 1, 'protocol_pipelining_mode': 'concurrent-futures'}].perfmetric.batching_factor: 1.0000
test_throughput[release-pg16-50-pipelining_config7-5-100-128-batchable {'max_batch_size': 1, 'protocol_pipelining_mode': 'tasks'}].tablesize_mib: 50 MiB
test_throughput[release-pg16-50-pipelining_config7-5-100-128-batchable {'max_batch_size': 1, 'protocol_pipelining_mode': 'tasks'}].pipelining_enabled: 1
test_throughput[release-pg16-50-pipelining_config7-5-100-128-batchable {'max_batch_size': 1, 'protocol_pipelining_mode': 'tasks'}].effective_io_concurrency: 100
test_throughput[release-pg16-50-pipelining_config7-5-100-128-batchable {'max_batch_size': 1, 'protocol_pipelining_mode': 'tasks'}].readhead_buffer_size: 128
test_throughput[release-pg16-50-pipelining_config7-5-100-128-batchable {'max_batch_size': 1, 'protocol_pipelining_mode': 'tasks'}].pipelining_config.max_batch_size: 1
test_throughput[release-pg16-50-pipelining_config7-5-100-128-batchable {'max_batch_size': 1, 'protocol_pipelining_mode': 'tasks'}].pipelining_config.protocol_pipelining_mode: tasks
test_throughput[release-pg16-50-pipelining_config7-5-100-128-batchable {'max_batch_size': 1, 'protocol_pipelining_mode': 'tasks'}].counters.time: 0.6398
test_throughput[release-pg16-50-pipelining_config7-5-100-128-batchable {'max_batch_size': 1, 'protocol_pipelining_mode': 'tasks'}].counters.pageserver_getpage_count: 6,403.0000
test_throughput[release-pg16-50-pipelining_config7-5-100-128-batchable {'max_batch_size': 1, 'protocol_pipelining_mode': 'tasks'}].counters.pageserver_vectored_get_count: 6,403.0000
test_throughput[release-pg16-50-pipelining_config7-5-100-128-batchable {'max_batch_size': 1, 'protocol_pipelining_mode': 'tasks'}].counters.compute_getpage_count: 6,403.0000
test_throughput[release-pg16-50-pipelining_config7-5-100-128-batchable {'max_batch_size': 1, 'protocol_pipelining_mode': 'tasks'}].counters.pageserver_cpu_seconds_total: 0.6386
test_throughput[release-pg16-50-pipelining_config7-5-100-128-batchable {'max_batch_size': 1, 'protocol_pipelining_mode': 'tasks'}].perfmetric.batching_factor: 1.0000
test_throughput[release-pg16-50-pipelining_config8-5-100-128-batchable {'max_batch_size': 2, 'protocol_pipelining_mode': 'concurrent-futures'}].tablesize_mib: 50 MiB
test_throughput[release-pg16-50-pipelining_config8-5-100-128-batchable {'max_batch_size': 2, 'protocol_pipelining_mode': 'concurrent-futures'}].pipelining_enabled: 1
test_throughput[release-pg16-50-pipelining_config8-5-100-128-batchable {'max_batch_size': 2, 'protocol_pipelining_mode': 'concurrent-futures'}].effective_io_concurrency: 100
test_throughput[release-pg16-50-pipelining_config8-5-100-128-batchable {'max_batch_size': 2, 'protocol_pipelining_mode': 'concurrent-futures'}].readhead_buffer_size: 128
test_throughput[release-pg16-50-pipelining_config8-5-100-128-batchable {'max_batch_size': 2, 'protocol_pipelining_mode': 'concurrent-futures'}].pipelining_config.max_batch_size: 2
test_throughput[release-pg16-50-pipelining_config8-5-100-128-batchable {'max_batch_size': 2, 'protocol_pipelining_mode': 'concurrent-futures'}].pipelining_config.protocol_pipelining_mode: concurrent-futures
test_throughput[release-pg16-50-pipelining_config8-5-100-128-batchable {'max_batch_size': 2, 'protocol_pipelining_mode': 'concurrent-futures'}].counters.time: 0.4210
test_throughput[release-pg16-50-pipelining_config8-5-100-128-batchable {'max_batch_size': 2, 'protocol_pipelining_mode': 'concurrent-futures'}].counters.pageserver_getpage_count: 6,402.4545
test_throughput[release-pg16-50-pipelining_config8-5-100-128-batchable {'max_batch_size': 2, 'protocol_pipelining_mode': 'concurrent-futures'}].counters.pageserver_vectored_get_count: 3,207.0909
test_throughput[release-pg16-50-pipelining_config8-5-100-128-batchable {'max_batch_size': 2, 'protocol_pipelining_mode': 'concurrent-futures'}].counters.compute_getpage_count: 6,402.4545
test_throughput[release-pg16-50-pipelining_config8-5-100-128-batchable {'max_batch_size': 2, 'protocol_pipelining_mode': 'concurrent-futures'}].counters.pageserver_cpu_seconds_total: 0.5109
test_throughput[release-pg16-50-pipelining_config8-5-100-128-batchable {'max_batch_size': 2, 'protocol_pipelining_mode': 'concurrent-futures'}].perfmetric.batching_factor: 1.9963
test_throughput[release-pg16-50-pipelining_config9-5-100-128-batchable {'max_batch_size': 2, 'protocol_pipelining_mode': 'tasks'}].tablesize_mib: 50 MiB
test_throughput[release-pg16-50-pipelining_config9-5-100-128-batchable {'max_batch_size': 2, 'protocol_pipelining_mode': 'tasks'}].pipelining_enabled: 1
test_throughput[release-pg16-50-pipelining_config9-5-100-128-batchable {'max_batch_size': 2, 'protocol_pipelining_mode': 'tasks'}].effective_io_concurrency: 100
test_throughput[release-pg16-50-pipelining_config9-5-100-128-batchable {'max_batch_size': 2, 'protocol_pipelining_mode': 'tasks'}].readhead_buffer_size: 128
test_throughput[release-pg16-50-pipelining_config9-5-100-128-batchable {'max_batch_size': 2, 'protocol_pipelining_mode': 'tasks'}].pipelining_config.max_batch_size: 2
test_throughput[release-pg16-50-pipelining_config9-5-100-128-batchable {'max_batch_size': 2, 'protocol_pipelining_mode': 'tasks'}].pipelining_config.protocol_pipelining_mode: tasks
test_throughput[release-pg16-50-pipelining_config9-5-100-128-batchable {'max_batch_size': 2, 'protocol_pipelining_mode': 'tasks'}].counters.time: 0.4619
test_throughput[release-pg16-50-pipelining_config9-5-100-128-batchable {'max_batch_size': 2, 'protocol_pipelining_mode': 'tasks'}].counters.pageserver_getpage_count: 6,402.7000
test_throughput[release-pg16-50-pipelining_config9-5-100-128-batchable {'max_batch_size': 2, 'protocol_pipelining_mode': 'tasks'}].counters.pageserver_vectored_get_count: 3,300.7000
test_throughput[release-pg16-50-pipelining_config9-5-100-128-batchable {'max_batch_size': 2, 'protocol_pipelining_mode': 'tasks'}].counters.compute_getpage_count: 6,402.7000
test_throughput[release-pg16-50-pipelining_config9-5-100-128-batchable {'max_batch_size': 2, 'protocol_pipelining_mode': 'tasks'}].counters.pageserver_cpu_seconds_total: 0.4650
test_throughput[release-pg16-50-pipelining_config9-5-100-128-batchable {'max_batch_size': 2, 'protocol_pipelining_mode': 'tasks'}].perfmetric.batching_factor: 1.9398
test_throughput[release-pg16-50-pipelining_config10-5-100-128-batchable {'max_batch_size': 4, 'protocol_pipelining_mode': 'concurrent-futures'}].tablesize_mib: 50 MiB
test_throughput[release-pg16-50-pipelining_config10-5-100-128-batchable {'max_batch_size': 4, 'protocol_pipelining_mode': 'concurrent-futures'}].pipelining_enabled: 1
test_throughput[release-pg16-50-pipelining_config10-5-100-128-batchable {'max_batch_size': 4, 'protocol_pipelining_mode': 'concurrent-futures'}].effective_io_concurrency: 100
test_throughput[release-pg16-50-pipelining_config10-5-100-128-batchable {'max_batch_size': 4, 'protocol_pipelining_mode': 'concurrent-futures'}].readhead_buffer_size: 128
test_throughput[release-pg16-50-pipelining_config10-5-100-128-batchable {'max_batch_size': 4, 'protocol_pipelining_mode': 'concurrent-futures'}].pipelining_config.max_batch_size: 4
test_throughput[release-pg16-50-pipelining_config10-5-100-128-batchable {'max_batch_size': 4, 'protocol_pipelining_mode': 'concurrent-futures'}].pipelining_config.protocol_pipelining_mode: concurrent-futures
test_throughput[release-pg16-50-pipelining_config10-5-100-128-batchable {'max_batch_size': 4, 'protocol_pipelining_mode': 'concurrent-futures'}].counters.time: 0.3333
test_throughput[release-pg16-50-pipelining_config10-5-100-128-batchable {'max_batch_size': 4, 'protocol_pipelining_mode': 'concurrent-futures'}].counters.pageserver_getpage_count: 6,401.9286
test_throughput[release-pg16-50-pipelining_config10-5-100-128-batchable {'max_batch_size': 4, 'protocol_pipelining_mode': 'concurrent-futures'}].counters.pageserver_vectored_get_count: 1,659.2857
test_throughput[release-pg16-50-pipelining_config10-5-100-128-batchable {'max_batch_size': 4, 'protocol_pipelining_mode': 'concurrent-futures'}].counters.compute_getpage_count: 6,401.9286
test_throughput[release-pg16-50-pipelining_config10-5-100-128-batchable {'max_batch_size': 4, 'protocol_pipelining_mode': 'concurrent-futures'}].counters.pageserver_cpu_seconds_total: 0.3914
test_throughput[release-pg16-50-pipelining_config10-5-100-128-batchable {'max_batch_size': 4, 'protocol_pipelining_mode': 'concurrent-futures'}].perfmetric.batching_factor: 3.8582
test_throughput[release-pg16-50-pipelining_config11-5-100-128-batchable {'max_batch_size': 4, 'protocol_pipelining_mode': 'tasks'}].tablesize_mib: 50 MiB
test_throughput[release-pg16-50-pipelining_config11-5-100-128-batchable {'max_batch_size': 4, 'protocol_pipelining_mode': 'tasks'}].pipelining_enabled: 1
test_throughput[release-pg16-50-pipelining_config11-5-100-128-batchable {'max_batch_size': 4, 'protocol_pipelining_mode': 'tasks'}].effective_io_concurrency: 100
test_throughput[release-pg16-50-pipelining_config11-5-100-128-batchable {'max_batch_size': 4, 'protocol_pipelining_mode': 'tasks'}].readhead_buffer_size: 128
test_throughput[release-pg16-50-pipelining_config11-5-100-128-batchable {'max_batch_size': 4, 'protocol_pipelining_mode': 'tasks'}].pipelining_config.max_batch_size: 4
test_throughput[release-pg16-50-pipelining_config11-5-100-128-batchable {'max_batch_size': 4, 'protocol_pipelining_mode': 'tasks'}].pipelining_config.protocol_pipelining_mode: tasks
test_throughput[release-pg16-50-pipelining_config11-5-100-128-batchable {'max_batch_size': 4, 'protocol_pipelining_mode': 'tasks'}].counters.time: 0.3526
test_throughput[release-pg16-50-pipelining_config11-5-100-128-batchable {'max_batch_size': 4, 'protocol_pipelining_mode': 'tasks'}].counters.pageserver_getpage_count: 6,402.1429
test_throughput[release-pg16-50-pipelining_config11-5-100-128-batchable {'max_batch_size': 4, 'protocol_pipelining_mode': 'tasks'}].counters.pageserver_vectored_get_count: 1,752.1429
test_throughput[release-pg16-50-pipelining_config11-5-100-128-batchable {'max_batch_size': 4, 'protocol_pipelining_mode': 'tasks'}].counters.compute_getpage_count: 6,402.1429
test_throughput[release-pg16-50-pipelining_config11-5-100-128-batchable {'max_batch_size': 4, 'protocol_pipelining_mode': 'tasks'}].counters.pageserver_cpu_seconds_total: 0.3507
test_throughput[release-pg16-50-pipelining_config11-5-100-128-batchable {'max_batch_size': 4, 'protocol_pipelining_mode': 'tasks'}].perfmetric.batching_factor: 3.6539
test_throughput[release-pg16-50-pipelining_config12-5-100-128-batchable {'max_batch_size': 8, 'protocol_pipelining_mode': 'concurrent-futures'}].tablesize_mib: 50 MiB
test_throughput[release-pg16-50-pipelining_config12-5-100-128-batchable {'max_batch_size': 8, 'protocol_pipelining_mode': 'concurrent-futures'}].pipelining_enabled: 1
test_throughput[release-pg16-50-pipelining_config12-5-100-128-batchable {'max_batch_size': 8, 'protocol_pipelining_mode': 'concurrent-futures'}].effective_io_concurrency: 100
test_throughput[release-pg16-50-pipelining_config12-5-100-128-batchable {'max_batch_size': 8, 'protocol_pipelining_mode': 'concurrent-futures'}].readhead_buffer_size: 128
test_throughput[release-pg16-50-pipelining_config12-5-100-128-batchable {'max_batch_size': 8, 'protocol_pipelining_mode': 'concurrent-futures'}].pipelining_config.max_batch_size: 8
test_throughput[release-pg16-50-pipelining_config12-5-100-128-batchable {'max_batch_size': 8, 'protocol_pipelining_mode': 'concurrent-futures'}].pipelining_config.protocol_pipelining_mode: concurrent-futures
test_throughput[release-pg16-50-pipelining_config12-5-100-128-batchable {'max_batch_size': 8, 'protocol_pipelining_mode': 'concurrent-futures'}].counters.time: 0.2921
test_throughput[release-pg16-50-pipelining_config12-5-100-128-batchable {'max_batch_size': 8, 'protocol_pipelining_mode': 'concurrent-futures'}].counters.pageserver_getpage_count: 6,401.7647
test_throughput[release-pg16-50-pipelining_config12-5-100-128-batchable {'max_batch_size': 8, 'protocol_pipelining_mode': 'concurrent-futures'}].counters.pageserver_vectored_get_count: 885.9412
test_throughput[release-pg16-50-pipelining_config12-5-100-128-batchable {'max_batch_size': 8, 'protocol_pipelining_mode': 'concurrent-futures'}].counters.compute_getpage_count: 6,401.6471
test_throughput[release-pg16-50-pipelining_config12-5-100-128-batchable {'max_batch_size': 8, 'protocol_pipelining_mode': 'concurrent-futures'}].counters.pageserver_cpu_seconds_total: 0.3353
test_throughput[release-pg16-50-pipelining_config12-5-100-128-batchable {'max_batch_size': 8, 'protocol_pipelining_mode': 'concurrent-futures'}].perfmetric.batching_factor: 7.2259
test_throughput[release-pg16-50-pipelining_config13-5-100-128-batchable {'max_batch_size': 8, 'protocol_pipelining_mode': 'tasks'}].tablesize_mib: 50 MiB
test_throughput[release-pg16-50-pipelining_config13-5-100-128-batchable {'max_batch_size': 8, 'protocol_pipelining_mode': 'tasks'}].pipelining_enabled: 1
test_throughput[release-pg16-50-pipelining_config13-5-100-128-batchable {'max_batch_size': 8, 'protocol_pipelining_mode': 'tasks'}].effective_io_concurrency: 100
test_throughput[release-pg16-50-pipelining_config13-5-100-128-batchable {'max_batch_size': 8, 'protocol_pipelining_mode': 'tasks'}].readhead_buffer_size: 128
test_throughput[release-pg16-50-pipelining_config13-5-100-128-batchable {'max_batch_size': 8, 'protocol_pipelining_mode': 'tasks'}].pipelining_config.max_batch_size: 8
test_throughput[release-pg16-50-pipelining_config13-5-100-128-batchable {'max_batch_size': 8, 'protocol_pipelining_mode': 'tasks'}].pipelining_config.protocol_pipelining_mode: tasks
test_throughput[release-pg16-50-pipelining_config13-5-100-128-batchable {'max_batch_size': 8, 'protocol_pipelining_mode': 'tasks'}].counters.time: 0.3317
test_throughput[release-pg16-50-pipelining_config13-5-100-128-batchable {'max_batch_size': 8, 'protocol_pipelining_mode': 'tasks'}].counters.pageserver_getpage_count: 6,402.0000
test_throughput[release-pg16-50-pipelining_config13-5-100-128-batchable {'max_batch_size': 8, 'protocol_pipelining_mode': 'tasks'}].counters.pageserver_vectored_get_count: 978.0000
test_throughput[release-pg16-50-pipelining_config13-5-100-128-batchable {'max_batch_size': 8, 'protocol_pipelining_mode': 'tasks'}].counters.compute_getpage_count: 6,402.0000
test_throughput[release-pg16-50-pipelining_config13-5-100-128-batchable {'max_batch_size': 8, 'protocol_pipelining_mode': 'tasks'}].counters.pageserver_cpu_seconds_total: 0.3300
test_throughput[release-pg16-50-pipelining_config13-5-100-128-batchable {'max_batch_size': 8, 'protocol_pipelining_mode': 'tasks'}].perfmetric.batching_factor: 6.5460
test_throughput[release-pg16-50-pipelining_config14-5-100-128-batchable {'max_batch_size': 16, 'protocol_pipelining_mode': 'concurrent-futures'}].tablesize_mib: 50 MiB
test_throughput[release-pg16-50-pipelining_config14-5-100-128-batchable {'max_batch_size': 16, 'protocol_pipelining_mode': 'concurrent-futures'}].pipelining_enabled: 1
test_throughput[release-pg16-50-pipelining_config14-5-100-128-batchable {'max_batch_size': 16, 'protocol_pipelining_mode': 'concurrent-futures'}].effective_io_concurrency: 100
test_throughput[release-pg16-50-pipelining_config14-5-100-128-batchable {'max_batch_size': 16, 'protocol_pipelining_mode': 'concurrent-futures'}].readhead_buffer_size: 128
test_throughput[release-pg16-50-pipelining_config14-5-100-128-batchable {'max_batch_size': 16, 'protocol_pipelining_mode': 'concurrent-futures'}].pipelining_config.max_batch_size: 16
test_throughput[release-pg16-50-pipelining_config14-5-100-128-batchable {'max_batch_size': 16, 'protocol_pipelining_mode': 'concurrent-futures'}].pipelining_config.protocol_pipelining_mode: concurrent-futures
test_throughput[release-pg16-50-pipelining_config14-5-100-128-batchable {'max_batch_size': 16, 'protocol_pipelining_mode': 'concurrent-futures'}].counters.time: 0.2409
test_throughput[release-pg16-50-pipelining_config14-5-100-128-batchable {'max_batch_size': 16, 'protocol_pipelining_mode': 'concurrent-futures'}].counters.pageserver_getpage_count: 6,401.5000
test_throughput[release-pg16-50-pipelining_config14-5-100-128-batchable {'max_batch_size': 16, 'protocol_pipelining_mode': 'concurrent-futures'}].counters.pageserver_vectored_get_count: 499.8000
test_throughput[release-pg16-50-pipelining_config14-5-100-128-batchable {'max_batch_size': 16, 'protocol_pipelining_mode': 'concurrent-futures'}].counters.compute_getpage_count: 6,401.5000
test_throughput[release-pg16-50-pipelining_config14-5-100-128-batchable {'max_batch_size': 16, 'protocol_pipelining_mode': 'concurrent-futures'}].counters.pageserver_cpu_seconds_total: 0.2820
test_throughput[release-pg16-50-pipelining_config14-5-100-128-batchable {'max_batch_size': 16, 'protocol_pipelining_mode': 'concurrent-futures'}].perfmetric.batching_factor: 12.8081
test_throughput[release-pg16-50-pipelining_config15-5-100-128-batchable {'max_batch_size': 16, 'protocol_pipelining_mode': 'tasks'}].tablesize_mib: 50 MiB
test_throughput[release-pg16-50-pipelining_config15-5-100-128-batchable {'max_batch_size': 16, 'protocol_pipelining_mode': 'tasks'}].pipelining_enabled: 1
test_throughput[release-pg16-50-pipelining_config15-5-100-128-batchable {'max_batch_size': 16, 'protocol_pipelining_mode': 'tasks'}].effective_io_concurrency: 100
test_throughput[release-pg16-50-pipelining_config15-5-100-128-batchable {'max_batch_size': 16, 'protocol_pipelining_mode': 'tasks'}].readhead_buffer_size: 128
test_throughput[release-pg16-50-pipelining_config15-5-100-128-batchable {'max_batch_size': 16, 'protocol_pipelining_mode': 'tasks'}].pipelining_config.max_batch_size: 16
test_throughput[release-pg16-50-pipelining_config15-5-100-128-batchable {'max_batch_size': 16, 'protocol_pipelining_mode': 'tasks'}].pipelining_config.protocol_pipelining_mode: tasks
test_throughput[release-pg16-50-pipelining_config15-5-100-128-batchable {'max_batch_size': 16, 'protocol_pipelining_mode': 'tasks'}].counters.time: 0.2807
test_throughput[release-pg16-50-pipelining_config15-5-100-128-batchable {'max_batch_size': 16, 'protocol_pipelining_mode': 'tasks'}].counters.pageserver_getpage_count: 6,401.5882
test_throughput[release-pg16-50-pipelining_config15-5-100-128-batchable {'max_batch_size': 16, 'protocol_pipelining_mode': 'tasks'}].counters.pageserver_vectored_get_count: 590.6471
test_throughput[release-pg16-50-pipelining_config15-5-100-128-batchable {'max_batch_size': 16, 'protocol_pipelining_mode': 'tasks'}].counters.compute_getpage_count: 6,401.5882
test_throughput[release-pg16-50-pipelining_config15-5-100-128-batchable {'max_batch_size': 16, 'protocol_pipelining_mode': 'tasks'}].counters.pageserver_cpu_seconds_total: 0.2882
test_throughput[release-pg16-50-pipelining_config15-5-100-128-batchable {'max_batch_size': 16, 'protocol_pipelining_mode': 'tasks'}].perfmetric.batching_factor: 10.8383
test_throughput[release-pg16-50-pipelining_config16-5-100-128-batchable {'max_batch_size': 32, 'protocol_pipelining_mode': 'concurrent-futures'}].tablesize_mib: 50 MiB
test_throughput[release-pg16-50-pipelining_config16-5-100-128-batchable {'max_batch_size': 32, 'protocol_pipelining_mode': 'concurrent-futures'}].pipelining_enabled: 1
test_throughput[release-pg16-50-pipelining_config16-5-100-128-batchable {'max_batch_size': 32, 'protocol_pipelining_mode': 'concurrent-futures'}].effective_io_concurrency: 100
test_throughput[release-pg16-50-pipelining_config16-5-100-128-batchable {'max_batch_size': 32, 'protocol_pipelining_mode': 'concurrent-futures'}].readhead_buffer_size: 128
test_throughput[release-pg16-50-pipelining_config16-5-100-128-batchable {'max_batch_size': 32, 'protocol_pipelining_mode': 'concurrent-futures'}].pipelining_config.max_batch_size: 32
test_throughput[release-pg16-50-pipelining_config16-5-100-128-batchable {'max_batch_size': 32, 'protocol_pipelining_mode': 'concurrent-futures'}].pipelining_config.protocol_pipelining_mode: concurrent-futures
test_throughput[release-pg16-50-pipelining_config16-5-100-128-batchable {'max_batch_size': 32, 'protocol_pipelining_mode': 'concurrent-futures'}].counters.time: 0.2510
test_throughput[release-pg16-50-pipelining_config16-5-100-128-batchable {'max_batch_size': 32, 'protocol_pipelining_mode': 'concurrent-futures'}].counters.pageserver_getpage_count: 6,401.4211
test_throughput[release-pg16-50-pipelining_config16-5-100-128-batchable {'max_batch_size': 32, 'protocol_pipelining_mode': 'concurrent-futures'}].counters.pageserver_vectored_get_count: 307.3684
test_throughput[release-pg16-50-pipelining_config16-5-100-128-batchable {'max_batch_size': 32, 'protocol_pipelining_mode': 'concurrent-futures'}].counters.compute_getpage_count: 6,401.4211
test_throughput[release-pg16-50-pipelining_config16-5-100-128-batchable {'max_batch_size': 32, 'protocol_pipelining_mode': 'concurrent-futures'}].counters.pageserver_cpu_seconds_total: 0.2889
test_throughput[release-pg16-50-pipelining_config16-5-100-128-batchable {'max_batch_size': 32, 'protocol_pipelining_mode': 'concurrent-futures'}].perfmetric.batching_factor: 20.8265
test_throughput[release-pg16-50-pipelining_config17-5-100-128-batchable {'max_batch_size': 32, 'protocol_pipelining_mode': 'tasks'}].tablesize_mib: 50 MiB
test_throughput[release-pg16-50-pipelining_config17-5-100-128-batchable {'max_batch_size': 32, 'protocol_pipelining_mode': 'tasks'}].pipelining_enabled: 1
test_throughput[release-pg16-50-pipelining_config17-5-100-128-batchable {'max_batch_size': 32, 'protocol_pipelining_mode': 'tasks'}].effective_io_concurrency: 100
test_throughput[release-pg16-50-pipelining_config17-5-100-128-batchable {'max_batch_size': 32, 'protocol_pipelining_mode': 'tasks'}].readhead_buffer_size: 128
test_throughput[release-pg16-50-pipelining_config17-5-100-128-batchable {'max_batch_size': 32, 'protocol_pipelining_mode': 'tasks'}].pipelining_config.max_batch_size: 32
test_throughput[release-pg16-50-pipelining_config17-5-100-128-batchable {'max_batch_size': 32, 'protocol_pipelining_mode': 'tasks'}].pipelining_config.protocol_pipelining_mode: tasks
test_throughput[release-pg16-50-pipelining_config17-5-100-128-batchable {'max_batch_size': 32, 'protocol_pipelining_mode': 'tasks'}].counters.time: 0.2517
test_throughput[release-pg16-50-pipelining_config17-5-100-128-batchable {'max_batch_size': 32, 'protocol_pipelining_mode': 'tasks'}].counters.pageserver_getpage_count: 6,401.4211
test_throughput[release-pg16-50-pipelining_config17-5-100-128-batchable {'max_batch_size': 32, 'protocol_pipelining_mode': 'tasks'}].counters.pageserver_vectored_get_count: 397.4737
test_throughput[release-pg16-50-pipelining_config17-5-100-128-batchable {'max_batch_size': 32, 'protocol_pipelining_mode': 'tasks'}].counters.compute_getpage_count: 6,401.4211
test_throughput[release-pg16-50-pipelining_config17-5-100-128-batchable {'max_batch_size': 32, 'protocol_pipelining_mode': 'tasks'}].counters.pageserver_cpu_seconds_total: 0.2658
test_throughput[release-pg16-50-pipelining_config17-5-100-128-batchable {'max_batch_size': 32, 'protocol_pipelining_mode': 'tasks'}].perfmetric.batching_factor: 16.1053
test_latency[release-pg16-None-None].latency_mean: 0.117 ms
test_latency[release-pg16-None-None].latency_percentiles.p95: 0.153 ms
test_latency[release-pg16-None-None].latency_percentiles.p99: 0.162 ms
test_latency[release-pg16-None-None].latency_percentiles.p99.9: 0.248 ms
test_latency[release-pg16-None-None].latency_percentiles.p99.99: 0.429 ms
test_latency[release-pg16-pipelining_config1-{'max_batch_size': 1, 'protocol_pipelining_mode': 'concurrent-futures'}].latency_mean: 0.127 ms
test_latency[release-pg16-pipelining_config1-{'max_batch_size': 1, 'protocol_pipelining_mode': 'concurrent-futures'}].latency_percentiles.p95: 0.161 ms
test_latency[release-pg16-pipelining_config1-{'max_batch_size': 1, 'protocol_pipelining_mode': 'concurrent-futures'}].latency_percentiles.p99: 0.181 ms
test_latency[release-pg16-pipelining_config1-{'max_batch_size': 1, 'protocol_pipelining_mode': 'concurrent-futures'}].latency_percentiles.p99.9: 0.295 ms
test_latency[release-pg16-pipelining_config1-{'max_batch_size': 1, 'protocol_pipelining_mode': 'concurrent-futures'}].latency_percentiles.p99.99: 0.553 ms
test_latency[release-pg16-pipelining_config2-{'max_batch_size': 1, 'protocol_pipelining_mode': 'tasks'}].latency_mean: 0.123 ms
test_latency[release-pg16-pipelining_config2-{'max_batch_size': 1, 'protocol_pipelining_mode': 'tasks'}].latency_percentiles.p95: 0.162 ms
test_latency[release-pg16-pipelining_config2-{'max_batch_size': 1, 'protocol_pipelining_mode': 'tasks'}].latency_percentiles.p99: 0.174 ms
test_latency[release-pg16-pipelining_config2-{'max_batch_size': 1, 'protocol_pipelining_mode': 'tasks'}].latency_percentiles.p99.9: 0.264 ms
test_latency[release-pg16-pipelining_config2-{'max_batch_size': 1, 'protocol_pipelining_mode': 'tasks'}].latency_percentiles.p99.99: 0.499 ms
test_latency[release-pg16-pipelining_config3-{'max_batch_size': 32, 'protocol_pipelining_mode': 'concurrent-futures'}].latency_mean: 0.120 ms
test_latency[release-pg16-pipelining_config3-{'max_batch_size': 32, 'protocol_pipelining_mode': 'concurrent-futures'}].latency_percentiles.p95: 0.159 ms
test_latency[release-pg16-pipelining_config3-{'max_batch_size': 32, 'protocol_pipelining_mode': 'concurrent-futures'}].latency_percentiles.p99: 0.175 ms
test_latency[release-pg16-pipelining_config3-{'max_batch_size': 32, 'protocol_pipelining_mode': 'concurrent-futures'}].latency_percentiles.p99.9: 0.258 ms
test_latency[release-pg16-pipelining_config3-{'max_batch_size': 32, 'protocol_pipelining_mode': 'concurrent-futures'}].latency_percentiles.p99.99: 0.522 ms
test_latency[release-pg16-pipelining_config4-{'max_batch_size': 32, 'protocol_pipelining_mode': 'tasks'}].latency_mean: 0.116 ms
test_latency[release-pg16-pipelining_config4-{'max_batch_size': 32, 'protocol_pipelining_mode': 'tasks'}].latency_percentiles.p95: 0.131 ms
test_latency[release-pg16-pipelining_config4-{'max_batch_size': 32, 'protocol_pipelining_mode': 'tasks'}].latency_percentiles.p99: 0.150 ms
test_latency[release-pg16-pipelining_config4-{'max_batch_size': 32, 'protocol_pipelining_mode': 'tasks'}].latency_percentiles.p99.9: 0.404 ms
test_latency[release-pg16-pipelining_config4-{'max_batch_size': 32, 'protocol_pipelining_mode': 'tasks'}].latency_percentiles.p99.99: 0.499 ms
2024-11-22 14:33:55 +01:00
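The perfmetric.batching_factor in the rows above matches the ratio of pageserver_getpage_count to pageserver_vectored_get_count, i.e. the average number of getpage requests served per vectored read. For example, for pipelining_config16 (max_batch_size 32, concurrent-futures):

    batching_factor = pageserver_getpage_count / pageserver_vectored_get_count
                    = 6,401.4211 / 307.3684
                    ≈ 20.83

and for pipelining_config8 (max_batch_size 2, concurrent-futures): 6,402.4545 / 3,207.0909 ≈ 2.00.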
Christian Schwarz
5796f3ba57 fix test 2024-11-22 14:27:59 +01:00
Christian Schwarz
11dc7135b1 rename test file to test_page_service_batching 2024-11-22 13:19:12 +01:00
Christian Schwarz
d6e5a46015 eliminate the word batch and stale doc comments 2024-11-22 12:46:52 +01:00
Christian Schwarz
a28c54dac1 cosmetics 2024-11-22 12:44:31 +01:00
Christian Schwarz
ef502f8311 remove async-timer heritage 2024-11-22 12:43:55 +01:00
Christian Schwarz
39e45f9e51 improve tests 2024-11-22 12:27:38 +01:00
Christian Schwarz
c1e8347160 make configurable whether pipelining should use concurrent futures or tasks 2024-11-22 11:27:23 +01:00
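A minimal Rust sketch of what this configuration could look like, modeled on the parameter names that appear in the benchmark output above (pipelining_enabled, max_batch_size, protocol_pipelining_mode); the type and field names here are assumptions for illustration, not the actual pageserver types:

pub struct PageServicePipeliningConfig {
    pub max_batch_size: usize,
    pub protocol_pipelining_mode: ProtocolPipeliningMode,
}

pub enum ProtocolPipeliningMode {
    // Batcher and executor run as two futures polled concurrently
    // within the same tokio task.
    ConcurrentFutures,
    // Batcher and executor run as separate tokio tasks connected
    // by a channel.
    Tasks,
}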
Christian Schwarz
093674b2fb implement the serial mode 2024-11-22 09:53:08 +01:00
Christian Schwarz
0fa8ae3c0a WIP refactor to allow truly serial mode 2024-11-22 09:47:49 +01:00
Christian Schwarz
c1040bc25d task-based mode 2024-11-22 09:36:45 +01:00
Christian Schwarz
a3d1cf636b config changes to express pipelining config (not respected yet) 2024-11-22 08:36:17 +01:00
Christian Schwarz
89d9d16130 cherry-pick from problame/batching-benchmark while it's waiting for merge 2024-11-22 08:17:30 +01:00
Christian Schwarz
88fd8aed52 watch-based approach 2024-11-21 23:03:21 +01:00
Christian Schwarz
db9093f938 revert back to 'span fixes' commit 2024-11-21 22:07:05 +01:00
Christian Schwarz
240e48df59 improvements 2024-11-21 21:57:53 +01:00
Christian Schwarz
7680aa12a8 draft 2024-11-21 21:34:58 +01:00
Christian Schwarz
56de07154e fruitless debugging 2024-11-21 20:46:56 +01:00
Christian Schwarz
73046fdf5b span fixes 2024-11-21 20:21:55 +01:00
Christian Schwarz
408bc8fc71 cleanups 2024-11-21 19:42:43 +01:00
Christian Schwarz
345f8b6c3b fix ready_for_next_batch order 2024-11-21 19:11:57 +01:00
Christian Schwarz
aa1032aeff no need for cancel & ctx in pagestream_do_batch 2024-11-21 18:40:22 +01:00
Christian Schwarz
a1bb2e7bb0 WIP: pipelined batching 2024-11-21 18:33:34 +01:00
Christian Schwarz
09e7485004 Merge branch 'problame/merge-getpage-test' into problame/batching-timer 2024-11-21 11:28:12 +01:00
Christian Schwarz
058b35f884 Merge branch 'problame/batching-benchmark' into problame/merge-getpage-test 2024-11-21 11:27:16 +01:00
Christian Schwarz
ff0aa152f1 Merge remote-tracking branch 'origin/main' into problame/batching-benchmark 2024-11-21 11:25:23 +01:00
Christian Schwarz
3375f28990 pytest.approx; https://github.com/neondatabase/neon/pull/9820#discussion_r1850679974 2024-11-21 11:21:50 +01:00
Christian Schwarz
e82deb2ccc high-resolution CPU usage 2024-11-21 11:16:00 +01:00
Christian Schwarz
fa7ce2ca07 the final choice: async-timer 1.0beta15 with features=["tokio1"] 2024-11-21 11:15:02 +01:00
Christian Schwarz
89b6cb8eba Revert "vanilla tokio based timer impl based on tokio::time::Sleep"
This reverts commit 517dda849f.
2024-11-20 20:17:49 +01:00
Christian Schwarz
c68661dfb3 Revert "undo local modifications to benchmark"
This reverts commit 7be13bc5a6.
2024-11-20 19:53:06 +01:00
Christian Schwarz
517dda849f vanilla tokio based timer impl based on tokio::time::Sleep 2024-11-20 19:52:47 +01:00
Christian Schwarz
f22ad868cf Revert "tokio_timerfd::Delay based impl"
This reverts commit fcda7a72c6.
2024-11-20 19:45:37 +01:00
Christian Schwarz
fcda7a72c6 tokio_timerfd::Delay based impl
Performs just as well as the async-timer::Timer features=tokio1 impl.
Makes sense because it's the same thing that's happening under the hood.

https://www.notion.so/neondatabase/benchmarking-notes-143f189e004780c4a630cb5f426e39ba?pvs=4#144f189e004780ea9decc82281f6b8d1
2024-11-20 19:42:00 +01:00
Christian Schwarz
469ce810fc Revert "async-timer based approach (again, with data)"
This reverts commit 689788cbba.
2024-11-20 19:40:24 +01:00
Christian Schwarz
21866faa8a Revert "try async-timer 1.0.0-beta15 (still signal-based timers)"
This reverts commit c73e9e40e9.
2024-11-20 19:37:51 +01:00
Christian Schwarz
cbb5817997 Revert "async-timer 1.0.0-beta15 with features=tokio1"
This reverts commit 68550f0f50.
2024-11-20 19:37:44 +01:00
Christian Schwarz
5f3e6f398c Revert "try interval-based impl to cross-check"
This reverts commit 721643beed.
2024-11-20 18:52:55 +01:00
Christian Schwarz
721643beed try interval-based impl to cross-check
=> zero batching

https://www.notion.so/neondatabase/benchmarking-notes-143f189e004780c4a630cb5f426e39ba?pvs=4#144f189e00478065a9b3e51726082885
2024-11-20 18:50:48 +01:00
Christian Schwarz
68550f0f50 async-timer 1.0.0-beta15 with features=tokio1
Best batching factor so far with no worse degradation of
un-batchable workloads than the other candidates.

https://www.notion.so/neondatabase/benchmarking-notes-143f189e004780c4a630cb5f426e39ba?pvs=4#144f189e004780c0921fe99e1da0e8c9
2024-11-20 18:41:31 +01:00
Christian Schwarz
c73e9e40e9 try async-timer 1.0.0-beta15 (still signal-based timers)
Results unchanged compared to 0.7.4

https://www.notion.so/neondatabase/benchmarking-notes-143f189e004780c4a630cb5f426e39ba?pvs=4#144f189e004780e18416cc0faf2aca65
2024-11-20 18:32:53 +01:00
Christian Schwarz
7be13bc5a6 undo local modifications to benchmark 2024-11-20 16:00:19 +01:00
Christian Schwarz
689788cbba async-timer based approach (again, with data)
Yep, it's clearly the best one, with the best batching factor at the lowest CPU usage.

https://www.notion.so/neondatabase/benchmarking-notes-143f189e004780c4a630cb5f426e39ba?pvs=4#144f189e004780d0a205e081458b46db
2024-11-20 15:36:10 +01:00
Christian Schwarz
f9bf038d2c Revert "tokio_timerfd::Interval"
This reverts commit 12124b28d0.
2024-11-20 15:25:52 +01:00
Christian Schwarz
12124b28d0 tokio_timerfd::Interval
Resolution not high enough to do _any_ batching at 10us or 20us

https://www.notion.so/neondatabase/benchmarking-notes-143f189e004780c4a630cb5f426e39ba?pvs=4#144f189e0047800fb74bd8f4ab6cf8e2
2024-11-20 15:25:14 +01:00
Christian Schwarz
1d85bec0ea Revert "tokio::time::Interval based approach"
This reverts commit 81d99704ee.
2024-11-20 15:13:26 +01:00
Christian Schwarz
81d99704ee tokio::time::Interval based approach
Batching at 10us doesn't work well enough; probably the future is ready too soon. The batching factor is just 1.5.

https://www.notion.so/neondatabase/benchmarking-notes-143f189e004780c4a630cb5f426e39ba?pvs=4#144f189e004780b79c8dd6d007dbb120
2024-11-20 15:13:11 +01:00
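A plausible explanation for the low batching factor (an assumption on my part, consistent with tokio's documented Interval semantics): the first tick of tokio::time::interval completes immediately, and later ticks follow a fixed period that is not anchored to the moment a batch starts forming, so the timeout arm tends to win before requests can accumulate. A tiny illustration:

use std::time::Duration;

#[tokio::main]
async fn main() {
    let mut ticker = tokio::time::interval(Duration::from_micros(10));
    ticker.tick().await; // completes immediately, not after 10us
    ticker.tick().await; // subsequent ticks follow a fixed 10us schedule,
                         // independent of when a batch started forming
}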
Christian Schwarz
f3ed5692ea Revert "async-timer based approach"
This reverts commit 1639b26002.
2024-11-20 14:49:09 +01:00
Christian Schwarz
1639b26002 async-timer based approach
With this, 10us batching timeout works, but it has some other wrinkles:

- it uses the signal-based timer APIs instead of going through epoll (=> timerfd)
- it needs to make a syscall for each batch, which costs around 1-2us, so probably significant CPU time is wasted on this.
2024-11-20 14:49:01 +01:00
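For context, the general shape of such a per-batch timeout might look like the sketch below (a minimal sketch, not the actual pageserver code; the Request type and execute_batch function are placeholders, and tokio::time::sleep stands in for the timer, which these commits swap for async-timer or tokio_timerfd::Delay precisely because the timer's resolution decides whether a 10us deadline is usable):

use std::time::Duration;
use tokio::sync::mpsc;

struct Request; // placeholder for a getpage request

async fn execute_batch(_batch: Vec<Request>) {
    // placeholder: issue one vectored get for the whole batch
}

async fn batch_with_deadline(mut rx: mpsc::Receiver<Request>, max_batch_size: usize) {
    while let Some(first) = rx.recv().await {
        // The deadline is anchored to the arrival of the batch's first request.
        let mut batch = vec![first];
        let timeout = tokio::time::sleep(Duration::from_micros(10));
        tokio::pin!(timeout);
        while batch.len() < max_batch_size {
            tokio::select! {
                maybe = rx.recv() => match maybe {
                    Some(req) => batch.push(req),
                    None => break, // sender side closed
                },
                _ = &mut timeout => break, // batch window elapsed
            }
        }
        execute_batch(batch).await;
    }
}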
Christian Schwarz
af95320a8c Revert "Revert "switch back to tokio::time::sleep, to get the numbers""
This reverts commit aa695b2ad7.
2024-11-20 14:25:05 +01:00
Christian Schwarz
b299eb19e2 fixup whitespace stuff 2024-11-20 14:23:55 +01:00
Christian Schwarz
88d52b31b7 Merge branch 'problame/batching-benchmark' into problame/merge-getpage-test 2024-11-20 14:23:22 +01:00
Christian Schwarz
aa695b2ad7 Revert "switch back to tokio::time::sleep, to get the numbers"
This reverts commit b9746168ff.
2024-11-20 14:22:31 +01:00
Christian Schwarz
b695907752 page_service: add benchmark for batching
This PR adds a benchmark to demonstrate the effect of server-side
getpage request batching added in https://github.com/neondatabase/neon/pull/9321.

Refs:

- Epic: https://github.com/neondatabase/neon/issues/9376
- Extracted from https://github.com/neondatabase/neon/pull/9792
2024-11-20 14:18:42 +01:00
Christian Schwarz
75041cb61b bench fixups 2024-11-20 13:59:21 +01:00
Christian Schwarz
e80ce970f7 collect CPU utilization 2024-11-20 13:55:05 +01:00
Christian Schwarz
f2de5b504f make it a proper benchmark 2024-11-20 13:52:05 +01:00
Christian Schwarz
b9746168ff switch back to tokio::time::sleep, to get the numbers
=> https://www.notion.so/neondatabase/benchmarking-notes-143f189e004780c4a630cb5f426e39ba?pvs=4#144f189e00478054b8a3e325735ffa19

=> unacceptable
2024-11-20 12:50:29 +01:00
Christian Schwarz
5cc0059088 parametrize more test 2024-11-20 12:48:47 +01:00
Christian Schwarz
911946a3cd fixes 2024-11-19 19:01:55 +01:00
Christian Schwarz
61ff84a3a2 compiles 2024-11-19 18:40:56 +01:00
Christian Schwarz
15e21c714b got it working and turn it more into a benchmark 2024-11-18 23:57:14 +01:00
Christian Schwarz
0689965282 WIP: page_service: add basic testcase for merging
The steps in the test work in neon_local + psql
but for some reason they don't work in the test.

Asked compute team on Slack for help:
https://neondb.slack.com/archives/C04DGM6SMTM/p1731952688386789
2024-11-18 23:57:14 +01:00
497 changed files with 8128 additions and 25902 deletions

View File

@@ -3,16 +3,6 @@
# by the RUSTDOCFLAGS env var in CI.
rustdocflags = ["-Arustdoc::private_intra_doc_links"]
# Enable frame pointers. This may have a minor performance overhead, but makes it easier and more
# efficient to obtain stack traces (and thus CPU/heap profiles). It may also avoid seg faults that
# we've seen with libunwind-based profiling. See also:
#
# * <https://www.brendangregg.com/blog/2024-03-17/the-return-of-the-frame-pointers.html>
# * <https://github.com/rust-lang/rust/pull/122646>
#
# NB: the RUSTFLAGS envvar will replace this. Make sure to update e.g. Dockerfile as well.
rustflags = ["-Cforce-frame-pointers=yes"]
[alias]
build_testing = ["build", "--features", "testing"]
neon = ["run", "--bin", "neon_local"]

View File

@@ -21,7 +21,3 @@ config-variables:
- SLACK_UPCOMING_RELEASE_CHANNEL_ID
- DEV_AWS_OIDC_ROLE_ARN
- BENCHMARK_INGEST_TARGET_PROJECTID
- PGREGRESS_PG16_PROJECT_ID
- PGREGRESS_PG17_PROJECT_ID
- SLACK_ON_CALL_QA_STAGING_STREAM
- DEV_AWS_OIDC_ROLE_MANAGE_BENCHMARK_EC2_VMS_ARN

View File

@@ -7,9 +7,10 @@ inputs:
type: boolean
required: false
default: false
aws-oicd-role-arn:
description: 'OIDC role arn to interract with S3'
required: true
aws_oicd_role_arn:
description: 'the OIDC role arn to (re-)acquire for allure report upload - if not set call must acquire OIDC role'
required: false
default: ''
outputs:
base-url:
@@ -42,8 +43,7 @@ runs:
PR_NUMBER=$(jq --raw-output .pull_request.number "$GITHUB_EVENT_PATH" || true)
if [ "${PR_NUMBER}" != "null" ]; then
BRANCH_OR_PR=pr-${PR_NUMBER}
elif [ "${GITHUB_REF_NAME}" = "main" ] || [ "${GITHUB_REF_NAME}" = "release" ] || \
[ "${GITHUB_REF_NAME}" = "release-proxy" ] || [ "${GITHUB_REF_NAME}" = "release-compute" ]; then
elif [ "${GITHUB_REF_NAME}" = "main" ] || [ "${GITHUB_REF_NAME}" = "release" ] || [ "${GITHUB_REF_NAME}" = "release-proxy" ]; then
# Shortcut for special branches
BRANCH_OR_PR=${GITHUB_REF_NAME}
else
@@ -83,11 +83,12 @@ runs:
ALLURE_VERSION: 2.27.0
ALLURE_ZIP_SHA256: b071858fb2fa542c65d8f152c5c40d26267b2dfb74df1f1608a589ecca38e777
- uses: aws-actions/configure-aws-credentials@v4
if: ${{ !cancelled() }}
- name: (Re-)configure AWS credentials # necessary to upload reports to S3 after a long-running test
if: ${{ !cancelled() && (inputs.aws_oicd_role_arn != '') }}
uses: aws-actions/configure-aws-credentials@v4
with:
aws-region: eu-central-1
role-to-assume: ${{ inputs.aws-oicd-role-arn }}
role-to-assume: ${{ inputs.aws_oicd_role_arn }}
role-duration-seconds: 3600 # 1 hour should be more than enough to upload report
# Potentially we could have several running build for the same key (for example, for the main branch), so we use improvised lock for this

View File

@@ -8,9 +8,10 @@ inputs:
unique-key:
description: 'string to distinguish different results in the same run'
required: true
aws-oicd-role-arn:
description: 'OIDC role arn to interract with S3'
required: true
aws_oicd_role_arn:
description: 'the OIDC role arn to (re-)acquire for allure report upload - if not set call must acquire OIDC role'
required: false
default: ''
runs:
using: "composite"
@@ -22,8 +23,7 @@ runs:
PR_NUMBER=$(jq --raw-output .pull_request.number "$GITHUB_EVENT_PATH" || true)
if [ "${PR_NUMBER}" != "null" ]; then
BRANCH_OR_PR=pr-${PR_NUMBER}
elif [ "${GITHUB_REF_NAME}" = "main" ] || [ "${GITHUB_REF_NAME}" = "release" ] || \
[ "${GITHUB_REF_NAME}" = "release-proxy" ] || [ "${GITHUB_REF_NAME}" = "release-compute" ]; then
elif [ "${GITHUB_REF_NAME}" = "main" ] || [ "${GITHUB_REF_NAME}" = "release" ] || [ "${GITHUB_REF_NAME}" = "release-proxy" ]; then
# Shortcut for special branches
BRANCH_OR_PR=${GITHUB_REF_NAME}
else
@@ -35,11 +35,12 @@ runs:
env:
REPORT_DIR: ${{ inputs.report-dir }}
- uses: aws-actions/configure-aws-credentials@v4
if: ${{ !cancelled() }}
- name: (Re-)configure AWS credentials # necessary to upload reports to S3 after a long-running test
if: ${{ !cancelled() && (inputs.aws_oicd_role_arn != '') }}
uses: aws-actions/configure-aws-credentials@v4
with:
aws-region: eu-central-1
role-to-assume: ${{ inputs.aws-oicd-role-arn }}
role-to-assume: ${{ inputs.aws_oicd_role_arn }}
role-duration-seconds: 3600 # 1 hour should be more than enough to upload report
- name: Upload test results

View File

@@ -15,19 +15,10 @@ inputs:
prefix:
description: "S3 prefix. Default is '${GITHUB_RUN_ID}/${GITHUB_RUN_ATTEMPT}'"
required: false
aws-oicd-role-arn:
description: 'OIDC role arn to interract with S3'
required: true
runs:
using: "composite"
steps:
- uses: aws-actions/configure-aws-credentials@v4
with:
aws-region: eu-central-1
role-to-assume: ${{ inputs.aws-oicd-role-arn }}
role-duration-seconds: 3600
- name: Download artifact
id: download-artifact
shell: bash -euxo pipefail {0}

View File

@@ -48,9 +48,10 @@ inputs:
description: 'benchmark durations JSON'
required: false
default: '{}'
aws-oicd-role-arn:
description: 'OIDC role arn to interract with S3'
required: true
aws_oicd_role_arn:
description: 'the OIDC role arn to (re-)acquire for allure report upload - if not set call must acquire OIDC role'
required: false
default: ''
runs:
using: "composite"
@@ -61,7 +62,6 @@ runs:
with:
name: neon-${{ runner.os }}-${{ runner.arch }}-${{ inputs.build_type }}-artifact
path: /tmp/neon
aws-oicd-role-arn: ${{ inputs.aws-oicd-role-arn }}
- name: Download Neon binaries for the previous release
if: inputs.build_type != 'remote'
@@ -70,7 +70,6 @@ runs:
name: neon-${{ runner.os }}-${{ runner.arch }}-${{ inputs.build_type }}-artifact
path: /tmp/neon-previous
prefix: latest
aws-oicd-role-arn: ${{ inputs.aws-oicd-role-arn }}
- name: Download compatibility snapshot
if: inputs.build_type != 'remote'
@@ -82,7 +81,6 @@ runs:
# The lack of compatibility snapshot (for example, for the new Postgres version)
# shouldn't fail the whole job. Only relevant test should fail.
skip-if-does-not-exist: true
aws-oicd-role-arn: ${{ inputs.aws-oicd-role-arn }}
- name: Checkout
if: inputs.needs_postgres_source == 'true'
@@ -220,19 +218,17 @@ runs:
# The lack of compatibility snapshot shouldn't fail the job
# (for example if we didn't run the test for non build-and-test workflow)
skip-if-does-not-exist: true
aws-oicd-role-arn: ${{ inputs.aws-oicd-role-arn }}
- uses: aws-actions/configure-aws-credentials@v4
if: ${{ !cancelled() }}
- name: (Re-)configure AWS credentials # necessary to upload reports to S3 after a long-running test
if: ${{ !cancelled() && (inputs.aws_oicd_role_arn != '') }}
uses: aws-actions/configure-aws-credentials@v4
with:
aws-region: eu-central-1
role-to-assume: ${{ inputs.aws-oicd-role-arn }}
role-to-assume: ${{ inputs.aws_oicd_role_arn }}
role-duration-seconds: 3600 # 1 hour should be more than enough to upload report
- name: Upload test results
if: ${{ !cancelled() }}
uses: ./.github/actions/allure-report-store
with:
report-dir: /tmp/test_output/allure/results
unique-key: ${{ inputs.build_type }}-${{ inputs.pg_version }}
aws-oicd-role-arn: ${{ inputs.aws-oicd-role-arn }}

View File

@@ -14,11 +14,9 @@ runs:
name: coverage-data-artifact
path: /tmp/coverage
skip-if-does-not-exist: true # skip if there's no previous coverage to download
aws-oicd-role-arn: ${{ inputs.aws-oicd-role-arn }}
- name: Upload coverage data
uses: ./.github/actions/upload
with:
name: coverage-data-artifact
path: /tmp/coverage
aws-oicd-role-arn: ${{ inputs.aws-oicd-role-arn }}

View File

@@ -14,10 +14,6 @@ inputs:
prefix:
description: "S3 prefix. Default is '${GITHUB_SHA}/${GITHUB_RUN_ID}/${GITHUB_RUN_ATTEMPT}'"
required: false
aws-oicd-role-arn:
description: "the OIDC role arn for aws auth"
required: false
default: ""
runs:
using: "composite"
@@ -57,13 +53,6 @@ runs:
echo 'SKIPPED=false' >> $GITHUB_OUTPUT
- name: Configure AWS credentials
uses: aws-actions/configure-aws-credentials@v4
with:
aws-region: eu-central-1
role-to-assume: ${{ inputs.aws-oicd-role-arn }}
role-duration-seconds: 3600
- name: Upload artifact
if: ${{ steps.prepare-artifact.outputs.SKIPPED == 'false' }}
shell: bash -euxo pipefail {0}

View File

@@ -1,12 +0,0 @@
rust_code: ['**/*.rs', '**/Cargo.toml', '**/Cargo.lock']
v14: ['vendor/postgres-v14/**', 'Makefile', 'pgxn/**']
v15: ['vendor/postgres-v15/**', 'Makefile', 'pgxn/**']
v16: ['vendor/postgres-v16/**', 'Makefile', 'pgxn/**']
v17: ['vendor/postgres-v17/**', 'Makefile', 'pgxn/**']
rebuild_neon_extra:
- .github/workflows/neon_extra_builds.yml
rebuild_macos:
- .github/workflows/build-macos.yml

View File

@@ -70,7 +70,6 @@ jobs:
name: neon-${{ runner.os }}-${{ runner.arch }}-release-artifact
path: /tmp/neon/
prefix: latest
aws-oicd-role-arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
# we create a table that has one row for each database that we want to restore with the status whether the restore is done
- name: Create benchmark_restore_status table if it does not exist

View File

@@ -31,13 +31,12 @@ defaults:
env:
RUST_BACKTRACE: 1
COPT: '-Werror'
AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_DEV }}
AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_KEY_DEV }}
jobs:
build-neon:
runs-on: ${{ fromJson(format('["self-hosted", "{0}"]', inputs.arch == 'arm64' && 'large-arm64' || 'large')) }}
permissions:
id-token: write # aws-actions/configure-aws-credentials
contents: read
container:
image: ${{ inputs.build-tools-image }}
credentials:
@@ -206,13 +205,6 @@ jobs:
done
fi
- name: Configure AWS credentials
uses: aws-actions/configure-aws-credentials@v4
with:
aws-region: eu-central-1
role-to-assume: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
role-duration-seconds: 18000 # 5 hours
- name: Run rust tests
env:
NEXTEST_RETRIES: 3
@@ -264,7 +256,6 @@ jobs:
with:
name: neon-${{ runner.os }}-${{ runner.arch }}-${{ inputs.build-type }}-artifact
path: /tmp/neon
aws-oicd-role-arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
# XXX: keep this after the binaries.list is formed, so the coverage can properly work later
- name: Merge and upload coverage data
@@ -274,10 +265,6 @@ jobs:
regress-tests:
# Don't run regression tests on debug arm64 builds
if: inputs.build-type != 'debug' || inputs.arch != 'arm64'
permissions:
id-token: write # aws-actions/configure-aws-credentials
contents: read
statuses: write
needs: [ build-neon ]
runs-on: ${{ fromJson(format('["self-hosted", "{0}"]', inputs.arch == 'arm64' && 'large-arm64' || 'large')) }}
container:
@@ -296,7 +283,7 @@ jobs:
submodules: true
- name: Pytest regression tests
continue-on-error: ${{ matrix.lfc_state == 'with-lfc' && inputs.build-type == 'debug' }}
continue-on-error: ${{ matrix.lfc_state == 'with-lfc' }}
uses: ./.github/actions/run-python-test-set
timeout-minutes: 60
with:
@@ -308,7 +295,6 @@ jobs:
real_s3_region: eu-central-1
rerun_failed: true
pg_version: ${{ matrix.pg_version }}
aws-oicd-role-arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
env:
TEST_RESULT_CONNSTR: ${{ secrets.REGRESS_TEST_RESULT_CONNSTR_NEW }}
CHECK_ONDISK_DATA_COMPATIBILITY: nonempty

View File

@@ -21,7 +21,7 @@ defaults:
shell: bash -euo pipefail {0}
jobs:
create-release-branch:
create-storage-release-branch:
runs-on: ubuntu-22.04
permissions:

View File

@@ -33,7 +33,7 @@ jobs:
# SC2086 - Double quote to prevent globbing and word splitting. - https://www.shellcheck.net/wiki/SC2086
SHELLCHECK_OPTS: --exclude=SC2046,SC2086
with:
fail_level: error
fail_on_error: true
filter_mode: nofilter
level: error

View File

@@ -105,7 +105,6 @@ jobs:
name: neon-${{ runner.os }}-${{ runner.arch }}-release-artifact
path: /tmp/neon/
prefix: latest
aws-oicd-role-arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
- name: Create Neon Project
id: create-neon-project
@@ -123,7 +122,7 @@ jobs:
run_in_parallel: false
save_perf_report: ${{ env.SAVE_PERF_REPORT }}
pg_version: ${{ env.DEFAULT_PG_VERSION }}
aws-oicd-role-arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
aws_oicd_role_arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
# Set --sparse-ordering option of pytest-order plugin
# to ensure tests are running in order of appears in the file.
# It's important for test_perf_pgbench.py::test_pgbench_remote_* tests
@@ -153,7 +152,7 @@ jobs:
if: ${{ !cancelled() }}
uses: ./.github/actions/allure-report-generate
with:
aws-oicd-role-arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
aws_oicd_role_arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
- name: Post to a Slack channel
if: ${{ github.event.schedule && failure() }}
@@ -205,7 +204,6 @@ jobs:
name: neon-${{ runner.os }}-${{ runner.arch }}-release-artifact
path: /tmp/neon/
prefix: latest
aws-oicd-role-arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
- name: Run Logical Replication benchmarks
uses: ./.github/actions/run-python-test-set
@@ -216,7 +214,7 @@ jobs:
save_perf_report: ${{ env.SAVE_PERF_REPORT }}
extra_params: -m remote_cluster --timeout 5400
pg_version: ${{ env.DEFAULT_PG_VERSION }}
aws-oicd-role-arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
aws_oicd_role_arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
env:
VIP_VAP_ACCESS_TOKEN: "${{ secrets.VIP_VAP_ACCESS_TOKEN }}"
PERF_TEST_RESULT_CONNSTR: "${{ secrets.PERF_TEST_RESULT_CONNSTR }}"
@@ -233,7 +231,7 @@ jobs:
save_perf_report: ${{ env.SAVE_PERF_REPORT }}
extra_params: -m remote_cluster --timeout 5400
pg_version: ${{ env.DEFAULT_PG_VERSION }}
aws-oicd-role-arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
aws_oicd_role_arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
env:
VIP_VAP_ACCESS_TOKEN: "${{ secrets.VIP_VAP_ACCESS_TOKEN }}"
PERF_TEST_RESULT_CONNSTR: "${{ secrets.PERF_TEST_RESULT_CONNSTR }}"
@@ -245,13 +243,13 @@ jobs:
uses: ./.github/actions/allure-report-generate
with:
store-test-results-into-db: true
aws-oicd-role-arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
aws_oicd_role_arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
env:
REGRESS_TEST_RESULT_CONNSTR_NEW: ${{ secrets.REGRESS_TEST_RESULT_CONNSTR_NEW }}
# Post both success and failure to the Slack channel
- name: Post to a Slack channel
if: ${{ github.event.schedule && !cancelled() }}
if: ${{ github.event.schedule }}
uses: slackapi/slack-github-action@v1
with:
channel-id: "C06T9AMNDQQ" # on-call-compute-staging-stream
@@ -308,7 +306,6 @@ jobs:
"image": [ "'"$image_default"'" ],
"include": [{ "pg_version": 16, "region_id": "'"$region_id_default"'", "platform": "neonvm-captest-freetier", "db_size": "3gb" ,"runner": '"$runner_default"', "image": "'"$image_default"'" },
{ "pg_version": 16, "region_id": "'"$region_id_default"'", "platform": "neonvm-captest-new", "db_size": "10gb","runner": '"$runner_default"', "image": "'"$image_default"'" },
{ "pg_version": 16, "region_id": "'"$region_id_default"'", "platform": "neonvm-captest-new-many-tables","db_size": "10gb","runner": '"$runner_default"', "image": "'"$image_default"'" },
{ "pg_version": 16, "region_id": "'"$region_id_default"'", "platform": "neonvm-captest-new", "db_size": "50gb","runner": '"$runner_default"', "image": "'"$image_default"'" },
{ "pg_version": 16, "region_id": "azure-eastus2", "platform": "neonvm-azure-captest-freetier", "db_size": "3gb" ,"runner": '"$runner_azure"', "image": "neondatabase/build-tools:pinned-bookworm" },
{ "pg_version": 16, "region_id": "azure-eastus2", "platform": "neonvm-azure-captest-new", "db_size": "10gb","runner": '"$runner_azure"', "image": "neondatabase/build-tools:pinned-bookworm" },
@@ -408,10 +405,9 @@ jobs:
name: neon-${{ runner.os }}-${{ runner.arch }}-release-artifact
path: /tmp/neon/
prefix: latest
aws-oicd-role-arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
- name: Create Neon Project
if: contains(fromJson('["neonvm-captest-new", "neonvm-captest-new-many-tables", "neonvm-captest-freetier", "neonvm-azure-captest-freetier", "neonvm-azure-captest-new"]'), matrix.platform)
if: contains(fromJson('["neonvm-captest-new", "neonvm-captest-freetier", "neonvm-azure-captest-freetier", "neonvm-azure-captest-new"]'), matrix.platform)
id: create-neon-project
uses: ./.github/actions/neon-project-create
with:
@@ -430,7 +426,7 @@ jobs:
neonvm-captest-sharding-reuse)
CONNSTR=${{ secrets.BENCHMARK_CAPTEST_SHARDING_CONNSTR }}
;;
neonvm-captest-new | neonvm-captest-new-many-tables | neonvm-captest-freetier | neonvm-azure-captest-new | neonvm-azure-captest-freetier)
neonvm-captest-new | neonvm-captest-freetier | neonvm-azure-captest-new | neonvm-azure-captest-freetier)
CONNSTR=${{ steps.create-neon-project.outputs.dsn }}
;;
rds-aurora)
@@ -447,26 +443,6 @@ jobs:
echo "connstr=${CONNSTR}" >> $GITHUB_OUTPUT
# we want to compare Neon project OLTP throughput and latency at scale factor 10 GB
# without (neonvm-captest-new)
# and with (neonvm-captest-new-many-tables) many relations in the database
- name: Create many relations before the run
if: contains(fromJson('["neonvm-captest-new-many-tables"]'), matrix.platform)
uses: ./.github/actions/run-python-test-set
with:
build_type: ${{ env.BUILD_TYPE }}
test_selection: performance
run_in_parallel: false
save_perf_report: ${{ env.SAVE_PERF_REPORT }}
extra_params: -m remote_cluster --timeout 21600 -k test_perf_many_relations
pg_version: ${{ env.DEFAULT_PG_VERSION }}
aws-oicd-role-arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
env:
BENCHMARK_CONNSTR: ${{ steps.set-up-connstr.outputs.connstr }}
VIP_VAP_ACCESS_TOKEN: "${{ secrets.VIP_VAP_ACCESS_TOKEN }}"
PERF_TEST_RESULT_CONNSTR: "${{ secrets.PERF_TEST_RESULT_CONNSTR }}"
TEST_NUM_RELATIONS: 10000
- name: Benchmark init
uses: ./.github/actions/run-python-test-set
with:
@@ -476,7 +452,7 @@ jobs:
save_perf_report: ${{ env.SAVE_PERF_REPORT }}
extra_params: -m remote_cluster --timeout 21600 -k test_pgbench_remote_init
pg_version: ${{ env.DEFAULT_PG_VERSION }}
aws-oicd-role-arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
aws_oicd_role_arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
env:
BENCHMARK_CONNSTR: ${{ steps.set-up-connstr.outputs.connstr }}
VIP_VAP_ACCESS_TOKEN: "${{ secrets.VIP_VAP_ACCESS_TOKEN }}"
@@ -491,7 +467,7 @@ jobs:
save_perf_report: ${{ env.SAVE_PERF_REPORT }}
extra_params: -m remote_cluster --timeout 21600 -k test_pgbench_remote_simple_update
pg_version: ${{ env.DEFAULT_PG_VERSION }}
aws-oicd-role-arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
aws_oicd_role_arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
env:
BENCHMARK_CONNSTR: ${{ steps.set-up-connstr.outputs.connstr }}
VIP_VAP_ACCESS_TOKEN: "${{ secrets.VIP_VAP_ACCESS_TOKEN }}"
@@ -506,7 +482,7 @@ jobs:
save_perf_report: ${{ env.SAVE_PERF_REPORT }}
extra_params: -m remote_cluster --timeout 21600 -k test_pgbench_remote_select_only
pg_version: ${{ env.DEFAULT_PG_VERSION }}
aws-oicd-role-arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
aws_oicd_role_arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
env:
BENCHMARK_CONNSTR: ${{ steps.set-up-connstr.outputs.connstr }}
VIP_VAP_ACCESS_TOKEN: "${{ secrets.VIP_VAP_ACCESS_TOKEN }}"
@@ -524,7 +500,7 @@ jobs:
if: ${{ !cancelled() }}
uses: ./.github/actions/allure-report-generate
with:
aws-oicd-role-arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
aws_oicd_role_arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
- name: Post to a Slack channel
if: ${{ github.event.schedule && failure() }}
@@ -635,7 +611,7 @@ jobs:
save_perf_report: ${{ env.SAVE_PERF_REPORT }}
extra_params: -m remote_cluster --timeout 21600 -k test_pgvector_indexing
pg_version: ${{ env.DEFAULT_PG_VERSION }}
aws-oicd-role-arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
aws_oicd_role_arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
env:
VIP_VAP_ACCESS_TOKEN: "${{ secrets.VIP_VAP_ACCESS_TOKEN }}"
PERF_TEST_RESULT_CONNSTR: "${{ secrets.PERF_TEST_RESULT_CONNSTR }}"
@@ -650,7 +626,7 @@ jobs:
save_perf_report: ${{ env.SAVE_PERF_REPORT }}
extra_params: -m remote_cluster --timeout 21600
pg_version: ${{ env.DEFAULT_PG_VERSION }}
aws-oicd-role-arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
aws_oicd_role_arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
env:
BENCHMARK_CONNSTR: ${{ steps.set-up-connstr.outputs.connstr }}
VIP_VAP_ACCESS_TOKEN: "${{ secrets.VIP_VAP_ACCESS_TOKEN }}"
@@ -661,7 +637,7 @@ jobs:
if: ${{ !cancelled() }}
uses: ./.github/actions/allure-report-generate
with:
aws-oicd-role-arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
aws_oicd_role_arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
- name: Post to a Slack channel
if: ${{ github.event.schedule && failure() }}
@@ -732,7 +708,6 @@ jobs:
name: neon-${{ runner.os }}-${{ runner.arch }}-release-artifact
path: /tmp/neon/
prefix: latest
aws-oicd-role-arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
- name: Set up Connection String
id: set-up-connstr
@@ -764,7 +739,7 @@ jobs:
save_perf_report: ${{ env.SAVE_PERF_REPORT }}
extra_params: -m remote_cluster --timeout 43200 -k test_clickbench
pg_version: ${{ env.DEFAULT_PG_VERSION }}
aws-oicd-role-arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
aws_oicd_role_arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
env:
VIP_VAP_ACCESS_TOKEN: "${{ secrets.VIP_VAP_ACCESS_TOKEN }}"
PERF_TEST_RESULT_CONNSTR: "${{ secrets.PERF_TEST_RESULT_CONNSTR }}"
@@ -778,7 +753,7 @@ jobs:
if: ${{ !cancelled() }}
uses: ./.github/actions/allure-report-generate
with:
aws-oicd-role-arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
aws_oicd_role_arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
- name: Post to a Slack channel
if: ${{ github.event.schedule && failure() }}
@@ -843,7 +818,6 @@ jobs:
name: neon-${{ runner.os }}-${{ runner.arch }}-release-artifact
path: /tmp/neon/
prefix: latest
aws-oicd-role-arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
- name: Get Connstring Secret Name
run: |
@@ -882,7 +856,7 @@ jobs:
save_perf_report: ${{ env.SAVE_PERF_REPORT }}
extra_params: -m remote_cluster --timeout 21600 -k test_tpch
pg_version: ${{ env.DEFAULT_PG_VERSION }}
aws-oicd-role-arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
aws_oicd_role_arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
env:
VIP_VAP_ACCESS_TOKEN: "${{ secrets.VIP_VAP_ACCESS_TOKEN }}"
PERF_TEST_RESULT_CONNSTR: "${{ secrets.PERF_TEST_RESULT_CONNSTR }}"
@@ -894,7 +868,7 @@ jobs:
if: ${{ !cancelled() }}
uses: ./.github/actions/allure-report-generate
with:
aws-oicd-role-arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
aws_oicd_role_arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
- name: Post to a Slack channel
if: ${{ github.event.schedule && failure() }}
@@ -952,7 +926,6 @@ jobs:
name: neon-${{ runner.os }}-${{ runner.arch }}-release-artifact
path: /tmp/neon/
prefix: latest
aws-oicd-role-arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
- name: Set up Connection String
id: set-up-connstr
@@ -984,7 +957,7 @@ jobs:
save_perf_report: ${{ env.SAVE_PERF_REPORT }}
extra_params: -m remote_cluster --timeout 21600 -k test_user_examples
pg_version: ${{ env.DEFAULT_PG_VERSION }}
aws-oicd-role-arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
aws_oicd_role_arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
env:
VIP_VAP_ACCESS_TOKEN: "${{ secrets.VIP_VAP_ACCESS_TOKEN }}"
PERF_TEST_RESULT_CONNSTR: "${{ secrets.PERF_TEST_RESULT_CONNSTR }}"
@@ -995,7 +968,7 @@ jobs:
if: ${{ !cancelled() }}
uses: ./.github/actions/allure-report-generate
with:
aws-oicd-role-arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
aws_oicd_role_arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
- name: Post to a Slack channel
if: ${{ github.event.schedule && failure() }}


@@ -1,241 +0,0 @@
name: Check neon with MacOS builds
on:
workflow_call:
inputs:
pg_versions:
description: "Array of the pg versions to build for, for example: ['v14', 'v17']"
type: string
default: '[]'
required: false
rebuild_rust_code:
description: "Rebuild Rust code"
type: boolean
default: false
required: false
rebuild_everything:
description: "If true, rebuild for all versions"
type: boolean
default: false
required: false
env:
RUST_BACKTRACE: 1
COPT: '-Werror'
# TODO: move `check-*` and `files-changed` jobs to the "Caller" Workflow
# We should care about that, as GitHub has limitations:
# - You can connect up to four levels of workflows
# - You can call a maximum of 20 unique reusable workflows from a single workflow file.
# https://docs.github.com/en/actions/sharing-automations/reusing-workflows#limitations
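This file is itself a reusable workflow (note the `workflow_call` trigger above), so it counts toward those nesting limits. For orientation, a caller job invokes it roughly as sketched below; the input values are illustrative, not taken from the caller workflow:

jobs:
  check-macos-build:
    uses: ./.github/workflows/build-macos.yml
    with:
      pg_versions: '["v17"]'      # JSON array of versions to rebuild (illustrative)
      rebuild_rust_code: true
      rebuild_everything: false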
jobs:
build-pgxn:
if: |
(inputs.pg_versions != '[]' || inputs.rebuild_everything) && (
contains(github.event.pull_request.labels.*.name, 'run-extra-build-macos') ||
contains(github.event.pull_request.labels.*.name, 'run-extra-build-*') ||
github.ref_name == 'main'
)
timeout-minutes: 30
runs-on: macos-15
strategy:
matrix:
postgres-version: ${{ inputs.rebuild_everything && fromJson('["v14", "v15", "v16", "v17"]') || fromJSON(inputs.pg_versions) }}
env:
# Use release build only, to have less debug info around
# Hence keeping target/ (and general cache size) smaller
BUILD_TYPE: release
steps:
- name: Checkout main repo
uses: actions/checkout@v4
- name: Set pg ${{ matrix.postgres-version }} for caching
id: pg_rev
run: echo pg_rev=$(git rev-parse HEAD:vendor/postgres-${{ matrix.postgres-version }}) | tee -a "${GITHUB_OUTPUT}"
- name: Cache postgres ${{ matrix.postgres-version }} build
id: cache_pg
uses: actions/cache@v4
with:
path: pg_install/${{ matrix.postgres-version }}
key: v1-${{ runner.os }}-${{ runner.arch }}-${{ env.BUILD_TYPE }}-pg-${{ matrix.postgres-version }}-${{ steps.pg_rev.outputs.pg_rev }}-${{ hashFiles('Makefile') }}
- name: Checkout submodule vendor/postgres-${{ matrix.postgres-version }}
if: steps.cache_pg.outputs.cache-hit != 'true'
run: |
git submodule init vendor/postgres-${{ matrix.postgres-version }}
git submodule update --depth 1 --recursive
- name: Install build dependencies
if: steps.cache_pg.outputs.cache-hit != 'true'
run: |
brew install flex bison openssl protobuf icu4c
- name: Set extra env for macOS
if: steps.cache_pg.outputs.cache-hit != 'true'
run: |
echo 'LDFLAGS=-L/usr/local/opt/openssl@3/lib' >> $GITHUB_ENV
echo 'CPPFLAGS=-I/usr/local/opt/openssl@3/include' >> $GITHUB_ENV
- name: Build Postgres ${{ matrix.postgres-version }}
if: steps.cache_pg.outputs.cache-hit != 'true'
run: |
make postgres-${{ matrix.postgres-version }} -j$(sysctl -n hw.ncpu)
- name: Build Neon Pg Ext ${{ matrix.postgres-version }}
if: steps.cache_pg.outputs.cache-hit != 'true'
run: |
make "neon-pg-ext-${{ matrix.postgres-version }}" -j$(sysctl -n hw.ncpu)
- name: Get postgres headers ${{ matrix.postgres-version }}
if: steps.cache_pg.outputs.cache-hit != 'true'
run: |
make postgres-headers-${{ matrix.postgres-version }} -j$(sysctl -n hw.ncpu)
build-walproposer-lib:
if: |
(inputs.pg_versions != '[]' || inputs.rebuild_everything) && (
contains(github.event.pull_request.labels.*.name, 'run-extra-build-macos') ||
contains(github.event.pull_request.labels.*.name, 'run-extra-build-*') ||
github.ref_name == 'main'
)
timeout-minutes: 30
runs-on: macos-15
needs: [build-pgxn]
env:
# Use release build only, to have less debug info around
# Hence keeping target/ (and general cache size) smaller
BUILD_TYPE: release
steps:
- name: Checkout main repo
uses: actions/checkout@v4
- name: Set pg v17 for caching
id: pg_rev
run: echo pg_rev=$(git rev-parse HEAD:vendor/postgres-v17) | tee -a "${GITHUB_OUTPUT}"
- name: Cache postgres v17 build
id: cache_pg
uses: actions/cache@v4
with:
path: pg_install/v17
key: v1-${{ runner.os }}-${{ runner.arch }}-${{ env.BUILD_TYPE }}-pg-v17-${{ steps.pg_rev.outputs.pg_rev }}-${{ hashFiles('Makefile') }}
- name: Cache walproposer-lib
id: cache_walproposer_lib
uses: actions/cache@v4
with:
path: pg_install/build/walproposer-lib
key: v1-${{ runner.os }}-${{ runner.arch }}-${{ env.BUILD_TYPE }}-walproposer_lib-v17-${{ steps.pg_rev.outputs.pg_rev }}-${{ hashFiles('Makefile') }}
- name: Checkout submodule vendor/postgres-v17
if: steps.cache_walproposer_lib.outputs.cache-hit != 'true'
run: |
git submodule init vendor/postgres-v17
git submodule update --depth 1 --recursive
- name: Install build dependencies
if: steps.cache_walproposer_lib.outputs.cache-hit != 'true'
run: |
brew install flex bison openssl protobuf icu4c
- name: Set extra env for macOS
if: steps.cache_walproposer_lib.outputs.cache-hit != 'true'
run: |
echo 'LDFLAGS=-L/usr/local/opt/openssl@3/lib' >> $GITHUB_ENV
echo 'CPPFLAGS=-I/usr/local/opt/openssl@3/include' >> $GITHUB_ENV
- name: Build walproposer-lib (only for v17)
if: steps.cache_walproposer_lib.outputs.cache-hit != 'true'
run:
make walproposer-lib -j$(sysctl -n hw.ncpu)
cargo-build:
if: |
(inputs.pg_versions != '[]' || inputs.rebuild_rust_code || inputs.rebuild_everything) && (
contains(github.event.pull_request.labels.*.name, 'run-extra-build-macos') ||
contains(github.event.pull_request.labels.*.name, 'run-extra-build-*') ||
github.ref_name == 'main'
)
timeout-minutes: 30
runs-on: macos-15
needs: [build-pgxn, build-walproposer-lib]
env:
# Use release build only, to have less debug info around
# Hence keeping target/ (and general cache size) smaller
BUILD_TYPE: release
steps:
- name: Checkout main repo
uses: actions/checkout@v4
with:
submodules: true
- name: Set pg v14 for caching
id: pg_rev_v14
run: echo pg_rev=$(git rev-parse HEAD:vendor/postgres-v14) | tee -a "${GITHUB_OUTPUT}"
- name: Set pg v15 for caching
id: pg_rev_v15
run: echo pg_rev=$(git rev-parse HEAD:vendor/postgres-v15) | tee -a "${GITHUB_OUTPUT}"
- name: Set pg v16 for caching
id: pg_rev_v16
run: echo pg_rev=$(git rev-parse HEAD:vendor/postgres-v16) | tee -a "${GITHUB_OUTPUT}"
- name: Set pg v17 for caching
id: pg_rev_v17
run: echo pg_rev=$(git rev-parse HEAD:vendor/postgres-v17) | tee -a "${GITHUB_OUTPUT}"
- name: Cache postgres v14 build
id: cache_pg
uses: actions/cache@v4
with:
path: pg_install/v14
key: v1-${{ runner.os }}-${{ runner.arch }}-${{ env.BUILD_TYPE }}-pg-v14-${{ steps.pg_rev_v14.outputs.pg_rev }}-${{ hashFiles('Makefile') }}
- name: Cache postgres v15 build
id: cache_pg_v15
uses: actions/cache@v4
with:
path: pg_install/v15
key: v1-${{ runner.os }}-${{ runner.arch }}-${{ env.BUILD_TYPE }}-pg-v15-${{ steps.pg_rev_v15.outputs.pg_rev }}-${{ hashFiles('Makefile') }}
- name: Cache postgres v16 build
id: cache_pg_v16
uses: actions/cache@v4
with:
path: pg_install/v16
key: v1-${{ runner.os }}-${{ runner.arch }}-${{ env.BUILD_TYPE }}-pg-v16-${{ steps.pg_rev_v16.outputs.pg_rev }}-${{ hashFiles('Makefile') }}
- name: Cache postgres v17 build
id: cache_pg_v17
uses: actions/cache@v4
with:
path: pg_install/v17
key: v1-${{ runner.os }}-${{ runner.arch }}-${{ env.BUILD_TYPE }}-pg-v17-${{ steps.pg_rev_v17.outputs.pg_rev }}-${{ hashFiles('Makefile') }}
- name: Cache cargo deps (only for v17)
uses: actions/cache@v4
with:
path: |
~/.cargo/registry
!~/.cargo/registry/src
~/.cargo/git
target
key: v1-${{ runner.os }}-${{ runner.arch }}-cargo-${{ hashFiles('./Cargo.lock') }}-${{ hashFiles('./rust-toolchain.toml') }}-rust
- name: Cache walproposer-lib
id: cache_walproposer_lib
uses: actions/cache@v4
with:
path: pg_install/build/walproposer-lib
key: v1-${{ runner.os }}-${{ runner.arch }}-${{ env.BUILD_TYPE }}-walproposer_lib-v17-${{ steps.pg_rev_v17.outputs.pg_rev }}-${{ hashFiles('Makefile') }}
- name: Install build dependencies
run: |
brew install flex bison openssl protobuf icu4c
- name: Set extra env for macOS
run: |
echo 'LDFLAGS=-L/usr/local/opt/openssl@3/lib' >> $GITHUB_ENV
echo 'CPPFLAGS=-I/usr/local/opt/openssl@3/include' >> $GITHUB_ENV
- name: Run cargo build (only for v17)
run: PQ_LIB_DIR=$(pwd)/pg_install/v17/lib cargo build --all --release -j$(sysctl -n hw.ncpu)
- name: Check that no warnings are produced (only for v17)
run: ./run_clippy.sh


@@ -6,7 +6,6 @@ on:
- main
- release
- release-proxy
- release-compute
pull_request:
defaults:
@@ -21,6 +20,8 @@ concurrency:
env:
RUST_BACKTRACE: 1
COPT: '-Werror'
AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_DEV }}
AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_KEY_DEV }}
# A concurrency group that we use for e2e-tests runs, matches `concurrency.group` above with `github.repository` as a prefix
E2E_CONCURRENCY_GROUP: ${{ github.repository }}-e2e-tests-${{ github.ref_name }}-${{ github.ref_name == 'main' && github.sha || 'anysha' }}
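A workflow-level concurrency group of this shape is declared roughly as in the sketch below (an assumption based on the env value above; the group name is illustrative). Keying the group on `github.sha` for `main` gives every push to main its own group, so main runs are never cancelled by later pushes, while other branches keep only the latest run:

concurrency:
  group: e2e-tests-${{ github.ref_name }}-${{ github.ref_name == 'main' && github.sha || 'anysha' }}
  cancel-in-progress: true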
@@ -69,10 +70,8 @@ jobs:
echo "tag=release-$(git rev-list --count HEAD)" >> $GITHUB_OUTPUT
elif [[ "$GITHUB_REF_NAME" == "release-proxy" ]]; then
echo "tag=release-proxy-$(git rev-list --count HEAD)" >> $GITHUB_OUTPUT
elif [[ "$GITHUB_REF_NAME" == "release-compute" ]]; then
echo "tag=release-compute-$(git rev-list --count HEAD)" >> $GITHUB_OUTPUT
else
echo "GITHUB_REF_NAME (value '$GITHUB_REF_NAME') is not set to either 'main' or 'release', 'release-proxy', 'release-compute'"
echo "GITHUB_REF_NAME (value '$GITHUB_REF_NAME') is not set to either 'main' or 'release'"
echo "tag=$GITHUB_RUN_ID" >> $GITHUB_OUTPUT
fi
shell: bash
@@ -212,7 +211,7 @@ jobs:
fi
echo "CLIPPY_COMMON_ARGS=${CLIPPY_COMMON_ARGS}" >> $GITHUB_ENV
- name: Run cargo clippy (debug)
run: cargo hack --features default --ignore-unknown-features --feature-powerset clippy $CLIPPY_COMMON_ARGS
run: cargo hack --feature-powerset clippy $CLIPPY_COMMON_ARGS
- name: Check documentation generation
run: cargo doc --workspace --no-deps --document-private-items
@@ -253,15 +252,15 @@ jobs:
build-tools-image: ${{ needs.build-build-tools-image.outputs.image }}-bookworm
build-tag: ${{ needs.tag.outputs.build-tag }}
build-type: ${{ matrix.build-type }}
# Run tests on all Postgres versions in release builds and only on the latest version in debug builds.
# Run without LFC on v17 release and debug builds only. For all the other cases LFC is enabled.
# Run tests on all Postgres versions in release builds and only on the latest version in debug builds
# run without LFC on v17 release only
test-cfg: |
${{ matrix.build-type == 'release' && '[{"pg_version":"v14", "lfc_state": "with-lfc"},
{"pg_version":"v15", "lfc_state": "with-lfc"},
{"pg_version":"v16", "lfc_state": "with-lfc"},
{"pg_version":"v17", "lfc_state": "with-lfc"},
{"pg_version":"v17", "lfc_state": "without-lfc"}]'
|| '[{"pg_version":"v17", "lfc_state": "without-lfc" }]' }}
${{ matrix.build-type == 'release' && '[{"pg_version":"v14", "lfc_state": "without-lfc"},
{"pg_version":"v15", "lfc_state": "without-lfc"},
{"pg_version":"v16", "lfc_state": "without-lfc"},
{"pg_version":"v17", "lfc_state": "without-lfc"},
{"pg_version":"v17", "lfc_state": "with-lfc"}]'
|| '[{"pg_version":"v17", "lfc_state": "without-lfc"}]' }}
secrets: inherit
# Keep `benchmarks` job outside of `build-and-test-locally` workflow to make job failures non-blocking
@@ -303,11 +302,6 @@ jobs:
benchmarks:
if: github.ref_name == 'main' || contains(github.event.pull_request.labels.*.name, 'run-benchmarks')
needs: [ check-permissions, build-and-test-locally, build-build-tools-image, get-benchmarks-durations ]
permissions:
id-token: write # aws-actions/configure-aws-credentials
statuses: write
contents: write
pull-requests: write
runs-on: [ self-hosted, small ]
container:
image: ${{ needs.build-build-tools-image.outputs.image }}-bookworm
@@ -336,7 +330,6 @@ jobs:
extra_params: --splits 5 --group ${{ matrix.pytest_split_group }}
benchmark_durations: ${{ needs.get-benchmarks-durations.outputs.json }}
pg_version: v16
aws-oicd-role-arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
env:
VIP_VAP_ACCESS_TOKEN: "${{ secrets.VIP_VAP_ACCESS_TOKEN }}"
PERF_TEST_RESULT_CONNSTR: "${{ secrets.PERF_TEST_RESULT_CONNSTR }}"
@@ -349,11 +342,6 @@ jobs:
report-benchmarks-failures:
needs: [ benchmarks, create-test-report ]
if: github.ref_name == 'main' && failure() && needs.benchmarks.result == 'failure'
permissions:
id-token: write # aws-actions/configure-aws-credentials
statuses: write
contents: write
pull-requests: write
runs-on: ubuntu-22.04
steps:
@@ -369,11 +357,6 @@ jobs:
create-test-report:
needs: [ check-permissions, build-and-test-locally, coverage-report, build-build-tools-image, benchmarks ]
if: ${{ !cancelled() && contains(fromJSON('["skipped", "success"]'), needs.check-permissions.result) }}
permissions:
id-token: write # aws-actions/configure-aws-credentials
statuses: write
contents: write
pull-requests: write
outputs:
report-url: ${{ steps.create-allure-report.outputs.report-url }}
@@ -394,7 +377,6 @@ jobs:
uses: ./.github/actions/allure-report-generate
with:
store-test-results-into-db: true
aws-oicd-role-arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
env:
REGRESS_TEST_RESULT_CONNSTR_NEW: ${{ secrets.REGRESS_TEST_RESULT_CONNSTR_NEW }}
@@ -426,10 +408,6 @@ jobs:
coverage-report:
if: ${{ !startsWith(github.ref_name, 'release') }}
needs: [ check-permissions, build-build-tools-image, build-and-test-locally ]
permissions:
id-token: write # aws-actions/configure-aws-credentials
statuses: write
contents: write
runs-on: [ self-hosted, small ]
container:
image: ${{ needs.build-build-tools-image.outputs.image }}-bookworm
@@ -456,14 +434,12 @@ jobs:
with:
name: neon-${{ runner.os }}-${{ runner.arch }}-${{ matrix.build_type }}-artifact
path: /tmp/neon
aws-oicd-role-arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
- name: Get coverage artifact
uses: ./.github/actions/download
with:
name: coverage-data-artifact
path: /tmp/coverage
aws-oicd-role-arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
- name: Merge coverage data
run: scripts/coverage "--profraw-prefix=$GITHUB_JOB" --dir=/tmp/coverage merge
@@ -537,8 +513,8 @@ jobs:
})
trigger-e2e-tests:
if: ${{ !github.event.pull_request.draft || contains( github.event.pull_request.labels.*.name, 'run-e2e-tests-in-draft') || github.ref_name == 'main' || github.ref_name == 'release' || github.ref_name == 'release-proxy' || github.ref_name == 'release-compute' }}
needs: [ check-permissions, promote-images-dev, tag ]
if: ${{ !github.event.pull_request.draft || contains( github.event.pull_request.labels.*.name, 'run-e2e-tests-in-draft') || github.ref_name == 'main' || github.ref_name == 'release' || github.ref_name == 'release-proxy' }}
needs: [ check-permissions, promote-images, tag ]
uses: ./.github/workflows/trigger-e2e-tests.yml
secrets: inherit
@@ -594,10 +570,6 @@ jobs:
neon-image:
needs: [ neon-image-arch, tag ]
runs-on: ubuntu-22.04
permissions:
id-token: write # aws-actions/configure-aws-credentials
statuses: write
contents: read
steps:
- uses: docker/login-action@v3
@@ -612,15 +584,11 @@ jobs:
neondatabase/neon:${{ needs.tag.outputs.build-tag }}-bookworm-x64 \
neondatabase/neon:${{ needs.tag.outputs.build-tag }}-bookworm-arm64
- name: Configure AWS credentials
uses: aws-actions/configure-aws-credentials@v4
- uses: docker/login-action@v3
with:
aws-region: eu-central-1
role-to-assume: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
role-duration-seconds: 3600
- name: Login to Amazon Dev ECR
uses: aws-actions/amazon-ecr-login@v2
registry: 369495373322.dkr.ecr.eu-central-1.amazonaws.com
username: ${{ secrets.AWS_ACCESS_KEY_DEV }}
password: ${{ secrets.AWS_SECRET_KEY_DEV }}
- name: Push multi-arch image to ECR
run: |
@@ -629,10 +597,6 @@ jobs:
compute-node-image-arch:
needs: [ check-permissions, build-build-tools-image, tag ]
permissions:
id-token: write # aws-actions/configure-aws-credentials
statuses: write
contents: read
strategy:
fail-fast: false
matrix:
@@ -673,15 +637,11 @@ jobs:
username: ${{ secrets.NEON_DOCKERHUB_USERNAME }}
password: ${{ secrets.NEON_DOCKERHUB_PASSWORD }}
- name: Configure AWS credentials
uses: aws-actions/configure-aws-credentials@v4
- uses: docker/login-action@v3
with:
aws-region: eu-central-1
role-to-assume: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
role-duration-seconds: 3600
- name: Login to Amazon Dev ECR
uses: aws-actions/amazon-ecr-login@v2
registry: 369495373322.dkr.ecr.eu-central-1.amazonaws.com
username: ${{ secrets.AWS_ACCESS_KEY_DEV }}
password: ${{ secrets.AWS_SECRET_KEY_DEV }}
- uses: docker/login-action@v3
with:
@@ -709,7 +669,7 @@ jobs:
neondatabase/compute-node-${{ matrix.version.pg }}:${{ needs.tag.outputs.build-tag }}-${{ matrix.version.debian }}-${{ matrix.arch }}
- name: Build neon extensions test image
if: matrix.version.pg >= 'v16'
if: matrix.version.pg == 'v16'
uses: docker/build-push-action@v6
with:
context: .
@@ -724,7 +684,8 @@ jobs:
pull: true
file: compute/compute-node.Dockerfile
target: neon-pg-ext-test
cache-from: type=registry,ref=cache.neon.build/compute-node-${{ matrix.version.pg }}:cache-${{ matrix.version.debian }}-${{ matrix.arch }}
cache-from: type=registry,ref=cache.neon.build/neon-test-extensions-${{ matrix.version.pg }}:cache-${{ matrix.version.debian }}-${{ matrix.arch }}
cache-to: ${{ github.ref_name == 'main' && format('type=registry,ref=cache.neon.build/neon-test-extensions-{0}:cache-{1}-{2},mode=max', matrix.version.pg, matrix.version.debian, matrix.arch) || '' }}
tags: |
neondatabase/neon-test-extensions-${{ matrix.version.pg }}:${{needs.tag.outputs.build-tag}}-${{ matrix.version.debian }}-${{ matrix.arch }}
@@ -747,17 +708,13 @@ jobs:
push: true
pull: true
file: compute/compute-node.Dockerfile
cache-from: type=registry,ref=cache.neon.build/compute-node-${{ matrix.version.pg }}:cache-${{ matrix.version.debian }}-${{ matrix.arch }}
cache-from: type=registry,ref=cache.neon.build/neon-test-extensions-${{ matrix.version.pg }}:cache-${{ matrix.version.debian }}-${{ matrix.arch }}
cache-to: ${{ github.ref_name == 'main' && format('type=registry,ref=cache.neon.build/compute-tools-{0}:cache-{1}-{2},mode=max', matrix.version.pg, matrix.version.debian, matrix.arch) || '' }}
tags: |
neondatabase/compute-tools:${{ needs.tag.outputs.build-tag }}-${{ matrix.version.debian }}-${{ matrix.arch }}
compute-node-image:
needs: [ compute-node-image-arch, tag ]
permissions:
id-token: write # aws-actions/configure-aws-credentials
statuses: write
contents: read
runs-on: ubuntu-22.04
strategy:
@@ -787,7 +744,7 @@ jobs:
neondatabase/compute-node-${{ matrix.version.pg }}:${{ needs.tag.outputs.build-tag }}-${{ matrix.version.debian }}-arm64
- name: Create multi-arch neon-test-extensions image
if: matrix.version.pg >= 'v16'
if: matrix.version.pg == 'v16'
run: |
docker buildx imagetools create -t neondatabase/neon-test-extensions-${{ matrix.version.pg }}:${{ needs.tag.outputs.build-tag }} \
-t neondatabase/neon-test-extensions-${{ matrix.version.pg }}:${{ needs.tag.outputs.build-tag }}-${{ matrix.version.debian }} \
@@ -802,15 +759,11 @@ jobs:
neondatabase/compute-tools:${{ needs.tag.outputs.build-tag }}-${{ matrix.version.debian }}-x64 \
neondatabase/compute-tools:${{ needs.tag.outputs.build-tag }}-${{ matrix.version.debian }}-arm64
- name: Configure AWS credentials
uses: aws-actions/configure-aws-credentials@v4
- uses: docker/login-action@v3
with:
aws-region: eu-central-1
role-to-assume: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
role-duration-seconds: 3600
- name: Login to Amazon Dev ECR
uses: aws-actions/amazon-ecr-login@v2
registry: 369495373322.dkr.ecr.eu-central-1.amazonaws.com
username: ${{ secrets.AWS_ACCESS_KEY_DEV }}
password: ${{ secrets.AWS_SECRET_KEY_DEV }}
- name: Push multi-arch compute-node-${{ matrix.version.pg }} image to ECR
run: |
@@ -840,7 +793,7 @@ jobs:
- pg: v17
debian: bookworm
env:
VM_BUILDER_VERSION: v0.37.1
VM_BUILDER_VERSION: v0.35.0
steps:
- uses: actions/checkout@v4
@@ -880,7 +833,6 @@ jobs:
fail-fast: false
matrix:
arch: [ x64, arm64 ]
pg_version: [v16, v17]
runs-on: ${{ fromJson(format('["self-hosted", "{0}"]', matrix.arch == 'arm64' && 'small-arm64' || 'small')) }}
@@ -919,10 +871,7 @@ jobs:
- name: Verify docker-compose example and test extensions
timeout-minutes: 20
env:
TAG: ${{needs.tag.outputs.build-tag}}
TEST_VERSION_ONLY: ${{ matrix.pg_version }}
run: ./docker-compose/docker_compose_test.sh
run: env TAG=${{needs.tag.outputs.build-tag}} ./docker-compose/docker_compose_test.sh
- name: Print logs and clean up
if: always()
@@ -930,14 +879,12 @@ jobs:
docker compose -f ./docker-compose/docker-compose.yml logs || 0
docker compose -f ./docker-compose/docker-compose.yml down
promote-images-dev:
needs: [ check-permissions, tag, vm-compute-node-image ]
promote-images:
needs: [ check-permissions, tag, test-images, vm-compute-node-image ]
runs-on: ubuntu-22.04
permissions:
id-token: write # aws-actions/configure-aws-credentials
statuses: write
contents: read
id-token: write # for `aws-actions/configure-aws-credentials`
env:
VERSIONS: v14 v15 v16 v17
@@ -948,15 +895,12 @@ jobs:
username: ${{ secrets.NEON_DOCKERHUB_USERNAME }}
password: ${{ secrets.NEON_DOCKERHUB_PASSWORD }}
- name: Configure AWS credentials
uses: aws-actions/configure-aws-credentials@v4
- name: Login to dev ECR
uses: docker/login-action@v3
with:
aws-region: eu-central-1
role-to-assume: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
role-duration-seconds: 3600
- name: Login to Amazon Dev ECR
uses: aws-actions/amazon-ecr-login@v2
registry: 369495373322.dkr.ecr.eu-central-1.amazonaws.com
username: ${{ secrets.AWS_ACCESS_KEY_DEV }}
password: ${{ secrets.AWS_SECRET_KEY_DEV }}
- name: Copy vm-compute-node images to ECR
run: |
@@ -965,35 +909,6 @@ jobs:
neondatabase/vm-compute-node-${version}:${{ needs.tag.outputs.build-tag }}
done
promote-images-prod:
needs: [ check-permissions, tag, test-images, vm-compute-node-image ]
runs-on: ubuntu-22.04
if: github.ref_name == 'main' || github.ref_name == 'release' || github.ref_name == 'release-proxy' || github.ref_name == 'release-compute'
permissions:
id-token: write # aws-actions/configure-aws-credentials
statuses: write
contents: read
env:
VERSIONS: v14 v15 v16 v17
steps:
- name: Configure AWS credentials
uses: aws-actions/configure-aws-credentials@v4
with:
aws-region: eu-central-1
role-to-assume: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
role-duration-seconds: 3600
- name: Login to Amazon Dev ECR
uses: aws-actions/amazon-ecr-login@v2
- uses: docker/login-action@v3
with:
username: ${{ secrets.NEON_DOCKERHUB_USERNAME }}
password: ${{ secrets.NEON_DOCKERHUB_PASSWORD }}
- name: Add latest tag to images
if: github.ref_name == 'main'
run: |
@@ -1016,7 +931,7 @@ jobs:
neondatabase/neon-test-extensions-v16:${{ needs.tag.outputs.build-tag }}
- name: Configure AWS-prod credentials
if: github.ref_name == 'release'|| github.ref_name == 'release-proxy' || github.ref_name == 'release-compute'
if: github.ref_name == 'release'|| github.ref_name == 'release-proxy'
uses: aws-actions/configure-aws-credentials@v4
with:
aws-region: eu-central-1
@@ -1025,12 +940,12 @@ jobs:
- name: Login to prod ECR
uses: docker/login-action@v3
if: github.ref_name == 'release'|| github.ref_name == 'release-proxy' || github.ref_name == 'release-compute'
if: github.ref_name == 'release'|| github.ref_name == 'release-proxy'
with:
registry: 093970136003.dkr.ecr.eu-central-1.amazonaws.com
- name: Copy all images to prod ECR
if: github.ref_name == 'release' || github.ref_name == 'release-proxy' || github.ref_name == 'release-compute'
if: github.ref_name == 'release'|| github.ref_name == 'release-proxy'
run: |
for image in neon compute-tools {vm-,}compute-node-{v14,v15,v16,v17}; do
docker buildx imagetools create -t 093970136003.dkr.ecr.eu-central-1.amazonaws.com/${image}:${{ needs.tag.outputs.build-tag }} \
@@ -1039,7 +954,7 @@ jobs:
push-to-acr-dev:
if: github.ref_name == 'main'
needs: [ tag, promote-images-dev ]
needs: [ tag, promote-images ]
uses: ./.github/workflows/_push-to-acr.yml
with:
client_id: ${{ vars.AZURE_DEV_CLIENT_ID }}
@@ -1050,8 +965,8 @@ jobs:
tenant_id: ${{ vars.AZURE_TENANT_ID }}
push-to-acr-prod:
if: github.ref_name == 'release' || github.ref_name == 'release-proxy' || github.ref_name == 'release-compute'
needs: [ tag, promote-images-prod ]
if: github.ref_name == 'release'|| github.ref_name == 'release-proxy'
needs: [ tag, promote-images ]
uses: ./.github/workflows/_push-to-acr.yml
with:
client_id: ${{ vars.AZURE_PROD_CLIENT_ID }}
@@ -1064,11 +979,6 @@ jobs:
trigger-custom-extensions-build-and-wait:
needs: [ check-permissions, tag ]
runs-on: ubuntu-22.04
permissions:
id-token: write # aws-actions/configure-aws-credentials
statuses: write
contents: write
pull-requests: write
steps:
- name: Set PR's status to pending and request a remote CI test
run: |
@@ -1141,82 +1051,15 @@ jobs:
exit 1
deploy:
needs: [ check-permissions, promote-images-prod, tag, build-and-test-locally, trigger-custom-extensions-build-and-wait, push-to-acr-dev, push-to-acr-prod ]
needs: [ check-permissions, promote-images, tag, build-and-test-locally, trigger-custom-extensions-build-and-wait, push-to-acr-dev, push-to-acr-prod ]
# `!failure() && !cancelled()` is required because the workflow depends on the job that can be skipped: `push-to-acr-dev` and `push-to-acr-prod`
if: (github.ref_name == 'main' || github.ref_name == 'release' || github.ref_name == 'release-proxy' || github.ref_name == 'release-compute') && !failure() && !cancelled()
permissions:
id-token: write # aws-actions/configure-aws-credentials
statuses: write
contents: write
if: (github.ref_name == 'main' || github.ref_name == 'release' || github.ref_name == 'release-proxy') && !failure() && !cancelled()
runs-on: [ self-hosted, small ]
container: 369495373322.dkr.ecr.eu-central-1.amazonaws.com/ansible:latest
steps:
- uses: actions/checkout@v4
- name: Create git tag and GitHub release
if: github.ref_name == 'release' || github.ref_name == 'release-proxy' || github.ref_name == 'release-compute'
uses: actions/github-script@v7
with:
retries: 5
script: |
const tag = "${{ needs.tag.outputs.build-tag }}";
try {
const existingRef = await github.rest.git.getRef({
owner: context.repo.owner,
repo: context.repo.repo,
ref: `tags/${tag}`,
});
if (existingRef.data.object.sha !== context.sha) {
throw new Error(`Tag ${tag} already exists but points to a different commit (expected: ${context.sha}, actual: ${existingRef.data.object.sha}).`);
}
console.log(`Tag ${tag} already exists and points to ${context.sha} as expected.`);
} catch (error) {
if (error.status !== 404) {
throw error;
}
console.log(`Tag ${tag} does not exist. Creating it...`);
await github.rest.git.createRef({
owner: context.repo.owner,
repo: context.repo.repo,
ref: `refs/tags/${tag}`,
sha: context.sha,
});
console.log(`Tag ${tag} created successfully.`);
}
// TODO: check how GitHub releases look for proxy/compute releases and enable them if they're OK
if (context.ref !== 'refs/heads/release') {
console.log(`GitHub release skipped for ${context.ref}.`);
return;
}
try {
const existingRelease = await github.rest.repos.getReleaseByTag({
owner: context.repo.owner,
repo: context.repo.repo,
tag: tag,
});
console.log(`Release for tag ${tag} already exists (ID: ${existingRelease.data.id}).`);
} catch (error) {
if (error.status !== 404) {
throw error;
}
console.log(`Release for tag ${tag} does not exist. Creating it...`);
await github.rest.repos.createRelease({
owner: context.repo.owner,
repo: context.repo.repo,
tag_name: tag,
generate_release_notes: true,
});
console.log(`Release for tag ${tag} created successfully.`);
}
- name: Trigger deploy workflow
env:
GH_TOKEN: ${{ secrets.CI_ACCESS_TOKEN }}
@@ -1259,20 +1102,43 @@ jobs:
-f deployProxyAuthBroker=true \
-f branch=main \
-f dockerTag=${{needs.tag.outputs.build-tag}}
elif [[ "$GITHUB_REF_NAME" == "release-compute" ]]; then
gh workflow --repo neondatabase/infra run deploy-compute-dev.yml --ref main -f dockerTag=${{needs.tag.outputs.build-tag}}
else
echo "GITHUB_REF_NAME (value '$GITHUB_REF_NAME') is not set to either 'main', 'release', 'release-proxy' or 'release-compute'"
echo "GITHUB_REF_NAME (value '$GITHUB_REF_NAME') is not set to either 'main' or 'release'"
exit 1
fi
- name: Create git tag
if: github.ref_name == 'release' || github.ref_name == 'release-proxy'
uses: actions/github-script@v7
with:
# Retry script for 5XX server errors: https://github.com/actions/github-script#retries
retries: 5
script: |
await github.rest.git.createRef({
owner: context.repo.owner,
repo: context.repo.repo,
ref: "refs/tags/${{ needs.tag.outputs.build-tag }}",
sha: context.sha,
})
# TODO: check how GitHub releases look for proxy releases and enable it if it's OK
- name: Create GitHub release
if: github.ref_name == 'release'
uses: actions/github-script@v7
with:
# Retry script for 5XX server errors: https://github.com/actions/github-script#retries
retries: 5
script: |
await github.rest.repos.createRelease({
owner: context.repo.owner,
repo: context.repo.repo,
tag_name: "${{ needs.tag.outputs.build-tag }}",
generate_release_notes: true,
})
# The job runs on `release` branch and copies compatibility data and Neon artifact from the last *release PR* to the latest directory
promote-compatibility-data:
needs: [ deploy ]
permissions:
id-token: write # aws-actions/configure-aws-credentials
statuses: write
contents: read
# `!failure() && !cancelled()` is required because the workflow transitively depends on the job that can be skipped: `push-to-acr-dev` and `push-to-acr-prod`
if: github.ref_name == 'release' && !failure() && !cancelled()
@@ -1309,12 +1175,6 @@ jobs:
echo "run-id=${run_id}" | tee -a ${GITHUB_OUTPUT}
echo "commit-sha=${last_commit_sha}" | tee -a ${GITHUB_OUTPUT}
- uses: aws-actions/configure-aws-credentials@v4
with:
aws-region: eu-central-1
role-to-assume: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
role-duration-seconds: 3600
- name: Promote compatibility snapshot and Neon artifact
env:
BUCKET: neon-github-public-dev
@@ -1362,7 +1222,7 @@ jobs:
done
pin-build-tools-image:
needs: [ build-build-tools-image, promote-images-prod, build-and-test-locally ]
needs: [ build-build-tools-image, promote-images, build-and-test-locally ]
if: github.ref_name == 'main'
uses: ./.github/workflows/pin-build-tools-image.yml
with:
@@ -1385,7 +1245,7 @@ jobs:
- build-and-test-locally
- check-codestyle-python
- check-codestyle-rust
- promote-images-dev
- promote-images
- test-images
- trigger-custom-extensions-build-and-wait
runs-on: ubuntu-22.04


@@ -19,21 +19,15 @@ concurrency:
group: ${{ github.workflow }}
cancel-in-progress: true
permissions:
id-token: write # aws-actions/configure-aws-credentials
statuses: write
contents: write
jobs:
regress:
env:
POSTGRES_DISTRIB_DIR: /tmp/neon/pg_install
DEFAULT_PG_VERSION: 16
TEST_OUTPUT: /tmp/test_output
BUILD_TYPE: remote
strategy:
fail-fast: false
matrix:
pg-version: [16, 17]
AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_DEV }}
AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_KEY_DEV }}
runs-on: us-east-2
container:
@@ -46,11 +40,9 @@ jobs:
submodules: true
- name: Patch the test
env:
PG_VERSION: ${{matrix.pg-version}}
run: |
cd "vendor/postgres-v${PG_VERSION}"
patch -p1 < "../../compute/patches/cloud_regress_pg${PG_VERSION}.patch"
cd "vendor/postgres-v${DEFAULT_PG_VERSION}"
patch -p1 < "../../compute/patches/cloud_regress_pg${DEFAULT_PG_VERSION}.patch"
- name: Generate a random password
id: pwgen
@@ -63,9 +55,8 @@ jobs:
- name: Change tests according to the generated password
env:
DBPASS: ${{ steps.pwgen.outputs.DBPASS }}
PG_VERSION: ${{matrix.pg-version}}
run: |
cd vendor/postgres-v"${PG_VERSION}"/src/test/regress
cd vendor/postgres-v"${DEFAULT_PG_VERSION}"/src/test/regress
for fname in sql/*.sql expected/*.out; do
sed -i.bak s/NEON_PASSWORD_PLACEHOLDER/"'${DBPASS}'"/ "${fname}"
done
@@ -81,46 +72,27 @@ jobs:
name: neon-${{ runner.os }}-${{ runner.arch }}-release-artifact
path: /tmp/neon/
prefix: latest
aws-oicd-role-arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
- name: Create a new branch
id: create-branch
uses: ./.github/actions/neon-branch-create
with:
api_key: ${{ secrets.NEON_STAGING_API_KEY }}
project_id: ${{ vars[format('PGREGRESS_PG{0}_PROJECT_ID', matrix.pg-version)] }}
- name: Run the regression tests
uses: ./.github/actions/run-python-test-set
with:
build_type: ${{ env.BUILD_TYPE }}
test_selection: cloud_regress
pg_version: ${{matrix.pg-version}}
pg_version: ${{ env.DEFAULT_PG_VERSION }}
extra_params: -m remote_cluster
aws-oicd-role-arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
env:
BENCHMARK_CONNSTR: ${{steps.create-branch.outputs.dsn}}
- name: Delete branch
if: always()
uses: ./.github/actions/neon-branch-delete
with:
api_key: ${{ secrets.NEON_STAGING_API_KEY }}
project_id: ${{ vars[format('PGREGRESS_PG{0}_PROJECT_ID', matrix.pg-version)] }}
branch_id: ${{steps.create-branch.outputs.branch_id}}
BENCHMARK_CONNSTR: ${{ secrets.PG_REGRESS_CONNSTR }}
- name: Create Allure report
id: create-allure-report
if: ${{ !cancelled() }}
uses: ./.github/actions/allure-report-generate
with:
aws-oicd-role-arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
- name: Post to a Slack channel
if: ${{ github.event.schedule && failure() }}
uses: slackapi/slack-github-action@v1
with:
channel-id: ${{ vars.SLACK_ON_CALL_QA_STAGING_STREAM }}
channel-id: "C033QLM5P7D" # on-call-staging-stream
slack-message: |
Periodic pg_regress on staging: ${{ job.status }}
<${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}|GitHub Run>


@@ -13,7 +13,7 @@ on:
# │ │ │ │ ┌───────────── day of the week (0 - 6 or SUN-SAT)
- cron: '0 9 * * *' # run once a day, timezone is utc
workflow_dispatch: # adds ability to run this manually
defaults:
run:
shell: bash -euxo pipefail {0}
@@ -26,9 +26,8 @@ concurrency:
jobs:
ingest:
strategy:
fail-fast: false # allow other variants to continue even if one fails
matrix:
target_project: [new_empty_project, large_existing_project]
target_project: [new_empty_project, large_existing_project]
permissions:
contents: write
statuses: write
@@ -56,7 +55,7 @@ jobs:
with:
aws-region: eu-central-1
role-to-assume: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
role-duration-seconds: 18000 # 5 hours is currently max associated with IAM role
role-duration-seconds: 18000 # 5 hours is currently max associated with IAM role
- name: Download Neon artifact
uses: ./.github/actions/download
@@ -64,7 +63,6 @@ jobs:
name: neon-${{ runner.os }}-${{ runner.arch }}-release-artifact
path: /tmp/neon/
prefix: latest
aws-oicd-role-arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
- name: Create Neon Project
if: ${{ matrix.target_project == 'new_empty_project' }}
@@ -95,7 +93,7 @@ jobs:
project_id: ${{ vars.BENCHMARK_INGEST_TARGET_PROJECTID }}
api_key: ${{ secrets.NEON_STAGING_API_KEY }}
- name: Initialize Neon project
- name: Initialize Neon project
if: ${{ matrix.target_project == 'large_existing_project' }}
env:
BENCHMARK_INGEST_TARGET_CONNSTR: ${{ steps.create-neon-branch-ingest-target.outputs.dsn }}
@@ -123,7 +121,7 @@ jobs:
${PSQL} "${BENCHMARK_INGEST_TARGET_CONNSTR}" -c "CREATE EXTENSION IF NOT EXISTS neon; CREATE EXTENSION IF NOT EXISTS neon_utils;"
echo "BENCHMARK_INGEST_TARGET_CONNSTR=${BENCHMARK_INGEST_TARGET_CONNSTR}" >> $GITHUB_ENV
- name: Invoke pgcopydb
- name: Invoke pgcopydb
uses: ./.github/actions/run-python-test-set
with:
build_type: remote
@@ -132,7 +130,7 @@ jobs:
extra_params: -s -m remote_cluster --timeout 86400 -k test_ingest_performance_using_pgcopydb
pg_version: v16
save_perf_report: true
aws-oicd-role-arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
aws_oicd_role_arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
env:
BENCHMARK_INGEST_SOURCE_CONNSTR: ${{ secrets.BENCHMARK_INGEST_SOURCE_CONNSTR }}
TARGET_PROJECT_TYPE: ${{ matrix.target_project }}
@@ -144,7 +142,7 @@ jobs:
run: |
export LD_LIBRARY_PATH=${PG_16_LIB_PATH}
${PSQL} "${BENCHMARK_INGEST_TARGET_CONNSTR}" -c "\dt+"
- name: Delete Neon Project
if: ${{ always() && matrix.target_project == 'new_empty_project' }}
uses: ./.github/actions/neon-project-delete


@@ -31,15 +31,19 @@ jobs:
uses: ./.github/workflows/build-build-tools-image.yml
secrets: inherit
files-changed:
name: Detect what files changed
runs-on: ubuntu-22.04
timeout-minutes: 3
outputs:
v17: ${{ steps.files_changed.outputs.v17 }}
postgres_changes: ${{ steps.postgres_changes.outputs.changes }}
rebuild_rust_code: ${{ steps.files_changed.outputs.rust_code }}
rebuild_everything: ${{ steps.files_changed.outputs.rebuild_neon_extra || steps.files_changed.outputs.rebuild_macos }}
check-macos-build:
needs: [ check-permissions ]
if: |
contains(github.event.pull_request.labels.*.name, 'run-extra-build-macos') ||
contains(github.event.pull_request.labels.*.name, 'run-extra-build-*') ||
github.ref_name == 'main'
timeout-minutes: 90
runs-on: macos-15
env:
# Use release build only, to have less debug info around
# Hence keeping target/ (and general cache size) smaller
BUILD_TYPE: release
steps:
- name: Checkout
@@ -47,45 +51,102 @@ jobs:
with:
submodules: true
- name: Check for Postgres changes
uses: dorny/paths-filter@1441771bbfdd59dcd748680ee64ebd8faab1a242 #v3
id: files_changed
- name: Install macOS postgres dependencies
run: brew install flex bison openssl protobuf icu4c
- name: Set pg 14 revision for caching
id: pg_v14_rev
run: echo pg_rev=$(git rev-parse HEAD:vendor/postgres-v14) >> $GITHUB_OUTPUT
- name: Set pg 15 revision for caching
id: pg_v15_rev
run: echo pg_rev=$(git rev-parse HEAD:vendor/postgres-v15) >> $GITHUB_OUTPUT
- name: Set pg 16 revision for caching
id: pg_v16_rev
run: echo pg_rev=$(git rev-parse HEAD:vendor/postgres-v16) >> $GITHUB_OUTPUT
- name: Set pg 17 revision for caching
id: pg_v17_rev
run: echo pg_rev=$(git rev-parse HEAD:vendor/postgres-v17) >> $GITHUB_OUTPUT
- name: Cache postgres v14 build
id: cache_pg_14
uses: actions/cache@v4
with:
token: ${{ github.token }}
filters: .github/file-filters.yaml
base: ${{ github.event_name != 'pull_request' && (github.event.merge_group.base_ref || github.ref_name) || '' }}
ref: ${{ github.event_name != 'pull_request' && (github.event.merge_group.head_ref || github.ref) || '' }}
path: pg_install/v14
key: v1-${{ runner.os }}-${{ runner.arch }}-${{ env.BUILD_TYPE }}-pg-${{ steps.pg_v14_rev.outputs.pg_rev }}-${{ hashFiles('Makefile') }}
- name: Filter out only v-string for build matrix
id: postgres_changes
- name: Cache postgres v15 build
id: cache_pg_15
uses: actions/cache@v4
with:
path: pg_install/v15
key: v1-${{ runner.os }}-${{ runner.arch }}-${{ env.BUILD_TYPE }}-pg-${{ steps.pg_v15_rev.outputs.pg_rev }}-${{ hashFiles('Makefile') }}
- name: Cache postgres v16 build
id: cache_pg_16
uses: actions/cache@v4
with:
path: pg_install/v16
key: v1-${{ runner.os }}-${{ runner.arch }}-${{ env.BUILD_TYPE }}-pg-${{ steps.pg_v16_rev.outputs.pg_rev }}-${{ hashFiles('Makefile') }}
- name: Cache postgres v17 build
id: cache_pg_17
uses: actions/cache@v4
with:
path: pg_install/v17
key: v1-${{ runner.os }}-${{ runner.arch }}-${{ env.BUILD_TYPE }}-pg-${{ steps.pg_v17_rev.outputs.pg_rev }}-${{ hashFiles('Makefile') }}
- name: Set extra env for macOS
run: |
v_strings_only_as_json_array=$(echo ${{ steps.files_changed.outputs.changes }} | jq '.[]|select(test("v\\d+"))' | jq --slurp -c)
echo "changes=${v_strings_only_as_json_array}" | tee -a "${GITHUB_OUTPUT}"
echo 'LDFLAGS=-L/usr/local/opt/openssl@3/lib' >> $GITHUB_ENV
echo 'CPPFLAGS=-I/usr/local/opt/openssl@3/include' >> $GITHUB_ENV
check-macos-build:
needs: [ check-permissions, files-changed ]
if: |
contains(github.event.pull_request.labels.*.name, 'run-extra-build-macos') ||
contains(github.event.pull_request.labels.*.name, 'run-extra-build-*') ||
github.ref_name == 'main'
uses: ./.github/workflows/build-macos.yml
with:
pg_versions: ${{ needs.files-changed.outputs.postgres_changes }}
rebuild_rust_code: ${{ needs.files-changed.outputs.rebuild_rust_code }}
rebuild_everything: ${{ fromJson(needs.files-changed.outputs.rebuild_everything) }}
- name: Cache cargo deps
uses: actions/cache@v4
with:
path: |
~/.cargo/registry
!~/.cargo/registry/src
~/.cargo/git
target
key: v1-${{ runner.os }}-${{ runner.arch }}-cargo-${{ hashFiles('./Cargo.lock') }}-${{ hashFiles('./rust-toolchain.toml') }}-rust
- name: Build postgres v14
if: steps.cache_pg_14.outputs.cache-hit != 'true'
run: make postgres-v14 -j$(sysctl -n hw.ncpu)
- name: Build postgres v15
if: steps.cache_pg_15.outputs.cache-hit != 'true'
run: make postgres-v15 -j$(sysctl -n hw.ncpu)
- name: Build postgres v16
if: steps.cache_pg_16.outputs.cache-hit != 'true'
run: make postgres-v16 -j$(sysctl -n hw.ncpu)
- name: Build postgres v17
if: steps.cache_pg_17.outputs.cache-hit != 'true'
run: make postgres-v17 -j$(sysctl -n hw.ncpu)
- name: Build neon extensions
run: make neon-pg-ext -j$(sysctl -n hw.ncpu)
- name: Build walproposer-lib
run: make walproposer-lib -j$(sysctl -n hw.ncpu)
- name: Run cargo build
run: PQ_LIB_DIR=$(pwd)/pg_install/v16/lib cargo build --all --release
- name: Check that no warnings are produced
run: ./run_clippy.sh
gather-rust-build-stats:
needs: [ check-permissions, build-build-tools-image, files-changed ]
permissions:
id-token: write # aws-actions/configure-aws-credentials
statuses: write
contents: write
needs: [ check-permissions, build-build-tools-image ]
if: |
(needs.files-changed.outputs.v17 == 'true' || needs.files-changed.outputs.rebuild_everything == 'true') && (
contains(github.event.pull_request.labels.*.name, 'run-extra-build-stats') ||
contains(github.event.pull_request.labels.*.name, 'run-extra-build-*') ||
github.ref_name == 'main'
)
contains(github.event.pull_request.labels.*.name, 'run-extra-build-stats') ||
contains(github.event.pull_request.labels.*.name, 'run-extra-build-*') ||
github.ref_name == 'main'
runs-on: [ self-hosted, large ]
container:
image: ${{ needs.build-build-tools-image.outputs.image }}-bookworm
@@ -116,18 +177,13 @@ jobs:
- name: Produce the build stats
run: PQ_LIB_DIR=$(pwd)/pg_install/v17/lib cargo build --all --release --timings -j$(nproc)
- name: Configure AWS credentials
uses: aws-actions/configure-aws-credentials@v4
with:
aws-region: eu-central-1
role-to-assume: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
role-duration-seconds: 3600
- name: Upload the build stats
id: upload-stats
env:
BUCKET: neon-github-public-dev
SHA: ${{ github.event.pull_request.head.sha || github.sha }}
AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_DEV }}
AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_KEY_DEV }}
run: |
REPORT_URL=https://${BUCKET}.s3.amazonaws.com/build-stats/${SHA}/${GITHUB_RUN_ID}/cargo-timing.html
aws s3 cp --only-show-errors ./target/cargo-timings/cargo-timing.html "s3://${BUCKET}/build-stats/${SHA}/${GITHUB_RUN_ID}/"


@@ -27,11 +27,6 @@ concurrency:
jobs:
trigger_bench_on_ec2_machine_in_eu_central_1:
permissions:
id-token: write # aws-actions/configure-aws-credentials
statuses: write
contents: write
pull-requests: write
runs-on: [ self-hosted, small ]
container:
image: neondatabase/build-tools:pinned-bookworm
@@ -43,6 +38,8 @@ jobs:
env:
API_KEY: ${{ secrets.PERIODIC_PAGEBENCH_EC2_RUNNER_API_KEY }}
RUN_ID: ${{ github.run_id }}
AWS_ACCESS_KEY_ID: ${{ secrets.AWS_EC2_US_TEST_RUNNER_ACCESS_KEY_ID }}
AWS_SECRET_ACCESS_KEY : ${{ secrets.AWS_EC2_US_TEST_RUNNER_ACCESS_KEY_SECRET }}
AWS_DEFAULT_REGION : "eu-central-1"
AWS_INSTANCE_ID : "i-02a59a3bf86bc7e74"
steps:
@@ -53,13 +50,6 @@ jobs:
- name: Show my own (GitHub runner) external IP address - useful for IP allowlisting
run: curl https://ifconfig.me
- name: Assume AWS OIDC role that allows to manage (start/stop/describe... EC machine)
uses: aws-actions/configure-aws-credentials@v4
with:
aws-region: eu-central-1
role-to-assume: ${{ vars.DEV_AWS_OIDC_ROLE_MANAGE_BENCHMARK_EC2_VMS_ARN }}
role-duration-seconds: 3600
- name: Start EC2 instance and wait for the instance to boot up
run: |
aws ec2 start-instances --instance-ids $AWS_INSTANCE_ID
@@ -134,10 +124,11 @@ jobs:
cat "test_log_${GITHUB_RUN_ID}"
- name: Create Allure report
env:
AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_DEV }}
AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_KEY_DEV }}
if: ${{ !cancelled() }}
uses: ./.github/actions/allure-report-generate
with:
aws-oicd-role-arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
- name: Post to a Slack channel
if: ${{ github.event.schedule && failure() }}
@@ -157,14 +148,6 @@ jobs:
-H "Authorization: Bearer $API_KEY" \
-d ''
- name: Assume AWS OIDC role that allows to manage (start/stop/describe... EC machine)
if: always() && steps.poll_step.outputs.too_many_runs != 'true'
uses: aws-actions/configure-aws-credentials@v4
with:
aws-region: eu-central-1
role-to-assume: ${{ vars.DEV_AWS_OIDC_ROLE_MANAGE_BENCHMARK_EC2_VMS_ARN }}
role-duration-seconds: 3600
- name: Stop EC2 instance and wait for the instance to be stopped
if: always() && steps.poll_step.outputs.too_many_runs != 'true'
run: |


@@ -25,13 +25,11 @@ defaults:
run:
shell: bash -euxo pipefail {0}
permissions:
id-token: write # aws-actions/configure-aws-credentials
statuses: write # require for posting a status update
env:
DEFAULT_PG_VERSION: 16
PLATFORM: neon-captest-new
AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_DEV }}
AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_KEY_DEV }}
AWS_DEFAULT_REGION: eu-central-1
jobs:
@@ -96,7 +94,6 @@ jobs:
name: neon-${{ runner.os }}-${{ runner.arch }}-release-artifact
path: /tmp/neon/
prefix: latest
aws-oicd-role-arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
- name: Create Neon Project
id: create-neon-project
@@ -113,7 +110,6 @@ jobs:
run_in_parallel: false
extra_params: -m remote_cluster
pg_version: ${{ env.DEFAULT_PG_VERSION }}
aws-oicd-role-arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
env:
BENCHMARK_CONNSTR: ${{ steps.create-neon-project.outputs.dsn }}
@@ -130,7 +126,6 @@ jobs:
uses: ./.github/actions/allure-report-generate
with:
store-test-results-into-db: true
aws-oicd-role-arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
env:
REGRESS_TEST_RESULT_CONNSTR_NEW: ${{ secrets.REGRESS_TEST_RESULT_CONNSTR_NEW }}
@@ -164,7 +159,6 @@ jobs:
name: neon-${{ runner.os }}-${{ runner.arch }}-release-artifact
path: /tmp/neon/
prefix: latest
aws-oicd-role-arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
- name: Create Neon Project
id: create-neon-project
@@ -181,7 +175,6 @@ jobs:
run_in_parallel: false
extra_params: -m remote_cluster
pg_version: ${{ env.DEFAULT_PG_VERSION }}
aws-oicd-role-arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
env:
BENCHMARK_CONNSTR: ${{ steps.create-neon-project.outputs.dsn }}
@@ -198,7 +191,6 @@ jobs:
uses: ./.github/actions/allure-report-generate
with:
store-test-results-into-db: true
aws-oicd-role-arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
env:
REGRESS_TEST_RESULT_CONNSTR_NEW: ${{ secrets.REGRESS_TEST_RESULT_CONNSTR_NEW }}


@@ -67,7 +67,7 @@ jobs:
runs-on: ubuntu-22.04
permissions:
id-token: write # for `azure/login` and aws auth
id-token: write # for `azure/login`
steps:
- uses: docker/login-action@v3
@@ -75,15 +75,11 @@ jobs:
username: ${{ secrets.NEON_DOCKERHUB_USERNAME }}
password: ${{ secrets.NEON_DOCKERHUB_PASSWORD }}
- name: Configure AWS credentials
uses: aws-actions/configure-aws-credentials@v4
- uses: docker/login-action@v3
with:
aws-region: eu-central-1
role-to-assume: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
role-duration-seconds: 3600
- name: Login to Amazon Dev ECR
uses: aws-actions/amazon-ecr-login@v2
registry: 369495373322.dkr.ecr.eu-central-1.amazonaws.com
username: ${{ secrets.AWS_ACCESS_KEY_DEV }}
password: ${{ secrets.AWS_SECRET_KEY_DEV }}
- name: Azure login
uses: azure/login@6c251865b4e6290e7b78be643ea2d005bc51f69a # @v2.1.1


@@ -63,7 +63,6 @@ jobs:
if: always()
permissions:
statuses: write # for `github.repos.createCommitStatus(...)`
contents: write
needs:
- get-changed-files
- check-codestyle-python


@@ -3,7 +3,7 @@ name: Create Release Branch
on:
schedule:
# It should be kept in sync with if-condition in jobs
- cron: '0 6 * * FRI' # Storage release
- cron: '0 6 * * MON' # Storage release
- cron: '0 6 * * THU' # Proxy release
workflow_dispatch:
inputs:
@@ -15,10 +15,6 @@ on:
type: boolean
description: 'Create Proxy release PR'
required: false
create-compute-release-branch:
type: boolean
description: 'Create Compute release PR'
required: false
# No permission for GITHUB_TOKEN by default; the **minimal required** set of permissions should be granted in each job.
permissions: {}
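Concretely, the empty map at the workflow level strips every default `GITHUB_TOKEN` scope, and each job re-grants only what it needs. A minimal sketch of that pattern (job name and scope are illustrative):

permissions: {}              # workflow level: token starts with no scopes

jobs:
  create-release-branch:
    permissions:
      contents: write        # only scope this job needs (to push a branch)
    runs-on: ubuntu-22.04
    steps:
      - uses: actions/checkout@v4
      - run: git push origin HEAD:release-candidate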
@@ -29,20 +25,20 @@ defaults:
jobs:
create-storage-release-branch:
if: ${{ github.event.schedule == '0 6 * * FRI' || inputs.create-storage-release-branch }}
if: ${{ github.event.schedule == '0 6 * * MON' || format('{0}', inputs.create-storage-release-branch) == 'true' }}
permissions:
contents: write
uses: ./.github/workflows/_create-release-pr.yml
with:
component-name: 'Storage'
component-name: 'Storage & Compute'
release-branch: 'release'
secrets:
ci-access-token: ${{ secrets.CI_ACCESS_TOKEN }}
create-proxy-release-branch:
if: ${{ github.event.schedule == '0 6 * * THU' || inputs.create-proxy-release-branch }}
if: ${{ github.event.schedule == '0 6 * * THU' || format('{0}', inputs.create-proxy-release-branch) == 'true' }}
permissions:
contents: write
@@ -53,16 +49,3 @@ jobs:
release-branch: 'release-proxy'
secrets:
ci-access-token: ${{ secrets.CI_ACCESS_TOKEN }}
create-compute-release-branch:
if: inputs.create-compute-release-branch
permissions:
contents: write
uses: ./.github/workflows/_create-release-pr.yml
with:
component-name: 'Compute'
release-branch: 'release-compute'
secrets:
ci-access-token: ${{ secrets.CI_ACCESS_TOKEN }}


@@ -51,8 +51,6 @@ jobs:
echo "tag=release-$(git rev-list --count HEAD)" | tee -a $GITHUB_OUTPUT
elif [[ "$GITHUB_REF_NAME" == "release-proxy" ]]; then
echo "tag=release-proxy-$(git rev-list --count HEAD)" >> $GITHUB_OUTPUT
elif [[ "$GITHUB_REF_NAME" == "release-compute" ]]; then
echo "tag=release-compute-$(git rev-list --count HEAD)" >> $GITHUB_OUTPUT
else
echo "GITHUB_REF_NAME (value '$GITHUB_REF_NAME') is not set to either 'main' or 'release'"
BUILD_AND_TEST_RUN_ID=$(gh run list -b $CURRENT_BRANCH -c $CURRENT_SHA -w 'Build and Test' -L 1 --json databaseId --jq '.[].databaseId')
@@ -68,7 +66,7 @@ jobs:
GH_TOKEN: ${{ secrets.CI_ACCESS_TOKEN }}
TAG: ${{ needs.tag.outputs.build-tag }}
steps:
- name: Wait for `promote-images-dev` job to finish
- name: Wait for `promote-images` job to finish
# It's important to have a timeout here, the script in the step can run infinitely
timeout-minutes: 60
run: |
@@ -79,17 +77,17 @@ jobs:
# For PRs we use the run id as the tag
BUILD_AND_TEST_RUN_ID=${TAG}
while true; do
conclusion=$(gh run --repo ${GITHUB_REPOSITORY} view ${BUILD_AND_TEST_RUN_ID} --json jobs --jq '.jobs[] | select(.name == "promote-images-dev") | .conclusion')
conclusion=$(gh run --repo ${GITHUB_REPOSITORY} view ${BUILD_AND_TEST_RUN_ID} --json jobs --jq '.jobs[] | select(.name == "promote-images") | .conclusion')
case "$conclusion" in
success)
break
;;
failure | cancelled | skipped)
echo "The 'promote-images-dev' job didn't succeed: '${conclusion}'. Exiting..."
echo "The 'promote-images' job didn't succeed: '${conclusion}'. Exiting..."
exit 1
;;
*)
echo "The 'promote-images-dev' hasn't succeed yet. Waiting..."
echo "The 'promote-images' hasn't succeed yet. Waiting..."
sleep 60
;;
esac


@@ -1,29 +1,15 @@
# Autoscaling
/libs/vm_monitor/ @neondatabase/autoscaling
# DevProd
/.github/ @neondatabase/developer-productivity
# Compute
/pgxn/ @neondatabase/compute
/vendor/ @neondatabase/compute
/compute/ @neondatabase/compute
/compute_tools/ @neondatabase/compute
# Proxy
/libs/proxy/ @neondatabase/proxy
/proxy/ @neondatabase/proxy
# Storage
/compute_tools/ @neondatabase/control-plane @neondatabase/compute
/libs/pageserver_api/ @neondatabase/storage
/libs/postgres_ffi/ @neondatabase/compute @neondatabase/storage
/libs/remote_storage/ @neondatabase/storage
/libs/safekeeper_api/ @neondatabase/storage
/libs/vm_monitor/ @neondatabase/autoscaling
/pageserver/ @neondatabase/storage
/pgxn/ @neondatabase/compute
/pgxn/neon/ @neondatabase/compute @neondatabase/storage
/proxy/ @neondatabase/proxy
/safekeeper/ @neondatabase/storage
/storage_controller @neondatabase/storage
/storage_scrubber @neondatabase/storage
/libs/pageserver_api/ @neondatabase/storage
/libs/remote_storage/ @neondatabase/storage
/libs/safekeeper_api/ @neondatabase/storage
# Shared
/pgxn/neon/ @neondatabase/compute @neondatabase/storage
/libs/compute_api/ @neondatabase/compute @neondatabase/control-plane
/libs/postgres_ffi/ @neondatabase/compute @neondatabase/storage
/vendor/ @neondatabase/compute

Cargo.lock (generated), 1161 changed lines: diff suppressed because it is too large


@@ -11,7 +11,6 @@ members = [
"pageserver/pagebench",
"proxy",
"safekeeper",
"safekeeper/client",
"storage_broker",
"storage_controller",
"storage_controller/client",
@@ -52,7 +51,10 @@ anyhow = { version = "1.0", features = ["backtrace"] }
arc-swap = "1.6"
async-compression = { version = "0.4.0", features = ["tokio", "gzip", "zstd"] }
atomic-take = "1.1.0"
backtrace = "0.3.74"
azure_core = { version = "0.19", default-features = false, features = ["enable_reqwest_rustls", "hmac_rust"] }
azure_identity = { version = "0.19", default-features = false, features = ["enable_reqwest_rustls"] }
azure_storage = { version = "0.19", default-features = false, features = ["enable_reqwest_rustls"] }
azure_storage_blobs = { version = "0.19", default-features = false, features = ["enable_reqwest_rustls"] }
flate2 = "1.0.26"
async-stream = "0.3"
async-trait = "0.1"
@@ -65,14 +67,14 @@ aws-smithy-types = "1.2"
aws-credential-types = "1.2.0"
aws-sigv4 = { version = "1.2", features = ["sign-http"] }
aws-types = "1.3"
axum = { version = "0.7.9", features = ["ws"] }
axum = { version = "0.7.5", features = ["ws"] }
base64 = "0.13.0"
bincode = "1.3"
bindgen = "0.70"
bit_field = "0.10.2"
bstr = "1.0"
byteorder = "1.4"
bytes = "1.9"
bytes = "1.0"
camino = "1.1.6"
cfg-if = "1.0.0"
chrono = { version = "0.4", default-features = false, features = ["clock"] }
@@ -110,11 +112,9 @@ hyper-util = "0.1"
tokio-tungstenite = "0.21.0"
indexmap = "2"
indoc = "2"
inferno = "0.12.0"
ipnet = "2.10.0"
itertools = "0.10"
itoa = "1.0.11"
jemalloc_pprof = "0.6"
jsonwebtoken = "9"
lasso = "0.7"
libc = "0.2"
@@ -127,16 +127,16 @@ notify = "6.0.0"
num_cpus = "1.15"
num-traits = "0.2.15"
once_cell = "1.13"
opentelemetry = "0.27"
opentelemetry_sdk = "0.27"
opentelemetry-otlp = { version = "0.27", default-features = false, features = ["http-proto", "trace", "http", "reqwest-client"] }
opentelemetry-semantic-conventions = "0.27"
opentelemetry = "0.24"
opentelemetry_sdk = "0.24"
opentelemetry-otlp = { version = "0.17", default-features=false, features = ["http-proto", "trace", "http", "reqwest-client"] }
opentelemetry-semantic-conventions = "0.16"
parking_lot = "0.12"
parquet = { version = "53", default-features = false, features = ["zstd"] }
parquet_derive = "53"
pbkdf2 = { version = "0.12.1", features = ["simple", "std"] }
pin-project-lite = "0.2"
pprof = { version = "0.14", features = ["criterion", "flamegraph", "frame-pointer", "protobuf", "protobuf-codec"] }
pprof = { version = "0.14", features = ["criterion", "flamegraph", "protobuf", "protobuf-codec"] }
procfs = "0.16"
prometheus = {version = "0.13", default-features=false, features = ["process"]} # removes protobuf dependency
prost = "0.13"
@@ -144,9 +144,9 @@ rand = "0.8"
redis = { version = "0.25.2", features = ["tokio-rustls-comp", "keep-alive"] }
regex = "1.10.2"
reqwest = { version = "0.12", default-features = false, features = ["rustls-tls"] }
reqwest-tracing = { version = "0.5", features = ["opentelemetry_0_27"] }
reqwest-middleware = "0.4"
reqwest-retry = "0.7"
reqwest-tracing = { version = "0.5", features = ["opentelemetry_0_24"] }
reqwest-middleware = "0.3.0"
reqwest-retry = "0.5"
routerify = "3"
rpds = "0.13"
rustc-hash = "1.1.0"
@@ -175,7 +175,7 @@ sync_wrapper = "0.1.2"
tar = "0.4"
test-context = "0.3"
thiserror = "1.0"
tikv-jemallocator = { version = "0.6", features = ["profiling", "stats", "unprefixed_malloc_on_supported_platforms"] }
tikv-jemallocator = { version = "0.6", features = ["stats"] }
tikv-jemalloc-ctl = { version = "0.6", features = ["stats"] }
tokio = { version = "1.17", features = ["macros"] }
tokio-epoll-uring = { git = "https://github.com/neondatabase/tokio-epoll-uring.git" , branch = "main" }
@@ -188,12 +188,10 @@ tokio-util = { version = "0.7.10", features = ["io", "rt"] }
toml = "0.8"
toml_edit = "0.22"
tonic = {version = "0.12.3", features = ["tls", "tls-roots"]}
tower = { version = "0.5.2", default-features = false }
tower-http = { version = "0.6.2", features = ["request-id", "trace"] }
tower-service = "0.3.3"
tower-service = "0.3.2"
tracing = "0.1"
tracing-error = "0.2"
tracing-opentelemetry = "0.28"
tracing-opentelemetry = "0.25"
tracing-subscriber = { version = "0.3", default-features = false, features = ["smallvec", "fmt", "tracing-log", "std", "env-filter", "json"] }
try-lock = "0.2.5"
twox-hash = { version = "1.6.3", default-features = false }
@@ -217,12 +215,6 @@ postgres-protocol = { git = "https://github.com/neondatabase/rust-postgres.git",
postgres-types = { git = "https://github.com/neondatabase/rust-postgres.git", branch = "neon" }
tokio-postgres = { git = "https://github.com/neondatabase/rust-postgres.git", branch = "neon" }
## Azure SDK crates
azure_core = { git = "https://github.com/neondatabase/azure-sdk-for-rust.git", branch = "neon", default-features = false, features = ["enable_reqwest_rustls", "hmac_rust"] }
azure_identity = { git = "https://github.com/neondatabase/azure-sdk-for-rust.git", branch = "neon", default-features = false, features = ["enable_reqwest_rustls"] }
azure_storage = { git = "https://github.com/neondatabase/azure-sdk-for-rust.git", branch = "neon", default-features = false, features = ["enable_reqwest_rustls"] }
azure_storage_blobs = { git = "https://github.com/neondatabase/azure-sdk-for-rust.git", branch = "neon", default-features = false, features = ["enable_reqwest_rustls"] }
## Local libraries
compute_api = { version = "0.1", path = "./libs/compute_api/" }
consumption_metrics = { version = "0.1", path = "./libs/consumption_metrics/" }
@@ -238,7 +230,6 @@ postgres_initdb = { path = "./libs/postgres_initdb" }
pq_proto = { version = "0.1", path = "./libs/pq_proto/" }
remote_storage = { version = "0.1", path = "./libs/remote_storage/" }
safekeeper_api = { version = "0.1", path = "./libs/safekeeper_api" }
safekeeper_client = { path = "./safekeeper/client" }
desim = { version = "0.1", path = "./libs/desim" }
storage_broker = { version = "0.1", path = "./storage_broker/" } # Note: main broker code is inside the binary crate, so linking with the library shouldn't be heavy.
storage_controller_client = { path = "./storage_controller/client" }
@@ -269,8 +260,6 @@ tokio-postgres = { git = "https://github.com/neondatabase/rust-postgres.git", br
[profile.release]
# This is useful for profiling and, to some extent, debug.
# Besides, debug info should not affect the performance.
#
# NB: we also enable frame pointers for improved profiling, see .cargo/config.toml.
debug = true
# disable debug symbols for all packages except this one to decrease binaries size
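One side of this workspace diff turns on jemalloc profiling support (the `profiling` and `unprefixed_malloc_on_supported_platforms` features of `tikv-jemallocator`, a `jemalloc_pprof` dependency, and the `frame-pointer` feature of `pprof`), matching the frame-pointer note next to `debug = true` above. Below is a minimal sketch, assuming only those crates, of how the allocator side is usually wired into a binary; activating profiling at run time via jemalloc's MALLOC_CONF is just one common approach, not necessarily what this repository does.

// Sketch: registering jemalloc as the global allocator with profiling compiled in.
// The `profiling` cargo feature only builds the support; it still has to be
// activated at run time, e.g. via jemalloc's MALLOC_CONF ("prof:true,prof_active:true").
use tikv_jemallocator::Jemalloc;

#[global_allocator]
static GLOBAL: Jemalloc = Jemalloc;

fn main() {
    // Allocations are now served (and, when activated, sampled) by jemalloc;
    // frame pointers keep the captured stacks cheap to walk.
    let v: Vec<u64> = (0..1_000_000).collect();
    println!("allocated {} elements", v.len());
}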

View File

@@ -45,7 +45,7 @@ COPY --chown=nonroot . .
ARG ADDITIONAL_RUSTFLAGS
RUN set -e \
&& PQ_LIB_DIR=$(pwd)/pg_install/v${STABLE_PG_VERSION}/lib RUSTFLAGS="-Clinker=clang -Clink-arg=-fuse-ld=mold -Clink-arg=-Wl,--no-rosegment -Cforce-frame-pointers=yes ${ADDITIONAL_RUSTFLAGS}" cargo build \
&& PQ_LIB_DIR=$(pwd)/pg_install/v${STABLE_PG_VERSION}/lib RUSTFLAGS="-Clinker=clang -Clink-arg=-fuse-ld=mold -Clink-arg=-Wl,--no-rosegment ${ADDITIONAL_RUSTFLAGS}" cargo build \
--bin pg_sni_router \
--bin pageserver \
--bin pagectl \
@@ -69,8 +69,6 @@ RUN set -e \
libreadline-dev \
libseccomp-dev \
ca-certificates \
# System postgres for use with client libraries (e.g. in storage controller)
postgresql-15 \
&& rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/* \
&& useradd -d /data neon \
&& chown -R neon:neon /data
@@ -103,6 +101,11 @@ RUN mkdir -p /data/.neon/ && \
> /data/.neon/pageserver.toml && \
chown -R neon:neon /data/.neon
# When running a binary that links with libpq, default to using our most recent postgres version. Binaries
# that want a particular postgres version will select it explicitly: this is just a default.
ENV LD_LIBRARY_PATH=/usr/local/v${DEFAULT_PG_VERSION}/lib
VOLUME ["/data"]
USER neon
EXPOSE 6400

View File

@@ -115,7 +115,7 @@ RUN set -e \
# Keep the version the same as in compute/compute-node.Dockerfile and
# test_runner/regress/test_compute_metrics.py.
ENV SQL_EXPORTER_VERSION=0.16.0
ENV SQL_EXPORTER_VERSION=0.13.1
RUN curl -fsSL \
"https://github.com/burningalchemist/sql_exporter/releases/download/${SQL_EXPORTER_VERSION}/sql_exporter-${SQL_EXPORTER_VERSION}.linux-$(case "$(uname -m)" in x86_64) echo amd64;; aarch64) echo arm64;; esac).tar.gz" \
--output sql_exporter.tar.gz \
@@ -258,7 +258,7 @@ WORKDIR /home/nonroot
# Rust
# Please keep the version of llvm (installed above) in sync with rust llvm (`rustc --version --verbose | grep LLVM`)
ENV RUSTC_VERSION=1.84.0
ENV RUSTC_VERSION=1.83.0
ENV RUSTUP_HOME="/home/nonroot/.rustup"
ENV PATH="/home/nonroot/.cargo/bin:${PATH}"
ARG RUSTFILT_VERSION=0.2.1

View File

@@ -14,9 +14,6 @@ ARG DEBIAN_FLAVOR=${DEBIAN_VERSION}-slim
FROM debian:$DEBIAN_FLAVOR AS build-deps
ARG DEBIAN_VERSION
# Use strict mode for bash to catch errors early
SHELL ["/bin/bash", "-euo", "pipefail", "-c"]
RUN case $DEBIAN_VERSION in \
# Version-specific installs for Bullseye (PG14-PG16):
# The h3_pg extension needs a cmake 3.20+, but Debian bullseye has 3.18.
@@ -35,12 +32,10 @@ RUN case $DEBIAN_VERSION in \
;; \
esac && \
apt update && \
apt install --no-install-recommends --no-install-suggests -y \
ninja-build git autoconf automake libtool build-essential bison flex libreadline-dev \
apt install --no-install-recommends -y git autoconf automake libtool build-essential bison flex libreadline-dev \
zlib1g-dev libxml2-dev libcurl4-openssl-dev libossp-uuid-dev wget ca-certificates pkg-config libssl-dev \
libicu-dev libxslt1-dev liblz4-dev libzstd-dev zstd \
$VERSION_INSTALLS \
&& apt clean && rm -rf /var/lib/apt/lists/*
$VERSION_INSTALLS
#########################################################################################
#
@@ -111,16 +106,13 @@ RUN cd postgres && \
#
#########################################################################################
FROM build-deps AS postgis-build
ARG DEBIAN_VERSION
ARG PG_VERSION
COPY --from=pg-build /usr/local/pgsql/ /usr/local/pgsql/
RUN apt update && \
apt install --no-install-recommends --no-install-suggests -y \
gdal-bin libboost-dev libboost-thread-dev libboost-filesystem-dev \
apt install --no-install-recommends -y gdal-bin libboost-dev libboost-thread-dev libboost-filesystem-dev \
libboost-system-dev libboost-iostreams-dev libboost-program-options-dev libboost-timer-dev \
libcgal-dev libgdal-dev libgmp-dev libmpfr-dev libopenscenegraph-dev libprotobuf-c-dev \
protobuf-c-compiler xsltproc \
&& apt clean && rm -rf /var/lib/apt/lists/*
protobuf-c-compiler xsltproc
# Postgis 3.5.0 requires SFCGAL 1.4+
@@ -130,12 +122,12 @@ RUN apt update && \
# and also we must check backward compatibility with older versions of PostGIS.
#
# Use new version only for v17
RUN case "${DEBIAN_VERSION}" in \
"bookworm") \
RUN case "${PG_VERSION}" in \
"v17") \
export SFCGAL_VERSION=1.4.1 \
export SFCGAL_CHECKSUM=1800c8a26241588f11cddcf433049e9b9aea902e923414d2ecef33a3295626c3 \
;; \
"bullseye") \
"v14" | "v15" | "v16") \
export SFCGAL_VERSION=1.3.10 \
export SFCGAL_CHECKSUM=4e39b3b2adada6254a7bdba6d297bb28e1a9835a9f879b74f37e2dab70203232 \
;; \
@@ -147,9 +139,9 @@ RUN case "${DEBIAN_VERSION}" in \
wget https://gitlab.com/sfcgal/SFCGAL/-/archive/v${SFCGAL_VERSION}/SFCGAL-v${SFCGAL_VERSION}.tar.gz -O SFCGAL.tar.gz && \
echo "${SFCGAL_CHECKSUM} SFCGAL.tar.gz" | sha256sum --check && \
mkdir sfcgal-src && cd sfcgal-src && tar xzf ../SFCGAL.tar.gz --strip-components=1 -C . && \
cmake -DCMAKE_BUILD_TYPE=Release -GNinja . && ninja -j $(getconf _NPROCESSORS_ONLN) && \
DESTDIR=/sfcgal ninja install -j $(getconf _NPROCESSORS_ONLN) && \
ninja clean && cp -R /sfcgal/* /
cmake -DCMAKE_BUILD_TYPE=Release . && make -j $(getconf _NPROCESSORS_ONLN) && \
DESTDIR=/sfcgal make install -j $(getconf _NPROCESSORS_ONLN) && \
make clean && cp -R /sfcgal/* /
ENV PATH="/usr/local/pgsql/bin:$PATH"
@@ -217,9 +209,9 @@ RUN case "${PG_VERSION}" in \
echo "${PGROUTING_CHECKSUM} pgrouting.tar.gz" | sha256sum --check && \
mkdir pgrouting-src && cd pgrouting-src && tar xzf ../pgrouting.tar.gz --strip-components=1 -C . && \
mkdir build && cd build && \
cmake -GNinja -DCMAKE_BUILD_TYPE=Release .. && \
ninja -j $(getconf _NPROCESSORS_ONLN) && \
ninja -j $(getconf _NPROCESSORS_ONLN) install && \
cmake -DCMAKE_BUILD_TYPE=Release .. && \
make -j $(getconf _NPROCESSORS_ONLN) && \
make -j $(getconf _NPROCESSORS_ONLN) install && \
echo 'trusted = true' >> /usr/local/pgsql/share/extension/pgrouting.control && \
find /usr/local/pgsql -type f | sed 's|^/usr/local/pgsql/||' > /after.txt &&\
cp /usr/local/pgsql/share/extension/pgrouting.control /extensions/postgis && \
@@ -236,12 +228,8 @@ FROM build-deps AS plv8-build
ARG PG_VERSION
COPY --from=pg-build /usr/local/pgsql/ /usr/local/pgsql/
COPY compute/patches/plv8-3.1.10.patch /plv8-3.1.10.patch
RUN apt update && \
apt install --no-install-recommends --no-install-suggests -y \
ninja-build python3-dev libncurses5 binutils clang \
&& apt clean && rm -rf /var/lib/apt/lists/*
apt install --no-install-recommends -y ninja-build python3-dev libncurses5 binutils clang
# plv8 3.2.3 supports v17
# last release v3.2.3 - Sep 7, 2024
@@ -251,6 +239,8 @@ RUN apt update && \
#
# Use new version only for v17
# because since v3.2, plv8 doesn't include plcoffee and plls extensions
ENV PLV8_TAG=v3.2.3
RUN case "${PG_VERSION}" in \
"v17") \
export PLV8_TAG=v3.2.3 \
@@ -265,9 +255,8 @@ RUN case "${PG_VERSION}" in \
git clone --recurse-submodules --depth 1 --branch ${PLV8_TAG} https://github.com/plv8/plv8.git plv8-src && \
tar -czf plv8.tar.gz --exclude .git plv8-src && \
cd plv8-src && \
if [[ "${PG_VERSION}" < "v17" ]]; then patch -p1 < /plv8-3.1.10.patch; fi && \
# generate and copy upgrade scripts
mkdir -p upgrade && ./generate_upgrade.sh ${PLV8_TAG#v} && \
mkdir -p upgrade && ./generate_upgrade.sh 3.1.10 && \
cp upgrade/* /usr/local/pgsql/share/extension/ && \
export PATH="/usr/local/pgsql/bin:$PATH" && \
make DOCKER=1 -j $(getconf _NPROCESSORS_ONLN) install && \
@@ -307,10 +296,9 @@ RUN mkdir -p /h3/usr/ && \
echo "ec99f1f5974846bde64f4513cf8d2ea1b8d172d2218ab41803bf6a63532272bc h3.tar.gz" | sha256sum --check && \
mkdir h3-src && cd h3-src && tar xzf ../h3.tar.gz --strip-components=1 -C . && \
mkdir build && cd build && \
cmake .. -GNinja -DBUILD_BENCHMARKS=0 -DCMAKE_BUILD_TYPE=Release \
-DBUILD_FUZZERS=0 -DBUILD_FILTERS=0 -DBUILD_GENERATORS=0 -DBUILD_TESTING=0 \
&& ninja -j $(getconf _NPROCESSORS_ONLN) && \
DESTDIR=/h3 ninja install && \
cmake .. -DCMAKE_BUILD_TYPE=Release && \
make -j $(getconf _NPROCESSORS_ONLN) && \
DESTDIR=/h3 make install && \
cp -R /h3/usr / && \
rm -rf build
@@ -365,10 +353,10 @@ COPY compute/patches/pgvector.patch /pgvector.patch
# because we build the images on different machines than where we run them.
# Pass OPTFLAGS="" to remove it.
#
# vector >0.7.4 supports v17
# last release v0.8.0 - Oct 30, 2024
RUN wget https://github.com/pgvector/pgvector/archive/refs/tags/v0.8.0.tar.gz -O pgvector.tar.gz && \
echo "867a2c328d4928a5a9d6f052cd3bc78c7d60228a9b914ad32aa3db88e9de27b0 pgvector.tar.gz" | sha256sum --check && \
# vector 0.7.4 supports v17
# last release v0.7.4 - Aug 5, 2024
RUN wget https://github.com/pgvector/pgvector/archive/refs/tags/v0.7.4.tar.gz -O pgvector.tar.gz && \
echo "0341edf89b1924ae0d552f617e14fb7f8867c0194ed775bcc44fa40288642583 pgvector.tar.gz" | sha256sum --check && \
mkdir pgvector-src && cd pgvector-src && tar xzf ../pgvector.tar.gz --strip-components=1 -C . && \
patch -p1 < /pgvector.patch && \
make -j $(getconf _NPROCESSORS_ONLN) OPTFLAGS="" PG_CONFIG=/usr/local/pgsql/bin/pg_config && \
@@ -657,15 +645,14 @@ FROM build-deps AS rdkit-pg-build
ARG PG_VERSION
COPY --from=pg-build /usr/local/pgsql/ /usr/local/pgsql/
RUN apt update && \
apt install --no-install-recommends --no-install-suggests -y \
RUN apt-get update && \
apt-get install --no-install-recommends -y \
libboost-iostreams1.74-dev \
libboost-regex1.74-dev \
libboost-serialization1.74-dev \
libboost-system1.74-dev \
libeigen3-dev \
libboost-all-dev \
&& apt clean && rm -rf /var/lib/apt/lists/*
libboost-all-dev
# rdkit Release_2024_09_1 supports v17
# last release Release_2024_09_1 - Sep 27, 2024
@@ -701,8 +688,6 @@ RUN case "${PG_VERSION}" in \
-D RDK_BUILD_MOLINTERCHANGE_SUPPORT=OFF \
-D RDK_BUILD_YAEHMOP_SUPPORT=OFF \
-D RDK_BUILD_STRUCTCHECKER_SUPPORT=OFF \
-D RDK_TEST_MULTITHREADED=OFF \
-D RDK_BUILD_CPP_TESTS=OFF \
-D RDK_USE_URF=OFF \
-D RDK_BUILD_PGSQL=ON \
-D RDK_PGSQL_STATIC=ON \
@@ -714,10 +699,9 @@ RUN case "${PG_VERSION}" in \
-D RDK_INSTALL_COMIC_FONTS=OFF \
-D RDK_BUILD_FREETYPE_SUPPORT=OFF \
-D CMAKE_BUILD_TYPE=Release \
-GNinja \
. && \
ninja -j $(getconf _NPROCESSORS_ONLN) && \
ninja -j $(getconf _NPROCESSORS_ONLN) install && \
make -j $(getconf _NPROCESSORS_ONLN) && \
make -j $(getconf _NPROCESSORS_ONLN) install && \
echo 'trusted = true' >> /usr/local/pgsql/share/extension/rdkit.control
#########################################################################################
@@ -860,9 +844,8 @@ FROM build-deps AS rust-extensions-build
ARG PG_VERSION
COPY --from=pg-build /usr/local/pgsql/ /usr/local/pgsql/
RUN apt update && \
apt install --no-install-recommends --no-install-suggests -y curl libclang-dev && \
apt clean && rm -rf /var/lib/apt/lists/* && \
RUN apt-get update && \
apt-get install --no-install-recommends -y curl libclang-dev && \
useradd -ms /bin/bash nonroot -b /home
ENV HOME=/home/nonroot
@@ -897,9 +880,8 @@ FROM build-deps AS rust-extensions-build-pgrx12
ARG PG_VERSION
COPY --from=pg-build /usr/local/pgsql/ /usr/local/pgsql/
RUN apt update && \
apt install --no-install-recommends --no-install-suggests -y curl libclang-dev && \
apt clean && rm -rf /var/lib/apt/lists/* && \
RUN apt-get update && \
apt-get install --no-install-recommends -y curl libclang-dev && \
useradd -ms /bin/bash nonroot -b /home
ENV HOME=/home/nonroot
@@ -927,22 +909,18 @@ FROM rust-extensions-build-pgrx12 AS pg-onnx-build
# cmake 3.26 or higher is required, so installing it using pip (bullseye-backports has cmake 3.25).
# Install it using virtual environment, because Python 3.11 (the default version on Debian 12 (Bookworm)) complains otherwise
RUN apt update && apt install --no-install-recommends --no-install-suggests -y \
python3 python3-pip python3-venv && \
apt clean && rm -rf /var/lib/apt/lists/* && \
RUN apt-get update && apt-get install -y python3 python3-pip python3-venv && \
python3 -m venv venv && \
. venv/bin/activate && \
python3 -m pip install cmake==3.30.5 && \
wget https://github.com/microsoft/onnxruntime/archive/refs/tags/v1.18.1.tar.gz -O onnxruntime.tar.gz && \
mkdir onnxruntime-src && cd onnxruntime-src && tar xzf ../onnxruntime.tar.gz --strip-components=1 -C . && \
./build.sh --config Release --parallel --cmake_generator Ninja \
--skip_submodule_sync --skip_tests --allow_running_as_root
./build.sh --config Release --parallel --skip_submodule_sync --skip_tests --allow_running_as_root
FROM pg-onnx-build AS pgrag-pg-build
RUN apt update && apt install --no-install-recommends --no-install-suggests -y protobuf-compiler \
&& apt clean && rm -rf /var/lib/apt/lists/* && \
RUN apt-get install -y protobuf-compiler && \
wget https://github.com/neondatabase-labs/pgrag/archive/refs/tags/v0.0.0.tar.gz -O pgrag.tar.gz && \
echo "2cbe394c1e74fc8bcad9b52d5fbbfb783aef834ca3ce44626cfd770573700bb4 pgrag.tar.gz" | sha256sum --check && \
mkdir pgrag-src && cd pgrag-src && tar xzf ../pgrag.tar.gz --strip-components=1 -C . && \
@@ -1167,34 +1145,24 @@ FROM rust-extensions-build AS pg-mooncake-build
ARG PG_VERSION
COPY --from=pg-build /usr/local/pgsql/ /usr/local/pgsql/
# The topmost commit in the `neon` branch at the time of writing this
# https://github.com/Mooncake-Labs/pg_mooncake/commits/neon/
# https://github.com/Mooncake-Labs/pg_mooncake/commit/077c92c452bb6896a7b7776ee95f039984f076af
ENV PG_MOONCAKE_VERSION=077c92c452bb6896a7b7776ee95f039984f076af
ENV PATH="/usr/local/pgsql/bin/:$PATH"
RUN wget https://github.com/Mooncake-Labs/pg_mooncake/releases/download/v0.1.0/pg_mooncake-0.1.0.tar.gz -O pg_mooncake.tar.gz && \
echo "eafd059b77f541f11525eb8affcd66a176968cbd8fe7c0d436e733f2aa4da59f pg_mooncake.tar.gz" | sha256sum --check && \
mkdir pg_mooncake-src && cd pg_mooncake-src && tar xzf ../pg_mooncake.tar.gz --strip-components=1 -C . && \
make release -j $(getconf _NPROCESSORS_ONLN) && \
make install -j $(getconf _NPROCESSORS_ONLN) && \
RUN case "${PG_VERSION}" in \
'v14') \
echo "pg_mooncake is not supported on Postgres ${PG_VERSION}" && exit 0;; \
esac && \
git clone --depth 1 --branch neon https://github.com/Mooncake-Labs/pg_mooncake.git pg_mooncake-src && \
cd pg_mooncake-src && \
git checkout "${PG_MOONCAKE_VERSION}" && \
git submodule update --init --depth 1 --recursive && \
make BUILD_TYPE=release -j $(getconf _NPROCESSORS_ONLN) && \
make BUILD_TYPE=release -j $(getconf _NPROCESSORS_ONLN) install && \
echo 'trusted = true' >> /usr/local/pgsql/share/extension/pg_mooncake.control
#########################################################################################
#
# Layer "pg_repack"
# compile pg_repack extension
#
#########################################################################################
FROM build-deps AS pg-repack-build
ARG PG_VERSION
COPY --from=pg-build /usr/local/pgsql/ /usr/local/pgsql/
ENV PATH="/usr/local/pgsql/bin/:$PATH"
RUN wget https://github.com/reorg/pg_repack/archive/refs/tags/ver_1.5.2.tar.gz -O pg_repack.tar.gz && \
echo '4516cad42251ed3ad53ff619733004db47d5755acac83f75924cd94d1c4fb681 pg_repack.tar.gz' | sha256sum --check && \
mkdir pg_repack-src && cd pg_repack-src && tar xzf ../pg_repack.tar.gz --strip-components=1 -C . && \
make -j $(getconf _NPROCESSORS_ONLN) && \
make -j $(getconf _NPROCESSORS_ONLN) install
#########################################################################################
#
# Layer "neon-pg-ext-build"
@@ -1240,7 +1208,6 @@ COPY --from=pg-anon-pg-build /usr/local/pgsql/ /usr/local/pgsql/
COPY --from=pg-ivm-build /usr/local/pgsql/ /usr/local/pgsql/
COPY --from=pg-partman-build /usr/local/pgsql/ /usr/local/pgsql/
COPY --from=pg-mooncake-build /usr/local/pgsql/ /usr/local/pgsql/
COPY --from=pg-repack-build /usr/local/pgsql/ /usr/local/pgsql/
COPY pgxn/ pgxn/
RUN make -j $(getconf _NPROCESSORS_ONLN) \
@@ -1276,7 +1243,7 @@ RUN make -j $(getconf _NPROCESSORS_ONLN) \
#########################################################################################
#
# Compile the Neon-specific `compute_ctl`, `fast_import`, and `local_proxy` binaries
# Compile and run the Neon-specific `compute_ctl` and `fast_import` binaries
#
#########################################################################################
FROM $REPOSITORY/$IMAGE:$TAG AS compute-tools
@@ -1286,7 +1253,7 @@ ENV BUILD_TAG=$BUILD_TAG
USER nonroot
# Copy entire project to get Cargo.* files with proper dependencies for the whole project
COPY --chown=nonroot . .
RUN mold -run cargo build --locked --profile release-line-debug-size-lto --bin compute_ctl --bin fast_import --bin local_proxy
RUN cd compute_tools && mold -run cargo build --locked --profile release-line-debug-size-lto
#########################################################################################
#
@@ -1307,8 +1274,8 @@ COPY --from=compute-tools /home/nonroot/target/release-line-debug-size-lto/fast_
FROM debian:$DEBIAN_FLAVOR AS pgbouncer
RUN set -e \
&& apt update \
&& apt install --no-install-suggests --no-install-recommends -y \
&& apt-get update \
&& apt-get install --no-install-recommends -y \
build-essential \
git \
ca-certificates \
@@ -1316,8 +1283,7 @@ RUN set -e \
automake \
libevent-dev \
libtool \
pkg-config \
&& apt clean && rm -rf /var/lib/apt/lists/*
pkg-config
# Use `dist_man_MANS=` to skip manpage generation (which requires python3/pandoc)
ENV PGBOUNCER_TAG=pgbouncer_1_22_1
@@ -1329,6 +1295,20 @@ RUN set -e \
&& make -j $(nproc) dist_man_MANS= \
&& make install dist_man_MANS=
#########################################################################################
#
# Compile the Neon-specific `local_proxy` binary
#
#########################################################################################
FROM $REPOSITORY/$IMAGE:$TAG AS local_proxy
ARG BUILD_TAG
ENV BUILD_TAG=$BUILD_TAG
USER nonroot
# Copy entire project to get Cargo.* files with proper dependencies for the whole project
COPY --chown=nonroot . .
RUN mold -run cargo build --locked --profile release-line-debug-size-lto --bin local_proxy
#########################################################################################
#
# Layers "postgres-exporter" and "sql-exporter"
@@ -1339,7 +1319,7 @@ FROM quay.io/prometheuscommunity/postgres-exporter:v0.12.1 AS postgres-exporter
# Keep the version the same as in build-tools.Dockerfile and
# test_runner/regress/test_compute_metrics.py.
FROM burningalchemist/sql_exporter:0.16.0 AS sql-exporter
FROM burningalchemist/sql_exporter:0.13.1 AS sql-exporter
#########################################################################################
#
@@ -1382,12 +1362,15 @@ RUN make PG_VERSION="${PG_VERSION}" -C compute
FROM neon-pg-ext-build AS neon-pg-ext-test
ARG PG_VERSION
RUN mkdir /ext-src
RUN case "${PG_VERSION}" in "v17") \
echo "v17 extensions are not supported yet. Quit" && exit 0;; \
esac && \
mkdir /ext-src
#COPY --from=postgis-build /postgis.tar.gz /ext-src/
#COPY --from=postgis-build /sfcgal/* /usr
COPY --from=plv8-build /plv8.tar.gz /ext-src/
#COPY --from=h3-pg-build /h3-pg.tar.gz /ext-src/
COPY --from=h3-pg-build /h3-pg.tar.gz /ext-src/
COPY --from=unit-pg-build /postgresql-unit.tar.gz /ext-src/
COPY --from=vector-pg-build /pgvector.tar.gz /ext-src/
COPY --from=vector-pg-build /pgvector.patch /ext-src/
@@ -1407,7 +1390,7 @@ COPY --from=hll-pg-build /hll.tar.gz /ext-src
COPY --from=plpgsql-check-pg-build /plpgsql_check.tar.gz /ext-src
#COPY --from=timescaledb-pg-build /timescaledb.tar.gz /ext-src
COPY --from=pg-hint-plan-pg-build /pg_hint_plan.tar.gz /ext-src
COPY compute/patches/pg_hint_plan_${PG_VERSION}.patch /ext-src
COPY compute/patches/pg_hint_plan.patch /ext-src
COPY --from=pg-cron-pg-build /pg_cron.tar.gz /ext-src
COPY compute/patches/pg_cron.patch /ext-src
#COPY --from=pg-pgx-ulid-build /home/nonroot/pgx_ulid.tar.gz /ext-src
@@ -1417,23 +1400,38 @@ COPY --from=pg-roaringbitmap-pg-build /pg_roaringbitmap.tar.gz /ext-src
COPY --from=pg-semver-pg-build /pg_semver.tar.gz /ext-src
#COPY --from=pg-embedding-pg-build /home/nonroot/pg_embedding-src/ /ext-src
#COPY --from=wal2json-pg-build /wal2json_2_5.tar.gz /ext-src
#pg_anon is not supported yet for pg v17 so, don't fail if nothing found
COPY --from=pg-anon-pg-build /pg_anon.tar.g? /ext-src
COPY --from=pg-anon-pg-build /pg_anon.tar.gz /ext-src
COPY compute/patches/pg_anon.patch /ext-src
COPY --from=pg-ivm-build /pg_ivm.tar.gz /ext-src
COPY --from=pg-partman-build /pg_partman.tar.gz /ext-src
RUN cd /ext-src/ && for f in *.tar.gz; \
RUN case "${PG_VERSION}" in "v17") \
echo "v17 extensions are not supported yet. Quit" && exit 0;; \
esac && \
cd /ext-src/ && for f in *.tar.gz; \
do echo $f; dname=$(echo $f | sed 's/\.tar.*//')-src; \
rm -rf $dname; mkdir $dname; tar xzf $f --strip-components=1 -C $dname \
|| exit 1; rm -f $f; done
RUN cd /ext-src/rum-src && patch -p1 <../rum.patch
RUN cd /ext-src/pgvector-src && patch -p1 <../pgvector.patch
RUN cd /ext-src/pg_hint_plan-src && patch -p1 < /ext-src/pg_hint_plan_${PG_VERSION}.patch
RUN case "${PG_VERSION}" in "v17") \
echo "v17 extensions are not supported yet. Quit" && exit 0;; \
esac && \
cd /ext-src/rum-src && patch -p1 <../rum.patch
RUN case "${PG_VERSION}" in "v17") \
echo "v17 extensions are not supported yet. Quit" && exit 0;; \
esac && \
cd /ext-src/pgvector-src && patch -p1 <../pgvector.patch
RUN case "${PG_VERSION}" in "v17") \
echo "v17 extensions are not supported yet. Quit" && exit 0;; \
esac && \
cd /ext-src/pg_hint_plan-src && patch -p1 < /ext-src/pg_hint_plan.patch
COPY --chmod=755 docker-compose/run-tests.sh /run-tests.sh
RUN case "${PG_VERSION}" in "v17") \
echo "postgresql_anonymizer does not yet support PG17" && exit 0;; \
esac && patch -p1 </ext-src/pg_anon.patch
RUN patch -p1 </ext-src/pg_cron.patch
echo "v17 extensions are not supported yet. Quit" && exit 0;; \
esac && \
patch -p1 </ext-src/pg_anon.patch
RUN case "${PG_VERSION}" in "v17") \
echo "v17 extensions are not supported yet. Quit" && exit 0;; \
esac && \
patch -p1 </ext-src/pg_cron.patch
ENV PATH=/usr/local/pgsql/bin:$PATH
ENV PGHOST=compute
ENV PGPORT=55433
@@ -1468,7 +1466,7 @@ COPY --from=pgbouncer /usr/local/pgbouncer/bin/pgbouncer /usr/local/bin/
COPY --chmod=0666 --chown=postgres compute/etc/pgbouncer.ini /etc/pgbouncer.ini
# local_proxy and its config
COPY --from=compute-tools --chown=postgres /home/nonroot/target/release-line-debug-size-lto/local_proxy /usr/local/bin/local_proxy
COPY --from=local_proxy --chown=postgres /home/nonroot/target/release-line-debug-size-lto/local_proxy /usr/local/bin/local_proxy
RUN mkdir -p /etc/local_proxy && chown postgres:postgres /etc/local_proxy
# Metrics exporter binaries and configuration files
@@ -1533,30 +1531,28 @@ RUN apt update && \
locales \
procps \
ca-certificates \
curl \
unzip \
$VERSION_INSTALLS && \
apt clean && rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/* && \
rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/* && \
localedef -i en_US -c -f UTF-8 -A /usr/share/locale/locale.alias en_US.UTF-8
# aws cli is used by fast_import (curl and unzip above are at this time only used for this installation step)
# s5cmd 2.2.2 from https://github.com/peak/s5cmd/releases/tag/v2.2.2
# used by fast_import
ARG TARGETARCH
ADD https://github.com/peak/s5cmd/releases/download/v2.2.2/s5cmd_2.2.2_linux_$TARGETARCH.deb /tmp/s5cmd.deb
RUN set -ex; \
\
# Determine the expected checksum based on TARGETARCH
if [ "${TARGETARCH}" = "amd64" ]; then \
TARGETARCH_ALT="x86_64"; \
CHECKSUM="c9a9df3770a3ff9259cb469b6179e02829687a464e0824d5c32d378820b53a00"; \
CHECKSUM="392c385320cd5ffa435759a95af77c215553d967e4b1c0fffe52e4f14c29cf85"; \
elif [ "${TARGETARCH}" = "arm64" ]; then \
TARGETARCH_ALT="aarch64"; \
CHECKSUM="8181730be7891582b38b028112e81b4899ca817e8c616aad807c9e9d1289223a"; \
CHECKSUM="939bee3cf4b5604ddb00e67f8c157b91d7c7a5b553d1fbb6890fad32894b7b46"; \
else \
echo "Unsupported architecture: ${TARGETARCH}"; exit 1; \
fi; \
curl -L "https://awscli.amazonaws.com/awscli-exe-linux-${TARGETARCH_ALT}-2.17.5.zip" -o /tmp/awscliv2.zip; \
echo "${CHECKSUM} /tmp/awscliv2.zip" | sha256sum -c -; \
unzip /tmp/awscliv2.zip -d /tmp/awscliv2; \
/tmp/awscliv2/aws/install; \
rm -rf /tmp/awscliv2.zip /tmp/awscliv2; \
true
\
# Compute and validate the checksum
echo "${CHECKSUM} /tmp/s5cmd.deb" | sha256sum -c -
RUN dpkg -i /tmp/s5cmd.deb && rm /tmp/s5cmd.deb
ENV LANG=en_US.utf8
USER postgres

View File

@@ -3,10 +3,9 @@
metrics: [
import 'sql_exporter/checkpoints_req.libsonnet',
import 'sql_exporter/checkpoints_timed.libsonnet',
import 'sql_exporter/compute_backpressure_throttling_seconds_total.libsonnet',
import 'sql_exporter/compute_backpressure_throttling_seconds.libsonnet',
import 'sql_exporter/compute_current_lsn.libsonnet',
import 'sql_exporter/compute_logical_snapshot_files.libsonnet',
import 'sql_exporter/compute_logical_snapshots_bytes.libsonnet',
import 'sql_exporter/compute_max_connections.libsonnet',
import 'sql_exporter/compute_receive_lsn.libsonnet',
import 'sql_exporter/compute_subscriptions_count.libsonnet',

View File

@@ -1,9 +1,5 @@
[databases]
;; pgbouncer propagates application_name (if it's specified) to the server, but some
;; clients don't set it. We set default application_name=pgbouncer to make it
;; easier to identify pgbouncer connections in Postgres. If client sets
;; application_name, it will be used instead.
*=host=localhost port=5432 auth_user=cloud_admin application_name=pgbouncer
*=host=localhost port=5432 auth_user=cloud_admin
[pgbouncer]
listen_port=6432
listen_addr=0.0.0.0
@@ -19,10 +15,3 @@ max_prepared_statements=0
admin_users=postgres
unix_socket_dir=/tmp/
unix_socket_mode=0777
;; Disable connection logging. It produces a lot of logs that no one looks at,
;; and we can get similar log entries from the proxy too. We had incidents in
;; the past where the logging significantly stressed the log device or pgbouncer
;; itself.
log_connections=0
log_disconnections=0

View File

@@ -1,10 +1,10 @@
{
metric_name: 'compute_backpressure_throttling_seconds_total',
type: 'counter',
metric_name: 'compute_backpressure_throttling_seconds',
type: 'gauge',
help: 'Time compute has spent throttled',
key_labels: null,
values: [
'throttled',
],
query: importstr 'sql_exporter/compute_backpressure_throttling_seconds_total.sql',
query: importstr 'sql_exporter/compute_backpressure_throttling_seconds.sql',
}
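One side of this diff models the metric as a gauge named compute_backpressure_throttling_seconds, the other as a counter named compute_backpressure_throttling_seconds_total. Time spent throttled only accumulates, which is exactly what Prometheus counter semantics (and the conventional `_total` suffix) expect, and it lets dashboards apply rate()/increase() safely. A purely illustrative sketch with the `prometheus` crate from the workspace follows; the real metric is produced by sql_exporter from the referenced SQL query, not by Rust code.

// Illustrative only: the semantic difference between the two metric types above.
use prometheus::{Counter, Registry};

fn main() -> Result<(), prometheus::Error> {
    let registry = Registry::new();

    // A counter only ever accumulates; consumers derive rates from it.
    let throttled = Counter::new(
        "compute_backpressure_throttling_seconds_total",
        "Time compute has spent throttled",
    )?;
    registry.register(Box::new(throttled.clone()))?;

    // e.g. 0.25 s of backpressure throttling observed since the last sample.
    throttled.inc_by(0.25);
    assert!(throttled.get() >= 0.25);
    Ok(())
}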

View File

@@ -1,7 +0,0 @@
SELECT
(SELECT current_setting('neon.timeline_id')) AS timeline_id,
-- Postgres creates temporary snapshot files of the form %X-%X.snap.%d.tmp.
-- These temporary snapshot files are renamed to the actual snapshot files
-- after they are completely built. We only WAL-log the completely built
-- snapshot files
(SELECT COALESCE(sum(size), 0) FROM pg_ls_logicalsnapdir() WHERE name LIKE '%.snap') AS logical_snapshots_bytes;

View File

@@ -1,17 +0,0 @@
local neon = import 'neon.libsonnet';
local pg_ls_logicalsnapdir = importstr 'sql_exporter/compute_logical_snapshots_bytes.15.sql';
local pg_ls_dir = importstr 'sql_exporter/compute_logical_snapshots_bytes.sql';
{
metric_name: 'compute_logical_snapshots_bytes',
type: 'gauge',
help: 'Size of the pg_logical/snapshots directory, not including temporary files',
key_labels: [
'timeline_id',
],
values: [
'logical_snapshots_bytes',
],
query: if neon.PG_MAJORVERSION_NUM < 15 then pg_ls_dir else pg_ls_logicalsnapdir,
}

View File

@@ -1,9 +0,0 @@
SELECT
(SELECT setting FROM pg_settings WHERE name = 'neon.timeline_id') AS timeline_id,
-- Postgres creates temporary snapshot files of the form %X-%X.snap.%d.tmp.
-- These temporary snapshot files are renamed to the actual snapshot files
-- after they are completely built. We only WAL-log the completely built
-- snapshot files
(SELECT COALESCE(sum((pg_stat_file('pg_logical/snapshots/' || name, missing_ok => true)).size), 0)
FROM (SELECT * FROM pg_ls_dir('pg_logical/snapshots') WHERE pg_ls_dir LIKE '%.snap') AS name
) AS logical_snapshots_bytes;

View File

@@ -981,7 +981,7 @@ index fc42d418bf..e38f517574 100644
CREATE SCHEMA addr_nsp;
SET search_path TO 'addr_nsp';
diff --git a/src/test/regress/expected/password.out b/src/test/regress/expected/password.out
index 8475231735..0653946337 100644
index 8475231735..1afae5395f 100644
--- a/src/test/regress/expected/password.out
+++ b/src/test/regress/expected/password.out
@@ -12,11 +12,11 @@ SET password_encryption = 'md5'; -- ok
@@ -1006,63 +1006,65 @@ index 8475231735..0653946337 100644
-----------------+---------------------------------------------------
- regress_passwd1 | md5783277baca28003b33453252be4dbb34
- regress_passwd2 | md54044304ba511dd062133eb5b4b84a2a3
+ regress_passwd1 | NEON_MD5_PLACEHOLDER:regress_passwd1
+ regress_passwd2 | NEON_MD5_PLACEHOLDER:regress_passwd2
+ regress_passwd1 | NEON_MD5_PLACEHOLDER_regress_passwd1
+ regress_passwd2 | NEON_MD5_PLACEHOLDER_regress_passwd2
regress_passwd3 | SCRAM-SHA-256$4096:<salt>$<storedkey>:<serverkey>
- regress_passwd4 |
+ regress_passwd4 | SCRAM-SHA-256$4096:<salt>$<storedkey>:<serverkey>
(4 rows)
-- Rename a role
@@ -54,24 +54,16 @@ ALTER ROLE regress_passwd2_new RENAME TO regress_passwd2;
@@ -54,24 +54,30 @@ ALTER ROLE regress_passwd2_new RENAME TO regress_passwd2;
-- passwords.
SET password_encryption = 'md5';
-- encrypt with MD5
-ALTER ROLE regress_passwd2 PASSWORD 'foo';
--- already encrypted, use as they are
-ALTER ROLE regress_passwd1 PASSWORD 'md5cd3578025fe2c3d7ed1b9a9b26238b70';
-ALTER ROLE regress_passwd3 PASSWORD 'SCRAM-SHA-256$4096:VLK4RMaQLCvNtQ==$6YtlR4t69SguDiwFvbVgVZtuz6gpJQQqUMZ7IQJK5yI=:ps75jrHeYU4lXCcXI4O8oIdJ3eO8o2jirjruw9phBTo=';
+ALTER ROLE regress_passwd2 PASSWORD NEON_PASSWORD_PLACEHOLDER;
-- already encrypted, use as they are
ALTER ROLE regress_passwd1 PASSWORD 'md5cd3578025fe2c3d7ed1b9a9b26238b70';
+ERROR: Received HTTP code 400 from control plane: {"error":"Neon only supports being given plaintext passwords"}
ALTER ROLE regress_passwd3 PASSWORD 'SCRAM-SHA-256$4096:VLK4RMaQLCvNtQ==$6YtlR4t69SguDiwFvbVgVZtuz6gpJQQqUMZ7IQJK5yI=:ps75jrHeYU4lXCcXI4O8oIdJ3eO8o2jirjruw9phBTo=';
+ERROR: Received HTTP code 400 from control plane: {"error":"Neon only supports being given plaintext passwords"}
SET password_encryption = 'scram-sha-256';
-- create SCRAM secret
-ALTER ROLE regress_passwd4 PASSWORD 'foo';
--- already encrypted with MD5, use as it is
-CREATE ROLE regress_passwd5 PASSWORD 'md5e73a4b11df52a6068f8b39f90be36023';
--- This looks like a valid SCRAM-SHA-256 secret, but it is not
--- so it should be hashed with SCRAM-SHA-256.
-CREATE ROLE regress_passwd6 PASSWORD 'SCRAM-SHA-256$1234';
--- These may look like valid MD5 secrets, but they are not, so they
--- should be hashed with SCRAM-SHA-256.
--- trailing garbage at the end
-CREATE ROLE regress_passwd7 PASSWORD 'md5012345678901234567890123456789zz';
--- invalid length
-CREATE ROLE regress_passwd8 PASSWORD 'md501234567890123456789012345678901zz';
+ALTER ROLE regress_passwd4 PASSWORD NEON_PASSWORD_PLACEHOLDER;
+-- Neon does not support encrypted passwords, use unencrypted instead
+CREATE ROLE regress_passwd5 PASSWORD NEON_PASSWORD_PLACEHOLDER;
+-- Neon does not support encrypted passwords, use unencrypted instead
+CREATE ROLE regress_passwd6 PASSWORD NEON_PASSWORD_PLACEHOLDER;
+CREATE ROLE regress_passwd7 PASSWORD NEON_PASSWORD_PLACEHOLDER;
+CREATE ROLE regress_passwd8 PASSWORD NEON_PASSWORD_PLACEHOLDER;
-- already encrypted with MD5, use as it is
CREATE ROLE regress_passwd5 PASSWORD 'md5e73a4b11df52a6068f8b39f90be36023';
+ERROR: Received HTTP code 400 from control plane: {"error":"Neon only supports being given plaintext passwords"}
-- This looks like a valid SCRAM-SHA-256 secret, but it is not
-- so it should be hashed with SCRAM-SHA-256.
CREATE ROLE regress_passwd6 PASSWORD 'SCRAM-SHA-256$1234';
+ERROR: Received HTTP code 400 from control plane: {"error":"Neon only supports being given plaintext passwords"}
-- These may look like valid MD5 secrets, but they are not, so they
-- should be hashed with SCRAM-SHA-256.
-- trailing garbage at the end
CREATE ROLE regress_passwd7 PASSWORD 'md5012345678901234567890123456789zz';
+ERROR: Received HTTP code 400 from control plane: {"error":"Neon only supports being given plaintext passwords"}
-- invalid length
CREATE ROLE regress_passwd8 PASSWORD 'md501234567890123456789012345678901zz';
+ERROR: Received HTTP code 400 from control plane: {"error":"Neon only supports being given plaintext passwords"}
-- Changing the SCRAM iteration count
SET scram_iterations = 1024;
CREATE ROLE regress_passwd9 PASSWORD 'alterediterationcount';
@@ -81,11 +73,11 @@ SELECT rolname, regexp_replace(rolpassword, '(SCRAM-SHA-256)\$(\d+):([a-zA-Z0-9+
@@ -81,63 +87,67 @@ SELECT rolname, regexp_replace(rolpassword, '(SCRAM-SHA-256)\$(\d+):([a-zA-Z0-9+
ORDER BY rolname, rolpassword;
rolname | rolpassword_masked
-----------------+---------------------------------------------------
- regress_passwd1 | md5cd3578025fe2c3d7ed1b9a9b26238b70
- regress_passwd2 | md5dfa155cadd5f4ad57860162f3fab9cdb
+ regress_passwd1 | NEON_MD5_PLACEHOLDER:regress_passwd1
+ regress_passwd2 | NEON_MD5_PLACEHOLDER:regress_passwd2
+ regress_passwd1 | NEON_MD5_PLACEHOLDER_regress_passwd1
+ regress_passwd2 | NEON_MD5_PLACEHOLDER_regress_passwd2
regress_passwd3 | SCRAM-SHA-256$4096:<salt>$<storedkey>:<serverkey>
regress_passwd4 | SCRAM-SHA-256$4096:<salt>$<storedkey>:<serverkey>
- regress_passwd5 | md5e73a4b11df52a6068f8b39f90be36023
+ regress_passwd5 | SCRAM-SHA-256$4096:<salt>$<storedkey>:<serverkey>
regress_passwd6 | SCRAM-SHA-256$4096:<salt>$<storedkey>:<serverkey>
regress_passwd7 | SCRAM-SHA-256$4096:<salt>$<storedkey>:<serverkey>
regress_passwd8 | SCRAM-SHA-256$4096:<salt>$<storedkey>:<serverkey>
@@ -95,23 +87,20 @@ SELECT rolname, regexp_replace(rolpassword, '(SCRAM-SHA-256)\$(\d+):([a-zA-Z0-9+
- regress_passwd6 | SCRAM-SHA-256$4096:<salt>$<storedkey>:<serverkey>
- regress_passwd7 | SCRAM-SHA-256$4096:<salt>$<storedkey>:<serverkey>
- regress_passwd8 | SCRAM-SHA-256$4096:<salt>$<storedkey>:<serverkey>
regress_passwd9 | SCRAM-SHA-256$1024:<salt>$<storedkey>:<serverkey>
-(9 rows)
+(5 rows)
-- An empty password is not allowed, in any form
CREATE ROLE regress_passwd_empty PASSWORD '';
NOTICE: empty string is not a valid password, clearing password
@@ -1080,37 +1082,56 @@ index 8475231735..0653946337 100644
-(1 row)
+(0 rows)
--- Test with invalid stored and server keys.
---
--- The first is valid, to act as a control. The others have too long
--- stored/server keys. They will be re-hashed.
-CREATE ROLE regress_passwd_sha_len0 PASSWORD 'SCRAM-SHA-256$4096:A6xHKoH/494E941doaPOYg==$Ky+A30sewHIH3VHQLRN9vYsuzlgNyGNKCh37dy96Rqw=:COPdlNiIkrsacU5QoxydEuOH6e/KfiipeETb/bPw8ZI=';
-CREATE ROLE regress_passwd_sha_len1 PASSWORD 'SCRAM-SHA-256$4096:A6xHKoH/494E941doaPOYg==$Ky+A30sewHIH3VHQLRN9vYsuzlgNyGNKCh37dy96RqwAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=:COPdlNiIkrsacU5QoxydEuOH6e/KfiipeETb/bPw8ZI=';
-CREATE ROLE regress_passwd_sha_len2 PASSWORD 'SCRAM-SHA-256$4096:A6xHKoH/494E941doaPOYg==$Ky+A30sewHIH3VHQLRN9vYsuzlgNyGNKCh37dy96Rqw=:COPdlNiIkrsacU5QoxydEuOH6e/KfiipeETb/bPw8ZIAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=';
+-- Neon does not support encrypted passwords, use unencrypted instead
+CREATE ROLE regress_passwd_sha_len0 PASSWORD NEON_PASSWORD_PLACEHOLDER;
+CREATE ROLE regress_passwd_sha_len1 PASSWORD NEON_PASSWORD_PLACEHOLDER;
+CREATE ROLE regress_passwd_sha_len2 PASSWORD NEON_PASSWORD_PLACEHOLDER;
-- Test with invalid stored and server keys.
--
-- The first is valid, to act as a control. The others have too long
-- stored/server keys. They will be re-hashed.
CREATE ROLE regress_passwd_sha_len0 PASSWORD 'SCRAM-SHA-256$4096:A6xHKoH/494E941doaPOYg==$Ky+A30sewHIH3VHQLRN9vYsuzlgNyGNKCh37dy96Rqw=:COPdlNiIkrsacU5QoxydEuOH6e/KfiipeETb/bPw8ZI=';
+ERROR: Received HTTP code 400 from control plane: {"error":"Neon only supports being given plaintext passwords"}
CREATE ROLE regress_passwd_sha_len1 PASSWORD 'SCRAM-SHA-256$4096:A6xHKoH/494E941doaPOYg==$Ky+A30sewHIH3VHQLRN9vYsuzlgNyGNKCh37dy96RqwAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=:COPdlNiIkrsacU5QoxydEuOH6e/KfiipeETb/bPw8ZI=';
+ERROR: Received HTTP code 400 from control plane: {"error":"Neon only supports being given plaintext passwords"}
CREATE ROLE regress_passwd_sha_len2 PASSWORD 'SCRAM-SHA-256$4096:A6xHKoH/494E941doaPOYg==$Ky+A30sewHIH3VHQLRN9vYsuzlgNyGNKCh37dy96Rqw=:COPdlNiIkrsacU5QoxydEuOH6e/KfiipeETb/bPw8ZIAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=';
+ERROR: Received HTTP code 400 from control plane: {"error":"Neon only supports being given plaintext passwords"}
-- Check that the invalid secrets were re-hashed. A re-hashed secret
-- should not contain the original salt.
SELECT rolname, rolpassword not like '%A6xHKoH/494E941doaPOYg==%' as is_rolpassword_rehashed
@@ -120,7 +109,7 @@ SELECT rolname, rolpassword not like '%A6xHKoH/494E941doaPOYg==%' as is_rolpassw
FROM pg_authid
WHERE rolname LIKE 'regress_passwd_sha_len%'
ORDER BY rolname;
rolname | is_rolpassword_rehashed
-------------------------+-------------------------
- rolname | is_rolpassword_rehashed
--------------------------+-------------------------
- regress_passwd_sha_len0 | f
+ regress_passwd_sha_len0 | t
regress_passwd_sha_len1 | t
regress_passwd_sha_len2 | t
(3 rows)
@@ -135,6 +124,7 @@ DROP ROLE regress_passwd7;
- regress_passwd_sha_len1 | t
- regress_passwd_sha_len2 | t
-(3 rows)
+ rolname | is_rolpassword_rehashed
+---------+-------------------------
+(0 rows)
DROP ROLE regress_passwd1;
DROP ROLE regress_passwd2;
DROP ROLE regress_passwd3;
DROP ROLE regress_passwd4;
DROP ROLE regress_passwd5;
+ERROR: role "regress_passwd5" does not exist
DROP ROLE regress_passwd6;
+ERROR: role "regress_passwd6" does not exist
DROP ROLE regress_passwd7;
+ERROR: role "regress_passwd7" does not exist
DROP ROLE regress_passwd8;
+ERROR: role "regress_passwd8" does not exist
DROP ROLE regress_passwd9;
DROP ROLE regress_passwd_empty;
+ERROR: role "regress_passwd_empty" does not exist
DROP ROLE regress_passwd_sha_len0;
+ERROR: role "regress_passwd_sha_len0" does not exist
DROP ROLE regress_passwd_sha_len1;
+ERROR: role "regress_passwd_sha_len1" does not exist
DROP ROLE regress_passwd_sha_len2;
+ERROR: role "regress_passwd_sha_len2" does not exist
-- all entries should have been removed
SELECT rolname, rolpassword
FROM pg_authid
diff --git a/src/test/regress/expected/privileges.out b/src/test/regress/expected/privileges.out
index 5b9dba7b32..cc408dad42 100644
--- a/src/test/regress/expected/privileges.out
@@ -3173,7 +3194,7 @@ index 1a6c61f49d..1c31ac6a53 100644
-- Test generic object addressing/identification functions
CREATE SCHEMA addr_nsp;
diff --git a/src/test/regress/sql/password.sql b/src/test/regress/sql/password.sql
index 53e86b0b6c..0303fdfe96 100644
index 53e86b0b6c..f07cf1ec54 100644
--- a/src/test/regress/sql/password.sql
+++ b/src/test/regress/sql/password.sql
@@ -10,11 +10,11 @@ SET password_encryption = 'scram-sha-256'; -- ok
@@ -3192,59 +3213,23 @@ index 53e86b0b6c..0303fdfe96 100644
-- check list of created entries
--
@@ -42,26 +42,18 @@ ALTER ROLE regress_passwd2_new RENAME TO regress_passwd2;
@@ -42,14 +42,14 @@ ALTER ROLE regress_passwd2_new RENAME TO regress_passwd2;
SET password_encryption = 'md5';
-- encrypt with MD5
-ALTER ROLE regress_passwd2 PASSWORD 'foo';
--- already encrypted, use as they are
-ALTER ROLE regress_passwd1 PASSWORD 'md5cd3578025fe2c3d7ed1b9a9b26238b70';
-ALTER ROLE regress_passwd3 PASSWORD 'SCRAM-SHA-256$4096:VLK4RMaQLCvNtQ==$6YtlR4t69SguDiwFvbVgVZtuz6gpJQQqUMZ7IQJK5yI=:ps75jrHeYU4lXCcXI4O8oIdJ3eO8o2jirjruw9phBTo=';
+ALTER ROLE regress_passwd2 PASSWORD NEON_PASSWORD_PLACEHOLDER;
-- already encrypted, use as they are
ALTER ROLE regress_passwd1 PASSWORD 'md5cd3578025fe2c3d7ed1b9a9b26238b70';
ALTER ROLE regress_passwd3 PASSWORD 'SCRAM-SHA-256$4096:VLK4RMaQLCvNtQ==$6YtlR4t69SguDiwFvbVgVZtuz6gpJQQqUMZ7IQJK5yI=:ps75jrHeYU4lXCcXI4O8oIdJ3eO8o2jirjruw9phBTo=';
SET password_encryption = 'scram-sha-256';
-- create SCRAM secret
-ALTER ROLE regress_passwd4 PASSWORD 'foo';
--- already encrypted with MD5, use as it is
-CREATE ROLE regress_passwd5 PASSWORD 'md5e73a4b11df52a6068f8b39f90be36023';
+ALTER ROLE regress_passwd4 PASSWORD NEON_PASSWORD_PLACEHOLDER;
+-- Neon does not support encrypted passwords, use unencrypted instead
+CREATE ROLE regress_passwd5 PASSWORD NEON_PASSWORD_PLACEHOLDER;
-- already encrypted with MD5, use as it is
CREATE ROLE regress_passwd5 PASSWORD 'md5e73a4b11df52a6068f8b39f90be36023';
--- This looks like a valid SCRAM-SHA-256 secret, but it is not
--- so it should be hashed with SCRAM-SHA-256.
-CREATE ROLE regress_passwd6 PASSWORD 'SCRAM-SHA-256$1234';
--- These may look like valid MD5 secrets, but they are not, so they
--- should be hashed with SCRAM-SHA-256.
--- trailing garbage at the end
-CREATE ROLE regress_passwd7 PASSWORD 'md5012345678901234567890123456789zz';
--- invalid length
-CREATE ROLE regress_passwd8 PASSWORD 'md501234567890123456789012345678901zz';
+-- Neon does not support encrypted passwords, use unencrypted instead
+CREATE ROLE regress_passwd6 PASSWORD NEON_PASSWORD_PLACEHOLDER;
+CREATE ROLE regress_passwd7 PASSWORD NEON_PASSWORD_PLACEHOLDER;
+CREATE ROLE regress_passwd8 PASSWORD NEON_PASSWORD_PLACEHOLDER;
-- Changing the SCRAM iteration count
SET scram_iterations = 1024;
@@ -78,13 +70,10 @@ ALTER ROLE regress_passwd_empty PASSWORD 'md585939a5ce845f1a1b620742e3c659e0a';
ALTER ROLE regress_passwd_empty PASSWORD 'SCRAM-SHA-256$4096:hpFyHTUsSWcR7O9P$LgZFIt6Oqdo27ZFKbZ2nV+vtnYM995pDh9ca6WSi120=:qVV5NeluNfUPkwm7Vqat25RjSPLkGeoZBQs6wVv+um4=';
SELECT rolpassword FROM pg_authid WHERE rolname='regress_passwd_empty';
--- Test with invalid stored and server keys.
---
--- The first is valid, to act as a control. The others have too long
--- stored/server keys. They will be re-hashed.
-CREATE ROLE regress_passwd_sha_len0 PASSWORD 'SCRAM-SHA-256$4096:A6xHKoH/494E941doaPOYg==$Ky+A30sewHIH3VHQLRN9vYsuzlgNyGNKCh37dy96Rqw=:COPdlNiIkrsacU5QoxydEuOH6e/KfiipeETb/bPw8ZI=';
-CREATE ROLE regress_passwd_sha_len1 PASSWORD 'SCRAM-SHA-256$4096:A6xHKoH/494E941doaPOYg==$Ky+A30sewHIH3VHQLRN9vYsuzlgNyGNKCh37dy96RqwAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=:COPdlNiIkrsacU5QoxydEuOH6e/KfiipeETb/bPw8ZI=';
-CREATE ROLE regress_passwd_sha_len2 PASSWORD 'SCRAM-SHA-256$4096:A6xHKoH/494E941doaPOYg==$Ky+A30sewHIH3VHQLRN9vYsuzlgNyGNKCh37dy96Rqw=:COPdlNiIkrsacU5QoxydEuOH6e/KfiipeETb/bPw8ZIAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=';
+-- Neon does not support encrypted passwords, use unencrypted instead
+CREATE ROLE regress_passwd_sha_len0 PASSWORD NEON_PASSWORD_PLACEHOLDER;
+CREATE ROLE regress_passwd_sha_len1 PASSWORD NEON_PASSWORD_PLACEHOLDER;
+CREATE ROLE regress_passwd_sha_len2 PASSWORD NEON_PASSWORD_PLACEHOLDER;
-- Check that the invalid secrets were re-hashed. A re-hashed secret
-- should not contain the original salt.
diff --git a/src/test/regress/sql/privileges.sql b/src/test/regress/sql/privileges.sql
index 249df17a58..b258e7f26a 100644
--- a/src/test/regress/sql/privileges.sql

File diff suppressed because it is too large

View File

@@ -1,174 +0,0 @@
diff --git a/expected/ut-A.out b/expected/ut-A.out
index e7d68a1..65a056c 100644
--- a/expected/ut-A.out
+++ b/expected/ut-A.out
@@ -9,13 +9,16 @@ SET search_path TO public;
----
-- No.A-1-1-3
CREATE EXTENSION pg_hint_plan;
+LOG: Sending request to compute_ctl: http://localhost:3080/extension_server/pg_hint_plan
-- No.A-1-2-3
DROP EXTENSION pg_hint_plan;
-- No.A-1-1-4
CREATE SCHEMA other_schema;
CREATE EXTENSION pg_hint_plan SCHEMA other_schema;
+LOG: Sending request to compute_ctl: http://localhost:3080/extension_server/pg_hint_plan
ERROR: extension "pg_hint_plan" must be installed in schema "hint_plan"
CREATE EXTENSION pg_hint_plan;
+LOG: Sending request to compute_ctl: http://localhost:3080/extension_server/pg_hint_plan
DROP SCHEMA other_schema;
----
---- No. A-5-1 comment pattern
diff --git a/expected/ut-J.out b/expected/ut-J.out
index 2fa3c70..314e929 100644
--- a/expected/ut-J.out
+++ b/expected/ut-J.out
@@ -789,38 +789,6 @@ NestLoop(st1 st2)
MergeJoin(t1 t2)
not used hint:
duplication hint:
-error hint:
-
-LOG: pg_hint_plan:
-used hint:
-not used hint:
-NestLoop(st1 st2)
-MergeJoin(t1 t2)
-duplication hint:
-error hint:
-
-LOG: pg_hint_plan:
-used hint:
-not used hint:
-NestLoop(st1 st2)
-MergeJoin(t1 t2)
-duplication hint:
-error hint:
-
-LOG: pg_hint_plan:
-used hint:
-not used hint:
-NestLoop(st1 st2)
-MergeJoin(t1 t2)
-duplication hint:
-error hint:
-
-LOG: pg_hint_plan:
-used hint:
-not used hint:
-NestLoop(st1 st2)
-MergeJoin(t1 t2)
-duplication hint:
error hint:
explain_filter
diff --git a/expected/ut-S.out b/expected/ut-S.out
index 0bfcfb8..e75f581 100644
--- a/expected/ut-S.out
+++ b/expected/ut-S.out
@@ -4415,34 +4415,6 @@ used hint:
IndexScan(ti1 ti1_pred)
not used hint:
duplication hint:
-error hint:
-
-LOG: pg_hint_plan:
-used hint:
-not used hint:
-IndexScan(ti1 ti1_pred)
-duplication hint:
-error hint:
-
-LOG: pg_hint_plan:
-used hint:
-not used hint:
-IndexScan(ti1 ti1_pred)
-duplication hint:
-error hint:
-
-LOG: pg_hint_plan:
-used hint:
-not used hint:
-IndexScan(ti1 ti1_pred)
-duplication hint:
-error hint:
-
-LOG: pg_hint_plan:
-used hint:
-not used hint:
-IndexScan(ti1 ti1_pred)
-duplication hint:
error hint:
explain_filter
diff --git a/expected/ut-W.out b/expected/ut-W.out
index a09bd34..0ad227c 100644
--- a/expected/ut-W.out
+++ b/expected/ut-W.out
@@ -1341,54 +1341,6 @@ IndexScan(ft1)
IndexScan(t)
Parallel(s1 3 hard)
duplication hint:
-error hint:
-
-LOG: pg_hint_plan:
-used hint:
-not used hint:
-IndexScan(*VALUES*)
-SeqScan(cte1)
-IndexScan(ft1)
-IndexScan(t)
-Parallel(p1 5 hard)
-Parallel(s1 3 hard)
-duplication hint:
-error hint:
-
-LOG: pg_hint_plan:
-used hint:
-not used hint:
-IndexScan(*VALUES*)
-SeqScan(cte1)
-IndexScan(ft1)
-IndexScan(t)
-Parallel(p1 5 hard)
-Parallel(s1 3 hard)
-duplication hint:
-error hint:
-
-LOG: pg_hint_plan:
-used hint:
-not used hint:
-IndexScan(*VALUES*)
-SeqScan(cte1)
-IndexScan(ft1)
-IndexScan(t)
-Parallel(p1 5 hard)
-Parallel(s1 3 hard)
-duplication hint:
-error hint:
-
-LOG: pg_hint_plan:
-used hint:
-not used hint:
-IndexScan(*VALUES*)
-SeqScan(cte1)
-IndexScan(ft1)
-IndexScan(t)
-Parallel(p1 5 hard)
-Parallel(s1 3 hard)
-duplication hint:
error hint:
explain_filter
diff --git a/expected/ut-fdw.out b/expected/ut-fdw.out
index 017fa4b..98d989b 100644
--- a/expected/ut-fdw.out
+++ b/expected/ut-fdw.out
@@ -7,6 +7,7 @@ SET pg_hint_plan.debug_print TO on;
SET client_min_messages TO LOG;
SET pg_hint_plan.enable_hint TO on;
CREATE EXTENSION file_fdw;
+LOG: Sending request to compute_ctl: http://localhost:3080/extension_server/file_fdw
CREATE SERVER file_server FOREIGN DATA WRAPPER file_fdw;
CREATE USER MAPPING FOR PUBLIC SERVER file_server;
CREATE FOREIGN TABLE ft1 (id int, val int) SERVER file_server OPTIONS (format 'csv', filename :'filename');

View File

@@ -1,42 +0,0 @@
commit 46b38d3e46f9cd6c70d9b189dd6ff4abaa17cf5e
Author: Alexander Bayandin <alexander@neon.tech>
Date: Sat Nov 30 18:29:32 2024 +0000
Fix v8 9.7.37 compilation on Debian 12
diff --git a/patches/code/84cf3230a9680aac3b73c410c2b758760b6d3066.patch b/patches/code/84cf3230a9680aac3b73c410c2b758760b6d3066.patch
new file mode 100644
index 0000000..f0a5dc7
--- /dev/null
+++ b/patches/code/84cf3230a9680aac3b73c410c2b758760b6d3066.patch
@@ -0,0 +1,30 @@
+From 84cf3230a9680aac3b73c410c2b758760b6d3066 Mon Sep 17 00:00:00 2001
+From: Michael Lippautz <mlippautz@chromium.org>
+Date: Thu, 27 Jan 2022 14:14:11 +0100
+Subject: [PATCH] cppgc: Fix include
+
+Add <utility> to cover for std::exchange.
+
+Bug: v8:12585
+Change-Id: Ida65144e93e466be8914527d0e646f348c136bcb
+Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3420309
+Auto-Submit: Michael Lippautz <mlippautz@chromium.org>
+Reviewed-by: Omer Katz <omerkatz@chromium.org>
+Commit-Queue: Michael Lippautz <mlippautz@chromium.org>
+Cr-Commit-Position: refs/heads/main@{#78820}
+---
+ src/heap/cppgc/prefinalizer-handler.h | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/src/heap/cppgc/prefinalizer-handler.h b/src/heap/cppgc/prefinalizer-handler.h
+index bc17c99b1838..c82c91ff5a45 100644
+--- a/src/heap/cppgc/prefinalizer-handler.h
++++ b/src/heap/cppgc/prefinalizer-handler.h
+@@ -5,6 +5,7 @@
+ #ifndef V8_HEAP_CPPGC_PREFINALIZER_HANDLER_H_
+ #define V8_HEAP_CPPGC_PREFINALIZER_HANDLER_H_
+
++#include <utility>
+ #include <vector>
+
+ #include "include/cppgc/prefinalizer.h"

View File

@@ -7,7 +7,7 @@ license.workspace = true
[features]
default = []
# Enables test specific features.
testing = ["fail/failpoints"]
testing = []
[dependencies]
base64.workspace = true
@@ -15,15 +15,13 @@ aws-config.workspace = true
aws-sdk-s3.workspace = true
aws-sdk-kms.workspace = true
anyhow.workspace = true
axum = { workspace = true, features = [] }
camino.workspace = true
chrono.workspace = true
cfg-if.workspace = true
clap.workspace = true
fail.workspace = true
flate2.workspace = true
futures.workspace = true
http.workspace = true
hyper0 = { workspace = true, features = ["full"] }
metrics.workspace = true
nix.workspace = true
notify.workspace = true
@@ -38,8 +36,6 @@ serde_with.workspace = true
serde_json.workspace = true
signal-hook.workspace = true
tar.workspace = true
tower.workspace = true
tower-http.workspace = true
reqwest = { workspace = true, features = ["json"] }
tokio = { workspace = true, features = ["rt", "rt-multi-thread"] }
tokio-postgres.workspace = true
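One side of this manifest wires the crate's `testing` feature to `fail/failpoints`, and one side of the main.rs diff below initializes failpoints through `utils::failpoint_support`. A minimal sketch of how such a feature-gated failpoint is typically used with the `fail` crate follows; the function and failpoint names here are invented for illustration.

// Sketch, assuming a crate feature like `testing = ["fail/failpoints"]`.
// Names ("sync-spec-to-disk", sync_spec_to_disk) are hypothetical.
use anyhow::{anyhow, Result};

fn sync_spec_to_disk() -> Result<()> {
    // Compiles to a no-op unless the `failpoints` feature of `fail` is enabled.
    fail::fail_point!("sync-spec-to-disk", |_| Err(anyhow!("failpoint hit")));
    Ok(())
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn fails_when_failpoint_is_armed() {
        let scenario = fail::FailScenario::setup();
        fail::cfg("sync-spec-to-disk", "return").unwrap();
        assert!(sync_spec_to_disk().is_err());
        scenario.teardown();
    }
}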

View File

@@ -60,22 +60,19 @@ use compute_tools::compute::{
};
use compute_tools::configurator::launch_configurator;
use compute_tools::extension_server::get_pg_version_string;
use compute_tools::http::launch_http_server;
use compute_tools::http::api::launch_http_server;
use compute_tools::logger::*;
use compute_tools::monitor::launch_monitor;
use compute_tools::params::*;
use compute_tools::spec::*;
use compute_tools::swap::resize_swap;
use rlimit::{setrlimit, Resource};
use utils::failpoint_support;
// this is an arbitrary build tag. Fine as a default / for testing purposes
// in-case of not-set environment var
const BUILD_TAG_DEFAULT: &str = "latest";
fn main() -> Result<()> {
let scenario = failpoint_support::init();
let (build_tag, clap_args) = init()?;
// enable core dumping for all child processes
@@ -103,14 +100,17 @@ fn main() -> Result<()> {
maybe_delay_exit(delay_exit);
scenario.teardown();
deinit_and_exit(wait_pg_result);
}
fn init() -> Result<(String, clap::ArgMatches)> {
init_tracing_and_logging(DEFAULT_LOG_LEVEL)?;
opentelemetry::global::set_error_handler(|err| {
tracing::info!("OpenTelemetry error: {err}");
})
.expect("global error handler lock poisoned");
let mut signals = Signals::new([SIGINT, SIGTERM, SIGQUIT])?;
thread::spawn(move || {
for sig in signals.forever() {
@@ -246,48 +246,47 @@ fn try_spec_from_cli(
let compute_id = matches.get_one::<String>("compute-id");
let control_plane_uri = matches.get_one::<String>("control-plane-uri");
// First, try to get cluster spec from the cli argument
if let Some(spec_json) = spec_json {
info!("got spec from cli argument {}", spec_json);
return Ok(CliSpecParams {
spec: Some(serde_json::from_str(spec_json)?),
live_config_allowed: false,
});
}
// Second, try to read it from the file if path is provided
if let Some(spec_path) = spec_path {
let file = File::open(Path::new(spec_path))?;
return Ok(CliSpecParams {
spec: Some(serde_json::from_reader(file)?),
live_config_allowed: true,
});
}
let Some(compute_id) = compute_id else {
panic!(
"compute spec should be provided by one of the following ways: \
--spec OR --spec-path OR --control-plane-uri and --compute-id"
);
};
let Some(control_plane_uri) = control_plane_uri else {
panic!("must specify both --control-plane-uri and --compute-id or none");
};
match get_spec_from_control_plane(control_plane_uri, compute_id) {
Ok(spec) => Ok(CliSpecParams {
spec,
live_config_allowed: true,
}),
Err(e) => {
error!(
"cannot get response from control plane: {}\n\
neither spec nor confirmation that compute is in the Empty state was received",
e
);
Err(e)
let spec;
let mut live_config_allowed = false;
match spec_json {
// First, try to get cluster spec from the cli argument
Some(json) => {
info!("got spec from cli argument {}", json);
spec = Some(serde_json::from_str(json)?);
}
}
None => {
// Second, try to read it from the file if path is provided
if let Some(sp) = spec_path {
let path = Path::new(sp);
let file = File::open(path)?;
spec = Some(serde_json::from_reader(file)?);
live_config_allowed = true;
} else if let Some(id) = compute_id {
if let Some(cp_base) = control_plane_uri {
live_config_allowed = true;
spec = match get_spec_from_control_plane(cp_base, id) {
Ok(s) => s,
Err(e) => {
error!("cannot get response from control plane: {}", e);
panic!("neither spec nor confirmation that compute is in the Empty state was received");
}
};
} else {
panic!("must specify both --control-plane-uri and --compute-id or none");
}
} else {
panic!(
"compute spec should be provided by one of the following ways: \
--spec OR --spec-path OR --control-plane-uri and --compute-id"
);
}
}
};
Ok(CliSpecParams {
spec,
live_config_allowed,
})
}
struct CliSpecParams {
@@ -336,7 +335,6 @@ fn wait_spec(
pgdata: pgdata.to_string(),
pgbin: pgbin.to_string(),
pgversion: get_pg_version_string(pgbin),
http_port,
live_config_allowed,
state: Mutex::new(new_state),
state_changed: Condvar::new(),
@@ -391,6 +389,7 @@ fn wait_spec(
Ok(WaitSpecResult {
compute,
http_port,
resize_swap_on_bind,
set_disk_quota_for_fs: set_disk_quota_for_fs.cloned(),
})
@@ -398,6 +397,8 @@ fn wait_spec(
struct WaitSpecResult {
compute: Arc<ComputeNode>,
// passed through from ProcessCliResult
http_port: u16,
resize_swap_on_bind: bool,
set_disk_quota_for_fs: Option<String>,
}
@@ -407,6 +408,7 @@ fn start_postgres(
#[allow(unused_variables)] matches: &clap::ArgMatches,
WaitSpecResult {
compute,
http_port,
resize_swap_on_bind,
set_disk_quota_for_fs,
}: WaitSpecResult,
@@ -419,14 +421,9 @@ fn start_postgres(
"running compute with features: {:?}",
state.pspec.as_ref().unwrap().spec.features
);
// before we release the mutex, fetch some parameters for later.
let &ComputeSpec {
swap_size_bytes,
disk_quota_bytes,
#[cfg(target_os = "linux")]
disable_lfc_resizing,
..
} = &state.pspec.as_ref().unwrap().spec;
// before we release the mutex, fetch the swap size (if any) for later.
let swap_size_bytes = state.pspec.as_ref().unwrap().spec.swap_size_bytes;
let disk_quota_bytes = state.pspec.as_ref().unwrap().spec.disk_quota_bytes;
drop(state);
// Launch remaining service threads
@@ -484,14 +481,13 @@ fn start_postgres(
}
}
let extension_server_port: u16 = http_port;
// Start Postgres
let mut pg = None;
if !prestartup_failed {
pg = match compute.start_compute() {
Ok(pg) => {
info!(postmaster_pid = %pg.0.id(), "Postgres was started");
Some(pg)
}
pg = match compute.start_compute(extension_server_port) {
Ok(pg) => Some(pg),
Err(err) => {
error!("could not start the compute node: {:#}", err);
compute.set_failed_status(err);
@@ -534,18 +530,11 @@ fn start_postgres(
// This token is used internally by the monitor to clean up all threads
let token = CancellationToken::new();
// don't pass postgres connection string to vm-monitor if we don't want it to resize LFC
let pgconnstr = if disable_lfc_resizing.unwrap_or(false) {
None
} else {
file_cache_connstr.cloned()
};
let vm_monitor = rt.as_ref().map(|rt| {
rt.spawn(vm_monitor::start(
Box::leak(Box::new(vm_monitor::Args {
cgroup: cgroup.cloned(),
pgconnstr,
pgconnstr: file_cache_connstr.cloned(),
addr: vm_monitor_addr.clone(),
})),
token.clone(),
@@ -589,8 +578,6 @@ fn wait_postgres(pg: Option<PostgresHandle>) -> Result<WaitPostgresResult> {
// propagate to Postgres and it will be shut down as well.
let mut exit_code = None;
if let Some((mut pg, logs_handle)) = pg {
info!(postmaster_pid = %pg.id(), "Waiting for Postgres to exit");
let ecode = pg
.wait()
.expect("failed to start waiting on Postgres process");

View File

@@ -34,12 +34,12 @@ use nix::unistd::Pid;
use tracing::{info, info_span, warn, Instrument};
use utils::fs_ext::is_directory_empty;
#[path = "fast_import/aws_s3_sync.rs"]
mod aws_s3_sync;
#[path = "fast_import/child_stdio_to_log.rs"]
mod child_stdio_to_log;
#[path = "fast_import/s3_uri.rs"]
mod s3_uri;
#[path = "fast_import/s5cmd.rs"]
mod s5cmd;
#[derive(clap::Parser)]
struct Args {
@@ -326,7 +326,7 @@ pub(crate) async fn main() -> anyhow::Result<()> {
}
info!("upload pgdata");
aws_s3_sync::sync(Utf8Path::new(&pgdata_dir), &s3_prefix.append("/pgdata/"))
s5cmd::sync(Utf8Path::new(&pgdata_dir), &s3_prefix.append("/"))
.await
.context("sync dump directory to destination")?;
@@ -334,10 +334,10 @@ pub(crate) async fn main() -> anyhow::Result<()> {
{
let status_dir = working_directory.join("status");
std::fs::create_dir(&status_dir).context("create status directory")?;
let status_file = status_dir.join("pgdata");
let status_file = status_dir.join("status");
std::fs::write(&status_file, serde_json::json!({"done": true}).to_string())
.context("write status file")?;
aws_s3_sync::sync(&status_dir, &s3_prefix.append("/status/"))
s5cmd::sync(&status_file, &s3_prefix.append("/status/pgdata"))
.await
.context("sync status directory to destination")?;
}

View File

@@ -4,21 +4,24 @@ use camino::Utf8Path;
use super::s3_uri::S3Uri;
pub(crate) async fn sync(local: &Utf8Path, remote: &S3Uri) -> anyhow::Result<()> {
let mut builder = tokio::process::Command::new("aws");
let mut builder = tokio::process::Command::new("s5cmd");
// s5cmd uses aws-sdk-go v1, hence doesn't support AWS_ENDPOINT_URL
if let Some(val) = std::env::var_os("AWS_ENDPOINT_URL") {
builder.arg("--endpoint-url").arg(val);
}
builder
.arg("s3")
.arg("sync")
.arg(local.as_str())
.arg(remote.to_string());
let st = builder
.spawn()
.context("spawn aws s3 sync")?
.context("spawn s5cmd")?
.wait()
.await
.context("wait for aws s3 sync")?;
.context("wait for s5cmd")?;
if st.success() {
Ok(())
} else {
Err(anyhow::anyhow!("aws s3 sync failed"))
Err(anyhow::anyhow!("s5cmd failed"))
}
}
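As an aside, the spawn-and-wait pattern this wrapper relies on can be exercised on its own. A minimal sketch, assuming only the tokio crate (with its process feature) and anyhow; the local directory, bucket, and prefix below are placeholders, not values from the change:

use anyhow::Context;

#[tokio::main]
async fn main() -> anyhow::Result<()> {
    let mut cmd = tokio::process::Command::new("s5cmd");
    // Forward a custom endpoint explicitly, since the tool may not read it from the environment.
    if let Some(endpoint) = std::env::var_os("AWS_ENDPOINT_URL") {
        cmd.arg("--endpoint-url").arg(endpoint);
    }
    cmd.arg("sync").arg("./local-dir/").arg("s3://example-bucket/prefix/");
    let status = cmd
        .spawn()
        .context("spawn s5cmd")?
        .wait()
        .await
        .context("wait for s5cmd")?;
    if !status.success() {
        anyhow::bail!("s5cmd exited with {status}");
    }
    Ok(())
}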

View File

@@ -36,11 +36,11 @@ pub async fn get_dbs_and_roles(compute: &Arc<ComputeNode>) -> anyhow::Result<Cat
#[derive(Debug, thiserror::Error)]
pub enum SchemaDumpError {
#[error("database does not exist")]
#[error("Database does not exist.")]
DatabaseDoesNotExist,
#[error("failed to execute pg_dump")]
#[error("Failed to execute pg_dump.")]
IO(#[from] std::io::Error),
#[error("unexpected I/O error")]
#[error("Unexpected error.")]
Unexpected,
}

View File

@@ -15,7 +15,7 @@ use std::time::Instant;
use anyhow::{Context, Result};
use chrono::{DateTime, Utc};
use compute_api::spec::{Database, PgIdent, Role};
use compute_api::spec::{PgIdent, Role};
use futures::future::join_all;
use futures::stream::FuturesUnordered;
use futures::StreamExt;
@@ -45,10 +45,8 @@ use crate::spec_apply::ApplySpecPhase::{
DropInvalidDatabases, DropRoles, HandleNeonExtension, HandleOtherExtensions,
RenameAndDeleteDatabases, RenameRoles, RunInEachDatabase,
};
use crate::spec_apply::PerDatabasePhase;
use crate::spec_apply::PerDatabasePhase::{
ChangeSchemaPerms, DeleteDBRoleReferences, DropSubscriptionsForDeletedDatabases,
HandleAnonExtension,
ChangeSchemaPerms, DeleteDBRoleReferences, HandleAnonExtension,
};
use crate::spec_apply::{apply_operations, MutableApplyContext, DB};
use crate::sync_sk::{check_if_synced, ping_safekeeper};
@@ -81,8 +79,6 @@ pub struct ComputeNode {
/// - we push spec and it does configuration
/// - but then it is restarted without any spec again
pub live_config_allowed: bool,
/// The port that the compute's HTTP server listens on
pub http_port: u16,
/// Volatile part of the `ComputeNode`, which should be used under `Mutex`.
/// To allow the HTTP API server to serve status requests while configuration
/// is in progress, the lock should be held only for short periods of time to do
@@ -615,7 +611,11 @@ impl ComputeNode {
/// Do all the preparations like PGDATA directory creation, configuration,
/// safekeepers sync, basebackup, etc.
#[instrument(skip_all)]
pub fn prepare_pgdata(&self, compute_state: &ComputeState) -> Result<()> {
pub fn prepare_pgdata(
&self,
compute_state: &ComputeState,
extension_server_port: u16,
) -> Result<()> {
let pspec = compute_state.pspec.as_ref().expect("spec must be set");
let spec = &pspec.spec;
let pgdata_path = Path::new(&self.pgdata);
@@ -625,7 +625,7 @@ impl ComputeNode {
config::write_postgres_conf(
&pgdata_path.join("postgresql.conf"),
&pspec.spec,
self.http_port,
Some(extension_server_port),
)?;
// Syncing safekeepers is only safe with primary nodes: if a primary
@@ -836,7 +836,7 @@ impl ComputeNode {
conf
}
pub async fn get_maintenance_client(
async fn get_maintenance_client(
conf: &tokio_postgres::Config,
) -> Result<tokio_postgres::Client> {
let mut conf = conf.clone();
@@ -945,78 +945,6 @@ impl ComputeNode {
dbs: databases,
}));
// Apply special pre drop database phase.
// NOTE: we use the code of RunInEachDatabase phase for parallelism
// and connection management, but we don't really run it in *each* database,
// only in the databases we're about to drop.
info!("Applying PerDatabase (pre-dropdb) phase");
let concurrency_token = Arc::new(tokio::sync::Semaphore::new(concurrency));
// Run the phase for each database that we're about to drop.
let db_processes = spec
.delta_operations
.iter()
.flatten()
.filter_map(move |op| {
if op.action.as_str() == "delete_db" {
Some(op.name.clone())
} else {
None
}
})
.map(|dbname| {
let spec = spec.clone();
let ctx = ctx.clone();
let jwks_roles = jwks_roles.clone();
let mut conf = conf.as_ref().clone();
let concurrency_token = concurrency_token.clone();
// We only need dbname field for this phase, so set other fields to dummy values
let db = DB::UserDB(Database {
name: dbname.clone(),
owner: "cloud_admin".to_string(),
options: None,
restrict_conn: false,
invalid: false,
});
debug!("Applying per-database phases for Database {:?}", &db);
match &db {
DB::SystemDB => {}
DB::UserDB(db) => {
conf.dbname(db.name.as_str());
}
}
let conf = Arc::new(conf);
let fut = Self::apply_spec_sql_db(
spec.clone(),
conf,
ctx.clone(),
jwks_roles.clone(),
concurrency_token.clone(),
db,
[DropSubscriptionsForDeletedDatabases].to_vec(),
);
Ok(spawn(fut))
})
.collect::<Vec<Result<_, anyhow::Error>>>();
for process in db_processes.into_iter() {
let handle = process?;
if let Err(e) = handle.await? {
// Handle the error case where the database does not exist
// We do not check whether the DB exists or not in the deletion phase,
// so we shouldn't be strict about it in pre-deletion cleanup as well.
if e.to_string().contains("does not exist") {
warn!("Error dropping subscription: {}", e);
} else {
return Err(e);
}
};
}
for phase in [
CreateSuperUser,
DropInvalidDatabases,
@@ -1036,7 +964,7 @@ impl ComputeNode {
.await?;
}
info!("Applying RunInEachDatabase2 phase");
info!("Applying RunInEachDatabase phase");
let concurrency_token = Arc::new(tokio::sync::Semaphore::new(concurrency));
let db_processes = spec
@@ -1071,12 +999,6 @@ impl ComputeNode {
jwks_roles.clone(),
concurrency_token.clone(),
db,
[
DeleteDBRoleReferences,
ChangeSchemaPerms,
HandleAnonExtension,
]
.to_vec(),
);
Ok(spawn(fut))
@@ -1123,13 +1045,16 @@ impl ComputeNode {
jwks_roles: Arc<HashSet<String>>,
concurrency_token: Arc<tokio::sync::Semaphore>,
db: DB,
subphases: Vec<PerDatabasePhase>,
) -> Result<()> {
let _permit = concurrency_token.acquire().await?;
let mut client_conn = None;
for subphase in subphases {
for subphase in [
DeleteDBRoleReferences,
ChangeSchemaPerms,
HandleAnonExtension,
] {
apply_operations(
spec.clone(),
ctx.clone(),
@@ -1258,19 +1183,8 @@ impl ComputeNode {
let mut conf = postgres::config::Config::from(conf);
conf.application_name("compute_ctl:migrations");
match conf.connect(NoTls) {
Ok(mut client) => {
if let Err(e) = handle_migrations(&mut client) {
error!("Failed to run migrations: {}", e);
}
}
Err(e) => {
error!(
"Failed to connect to the compute for running migrations: {}",
e
);
}
};
let mut client = conf.connect(NoTls)?;
handle_migrations(&mut client).context("apply_config handle_migrations")
});
Ok::<(), anyhow::Error>(())
@@ -1329,9 +1243,14 @@ impl ComputeNode {
// Write new config
let pgdata_path = Path::new(&self.pgdata);
let postgresql_conf_path = pgdata_path.join("postgresql.conf");
config::write_postgres_conf(&postgresql_conf_path, &spec, self.http_port)?;
config::write_postgres_conf(&postgresql_conf_path, &spec, None)?;
let max_concurrent_connections = spec.reconfigure_concurrency;
// TODO(ololobus): We need concurrency during reconfiguration as well,
// but the DB is already running and used by the user. We can easily exceed the
// `max_connections` limit, and the current code won't handle that.
// let compute_state = self.state.lock().unwrap().clone();
// let max_concurrent_connections = self.max_service_connections(&compute_state, &spec);
let max_concurrent_connections = 1;
// Temporarily reset max_cluster_size in config
// to avoid the possibility of hitting the limit, while we are reconfiguring:
@@ -1365,7 +1284,10 @@ impl ComputeNode {
}
#[instrument(skip_all)]
pub fn start_compute(&self) -> Result<(std::process::Child, std::thread::JoinHandle<()>)> {
pub fn start_compute(
&self,
extension_server_port: u16,
) -> Result<(std::process::Child, std::thread::JoinHandle<()>)> {
let compute_state = self.state.lock().unwrap().clone();
let pspec = compute_state.pspec.as_ref().expect("spec must be set");
info!(
@@ -1440,7 +1362,7 @@ impl ComputeNode {
info!("{:?}", remote_ext_metrics);
}
self.prepare_pgdata(&compute_state)?;
self.prepare_pgdata(&compute_state, extension_server_port)?;
let start_time = Utc::now();
let pg_process = self.start_postgres(pspec.storage_auth_token.clone())?;
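The per-database phases above fan out one task per database while a semaphore caps parallelism. A reduced sketch of that pattern, assuming only tokio and anyhow; the database names and the task body are stand-ins:

use std::sync::Arc;
use tokio::sync::Semaphore;

#[tokio::main]
async fn main() -> anyhow::Result<()> {
    // At most 4 databases are worked on concurrently.
    let concurrency_token = Arc::new(Semaphore::new(4));
    let mut handles = Vec::new();
    for dbname in ["db0", "db1", "db2", "db3", "db4"] {
        let token = concurrency_token.clone();
        handles.push(tokio::spawn(async move {
            // Hold a permit for as long as this database is being processed.
            let _permit = token.acquire_owned().await?;
            println!("applying per-database phases for {dbname}");
            Ok::<_, anyhow::Error>(())
        }));
    }
    for handle in handles {
        handle.await??;
    }
    Ok(())
}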

View File

@@ -37,7 +37,7 @@ pub fn line_in_file(path: &Path, line: &str) -> Result<bool> {
pub fn write_postgres_conf(
path: &Path,
spec: &ComputeSpec,
extension_server_port: u16,
extension_server_port: Option<u16>,
) -> Result<()> {
// File::create() destroys the file content if it exists.
let mut file = File::create(path)?;
@@ -127,7 +127,9 @@ pub fn write_postgres_conf(
writeln!(file, "# Managed by compute_ctl: end")?;
}
writeln!(file, "neon.extension_server_port={}", extension_server_port)?;
if let Some(port) = extension_server_port {
writeln!(file, "neon.extension_server_port={}", port)?;
}
// This is essential to keep this line at the end of the file,
// because it is intended to override any settings above.
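The `Option<u16>` knob above amounts to conditionally emitting a single GUC line. A tiny standard-library-only sketch of that write path; the output file names are placeholders:

use std::fs::File;
use std::io::Write;
use std::path::Path;

fn write_conf(path: &Path, extension_server_port: Option<u16>) -> std::io::Result<()> {
    // File::create() truncates any existing content, mirroring write_postgres_conf().
    let mut file = File::create(path)?;
    if let Some(port) = extension_server_port {
        writeln!(file, "neon.extension_server_port={}", port)?;
    }
    Ok(())
}

fn main() -> std::io::Result<()> {
    write_conf(Path::new("postgresql.conf.with_port"), Some(3081))?;
    write_conf(Path::new("postgresql.conf.without_port"), None)
}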

View File

@@ -0,0 +1,591 @@
use std::convert::Infallible;
use std::net::IpAddr;
use std::net::Ipv6Addr;
use std::net::SocketAddr;
use std::sync::Arc;
use std::thread;
use crate::catalog::SchemaDumpError;
use crate::catalog::{get_database_schema, get_dbs_and_roles};
use crate::compute::forward_termination_signal;
use crate::compute::{ComputeNode, ComputeState, ParsedSpec};
use crate::installed_extensions;
use compute_api::requests::{ConfigurationRequest, ExtensionInstallRequest, SetRoleGrantsRequest};
use compute_api::responses::{
ComputeStatus, ComputeStatusResponse, ExtensionInstallResult, GenericAPIError,
SetRoleGrantsResponse,
};
use anyhow::Result;
use hyper::header::CONTENT_TYPE;
use hyper::service::{make_service_fn, service_fn};
use hyper::{Body, Method, Request, Response, Server, StatusCode};
use metrics::proto::MetricFamily;
use metrics::Encoder;
use metrics::TextEncoder;
use tokio::task;
use tracing::{debug, error, info, warn};
use tracing_utils::http::OtelName;
use utils::http::request::must_get_query_param;
fn status_response_from_state(state: &ComputeState) -> ComputeStatusResponse {
ComputeStatusResponse {
start_time: state.start_time,
tenant: state
.pspec
.as_ref()
.map(|pspec| pspec.tenant_id.to_string()),
timeline: state
.pspec
.as_ref()
.map(|pspec| pspec.timeline_id.to_string()),
status: state.status,
last_active: state.last_active,
error: state.error.clone(),
}
}
// Service function to handle all available routes.
async fn routes(req: Request<Body>, compute: &Arc<ComputeNode>) -> Response<Body> {
//
// NOTE: The URI path is currently included in traces. That's OK because
// it doesn't contain any variable parts or sensitive information. But
// please keep that in mind if you change the routing here.
//
match (req.method(), req.uri().path()) {
// Serialized compute state.
(&Method::GET, "/status") => {
debug!("serving /status GET request");
let state = compute.state.lock().unwrap();
let status_response = status_response_from_state(&state);
Response::new(Body::from(serde_json::to_string(&status_response).unwrap()))
}
// Startup metrics in JSON format. Keep /metrics reserved for a possible
// future use for Prometheus metrics format.
(&Method::GET, "/metrics.json") => {
info!("serving /metrics.json GET request");
let metrics = compute.state.lock().unwrap().metrics.clone();
Response::new(Body::from(serde_json::to_string(&metrics).unwrap()))
}
// Prometheus metrics
(&Method::GET, "/metrics") => {
debug!("serving /metrics GET request");
// When we call TextEncoder::encode() below, it will immediately
// return an error if a metric family has no metrics, so we need to
// preemptively filter out metric families with no metrics.
let metrics = installed_extensions::collect()
.into_iter()
.filter(|m| !m.get_metric().is_empty())
.collect::<Vec<MetricFamily>>();
let encoder = TextEncoder::new();
let mut buffer = vec![];
if let Err(err) = encoder.encode(&metrics, &mut buffer) {
let msg = format!("error handling /metrics request: {err}");
error!(msg);
return render_json_error(&msg, StatusCode::INTERNAL_SERVER_ERROR);
}
match Response::builder()
.status(StatusCode::OK)
.header(CONTENT_TYPE, encoder.format_type())
.body(Body::from(buffer))
{
Ok(response) => response,
Err(err) => {
let msg = format!("error handling /metrics request: {err}");
error!(msg);
render_json_error(&msg, StatusCode::INTERNAL_SERVER_ERROR)
}
}
}
// Collect Postgres current usage insights
(&Method::GET, "/insights") => {
info!("serving /insights GET request");
let status = compute.get_status();
if status != ComputeStatus::Running {
let msg = format!("compute is not running, current status: {:?}", status);
error!(msg);
return Response::new(Body::from(msg));
}
let insights = compute.collect_insights().await;
Response::new(Body::from(insights))
}
(&Method::POST, "/check_writability") => {
info!("serving /check_writability POST request");
let status = compute.get_status();
if status != ComputeStatus::Running {
let msg = format!(
"invalid compute status for check_writability request: {:?}",
status
);
error!(msg);
return Response::new(Body::from(msg));
}
let res = crate::checker::check_writability(compute).await;
match res {
Ok(_) => Response::new(Body::from("true")),
Err(e) => {
error!("check_writability failed: {}", e);
Response::new(Body::from(e.to_string()))
}
}
}
(&Method::POST, "/extensions") => {
info!("serving /extensions POST request");
let status = compute.get_status();
if status != ComputeStatus::Running {
let msg = format!(
"invalid compute status for extensions request: {:?}",
status
);
error!(msg);
return render_json_error(&msg, StatusCode::PRECONDITION_FAILED);
}
let request = hyper::body::to_bytes(req.into_body()).await.unwrap();
let request = serde_json::from_slice::<ExtensionInstallRequest>(&request).unwrap();
let res = compute
.install_extension(&request.extension, &request.database, request.version)
.await;
match res {
Ok(version) => render_json(Body::from(
serde_json::to_string(&ExtensionInstallResult {
extension: request.extension,
version,
})
.unwrap(),
)),
Err(e) => {
error!("install_extension failed: {}", e);
render_json_error(&e.to_string(), StatusCode::INTERNAL_SERVER_ERROR)
}
}
}
(&Method::GET, "/info") => {
let num_cpus = num_cpus::get_physical();
info!("serving /info GET request. num_cpus: {}", num_cpus);
Response::new(Body::from(
serde_json::json!({
"num_cpus": num_cpus,
})
.to_string(),
))
}
// Accept spec in JSON format and request compute configuration. If
// anything goes wrong after we set the compute status to `ConfigurationPending`
// and update compute state with new spec, we basically leave compute
// in the potentially wrong state. That said, it's control-plane's
// responsibility to watch compute state after reconfiguration request
// and to clean restart in case of errors.
(&Method::POST, "/configure") => {
info!("serving /configure POST request");
match handle_configure_request(req, compute).await {
Ok(msg) => Response::new(Body::from(msg)),
Err((msg, code)) => {
error!("error handling /configure request: {msg}");
render_json_error(&msg, code)
}
}
}
(&Method::POST, "/terminate") => {
info!("serving /terminate POST request");
match handle_terminate_request(compute).await {
Ok(()) => Response::new(Body::empty()),
Err((msg, code)) => {
error!("error handling /terminate request: {msg}");
render_json_error(&msg, code)
}
}
}
(&Method::GET, "/dbs_and_roles") => {
info!("serving /dbs_and_roles GET request",);
match get_dbs_and_roles(compute).await {
Ok(res) => render_json(Body::from(serde_json::to_string(&res).unwrap())),
Err(_) => {
render_json_error("can't get dbs and roles", StatusCode::INTERNAL_SERVER_ERROR)
}
}
}
(&Method::GET, "/database_schema") => {
let database = match must_get_query_param(&req, "database") {
Err(e) => return e.into_response(),
Ok(database) => database,
};
info!("serving /database_schema GET request with database: {database}",);
match get_database_schema(compute, &database).await {
Ok(res) => render_plain(Body::wrap_stream(res)),
Err(SchemaDumpError::DatabaseDoesNotExist) => {
render_json_error("database does not exist", StatusCode::NOT_FOUND)
}
Err(e) => {
error!("can't get schema dump: {}", e);
render_json_error("can't get schema dump", StatusCode::INTERNAL_SERVER_ERROR)
}
}
}
(&Method::POST, "/grants") => {
info!("serving /grants POST request");
let status = compute.get_status();
if status != ComputeStatus::Running {
let msg = format!(
"invalid compute status for set_role_grants request: {:?}",
status
);
error!(msg);
return render_json_error(&msg, StatusCode::PRECONDITION_FAILED);
}
let request = hyper::body::to_bytes(req.into_body()).await.unwrap();
let request = serde_json::from_slice::<SetRoleGrantsRequest>(&request).unwrap();
let res = compute
.set_role_grants(
&request.database,
&request.schema,
&request.privileges,
&request.role,
)
.await;
match res {
Ok(()) => render_json(Body::from(
serde_json::to_string(&SetRoleGrantsResponse {
database: request.database,
schema: request.schema,
role: request.role,
privileges: request.privileges,
})
.unwrap(),
)),
Err(e) => render_json_error(
&format!("could not grant role privileges to the schema: {e}"),
// TODO: can we filter on role/schema not found errors
// and return appropriate error code?
StatusCode::INTERNAL_SERVER_ERROR,
),
}
}
// get the list of installed extensions
// currently only used in python tests
// TODO: call it from cplane
(&Method::GET, "/installed_extensions") => {
info!("serving /installed_extensions GET request");
let status = compute.get_status();
if status != ComputeStatus::Running {
let msg = format!(
"invalid compute status for extensions request: {:?}",
status
);
error!(msg);
return Response::new(Body::from(msg));
}
let conf = compute.get_conn_conf(None);
let res =
task::spawn_blocking(move || installed_extensions::get_installed_extensions(conf))
.await
.unwrap();
match res {
Ok(res) => render_json(Body::from(serde_json::to_string(&res).unwrap())),
Err(e) => render_json_error(
&format!("could not get list of installed extensions: {}", e),
StatusCode::INTERNAL_SERVER_ERROR,
),
}
}
// download extension files from remote extension storage on demand
(&Method::POST, route) if route.starts_with("/extension_server/") => {
info!("serving {:?} POST request", route);
info!("req.uri {:?}", req.uri());
// don't even try to download extensions
// if no remote storage is configured
if compute.ext_remote_storage.is_none() {
info!("no extensions remote storage configured");
let mut resp = Response::new(Body::from("no remote storage configured"));
*resp.status_mut() = StatusCode::INTERNAL_SERVER_ERROR;
return resp;
}
let mut is_library = false;
if let Some(params) = req.uri().query() {
info!("serving {:?} POST request with params: {}", route, params);
if params == "is_library=true" {
is_library = true;
} else {
let mut resp = Response::new(Body::from("Wrong request parameters"));
*resp.status_mut() = StatusCode::BAD_REQUEST;
return resp;
}
}
let filename = route.split('/').last().unwrap().to_string();
info!("serving /extension_server POST request, filename: {filename:?} is_library: {is_library}");
// get ext_name and path from spec
// don't lock compute_state for too long
let ext = {
let compute_state = compute.state.lock().unwrap();
let pspec = compute_state.pspec.as_ref().expect("spec must be set");
let spec = &pspec.spec;
// debug only
info!("spec: {:?}", spec);
let remote_extensions = match spec.remote_extensions.as_ref() {
Some(r) => r,
None => {
info!("no remote extensions spec was provided");
let mut resp = Response::new(Body::from("no remote storage configured"));
*resp.status_mut() = StatusCode::INTERNAL_SERVER_ERROR;
return resp;
}
};
remote_extensions.get_ext(
&filename,
is_library,
&compute.build_tag,
&compute.pgversion,
)
};
match ext {
Ok((ext_name, ext_path)) => {
match compute.download_extension(ext_name, ext_path).await {
Ok(_) => Response::new(Body::from("OK")),
Err(e) => {
error!("extension download failed: {}", e);
let mut resp = Response::new(Body::from(e.to_string()));
*resp.status_mut() = StatusCode::INTERNAL_SERVER_ERROR;
resp
}
}
}
Err(e) => {
warn!("extension download failed to find extension: {}", e);
let mut resp = Response::new(Body::from("failed to find file"));
*resp.status_mut() = StatusCode::INTERNAL_SERVER_ERROR;
resp
}
}
}
// Return the `404 Not Found` for any other routes.
_ => {
let mut not_found = Response::new(Body::from("404 Not Found"));
*not_found.status_mut() = StatusCode::NOT_FOUND;
not_found
}
}
}
async fn handle_configure_request(
req: Request<Body>,
compute: &Arc<ComputeNode>,
) -> Result<String, (String, StatusCode)> {
if !compute.live_config_allowed {
return Err((
"live configuration is not allowed for this compute node".to_string(),
StatusCode::PRECONDITION_FAILED,
));
}
let body_bytes = hyper::body::to_bytes(req.into_body()).await.unwrap();
let spec_raw = String::from_utf8(body_bytes.to_vec()).unwrap();
if let Ok(request) = serde_json::from_str::<ConfigurationRequest>(&spec_raw) {
let spec = request.spec;
let parsed_spec = match ParsedSpec::try_from(spec) {
Ok(ps) => ps,
Err(msg) => return Err((msg, StatusCode::BAD_REQUEST)),
};
// XXX: wrap state update under lock in code blocks. Otherwise,
// we will try to `Send` `mut state` into the spawned thread
// below, which will cause the error:
// ```
// error: future cannot be sent between threads safely
// ```
{
let mut state = compute.state.lock().unwrap();
if state.status != ComputeStatus::Empty && state.status != ComputeStatus::Running {
let msg = format!(
"invalid compute status for configuration request: {:?}",
state.status.clone()
);
return Err((msg, StatusCode::PRECONDITION_FAILED));
}
state.pspec = Some(parsed_spec);
state.set_status(ComputeStatus::ConfigurationPending, &compute.state_changed);
drop(state);
info!("set new spec and notified waiters");
}
// Spawn a blocking thread to wait for compute to become Running.
// This is needed so we don't block the main pool of workers and
// can serve other requests while some particular request
// is waiting for the compute to finish configuration.
let c = compute.clone();
task::spawn_blocking(move || {
let mut state = c.state.lock().unwrap();
while state.status != ComputeStatus::Running {
state = c.state_changed.wait(state).unwrap();
info!(
"waiting for compute to become Running, current status: {:?}",
state.status
);
if state.status == ComputeStatus::Failed {
let err = state.error.as_ref().map_or("unknown error", |x| x);
let msg = format!("compute configuration failed: {:?}", err);
return Err((msg, StatusCode::INTERNAL_SERVER_ERROR));
}
}
Ok(())
})
.await
.unwrap()?;
// Return current compute state if everything went well.
let state = compute.state.lock().unwrap().clone();
let status_response = status_response_from_state(&state);
Ok(serde_json::to_string(&status_response).unwrap())
} else {
Err(("invalid spec".to_string(), StatusCode::BAD_REQUEST))
}
}
fn render_json_error(e: &str, status: StatusCode) -> Response<Body> {
let error = GenericAPIError {
error: e.to_string(),
};
Response::builder()
.status(status)
.header(CONTENT_TYPE, "application/json")
.body(Body::from(serde_json::to_string(&error).unwrap()))
.unwrap()
}
fn render_json(body: Body) -> Response<Body> {
Response::builder()
.header(CONTENT_TYPE, "application/json")
.body(body)
.unwrap()
}
fn render_plain(body: Body) -> Response<Body> {
Response::builder()
.header(CONTENT_TYPE, "text/plain")
.body(body)
.unwrap()
}
async fn handle_terminate_request(compute: &Arc<ComputeNode>) -> Result<(), (String, StatusCode)> {
{
let mut state = compute.state.lock().unwrap();
if state.status == ComputeStatus::Terminated {
return Ok(());
}
if state.status != ComputeStatus::Empty && state.status != ComputeStatus::Running {
let msg = format!(
"invalid compute status for termination request: {}",
state.status
);
return Err((msg, StatusCode::PRECONDITION_FAILED));
}
state.set_status(ComputeStatus::TerminationPending, &compute.state_changed);
drop(state);
}
forward_termination_signal();
info!("sent signal and notified waiters");
// Spawn a blocking thread to wait for compute to become Terminated.
// This is needed so we don't block the main pool of workers and
// can serve other requests while some particular request
// is waiting for the compute to finish terminating.
let c = compute.clone();
task::spawn_blocking(move || {
let mut state = c.state.lock().unwrap();
while state.status != ComputeStatus::Terminated {
state = c.state_changed.wait(state).unwrap();
info!(
"waiting for compute to become {}, current status: {:?}",
ComputeStatus::Terminated,
state.status
);
}
Ok(())
})
.await
.unwrap()?;
info!("terminated Postgres");
Ok(())
}
// Main Hyper HTTP server function that runs it and blocks waiting on it forever.
#[tokio::main]
async fn serve(port: u16, state: Arc<ComputeNode>) {
// this usually binds to both IPv4 and IPv6 on linux
// see e.g. https://github.com/rust-lang/rust/pull/34440
let addr = SocketAddr::new(IpAddr::from(Ipv6Addr::UNSPECIFIED), port);
let make_service = make_service_fn(move |_conn| {
let state = state.clone();
async move {
Ok::<_, Infallible>(service_fn(move |req: Request<Body>| {
let state = state.clone();
async move {
Ok::<_, Infallible>(
// NOTE: We include the URI path in the string. It
// doesn't contain any variable parts or sensitive
// information in this API.
tracing_utils::http::tracing_handler(
req,
|req| routes(req, &state),
OtelName::UriPath,
)
.await,
)
}
}))
}
});
info!("starting HTTP server on {}", addr);
let server = Server::bind(&addr).serve(make_service);
// Run this server forever
if let Err(e) = server.await {
error!("server error: {}", e);
}
}
/// Launch a separate Hyper HTTP API server thread and return its `JoinHandle`.
pub fn launch_http_server(port: u16, state: &Arc<ComputeNode>) -> Result<thread::JoinHandle<()>> {
let state = Arc::clone(state);
Ok(thread::Builder::new()
.name("http-endpoint".into())
.spawn(move || serve(port, state))?)
}
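Both the /configure and /terminate handlers above park a blocking task on a Condvar until the compute reaches the desired status. A reduced sketch of that handoff, assuming only tokio; the Status enum here is a stand-in for ComputeStatus:

use std::sync::{Arc, Condvar, Mutex};
use std::thread;
use std::time::Duration;

#[derive(Clone, Copy, PartialEq, Debug)]
enum Status {
    Pending,
    Running,
}

struct Shared {
    status: Mutex<Status>,
    changed: Condvar,
}

#[tokio::main]
async fn main() {
    let shared = Arc::new(Shared {
        status: Mutex::new(Status::Pending),
        changed: Condvar::new(),
    });

    // Something else flips the status later and notifies the waiters.
    let flipper = shared.clone();
    thread::spawn(move || {
        thread::sleep(Duration::from_millis(50));
        *flipper.status.lock().unwrap() = Status::Running;
        flipper.changed.notify_all();
    });

    // Park the Condvar wait on a blocking thread so the async workers stay free.
    let waiter = shared.clone();
    tokio::task::spawn_blocking(move || {
        let mut status = waiter.status.lock().unwrap();
        while *status != Status::Running {
            status = waiter.changed.wait(status).unwrap();
        }
    })
    .await
    .unwrap();

    println!("status is now {:?}", *shared.status.lock().unwrap());
}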

View File

@@ -1,48 +0,0 @@
use std::ops::{Deref, DerefMut};
use axum::{
async_trait,
extract::{rejection::JsonRejection, FromRequest, Request},
};
use compute_api::responses::GenericAPIError;
use http::StatusCode;
/// Custom `Json` extractor, so that we can format errors into
/// `JsonResponse<GenericAPIError>`.
#[derive(Debug, Clone, Copy, Default)]
pub(crate) struct Json<T>(pub T);
#[async_trait]
impl<S, T> FromRequest<S> for Json<T>
where
axum::Json<T>: FromRequest<S, Rejection = JsonRejection>,
S: Send + Sync,
{
type Rejection = (StatusCode, axum::Json<GenericAPIError>);
async fn from_request(req: Request, state: &S) -> Result<Self, Self::Rejection> {
match axum::Json::<T>::from_request(req, state).await {
Ok(value) => Ok(Self(value.0)),
Err(rejection) => Err((
rejection.status(),
axum::Json(GenericAPIError {
error: rejection.body_text().to_lowercase(),
}),
)),
}
}
}
impl<T> Deref for Json<T> {
type Target = T;
fn deref(&self) -> &Self::Target {
&self.0
}
}
impl<T> DerefMut for Json<T> {
fn deref_mut(&mut self) -> &mut Self::Target {
&mut self.0
}
}

View File

@@ -1,7 +0,0 @@
pub(crate) mod json;
pub(crate) mod path;
pub(crate) mod query;
pub(crate) use json::Json;
pub(crate) use path::Path;
pub(crate) use query::Query;

View File

@@ -1,48 +0,0 @@
use std::ops::{Deref, DerefMut};
use axum::{
async_trait,
extract::{rejection::PathRejection, FromRequestParts},
};
use compute_api::responses::GenericAPIError;
use http::{request::Parts, StatusCode};
/// Custom `Path` extractor, so that we can format errors into
/// `JsonResponse<GenericAPIError>`.
#[derive(Debug, Clone, Copy, Default)]
pub(crate) struct Path<T>(pub T);
#[async_trait]
impl<S, T> FromRequestParts<S> for Path<T>
where
axum::extract::Path<T>: FromRequestParts<S, Rejection = PathRejection>,
S: Send + Sync,
{
type Rejection = (StatusCode, axum::Json<GenericAPIError>);
async fn from_request_parts(parts: &mut Parts, state: &S) -> Result<Self, Self::Rejection> {
match axum::extract::Path::<T>::from_request_parts(parts, state).await {
Ok(value) => Ok(Self(value.0)),
Err(rejection) => Err((
rejection.status(),
axum::Json(GenericAPIError {
error: rejection.body_text().to_ascii_lowercase(),
}),
)),
}
}
}
impl<T> Deref for Path<T> {
type Target = T;
fn deref(&self) -> &Self::Target {
&self.0
}
}
impl<T> DerefMut for Path<T> {
fn deref_mut(&mut self) -> &mut Self::Target {
&mut self.0
}
}

View File

@@ -1,48 +0,0 @@
use std::ops::{Deref, DerefMut};
use axum::{
async_trait,
extract::{rejection::QueryRejection, FromRequestParts},
};
use compute_api::responses::GenericAPIError;
use http::{request::Parts, StatusCode};
/// Custom `Query` extractor, so that we can format errors into
/// `JsonResponse<GenericAPIError>`.
#[derive(Debug, Clone, Copy, Default)]
pub(crate) struct Query<T>(pub T);
#[async_trait]
impl<S, T> FromRequestParts<S> for Query<T>
where
axum::extract::Query<T>: FromRequestParts<S, Rejection = QueryRejection>,
S: Send + Sync,
{
type Rejection = (StatusCode, axum::Json<GenericAPIError>);
async fn from_request_parts(parts: &mut Parts, state: &S) -> Result<Self, Self::Rejection> {
match axum::extract::Query::<T>::from_request_parts(parts, state).await {
Ok(value) => Ok(Self(value.0)),
Err(rejection) => Err((
rejection.status(),
axum::Json(GenericAPIError {
error: rejection.body_text().to_ascii_lowercase(),
}),
)),
}
}
}
impl<T> Deref for Query<T> {
type Target = T;
fn deref(&self) -> &Self::Target {
&self.0
}
}
impl<T> DerefMut for Query<T> {
fn deref_mut(&mut self) -> &mut Self::Target {
&mut self.0
}
}

View File

@@ -1,56 +1 @@
use axum::{body::Body, response::Response};
use compute_api::responses::{ComputeStatus, GenericAPIError};
use http::{header::CONTENT_TYPE, StatusCode};
use serde::Serialize;
use tracing::error;
pub use server::launch_http_server;
mod extract;
mod routes;
mod server;
/// Convenience response builder for JSON responses
struct JsonResponse;
impl JsonResponse {
/// Helper for actually creating a response
fn create_response(code: StatusCode, body: impl Serialize) -> Response {
Response::builder()
.status(code)
.header(CONTENT_TYPE.as_str(), "application/json")
.body(Body::from(serde_json::to_string(&body).unwrap()))
.unwrap()
}
/// Create a successful response
pub(self) fn success(code: StatusCode, body: impl Serialize) -> Response {
assert!({
let code = code.as_u16();
(200..300).contains(&code)
});
Self::create_response(code, body)
}
/// Create an error response
pub(self) fn error(code: StatusCode, error: impl ToString) -> Response {
assert!(code.as_u16() >= 400);
let message = error.to_string();
error!(message);
Self::create_response(code, &GenericAPIError { error: message })
}
/// Create an error response related to the compute being in an invalid state
pub(self) fn invalid_status(status: ComputeStatus) -> Response {
Self::create_response(
StatusCode::PRECONDITION_FAILED,
&GenericAPIError {
error: format!("invalid compute status: {status}"),
},
)
}
}
pub mod api;

View File

@@ -37,7 +37,7 @@ paths:
schema:
$ref: "#/components/schemas/ComputeMetrics"
/metrics:
/metrics
get:
tags:
- Info
@@ -537,14 +537,12 @@ components:
properties:
extname:
type: string
version:
type: string
versions:
type: array
items:
type: string
n_databases:
type: integer
owned_by_superuser:
type: integer
SetRoleGrantsRequest:
type: object

View File

@@ -1,20 +0,0 @@
use std::sync::Arc;
use axum::{extract::State, response::Response};
use compute_api::responses::ComputeStatus;
use http::StatusCode;
use crate::{checker::check_writability, compute::ComputeNode, http::JsonResponse};
/// Check that the compute is currently running.
pub(in crate::http) async fn is_writable(State(compute): State<Arc<ComputeNode>>) -> Response {
let status = compute.get_status();
if status != ComputeStatus::Running {
return JsonResponse::invalid_status(status);
}
match check_writability(&compute).await {
Ok(_) => JsonResponse::success(StatusCode::OK, true),
Err(e) => JsonResponse::error(StatusCode::INTERNAL_SERVER_ERROR, e),
}
}

View File

@@ -1,91 +0,0 @@
use std::sync::Arc;
use axum::{extract::State, response::Response};
use compute_api::{
requests::ConfigurationRequest,
responses::{ComputeStatus, ComputeStatusResponse},
};
use http::StatusCode;
use tokio::task;
use tracing::info;
use crate::{
compute::{ComputeNode, ParsedSpec},
http::{extract::Json, JsonResponse},
};
// Accept spec in JSON format and request compute configuration. If anything
// goes wrong after we set the compute status to `ConfigurationPending` and
// update compute state with new spec, we basically leave compute in the
// potentially wrong state. That said, it's control-plane's responsibility to
// watch compute state after reconfiguration request and to clean restart in
// case of errors.
pub(in crate::http) async fn configure(
State(compute): State<Arc<ComputeNode>>,
request: Json<ConfigurationRequest>,
) -> Response {
if !compute.live_config_allowed {
return JsonResponse::error(
StatusCode::PRECONDITION_FAILED,
"live configuration is not allowed for this compute node".to_string(),
);
}
let pspec = match ParsedSpec::try_from(request.spec.clone()) {
Ok(p) => p,
Err(e) => return JsonResponse::error(StatusCode::BAD_REQUEST, e),
};
// XXX: wrap state update under lock in a code block. Otherwise, we will try
// to `Send` `mut state` into the spawned thread below, which will cause
// the following rustc error:
//
// error: future cannot be sent between threads safely
{
let mut state = compute.state.lock().unwrap();
if !matches!(state.status, ComputeStatus::Empty | ComputeStatus::Running) {
return JsonResponse::invalid_status(state.status);
}
state.pspec = Some(pspec);
state.set_status(ComputeStatus::ConfigurationPending, &compute.state_changed);
drop(state);
}
// Spawn a blocking thread to wait for compute to become Running. This is
// needed so we don't block the main pool of workers and can serve
// other requests while some particular request is waiting for the compute to
// finish configuration.
let c = compute.clone();
let completed = task::spawn_blocking(move || {
let mut state = c.state.lock().unwrap();
while state.status != ComputeStatus::Running {
state = c.state_changed.wait(state).unwrap();
info!(
"waiting for compute to become {}, current status: {}",
ComputeStatus::Running,
state.status
);
if state.status == ComputeStatus::Failed {
let err = state.error.as_ref().map_or("unknown error", |x| x);
let msg = format!("compute configuration failed: {:?}", err);
return Err(msg);
}
}
Ok(())
})
.await
.unwrap();
if let Err(e) = completed {
return JsonResponse::error(StatusCode::INTERNAL_SERVER_ERROR, e);
}
// Return current compute state if everything went well.
let state = compute.state.lock().unwrap().clone();
let body = ComputeStatusResponse::from(&state);
JsonResponse::success(StatusCode::OK, body)
}

View File

@@ -1,34 +0,0 @@
use std::sync::Arc;
use axum::{body::Body, extract::State, response::Response};
use http::{header::CONTENT_TYPE, StatusCode};
use serde::Deserialize;
use crate::{
catalog::{get_database_schema, SchemaDumpError},
compute::ComputeNode,
http::{extract::Query, JsonResponse},
};
#[derive(Debug, Clone, Deserialize)]
pub(in crate::http) struct DatabaseSchemaParams {
database: String,
}
/// Get a schema dump of the requested database.
pub(in crate::http) async fn get_schema_dump(
params: Query<DatabaseSchemaParams>,
State(compute): State<Arc<ComputeNode>>,
) -> Response {
match get_database_schema(&compute, &params.database).await {
Ok(schema) => Response::builder()
.status(StatusCode::OK)
.header(CONTENT_TYPE.as_str(), "application/json")
.body(Body::from_stream(schema))
.unwrap(),
Err(SchemaDumpError::DatabaseDoesNotExist) => {
JsonResponse::error(StatusCode::NOT_FOUND, SchemaDumpError::DatabaseDoesNotExist)
}
Err(e) => JsonResponse::error(StatusCode::INTERNAL_SERVER_ERROR, e),
}
}

View File

@@ -1,16 +0,0 @@
use std::sync::Arc;
use axum::{extract::State, response::Response};
use http::StatusCode;
use crate::{catalog::get_dbs_and_roles, compute::ComputeNode, http::JsonResponse};
/// Get the databases and roles from the compute.
pub(in crate::http) async fn get_catalog_objects(
State(compute): State<Arc<ComputeNode>>,
) -> Response {
match get_dbs_and_roles(&compute).await {
Ok(catalog_objects) => JsonResponse::success(StatusCode::OK, catalog_objects),
Err(e) => JsonResponse::error(StatusCode::INTERNAL_SERVER_ERROR, e),
}
}

View File

@@ -1,67 +0,0 @@
use std::sync::Arc;
use axum::{
extract::State,
response::{IntoResponse, Response},
};
use http::StatusCode;
use serde::Deserialize;
use crate::{
compute::ComputeNode,
http::{
extract::{Path, Query},
JsonResponse,
},
};
#[derive(Debug, Clone, Deserialize)]
pub(in crate::http) struct ExtensionServerParams {
is_library: Option<bool>,
}
/// Download a remote extension.
pub(in crate::http) async fn download_extension(
Path(filename): Path<String>,
params: Query<ExtensionServerParams>,
State(compute): State<Arc<ComputeNode>>,
) -> Response {
// Don't even try to download extensions if no remote storage is configured
if compute.ext_remote_storage.is_none() {
return JsonResponse::error(
StatusCode::PRECONDITION_FAILED,
"remote storage is not configured",
);
}
let ext = {
let state = compute.state.lock().unwrap();
let pspec = state.pspec.as_ref().unwrap();
let spec = &pspec.spec;
let remote_extensions = match spec.remote_extensions.as_ref() {
Some(r) => r,
None => {
return JsonResponse::error(
StatusCode::CONFLICT,
"information about remote extensions is unavailable",
);
}
};
remote_extensions.get_ext(
&filename,
params.is_library.unwrap_or(false),
&compute.build_tag,
&compute.pgversion,
)
};
match ext {
Ok((ext_name, ext_path)) => match compute.download_extension(ext_name, ext_path).await {
Ok(_) => StatusCode::OK.into_response(),
Err(e) => JsonResponse::error(StatusCode::INTERNAL_SERVER_ERROR, e),
},
Err(e) => JsonResponse::error(StatusCode::NOT_FOUND, e),
}
}

View File

@@ -1,45 +0,0 @@
use std::sync::Arc;
use axum::{extract::State, response::Response};
use compute_api::{
requests::ExtensionInstallRequest,
responses::{ComputeStatus, ExtensionInstallResponse},
};
use http::StatusCode;
use crate::{
compute::ComputeNode,
http::{extract::Json, JsonResponse},
};
/// Install an extension.
pub(in crate::http) async fn install_extension(
State(compute): State<Arc<ComputeNode>>,
request: Json<ExtensionInstallRequest>,
) -> Response {
let status = compute.get_status();
if status != ComputeStatus::Running {
return JsonResponse::invalid_status(status);
}
match compute
.install_extension(
&request.extension,
&request.database,
request.version.to_string(),
)
.await
{
Ok(version) => JsonResponse::success(
StatusCode::CREATED,
Some(ExtensionInstallResponse {
extension: request.extension.clone(),
version,
}),
),
Err(e) => JsonResponse::error(
StatusCode::INTERNAL_SERVER_ERROR,
format!("failed to install extension: {e}"),
),
}
}

View File

@@ -1,35 +0,0 @@
use axum::response::{IntoResponse, Response};
use http::StatusCode;
use tracing::info;
use utils::failpoint_support::{apply_failpoint, ConfigureFailpointsRequest};
use crate::http::{extract::Json, JsonResponse};
/// Configure failpoints for testing purposes.
pub(in crate::http) async fn configure_failpoints(
failpoints: Json<ConfigureFailpointsRequest>,
) -> Response {
if !fail::has_failpoints() {
return JsonResponse::error(
StatusCode::PRECONDITION_FAILED,
"Cannot manage failpoints because neon was compiled without failpoints support",
);
}
for fp in &*failpoints {
info!("cfg failpoint: {} {}", fp.name, fp.actions);
// We recognize one extra "action" that's not natively recognized
// by the failpoints crate: exit, to immediately kill the process
let cfg_result = apply_failpoint(&fp.name, &fp.actions);
if let Err(e) = cfg_result {
return JsonResponse::error(
StatusCode::BAD_REQUEST,
format!("failed to configure failpoints: {e}"),
);
}
}
StatusCode::OK.into_response()
}
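The handler above only forwards failpoint configuration; the underlying mechanism can be exercised directly with the fail crate, provided its failpoints feature is enabled (that is what has_failpoints() checks). A minimal sketch; the failpoint name is made up:

use fail::fail_point;

fn guarded_step() -> Result<(), String> {
    // With a "return" action configured, this line short-circuits the function.
    fail_point!("demo-step", |_| Err("injected failure".to_string()));
    Ok(())
}

fn main() {
    assert!(guarded_step().is_ok());
    fail::cfg("demo-step", "return").unwrap();
    assert!(guarded_step().is_err());
    fail::remove("demo-step");
}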

View File

@@ -1,48 +0,0 @@
use std::sync::Arc;
use axum::{extract::State, response::Response};
use compute_api::{
requests::SetRoleGrantsRequest,
responses::{ComputeStatus, SetRoleGrantsResponse},
};
use http::StatusCode;
use crate::{
compute::ComputeNode,
http::{extract::Json, JsonResponse},
};
/// Add grants for a role.
pub(in crate::http) async fn add_grant(
State(compute): State<Arc<ComputeNode>>,
request: Json<SetRoleGrantsRequest>,
) -> Response {
let status = compute.get_status();
if status != ComputeStatus::Running {
return JsonResponse::invalid_status(status);
}
match compute
.set_role_grants(
&request.database,
&request.schema,
&request.privileges,
&request.role,
)
.await
{
Ok(()) => JsonResponse::success(
StatusCode::CREATED,
Some(SetRoleGrantsResponse {
database: request.database.clone(),
schema: request.schema.clone(),
role: request.role.clone(),
privileges: request.privileges.clone(),
}),
),
Err(e) => JsonResponse::error(
StatusCode::INTERNAL_SERVER_ERROR,
format!("failed to grant role privileges to the schema: {e}"),
),
}
}

View File

@@ -1,11 +0,0 @@
use axum::response::Response;
use compute_api::responses::InfoResponse;
use http::StatusCode;
use crate::http::JsonResponse;
/// Get information about the physical characteristics of the compute.
pub(in crate::http) async fn get_info() -> Response {
let num_cpus = num_cpus::get_physical();
JsonResponse::success(StatusCode::OK, &InfoResponse { num_cpus })
}

View File

@@ -1,18 +0,0 @@
use std::sync::Arc;
use axum::{extract::State, response::Response};
use compute_api::responses::ComputeStatus;
use http::StatusCode;
use crate::{compute::ComputeNode, http::JsonResponse};
/// Collect current Postgres usage insights.
pub(in crate::http) async fn get_insights(State(compute): State<Arc<ComputeNode>>) -> Response {
let status = compute.get_status();
if status != ComputeStatus::Running {
return JsonResponse::invalid_status(status);
}
let insights = compute.collect_insights().await;
JsonResponse::success(StatusCode::OK, insights)
}

View File

@@ -1,33 +0,0 @@
use std::sync::Arc;
use axum::{extract::State, response::Response};
use compute_api::responses::ComputeStatus;
use http::StatusCode;
use tokio::task;
use crate::{compute::ComputeNode, http::JsonResponse, installed_extensions};
/// Get a list of installed extensions.
pub(in crate::http) async fn get_installed_extensions(
State(compute): State<Arc<ComputeNode>>,
) -> Response {
let status = compute.get_status();
if status != ComputeStatus::Running {
return JsonResponse::invalid_status(status);
}
let conf = compute.get_conn_conf(None);
let res = task::spawn_blocking(move || installed_extensions::get_installed_extensions(conf))
.await
.unwrap();
match res {
Ok(installed_extensions) => {
JsonResponse::success(StatusCode::OK, Some(installed_extensions))
}
Err(e) => JsonResponse::error(
StatusCode::INTERNAL_SERVER_ERROR,
format!("failed to get list of installed extensions: {e}"),
),
}
}

View File

@@ -1,32 +0,0 @@
use axum::{body::Body, response::Response};
use http::header::CONTENT_TYPE;
use http::StatusCode;
use metrics::proto::MetricFamily;
use metrics::Encoder;
use metrics::TextEncoder;
use crate::{http::JsonResponse, installed_extensions};
/// Expose Prometheus metrics.
pub(in crate::http) async fn get_metrics() -> Response {
// When we call TextEncoder::encode() below, it will immediately return an
// error if a metric family has no metrics, so we need to preemptively
// filter out metric families with no metrics.
let metrics = installed_extensions::collect()
.into_iter()
.filter(|m| !m.get_metric().is_empty())
.collect::<Vec<MetricFamily>>();
let encoder = TextEncoder::new();
let mut buffer = vec![];
if let Err(e) = encoder.encode(&metrics, &mut buffer) {
return JsonResponse::error(StatusCode::INTERNAL_SERVER_ERROR, e);
}
Response::builder()
.status(StatusCode::OK)
.header(CONTENT_TYPE, encoder.format_type())
.body(Body::from(buffer))
.unwrap()
}

View File

@@ -1,12 +0,0 @@
use std::sync::Arc;
use axum::{extract::State, response::Response};
use http::StatusCode;
use crate::{compute::ComputeNode, http::JsonResponse};
/// Get startup metrics.
pub(in crate::http) async fn get_metrics(State(compute): State<Arc<ComputeNode>>) -> Response {
let metrics = compute.state.lock().unwrap().metrics.clone();
JsonResponse::success(StatusCode::OK, metrics)
}

View File

@@ -1,38 +0,0 @@
use compute_api::responses::ComputeStatusResponse;
use crate::compute::ComputeState;
pub(in crate::http) mod check_writability;
pub(in crate::http) mod configure;
pub(in crate::http) mod database_schema;
pub(in crate::http) mod dbs_and_roles;
pub(in crate::http) mod extension_server;
pub(in crate::http) mod extensions;
pub(in crate::http) mod failpoints;
pub(in crate::http) mod grants;
pub(in crate::http) mod info;
pub(in crate::http) mod insights;
pub(in crate::http) mod installed_extensions;
pub(in crate::http) mod metrics;
pub(in crate::http) mod metrics_json;
pub(in crate::http) mod status;
pub(in crate::http) mod terminate;
impl From<&ComputeState> for ComputeStatusResponse {
fn from(state: &ComputeState) -> Self {
ComputeStatusResponse {
start_time: state.start_time,
tenant: state
.pspec
.as_ref()
.map(|pspec| pspec.tenant_id.to_string()),
timeline: state
.pspec
.as_ref()
.map(|pspec| pspec.timeline_id.to_string()),
status: state.status,
last_active: state.last_active,
error: state.error.clone(),
}
}
}

View File

@@ -1,14 +0,0 @@
use std::{ops::Deref, sync::Arc};
use axum::{extract::State, http::StatusCode, response::Response};
use compute_api::responses::ComputeStatusResponse;
use crate::{compute::ComputeNode, http::JsonResponse};
/// Retrieve the state of the compute.
pub(in crate::http) async fn get_status(State(compute): State<Arc<ComputeNode>>) -> Response {
let state = compute.state.lock().unwrap();
let body = ComputeStatusResponse::from(state.deref());
JsonResponse::success(StatusCode::OK, body)
}

View File

@@ -1,58 +0,0 @@
use std::sync::Arc;
use axum::{
extract::State,
response::{IntoResponse, Response},
};
use compute_api::responses::ComputeStatus;
use http::StatusCode;
use tokio::task;
use tracing::info;
use crate::{
compute::{forward_termination_signal, ComputeNode},
http::JsonResponse,
};
/// Terminate the compute.
pub(in crate::http) async fn terminate(State(compute): State<Arc<ComputeNode>>) -> Response {
{
let mut state = compute.state.lock().unwrap();
if state.status == ComputeStatus::Terminated {
return StatusCode::CREATED.into_response();
}
if !matches!(state.status, ComputeStatus::Empty | ComputeStatus::Running) {
return JsonResponse::invalid_status(state.status);
}
state.set_status(ComputeStatus::TerminationPending, &compute.state_changed);
drop(state);
}
forward_termination_signal();
info!("sent signal and notified waiters");
// Spawn a blocking thread to wait for compute to become Terminated.
// This is needed so we don't block the main pool of workers and
// can serve other requests while some particular request
// is waiting for the compute to finish terminating.
let c = compute.clone();
task::spawn_blocking(move || {
let mut state = c.state.lock().unwrap();
while state.status != ComputeStatus::Terminated {
state = c.state_changed.wait(state).unwrap();
info!(
"waiting for compute to become {}, current status: {:?}",
ComputeStatus::Terminated,
state.status
);
}
})
.await
.unwrap();
info!("terminated Postgres");
StatusCode::OK.into_response()
}

View File

@@ -1,165 +0,0 @@
use std::{
net::{IpAddr, Ipv6Addr, SocketAddr},
sync::{
atomic::{AtomicU64, Ordering},
Arc,
},
thread,
time::Duration,
};
use anyhow::Result;
use axum::{
response::{IntoResponse, Response},
routing::{get, post},
Router,
};
use http::StatusCode;
use tokio::net::TcpListener;
use tower::ServiceBuilder;
use tower_http::{
request_id::{MakeRequestId, PropagateRequestIdLayer, RequestId, SetRequestIdLayer},
trace::TraceLayer,
};
use tracing::{debug, error, info, Span};
use super::routes::{
check_writability, configure, database_schema, dbs_and_roles, extension_server, extensions,
grants, info as info_route, insights, installed_extensions, metrics, metrics_json, status,
terminate,
};
use crate::compute::ComputeNode;
async fn handle_404() -> Response {
StatusCode::NOT_FOUND.into_response()
}
#[derive(Clone, Default)]
struct ComputeMakeRequestId(Arc<AtomicU64>);
impl MakeRequestId for ComputeMakeRequestId {
fn make_request_id<B>(
&mut self,
_request: &http::Request<B>,
) -> Option<tower_http::request_id::RequestId> {
let request_id = self
.0
.fetch_add(1, Ordering::SeqCst)
.to_string()
.parse()
.unwrap();
Some(RequestId::new(request_id))
}
}
/// Run the HTTP server and wait on it forever.
#[tokio::main]
async fn serve(port: u16, compute: Arc<ComputeNode>) {
const X_REQUEST_ID: &str = "x-request-id";
let mut app = Router::new()
.route("/check_writability", post(check_writability::is_writable))
.route("/configure", post(configure::configure))
.route("/database_schema", get(database_schema::get_schema_dump))
.route("/dbs_and_roles", get(dbs_and_roles::get_catalog_objects))
.route(
"/extension_server/*filename",
post(extension_server::download_extension),
)
.route("/extensions", post(extensions::install_extension))
.route("/grants", post(grants::add_grant))
.route("/info", get(info_route::get_info))
.route("/insights", get(insights::get_insights))
.route(
"/installed_extensions",
get(installed_extensions::get_installed_extensions),
)
.route("/metrics", get(metrics::get_metrics))
.route("/metrics.json", get(metrics_json::get_metrics))
.route("/status", get(status::get_status))
.route("/terminate", post(terminate::terminate))
.fallback(handle_404)
.layer(
ServiceBuilder::new()
.layer(SetRequestIdLayer::x_request_id(
ComputeMakeRequestId::default(),
))
.layer(
TraceLayer::new_for_http()
.on_request(|request: &http::Request<_>, _span: &Span| {
let request_id = request
.headers()
.get(X_REQUEST_ID)
.unwrap()
.to_str()
.unwrap();
match request.uri().path() {
"/metrics" => {
debug!(%request_id, "{} {}", request.method(), request.uri())
}
_ => info!(%request_id, "{} {}", request.method(), request.uri()),
};
})
.on_response(
|response: &http::Response<_>, latency: Duration, _span: &Span| {
let request_id = response
.headers()
.get(X_REQUEST_ID)
.unwrap()
.to_str()
.unwrap();
info!(
%request_id,
code = response.status().as_u16(),
latency = latency.as_millis()
)
},
),
)
.layer(PropagateRequestIdLayer::x_request_id()),
)
.with_state(compute);
// Add in any testing support
if cfg!(feature = "testing") {
use super::routes::failpoints;
app = app.route("/failpoints", post(failpoints::configure_failpoints))
}
// This usually binds to both IPv4 and IPv6 on Linux, see
// https://github.com/rust-lang/rust/pull/34440 for more information
let addr = SocketAddr::new(IpAddr::from(Ipv6Addr::UNSPECIFIED), port);
let listener = match TcpListener::bind(&addr).await {
Ok(listener) => listener,
Err(e) => {
error!(
"failed to bind the compute_ctl HTTP server to port {}: {}",
port, e
);
return;
}
};
if let Ok(local_addr) = listener.local_addr() {
info!("compute_ctl HTTP server listening on {}", local_addr);
} else {
info!("compute_ctl HTTP server listening on port {}", port);
}
if let Err(e) = axum::serve(listener, app).await {
error!("compute_ctl HTTP server error: {}", e);
}
}
/// Launch a separate HTTP server thread and return its `JoinHandle`.
pub fn launch_http_server(port: u16, state: &Arc<ComputeNode>) -> Result<thread::JoinHandle<()>> {
let state = Arc::clone(state);
Ok(thread::Builder::new()
.name("http-server".into())
.spawn(move || serve(port, state))?)
}
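
Not part of the diff: a minimal std-only sketch of the dual-stack behaviour the bind comment above relies on (assumes Linux's default net.ipv6.bindv6only=0; port 0 is only used here to let the OS pick a free port):

use std::net::{Ipv6Addr, SocketAddr, TcpListener, TcpStream};

fn main() -> std::io::Result<()> {
    // Bind [::]:0 the same way serve() binds [::]:port.
    let listener = TcpListener::bind(SocketAddr::from((Ipv6Addr::UNSPECIFIED, 0)))?;
    let port = listener.local_addr()?.port();
    // A plain IPv4 connection still reaches the IPv6 listener as an IPv4-mapped address.
    let _client = TcpStream::connect(("127.0.0.1", port))?;
    let (_stream, peer) = listener.accept()?;
    println!("accepted from {peer}"); // typically ::ffff:127.0.0.1:<port>
    Ok(())
}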

View File

@@ -1,6 +1,7 @@
use compute_api::responses::{InstalledExtension, InstalledExtensions};
use metrics::proto::MetricFamily;
use std::collections::HashMap;
use std::collections::HashSet;
use anyhow::Result;
use postgres::{Client, NoTls};
@@ -37,77 +38,61 @@ fn list_dbs(client: &mut Client) -> Result<Vec<String>> {
/// Connect to every database (see list_dbs above) and get the list of installed extensions.
///
/// Same extension can be installed in multiple databases with different versions,
/// so we report a separate metric (number of databases where it is installed)
/// for each extension version.
/// we only keep the highest and lowest version across all databases.
pub fn get_installed_extensions(mut conf: postgres::config::Config) -> Result<InstalledExtensions> {
conf.application_name("compute_ctl:get_installed_extensions");
let mut client = conf.connect(NoTls)?;
let databases: Vec<String> = list_dbs(&mut client)?;
let mut extensions_map: HashMap<(String, String, String), InstalledExtension> = HashMap::new();
let mut extensions_map: HashMap<String, InstalledExtension> = HashMap::new();
for db in databases.iter() {
conf.dbname(db);
let mut db_client = conf.connect(NoTls)?;
let extensions: Vec<(String, String, i32)> = db_client
let extensions: Vec<(String, String)> = db_client
.query(
"SELECT extname, extversion, extowner::integer FROM pg_catalog.pg_extension",
"SELECT extname, extversion FROM pg_catalog.pg_extension;",
&[],
)?
.iter()
.map(|row| {
(
row.get("extname"),
row.get("extversion"),
row.get("extowner"),
)
})
.map(|row| (row.get("extname"), row.get("extversion")))
.collect();
for (extname, v, extowner) in extensions.iter() {
for (extname, v) in extensions.iter() {
let version = v.to_string();
// check if the extension is owned by superuser
// 10 is the oid of superuser
let owned_by_superuser = if *extowner == 10 { "1" } else { "0" };
// increment the number of databases where the version of extension is installed
INSTALLED_EXTENSIONS
.with_label_values(&[extname, &version])
.inc();
extensions_map
.entry((
extname.to_string(),
version.clone(),
owned_by_superuser.to_string(),
))
.entry(extname.to_string())
.and_modify(|e| {
e.versions.insert(version.clone());
// count the number of databases where the extension is installed
e.n_databases += 1;
})
.or_insert(InstalledExtension {
extname: extname.to_string(),
version: version.clone(),
versions: HashSet::from([version.clone()]),
n_databases: 1,
owned_by_superuser: owned_by_superuser.to_string(),
});
}
}
for (key, ext) in extensions_map.iter() {
let (extname, version, owned_by_superuser) = key;
let n_databases = ext.n_databases as u64;
INSTALLED_EXTENSIONS
.with_label_values(&[extname, version, owned_by_superuser])
.set(n_databases);
}
Ok(InstalledExtensions {
let res = InstalledExtensions {
extensions: extensions_map.into_values().collect(),
})
};
Ok(res)
}
static INSTALLED_EXTENSIONS: Lazy<UIntGaugeVec> = Lazy::new(|| {
register_uint_gauge_vec!(
"compute_installed_extensions",
"Number of databases where the version of extension is installed",
&["extension_name", "version", "owned_by_superuser"]
&["extension_name", "version"]
)
.expect("failed to define a metric")
});
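
Because the removed and added lines are interleaved above, the shape of the new aggregation is easier to see in isolation. A rough, self-contained sketch with hypothetical in-memory rows in place of the real list_dbs/pg_extension queries (field types are stand-ins, not the compute_api structs):

use std::collections::{HashMap, HashSet};

struct InstalledExtension {
    extname: String,
    versions: HashSet<String>,
    n_databases: u32,
}

fn main() {
    // (database, extension, version) rows as the per-database queries might return them.
    let rows = [
        ("db1", "plpgsql", "1.0"),
        ("db2", "plpgsql", "1.0"),
        ("db2", "pg_stat_statements", "1.10"),
    ];

    // One entry per extension name; versions are collected into a set and the
    // database count is bumped for every database the extension shows up in.
    let mut extensions_map: HashMap<String, InstalledExtension> = HashMap::new();
    for (_db, extname, version) in rows {
        extensions_map
            .entry(extname.to_string())
            .and_modify(|e| {
                e.versions.insert(version.to_string());
                e.n_databases += 1;
            })
            .or_insert_with(|| InstalledExtension {
                extname: extname.to_string(),
                versions: HashSet::from([version.to_string()]),
                n_databases: 1,
            });
    }

    for ext in extensions_map.values() {
        println!("{} versions={:?} n_databases={}", ext.extname, ext.versions, ext.n_databases);
    }
}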

View File

@@ -3,6 +3,8 @@
#![deny(unsafe_code)]
#![deny(clippy::undocumented_unsafe_blocks)]
extern crate hyper0 as hyper;
pub mod checker;
pub mod config;
pub mod configurator;

View File

@@ -1,16 +1,13 @@
use anyhow::{Context, Result};
use fail::fail_point;
use postgres::{Client, Transaction};
use postgres::Client;
use tracing::info;
/// Runs a series of migrations on a target database
pub(crate) struct MigrationRunner<'m> {
client: &'m mut Client,
migrations: &'m [&'m str],
}
impl<'m> MigrationRunner<'m> {
/// Create a new migration runner
pub fn new(client: &'m mut Client, migrations: &'m [&'m str]) -> Self {
// The neon_migration.migration_id::id column is a bigint, which is equivalent to an i64
assert!(migrations.len() + 1 < i64::MAX as usize);
@@ -18,110 +15,87 @@ impl<'m> MigrationRunner<'m> {
Self { client, migrations }
}
/// Get the current value neon_migration.migration_id
fn get_migration_id(&mut self) -> Result<i64> {
let query = "SELECT id FROM neon_migration.migration_id";
let row = self
.client
.query_one("SELECT id FROM neon_migration.migration_id", &[])?;
.query_one(query, &[])
.context("run_migrations get migration_id")?;
Ok(row.get::<&str, i64>("id"))
}
/// Update the neon_migration.migration_id value
///
/// This function has a fail point called compute-migration, which can be
/// used if you would like to fail the application of a series of migrations
/// at some point.
fn update_migration_id(txn: &mut Transaction, migration_id: i64) -> Result<()> {
// We use this fail point in order to check that failing in the
// middle of applying a series of migrations fails in an expected
// manner
if cfg!(feature = "testing") {
let fail = (|| {
fail_point!("compute-migration", |fail_migration_id| {
migration_id == fail_migration_id.unwrap().parse::<i64>().unwrap()
});
fn update_migration_id(&mut self, migration_id: i64) -> Result<()> {
let setval = format!("UPDATE neon_migration.migration_id SET id={}", migration_id);
false
})();
if fail {
return Err(anyhow::anyhow!(format!(
"migration {} was configured to fail because of a failpoint",
migration_id
)));
}
}
txn.query(
"UPDATE neon_migration.migration_id SET id = $1",
&[&migration_id],
)
.with_context(|| format!("update neon_migration.migration_id to {migration_id}"))?;
self.client
.simple_query(&setval)
.context("run_migrations update id")?;
Ok(())
}
/// Prepare the target database for handling migrations
fn prepare_database(&mut self) -> Result<()> {
self.client
.simple_query("CREATE SCHEMA IF NOT EXISTS neon_migration")?;
self.client.simple_query("CREATE TABLE IF NOT EXISTS neon_migration.migration_id (key INT NOT NULL PRIMARY KEY, id bigint NOT NULL DEFAULT 0)")?;
self.client.simple_query(
"INSERT INTO neon_migration.migration_id VALUES (0, 0) ON CONFLICT DO NOTHING",
)?;
self.client
.simple_query("ALTER SCHEMA neon_migration OWNER TO cloud_admin")?;
self.client
.simple_query("REVOKE ALL ON SCHEMA neon_migration FROM PUBLIC")?;
fn prepare_migrations(&mut self) -> Result<()> {
let query = "CREATE SCHEMA IF NOT EXISTS neon_migration";
self.client.simple_query(query)?;
let query = "CREATE TABLE IF NOT EXISTS neon_migration.migration_id (key INT NOT NULL PRIMARY KEY, id bigint NOT NULL DEFAULT 0)";
self.client.simple_query(query)?;
let query = "INSERT INTO neon_migration.migration_id VALUES (0, 0) ON CONFLICT DO NOTHING";
self.client.simple_query(query)?;
let query = "ALTER SCHEMA neon_migration OWNER TO cloud_admin";
self.client.simple_query(query)?;
let query = "REVOKE ALL ON SCHEMA neon_migration FROM PUBLIC";
self.client.simple_query(query)?;
Ok(())
}
/// Run an individual migration
fn run_migration(txn: &mut Transaction, migration_id: i64, migration: &str) -> Result<()> {
if migration.starts_with("-- SKIP") {
info!("Skipping migration id={}", migration_id);
// Even though we are skipping the migration, updating the
// migration ID should help keep logic easy to understand when
// trying to understand the state of a cluster.
Self::update_migration_id(txn, migration_id)?;
} else {
info!("Running migration id={}:\n{}\n", migration_id, migration);
txn.simple_query(migration)
.with_context(|| format!("apply migration {migration_id}"))?;
Self::update_migration_id(txn, migration_id)?;
}
Ok(())
}
/// Run the configured set of migrations
pub fn run_migrations(mut self) -> Result<()> {
self.prepare_database()
.context("prepare database to handle migrations")?;
self.prepare_migrations()?;
let mut current_migration = self.get_migration_id()? as usize;
while current_migration < self.migrations.len() {
// The index lags the migration ID by 1, so the current migration
// ID is also the next index
let migration_id = (current_migration + 1) as i64;
macro_rules! migration_id {
($cm:expr) => {
($cm + 1) as i64
};
}
let mut txn = self
.client
.transaction()
.with_context(|| format!("begin transaction for migration {migration_id}"))?;
let migration = self.migrations[current_migration];
Self::run_migration(&mut txn, migration_id, self.migrations[current_migration])
.with_context(|| format!("running migration {migration_id}"))?;
if migration.starts_with("-- SKIP") {
info!("Skipping migration id={}", migration_id!(current_migration));
} else {
info!(
"Running migration id={}:\n{}\n",
migration_id!(current_migration),
migration
);
txn.commit()
.with_context(|| format!("commit transaction for migration {migration_id}"))?;
self.client
.simple_query("BEGIN")
.context("begin migration")?;
info!("Finished migration id={}", migration_id);
self.client.simple_query(migration).with_context(|| {
format!(
"run_migrations migration id={}",
migration_id!(current_migration)
)
})?;
// Migration IDs start at 1
self.update_migration_id(migration_id!(current_migration))?;
self.client
.simple_query("COMMIT")
.context("commit migration")?;
info!("Finished migration id={}", migration_id!(current_migration));
}
current_migration += 1;
}
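
The migration-runner hunk above interleaves the old transaction-based code with the new simple_query-driven flow; roughly, the post-change loop looks like the sketch below (a stand-in Client trait instead of the real postgres::Client, and the skip-branch bookkeeping approximated):

use anyhow::{Context, Result};

// Stand-in for postgres::Client::simple_query; not the real API surface.
trait Client {
    fn simple_query(&mut self, query: &str) -> Result<()>;
}

fn run_migrations(client: &mut dyn Client, migrations: &[&str], mut current: usize) -> Result<()> {
    while current < migrations.len() {
        // Migration IDs start at 1, so the ID for index `current` is current + 1.
        let migration_id = (current + 1) as i64;
        let migration = migrations[current];
        if migration.starts_with("-- SKIP") {
            println!("Skipping migration id={migration_id}");
        } else {
            // Each migration plus the id bump runs inside one explicit transaction.
            client.simple_query("BEGIN").context("begin migration")?;
            client
                .simple_query(migration)
                .with_context(|| format!("run migration id={migration_id}"))?;
            client
                .simple_query(&format!(
                    "UPDATE neon_migration.migration_id SET id={migration_id}"
                ))
                .context("update migration id")?;
            client.simple_query("COMMIT").context("commit migration")?;
            println!("Finished migration id={migration_id}");
        }
        current += 1;
    }
    Ok(())
}

fn main() {
    struct FakeClient;
    impl Client for FakeClient {
        fn simple_query(&mut self, query: &str) -> Result<()> {
            println!("executing: {query}");
            Ok(())
        }
    }
    let migrations = ["CREATE TABLE IF NOT EXISTS t (id int)", "-- SKIP: superseded"];
    run_migrations(&mut FakeClient, &migrations, 0).unwrap();
}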

View File

@@ -1,9 +0,0 @@
DO $$
DECLARE
bypassrls boolean;
BEGIN
SELECT rolbypassrls INTO bypassrls FROM pg_roles WHERE rolname = 'neon_superuser';
IF NOT bypassrls THEN
RAISE EXCEPTION 'neon_superuser cannot bypass RLS';
END IF;
END $$;

View File

@@ -1,25 +0,0 @@
DO $$
DECLARE
role record;
BEGIN
FOR role IN
SELECT rolname AS name, rolinherit AS inherit
FROM pg_roles
WHERE pg_has_role(rolname, 'neon_superuser', 'member')
LOOP
IF NOT role.inherit THEN
RAISE EXCEPTION '% cannot inherit', quote_ident(role.name);
END IF;
END LOOP;
FOR role IN
SELECT rolname AS name, rolbypassrls AS bypassrls
FROM pg_roles
WHERE NOT pg_has_role(rolname, 'neon_superuser', 'member')
AND NOT starts_with(rolname, 'pg_')
LOOP
IF role.bypassrls THEN
RAISE EXCEPTION '% can bypass RLS', quote_ident(role.name);
END IF;
END LOOP;
END $$;

View File

@@ -1,10 +0,0 @@
DO $$
BEGIN
IF (SELECT current_setting('server_version_num')::numeric < 160000) THEN
RETURN;
END IF;
IF NOT (SELECT pg_has_role('neon_superuser', 'pg_create_subscription', 'member')) THEN
RAISE EXCEPTION 'neon_superuser cannot execute pg_create_subscription';
END IF;
END $$;

View File

@@ -1,19 +0,0 @@
DO $$
DECLARE
monitor record;
BEGIN
SELECT pg_has_role('neon_superuser', 'pg_monitor', 'member') AS member,
admin_option AS admin
INTO monitor
FROM pg_auth_members
WHERE roleid = 'pg_monitor'::regrole
AND member = 'pg_monitor'::regrole;
IF NOT monitor.member THEN
RAISE EXCEPTION 'neon_superuser is not a member of pg_monitor';
END IF;
IF NOT monitor.admin THEN
RAISE EXCEPTION 'neon_superuser cannot grant pg_monitor';
END IF;
END $$;

View File

@@ -1,2 +0,0 @@
-- This test was never written because at the time migration tests were added
-- the accompanying migration was already skipped.

View File

@@ -1,2 +0,0 @@
-- This test was never written because at the time migration tests were added
-- the accompanying migration was already skipped.

View File

@@ -1,2 +0,0 @@
-- This test was never written because at the time migration tests were added
-- the accompanying migration was already skipped.

View File

@@ -1,2 +0,0 @@
-- This test was never written because at the time migration tests were added
-- the accompanying migration was already skipped.

View File

@@ -1,2 +0,0 @@
-- This test was never written because at the time migration tests were added
-- the accompanying migration was already skipped.

View File

@@ -1,13 +0,0 @@
DO $$
DECLARE
can_execute boolean;
BEGIN
SELECT bool_and(has_function_privilege('neon_superuser', oid, 'execute'))
INTO can_execute
FROM pg_proc
WHERE proname IN ('pg_export_snapshot', 'pg_log_standby_snapshot')
AND pronamespace = 'pg_catalog'::regnamespace;
IF NOT can_execute THEN
RAISE EXCEPTION 'neon_superuser cannot execute both pg_export_snapshot and pg_log_standby_snapshot';
END IF;
END $$;

View File

@@ -1,13 +0,0 @@
DO $$
DECLARE
can_execute boolean;
BEGIN
SELECT has_function_privilege('neon_superuser', oid, 'execute')
INTO can_execute
FROM pg_proc
WHERE proname = 'pg_show_replication_origin_status'
AND pronamespace = 'pg_catalog'::regnamespace;
IF NOT can_execute THEN
RAISE EXCEPTION 'neon_superuser cannot execute pg_show_replication_origin_status';
END IF;
END $$;

View File

@@ -47,7 +47,6 @@ pub enum PerDatabasePhase {
DeleteDBRoleReferences,
ChangeSchemaPerms,
HandleAnonExtension,
DropSubscriptionsForDeletedDatabases,
}
#[derive(Clone, Debug)]
@@ -75,7 +74,7 @@ pub struct MutableApplyContext {
pub dbs: HashMap<String, Database>,
}
/// Apply the operations that belong to the given spec apply phase.
/// Apply the operations that belong to the given spec apply phase.
///
/// Commands within a single phase are executed in order of Iterator yield.
/// Commands of ApplySpecPhase::RunInEachDatabase will execute in the database
@@ -327,12 +326,13 @@ async fn get_operations<'a>(
// Use FORCE to drop database even if there are active connections.
// We run this from `cloud_admin`, so it should have enough privileges.
//
// NB: there could be other db states, which prevent us from dropping
// the database. For example, if db is used by any active subscription
// or replication slot.
// Such cases are handled in the DropSubscriptionsForDeletedDatabases
// phase. We do all the cleanup before actually dropping the database.
// TODO: deal with it once we allow logical replication. Proper fix should
// involve returning an error code to the control plane, so it could
// figure out that this is a non-retryable error, return it to the user
// and fail operation permanently.
let drop_db_query: String = format!(
"DROP DATABASE IF EXISTS {} WITH (FORCE)",
&op.name.pg_quote()
@@ -444,30 +444,6 @@ async fn get_operations<'a>(
}
ApplySpecPhase::RunInEachDatabase { db, subphase } => {
match subphase {
PerDatabasePhase::DropSubscriptionsForDeletedDatabases => {
match &db {
DB::UserDB(db) => {
let drop_subscription_query: String = format!(
include_str!("sql/drop_subscription_for_drop_dbs.sql"),
datname_str = escape_literal(&db.name),
);
let operations = vec![Operation {
query: drop_subscription_query,
comment: Some(format!(
"optionally dropping subscriptions for DB {}",
db.name,
)),
}]
.into_iter();
Ok(Box::new(operations))
}
// skip this cleanup for the system databases
// because users can't drop them
DB::SystemDB => Ok(Box::new(empty())),
}
}
PerDatabasePhase::DeleteDBRoleReferences => {
let ctx = ctx.read().await;
@@ -498,19 +474,7 @@ async fn get_operations<'a>(
),
comment: None,
},
// Revoke some potentially blocking privileges (Neon-specific currently)
Operation {
query: format!(
include_str!("sql/pre_drop_role_revoke_privileges.sql"),
role_name = quoted,
),
comment: None,
},
// This now will only drop privileges of the role
// TODO: this is obviously not 100% true because of the above case,
// there could be still some privileges that are not revoked. Maybe this
// only drops privileges that were granted *by this* role, not *to this* role,
// but this has to be checked.
Operation {
query: format!("DROP OWNED BY {}", quoted),
comment: None,

View File

@@ -1,11 +0,0 @@
DO $$
DECLARE
subname TEXT;
BEGIN
FOR subname IN SELECT pg_subscription.subname FROM pg_subscription WHERE subdbid = (SELECT oid FROM pg_database WHERE datname = {datname_str}) LOOP
EXECUTE format('ALTER SUBSCRIPTION %I DISABLE;', subname);
EXECUTE format('ALTER SUBSCRIPTION %I SET (slot_name = NONE);', subname);
EXECUTE format('DROP SUBSCRIPTION %I;', subname);
END LOOP;
END;
$$;

View File

@@ -1,28 +0,0 @@
SET SESSION ROLE neon_superuser;
DO $$
DECLARE
schema TEXT;
revoke_query TEXT;
BEGIN
FOR schema IN
SELECT schema_name
FROM information_schema.schemata
-- So far, we only had issues with 'public' schema. Probably, because we do some additional grants,
-- e.g., make DB owner the owner of 'public' schema automatically (when created via API).
-- See https://github.com/neondatabase/cloud/issues/13582 for the context.
-- Still, keep the loop because i) it efficiently handles the case when there is no 'public' schema,
-- ii) it's easy to add more schemas to the list if needed.
WHERE schema_name IN ('public')
LOOP
revoke_query := format(
'REVOKE ALL PRIVILEGES ON ALL TABLES IN SCHEMA %I FROM {role_name} GRANTED BY neon_superuser;',
schema
);
EXECUTE revoke_query;
END LOOP;
END;
$$;
RESET ROLE;

View File

@@ -274,7 +274,6 @@ fn fill_remote_storage_secrets_vars(mut cmd: &mut Command) -> &mut Command {
for env_key in [
"AWS_ACCESS_KEY_ID",
"AWS_SECRET_ACCESS_KEY",
"AWS_SESSION_TOKEN",
"AWS_PROFILE",
// HOME is needed in combination with `AWS_PROFILE` to pick up the SSO sessions.
"HOME",

View File

@@ -19,7 +19,6 @@ use control_plane::storage_controller::{
NeonStorageControllerStartArgs, NeonStorageControllerStopArgs, StorageController,
};
use control_plane::{broker, local_env};
use nix::fcntl::{flock, FlockArg};
use pageserver_api::config::{
DEFAULT_HTTP_LISTEN_PORT as DEFAULT_PAGESERVER_HTTP_PORT,
DEFAULT_PG_LISTEN_PORT as DEFAULT_PAGESERVER_PG_PORT,
@@ -37,8 +36,6 @@ use safekeeper_api::{
};
use std::borrow::Cow;
use std::collections::{BTreeSet, HashMap};
use std::fs::File;
use std::os::fd::AsRawFd;
use std::path::PathBuf;
use std::process::exit;
use std::str::FromStr;
@@ -692,21 +689,6 @@ struct TimelineTreeEl {
pub children: BTreeSet<TimelineId>,
}
/// A flock-based guard over the neon_local repository directory
struct RepoLock {
_file: File,
}
impl RepoLock {
fn new() -> Result<Self> {
let repo_dir = File::open(local_env::base_path())?;
let repo_dir_fd = repo_dir.as_raw_fd();
flock(repo_dir_fd, FlockArg::LockExclusive)?;
Ok(Self { _file: repo_dir })
}
}
// Main entry point for the 'neon_local' CLI utility
//
// This utility helps to manage neon installation. That includes following:
@@ -718,14 +700,9 @@ fn main() -> Result<()> {
let cli = Cli::parse();
// Check for 'neon init' command first.
let (subcommand_result, _lock) = if let NeonLocalCmd::Init(args) = cli.command {
(handle_init(&args).map(|env| Some(Cow::Owned(env))), None)
let subcommand_result = if let NeonLocalCmd::Init(args) = cli.command {
handle_init(&args).map(|env| Some(Cow::Owned(env)))
} else {
// This tool uses a collection of simple files to store its state, and consequently
// it is not generally safe to run multiple commands concurrently. Rather than expect
// all callers to know this, use a lock file to protect against concurrent execution.
let _repo_lock = RepoLock::new().unwrap();
// all other commands need an existing config
let env = LocalEnv::load_config(&local_env::base_path()).context("Error loading config")?;
let original_env = env.clone();
@@ -751,12 +728,11 @@ fn main() -> Result<()> {
NeonLocalCmd::Mappings(subcmd) => handle_mappings(&subcmd, env),
};
let subcommand_result = if &original_env != env {
if &original_env != env {
subcommand_result.map(|()| Some(Cow::Borrowed(env)))
} else {
subcommand_result.map(|()| None)
};
(subcommand_result, Some(_repo_lock))
}
};
match subcommand_result {
@@ -946,7 +922,7 @@ fn handle_init(args: &InitCmdArgs) -> anyhow::Result<LocalEnv> {
} else {
// User (likely interactive) did not provide a description of the environment, give them the default
NeonLocalInitConf {
control_plane_api: Some(DEFAULT_PAGESERVER_CONTROL_PLANE_API.parse().unwrap()),
control_plane_api: Some(Some(DEFAULT_PAGESERVER_CONTROL_PLANE_API.parse().unwrap())),
broker: NeonBroker {
listen_addr: DEFAULT_BROKER_ADDR.parse().unwrap(),
},
@@ -1742,15 +1718,18 @@ async fn handle_start_all_impl(
broker::start_broker_process(env, &retry_timeout).await
});
js.spawn(async move {
let storage_controller = StorageController::from_env(env);
storage_controller
.start(NeonStorageControllerStartArgs::with_default_instance_id(
retry_timeout,
))
.await
.map_err(|e| e.context("start storage_controller"))
});
// Only start the storage controller if the pageserver is configured to need it
if env.control_plane_api.is_some() {
js.spawn(async move {
let storage_controller = StorageController::from_env(env);
storage_controller
.start(NeonStorageControllerStartArgs::with_default_instance_id(
retry_timeout,
))
.await
.map_err(|e| e.context("start storage_controller"))
});
}
for ps_conf in &env.pageservers {
js.spawn(async move {
@@ -1795,6 +1774,10 @@ async fn neon_start_status_check(
const RETRY_INTERVAL: Duration = Duration::from_millis(100);
const NOTICE_AFTER_RETRIES: Duration = Duration::from_secs(5);
if env.control_plane_api.is_none() {
return Ok(());
}
let storcon = StorageController::from_env(env);
let retries = retry_timeout.as_millis() / RETRY_INTERVAL.as_millis();

View File

@@ -53,7 +53,6 @@ use compute_api::spec::Role;
use nix::sys::signal::kill;
use nix::sys::signal::Signal;
use pageserver_api::shard::ShardStripeSize;
use reqwest::header::CONTENT_TYPE;
use serde::{Deserialize, Serialize};
use url::Host;
use utils::id::{NodeId, TenantId, TimelineId};
@@ -62,7 +61,7 @@ use crate::local_env::LocalEnv;
use crate::postgresql_conf::PostgresConf;
use crate::storage_controller::StorageController;
use compute_api::responses::{ComputeStatus, ComputeStatusResponse};
use compute_api::responses::{ComputeState, ComputeStatus};
use compute_api::spec::{Cluster, ComputeFeature, ComputeMode, ComputeSpec};
// contents of an endpoint.json file
@@ -311,15 +310,7 @@ impl Endpoint {
conf.append("wal_log_hints", "off");
conf.append("max_replication_slots", "10");
conf.append("hot_standby", "on");
// Set to 1MB to both exercise getPage requests/LFC, and still have enough room for
// Postgres to operate. Everything smaller might be not enough for Postgres under load,
// and can cause errors like 'no unpinned buffers available', see
// <https://github.com/neondatabase/neon/issues/9956>
conf.append("shared_buffers", "1MB");
// Postgres defaults to effective_io_concurrency=1, which does not exercise the pageserver's
// batching logic. Set this to 2 so that we exercise the code a bit without letting
// individual tests do a lot of concurrent work on underpowered test machines
conf.append("effective_io_concurrency", "2");
conf.append("fsync", "off");
conf.append("max_connections", "100");
conf.append("wal_level", "logical");
@@ -585,7 +576,6 @@ impl Endpoint {
features: self.features.clone(),
swap_size_bytes: None,
disk_quota_bytes: None,
disable_lfc_resizing: None,
cluster: Cluster {
cluster_id: None, // project ID: not used
name: None, // project name: not used
@@ -624,7 +614,6 @@ impl Endpoint {
pgbouncer_settings: None,
shard_stripe_size: Some(shard_stripe_size),
local_proxy_config: None,
reconfigure_concurrency: 1,
};
let spec_path = self.endpoint_path().join("spec.json");
std::fs::write(spec_path, serde_json::to_string_pretty(&spec)?)?;
@@ -739,7 +728,7 @@ impl Endpoint {
}
// Call the /status HTTP API
pub async fn get_status(&self) -> Result<ComputeStatusResponse> {
pub async fn get_status(&self) -> Result<ComputeState> {
let client = reqwest::Client::new();
let response = client
@@ -815,7 +804,7 @@ impl Endpoint {
}
let client = reqwest::Client::builder()
.timeout(Duration::from_secs(120))
.timeout(Duration::from_secs(30))
.build()
.unwrap();
let response = client
@@ -824,7 +813,6 @@ impl Endpoint {
self.http_address.ip(),
self.http_address.port()
))
.header(CONTENT_TYPE.as_str(), "application/json")
.body(format!(
"{{\"spec\":{}}}",
serde_json::to_string_pretty(&spec)?
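
Aside, not from the diff: the removed CONTENT_TYPE header plus hand-built body could also be written with reqwest's .json() helper (assuming reqwest's json feature), which sets the Content-Type header itself; `spec` here is a stand-in serde_json::Value and the /configure path is taken from the route table earlier in this diff:

use std::time::Duration;

async fn post_spec(base_url: &str, spec: serde_json::Value) -> anyhow::Result<()> {
    let client = reqwest::Client::builder()
        .timeout(Duration::from_secs(30))
        .build()?;
    client
        .post(format!("{base_url}/configure"))
        // .json() serializes the body and sets Content-Type: application/json.
        .json(&serde_json::json!({ "spec": spec }))
        .send()
        .await?
        .error_for_status()?;
    Ok(())
}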

View File

@@ -76,7 +76,7 @@ pub struct LocalEnv {
// Control plane upcall API for pageserver: if None, we will not run storage_controller. If set, this will
// be propagated into each pageserver's configuration.
pub control_plane_api: Url,
pub control_plane_api: Option<Url>,
// Control plane upcall API for storage controller. If set, this will be propagated into the
// storage controller's configuration.
@@ -133,7 +133,7 @@ pub struct NeonLocalInitConf {
pub storage_controller: Option<NeonStorageControllerConf>,
pub pageservers: Vec<NeonLocalInitPageserverConf>,
pub safekeepers: Vec<SafekeeperConf>,
pub control_plane_api: Option<Url>,
pub control_plane_api: Option<Option<Url>>,
pub control_plane_compute_hook_api: Option<Option<Url>>,
}
@@ -180,7 +180,7 @@ impl NeonStorageControllerConf {
const DEFAULT_MAX_WARMING_UP_INTERVAL: std::time::Duration = std::time::Duration::from_secs(30);
// Very tight heartbeat interval to speed up tests
const DEFAULT_HEARTBEAT_INTERVAL: std::time::Duration = std::time::Duration::from_millis(1000);
const DEFAULT_HEARTBEAT_INTERVAL: std::time::Duration = std::time::Duration::from_millis(100);
}
impl Default for NeonStorageControllerConf {
@@ -535,7 +535,7 @@ impl LocalEnv {
storage_controller,
pageservers,
safekeepers,
control_plane_api: control_plane_api.unwrap(),
control_plane_api,
control_plane_compute_hook_api,
branch_name_mappings,
}
@@ -638,7 +638,7 @@ impl LocalEnv {
storage_controller: self.storage_controller.clone(),
pageservers: vec![], // it's skip_serializing anyway
safekeepers: self.safekeepers.clone(),
control_plane_api: Some(self.control_plane_api.clone()),
control_plane_api: self.control_plane_api.clone(),
control_plane_compute_hook_api: self.control_plane_compute_hook_api.clone(),
branch_name_mappings: self.branch_name_mappings.clone(),
},
@@ -768,7 +768,7 @@ impl LocalEnv {
storage_controller: storage_controller.unwrap_or_default(),
pageservers: pageservers.iter().map(Into::into).collect(),
safekeepers,
control_plane_api: control_plane_api.unwrap(),
control_plane_api: control_plane_api.unwrap_or_default(),
control_plane_compute_hook_api: control_plane_compute_hook_api.unwrap_or_default(),
branch_name_mappings: Default::default(),
};
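
The Option<Option<Url>> in NeonLocalInitConf next to the plain Option<Url> in LocalEnv is easy to misread; a tiny sketch of the two-level pattern and of how unwrap_or_default() (used above when building LocalEnv) collapses it, with a stand-in String value instead of the real Url:

fn main() {
    // Outer Option: was the field present in the init config at all?
    // Inner Option: the configured value, where None means "explicitly disabled".
    let absent: Option<Option<String>> = None;
    let disabled: Option<Option<String>> = Some(None);
    let set: Option<Option<String>> =
        Some(Some("http://127.0.0.1:1234/upcall/v1/".to_string())); // stand-in value

    // unwrap_or_default() collapses "absent" to None, while an explicit value
    // (or an explicit None) passes through unchanged.
    assert_eq!(absent.unwrap_or_default(), None);
    assert_eq!(disabled.unwrap_or_default(), None);
    assert_eq!(
        set.unwrap_or_default().as_deref(),
        Some("http://127.0.0.1:1234/upcall/v1/")
    );
}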

View File

@@ -95,19 +95,21 @@ impl PageServerNode {
let mut overrides = vec![pg_distrib_dir_param, broker_endpoint_param];
overrides.push(format!(
"control_plane_api='{}'",
self.env.control_plane_api.as_str()
));
if let Some(control_plane_api) = &self.env.control_plane_api {
overrides.push(format!(
"control_plane_api='{}'",
control_plane_api.as_str()
));
// Storage controller uses the same auth as pageserver: if JWT is enabled
// for us, we will also need it to talk to them.
if matches!(conf.http_auth_type, AuthType::NeonJWT) {
let jwt_token = self
.env
.generate_auth_token(&Claims::new(None, Scope::GenerationsApi))
.unwrap();
overrides.push(format!("control_plane_api_token='{}'", jwt_token));
// Storage controller uses the same auth as pageserver: if JWT is enabled
// for us, we will also need it to talk to them.
if matches!(conf.http_auth_type, AuthType::NeonJWT) {
let jwt_token = self
.env
.generate_auth_token(&Claims::new(None, Scope::GenerationsApi))
.unwrap();
overrides.push(format!("control_plane_api_token='{}'", jwt_token));
}
}
if !conf.other.contains_key("remote_storage") {
@@ -433,7 +435,7 @@ impl PageServerNode {
) -> anyhow::Result<()> {
let config = Self::parse_config(settings)?;
self.http_client
.set_tenant_config(&models::TenantConfigRequest { tenant_id, config })
.tenant_config(&models::TenantConfigRequest { tenant_id, config })
.await?;
Ok(())

View File

@@ -5,7 +5,6 @@
//! ```text
//! .neon/safekeepers/<safekeeper id>
//! ```
use std::error::Error as _;
use std::future::Future;
use std::io::Write;
use std::path::PathBuf;
@@ -27,7 +26,7 @@ use crate::{
#[derive(Error, Debug)]
pub enum SafekeeperHttpError {
#[error("request error: {0}{}", .0.source().map(|e| format!(": {e}")).unwrap_or_default())]
#[error("Reqwest error: {0}")]
Transport(#[from] reqwest::Error),
#[error("Error: {0}")]
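
The removed Display attribute above folds the error's source() into the message. A small stand-alone sketch of that thiserror technique with stand-in error types (not the real SafekeeperHttpError or reqwest::Error):

use std::error::Error as _;
use thiserror::Error;

#[derive(Error, Debug)]
#[error("connection refused")]
struct Inner;

#[derive(Error, Debug)]
#[error("request failed")]
struct Outer(#[source] Inner);

#[derive(Error, Debug)]
enum Demo {
    // Same formatting trick as the removed line: print the outer error, then
    // append one level of its source chain if present.
    #[error("request error: {0}{}", .0.source().map(|e| format!(": {e}")).unwrap_or_default())]
    Transport(#[from] Outer),
}

fn main() {
    let err = Demo::from(Outer(Inner));
    // Prints: request error: request failed: connection refused
    println!("{err}");
}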

View File

@@ -338,7 +338,7 @@ impl StorageController {
.port(),
)
} else {
let listen_url = self.env.control_plane_api.clone();
let listen_url = self.env.control_plane_api.clone().unwrap();
let listen = format!(
"{}:{}",
@@ -708,7 +708,7 @@ impl StorageController {
} else {
// The configured URL has the /upcall path prefix for pageservers to use: we will strip that out
// for general purpose API access.
let listen_url = self.env.control_plane_api.clone();
let listen_url = self.env.control_plane_api.clone().unwrap();
Url::from_str(&format!(
"http://{}:{}/{path}",
listen_url.host_str().unwrap(),

View File

@@ -5,13 +5,12 @@ use clap::{Parser, Subcommand};
use pageserver_api::{
controller_api::{
AvailabilityZone, NodeAvailabilityWrapper, NodeDescribeResponse, NodeShardResponse,
SafekeeperDescribeResponse, ShardSchedulingPolicy, TenantCreateRequest,
TenantDescribeResponse, TenantPolicyRequest,
ShardSchedulingPolicy, TenantCreateRequest, TenantDescribeResponse, TenantPolicyRequest,
},
models::{
EvictionPolicy, EvictionPolicyLayerAccessThreshold, LocationConfigSecondary,
ShardParameters, TenantConfig, TenantConfigPatchRequest, TenantConfigRequest,
TenantShardSplitRequest, TenantShardSplitResponse,
ShardParameters, TenantConfig, TenantConfigRequest, TenantShardSplitRequest,
TenantShardSplitResponse,
},
shard::{ShardStripeSize, TenantShardId},
};
@@ -117,19 +116,9 @@ enum Command {
#[arg(long)]
tenant_shard_id: TenantShardId,
},
/// Set the pageserver tenant configuration of a tenant: this is the configuration structure
/// Modify the pageserver tenant configuration of a tenant: this is the configuration structure
/// that is passed through to pageservers, and does not affect storage controller behavior.
/// Any previous tenant configs are overwritten.
SetTenantConfig {
#[arg(long)]
tenant_id: TenantId,
#[arg(long)]
config: String,
},
/// Patch the pageserver tenant configuration of a tenant. Any fields with null values in the
/// provided JSON are unset from the tenant config and all fields with non-null values are set.
/// Unspecified fields are not changed.
PatchTenantConfig {
TenantConfig {
#[arg(long)]
tenant_id: TenantId,
#[arg(long)]
@@ -212,8 +201,6 @@ enum Command {
#[arg(long)]
timeout: humantime::Duration,
},
/// List safekeepers known to the storage controller
Safekeepers {},
}
#[derive(Parser)]
@@ -562,47 +549,25 @@ async fn main() -> anyhow::Result<()> {
)
.await?;
}
Command::SetTenantConfig { tenant_id, config } => {
Command::TenantConfig { tenant_id, config } => {
let tenant_conf = serde_json::from_str(&config)?;
vps_client
.set_tenant_config(&TenantConfigRequest {
tenant_id,
config: tenant_conf,
})
.await?;
}
Command::PatchTenantConfig { tenant_id, config } => {
let tenant_conf = serde_json::from_str(&config)?;
vps_client
.patch_tenant_config(&TenantConfigPatchRequest {
.tenant_config(&TenantConfigRequest {
tenant_id,
config: tenant_conf,
})
.await?;
}
Command::TenantDescribe { tenant_id } => {
let TenantDescribeResponse {
tenant_id,
shards,
stripe_size,
policy,
config,
} = storcon_client
let describe_response = storcon_client
.dispatch::<(), TenantDescribeResponse>(
Method::GET,
format!("control/v1/tenant/{tenant_id}"),
None,
)
.await?;
println!("Tenant {tenant_id}");
let mut table = comfy_table::Table::new();
table.add_row(["Policy", &format!("{:?}", policy)]);
table.add_row(["Stripe size", &format!("{:?}", stripe_size)]);
table.add_row(["Config", &serde_json::to_string_pretty(&config).unwrap()]);
println!("{table}");
println!("Shards:");
let shards = describe_response.shards;
let mut table = comfy_table::Table::new();
table.set_header(["Shard", "Attached", "Secondary", "Last error", "status"]);
for shard in shards {
@@ -759,7 +724,7 @@ async fn main() -> anyhow::Result<()> {
threshold,
} => {
vps_client
.set_tenant_config(&TenantConfigRequest {
.tenant_config(&TenantConfigRequest {
tenant_id,
config: TenantConfig {
eviction_policy: Some(EvictionPolicy::LayerAccessThreshold(
@@ -1023,40 +988,6 @@ async fn main() -> anyhow::Result<()> {
"Fill was cancelled for node {node_id}. Schedulling policy is now {final_policy:?}"
);
}
Command::Safekeepers {} => {
let mut resp = storcon_client
.dispatch::<(), Vec<SafekeeperDescribeResponse>>(
Method::GET,
"control/v1/safekeeper".to_string(),
None,
)
.await?;
resp.sort_by(|a, b| a.id.cmp(&b.id));
let mut table = comfy_table::Table::new();
table.set_header([
"Id",
"Version",
"Host",
"Port",
"Http Port",
"AZ Id",
"Scheduling",
]);
for sk in resp {
table.add_row([
format!("{}", sk.id),
format!("{}", sk.version),
sk.host,
format!("{}", sk.port),
format!("{}", sk.http_port),
sk.availability_zone_id.clone(),
String::from(sk.scheduling_policy),
]);
}
println!("{table}");
}
}
Ok(())
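
The removed help text above distinguishes SetTenantConfig (any previous config is overwritten) from PatchTenantConfig (null unsets a field, non-null sets it, unspecified fields are left alone). A hedged sketch of that merge rule over plain serde_json maps rather than the real TenantConfig types (field names are illustrative):

use serde_json::{json, Map, Value};

fn apply_patch(current: &mut Map<String, Value>, patch: Map<String, Value>) {
    for (key, value) in patch {
        if value.is_null() {
            // Null in the patch unsets the field.
            current.remove(&key);
        } else {
            // Non-null values overwrite; keys absent from the patch keep their old value.
            current.insert(key, value);
        }
    }
}

fn main() {
    let mut config = json!({ "pitr_interval": "7days", "compaction_threshold": 20 })
        .as_object()
        .cloned()
        .unwrap();
    let patch = json!({ "pitr_interval": null, "compaction_threshold": 10 })
        .as_object()
        .cloned()
        .unwrap();

    apply_patch(&mut config, patch);

    assert_eq!(
        Value::Object(config),
        json!({ "compaction_threshold": 10 })
    );
}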

View File

@@ -42,7 +42,6 @@ allow = [
"MPL-2.0",
"OpenSSL",
"Unicode-DFS-2016",
"Unicode-3.0",
]
confidence-threshold = 0.8
exceptions = [

Some files were not shown because too many files have changed in this diff.