mirror of
https://github.com/neondatabase/neon.git
synced 2026-01-14 00:42:54 +00:00
## Problem

The new gRPC page service protocol supports client-side batches. The current libpq protocol only does best-effort server-side batching. To compare these approaches, Pagebench should support submitting contiguous page batches, similar to how Postgres will submit them (e.g. with prefetches or vectored reads).

## Summary of changes

Add a `--batch-size` parameter specifying the size of contiguous page batches. One batch counts as 1 RPS and 1 queue depth.

For the libpq protocol, a batch is submitted as individual requests and we rely on the server to batch them for us. This will give a realistic comparison of how these would be processed in the wild (e.g. when Postgres sends 100 prefetch requests).

This patch also adds some basic validation of responses.
34 lines
887 B
TOML
[package]
name = "pagebench"
version = "0.1.0"
edition.workspace = true
license.workspace = true

# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html

[dependencies]
anyhow.workspace = true
async-trait.workspace = true
bytes.workspace = true
camino.workspace = true
clap.workspace = true
futures.workspace = true
hdrhistogram.workspace = true
humantime.workspace = true
humantime-serde.workspace = true
rand.workspace = true
reqwest.workspace = true
serde.workspace = true
serde_json.workspace = true
tokio.workspace = true
tokio-stream.workspace = true
tokio-util.workspace = true
tonic.workspace = true
tracing.workspace = true

# Workspace-local crates.
pageserver_api.workspace = true
pageserver_client.workspace = true
pageserver_page_api.workspace = true
utils = { path = "../../libs/utils/" }
workspace_hack = { version = "0.1", path = "../../workspace_hack" }