mirror of
https://github.com/neondatabase/neon.git
synced 2026-02-17 01:20:36 +00:00
Compare commits
35 Commits
release-co
...
skyzh/work
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
4834d1c888 | ||
|
|
b12a898bf9 | ||
|
|
b0b4b7dd8f | ||
|
|
4dd4096f11 | ||
|
|
be718ed121 | ||
|
|
9f1408fdf3 | ||
|
|
7000aaaf75 | ||
|
|
ef2a2555b1 | ||
|
|
d8ab6ddb0f | ||
|
|
dcc437da1d | ||
|
|
c286fea018 | ||
|
|
de8276488d | ||
|
|
ddb9ae1214 | ||
|
|
9e55d79803 | ||
|
|
8d47a60de2 | ||
|
|
6166482589 | ||
|
|
ca6d72ba2a | ||
|
|
b6c0f66619 | ||
|
|
3702ec889f | ||
|
|
8e8df1b453 | ||
|
|
92d95b08cf | ||
|
|
0af40b5494 | ||
|
|
c60b91369a | ||
|
|
f1473dd438 | ||
|
|
c283aaaf8d | ||
|
|
414ed82c1f | ||
|
|
881e351f69 | ||
|
|
b31ce14083 | ||
|
|
b4d87b9dfe | ||
|
|
2b49d6ee05 | ||
|
|
14e1f89053 | ||
|
|
8a8c656c06 | ||
|
|
a75e11cc00 | ||
|
|
7d4bfcdc47 | ||
|
|
737888e5c9 |
10
.github/workflows/_build-and-test-locally.yml
vendored
10
.github/workflows/_build-and-test-locally.yml
vendored
@@ -229,8 +229,13 @@ jobs:
|
||||
${cov_prefix} cargo nextest run $CARGO_FLAGS $CARGO_FEATURES -E '!package(pageserver)'
|
||||
|
||||
# run pageserver tests with different settings
|
||||
for io_engine in std-fs tokio-epoll-uring ; do
|
||||
NEON_PAGESERVER_UNIT_TEST_VIRTUAL_FILE_IOENGINE=$io_engine ${cov_prefix} cargo nextest run $CARGO_FLAGS $CARGO_FEATURES -E 'package(pageserver)'
|
||||
for get_vectored_concurrent_io in sequential sidecar-task; do
|
||||
for io_engine in std-fs tokio-epoll-uring ; do
|
||||
NEON_PAGESERVER_UNIT_TEST_GET_VECTORED_CONCURRENT_IO=$get_vectored_concurrent_io \
|
||||
NEON_PAGESERVER_UNIT_TEST_VIRTUAL_FILE_IOENGINE=$io_engine \
|
||||
${cov_prefix} \
|
||||
cargo nextest run $CARGO_FLAGS $CARGO_FEATURES -E 'package(pageserver)'
|
||||
done
|
||||
done
|
||||
|
||||
# Run separate tests for real S3
|
||||
@@ -314,6 +319,7 @@ jobs:
|
||||
CHECK_ONDISK_DATA_COMPATIBILITY: nonempty
|
||||
BUILD_TAG: ${{ inputs.build-tag }}
|
||||
PAGESERVER_VIRTUAL_FILE_IO_ENGINE: tokio-epoll-uring
|
||||
PAGESERVER_GET_VECTORED_CONCURRENT_IO: sidecar-task
|
||||
USE_LFC: ${{ matrix.lfc_state == 'with-lfc' && 'true' || 'false' }}
|
||||
|
||||
# Temporary disable this step until we figure out why it's so flaky
|
||||
|
||||
6
.github/workflows/build_and_test.yml
vendored
6
.github/workflows/build_and_test.yml
vendored
@@ -820,8 +820,8 @@ jobs:
|
||||
- name: Print logs and clean up
|
||||
if: always()
|
||||
run: |
|
||||
docker compose -f ./docker-compose/docker-compose.yml logs || 0
|
||||
docker compose -f ./docker-compose/docker-compose.yml down
|
||||
docker compose --profile test-extensions -f ./docker-compose/docker-compose.yml logs || true
|
||||
docker compose --profile test-extensions -f ./docker-compose/docker-compose.yml down
|
||||
|
||||
promote-images-dev:
|
||||
needs: [ check-permissions, tag, vm-compute-node-image, neon-image ]
|
||||
@@ -859,7 +859,7 @@ jobs:
|
||||
done
|
||||
|
||||
promote-images-prod:
|
||||
needs: [ check-permissions, tag, test-images, vm-compute-node-image ]
|
||||
needs: [ check-permissions, tag, test-images, promote-images-dev ]
|
||||
runs-on: ubuntu-22.04
|
||||
if: github.ref_name == 'main' || github.ref_name == 'release' || github.ref_name == 'release-proxy' || github.ref_name == 'release-compute'
|
||||
|
||||
|
||||
5
.github/workflows/release.yml
vendored
5
.github/workflows/release.yml
vendored
@@ -3,8 +3,9 @@ name: Create Release Branch
|
||||
on:
|
||||
schedule:
|
||||
# It should be kept in sync with if-condition in jobs
|
||||
- cron: '0 6 * * FRI' # Storage release
|
||||
- cron: '0 6 * * THU' # Proxy release
|
||||
- cron: '0 6 * * FRI' # Storage release
|
||||
- cron: '0 7 * * FRI' # Compute release
|
||||
workflow_dispatch:
|
||||
inputs:
|
||||
create-storage-release-branch:
|
||||
@@ -55,7 +56,7 @@ jobs:
|
||||
ci-access-token: ${{ secrets.CI_ACCESS_TOKEN }}
|
||||
|
||||
create-compute-release-branch:
|
||||
if: inputs.create-compute-release-branch
|
||||
if: ${{ github.event.schedule == '0 7 * * FRI' || inputs.create-compute-release-branch }}
|
||||
|
||||
permissions:
|
||||
contents: write
|
||||
|
||||
171
Cargo.lock
generated
171
Cargo.lock
generated
@@ -942,6 +942,18 @@ version = "1.6.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "8c3c1a368f70d6cf7302d78f8f7093da241fb8e8807c05cc9e51a125895a6d5b"
|
||||
|
||||
[[package]]
|
||||
name = "bb8"
|
||||
version = "0.8.6"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "d89aabfae550a5c44b43ab941844ffcd2e993cb6900b342debf59e9ea74acdb8"
|
||||
dependencies = [
|
||||
"async-trait",
|
||||
"futures-util",
|
||||
"parking_lot 0.12.1",
|
||||
"tokio",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "bcder"
|
||||
version = "0.7.4"
|
||||
@@ -1301,7 +1313,7 @@ dependencies = [
|
||||
"tar",
|
||||
"thiserror",
|
||||
"tokio",
|
||||
"tokio-postgres",
|
||||
"tokio-postgres 0.7.7",
|
||||
"tokio-stream",
|
||||
"tokio-util",
|
||||
"tower 0.5.2",
|
||||
@@ -1410,7 +1422,7 @@ dependencies = [
|
||||
"storage_broker",
|
||||
"thiserror",
|
||||
"tokio",
|
||||
"tokio-postgres",
|
||||
"tokio-postgres 0.7.7",
|
||||
"tokio-util",
|
||||
"toml",
|
||||
"toml_edit",
|
||||
@@ -1786,11 +1798,24 @@ dependencies = [
|
||||
"chrono",
|
||||
"diesel_derives",
|
||||
"itoa",
|
||||
"pq-sys",
|
||||
"r2d2",
|
||||
"serde_json",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "diesel-async"
|
||||
version = "0.5.2"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "51a307ac00f7c23f526a04a77761a0519b9f0eb2838ebf5b905a58580095bdcb"
|
||||
dependencies = [
|
||||
"async-trait",
|
||||
"bb8",
|
||||
"diesel",
|
||||
"futures-util",
|
||||
"scoped-futures",
|
||||
"tokio",
|
||||
"tokio-postgres 0.7.12",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "diesel_derives"
|
||||
version = "2.2.1"
|
||||
@@ -4042,8 +4067,8 @@ dependencies = [
|
||||
"pageserver_compaction",
|
||||
"pin-project-lite",
|
||||
"postgres",
|
||||
"postgres-protocol",
|
||||
"postgres-types",
|
||||
"postgres-protocol 0.6.4",
|
||||
"postgres-types 0.2.4",
|
||||
"postgres_backend",
|
||||
"postgres_connection",
|
||||
"postgres_ffi",
|
||||
@@ -4074,7 +4099,7 @@ dependencies = [
|
||||
"tokio",
|
||||
"tokio-epoll-uring",
|
||||
"tokio-io-timeout",
|
||||
"tokio-postgres",
|
||||
"tokio-postgres 0.7.7",
|
||||
"tokio-stream",
|
||||
"tokio-tar",
|
||||
"tokio-util",
|
||||
@@ -4132,7 +4157,7 @@ dependencies = [
|
||||
"serde",
|
||||
"thiserror",
|
||||
"tokio",
|
||||
"tokio-postgres",
|
||||
"tokio-postgres 0.7.7",
|
||||
"tokio-stream",
|
||||
"tokio-util",
|
||||
"utils",
|
||||
@@ -4438,7 +4463,7 @@ dependencies = [
|
||||
"futures-util",
|
||||
"log",
|
||||
"tokio",
|
||||
"tokio-postgres",
|
||||
"tokio-postgres 0.7.7",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
@@ -4459,6 +4484,24 @@ dependencies = [
|
||||
"stringprep",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "postgres-protocol"
|
||||
version = "0.6.7"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "acda0ebdebc28befa84bee35e651e4c5f09073d668c7aed4cf7e23c3cda84b23"
|
||||
dependencies = [
|
||||
"base64 0.22.1",
|
||||
"byteorder",
|
||||
"bytes",
|
||||
"fallible-iterator",
|
||||
"hmac",
|
||||
"md-5",
|
||||
"memchr",
|
||||
"rand 0.8.5",
|
||||
"sha2",
|
||||
"stringprep",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "postgres-protocol2"
|
||||
version = "0.1.0"
|
||||
@@ -4482,7 +4525,18 @@ source = "git+https://github.com/neondatabase/rust-postgres.git?branch=neon#511f
|
||||
dependencies = [
|
||||
"bytes",
|
||||
"fallible-iterator",
|
||||
"postgres-protocol",
|
||||
"postgres-protocol 0.6.4",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "postgres-types"
|
||||
version = "0.2.8"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "f66ea23a2d0e5734297357705193335e0a957696f34bed2f2faefacb2fec336f"
|
||||
dependencies = [
|
||||
"bytes",
|
||||
"fallible-iterator",
|
||||
"postgres-protocol 0.6.7",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
@@ -4507,7 +4561,7 @@ dependencies = [
|
||||
"serde",
|
||||
"thiserror",
|
||||
"tokio",
|
||||
"tokio-postgres",
|
||||
"tokio-postgres 0.7.7",
|
||||
"tokio-postgres-rustls",
|
||||
"tokio-rustls 0.26.0",
|
||||
"tokio-util",
|
||||
@@ -4522,7 +4576,7 @@ dependencies = [
|
||||
"itertools 0.10.5",
|
||||
"once_cell",
|
||||
"postgres",
|
||||
"tokio-postgres",
|
||||
"tokio-postgres 0.7.7",
|
||||
"url",
|
||||
]
|
||||
|
||||
@@ -4609,15 +4663,6 @@ version = "0.2.17"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "5b40af805b3121feab8a3c29f04d8ad262fa8e0561883e7653e024ae4479e6de"
|
||||
|
||||
[[package]]
|
||||
name = "pq-sys"
|
||||
version = "0.6.3"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "f6cc05d7ea95200187117196eee9edd0644424911821aeb28a18ce60ea0b8793"
|
||||
dependencies = [
|
||||
"vcpkg",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "pq_proto"
|
||||
version = "0.1.0"
|
||||
@@ -4625,7 +4670,7 @@ dependencies = [
|
||||
"byteorder",
|
||||
"bytes",
|
||||
"itertools 0.10.5",
|
||||
"postgres-protocol",
|
||||
"postgres-protocol 0.6.4",
|
||||
"rand 0.8.5",
|
||||
"serde",
|
||||
"thiserror",
|
||||
@@ -4873,7 +4918,7 @@ dependencies = [
|
||||
"tikv-jemalloc-ctl",
|
||||
"tikv-jemallocator",
|
||||
"tokio",
|
||||
"tokio-postgres",
|
||||
"tokio-postgres 0.7.7",
|
||||
"tokio-postgres2",
|
||||
"tokio-rustls 0.26.0",
|
||||
"tokio-tungstenite 0.21.0",
|
||||
@@ -4930,17 +4975,6 @@ dependencies = [
|
||||
"proc-macro2",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "r2d2"
|
||||
version = "0.8.10"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "51de85fb3fb6524929c8a2eb85e6b6d363de4e8c48f9e2c2eac4944abc181c93"
|
||||
dependencies = [
|
||||
"log",
|
||||
"parking_lot 0.12.1",
|
||||
"scheduled-thread-pool",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "rand"
|
||||
version = "0.7.3"
|
||||
@@ -5672,7 +5706,7 @@ dependencies = [
|
||||
"pageserver_api",
|
||||
"parking_lot 0.12.1",
|
||||
"postgres",
|
||||
"postgres-protocol",
|
||||
"postgres-protocol 0.6.4",
|
||||
"postgres_backend",
|
||||
"postgres_ffi",
|
||||
"pprof",
|
||||
@@ -5696,7 +5730,7 @@ dependencies = [
|
||||
"tikv-jemallocator",
|
||||
"tokio",
|
||||
"tokio-io-timeout",
|
||||
"tokio-postgres",
|
||||
"tokio-postgres 0.7.7",
|
||||
"tokio-stream",
|
||||
"tokio-tar",
|
||||
"tokio-util",
|
||||
@@ -5755,12 +5789,12 @@ dependencies = [
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "scheduled-thread-pool"
|
||||
version = "0.2.7"
|
||||
name = "scoped-futures"
|
||||
version = "0.1.4"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "3cbc66816425a074528352f5789333ecff06ca41b36b0b0efdfbb29edc391a19"
|
||||
checksum = "1b24aae2d0636530f359e9d5ef0c04669d11c5e756699b27a6a6d845d8329091"
|
||||
dependencies = [
|
||||
"parking_lot 0.12.1",
|
||||
"pin-project-lite",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
@@ -6295,6 +6329,7 @@ dependencies = [
|
||||
"clap",
|
||||
"control_plane",
|
||||
"diesel",
|
||||
"diesel-async",
|
||||
"diesel_migrations",
|
||||
"fail",
|
||||
"futures",
|
||||
@@ -6309,10 +6344,10 @@ dependencies = [
|
||||
"pageserver_api",
|
||||
"pageserver_client",
|
||||
"postgres_connection",
|
||||
"r2d2",
|
||||
"rand 0.8.5",
|
||||
"reqwest",
|
||||
"routerify",
|
||||
"scoped-futures",
|
||||
"scopeguard",
|
||||
"serde",
|
||||
"serde_json",
|
||||
@@ -6365,7 +6400,7 @@ dependencies = [
|
||||
"serde_json",
|
||||
"storage_controller_client",
|
||||
"tokio",
|
||||
"tokio-postgres",
|
||||
"tokio-postgres 0.7.7",
|
||||
"tokio-postgres-rustls",
|
||||
"tokio-stream",
|
||||
"tokio-util",
|
||||
@@ -6774,7 +6809,7 @@ dependencies = [
|
||||
[[package]]
|
||||
name = "tokio-epoll-uring"
|
||||
version = "0.1.0"
|
||||
source = "git+https://github.com/neondatabase/tokio-epoll-uring.git?branch=main#33e00106a268644d02ba0461bbd64476073b0ee1"
|
||||
source = "git+https://github.com/neondatabase/tokio-epoll-uring.git?branch=main#781989bb540a1408b0b93daa1e9d1fa452195497"
|
||||
dependencies = [
|
||||
"futures",
|
||||
"nix 0.26.4",
|
||||
@@ -6824,13 +6859,39 @@ dependencies = [
|
||||
"percent-encoding",
|
||||
"phf",
|
||||
"pin-project-lite",
|
||||
"postgres-protocol",
|
||||
"postgres-types",
|
||||
"postgres-protocol 0.6.4",
|
||||
"postgres-types 0.2.4",
|
||||
"socket2",
|
||||
"tokio",
|
||||
"tokio-util",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "tokio-postgres"
|
||||
version = "0.7.12"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "3b5d3742945bc7d7f210693b0c58ae542c6fd47b17adbbda0885f3dcb34a6bdb"
|
||||
dependencies = [
|
||||
"async-trait",
|
||||
"byteorder",
|
||||
"bytes",
|
||||
"fallible-iterator",
|
||||
"futures-channel",
|
||||
"futures-util",
|
||||
"log",
|
||||
"parking_lot 0.12.1",
|
||||
"percent-encoding",
|
||||
"phf",
|
||||
"pin-project-lite",
|
||||
"postgres-protocol 0.6.7",
|
||||
"postgres-types 0.2.8",
|
||||
"rand 0.8.5",
|
||||
"socket2",
|
||||
"tokio",
|
||||
"tokio-util",
|
||||
"whoami",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "tokio-postgres-rustls"
|
||||
version = "0.12.0"
|
||||
@@ -6840,7 +6901,7 @@ dependencies = [
|
||||
"ring",
|
||||
"rustls 0.23.18",
|
||||
"tokio",
|
||||
"tokio-postgres",
|
||||
"tokio-postgres 0.7.7",
|
||||
"tokio-rustls 0.26.0",
|
||||
"x509-certificate",
|
||||
]
|
||||
@@ -7004,12 +7065,9 @@ version = "0.12.3"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "877c5b330756d856ffcc4553ab34a5684481ade925ecc54bcd1bf02b1d0d4d52"
|
||||
dependencies = [
|
||||
"async-stream",
|
||||
"async-trait",
|
||||
"axum",
|
||||
"base64 0.22.1",
|
||||
"bytes",
|
||||
"h2 0.4.4",
|
||||
"http 1.1.0",
|
||||
"http-body 1.0.0",
|
||||
"http-body-util",
|
||||
@@ -7021,7 +7079,6 @@ dependencies = [
|
||||
"prost",
|
||||
"rustls-native-certs 0.8.0",
|
||||
"rustls-pemfile 2.1.1",
|
||||
"socket2",
|
||||
"tokio",
|
||||
"tokio-rustls 0.26.0",
|
||||
"tokio-stream",
|
||||
@@ -7373,7 +7430,7 @@ dependencies = [
|
||||
[[package]]
|
||||
name = "uring-common"
|
||||
version = "0.1.0"
|
||||
source = "git+https://github.com/neondatabase/tokio-epoll-uring.git?branch=main#33e00106a268644d02ba0461bbd64476073b0ee1"
|
||||
source = "git+https://github.com/neondatabase/tokio-epoll-uring.git?branch=main#781989bb540a1408b0b93daa1e9d1fa452195497"
|
||||
dependencies = [
|
||||
"bytes",
|
||||
"io-uring",
|
||||
@@ -7502,12 +7559,6 @@ version = "0.1.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "830b7e5d4d90034032940e4ace0d9a9a057e7a45cd94e6c007832e39edb82f6d"
|
||||
|
||||
[[package]]
|
||||
name = "vcpkg"
|
||||
version = "0.2.15"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "accd4ea62f7bb7a82fe23066fb0957d48ef677f6eeb8215f372f52e48bb32426"
|
||||
|
||||
[[package]]
|
||||
name = "version_check"
|
||||
version = "0.9.4"
|
||||
@@ -7527,7 +7578,7 @@ dependencies = [
|
||||
"serde_json",
|
||||
"sysinfo",
|
||||
"tokio",
|
||||
"tokio-postgres",
|
||||
"tokio-postgres 0.7.7",
|
||||
"tokio-util",
|
||||
"tracing",
|
||||
"tracing-subscriber",
|
||||
@@ -7582,7 +7633,6 @@ dependencies = [
|
||||
"tikv-jemallocator",
|
||||
"tokio",
|
||||
"tokio-util",
|
||||
"tonic",
|
||||
"tonic-build",
|
||||
"tracing",
|
||||
"utils",
|
||||
@@ -7991,8 +8041,6 @@ version = "0.1.0"
|
||||
dependencies = [
|
||||
"ahash",
|
||||
"anyhow",
|
||||
"axum",
|
||||
"axum-core",
|
||||
"base64 0.13.1",
|
||||
"base64 0.21.1",
|
||||
"base64ct",
|
||||
@@ -8073,7 +8121,6 @@ dependencies = [
|
||||
"toml_edit",
|
||||
"tonic",
|
||||
"tower 0.4.13",
|
||||
"tower 0.5.2",
|
||||
"tracing",
|
||||
"tracing-core",
|
||||
"url",
|
||||
|
||||
@@ -187,7 +187,7 @@ tokio-tar = "0.3"
|
||||
tokio-util = { version = "0.7.10", features = ["io", "rt"] }
|
||||
toml = "0.8"
|
||||
toml_edit = "0.22"
|
||||
tonic = {version = "0.12.3", features = ["tls", "tls-roots"]}
|
||||
tonic = {version = "0.12.3", default-features = false, features = ["channel", "tls", "tls-roots"]}
|
||||
tower = { version = "0.5.2", default-features = false }
|
||||
tower-http = { version = "0.6.2", features = ["request-id", "trace"] }
|
||||
tower-service = "0.3.3"
|
||||
|
||||
2
Makefile
2
Makefile
@@ -64,8 +64,6 @@ CARGO_BUILD_FLAGS += $(filter -j1,$(MAKEFLAGS))
|
||||
CARGO_CMD_PREFIX += $(if $(filter n,$(MAKEFLAGS)),,+)
|
||||
# Force cargo not to print progress bar
|
||||
CARGO_CMD_PREFIX += CARGO_TERM_PROGRESS_WHEN=never CI=1
|
||||
# Set PQ_LIB_DIR to make sure `storage_controller` get linked with bundled libpq (through diesel)
|
||||
CARGO_CMD_PREFIX += PQ_LIB_DIR=$(POSTGRES_INSTALL_DIR)/v16/lib
|
||||
|
||||
CACHEDIR_TAG_CONTENTS := "Signature: 8a477f597d28d172789f06886806bc55"
|
||||
|
||||
|
||||
@@ -67,6 +67,9 @@ RUN cd postgres && \
|
||||
# Enable some of contrib extensions
|
||||
echo 'trusted = true' >> /usr/local/pgsql/share/extension/autoinc.control && \
|
||||
echo 'trusted = true' >> /usr/local/pgsql/share/extension/dblink.control && \
|
||||
echo 'trusted = true' >> /usr/local/pgsql/share/extension/postgres_fdw.control && \
|
||||
file=/usr/local/pgsql/share/extension/postgres_fdw--1.0.sql && [ -e $file ] && \
|
||||
echo 'GRANT USAGE ON FOREIGN DATA WRAPPER postgres_fdw TO neon_superuser;' >> $file && \
|
||||
echo 'trusted = true' >> /usr/local/pgsql/share/extension/bloom.control && \
|
||||
echo 'trusted = true' >> /usr/local/pgsql/share/extension/earthdistance.control && \
|
||||
echo 'trusted = true' >> /usr/local/pgsql/share/extension/insert_username.control && \
|
||||
@@ -360,6 +363,8 @@ COPY compute/patches/pgvector.patch /pgvector.patch
|
||||
RUN wget https://github.com/pgvector/pgvector/archive/refs/tags/v0.8.0.tar.gz -O pgvector.tar.gz && \
|
||||
echo "867a2c328d4928a5a9d6f052cd3bc78c7d60228a9b914ad32aa3db88e9de27b0 pgvector.tar.gz" | sha256sum --check && \
|
||||
mkdir pgvector-src && cd pgvector-src && tar xzf ../pgvector.tar.gz --strip-components=1 -C . && \
|
||||
wget https://github.com/pgvector/pgvector/raw/refs/tags/v0.7.4/sql/vector.sql -O ./sql/vector--0.7.4.sql && \
|
||||
echo "10218d05dc02299562252a9484775178b14a1d8edb92a2d1672ef488530f7778 ./sql/vector--0.7.4.sql" | sha256sum --check && \
|
||||
patch -p1 < /pgvector.patch && \
|
||||
make -j $(getconf _NPROCESSORS_ONLN) OPTFLAGS="" && \
|
||||
make -j $(getconf _NPROCESSORS_ONLN) OPTFLAGS="" install && \
|
||||
@@ -1261,11 +1266,12 @@ RUN set -e \
|
||||
|
||||
#########################################################################################
|
||||
#
|
||||
# Layers "postgres-exporter" and "sql-exporter"
|
||||
# Layers "postgres-exporter", "pgbouncer-exporter", and "sql-exporter"
|
||||
#
|
||||
#########################################################################################
|
||||
|
||||
FROM quay.io/prometheuscommunity/postgres-exporter:v0.16.0 AS postgres-exporter
|
||||
FROM quay.io/prometheuscommunity/pgbouncer-exporter:v0.10.2 AS pgbouncer-exporter
|
||||
|
||||
# Keep the version the same as in build-tools.Dockerfile and
|
||||
# test_runner/regress/test_compute_metrics.py.
|
||||
@@ -1347,9 +1353,6 @@ COPY --from=pg-roaringbitmap-pg-build /pg_roaringbitmap.tar.gz /ext-src
|
||||
COPY --from=pg-semver-pg-build /pg_semver.tar.gz /ext-src
|
||||
#COPY --from=pg-embedding-pg-build /home/nonroot/pg_embedding-src/ /ext-src
|
||||
#COPY --from=wal2json-pg-build /wal2json_2_5.tar.gz /ext-src
|
||||
#pg_anon is not supported yet for pg v17 so, don't fail if nothing found
|
||||
COPY --from=pg-anon-pg-build /pg_anon.tar.g? /ext-src
|
||||
COPY compute/patches/pg_anon.patch /ext-src
|
||||
COPY --from=pg-ivm-build /pg_ivm.tar.gz /ext-src
|
||||
COPY --from=pg-partman-build /pg_partman.tar.gz /ext-src
|
||||
RUN cd /ext-src/ && for f in *.tar.gz; \
|
||||
@@ -1360,9 +1363,6 @@ RUN cd /ext-src/rum-src && patch -p1 <../rum.patch
|
||||
RUN cd /ext-src/pgvector-src && patch -p1 <../pgvector.patch
|
||||
RUN cd /ext-src/pg_hint_plan-src && patch -p1 < /ext-src/pg_hint_plan_${PG_VERSION}.patch
|
||||
COPY --chmod=755 docker-compose/run-tests.sh /run-tests.sh
|
||||
RUN case "${PG_VERSION}" in "v17") \
|
||||
echo "postgresql_anonymizer does not yet support PG17" && exit 0;; \
|
||||
esac && patch -p1 </ext-src/pg_anon.patch
|
||||
RUN patch -p1 </ext-src/pg_cron.patch
|
||||
ENV PATH=/usr/local/pgsql/bin:$PATH
|
||||
ENV PGHOST=compute
|
||||
@@ -1403,6 +1403,7 @@ RUN mkdir -p /etc/local_proxy && chown postgres:postgres /etc/local_proxy
|
||||
|
||||
# Metrics exporter binaries and configuration files
|
||||
COPY --from=postgres-exporter /bin/postgres_exporter /bin/postgres_exporter
|
||||
COPY --from=pgbouncer-exporter /bin/pgbouncer_exporter /bin/pgbouncer_exporter
|
||||
COPY --from=sql-exporter /bin/sql_exporter /bin/sql_exporter
|
||||
|
||||
COPY --chown=postgres compute/etc/postgres_exporter.yml /etc/postgres_exporter.yml
|
||||
|
||||
@@ -19,6 +19,8 @@ max_prepared_statements=0
|
||||
admin_users=postgres
|
||||
unix_socket_dir=/tmp/
|
||||
unix_socket_mode=0777
|
||||
; required for pgbouncer_exporter
|
||||
ignore_startup_parameters=extra_float_digits
|
||||
|
||||
;; Disable connection logging. It produces a lot of logs that no one looks at,
|
||||
;; and we can get similar log entries from the proxy too. We had incidents in
|
||||
|
||||
@@ -1,8 +1,24 @@
|
||||
diff --git a/Makefile b/Makefile
|
||||
index 7a4b88c..56678af 100644
|
||||
--- a/Makefile
|
||||
+++ b/Makefile
|
||||
@@ -3,7 +3,10 @@ EXTVERSION = 0.8.0
|
||||
|
||||
MODULE_big = vector
|
||||
DATA = $(wildcard sql/*--*--*.sql)
|
||||
-DATA_built = sql/$(EXTENSION)--$(EXTVERSION).sql
|
||||
+# This change is needed to install different per-version SQL files
|
||||
+# like pgvector--0.8.0.sql and pgvector--0.7.4.sql
|
||||
+# The corresponding file is downloaded during the Docker image build process
|
||||
+DATA_built = sql/$(EXTENSION)--$(EXTVERSION).sql sql/vector--0.7.4.sql
|
||||
OBJS = src/bitutils.o src/bitvec.o src/halfutils.o src/halfvec.o src/hnsw.o src/hnswbuild.o src/hnswinsert.o src/hnswscan.o src/hnswutils.o src/hnswvacuum.o src/ivfbuild.o src/ivfflat.o src/ivfinsert.o src/ivfkmeans.o src/ivfscan.o src/ivfutils.o src/ivfvacuum.o src/sparsevec.o src/vector.o
|
||||
HEADERS = src/halfvec.h src/sparsevec.h src/vector.h
|
||||
|
||||
diff --git a/src/hnswbuild.c b/src/hnswbuild.c
|
||||
index dcfb2bd..d5189ee 100644
|
||||
index b667478..fc1897c 100644
|
||||
--- a/src/hnswbuild.c
|
||||
+++ b/src/hnswbuild.c
|
||||
@@ -860,9 +860,17 @@ HnswParallelBuildMain(dsm_segment *seg, shm_toc *toc)
|
||||
@@ -843,9 +843,17 @@ HnswParallelBuildMain(dsm_segment *seg, shm_toc *toc)
|
||||
|
||||
hnswarea = shm_toc_lookup(toc, PARALLEL_KEY_HNSW_AREA, false);
|
||||
|
||||
@@ -20,7 +36,7 @@ index dcfb2bd..d5189ee 100644
|
||||
/* Close relations within worker */
|
||||
index_close(indexRel, indexLockmode);
|
||||
table_close(heapRel, heapLockmode);
|
||||
@@ -1117,12 +1125,38 @@ BuildIndex(Relation heap, Relation index, IndexInfo *indexInfo,
|
||||
@@ -1100,12 +1108,38 @@ BuildIndex(Relation heap, Relation index, IndexInfo *indexInfo,
|
||||
SeedRandom(42);
|
||||
#endif
|
||||
|
||||
|
||||
@@ -27,6 +27,10 @@ commands:
|
||||
user: nobody
|
||||
sysvInitAction: respawn
|
||||
shell: 'DATA_SOURCE_NAME="user=cloud_admin sslmode=disable dbname=postgres application_name=postgres-exporter" /bin/postgres_exporter --config.file=/etc/postgres_exporter.yml'
|
||||
- name: pgbouncer-exporter
|
||||
user: postgres
|
||||
sysvInitAction: respawn
|
||||
shell: '/bin/pgbouncer_exporter --pgBouncer.connectionString="postgres:///pgbouncer?host=/tmp&port=6432&dbname=pgbouncer&user=pgbouncer"'
|
||||
- name: sql-exporter
|
||||
user: nobody
|
||||
sysvInitAction: respawn
|
||||
|
||||
@@ -27,6 +27,10 @@ commands:
|
||||
user: nobody
|
||||
sysvInitAction: respawn
|
||||
shell: 'DATA_SOURCE_NAME="user=cloud_admin sslmode=disable dbname=postgres application_name=postgres-exporter" /bin/postgres_exporter --config.file=/etc/postgres_exporter.yml'
|
||||
- name: pgbouncer-exporter
|
||||
user: postgres
|
||||
sysvInitAction: respawn
|
||||
shell: '/bin/pgbouncer_exporter --pgBouncer.connectionString="postgres:///pgbouncer?host=/tmp&port=6432&dbname=pgbouncer&user=pgbouncer"'
|
||||
- name: sql-exporter
|
||||
user: nobody
|
||||
sysvInitAction: respawn
|
||||
|
||||
@@ -41,14 +41,14 @@ use crate::local_proxy;
|
||||
use crate::pg_helpers::*;
|
||||
use crate::spec::*;
|
||||
use crate::spec_apply::ApplySpecPhase::{
|
||||
CreateAndAlterDatabases, CreateAndAlterRoles, CreateAvailabilityCheck, CreateSuperUser,
|
||||
DropInvalidDatabases, DropRoles, HandleNeonExtension, HandleOtherExtensions,
|
||||
RenameAndDeleteDatabases, RenameRoles, RunInEachDatabase,
|
||||
CreateAndAlterDatabases, CreateAndAlterRoles, CreateAvailabilityCheck, CreateSchemaNeon,
|
||||
CreateSuperUser, DropInvalidDatabases, DropRoles, FinalizeDropLogicalSubscriptions,
|
||||
HandleNeonExtension, HandleOtherExtensions, RenameAndDeleteDatabases, RenameRoles,
|
||||
RunInEachDatabase,
|
||||
};
|
||||
use crate::spec_apply::PerDatabasePhase;
|
||||
use crate::spec_apply::PerDatabasePhase::{
|
||||
ChangeSchemaPerms, DeleteDBRoleReferences, DropSubscriptionsForDeletedDatabases,
|
||||
HandleAnonExtension,
|
||||
ChangeSchemaPerms, DeleteDBRoleReferences, DropLogicalSubscriptions, HandleAnonExtension,
|
||||
};
|
||||
use crate::spec_apply::{apply_operations, MutableApplyContext, DB};
|
||||
use crate::sync_sk::{check_if_synced, ping_safekeeper};
|
||||
@@ -340,6 +340,15 @@ impl ComputeNode {
|
||||
self.state.lock().unwrap().status
|
||||
}
|
||||
|
||||
pub fn get_timeline_id(&self) -> Option<TimelineId> {
|
||||
self.state
|
||||
.lock()
|
||||
.unwrap()
|
||||
.pspec
|
||||
.as_ref()
|
||||
.map(|s| s.timeline_id)
|
||||
}
|
||||
|
||||
// Remove `pgdata` directory and create it again with right permissions.
|
||||
fn create_pgdata(&self) -> Result<()> {
|
||||
// Ignore removal error, likely it is a 'No such file or directory (os error 2)'.
|
||||
@@ -929,6 +938,48 @@ impl ComputeNode {
|
||||
.map(|role| (role.name.clone(), role))
|
||||
.collect::<HashMap<String, Role>>();
|
||||
|
||||
// Check if we need to drop subscriptions before starting the endpoint.
|
||||
//
|
||||
// It is important to do this operation exactly once when endpoint starts on a new branch.
|
||||
// Otherwise, we may drop not inherited, but newly created subscriptions.
|
||||
//
|
||||
// We cannot rely only on spec.drop_subscriptions_before_start flag,
|
||||
// because if for some reason compute restarts inside VM,
|
||||
// it will start again with the same spec and flag value.
|
||||
//
|
||||
// To handle this, we save the fact of the operation in the database
|
||||
// in the neon.drop_subscriptions_done table.
|
||||
// If the table does not exist, we assume that the operation was never performed, so we must do it.
|
||||
// If table exists, we check if the operation was performed on the current timelilne.
|
||||
//
|
||||
let mut drop_subscriptions_done = false;
|
||||
|
||||
if spec.drop_subscriptions_before_start {
|
||||
let timeline_id = self.get_timeline_id().context("timeline_id must be set")?;
|
||||
let query = format!("select 1 from neon.drop_subscriptions_done where timeline_id = '{}'", timeline_id);
|
||||
|
||||
info!("Checking if drop subscription operation was already performed for timeline_id: {}", timeline_id);
|
||||
|
||||
drop_subscriptions_done = match
|
||||
client.simple_query(&query).await {
|
||||
Ok(result) => {
|
||||
matches!(&result[0], postgres::SimpleQueryMessage::Row(_))
|
||||
},
|
||||
Err(e) =>
|
||||
{
|
||||
match e.code() {
|
||||
Some(&SqlState::UNDEFINED_TABLE) => false,
|
||||
_ => {
|
||||
// We don't expect any other error here, except for the schema/table not existing
|
||||
error!("Error checking if drop subscription operation was already performed: {}", e);
|
||||
return Err(e.into());
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
|
||||
let jwks_roles = Arc::new(
|
||||
spec.as_ref()
|
||||
.local_proxy_config
|
||||
@@ -996,7 +1047,7 @@ impl ComputeNode {
|
||||
jwks_roles.clone(),
|
||||
concurrency_token.clone(),
|
||||
db,
|
||||
[DropSubscriptionsForDeletedDatabases].to_vec(),
|
||||
[DropLogicalSubscriptions].to_vec(),
|
||||
);
|
||||
|
||||
Ok(spawn(fut))
|
||||
@@ -1024,6 +1075,7 @@ impl ComputeNode {
|
||||
CreateAndAlterRoles,
|
||||
RenameAndDeleteDatabases,
|
||||
CreateAndAlterDatabases,
|
||||
CreateSchemaNeon,
|
||||
] {
|
||||
info!("Applying phase {:?}", &phase);
|
||||
apply_operations(
|
||||
@@ -1064,6 +1116,17 @@ impl ComputeNode {
|
||||
}
|
||||
|
||||
let conf = Arc::new(conf);
|
||||
let mut phases = vec![
|
||||
DeleteDBRoleReferences,
|
||||
ChangeSchemaPerms,
|
||||
HandleAnonExtension,
|
||||
];
|
||||
|
||||
if spec.drop_subscriptions_before_start && !drop_subscriptions_done {
|
||||
info!("Adding DropLogicalSubscriptions phase because drop_subscriptions_before_start is set");
|
||||
phases.push(DropLogicalSubscriptions);
|
||||
}
|
||||
|
||||
let fut = Self::apply_spec_sql_db(
|
||||
spec.clone(),
|
||||
conf,
|
||||
@@ -1071,12 +1134,7 @@ impl ComputeNode {
|
||||
jwks_roles.clone(),
|
||||
concurrency_token.clone(),
|
||||
db,
|
||||
[
|
||||
DeleteDBRoleReferences,
|
||||
ChangeSchemaPerms,
|
||||
HandleAnonExtension,
|
||||
]
|
||||
.to_vec(),
|
||||
phases,
|
||||
);
|
||||
|
||||
Ok(spawn(fut))
|
||||
@@ -1088,12 +1146,20 @@ impl ComputeNode {
|
||||
handle.await??;
|
||||
}
|
||||
|
||||
for phase in vec![
|
||||
let mut phases = vec![
|
||||
HandleOtherExtensions,
|
||||
HandleNeonExtension,
|
||||
HandleNeonExtension, // This step depends on CreateSchemaNeon
|
||||
CreateAvailabilityCheck,
|
||||
DropRoles,
|
||||
] {
|
||||
];
|
||||
|
||||
// This step depends on CreateSchemaNeon
|
||||
if spec.drop_subscriptions_before_start && !drop_subscriptions_done {
|
||||
info!("Adding FinalizeDropLogicalSubscriptions phase because drop_subscriptions_before_start is set");
|
||||
phases.push(FinalizeDropLogicalSubscriptions);
|
||||
}
|
||||
|
||||
for phase in phases {
|
||||
debug!("Applying phase {:?}", &phase);
|
||||
apply_operations(
|
||||
spec.clone(),
|
||||
@@ -1463,6 +1529,14 @@ impl ComputeNode {
|
||||
Ok(())
|
||||
},
|
||||
)?;
|
||||
|
||||
let postgresql_conf_path = pgdata_path.join("postgresql.conf");
|
||||
if config::line_in_file(
|
||||
&postgresql_conf_path,
|
||||
"neon.disable_logical_replication_subscribers=false",
|
||||
)? {
|
||||
info!("updated postgresql.conf to set neon.disable_logical_replication_subscribers=false");
|
||||
}
|
||||
self.pg_reload_conf()?;
|
||||
}
|
||||
self.post_apply_config()?;
|
||||
|
||||
@@ -129,6 +129,13 @@ pub fn write_postgres_conf(
|
||||
|
||||
writeln!(file, "neon.extension_server_port={}", extension_server_port)?;
|
||||
|
||||
if spec.drop_subscriptions_before_start {
|
||||
writeln!(file, "neon.disable_logical_replication_subscribers=true")?;
|
||||
} else {
|
||||
// be explicit about the default value
|
||||
writeln!(file, "neon.disable_logical_replication_subscribers=false")?;
|
||||
}
|
||||
|
||||
// This is essential to keep this line at the end of the file,
|
||||
// because it is intended to override any settings above.
|
||||
writeln!(file, "include_if_exists = 'compute_ctl_temp_override.conf'")?;
|
||||
|
||||
@@ -47,7 +47,7 @@ pub enum PerDatabasePhase {
|
||||
DeleteDBRoleReferences,
|
||||
ChangeSchemaPerms,
|
||||
HandleAnonExtension,
|
||||
DropSubscriptionsForDeletedDatabases,
|
||||
DropLogicalSubscriptions,
|
||||
}
|
||||
|
||||
#[derive(Clone, Debug)]
|
||||
@@ -58,11 +58,13 @@ pub enum ApplySpecPhase {
|
||||
CreateAndAlterRoles,
|
||||
RenameAndDeleteDatabases,
|
||||
CreateAndAlterDatabases,
|
||||
CreateSchemaNeon,
|
||||
RunInEachDatabase { db: DB, subphase: PerDatabasePhase },
|
||||
HandleOtherExtensions,
|
||||
HandleNeonExtension,
|
||||
CreateAvailabilityCheck,
|
||||
DropRoles,
|
||||
FinalizeDropLogicalSubscriptions,
|
||||
}
|
||||
|
||||
pub struct Operation {
|
||||
@@ -331,7 +333,7 @@ async fn get_operations<'a>(
|
||||
// NB: there could be other db states, which prevent us from dropping
|
||||
// the database. For example, if db is used by any active subscription
|
||||
// or replication slot.
|
||||
// Such cases are handled in the DropSubscriptionsForDeletedDatabases
|
||||
// Such cases are handled in the DropLogicalSubscriptions
|
||||
// phase. We do all the cleanup before actually dropping the database.
|
||||
let drop_db_query: String = format!(
|
||||
"DROP DATABASE IF EXISTS {} WITH (FORCE)",
|
||||
@@ -442,13 +444,19 @@ async fn get_operations<'a>(
|
||||
|
||||
Ok(Box::new(operations))
|
||||
}
|
||||
ApplySpecPhase::CreateSchemaNeon => Ok(Box::new(once(Operation {
|
||||
query: String::from("CREATE SCHEMA IF NOT EXISTS neon"),
|
||||
comment: Some(String::from(
|
||||
"create schema for neon extension and utils tables",
|
||||
)),
|
||||
}))),
|
||||
ApplySpecPhase::RunInEachDatabase { db, subphase } => {
|
||||
match subphase {
|
||||
PerDatabasePhase::DropSubscriptionsForDeletedDatabases => {
|
||||
PerDatabasePhase::DropLogicalSubscriptions => {
|
||||
match &db {
|
||||
DB::UserDB(db) => {
|
||||
let drop_subscription_query: String = format!(
|
||||
include_str!("sql/drop_subscription_for_drop_dbs.sql"),
|
||||
include_str!("sql/drop_subscriptions.sql"),
|
||||
datname_str = escape_literal(&db.name),
|
||||
);
|
||||
|
||||
@@ -666,10 +674,6 @@ async fn get_operations<'a>(
|
||||
}
|
||||
ApplySpecPhase::HandleNeonExtension => {
|
||||
let operations = vec![
|
||||
Operation {
|
||||
query: String::from("CREATE SCHEMA IF NOT EXISTS neon"),
|
||||
comment: Some(String::from("init: add schema for extension")),
|
||||
},
|
||||
Operation {
|
||||
query: String::from("CREATE EXTENSION IF NOT EXISTS neon WITH SCHEMA neon"),
|
||||
comment: Some(String::from(
|
||||
@@ -712,5 +716,9 @@ async fn get_operations<'a>(
|
||||
|
||||
Ok(Box::new(operations))
|
||||
}
|
||||
ApplySpecPhase::FinalizeDropLogicalSubscriptions => Ok(Box::new(once(Operation {
|
||||
query: String::from(include_str!("sql/finalize_drop_subscriptions.sql")),
|
||||
comment: None,
|
||||
}))),
|
||||
}
|
||||
}
|
||||
|
||||
21
compute_tools/src/sql/finalize_drop_subscriptions.sql
Normal file
21
compute_tools/src/sql/finalize_drop_subscriptions.sql
Normal file
@@ -0,0 +1,21 @@
|
||||
DO $$
|
||||
BEGIN
|
||||
IF NOT EXISTS(
|
||||
SELECT 1
|
||||
FROM pg_catalog.pg_tables
|
||||
WHERE tablename = 'drop_subscriptions_done'
|
||||
AND schemaname = 'neon'
|
||||
)
|
||||
THEN
|
||||
CREATE TABLE neon.drop_subscriptions_done
|
||||
(id serial primary key, timeline_id text);
|
||||
END IF;
|
||||
|
||||
-- preserve the timeline_id of the last drop_subscriptions run
|
||||
-- to ensure that the cleanup of a timeline is executed only once.
|
||||
-- use upsert to avoid the table bloat in case of cascade branching (branch of a branch)
|
||||
INSERT INTO neon.drop_subscriptions_done VALUES (1, current_setting('neon.timeline_id'))
|
||||
ON CONFLICT (id) DO UPDATE
|
||||
SET timeline_id = current_setting('neon.timeline_id');
|
||||
END
|
||||
$$
|
||||
@@ -1357,6 +1357,7 @@ async fn handle_endpoint(subcmd: &EndpointCmd, env: &local_env::LocalEnv) -> Res
|
||||
args.pg_version,
|
||||
mode,
|
||||
!args.update_catalog,
|
||||
false,
|
||||
)?;
|
||||
}
|
||||
EndpointCmd::Start(args) => {
|
||||
|
||||
@@ -76,6 +76,7 @@ pub struct EndpointConf {
|
||||
http_port: u16,
|
||||
pg_version: u32,
|
||||
skip_pg_catalog_updates: bool,
|
||||
drop_subscriptions_before_start: bool,
|
||||
features: Vec<ComputeFeature>,
|
||||
}
|
||||
|
||||
@@ -143,6 +144,7 @@ impl ComputeControlPlane {
|
||||
pg_version: u32,
|
||||
mode: ComputeMode,
|
||||
skip_pg_catalog_updates: bool,
|
||||
drop_subscriptions_before_start: bool,
|
||||
) -> Result<Arc<Endpoint>> {
|
||||
let pg_port = pg_port.unwrap_or_else(|| self.get_port());
|
||||
let http_port = http_port.unwrap_or_else(|| self.get_port() + 1);
|
||||
@@ -162,6 +164,7 @@ impl ComputeControlPlane {
|
||||
// with this we basically test a case of waking up an idle compute, where
|
||||
// we also skip catalog updates in the cloud.
|
||||
skip_pg_catalog_updates,
|
||||
drop_subscriptions_before_start,
|
||||
features: vec![],
|
||||
});
|
||||
|
||||
@@ -177,6 +180,7 @@ impl ComputeControlPlane {
|
||||
pg_port,
|
||||
pg_version,
|
||||
skip_pg_catalog_updates,
|
||||
drop_subscriptions_before_start,
|
||||
features: vec![],
|
||||
})?,
|
||||
)?;
|
||||
@@ -240,6 +244,7 @@ pub struct Endpoint {
|
||||
// Optimizations
|
||||
skip_pg_catalog_updates: bool,
|
||||
|
||||
drop_subscriptions_before_start: bool,
|
||||
// Feature flags
|
||||
features: Vec<ComputeFeature>,
|
||||
}
|
||||
@@ -291,6 +296,7 @@ impl Endpoint {
|
||||
tenant_id: conf.tenant_id,
|
||||
pg_version: conf.pg_version,
|
||||
skip_pg_catalog_updates: conf.skip_pg_catalog_updates,
|
||||
drop_subscriptions_before_start: conf.drop_subscriptions_before_start,
|
||||
features: conf.features,
|
||||
})
|
||||
}
|
||||
@@ -625,6 +631,7 @@ impl Endpoint {
|
||||
shard_stripe_size: Some(shard_stripe_size),
|
||||
local_proxy_config: None,
|
||||
reconfigure_concurrency: 1,
|
||||
drop_subscriptions_before_start: self.drop_subscriptions_before_start,
|
||||
};
|
||||
let spec_path = self.endpoint_path().join("spec.json");
|
||||
std::fs::write(spec_path, serde_json::to_string_pretty(&spec)?)?;
|
||||
|
||||
@@ -352,6 +352,16 @@ impl PageServerNode {
|
||||
.map(serde_json::from_str)
|
||||
.transpose()
|
||||
.context("Failed to parse 'compaction_algorithm' json")?,
|
||||
l0_flush_delay_threshold: settings
|
||||
.remove("l0_flush_delay_threshold")
|
||||
.map(|x| x.parse::<usize>())
|
||||
.transpose()
|
||||
.context("Failed to parse 'l0_flush_delay_threshold' as an integer")?,
|
||||
l0_flush_stall_threshold: settings
|
||||
.remove("l0_flush_stall_threshold")
|
||||
.map(|x| x.parse::<usize>())
|
||||
.transpose()
|
||||
.context("Failed to parse 'l0_flush_stall_threshold' as an integer")?,
|
||||
gc_horizon: settings
|
||||
.remove("gc_horizon")
|
||||
.map(|x| x.parse::<u64>())
|
||||
@@ -423,6 +433,21 @@ impl PageServerNode {
|
||||
.map(|x| x.parse::<bool>())
|
||||
.transpose()
|
||||
.context("Failed to parse 'rel_size_v2_enabled' as bool")?,
|
||||
gc_compaction_enabled: settings
|
||||
.remove("gc_compaction_enabled")
|
||||
.map(|x| x.parse::<bool>())
|
||||
.transpose()
|
||||
.context("Failed to parse 'gc_compaction_enabled' as bool")?,
|
||||
gc_compaction_initial_threshold_kb: settings
|
||||
.remove("gc_compaction_initial_threshold_kb")
|
||||
.map(|x| x.parse::<u64>())
|
||||
.transpose()
|
||||
.context("Failed to parse 'gc_compaction_initial_threshold_kb' as integer")?,
|
||||
gc_compaction_ratio_percent: settings
|
||||
.remove("gc_compaction_ratio_percent")
|
||||
.map(|x| x.parse::<u64>())
|
||||
.transpose()
|
||||
.context("Failed to parse 'gc_compaction_ratio_percent' as integer")?,
|
||||
};
|
||||
if !settings.is_empty() {
|
||||
bail!("Unrecognized tenant settings: {settings:?}")
|
||||
|
||||
@@ -10,10 +10,7 @@ USER root
|
||||
RUN apt-get update && \
|
||||
apt-get install -y curl \
|
||||
jq \
|
||||
python3-pip \
|
||||
netcat-openbsd
|
||||
#Faker is required for the pg_anon test
|
||||
RUN case $COMPUTE_IMAGE in compute-node-v17) OPT="--break-system-packages";; *) OPT= ;; esac && pip3 install $OPT Faker
|
||||
#This is required for the pg_hintplan test
|
||||
RUN mkdir -p /ext-src/pg_hint_plan-src && chown postgres /ext-src/pg_hint_plan-src
|
||||
|
||||
|
||||
@@ -150,8 +150,8 @@ services:
|
||||
- REPOSITORY=${REPOSITORY:-neondatabase}
|
||||
- COMPUTE_IMAGE=compute-node-v${PG_VERSION:-16}
|
||||
- TAG=${TAG:-latest}
|
||||
- http_proxy=$http_proxy
|
||||
- https_proxy=$https_proxy
|
||||
- http_proxy=${http_proxy:-}
|
||||
- https_proxy=${https_proxy:-}
|
||||
environment:
|
||||
- PG_VERSION=${PG_VERSION:-16}
|
||||
#- RUST_BACKTRACE=1
|
||||
@@ -185,6 +185,8 @@ services:
|
||||
neon-test-extensions:
|
||||
profiles: ["test-extensions"]
|
||||
image: ${REPOSITORY:-neondatabase}/neon-test-extensions-v${PG_TEST_VERSION:-16}:${TAG:-latest}
|
||||
environment:
|
||||
- PGPASSWORD=cloud_admin
|
||||
entrypoint:
|
||||
- "/bin/bash"
|
||||
- "-c"
|
||||
|
||||
@@ -18,14 +18,10 @@ cd $(dirname $0)
|
||||
COMPUTE_CONTAINER_NAME=docker-compose-compute-1
|
||||
TEST_CONTAINER_NAME=docker-compose-neon-test-extensions-1
|
||||
PSQL_OPTION="-h localhost -U cloud_admin -p 55433 -d postgres"
|
||||
: ${http_proxy:=}
|
||||
: ${https_proxy:=}
|
||||
export http_proxy https_proxy
|
||||
|
||||
cleanup() {
|
||||
echo "show container information"
|
||||
docker ps
|
||||
docker compose --profile test-extensions -f $COMPOSE_FILE logs
|
||||
echo "stop containers..."
|
||||
docker compose --profile test-extensions -f $COMPOSE_FILE down
|
||||
}
|
||||
@@ -35,12 +31,6 @@ for pg_version in ${TEST_VERSION_ONLY-14 15 16 17}; do
|
||||
echo "clean up containers if exists"
|
||||
cleanup
|
||||
PG_TEST_VERSION=$((pg_version < 16 ? 16 : pg_version))
|
||||
# The support of pg_anon not yet added to PG17, so we have to add the corresponding option for other PG versions
|
||||
if [ "${pg_version}" -ne 17 ]; then
|
||||
SPEC_PATH="compute_wrapper/var/db/postgres/specs"
|
||||
mv $SPEC_PATH/spec.json $SPEC_PATH/spec.bak
|
||||
jq '.cluster.settings += [{"name": "session_preload_libraries","value": "anon","vartype": "string"}]' "${SPEC_PATH}/spec.bak" > "${SPEC_PATH}/spec.json"
|
||||
fi
|
||||
PG_VERSION=$pg_version PG_TEST_VERSION=$PG_TEST_VERSION docker compose --profile test-extensions -f $COMPOSE_FILE up --build -d
|
||||
|
||||
echo "wait until the compute is ready. timeout after 60s. "
|
||||
@@ -50,7 +40,6 @@ for pg_version in ${TEST_VERSION_ONLY-14 15 16 17}; do
|
||||
cnt=`expr $cnt + 3`
|
||||
if [ $cnt -gt 60 ]; then
|
||||
echo "timeout before the compute is ready."
|
||||
cleanup
|
||||
exit 1
|
||||
fi
|
||||
if docker compose --profile test-extensions -f $COMPOSE_FILE logs "compute_is_ready" | grep -q "accepting connections"; then
|
||||
@@ -62,36 +51,19 @@ for pg_version in ${TEST_VERSION_ONLY-14 15 16 17}; do
|
||||
done
|
||||
|
||||
if [ $pg_version -ge 16 ]; then
|
||||
echo Enabling trust connection
|
||||
docker exec $COMPUTE_CONTAINER_NAME bash -c "sed -i '\$d' /var/db/postgres/compute/pg_hba.conf && echo -e 'host\t all\t all\t all\t trust' >> /var/db/postgres/compute/pg_hba.conf && psql $PSQL_OPTION -c 'select pg_reload_conf()' "
|
||||
echo Adding postgres role
|
||||
docker exec $COMPUTE_CONTAINER_NAME psql $PSQL_OPTION -c "CREATE ROLE postgres SUPERUSER LOGIN"
|
||||
# This is required for the pg_hint_plan test, to prevent flaky log message causing the test to fail
|
||||
# It cannot be moved to Dockerfile now because the database directory is created after the start of the container
|
||||
echo Adding dummy config
|
||||
docker exec $COMPUTE_CONTAINER_NAME touch /var/db/postgres/compute/compute_ctl_temp_override.conf
|
||||
# This block is required for the pg_anon extension test.
|
||||
# The test assumes that it is running on the same host with the postgres engine.
|
||||
# In our case it's not true, that's why we are copying files to the compute node
|
||||
# The following block copies the files for the pg_hintplan test to the compute node for the extension test in an isolated docker-compose environment
|
||||
TMPDIR=$(mktemp -d)
|
||||
# Add support for pg_anon for pg_v16
|
||||
if [ $pg_version -ne 17 ]; then
|
||||
docker cp $TEST_CONTAINER_NAME:/ext-src/pg_anon-src/data $TMPDIR/data
|
||||
echo -e '1\t too \t many \t tabs' > $TMPDIR/data/bad.csv
|
||||
docker cp $TMPDIR/data $COMPUTE_CONTAINER_NAME:/tmp/tmp_anon_alternate_data
|
||||
rm -rf $TMPDIR
|
||||
fi
|
||||
TMPDIR=$(mktemp -d)
|
||||
# The following block does the same for the pg_hintplan test
|
||||
docker cp $TEST_CONTAINER_NAME:/ext-src/pg_hint_plan-src/data $TMPDIR/data
|
||||
docker cp $TMPDIR/data $COMPUTE_CONTAINER_NAME:/ext-src/pg_hint_plan-src/
|
||||
rm -rf $TMPDIR
|
||||
# We are running tests now
|
||||
if docker exec -e SKIP=timescaledb-src,rdkit-src,postgis-src,pgx_ulid-src,pgtap-src,pg_tiktoken-src,pg_jsonschema-src,pg_graphql-src,kq_imcx-src,wal2json_2_5-src \
|
||||
if ! docker exec -e SKIP=timescaledb-src,rdkit-src,postgis-src,pgx_ulid-src,pgtap-src,pg_tiktoken-src,pg_jsonschema-src,pg_graphql-src,kq_imcx-src,wal2json_2_5-src \
|
||||
$TEST_CONTAINER_NAME /run-tests.sh | tee testout.txt
|
||||
then
|
||||
cleanup
|
||||
else
|
||||
FAILED=$(tail -1 testout.txt)
|
||||
for d in $FAILED
|
||||
do
|
||||
@@ -101,13 +73,7 @@ for pg_version in ${TEST_VERSION_ONLY-14 15 16 17}; do
|
||||
cat $d/regression.out $d/regression.diffs || true
|
||||
done
|
||||
rm -rf $FAILED
|
||||
cleanup
|
||||
exit 1
|
||||
fi
|
||||
fi
|
||||
cleanup
|
||||
# Restore the original spec.json
|
||||
if [ "$pg_version" -ne 17 ]; then
|
||||
mv "$SPEC_PATH/spec.bak" "$SPEC_PATH/spec.json"
|
||||
fi
|
||||
done
|
||||
|
||||
@@ -7,7 +7,10 @@ LIST=$( (echo -e "${SKIP//","/"\n"}"; ls -d -- *-src) | sort | uniq -u)
|
||||
for d in ${LIST}
|
||||
do
|
||||
[ -d "${d}" ] || continue
|
||||
psql -c "select 1" >/dev/null || break
|
||||
if ! psql -w -c "select 1" >/dev/null; then
|
||||
FAILED="${d} ${FAILED}"
|
||||
break
|
||||
fi
|
||||
USE_PGXS=1 make -C "${d}" installcheck || FAILED="${d} ${FAILED}"
|
||||
done
|
||||
[ -z "${FAILED}" ] && exit 0
|
||||
|
||||
@@ -138,6 +138,13 @@ pub struct ComputeSpec {
|
||||
/// enough spare connections for reconfiguration process to succeed.
|
||||
#[serde(default = "default_reconfigure_concurrency")]
|
||||
pub reconfigure_concurrency: usize,
|
||||
|
||||
/// If set to true, the compute_ctl will drop all subscriptions before starting the
|
||||
/// compute. This is needed when we start an endpoint on a branch, so that child
|
||||
/// would not compete with parent branch subscriptions
|
||||
/// over the same replication content from publisher.
|
||||
#[serde(default)] // Default false
|
||||
pub drop_subscriptions_before_start: bool,
|
||||
}
|
||||
|
||||
/// Feature flag to signal `compute_ctl` to enable certain experimental functionality.
|
||||
|
||||
@@ -120,6 +120,7 @@ pub struct ConfigToml {
|
||||
pub no_sync: Option<bool>,
|
||||
pub wal_receiver_protocol: PostgresClientProtocol,
|
||||
pub page_service_pipelining: PageServicePipeliningConfig,
|
||||
pub get_vectored_concurrent_io: GetVectoredConcurrentIo,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, PartialEq, Eq, serde::Serialize, serde::Deserialize)]
|
||||
@@ -158,6 +159,25 @@ pub enum PageServiceProtocolPipelinedExecutionStrategy {
|
||||
Tasks,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, PartialEq, Eq, serde::Serialize, serde::Deserialize)]
|
||||
#[serde(tag = "mode", rename_all = "kebab-case")]
|
||||
#[serde(deny_unknown_fields)]
|
||||
pub enum GetVectoredConcurrentIo {
|
||||
/// The read path is fully sequential: layers are visited
|
||||
/// one after the other and IOs are issued and waited upon
|
||||
/// from the same task that traverses the layers.
|
||||
Sequential,
|
||||
/// The read path still traverses layers sequentially, and
|
||||
/// index blocks will be read into the PS PageCache from
|
||||
/// that task, with waiting.
|
||||
/// But data IOs are dispatched and waited upon from a sidecar
|
||||
/// task so that the traversing task can continue to traverse
|
||||
/// layers while the IOs are in flight.
|
||||
/// If the PS PageCache miss rate is low, this improves
|
||||
/// throughput dramatically.
|
||||
SidecarTask,
|
||||
}
|
||||
|
||||
pub mod statvfs {
|
||||
pub mod mock {
|
||||
#[derive(Debug, Clone, PartialEq, Eq, serde::Serialize, serde::Deserialize)]
|
||||
@@ -234,9 +254,18 @@ pub struct TenantConfigToml {
|
||||
// Duration::ZERO means automatic compaction is disabled.
|
||||
#[serde(with = "humantime_serde")]
|
||||
pub compaction_period: Duration,
|
||||
// Level0 delta layer threshold for compaction.
|
||||
/// Level0 delta layer threshold for compaction.
|
||||
pub compaction_threshold: usize,
|
||||
pub compaction_algorithm: crate::models::CompactionAlgorithmSettings,
|
||||
/// Level0 delta layer threshold at which to delay layer flushes for compaction backpressure,
|
||||
/// such that they take 2x as long, and start waiting for layer flushes during ephemeral layer
|
||||
/// rolls. This helps compaction keep up with WAL ingestion, and avoids read amplification
|
||||
/// blowing up. Should be >compaction_threshold. If None, defaults to 2 * compaction_threshold.
|
||||
/// 0 to disable.
|
||||
pub l0_flush_delay_threshold: Option<usize>,
|
||||
/// Level0 delta layer threshold at which to stall layer flushes. 0 to disable. If None,
|
||||
/// defaults to 4 * compaction_threshold. Must be >compaction_threshold to avoid deadlock.
|
||||
pub l0_flush_stall_threshold: Option<usize>,
|
||||
// Determines how much history is retained, to allow
|
||||
// branching and read replicas at an older point in time.
|
||||
// The unit is #of bytes of WAL.
|
||||
@@ -305,6 +334,16 @@ pub struct TenantConfigToml {
|
||||
/// Enable rel_size_v2 for this tenant. Once enabled, the tenant will persist this information into
|
||||
/// `index_part.json`, and it cannot be reversed.
|
||||
pub rel_size_v2_enabled: Option<bool>,
|
||||
|
||||
// gc-compaction related configs
|
||||
/// Enable automatic gc-compaction trigger on this tenant.
|
||||
pub gc_compaction_enabled: bool,
|
||||
/// The initial threshold for gc-compaction in KB. Once the total size of layers below the gc-horizon is above this threshold,
|
||||
/// gc-compaction will be triggered.
|
||||
pub gc_compaction_initial_threshold_kb: u64,
|
||||
/// The ratio that triggers the auto gc-compaction. If (the total size of layers between L2 LSN and gc-horizon) / (size below the L2 LSN)
|
||||
/// is above this ratio, gc-compaction will be triggered.
|
||||
pub gc_compaction_ratio_percent: u64,
|
||||
}
|
||||
|
||||
pub mod defaults {
|
||||
@@ -454,6 +493,11 @@ impl Default for ConfigToml {
|
||||
execution: PageServiceProtocolPipelinedExecutionStrategy::ConcurrentFutures,
|
||||
})
|
||||
},
|
||||
get_vectored_concurrent_io: if !cfg!(test) {
|
||||
GetVectoredConcurrentIo::Sequential
|
||||
} else {
|
||||
GetVectoredConcurrentIo::SidecarTask
|
||||
},
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -498,6 +542,9 @@ pub mod tenant_conf_defaults {
|
||||
// By default ingest enough WAL for two new L0 layers before checking if new image
|
||||
// image layers should be created.
|
||||
pub const DEFAULT_IMAGE_LAYER_CREATION_CHECK_THRESHOLD: u8 = 2;
|
||||
pub const DEFAULT_GC_COMPACTION_ENABLED: bool = false;
|
||||
pub const DEFAULT_GC_COMPACTION_INITIAL_THRESHOLD_KB: u64 = 10240000;
|
||||
pub const DEFAULT_GC_COMPACTION_RATIO_PERCENT: u64 = 100;
|
||||
}
|
||||
|
||||
impl Default for TenantConfigToml {
|
||||
@@ -514,6 +561,8 @@ impl Default for TenantConfigToml {
|
||||
compaction_algorithm: crate::models::CompactionAlgorithmSettings {
|
||||
kind: DEFAULT_COMPACTION_ALGORITHM,
|
||||
},
|
||||
l0_flush_delay_threshold: None,
|
||||
l0_flush_stall_threshold: None,
|
||||
gc_horizon: DEFAULT_GC_HORIZON,
|
||||
gc_period: humantime::parse_duration(DEFAULT_GC_PERIOD)
|
||||
.expect("cannot parse default gc period"),
|
||||
@@ -543,6 +592,9 @@ impl Default for TenantConfigToml {
|
||||
timeline_offloading: false,
|
||||
wal_receiver_protocol_override: None,
|
||||
rel_size_v2_enabled: None,
|
||||
gc_compaction_enabled: DEFAULT_GC_COMPACTION_ENABLED,
|
||||
gc_compaction_initial_threshold_kb: DEFAULT_GC_COMPACTION_INITIAL_THRESHOLD_KB,
|
||||
gc_compaction_ratio_percent: DEFAULT_GC_COMPACTION_RATIO_PERCENT,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -462,6 +462,10 @@ pub struct TenantConfigPatch {
|
||||
#[serde(skip_serializing_if = "FieldPatch::is_noop")]
|
||||
pub compaction_algorithm: FieldPatch<CompactionAlgorithmSettings>,
|
||||
#[serde(skip_serializing_if = "FieldPatch::is_noop")]
|
||||
pub l0_flush_delay_threshold: FieldPatch<usize>,
|
||||
#[serde(skip_serializing_if = "FieldPatch::is_noop")]
|
||||
pub l0_flush_stall_threshold: FieldPatch<usize>,
|
||||
#[serde(skip_serializing_if = "FieldPatch::is_noop")]
|
||||
pub gc_horizon: FieldPatch<u64>,
|
||||
#[serde(skip_serializing_if = "FieldPatch::is_noop")]
|
||||
pub gc_period: FieldPatch<String>,
|
||||
@@ -499,6 +503,12 @@ pub struct TenantConfigPatch {
|
||||
pub wal_receiver_protocol_override: FieldPatch<PostgresClientProtocol>,
|
||||
#[serde(skip_serializing_if = "FieldPatch::is_noop")]
|
||||
pub rel_size_v2_enabled: FieldPatch<bool>,
|
||||
#[serde(skip_serializing_if = "FieldPatch::is_noop")]
|
||||
pub gc_compaction_enabled: FieldPatch<bool>,
|
||||
#[serde(skip_serializing_if = "FieldPatch::is_noop")]
|
||||
pub gc_compaction_initial_threshold_kb: FieldPatch<u64>,
|
||||
#[serde(skip_serializing_if = "FieldPatch::is_noop")]
|
||||
pub gc_compaction_ratio_percent: FieldPatch<u64>,
|
||||
}
|
||||
|
||||
/// An alternative representation of `pageserver::tenant::TenantConf` with
|
||||
@@ -512,6 +522,8 @@ pub struct TenantConfig {
|
||||
pub compaction_threshold: Option<usize>,
|
||||
// defer parsing compaction_algorithm, like eviction_policy
|
||||
pub compaction_algorithm: Option<CompactionAlgorithmSettings>,
|
||||
pub l0_flush_delay_threshold: Option<usize>,
|
||||
pub l0_flush_stall_threshold: Option<usize>,
|
||||
pub gc_horizon: Option<u64>,
|
||||
pub gc_period: Option<String>,
|
||||
pub image_creation_threshold: Option<usize>,
|
||||
@@ -531,6 +543,9 @@ pub struct TenantConfig {
|
||||
pub timeline_offloading: Option<bool>,
|
||||
pub wal_receiver_protocol_override: Option<PostgresClientProtocol>,
|
||||
pub rel_size_v2_enabled: Option<bool>,
|
||||
pub gc_compaction_enabled: Option<bool>,
|
||||
pub gc_compaction_initial_threshold_kb: Option<u64>,
|
||||
pub gc_compaction_ratio_percent: Option<u64>,
|
||||
}
|
||||
|
||||
impl TenantConfig {
|
||||
@@ -542,6 +557,8 @@ impl TenantConfig {
|
||||
mut compaction_period,
|
||||
mut compaction_threshold,
|
||||
mut compaction_algorithm,
|
||||
mut l0_flush_delay_threshold,
|
||||
mut l0_flush_stall_threshold,
|
||||
mut gc_horizon,
|
||||
mut gc_period,
|
||||
mut image_creation_threshold,
|
||||
@@ -561,6 +578,9 @@ impl TenantConfig {
|
||||
mut timeline_offloading,
|
||||
mut wal_receiver_protocol_override,
|
||||
mut rel_size_v2_enabled,
|
||||
mut gc_compaction_enabled,
|
||||
mut gc_compaction_initial_threshold_kb,
|
||||
mut gc_compaction_ratio_percent,
|
||||
} = self;
|
||||
|
||||
patch.checkpoint_distance.apply(&mut checkpoint_distance);
|
||||
@@ -571,6 +591,12 @@ impl TenantConfig {
|
||||
patch.compaction_period.apply(&mut compaction_period);
|
||||
patch.compaction_threshold.apply(&mut compaction_threshold);
|
||||
patch.compaction_algorithm.apply(&mut compaction_algorithm);
|
||||
patch
|
||||
.l0_flush_delay_threshold
|
||||
.apply(&mut l0_flush_delay_threshold);
|
||||
patch
|
||||
.l0_flush_stall_threshold
|
||||
.apply(&mut l0_flush_stall_threshold);
|
||||
patch.gc_horizon.apply(&mut gc_horizon);
|
||||
patch.gc_period.apply(&mut gc_period);
|
||||
patch
|
||||
@@ -606,6 +632,15 @@ impl TenantConfig {
|
||||
.wal_receiver_protocol_override
|
||||
.apply(&mut wal_receiver_protocol_override);
|
||||
patch.rel_size_v2_enabled.apply(&mut rel_size_v2_enabled);
|
||||
patch
|
||||
.gc_compaction_enabled
|
||||
.apply(&mut gc_compaction_enabled);
|
||||
patch
|
||||
.gc_compaction_initial_threshold_kb
|
||||
.apply(&mut gc_compaction_initial_threshold_kb);
|
||||
patch
|
||||
.gc_compaction_ratio_percent
|
||||
.apply(&mut gc_compaction_ratio_percent);
|
||||
|
||||
Self {
|
||||
checkpoint_distance,
|
||||
@@ -614,6 +649,8 @@ impl TenantConfig {
|
||||
compaction_period,
|
||||
compaction_threshold,
|
||||
compaction_algorithm,
|
||||
l0_flush_delay_threshold,
|
||||
l0_flush_stall_threshold,
|
||||
gc_horizon,
|
||||
gc_period,
|
||||
image_creation_threshold,
|
||||
@@ -633,6 +670,9 @@ impl TenantConfig {
|
||||
timeline_offloading,
|
||||
wal_receiver_protocol_override,
|
||||
rel_size_v2_enabled,
|
||||
gc_compaction_enabled,
|
||||
gc_compaction_initial_threshold_kb,
|
||||
gc_compaction_ratio_percent,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -2,6 +2,7 @@

use std::{fmt::Display, str::FromStr};

/// For types `V` that implement [`FromStr`].
pub fn var<V, E>(varname: &str) -> Option<V>
where
V: FromStr<Err = E>,
@@ -10,7 +11,9 @@ where
match std::env::var(varname) {
Ok(s) => Some(
s.parse()
.map_err(|e| format!("failed to parse env var {varname}: {e:#}"))
.map_err(|e| {
format!("failed to parse env var {varname} using FromStr::parse: {e:#}")
})
.unwrap(),
),
Err(std::env::VarError::NotPresent) => None,
@@ -19,3 +22,24 @@ where
}
}
}

/// For types `V` that implement [`serde::de::DeserializeOwned`].
pub fn var_serde_json_string<V>(varname: &str) -> Option<V>
where
V: serde::de::DeserializeOwned,
{
match std::env::var(varname) {
Ok(s) => Some({
let value = serde_json::Value::String(s);
serde_json::from_value(value)
.map_err(|e| {
format!("failed to parse env var {varname} as a serde_json json string: {e:#}")
})
.unwrap()
}),
Err(std::env::VarError::NotPresent) => None,
Err(std::env::VarError::NotUnicode(_)) => {
panic!("env var {varname} is not unicode")
}
}
}

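As a usage note (not part of the diff): a minimal sketch of how these two helpers are typically called. The environment variable names, the enum, and the `utils::env` module path are illustrative assumptions, not taken from this branch.

    // Hypothetical caller; assumes the helpers above live in utils::env.
    use serde::Deserialize;

    #[derive(Debug, Deserialize)]
    #[serde(rename_all = "kebab-case")]
    enum ExampleIoMode {
        Sequential,
        SidecarTask,
    }

    fn read_test_overrides() {
        // FromStr-based: None when unset, panics with a descriptive message on a parse error.
        let queue_depth: Option<usize> = utils::env::var("EXAMPLE_QUEUE_DEPTH");
        // Serde-based: the raw string is wrapped in serde_json::Value::String, so
        // kebab-case unit variants such as "sidecar-task" deserialize directly.
        let io_mode: Option<ExampleIoMode> = utils::env::var_serde_json_string("EXAMPLE_IO_MODE");
        println!("queue_depth={queue_depth:?} io_mode={io_mode:?}");
    }
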
@@ -11,31 +11,55 @@ use tracing::*;

/// Declare a failpoint that can be used with the `pause` failpoint action.
/// We don't want to block the executor thread, hence spawn_blocking + await.
///
/// Optionally pass a cancellation token, and this failpoint will drop out of
/// its pause when the cancellation token fires. This is useful for testing
/// cases where we would like to block something, but test its clean shutdown behavior.
/// The macro evaluates to a Result in that case, where Ok(()) is the case
/// where the failpoint was not paused, and Err(()) is the case where the
/// cancellation token fired while evaluating the failpoint.
///
/// Remember to unpause the failpoint in the test; until that happens, one of the
/// limited number of spawn_blocking thread pool threads is leaked.
#[macro_export]
macro_rules! pausable_failpoint {
($name:literal) => {
($name:literal) => {{
if cfg!(feature = "testing") {
tokio::task::spawn_blocking({
let current = tracing::Span::current();
let cancel = ::tokio_util::sync::CancellationToken::new();
let _ = $crate::pausable_failpoint!($name, &cancel);
}
}};
($name:literal, $cancel:expr) => {{
if cfg!(feature = "testing") {
let failpoint_fut = ::tokio::task::spawn_blocking({
let current = ::tracing::Span::current();
move || {
let _entered = current.entered();
tracing::info!("at failpoint {}", $name);
fail::fail_point!($name);
::tracing::info!("at failpoint {}", $name);
::fail::fail_point!($name);
}
});
let cancel_fut = async move {
$cancel.cancelled().await;
};
::tokio::select! {
res = failpoint_fut => {
res.expect("spawn_blocking");
// continue with execution
Ok(())
},
_ = cancel_fut => {
Err(())
}
})
.await
.expect("spawn_blocking");
}
};
($name:literal, $cond:expr) => {
if cfg!(feature = "testing") {
if $cond {
pausable_failpoint!($name)
}
} else {
Ok(())
}
};
}};
}

pub use pausable_failpoint;

/// use with fail::cfg("$name", "return(2000)")
///
/// The effect is similar to a "sleep(2000)" action, i.e. we sleep for the

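For orientation (not part of the diff), a hedged sketch of how the cancellable form is meant to be used. The failpoint name, the surrounding function, and the `utils::` macro path are made up for illustration.

    // Assumed to run inside a crate built with the "testing" feature enabled.
    async fn shutdown_aware_step(cancel: &tokio_util::sync::CancellationToken) -> Result<(), ()> {
        // A test would first arm the failpoint, e.g. fail::cfg("example-step-pause", "pause"),
        // and later disarm it with fail::cfg("example-step-pause", "off").
        //
        // While paused, this parks on a spawn_blocking thread; if `cancel` fires
        // first, the macro returns Err(()) and we bail out instead of hanging.
        utils::pausable_failpoint!("example-step-pause", cancel)?;
        Ok(())
    }
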
@@ -64,6 +64,12 @@ pub struct GateGuard {
gate: Arc<GateInner>,
}

impl GateGuard {
pub fn try_clone(&self) -> Result<Self, GateError> {
Gate::enter_impl(self.gate.clone())
}
}

impl Drop for GateGuard {
fn drop(&mut self) {
if self.gate.closing.load(Ordering::Relaxed) {
@@ -107,11 +113,11 @@ impl Gate {
/// to avoid blocking close() indefinitely: typically types that contain a Gate will
/// also contain a CancellationToken.
pub fn enter(&self) -> Result<GateGuard, GateError> {
let permit = self
.inner
.sem
.try_acquire()
.map_err(|_| GateError::GateClosed)?;
Self::enter_impl(self.inner.clone())
}

fn enter_impl(gate: Arc<GateInner>) -> Result<GateGuard, GateError> {
let permit = gate.sem.try_acquire().map_err(|_| GateError::GateClosed)?;

// we now have the permit, let's disable the normal raii functionality and leave
// "returning" the permit to our GateGuard::drop.
@@ -122,7 +128,7 @@ impl Gate {

Ok(GateGuard {
span_at_enter: tracing::Span::current(),
gate: self.inner.clone(),
gate,
})
}

@@ -252,4 +258,39 @@ mod tests {
// Attempting to enter() is still forbidden
gate.enter().expect_err("enter should fail finishing close");
}

#[tokio::test(start_paused = true)]
async fn clone_gate_guard() {
let gate = Gate::default();
let forever = Duration::from_secs(24 * 7 * 365);

let guard1 = gate.enter().expect("gate isn't closed");

let guard2 = guard1.try_clone().expect("gate isn't closed");

let mut close_fut = std::pin::pin!(gate.close());

tokio::time::timeout(forever, &mut close_fut)
.await
.unwrap_err();

// we polled close_fut once, that should prevent all later enters and clones
gate.enter().unwrap_err();
guard1.try_clone().unwrap_err();
guard2.try_clone().unwrap_err();

// guard2 keeps the gate open even after guard1 is dropped
drop(guard1);
tokio::time::timeout(forever, &mut close_fut)
.await
.unwrap_err();

drop(guard2);

// now that the last guard is dropped, closing should complete
close_fut.await;

// entering is still forbidden
gate.enter().expect_err("enter should still fail");
}
}

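A short, hedged sketch of the pattern `try_clone` enables: handing an independent guard to a helper task so that `Gate::close` also waits for that task. The function and the task body are illustrative, not taken from this branch.

    use utils::sync::gate::Gate;

    async fn example() -> anyhow::Result<()> {
        let gate = Gate::default();
        let guard = gate.enter().map_err(|_| anyhow::anyhow!("gate already closed"))?;

        // An extra guard for a background task; close() now waits for both guards.
        let task_guard = guard.try_clone().map_err(|_| anyhow::anyhow!("gate already closed"))?;
        let task = tokio::spawn(async move {
            // ... do work while holding the gate open ...
            drop(task_guard);
        });

        drop(guard);
        task.await?;
        gate.close().await; // completes once every guard has been dropped
        Ok(())
    }
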
@@ -17,7 +17,6 @@ postgres_ffi.workspace = true
serde.workspace = true
thiserror.workspace = true
tokio = { workspace = true, features = ["io-util"] }
tonic.workspace = true
tracing.workspace = true
utils.workspace = true
workspace_hack = { version = "0.1", path = "../../workspace_hack" }

@@ -45,7 +45,7 @@ pub mod proto {
#![allow(clippy::derive_partial_eq_without_eq)]
// The generated ValueMeta has a `len` method generated for its `len` field.
#![allow(clippy::len_without_is_empty)]
tonic::include_proto!("interpreted_wal");
include!(concat!(env!("OUT_DIR"), concat!("/interpreted_wal.rs")));
}

#[derive(Copy, Clone, Serialize, Deserialize)]

@@ -13,7 +13,7 @@ use rand::prelude::*;
|
||||
use tokio::task::JoinSet;
|
||||
use tracing::info;
|
||||
|
||||
use std::collections::HashSet;
|
||||
use std::collections::{HashSet, VecDeque};
|
||||
use std::future::Future;
|
||||
use std::num::NonZeroUsize;
|
||||
use std::pin::Pin;
|
||||
@@ -63,6 +63,10 @@ pub(crate) struct Args {
|
||||
#[clap(long)]
|
||||
set_io_mode: Option<pageserver_api::models::virtual_file::IoMode>,
|
||||
|
||||
/// Queue depth generated in each client.
|
||||
#[clap(long, default_value = "1")]
|
||||
queue_depth: NonZeroUsize,
|
||||
|
||||
targets: Option<Vec<TenantTimelineId>>,
|
||||
}
|
||||
|
||||
@@ -298,6 +302,7 @@ async fn main_impl(
|
||||
start_work_barrier.wait().await;
|
||||
let client_start = Instant::now();
|
||||
let mut ticks_processed = 0;
|
||||
let mut inflight = VecDeque::new();
|
||||
while !cancel.is_cancelled() {
|
||||
// Detect if a request took longer than the RPS rate
|
||||
if let Some(period) = &rps_period {
|
||||
@@ -311,31 +316,37 @@ async fn main_impl(
|
||||
ticks_processed = periods_passed_until_now;
|
||||
}
|
||||
|
||||
let start = Instant::now();
|
||||
let req = {
|
||||
let mut rng = rand::thread_rng();
|
||||
let r = &ranges[weights.sample(&mut rng)];
|
||||
let key: i128 = rng.gen_range(r.start..r.end);
|
||||
let key = Key::from_i128(key);
|
||||
assert!(key.is_rel_block_key());
|
||||
let (rel_tag, block_no) = key
|
||||
.to_rel_block()
|
||||
.expect("we filter non-rel-block keys out above");
|
||||
PagestreamGetPageRequest {
|
||||
hdr: PagestreamRequest {
|
||||
reqid: 0,
|
||||
request_lsn: if rng.gen_bool(args.req_latest_probability) {
|
||||
Lsn::MAX
|
||||
} else {
|
||||
r.timeline_lsn
|
||||
while inflight.len() < args.queue_depth.get() {
|
||||
let start = Instant::now();
|
||||
let req = {
|
||||
let mut rng = rand::thread_rng();
|
||||
let r = &ranges[weights.sample(&mut rng)];
|
||||
let key: i128 = rng.gen_range(r.start..r.end);
|
||||
let key = Key::from_i128(key);
|
||||
assert!(key.is_rel_block_key());
|
||||
let (rel_tag, block_no) = key
|
||||
.to_rel_block()
|
||||
.expect("we filter non-rel-block keys out above");
|
||||
PagestreamGetPageRequest {
|
||||
hdr: PagestreamRequest {
|
||||
reqid: 0,
|
||||
request_lsn: if rng.gen_bool(args.req_latest_probability) {
|
||||
Lsn::MAX
|
||||
} else {
|
||||
r.timeline_lsn
|
||||
},
|
||||
not_modified_since: r.timeline_lsn,
|
||||
},
|
||||
not_modified_since: r.timeline_lsn,
|
||||
},
|
||||
rel: rel_tag,
|
||||
blkno: block_no,
|
||||
}
|
||||
};
|
||||
client.getpage(req).await.unwrap();
|
||||
rel: rel_tag,
|
||||
blkno: block_no,
|
||||
}
|
||||
};
|
||||
client.getpage_send(req).await.unwrap();
|
||||
inflight.push_back(start);
|
||||
}
|
||||
|
||||
let start = inflight.pop_front().unwrap();
|
||||
client.getpage_recv().await.unwrap();
|
||||
let end = Instant::now();
|
||||
live_stats.request_done();
|
||||
ticks_processed += 1;
|
||||
|
||||
@@ -25,6 +25,7 @@ use tokio_tar::{Builder, EntryType, Header};
|
||||
|
||||
use crate::context::RequestContext;
|
||||
use crate::pgdatadir_mapping::Version;
|
||||
use crate::tenant::storage_layer::IoConcurrency;
|
||||
use crate::tenant::Timeline;
|
||||
use pageserver_api::reltag::{RelTag, SlruKind};
|
||||
|
||||
@@ -123,6 +124,13 @@ where
|
||||
full_backup,
|
||||
replica,
|
||||
ctx,
|
||||
io_concurrency: IoConcurrency::spawn_from_conf(
|
||||
timeline.conf,
|
||||
timeline
|
||||
.gate
|
||||
.enter()
|
||||
.map_err(|e| BasebackupError::Server(e.into()))?,
|
||||
),
|
||||
};
|
||||
basebackup
|
||||
.send_tarball()
|
||||
@@ -144,6 +152,7 @@ where
|
||||
full_backup: bool,
|
||||
replica: bool,
|
||||
ctx: &'a RequestContext,
|
||||
io_concurrency: IoConcurrency,
|
||||
}
|
||||
|
||||
/// A sink that accepts SLRU blocks ordered by key and forwards
|
||||
@@ -303,7 +312,7 @@ where
|
||||
for part in slru_partitions.parts {
|
||||
let blocks = self
|
||||
.timeline
|
||||
.get_vectored(part, self.lsn, self.ctx)
|
||||
.get_vectored(part, self.lsn, self.io_concurrency.clone(), self.ctx)
|
||||
.await
|
||||
.map_err(|e| BasebackupError::Server(e.into()))?;
|
||||
|
||||
@@ -358,7 +367,7 @@ where
|
||||
let start_time = Instant::now();
|
||||
let aux_files = self
|
||||
.timeline
|
||||
.list_aux_files(self.lsn, self.ctx)
|
||||
.list_aux_files(self.lsn, self.ctx, self.io_concurrency.clone())
|
||||
.await
|
||||
.map_err(|e| BasebackupError::Server(e.into()))?;
|
||||
let aux_scan_time = start_time.elapsed();
|
||||
@@ -422,7 +431,7 @@ where
|
||||
}
|
||||
let repl_origins = self
|
||||
.timeline
|
||||
.get_replorigins(self.lsn, self.ctx)
|
||||
.get_replorigins(self.lsn, self.ctx, self.io_concurrency.clone())
|
||||
.await
|
||||
.map_err(|e| BasebackupError::Server(e.into()))?;
|
||||
let n_origins = repl_origins.len();
|
||||
@@ -489,7 +498,13 @@ where
|
||||
for blknum in startblk..endblk {
|
||||
let img = self
|
||||
.timeline
|
||||
.get_rel_page_at_lsn(src, blknum, Version::Lsn(self.lsn), self.ctx)
|
||||
.get_rel_page_at_lsn(
|
||||
src,
|
||||
blknum,
|
||||
Version::Lsn(self.lsn),
|
||||
self.ctx,
|
||||
self.io_concurrency.clone(),
|
||||
)
|
||||
.await
|
||||
.map_err(|e| BasebackupError::Server(e.into()))?;
|
||||
segment_data.extend_from_slice(&img[..]);
|
||||
|
||||
@@ -135,6 +135,7 @@ fn main() -> anyhow::Result<()> {
info!(?conf.virtual_file_io_mode, "starting with virtual_file IO mode");
info!(?conf.wal_receiver_protocol, "starting with WAL receiver protocol");
info!(?conf.page_service_pipelining, "starting with page service pipelining config");
info!(?conf.get_vectored_concurrent_io, "starting with get_vectored IO concurrency config");

// The tenants directory contains all the pageserver local disk state.
// Create if not exists and make sure all the contents are durable before proceeding.

@@ -191,6 +191,8 @@ pub struct PageServerConf {
pub wal_receiver_protocol: PostgresClientProtocol,

pub page_service_pipelining: pageserver_api::config::PageServicePipeliningConfig,

pub get_vectored_concurrent_io: pageserver_api::config::GetVectoredConcurrentIo,
}

/// Token for authentication to safekeepers
@@ -352,6 +354,7 @@ impl PageServerConf {
no_sync,
wal_receiver_protocol,
page_service_pipelining,
get_vectored_concurrent_io,
} = config_toml;

let mut conf = PageServerConf {
@@ -396,6 +399,7 @@ impl PageServerConf {
import_pgdata_aws_endpoint_url,
wal_receiver_protocol,
page_service_pipelining,
get_vectored_concurrent_io,

// ------------------------------------------------------------
// fields that require additional validation or custom handling

@@ -84,6 +84,7 @@ use crate::tenant::remote_timeline_client::list_remote_tenant_shards;
use crate::tenant::remote_timeline_client::list_remote_timelines;
use crate::tenant::secondary::SecondaryController;
use crate::tenant::size::ModelInputs;
use crate::tenant::storage_layer::IoConcurrency;
use crate::tenant::storage_layer::LayerAccessStatsReset;
use crate::tenant::storage_layer::LayerName;
use crate::tenant::timeline::import_pgdata;
@@ -2938,8 +2939,15 @@ async fn list_aux_files(
active_timeline_of_active_tenant(&state.tenant_manager, tenant_shard_id, timeline_id)
.await?;

let io_concurrency = IoConcurrency::spawn_from_conf(
state.conf,
timeline.gate.enter().map_err(|_| ApiError::Cancelled)?,
);

let ctx = RequestContext::new(TaskKind::MgmtRequest, DownloadBehavior::Download);
let files = timeline.list_aux_files(body.lsn, &ctx).await?;
let files = timeline
.list_aux_files(body.lsn, &ctx, io_concurrency)
.await?;
json_response(StatusCode::OK, files)
}

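Since the same three-step pattern recurs throughout this branch, here is a condensed, hedged sketch of it; the function and its arguments are illustrative, only the `IoConcurrency` calls mirror the diff above. The steps: take a gate guard so the sidecar IO task cannot outlive the timeline, build `IoConcurrency` from the pageserver config, then pass it (cheaply cloneable) into each vectored read.

    // Sketch only; types are the crate-internal ones used in the hunks above.
    async fn read_keyspace(
        timeline: &Timeline,
        conf: &'static PageServerConf,
        keyspace: KeySpace,
        lsn: Lsn,
        ctx: &RequestContext,
    ) -> anyhow::Result<()> {
        let gate_guard = timeline
            .gate
            .enter()
            .map_err(|_| anyhow::anyhow!("timeline is shutting down"))?;
        // One IoConcurrency value can be cloned and reused across several reads.
        let io_concurrency = IoConcurrency::spawn_from_conf(conf, gate_guard);
        let _values = timeline
            .get_vectored(keyspace, lsn, io_concurrency.clone(), ctx)
            .await?;
        Ok(())
    }
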
@@ -3,7 +3,7 @@ use metrics::{
|
||||
register_counter_vec, register_gauge_vec, register_histogram, register_histogram_vec,
|
||||
register_int_counter, register_int_counter_pair_vec, register_int_counter_vec,
|
||||
register_int_gauge, register_int_gauge_vec, register_uint_gauge, register_uint_gauge_vec,
|
||||
Counter, CounterVec, Gauge, GaugeVec, Histogram, HistogramVec, IntCounter, IntCounterPair,
|
||||
Counter, CounterVec, GaugeVec, Histogram, HistogramVec, IntCounter, IntCounterPair,
|
||||
IntCounterPairVec, IntCounterVec, IntGauge, IntGaugeVec, UIntGauge, UIntGaugeVec,
|
||||
};
|
||||
use once_cell::sync::Lazy;
|
||||
@@ -38,6 +38,9 @@ pub(crate) enum StorageTimeOperation {
|
||||
#[strum(serialize = "layer flush")]
|
||||
LayerFlush,
|
||||
|
||||
#[strum(serialize = "layer flush delay")]
|
||||
LayerFlushDelay,
|
||||
|
||||
#[strum(serialize = "compact")]
|
||||
Compact,
|
||||
|
||||
@@ -126,73 +129,6 @@ pub(crate) static INITDB_RUN_TIME: Lazy<Histogram> = Lazy::new(|| {
|
||||
.expect("failed to define metric")
|
||||
});
|
||||
|
||||
// Metrics collected on operations on the storage repository.
|
||||
#[derive(
|
||||
Clone, Copy, enum_map::Enum, strum_macros::EnumString, strum_macros::Display, IntoStaticStr,
|
||||
)]
|
||||
pub(crate) enum GetKind {
|
||||
Singular,
|
||||
Vectored,
|
||||
}
|
||||
|
||||
pub(crate) struct ReconstructTimeMetrics {
|
||||
singular: Histogram,
|
||||
vectored: Histogram,
|
||||
}
|
||||
|
||||
pub(crate) static RECONSTRUCT_TIME: Lazy<ReconstructTimeMetrics> = Lazy::new(|| {
|
||||
let inner = register_histogram_vec!(
|
||||
"pageserver_getpage_reconstruct_seconds",
|
||||
"Time spent in reconstruct_value (reconstruct a page from deltas)",
|
||||
&["get_kind"],
|
||||
CRITICAL_OP_BUCKETS.into(),
|
||||
)
|
||||
.expect("failed to define a metric");
|
||||
|
||||
ReconstructTimeMetrics {
|
||||
singular: inner.with_label_values(&[GetKind::Singular.into()]),
|
||||
vectored: inner.with_label_values(&[GetKind::Vectored.into()]),
|
||||
}
|
||||
});
|
||||
|
||||
impl ReconstructTimeMetrics {
|
||||
pub(crate) fn for_get_kind(&self, get_kind: GetKind) -> &Histogram {
|
||||
match get_kind {
|
||||
GetKind::Singular => &self.singular,
|
||||
GetKind::Vectored => &self.vectored,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub(crate) struct ReconstructDataTimeMetrics {
|
||||
singular: Histogram,
|
||||
vectored: Histogram,
|
||||
}
|
||||
|
||||
impl ReconstructDataTimeMetrics {
|
||||
pub(crate) fn for_get_kind(&self, get_kind: GetKind) -> &Histogram {
|
||||
match get_kind {
|
||||
GetKind::Singular => &self.singular,
|
||||
GetKind::Vectored => &self.vectored,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub(crate) static GET_RECONSTRUCT_DATA_TIME: Lazy<ReconstructDataTimeMetrics> = Lazy::new(|| {
|
||||
let inner = register_histogram_vec!(
|
||||
"pageserver_getpage_get_reconstruct_data_seconds",
|
||||
"Time spent in get_reconstruct_value_data",
|
||||
&["get_kind"],
|
||||
CRITICAL_OP_BUCKETS.into(),
|
||||
)
|
||||
.expect("failed to define a metric");
|
||||
|
||||
ReconstructDataTimeMetrics {
|
||||
singular: inner.with_label_values(&[GetKind::Singular.into()]),
|
||||
vectored: inner.with_label_values(&[GetKind::Vectored.into()]),
|
||||
}
|
||||
});
|
||||
|
||||
pub(crate) struct GetVectoredLatency {
|
||||
map: EnumMap<TaskKind, Option<Histogram>>,
|
||||
}
|
||||
@@ -462,15 +398,6 @@ pub(crate) static WAIT_LSN_TIME: Lazy<Histogram> = Lazy::new(|| {
|
||||
.expect("failed to define a metric")
|
||||
});
|
||||
|
||||
static FLUSH_WAIT_UPLOAD_TIME: Lazy<GaugeVec> = Lazy::new(|| {
|
||||
register_gauge_vec!(
|
||||
"pageserver_flush_wait_upload_seconds",
|
||||
"Time spent waiting for preceding uploads during layer flush",
|
||||
&["tenant_id", "shard_id", "timeline_id"]
|
||||
)
|
||||
.expect("failed to define a metric")
|
||||
});
|
||||
|
||||
static LAST_RECORD_LSN: Lazy<IntGaugeVec> = Lazy::new(|| {
|
||||
register_int_gauge_vec!(
|
||||
"pageserver_last_record_lsn",
|
||||
@@ -2550,12 +2477,19 @@ impl StorageTimeMetricsTimer {
|
||||
}
|
||||
}
|
||||
|
||||
/// Record the time from creation to now.
|
||||
pub fn stop_and_record(self) {
|
||||
let duration = self.start.elapsed().as_secs_f64();
|
||||
self.metrics.timeline_sum.inc_by(duration);
|
||||
/// Returns the elapsed duration of the timer.
|
||||
pub fn elapsed(&self) -> Duration {
|
||||
self.start.elapsed()
|
||||
}
|
||||
|
||||
/// Record the time from creation to now and return it.
|
||||
pub fn stop_and_record(self) -> Duration {
|
||||
let duration = self.elapsed();
|
||||
let seconds = duration.as_secs_f64();
|
||||
self.metrics.timeline_sum.inc_by(seconds);
|
||||
self.metrics.timeline_count.inc();
|
||||
self.metrics.global_histogram.observe(duration);
|
||||
self.metrics.global_histogram.observe(seconds);
|
||||
duration
|
||||
}
|
||||
|
||||
/// Turns this timer into a timer, which will always record -- usually this means recording
|
||||
@@ -2575,6 +2509,13 @@ impl Drop for AlwaysRecordingStorageTimeMetricsTimer {
|
||||
}
|
||||
}
|
||||
|
||||
impl AlwaysRecordingStorageTimeMetricsTimer {
|
||||
/// Returns the elapsed duration of the timer.
|
||||
pub fn elapsed(&self) -> Duration {
|
||||
self.0.as_ref().expect("not dropped yet").elapsed()
|
||||
}
|
||||
}
|
||||
|
||||
/// Timing facilities for an globally histogrammed metric, which is supported by per tenant and
|
||||
/// timeline total sum and count.
|
||||
#[derive(Clone, Debug)]
|
||||
@@ -2627,7 +2568,7 @@ pub(crate) struct TimelineMetrics {
|
||||
shard_id: String,
|
||||
timeline_id: String,
|
||||
pub flush_time_histo: StorageTimeMetrics,
|
||||
pub flush_wait_upload_time_gauge: Gauge,
|
||||
pub flush_delay_histo: StorageTimeMetrics,
|
||||
pub compact_time_histo: StorageTimeMetrics,
|
||||
pub create_images_time_histo: StorageTimeMetrics,
|
||||
pub logical_size_histo: StorageTimeMetrics,
|
||||
@@ -2673,9 +2614,12 @@ impl TimelineMetrics {
|
||||
&shard_id,
|
||||
&timeline_id,
|
||||
);
|
||||
let flush_wait_upload_time_gauge = FLUSH_WAIT_UPLOAD_TIME
|
||||
.get_metric_with_label_values(&[&tenant_id, &shard_id, &timeline_id])
|
||||
.unwrap();
|
||||
let flush_delay_histo = StorageTimeMetrics::new(
|
||||
StorageTimeOperation::LayerFlushDelay,
|
||||
&tenant_id,
|
||||
&shard_id,
|
||||
&timeline_id,
|
||||
);
|
||||
let compact_time_histo = StorageTimeMetrics::new(
|
||||
StorageTimeOperation::Compact,
|
||||
&tenant_id,
|
||||
@@ -2821,7 +2765,7 @@ impl TimelineMetrics {
|
||||
shard_id,
|
||||
timeline_id,
|
||||
flush_time_histo,
|
||||
flush_wait_upload_time_gauge,
|
||||
flush_delay_histo,
|
||||
compact_time_histo,
|
||||
create_images_time_histo,
|
||||
logical_size_histo,
|
||||
@@ -2871,14 +2815,6 @@ impl TimelineMetrics {
|
||||
self.resident_physical_size_gauge.get()
|
||||
}
|
||||
|
||||
pub(crate) fn flush_wait_upload_time_gauge_add(&self, duration: f64) {
|
||||
self.flush_wait_upload_time_gauge.add(duration);
|
||||
crate::metrics::FLUSH_WAIT_UPLOAD_TIME
|
||||
.get_metric_with_label_values(&[&self.tenant_id, &self.shard_id, &self.timeline_id])
|
||||
.unwrap()
|
||||
.add(duration);
|
||||
}
|
||||
|
||||
pub(crate) fn shutdown(&self) {
|
||||
let was_shutdown = self
|
||||
.shutdown
|
||||
@@ -2896,7 +2832,6 @@ impl TimelineMetrics {
|
||||
let shard_id = &self.shard_id;
|
||||
let _ = LAST_RECORD_LSN.remove_label_values(&[tenant_id, shard_id, timeline_id]);
|
||||
let _ = DISK_CONSISTENT_LSN.remove_label_values(&[tenant_id, shard_id, timeline_id]);
|
||||
let _ = FLUSH_WAIT_UPLOAD_TIME.remove_label_values(&[tenant_id, shard_id, timeline_id]);
|
||||
let _ = STANDBY_HORIZON.remove_label_values(&[tenant_id, shard_id, timeline_id]);
|
||||
{
|
||||
RESIDENT_PHYSICAL_SIZE_GLOBAL.sub(self.resident_physical_size_get());
|
||||
@@ -3919,7 +3854,6 @@ pub fn preinitialize_metrics(conf: &'static PageServerConf) {
|
||||
});
|
||||
|
||||
// Custom
|
||||
Lazy::force(&RECONSTRUCT_TIME);
|
||||
Lazy::force(&BASEBACKUP_QUERY_TIME);
|
||||
Lazy::force(&COMPUTE_COMMANDS_COUNTERS);
|
||||
Lazy::force(&tokio_epoll_uring::THREAD_LOCAL_METRICS_STORAGE);
|
||||
|
||||
@@ -39,6 +39,7 @@ use tokio::io::{AsyncWriteExt, BufWriter};
|
||||
use tokio::task::JoinHandle;
|
||||
use tokio_util::sync::CancellationToken;
|
||||
use tracing::*;
|
||||
use utils::sync::gate::{Gate, GateGuard};
|
||||
use utils::sync::spsc_fold;
|
||||
use utils::{
|
||||
auth::{Claims, Scope, SwappableJwtAuth},
|
||||
@@ -61,6 +62,7 @@ use crate::task_mgr::{self, COMPUTE_REQUEST_RUNTIME};
|
||||
use crate::tenant::mgr::ShardSelector;
|
||||
use crate::tenant::mgr::TenantManager;
|
||||
use crate::tenant::mgr::{GetActiveTenantError, GetTenantError, ShardResolveResult};
|
||||
use crate::tenant::storage_layer::IoConcurrency;
|
||||
use crate::tenant::timeline::{self, WaitLsnError};
|
||||
use crate::tenant::GetTimelineError;
|
||||
use crate::tenant::PageReconstructError;
|
||||
@@ -90,6 +92,7 @@ pub struct Listener {
|
||||
pub struct Connections {
|
||||
cancel: CancellationToken,
|
||||
tasks: tokio::task::JoinSet<ConnectionHandlerResult>,
|
||||
gate: Gate,
|
||||
}
|
||||
|
||||
pub fn spawn(
|
||||
@@ -110,6 +113,7 @@ pub fn spawn(
|
||||
let task = COMPUTE_REQUEST_RUNTIME.spawn(task_mgr::exit_on_panic_or_error(
|
||||
"libpq listener",
|
||||
libpq_listener_main(
|
||||
conf,
|
||||
tenant_manager,
|
||||
pg_auth,
|
||||
tcp_listener,
|
||||
@@ -134,11 +138,16 @@ impl Listener {
|
||||
}
|
||||
impl Connections {
|
||||
pub(crate) async fn shutdown(self) {
|
||||
let Self { cancel, mut tasks } = self;
|
||||
let Self {
|
||||
cancel,
|
||||
mut tasks,
|
||||
gate,
|
||||
} = self;
|
||||
cancel.cancel();
|
||||
while let Some(res) = tasks.join_next().await {
|
||||
Self::handle_connection_completion(res);
|
||||
}
|
||||
gate.close().await;
|
||||
}
|
||||
|
||||
fn handle_connection_completion(res: Result<anyhow::Result<()>, tokio::task::JoinError>) {
|
||||
@@ -158,7 +167,9 @@ impl Connections {
|
||||
/// Returns Ok(()) upon cancellation via `cancel`, returning the set of
|
||||
/// open connections.
|
||||
///
|
||||
#[allow(clippy::too_many_arguments)]
|
||||
pub async fn libpq_listener_main(
|
||||
conf: &'static PageServerConf,
|
||||
tenant_manager: Arc<TenantManager>,
|
||||
auth: Option<Arc<SwappableJwtAuth>>,
|
||||
listener: tokio::net::TcpListener,
|
||||
@@ -168,9 +179,15 @@ pub async fn libpq_listener_main(
|
||||
listener_cancel: CancellationToken,
|
||||
) -> Connections {
|
||||
let connections_cancel = CancellationToken::new();
|
||||
let connections_gate = Gate::default();
|
||||
let mut connection_handler_tasks = tokio::task::JoinSet::default();
|
||||
|
||||
loop {
|
||||
let gate_guard = match connections_gate.enter() {
|
||||
Ok(guard) => guard,
|
||||
Err(_) => break,
|
||||
};
|
||||
|
||||
let accepted = tokio::select! {
|
||||
biased;
|
||||
_ = listener_cancel.cancelled() => break,
|
||||
@@ -190,6 +207,7 @@ pub async fn libpq_listener_main(
|
||||
let connection_ctx = listener_ctx
|
||||
.detached_child(TaskKind::PageRequestHandler, DownloadBehavior::Download);
|
||||
connection_handler_tasks.spawn(page_service_conn_main(
|
||||
conf,
|
||||
tenant_manager.clone(),
|
||||
local_auth,
|
||||
socket,
|
||||
@@ -197,6 +215,7 @@ pub async fn libpq_listener_main(
|
||||
pipelining_config.clone(),
|
||||
connection_ctx,
|
||||
connections_cancel.child_token(),
|
||||
gate_guard,
|
||||
));
|
||||
}
|
||||
Err(err) => {
|
||||
@@ -211,13 +230,16 @@ pub async fn libpq_listener_main(
|
||||
Connections {
|
||||
cancel: connections_cancel,
|
||||
tasks: connection_handler_tasks,
|
||||
gate: connections_gate,
|
||||
}
|
||||
}
|
||||
|
||||
type ConnectionHandlerResult = anyhow::Result<()>;
|
||||
|
||||
#[instrument(skip_all, fields(peer_addr))]
|
||||
#[allow(clippy::too_many_arguments)]
|
||||
async fn page_service_conn_main(
|
||||
conf: &'static PageServerConf,
|
||||
tenant_manager: Arc<TenantManager>,
|
||||
auth: Option<Arc<SwappableJwtAuth>>,
|
||||
socket: tokio::net::TcpStream,
|
||||
@@ -225,6 +247,7 @@ async fn page_service_conn_main(
|
||||
pipelining_config: PageServicePipeliningConfig,
|
||||
connection_ctx: RequestContext,
|
||||
cancel: CancellationToken,
|
||||
gate_guard: GateGuard,
|
||||
) -> ConnectionHandlerResult {
|
||||
let _guard = LIVE_CONNECTIONS
|
||||
.with_label_values(&["page_service"])
|
||||
@@ -274,11 +297,13 @@ async fn page_service_conn_main(
|
||||
// But it's in a shared crate, so, we store connection_ctx inside PageServerHandler
|
||||
// and create the per-query context in process_query ourselves.
|
||||
let mut conn_handler = PageServerHandler::new(
|
||||
conf,
|
||||
tenant_manager,
|
||||
auth,
|
||||
pipelining_config,
|
||||
connection_ctx,
|
||||
cancel.clone(),
|
||||
gate_guard,
|
||||
);
|
||||
let pgbackend = PostgresBackend::new_from_io(socket, peer_addr, auth_type, None)?;
|
||||
|
||||
@@ -310,6 +335,7 @@ async fn page_service_conn_main(
|
||||
}
|
||||
|
||||
struct PageServerHandler {
|
||||
conf: &'static PageServerConf,
|
||||
auth: Option<Arc<SwappableJwtAuth>>,
|
||||
claims: Option<Claims>,
|
||||
|
||||
@@ -325,6 +351,8 @@ struct PageServerHandler {
|
||||
timeline_handles: Option<TimelineHandles>,
|
||||
|
||||
pipelining_config: PageServicePipeliningConfig,
|
||||
|
||||
gate_guard: GateGuard,
|
||||
}
|
||||
|
||||
struct TimelineHandles {
|
||||
@@ -634,19 +662,23 @@ impl BatchedFeMessage {
|
||||
|
||||
impl PageServerHandler {
|
||||
pub fn new(
|
||||
conf: &'static PageServerConf,
|
||||
tenant_manager: Arc<TenantManager>,
|
||||
auth: Option<Arc<SwappableJwtAuth>>,
|
||||
pipelining_config: PageServicePipeliningConfig,
|
||||
connection_ctx: RequestContext,
|
||||
cancel: CancellationToken,
|
||||
gate_guard: GateGuard,
|
||||
) -> Self {
|
||||
PageServerHandler {
|
||||
conf,
|
||||
auth,
|
||||
claims: None,
|
||||
connection_ctx,
|
||||
timeline_handles: Some(TimelineHandles::new(tenant_manager)),
|
||||
cancel,
|
||||
pipelining_config,
|
||||
gate_guard,
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1015,6 +1047,7 @@ impl PageServerHandler {
|
||||
&mut self,
|
||||
pgb_writer: &mut PostgresBackend<IO>,
|
||||
batch: BatchedFeMessage,
|
||||
io_concurrency: IoConcurrency,
|
||||
cancel: &CancellationToken,
|
||||
protocol_version: PagestreamProtocolVersion,
|
||||
ctx: &RequestContext,
|
||||
@@ -1084,6 +1117,7 @@ impl PageServerHandler {
|
||||
&*shard.upgrade()?,
|
||||
effective_request_lsn,
|
||||
pages,
|
||||
io_concurrency,
|
||||
ctx,
|
||||
)
|
||||
.instrument(span.clone())
|
||||
@@ -1288,6 +1322,17 @@ impl PageServerHandler {
|
||||
}
|
||||
}
|
||||
|
||||
let io_concurrency = IoConcurrency::spawn_from_conf(
|
||||
self.conf,
|
||||
match self.gate_guard.try_clone() {
|
||||
Ok(guard) => guard,
|
||||
Err(_) => {
|
||||
info!("shutdown request received in page handler");
|
||||
return Err(QueryError::Shutdown);
|
||||
}
|
||||
},
|
||||
);
|
||||
|
||||
let pgb_reader = pgb
|
||||
.split()
|
||||
.context("implementation error: split pgb into reader and writer")?;
|
||||
@@ -1309,6 +1354,7 @@ impl PageServerHandler {
|
||||
request_span,
|
||||
pipelining_config,
|
||||
protocol_version,
|
||||
io_concurrency,
|
||||
&ctx,
|
||||
)
|
||||
.await
|
||||
@@ -1322,6 +1368,7 @@ impl PageServerHandler {
|
||||
timeline_handles,
|
||||
request_span,
|
||||
protocol_version,
|
||||
io_concurrency,
|
||||
&ctx,
|
||||
)
|
||||
.await
|
||||
@@ -1349,6 +1396,7 @@ impl PageServerHandler {
|
||||
mut timeline_handles: TimelineHandles,
|
||||
request_span: Span,
|
||||
protocol_version: PagestreamProtocolVersion,
|
||||
io_concurrency: IoConcurrency,
|
||||
ctx: &RequestContext,
|
||||
) -> (
|
||||
(PostgresBackendReader<IO>, TimelineHandles),
|
||||
@@ -1383,7 +1431,14 @@ impl PageServerHandler {
|
||||
};
|
||||
|
||||
let err = self
|
||||
.pagesteam_handle_batched_message(pgb_writer, msg, &cancel, protocol_version, ctx)
|
||||
.pagesteam_handle_batched_message(
|
||||
pgb_writer,
|
||||
msg,
|
||||
io_concurrency.clone(),
|
||||
&cancel,
|
||||
protocol_version,
|
||||
ctx,
|
||||
)
|
||||
.await;
|
||||
match err {
|
||||
Ok(()) => {}
|
||||
@@ -1407,6 +1462,7 @@ impl PageServerHandler {
|
||||
request_span: Span,
|
||||
pipelining_config: PageServicePipeliningConfigPipelined,
|
||||
protocol_version: PagestreamProtocolVersion,
|
||||
io_concurrency: IoConcurrency,
|
||||
ctx: &RequestContext,
|
||||
) -> (
|
||||
(PostgresBackendReader<IO>, TimelineHandles),
|
||||
@@ -1550,6 +1606,7 @@ impl PageServerHandler {
|
||||
self.pagesteam_handle_batched_message(
|
||||
pgb_writer,
|
||||
batch,
|
||||
io_concurrency.clone(),
|
||||
&cancel,
|
||||
protocol_version,
|
||||
&ctx,
|
||||
@@ -1806,6 +1863,7 @@ impl PageServerHandler {
|
||||
timeline: &Timeline,
|
||||
effective_lsn: Lsn,
|
||||
requests: smallvec::SmallVec<[BatchedGetPageRequest; 1]>,
|
||||
io_concurrency: IoConcurrency,
|
||||
ctx: &RequestContext,
|
||||
) -> Vec<Result<(PagestreamBeMessage, SmgrOpTimer), BatchedPageStreamError>> {
|
||||
debug_assert_current_span_has_tenant_and_timeline_id();
|
||||
@@ -1832,6 +1890,7 @@ impl PageServerHandler {
|
||||
.get_rel_page_at_lsn_batched(
|
||||
requests.iter().map(|p| (&p.req.rel, &p.req.blkno)),
|
||||
effective_lsn,
|
||||
io_concurrency,
|
||||
ctx,
|
||||
)
|
||||
.await;
|
||||
|
||||
@@ -17,6 +17,7 @@ use crate::span::{
|
||||
debug_assert_current_span_has_tenant_and_timeline_id,
|
||||
debug_assert_current_span_has_tenant_and_timeline_id_no_shard_id,
|
||||
};
|
||||
use crate::tenant::storage_layer::IoConcurrency;
|
||||
use crate::tenant::timeline::GetVectoredError;
|
||||
use anyhow::{ensure, Context};
|
||||
use bytes::{Buf, Bytes, BytesMut};
|
||||
@@ -200,6 +201,7 @@ impl Timeline {
|
||||
blknum: BlockNumber,
|
||||
version: Version<'_>,
|
||||
ctx: &RequestContext,
|
||||
io_concurrency: IoConcurrency,
|
||||
) -> Result<Bytes, PageReconstructError> {
|
||||
match version {
|
||||
Version::Lsn(effective_lsn) => {
|
||||
@@ -208,6 +210,7 @@ impl Timeline {
|
||||
.get_rel_page_at_lsn_batched(
|
||||
pages.iter().map(|(tag, blknum)| (tag, blknum)),
|
||||
effective_lsn,
|
||||
io_concurrency.clone(),
|
||||
ctx,
|
||||
)
|
||||
.await;
|
||||
@@ -246,6 +249,7 @@ impl Timeline {
|
||||
&self,
|
||||
pages: impl ExactSizeIterator<Item = (&RelTag, &BlockNumber)>,
|
||||
effective_lsn: Lsn,
|
||||
io_concurrency: IoConcurrency,
|
||||
ctx: &RequestContext,
|
||||
) -> Vec<Result<Bytes, PageReconstructError>> {
|
||||
debug_assert_current_span_has_tenant_and_timeline_id();
|
||||
@@ -309,7 +313,10 @@ impl Timeline {
|
||||
acc.to_keyspace()
|
||||
};
|
||||
|
||||
match self.get_vectored(keyspace, effective_lsn, ctx).await {
|
||||
match self
|
||||
.get_vectored(keyspace, effective_lsn, io_concurrency, ctx)
|
||||
.await
|
||||
{
|
||||
Ok(results) => {
|
||||
for (key, res) in results {
|
||||
let mut key_slots = keys_slots.remove(&key).unwrap().into_iter();
|
||||
@@ -889,9 +896,15 @@ impl Timeline {
|
||||
&self,
|
||||
lsn: Lsn,
|
||||
ctx: &RequestContext,
|
||||
io_concurrency: IoConcurrency,
|
||||
) -> Result<HashMap<String, Bytes>, PageReconstructError> {
|
||||
let kv = self
|
||||
.scan(KeySpace::single(Key::metadata_aux_key_range()), lsn, ctx)
|
||||
.scan(
|
||||
KeySpace::single(Key::metadata_aux_key_range()),
|
||||
lsn,
|
||||
ctx,
|
||||
io_concurrency,
|
||||
)
|
||||
.await?;
|
||||
let mut result = HashMap::new();
|
||||
let mut sz = 0;
|
||||
@@ -914,8 +927,9 @@ impl Timeline {
|
||||
&self,
|
||||
lsn: Lsn,
|
||||
ctx: &RequestContext,
|
||||
io_concurrency: IoConcurrency,
|
||||
) -> Result<(), PageReconstructError> {
|
||||
self.list_aux_files_v2(lsn, ctx).await?;
|
||||
self.list_aux_files_v2(lsn, ctx, io_concurrency).await?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
@@ -923,17 +937,24 @@ impl Timeline {
|
||||
&self,
|
||||
lsn: Lsn,
|
||||
ctx: &RequestContext,
|
||||
io_concurrency: IoConcurrency,
|
||||
) -> Result<HashMap<String, Bytes>, PageReconstructError> {
|
||||
self.list_aux_files_v2(lsn, ctx).await
|
||||
self.list_aux_files_v2(lsn, ctx, io_concurrency).await
|
||||
}
|
||||
|
||||
pub(crate) async fn get_replorigins(
|
||||
&self,
|
||||
lsn: Lsn,
|
||||
ctx: &RequestContext,
|
||||
io_concurrency: IoConcurrency,
|
||||
) -> Result<HashMap<RepOriginId, Lsn>, PageReconstructError> {
|
||||
let kv = self
|
||||
.scan(KeySpace::single(repl_origin_key_range()), lsn, ctx)
|
||||
.scan(
|
||||
KeySpace::single(repl_origin_key_range()),
|
||||
lsn,
|
||||
ctx,
|
||||
io_concurrency,
|
||||
)
|
||||
.await?;
|
||||
let mut result = HashMap::new();
|
||||
for (k, v) in kv {
|
||||
@@ -2432,7 +2453,11 @@ mod tests {
|
||||
("foo/bar2".to_string(), Bytes::from_static(b"content2")),
|
||||
]);
|
||||
|
||||
let readback = tline.list_aux_files(Lsn(0x1008), &ctx).await?;
|
||||
let io_concurrency = IoConcurrency::spawn_for_test();
|
||||
|
||||
let readback = tline
|
||||
.list_aux_files(Lsn(0x1008), &ctx, io_concurrency.clone())
|
||||
.await?;
|
||||
assert_eq!(readback, expect_1008);
|
||||
|
||||
// Second modification: update one key, remove the other
|
||||
@@ -2444,11 +2469,15 @@ mod tests {
|
||||
let expect_2008 =
|
||||
HashMap::from([("foo/bar1".to_string(), Bytes::from_static(b"content3"))]);
|
||||
|
||||
let readback = tline.list_aux_files(Lsn(0x2008), &ctx).await?;
|
||||
let readback = tline
|
||||
.list_aux_files(Lsn(0x2008), &ctx, io_concurrency.clone())
|
||||
.await?;
|
||||
assert_eq!(readback, expect_2008);
|
||||
|
||||
// Reading back in time works
|
||||
let readback = tline.list_aux_files(Lsn(0x1008), &ctx).await?;
|
||||
let readback = tline
|
||||
.list_aux_files(Lsn(0x1008), &ctx, io_concurrency.clone())
|
||||
.await?;
|
||||
assert_eq!(readback, expect_1008);
|
||||
|
||||
Ok(())
|
||||
|
||||
@@ -5453,6 +5453,8 @@ pub(crate) mod harness {
|
||||
compaction_period: Some(tenant_conf.compaction_period),
|
||||
compaction_threshold: Some(tenant_conf.compaction_threshold),
|
||||
compaction_algorithm: Some(tenant_conf.compaction_algorithm),
|
||||
l0_flush_delay_threshold: tenant_conf.l0_flush_delay_threshold,
|
||||
l0_flush_stall_threshold: tenant_conf.l0_flush_stall_threshold,
|
||||
gc_horizon: Some(tenant_conf.gc_horizon),
|
||||
gc_period: Some(tenant_conf.gc_period),
|
||||
image_creation_threshold: Some(tenant_conf.image_creation_threshold),
|
||||
@@ -5476,6 +5478,11 @@ pub(crate) mod harness {
|
||||
timeline_offloading: Some(tenant_conf.timeline_offloading),
|
||||
wal_receiver_protocol_override: tenant_conf.wal_receiver_protocol_override,
|
||||
rel_size_v2_enabled: tenant_conf.rel_size_v2_enabled,
|
||||
gc_compaction_enabled: Some(tenant_conf.gc_compaction_enabled),
|
||||
gc_compaction_initial_threshold_kb: Some(
|
||||
tenant_conf.gc_compaction_initial_threshold_kb,
|
||||
),
|
||||
gc_compaction_ratio_percent: Some(tenant_conf.gc_compaction_ratio_percent),
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -5709,7 +5716,7 @@ mod tests {
|
||||
use pageserver_api::value::Value;
|
||||
use pageserver_compaction::helpers::overlaps_with;
|
||||
use rand::{thread_rng, Rng};
|
||||
use storage_layer::PersistentLayerKey;
|
||||
use storage_layer::{IoConcurrency, PersistentLayerKey};
|
||||
use tests::storage_layer::ValuesReconstructState;
|
||||
use tests::timeline::{GetVectoredError, ShutdownMode};
|
||||
use timeline::{CompactOptions, DeltaLayerTestDesc};
|
||||
@@ -6490,6 +6497,7 @@ mod tests {
|
||||
async fn test_get_vectored() -> anyhow::Result<()> {
|
||||
let harness = TenantHarness::create("test_get_vectored").await?;
|
||||
let (tenant, ctx) = harness.load().await;
|
||||
let io_concurrency = IoConcurrency::spawn_for_test();
|
||||
let tline = tenant
|
||||
.create_test_timeline(TIMELINE_ID, Lsn(0x08), DEFAULT_PG_VERSION, &ctx)
|
||||
.await?;
|
||||
@@ -6554,7 +6562,7 @@ mod tests {
|
||||
.get_vectored_impl(
|
||||
read.clone(),
|
||||
reads_lsn,
|
||||
&mut ValuesReconstructState::new(),
|
||||
&mut ValuesReconstructState::new(io_concurrency.clone()),
|
||||
&ctx,
|
||||
)
|
||||
.await;
|
||||
@@ -6601,6 +6609,7 @@ mod tests {
|
||||
let harness = TenantHarness::create("test_get_vectored_aux_files").await?;
|
||||
|
||||
let (tenant, ctx) = harness.load().await;
|
||||
let io_concurrency = IoConcurrency::spawn_for_test();
|
||||
let tline = tenant
|
||||
.create_empty_timeline(TIMELINE_ID, Lsn(0), DEFAULT_PG_VERSION, &ctx)
|
||||
.await?;
|
||||
@@ -6635,7 +6644,7 @@ mod tests {
|
||||
.get_vectored_impl(
|
||||
aux_keyspace.clone(),
|
||||
read_lsn,
|
||||
&mut ValuesReconstructState::new(),
|
||||
&mut ValuesReconstructState::new(io_concurrency.clone()),
|
||||
&ctx,
|
||||
)
|
||||
.await;
|
||||
@@ -6683,6 +6692,7 @@ mod tests {
|
||||
)
|
||||
.await?;
|
||||
let (tenant, ctx) = harness.load().await;
|
||||
let io_concurrency = IoConcurrency::spawn_for_test();
|
||||
|
||||
let mut current_key = Key::from_hex("010000000033333333444444445500000000").unwrap();
|
||||
let gap_at_key = current_key.add(100);
|
||||
@@ -6783,7 +6793,7 @@ mod tests {
|
||||
.get_vectored_impl(
|
||||
read.clone(),
|
||||
current_lsn,
|
||||
&mut ValuesReconstructState::new(),
|
||||
&mut ValuesReconstructState::new(io_concurrency.clone()),
|
||||
&ctx,
|
||||
)
|
||||
.await?;
|
||||
@@ -6826,6 +6836,7 @@ mod tests {
|
||||
async fn test_get_vectored_ancestor_descent() -> anyhow::Result<()> {
|
||||
let harness = TenantHarness::create("test_get_vectored_on_lsn_axis").await?;
|
||||
let (tenant, ctx) = harness.load().await;
|
||||
let io_concurrency = IoConcurrency::spawn_for_test();
|
||||
|
||||
let start_key = Key::from_hex("010000000033333333444444445500000000").unwrap();
|
||||
let end_key = start_key.add(1000);
|
||||
@@ -6918,7 +6929,7 @@ mod tests {
|
||||
ranges: vec![child_gap_at_key..child_gap_at_key.next()],
|
||||
},
|
||||
query_lsn,
|
||||
&mut ValuesReconstructState::new(),
|
||||
&mut ValuesReconstructState::new(io_concurrency.clone()),
|
||||
&ctx,
|
||||
)
|
||||
.await;
|
||||
@@ -7364,6 +7375,7 @@ mod tests {
|
||||
async fn test_metadata_scan() -> anyhow::Result<()> {
|
||||
let harness = TenantHarness::create("test_metadata_scan").await?;
|
||||
let (tenant, ctx) = harness.load().await;
|
||||
let io_concurrency = IoConcurrency::spawn_for_test();
|
||||
let tline = tenant
|
||||
.create_test_timeline(TIMELINE_ID, Lsn(0x10), DEFAULT_PG_VERSION, &ctx)
|
||||
.await?;
|
||||
@@ -7417,7 +7429,7 @@ mod tests {
|
||||
.get_vectored_impl(
|
||||
keyspace.clone(),
|
||||
lsn,
|
||||
&mut ValuesReconstructState::default(),
|
||||
&mut ValuesReconstructState::new(io_concurrency.clone()),
|
||||
&ctx,
|
||||
)
|
||||
.await?
|
||||
@@ -7532,6 +7544,7 @@ mod tests {
|
||||
let harness = TenantHarness::create("test_aux_file_e2e").await.unwrap();
|
||||
|
||||
let (tenant, ctx) = harness.load().await;
|
||||
let io_concurrency = IoConcurrency::spawn_for_test();
|
||||
|
||||
let mut lsn = Lsn(0x08);
|
||||
|
||||
@@ -7551,7 +7564,10 @@ mod tests {
|
||||
}
|
||||
|
||||
// we can read everything from the storage
|
||||
let files = tline.list_aux_files(lsn, &ctx).await.unwrap();
|
||||
let files = tline
|
||||
.list_aux_files(lsn, &ctx, io_concurrency.clone())
|
||||
.await
|
||||
.unwrap();
|
||||
assert_eq!(
|
||||
files.get("pg_logical/mappings/test1"),
|
||||
Some(&bytes::Bytes::from_static(b"first"))
|
||||
@@ -7567,7 +7583,10 @@ mod tests {
|
||||
modification.commit(&ctx).await.unwrap();
|
||||
}
|
||||
|
||||
let files = tline.list_aux_files(lsn, &ctx).await.unwrap();
|
||||
let files = tline
|
||||
.list_aux_files(lsn, &ctx, io_concurrency.clone())
|
||||
.await
|
||||
.unwrap();
|
||||
assert_eq!(
|
||||
files.get("pg_logical/mappings/test2"),
|
||||
Some(&bytes::Bytes::from_static(b"second"))
|
||||
@@ -7578,7 +7597,10 @@ mod tests {
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
let files = child.list_aux_files(lsn, &ctx).await.unwrap();
|
||||
let files = child
|
||||
.list_aux_files(lsn, &ctx, io_concurrency.clone())
|
||||
.await
|
||||
.unwrap();
|
||||
assert_eq!(files.get("pg_logical/mappings/test1"), None);
|
||||
assert_eq!(files.get("pg_logical/mappings/test2"), None);
|
||||
}
|
||||
@@ -7587,6 +7609,7 @@ mod tests {
|
||||
async fn test_metadata_image_creation() -> anyhow::Result<()> {
|
||||
let harness = TenantHarness::create("test_metadata_image_creation").await?;
|
||||
let (tenant, ctx) = harness.load().await;
|
||||
let io_concurrency = IoConcurrency::spawn_for_test();
|
||||
let tline = tenant
|
||||
.create_test_timeline(TIMELINE_ID, Lsn(0x10), DEFAULT_PG_VERSION, &ctx)
|
||||
.await?;
|
||||
@@ -7606,8 +7629,9 @@ mod tests {
|
||||
keyspace: &KeySpace,
|
||||
lsn: Lsn,
|
||||
ctx: &RequestContext,
|
||||
io_concurrency: IoConcurrency,
|
||||
) -> anyhow::Result<(BTreeMap<Key, Result<Bytes, PageReconstructError>>, usize)> {
|
||||
let mut reconstruct_state = ValuesReconstructState::default();
|
||||
let mut reconstruct_state = ValuesReconstructState::new(io_concurrency);
|
||||
let res = tline
|
||||
.get_vectored_impl(keyspace.clone(), lsn, &mut reconstruct_state, ctx)
|
||||
.await?;
|
||||
@@ -7655,7 +7679,8 @@ mod tests {
|
||||
|
||||
if iter % 5 == 0 {
|
||||
let (_, before_delta_file_accessed) =
|
||||
scan_with_statistics(&tline, &keyspace, lsn, &ctx).await?;
|
||||
scan_with_statistics(&tline, &keyspace, lsn, &ctx, io_concurrency.clone())
|
||||
.await?;
|
||||
tline
|
||||
.compact(
|
||||
&cancel,
|
||||
@@ -7669,7 +7694,8 @@ mod tests {
|
||||
)
|
||||
.await?;
|
||||
let (_, after_delta_file_accessed) =
|
||||
scan_with_statistics(&tline, &keyspace, lsn, &ctx).await?;
|
||||
scan_with_statistics(&tline, &keyspace, lsn, &ctx, io_concurrency.clone())
|
||||
.await?;
|
||||
assert!(after_delta_file_accessed < before_delta_file_accessed, "after_delta_file_accessed={after_delta_file_accessed}, before_delta_file_accessed={before_delta_file_accessed}");
|
||||
// Given that we already produced an image layer, there should be no delta layer needed for the scan, but still setting a low threshold there for unforeseen circumstances.
|
||||
assert!(
|
||||
@@ -7758,6 +7784,7 @@ mod tests {
|
||||
async fn test_vectored_missing_metadata_key_reads() -> anyhow::Result<()> {
|
||||
let harness = TenantHarness::create("test_vectored_missing_metadata_key_reads").await?;
|
||||
let (tenant, ctx) = harness.load().await;
|
||||
let io_concurrency = IoConcurrency::spawn_for_test();
|
||||
|
||||
let base_key = Key::from_hex("620000000033333333444444445500000000").unwrap();
|
||||
let base_key_child = Key::from_hex("620000000033333333444444445500000001").unwrap();
|
||||
@@ -7896,7 +7923,7 @@ mod tests {
|
||||
);
|
||||
|
||||
// test vectored scan on parent timeline
|
||||
let mut reconstruct_state = ValuesReconstructState::new();
|
||||
let mut reconstruct_state = ValuesReconstructState::new(io_concurrency.clone());
|
||||
let res = tline
|
||||
.get_vectored_impl(
|
||||
KeySpace::single(Key::metadata_key_range()),
|
||||
@@ -7922,7 +7949,7 @@ mod tests {
|
||||
);
|
||||
|
||||
// test vectored scan on child timeline
|
||||
let mut reconstruct_state = ValuesReconstructState::new();
|
||||
let mut reconstruct_state = ValuesReconstructState::new(io_concurrency.clone());
|
||||
let res = child
|
||||
.get_vectored_impl(
|
||||
KeySpace::single(Key::metadata_key_range()),
|
||||
@@ -7960,7 +7987,9 @@ mod tests {
|
||||
lsn: Lsn,
|
||||
ctx: &RequestContext,
|
||||
) -> Result<Option<Bytes>, GetVectoredError> {
|
||||
let mut reconstruct_state = ValuesReconstructState::new();
|
||||
let io_concurrency =
|
||||
IoConcurrency::spawn_from_conf(tline.conf, tline.gate.enter().unwrap());
|
||||
let mut reconstruct_state = ValuesReconstructState::new(io_concurrency);
|
||||
let mut res = tline
|
||||
.get_vectored_impl(
|
||||
KeySpace::single(key..key.next()),
|
||||
@@ -8061,6 +8090,7 @@ mod tests {
|
||||
.await
|
||||
.unwrap();
|
||||
let (tenant, ctx) = harness.load().await;
|
||||
let io_concurrency = IoConcurrency::spawn_for_test();
|
||||
|
||||
let key0 = Key::from_hex("620000000033333333444444445500000000").unwrap();
|
||||
let key1 = Key::from_hex("620000000033333333444444445500000001").unwrap();
|
||||
@@ -8120,7 +8150,7 @@ mod tests {
|
||||
|
||||
// Image layers are created at last_record_lsn
|
||||
let images = tline
|
||||
.inspect_image_layers(Lsn(0x40), &ctx)
|
||||
.inspect_image_layers(Lsn(0x40), &ctx, io_concurrency.clone())
|
||||
.await
|
||||
.unwrap()
|
||||
.into_iter()
|
||||
@@ -8135,6 +8165,7 @@ mod tests {
|
||||
.await
|
||||
.unwrap();
|
||||
let (tenant, ctx) = harness.load().await;
|
||||
let io_concurrency = IoConcurrency::spawn_for_test();
|
||||
|
||||
let key1 = Key::from_hex("620000000033333333444444445500000001").unwrap();
|
||||
let key2 = Key::from_hex("620000000033333333444444445500000002").unwrap();
|
||||
@@ -8185,7 +8216,7 @@ mod tests {
|
||||
|
||||
// Image layers are created at last_record_lsn
|
||||
let images = tline
|
||||
.inspect_image_layers(Lsn(0x30), &ctx)
|
||||
.inspect_image_layers(Lsn(0x30), &ctx, io_concurrency.clone())
|
||||
.await
|
||||
.unwrap()
|
||||
.into_iter()
|
||||
@@ -8194,10 +8225,18 @@ mod tests {
|
||||
assert_eq!(images.len(), 0); // the image layer should not contain tombstones, or it is not created
|
||||
}
|
||||
|
||||
fn key_max_minus_1() -> Key {
|
||||
Key {
|
||||
field6: u32::MAX - 1,
|
||||
..Key::MAX
|
||||
}
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_simple_bottom_most_compaction_images() -> anyhow::Result<()> {
|
||||
let harness = TenantHarness::create("test_simple_bottom_most_compaction_images").await?;
|
||||
let (tenant, ctx) = harness.load().await;
|
||||
let io_concurrency = IoConcurrency::spawn_for_test();
|
||||
|
||||
fn get_key(id: u32) -> Key {
|
||||
// using aux key here b/c they are guaranteed to be inside `collect_keyspace`.
|
||||
@@ -8339,7 +8378,7 @@ mod tests {
|
||||
|
||||
// Check if the image layer at the GC horizon contains exactly what we want
|
||||
let image_at_gc_horizon = tline
|
||||
.inspect_image_layers(Lsn(0x30), &ctx)
|
||||
.inspect_image_layers(Lsn(0x30), &ctx, io_concurrency.clone())
|
||||
.await
|
||||
.unwrap()
|
||||
.into_iter()
|
||||
@@ -8373,7 +8412,7 @@ mod tests {
|
||||
vec![
|
||||
// Image layer at GC horizon
|
||||
PersistentLayerKey {
|
||||
key_range: Key::MIN..Key::MAX,
|
||||
key_range: Key::MIN..key_max_minus_1(),
|
||||
lsn_range: Lsn(0x30)..Lsn(0x31),
|
||||
is_delta: false
|
||||
},
|
||||
@@ -10052,7 +10091,12 @@ mod tests {
|
||||
|
||||
let keyspace = KeySpace::single(get_key(0)..get_key(10));
|
||||
let results = tline
|
||||
.get_vectored(keyspace, delta_layer_end_lsn, &ctx)
|
||||
.get_vectored(
|
||||
keyspace,
|
||||
delta_layer_end_lsn,
|
||||
IoConcurrency::sequential(),
|
||||
&ctx,
|
||||
)
|
||||
.await
|
||||
.expect("No vectored errors");
|
||||
for (key, res) in results {
|
||||
@@ -10740,7 +10784,7 @@ mod tests {
|
||||
vec![
|
||||
// The compacted image layer (full key range)
|
||||
PersistentLayerKey {
|
||||
key_range: Key::MIN..Key::MAX,
|
||||
key_range: Key::MIN..key_max_minus_1(),
|
||||
lsn_range: Lsn(0x10)..Lsn(0x11),
|
||||
is_delta: false,
|
||||
},
|
||||
@@ -11087,7 +11131,7 @@ mod tests {
|
||||
vec![
|
||||
// The compacted image layer (full key range)
|
||||
PersistentLayerKey {
|
||||
key_range: Key::MIN..Key::MAX,
|
||||
key_range: Key::MIN..key_max_minus_1(),
|
||||
lsn_range: Lsn(0x10)..Lsn(0x11),
|
||||
is_delta: false,
|
||||
},
|
||||
|
||||
@@ -281,6 +281,14 @@ pub struct TenantConfOpt {
|
||||
#[serde(default)]
|
||||
pub compaction_algorithm: Option<CompactionAlgorithmSettings>,
|
||||
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
#[serde(default)]
|
||||
pub l0_flush_delay_threshold: Option<usize>,
|
||||
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
#[serde(default)]
|
||||
pub l0_flush_stall_threshold: Option<usize>,
|
||||
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
#[serde(default)]
|
||||
pub gc_horizon: Option<u64>,
|
||||
@@ -360,6 +368,15 @@ pub struct TenantConfOpt {
|
||||
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub rel_size_v2_enabled: Option<bool>,
|
||||
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub gc_compaction_enabled: Option<bool>,
|
||||
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub gc_compaction_initial_threshold_kb: Option<u64>,
|
||||
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub gc_compaction_ratio_percent: Option<u64>,
|
||||
}
|
||||
|
||||
impl TenantConfOpt {
|
||||
@@ -385,6 +402,12 @@ impl TenantConfOpt {
|
||||
.as_ref()
|
||||
.unwrap_or(&global_conf.compaction_algorithm)
|
||||
.clone(),
|
||||
l0_flush_delay_threshold: self
|
||||
.l0_flush_delay_threshold
|
||||
.or(global_conf.l0_flush_delay_threshold),
|
||||
l0_flush_stall_threshold: self
|
||||
.l0_flush_stall_threshold
|
||||
.or(global_conf.l0_flush_stall_threshold),
|
||||
gc_horizon: self.gc_horizon.unwrap_or(global_conf.gc_horizon),
|
||||
gc_period: self.gc_period.unwrap_or(global_conf.gc_period),
|
||||
image_creation_threshold: self
|
||||
@@ -429,6 +452,15 @@ impl TenantConfOpt {
|
||||
.wal_receiver_protocol_override
|
||||
.or(global_conf.wal_receiver_protocol_override),
|
||||
rel_size_v2_enabled: self.rel_size_v2_enabled.or(global_conf.rel_size_v2_enabled),
|
||||
gc_compaction_enabled: self
|
||||
.gc_compaction_enabled
|
||||
.unwrap_or(global_conf.gc_compaction_enabled),
|
||||
gc_compaction_initial_threshold_kb: self
|
||||
.gc_compaction_initial_threshold_kb
|
||||
.unwrap_or(global_conf.gc_compaction_initial_threshold_kb),
|
||||
gc_compaction_ratio_percent: self
|
||||
.gc_compaction_ratio_percent
|
||||
.unwrap_or(global_conf.gc_compaction_ratio_percent),
|
||||
}
|
||||
}
|
||||
|
||||
@@ -440,6 +472,8 @@ impl TenantConfOpt {
|
||||
mut compaction_period,
|
||||
mut compaction_threshold,
|
||||
mut compaction_algorithm,
|
||||
mut l0_flush_delay_threshold,
|
||||
mut l0_flush_stall_threshold,
|
||||
mut gc_horizon,
|
||||
mut gc_period,
|
||||
mut image_creation_threshold,
|
||||
@@ -459,6 +493,9 @@ impl TenantConfOpt {
|
||||
mut timeline_offloading,
|
||||
mut wal_receiver_protocol_override,
|
||||
mut rel_size_v2_enabled,
|
||||
mut gc_compaction_enabled,
|
||||
mut gc_compaction_initial_threshold_kb,
|
||||
mut gc_compaction_ratio_percent,
|
||||
} = self;
|
||||
|
||||
patch.checkpoint_distance.apply(&mut checkpoint_distance);
|
||||
@@ -475,6 +512,12 @@ impl TenantConfOpt {
|
||||
.apply(&mut compaction_period);
|
||||
patch.compaction_threshold.apply(&mut compaction_threshold);
|
||||
patch.compaction_algorithm.apply(&mut compaction_algorithm);
|
||||
patch
|
||||
.l0_flush_delay_threshold
|
||||
.apply(&mut l0_flush_delay_threshold);
|
||||
patch
|
||||
.l0_flush_stall_threshold
|
||||
.apply(&mut l0_flush_stall_threshold);
|
||||
patch.gc_horizon.apply(&mut gc_horizon);
|
||||
patch
|
||||
.gc_period
|
||||
@@ -528,6 +571,15 @@ impl TenantConfOpt {
|
||||
.wal_receiver_protocol_override
|
||||
.apply(&mut wal_receiver_protocol_override);
|
||||
patch.rel_size_v2_enabled.apply(&mut rel_size_v2_enabled);
|
||||
patch
|
||||
.gc_compaction_enabled
|
||||
.apply(&mut gc_compaction_enabled);
|
||||
patch
|
||||
.gc_compaction_initial_threshold_kb
|
||||
.apply(&mut gc_compaction_initial_threshold_kb);
|
||||
patch
|
||||
.gc_compaction_ratio_percent
|
||||
.apply(&mut gc_compaction_ratio_percent);
|
||||
|
||||
Ok(Self {
|
||||
checkpoint_distance,
|
||||
@@ -536,6 +588,8 @@ impl TenantConfOpt {
|
||||
compaction_period,
|
||||
compaction_threshold,
|
||||
compaction_algorithm,
|
||||
l0_flush_delay_threshold,
|
||||
l0_flush_stall_threshold,
|
||||
gc_horizon,
|
||||
gc_period,
|
||||
image_creation_threshold,
|
||||
@@ -555,6 +609,9 @@ impl TenantConfOpt {
|
||||
timeline_offloading,
|
||||
wal_receiver_protocol_override,
|
||||
rel_size_v2_enabled,
|
||||
gc_compaction_enabled,
|
||||
gc_compaction_initial_threshold_kb,
|
||||
gc_compaction_ratio_percent,
|
||||
})
|
||||
}
|
||||
}
|
||||
@@ -590,6 +647,8 @@ impl From<TenantConfOpt> for models::TenantConfig {
|
||||
compaction_target_size: value.compaction_target_size,
|
||||
compaction_period: value.compaction_period.map(humantime),
|
||||
compaction_threshold: value.compaction_threshold,
|
||||
l0_flush_delay_threshold: value.l0_flush_delay_threshold,
|
||||
l0_flush_stall_threshold: value.l0_flush_stall_threshold,
|
||||
gc_horizon: value.gc_horizon,
|
||||
gc_period: value.gc_period.map(humantime),
|
||||
image_creation_threshold: value.image_creation_threshold,
|
||||
@@ -611,6 +670,9 @@ impl From<TenantConfOpt> for models::TenantConfig {
|
||||
timeline_offloading: value.timeline_offloading,
|
||||
wal_receiver_protocol_override: value.wal_receiver_protocol_override,
|
||||
rel_size_v2_enabled: value.rel_size_v2_enabled,
|
||||
gc_compaction_enabled: value.gc_compaction_enabled,
|
||||
gc_compaction_initial_threshold_kb: value.gc_compaction_initial_threshold_kb,
|
||||
gc_compaction_ratio_percent: value.gc_compaction_ratio_percent,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -57,6 +57,7 @@ use std::collections::{HashMap, VecDeque};
use std::iter::Peekable;
use std::ops::Range;
use std::sync::Arc;
use tokio::sync::watch;
use utils::lsn::Lsn;

use historic_layer_coverage::BufferedHistoricLayerCoverage;
@@ -67,7 +68,6 @@ use super::storage_layer::{LayerVisibilityHint, PersistentLayerDesc};
///
/// LayerMap tracks what layers exist on a timeline.
///
#[derive(Default)]
pub struct LayerMap {
//
// 'open_layer' holds the current InMemoryLayer that is accepting new
@@ -93,7 +93,25 @@ pub struct LayerMap {

/// L0 layers have key range Key::MIN..Key::MAX, and locating them using R-Tree search is very inefficient.
/// So L0 layers are held in l0_delta_layers vector, in addition to the R-tree.
///
/// NB: make sure to notify `watch_l0_deltas` on changes.
l0_delta_layers: Vec<Arc<PersistentLayerDesc>>,

/// Notifies about L0 delta layer changes, sending the current number of L0 layers.
watch_l0_deltas: watch::Sender<usize>,
}

impl Default for LayerMap {
fn default() -> Self {
Self {
open_layer: Default::default(),
next_open_layer_at: Default::default(),
frozen_layers: Default::default(),
historic: Default::default(),
l0_delta_layers: Default::default(),
watch_l0_deltas: watch::channel(0).0,
}
}
}

/// The primary update API for the layer map.
@@ -466,6 +484,8 @@ impl LayerMap {

if Self::is_l0(&layer_desc.key_range, layer_desc.is_delta) {
self.l0_delta_layers.push(layer_desc.clone().into());
self.watch_l0_deltas
.send_replace(self.l0_delta_layers.len());
}

self.historic.insert(
@@ -488,6 +508,8 @@ impl LayerMap {
let mut l0_delta_layers = std::mem::take(&mut self.l0_delta_layers);
l0_delta_layers.retain(|other| other.key() != layer_key);
self.l0_delta_layers = l0_delta_layers;
self.watch_l0_deltas
.send_replace(self.l0_delta_layers.len());
// this assertion is related to use of Arc::ptr_eq in Self::compare_arced_layers,
// there's a chance that the comparison fails at runtime due to it comparing (pointer,
// vtable) pairs.
@@ -850,6 +872,11 @@ impl LayerMap {
&self.l0_delta_layers
}

/// Subscribes to L0 delta layer changes, sending the current number of L0 delta layers.
pub fn watch_level0_deltas(&self) -> watch::Receiver<usize> {
self.watch_l0_deltas.subscribe()
}

/// debugging function to print out the contents of the layer map
#[allow(unused)]
pub async fn dump(&self, verbose: bool, ctx: &RequestContext) -> Result<()> {

@@ -77,11 +77,17 @@ pub struct IndexPart {
|
||||
///
|
||||
/// None means no aux files have been written to the storage before the point
|
||||
/// when this flag is introduced.
|
||||
///
|
||||
/// This flag is not used any more as all tenants have been transitioned to the new aux file policy.
|
||||
#[serde(skip_serializing_if = "Option::is_none", default)]
|
||||
pub(crate) last_aux_file_policy: Option<AuxFilePolicy>,
|
||||
|
||||
#[serde(skip_serializing_if = "Option::is_none", default)]
|
||||
pub(crate) rel_size_migration: Option<RelSizeMigration>,
|
||||
|
||||
/// The LSN of gc-compaction horizon. Once gc-compaction is finished for all layer files below an LSN, this LSN will be updated.
|
||||
#[serde(skip_serializing_if = "Option::is_none", default)]
|
||||
pub(crate) l2_lsn: Option<Lsn>,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
|
||||
@@ -116,10 +122,11 @@ impl IndexPart {
|
||||
/// - 9: +gc_blocking
|
||||
/// - 10: +import_pgdata
|
||||
/// - 11: +rel_size_migration
|
||||
const LATEST_VERSION: usize = 11;
|
||||
/// - 12: +l2_lsn
|
||||
const LATEST_VERSION: usize = 12;
|
||||
|
||||
// Versions we may see when reading from a bucket.
|
||||
pub const KNOWN_VERSIONS: &'static [usize] = &[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11];
|
||||
pub const KNOWN_VERSIONS: &'static [usize] = &[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12];
|
||||
|
||||
pub const FILE_NAME: &'static str = "index_part.json";
|
||||
|
||||
@@ -136,6 +143,7 @@ impl IndexPart {
|
||||
last_aux_file_policy: None,
|
||||
import_pgdata: None,
|
||||
rel_size_migration: None,
|
||||
l2_lsn: None,
|
||||
}
|
||||
}
|
||||
|
||||
@@ -437,6 +445,7 @@ mod tests {
|
||||
last_aux_file_policy: None,
|
||||
import_pgdata: None,
|
||||
rel_size_migration: None,
|
||||
l2_lsn: None,
|
||||
};
|
||||
|
||||
let part = IndexPart::from_json_bytes(example.as_bytes()).unwrap();
|
||||
@@ -483,6 +492,7 @@ mod tests {
|
||||
last_aux_file_policy: None,
|
||||
import_pgdata: None,
|
||||
rel_size_migration: None,
|
||||
l2_lsn: None,
|
||||
};
|
||||
|
||||
let part = IndexPart::from_json_bytes(example.as_bytes()).unwrap();
|
||||
@@ -530,6 +540,7 @@ mod tests {
|
||||
last_aux_file_policy: None,
|
||||
import_pgdata: None,
|
||||
rel_size_migration: None,
|
||||
l2_lsn: None,
|
||||
};
|
||||
|
||||
let part = IndexPart::from_json_bytes(example.as_bytes()).unwrap();
|
||||
@@ -580,6 +591,7 @@ mod tests {
|
||||
last_aux_file_policy: None,
|
||||
import_pgdata: None,
|
||||
rel_size_migration: None,
|
||||
l2_lsn: None,
|
||||
};
|
||||
|
||||
let empty_layers_parsed = IndexPart::from_json_bytes(empty_layers_json.as_bytes()).unwrap();
|
||||
@@ -625,6 +637,7 @@ mod tests {
|
||||
last_aux_file_policy: None,
|
||||
import_pgdata: None,
|
||||
rel_size_migration: None,
|
||||
l2_lsn: None,
|
||||
};
|
||||
|
||||
let part = IndexPart::from_json_bytes(example.as_bytes()).unwrap();
|
||||
@@ -673,6 +686,7 @@ mod tests {
|
||||
last_aux_file_policy: None,
|
||||
import_pgdata: None,
|
||||
rel_size_migration: None,
|
||||
l2_lsn: None,
|
||||
};
|
||||
|
||||
let part = IndexPart::from_json_bytes(example.as_bytes()).unwrap();
|
||||
@@ -726,6 +740,7 @@ mod tests {
|
||||
last_aux_file_policy: Some(AuxFilePolicy::V2),
|
||||
import_pgdata: None,
|
||||
rel_size_migration: None,
|
||||
l2_lsn: None,
|
||||
};
|
||||
|
||||
let part = IndexPart::from_json_bytes(example.as_bytes()).unwrap();
|
||||
@@ -784,6 +799,7 @@ mod tests {
|
||||
last_aux_file_policy: Default::default(),
|
||||
import_pgdata: None,
|
||||
rel_size_migration: None,
|
||||
l2_lsn: None,
|
||||
};
|
||||
|
||||
let part = IndexPart::from_json_bytes(example.as_bytes()).unwrap();
|
||||
@@ -843,6 +859,7 @@ mod tests {
|
||||
last_aux_file_policy: Default::default(),
|
||||
import_pgdata: None,
|
||||
rel_size_migration: None,
|
||||
l2_lsn: None,
|
||||
};
|
||||
|
||||
let part = IndexPart::from_json_bytes(example.as_bytes()).unwrap();
|
||||
@@ -907,6 +924,7 @@ mod tests {
|
||||
archived_at: None,
|
||||
import_pgdata: None,
|
||||
rel_size_migration: None,
|
||||
l2_lsn: None,
|
||||
};
|
||||
|
||||
let part = IndexPart::from_json_bytes(example.as_bytes()).unwrap();
|
||||
@@ -984,6 +1002,7 @@ mod tests {
|
||||
idempotency_key: import_pgdata::index_part_format::IdempotencyKey::new("specified-by-client-218a5213-5044-4562-a28d-d024c5f057f5".to_string()),
|
||||
}))),
|
||||
rel_size_migration: None,
|
||||
l2_lsn: None,
|
||||
};
|
||||
|
||||
let part = IndexPart::from_json_bytes(example.as_bytes()).unwrap();
|
||||
@@ -1062,6 +1081,87 @@ mod tests {
|
||||
idempotency_key: import_pgdata::index_part_format::IdempotencyKey::new("specified-by-client-218a5213-5044-4562-a28d-d024c5f057f5".to_string()),
|
||||
}))),
|
||||
rel_size_migration: Some(RelSizeMigration::Legacy),
|
||||
l2_lsn: None,
|
||||
};
|
||||
|
||||
let part = IndexPart::from_json_bytes(example.as_bytes()).unwrap();
|
||||
assert_eq!(part, expected);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn v12_l2_lsn_is_parsed() {
|
||||
let example = r#"{
|
||||
"version": 12,
|
||||
"layer_metadata":{
|
||||
"000000000000000000000000000000000000-FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF__0000000001696070-00000000016960E9": { "file_size": 25600000 },
|
||||
"000000000000000000000000000000000000-FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF__00000000016B59D8-00000000016B5A51": { "file_size": 9007199254741001 }
|
||||
},
|
||||
"disk_consistent_lsn":"0/16960E8",
|
||||
"metadata": {
|
||||
"disk_consistent_lsn": "0/16960E8",
|
||||
"prev_record_lsn": "0/1696070",
|
||||
"ancestor_timeline": "e45a7f37d3ee2ff17dc14bf4f4e3f52e",
|
||||
"ancestor_lsn": "0/0",
|
||||
"latest_gc_cutoff_lsn": "0/1696070",
|
||||
"initdb_lsn": "0/1696070",
|
||||
"pg_version": 14
|
||||
},
|
||||
"gc_blocking": {
|
||||
"started_at": "2024-07-19T09:00:00.123",
|
||||
"reasons": ["DetachAncestor"]
|
||||
},
|
||||
"import_pgdata": {
|
||||
"V1": {
|
||||
"Done": {
|
||||
"idempotency_key": "specified-by-client-218a5213-5044-4562-a28d-d024c5f057f5",
|
||||
"started_at": "2024-11-13T09:23:42.123",
|
||||
"finished_at": "2024-11-13T09:42:23.123"
|
||||
}
|
||||
}
|
||||
},
|
||||
"rel_size_migration": "legacy",
|
||||
"l2_lsn": "0/16960E8"
|
||||
}"#;
|
||||
|
||||
let expected = IndexPart {
|
||||
version: 12,
|
||||
layer_metadata: HashMap::from([
|
||||
("000000000000000000000000000000000000-FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF__0000000001696070-00000000016960E9".parse().unwrap(), LayerFileMetadata {
|
||||
file_size: 25600000,
|
||||
generation: Generation::none(),
|
||||
shard: ShardIndex::unsharded()
|
||||
}),
|
||||
("000000000000000000000000000000000000-FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF__00000000016B59D8-00000000016B5A51".parse().unwrap(), LayerFileMetadata {
|
||||
file_size: 9007199254741001,
|
||||
generation: Generation::none(),
|
||||
shard: ShardIndex::unsharded()
|
||||
})
|
||||
]),
|
||||
disk_consistent_lsn: "0/16960E8".parse::<Lsn>().unwrap(),
|
||||
metadata: TimelineMetadata::new(
|
||||
Lsn::from_str("0/16960E8").unwrap(),
|
||||
Some(Lsn::from_str("0/1696070").unwrap()),
|
||||
Some(TimelineId::from_str("e45a7f37d3ee2ff17dc14bf4f4e3f52e").unwrap()),
|
||||
Lsn::INVALID,
|
||||
Lsn::from_str("0/1696070").unwrap(),
|
||||
Lsn::from_str("0/1696070").unwrap(),
|
||||
14,
|
||||
).with_recalculated_checksum().unwrap(),
|
||||
deleted_at: None,
|
||||
lineage: Default::default(),
|
||||
gc_blocking: Some(GcBlocking {
|
||||
started_at: parse_naive_datetime("2024-07-19T09:00:00.123000000"),
|
||||
reasons: enumset::EnumSet::from_iter([GcBlockingReason::DetachAncestor]),
|
||||
}),
|
||||
last_aux_file_policy: Default::default(),
|
||||
archived_at: None,
|
||||
import_pgdata: Some(import_pgdata::index_part_format::Root::V1(import_pgdata::index_part_format::V1::Done(import_pgdata::index_part_format::Done{
|
||||
started_at: parse_naive_datetime("2024-11-13T09:23:42.123000000"),
|
||||
finished_at: parse_naive_datetime("2024-11-13T09:42:23.123000000"),
|
||||
idempotency_key: import_pgdata::index_part_format::IdempotencyKey::new("specified-by-client-218a5213-5044-4562-a28d-d024c5f057f5".to_string()),
|
||||
}))),
|
||||
rel_size_migration: Some(RelSizeMigration::Legacy),
|
||||
l2_lsn: Some("0/16960E8".parse::<Lsn>().unwrap()),
|
||||
};
|
||||
|
||||
let part = IndexPart::from_json_bytes(example.as_bytes()).unwrap();
|
||||
|
||||
@@ -10,18 +10,26 @@ mod layer_desc;
|
||||
mod layer_name;
|
||||
pub mod merge_iterator;
|
||||
|
||||
use crate::config::PageServerConf;
|
||||
use crate::context::{AccessStatsBehavior, RequestContext};
|
||||
use bytes::Bytes;
|
||||
use futures::stream::FuturesUnordered;
|
||||
use futures::StreamExt;
|
||||
use pageserver_api::key::Key;
|
||||
use pageserver_api::keyspace::{KeySpace, KeySpaceRandomAccum};
|
||||
use pageserver_api::record::NeonWalRecord;
|
||||
use pageserver_api::value::Value;
|
||||
use std::cmp::{Ordering, Reverse};
|
||||
use std::cmp::Ordering;
|
||||
use std::collections::hash_map::Entry;
|
||||
use std::collections::{BinaryHeap, HashMap};
|
||||
use std::future::Future;
|
||||
use std::ops::Range;
|
||||
use std::pin::Pin;
|
||||
use std::sync::atomic::AtomicUsize;
|
||||
use std::sync::Arc;
|
||||
use std::time::{Duration, SystemTime, UNIX_EPOCH};
|
||||
use tracing::{trace, Instrument};
|
||||
use utils::sync::gate::GateGuard;
|
||||
|
||||
use utils::lsn::Lsn;
|
||||
|
||||
@@ -78,30 +86,151 @@ pub(crate) enum ValueReconstructSituation {
|
||||
Continue,
|
||||
}
|
||||
|
||||
/// Reconstruct data accumulated for a single key during a vectored get
|
||||
#[derive(Debug, Default, Clone)]
|
||||
pub(crate) struct VectoredValueReconstructState {
|
||||
pub(crate) records: Vec<(Lsn, NeonWalRecord)>,
|
||||
pub(crate) img: Option<(Lsn, Bytes)>,
|
||||
|
||||
situation: ValueReconstructSituation,
|
||||
/// On disk representation of a value loaded in a buffer
|
||||
#[derive(Debug)]
|
||||
pub(crate) enum OnDiskValue {
|
||||
/// Unencoded [`Value::Image`]
|
||||
RawImage(Bytes),
|
||||
/// Encoded [`Value`]. Can deserialize into an image or a WAL record
|
||||
WalRecordOrImage(Bytes),
|
||||
}
|
||||
|
||||
impl VectoredValueReconstructState {
|
||||
fn get_cached_lsn(&self) -> Option<Lsn> {
|
||||
self.img.as_ref().map(|img| img.0)
|
||||
/// Reconstruct data accumulated for a single key during a vectored get
|
||||
#[derive(Debug, Default)]
|
||||
pub(crate) struct VectoredValueReconstructState {
|
||||
pub(crate) on_disk_values: Vec<(Lsn, OnDiskValueIoWaiter)>,
|
||||
|
||||
pub(crate) situation: ValueReconstructSituation,
|
||||
}
|
||||
|
||||
#[derive(Debug)]
|
||||
pub(crate) struct OnDiskValueIoWaiter {
|
||||
rx: tokio::sync::oneshot::Receiver<OnDiskValueIoResult>,
|
||||
}
|
||||
|
||||
#[derive(Debug)]
|
||||
#[must_use]
|
||||
pub(crate) enum OnDiskValueIo {
|
||||
/// Traversal identified this IO as required to complete the vectored get.
|
||||
Required {
|
||||
num_active_ios: Arc<AtomicUsize>,
|
||||
tx: tokio::sync::oneshot::Sender<OnDiskValueIoResult>,
|
||||
},
|
||||
/// Sparse keyspace reads always read all the values for a given key,
|
||||
/// even though only the first value is needed.
|
||||
///
|
||||
/// This variant represents the unnecessary IOs for those values at lower LSNs
|
||||
/// that aren't needed, but are currently still being done.
|
||||
///
|
||||
/// The execution of unnecessary IOs was a pre-existing behavior before concurrent IO.
|
||||
/// We added this explicit representation here so that we can drop
|
||||
/// unnecessary IO results immediately, instead of buffering them in
|
||||
/// `oneshot` channels inside [`VectoredValueReconstructState`] until
|
||||
/// [`VectoredValueReconstructState::collect_pending_ios`] gets called.
|
||||
Unnecessary,
|
||||
}
|
||||
|
||||
type OnDiskValueIoResult = Result<OnDiskValue, std::io::Error>;
|
||||
|
||||
impl OnDiskValueIo {
|
||||
pub(crate) fn complete(self, res: OnDiskValueIoResult) {
|
||||
match self {
|
||||
OnDiskValueIo::Required { num_active_ios, tx } => {
|
||||
num_active_ios.fetch_sub(1, std::sync::atomic::Ordering::Release);
|
||||
let _ = tx.send(res);
|
||||
}
|
||||
OnDiskValueIo::Unnecessary => {
|
||||
// Nobody cared, see variant doc comment.
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl From<VectoredValueReconstructState> for ValueReconstructState {
|
||||
fn from(mut state: VectoredValueReconstructState) -> Self {
|
||||
// walredo expects the records to be descending in terms of Lsn
|
||||
state.records.sort_by_key(|(lsn, _)| Reverse(*lsn));
|
||||
#[derive(Debug, thiserror::Error)]
|
||||
pub(crate) enum WaitCompletionError {
|
||||
#[error("OnDiskValueIo was dropped without completing, likely the sidecar task panicked")]
|
||||
IoDropped,
|
||||
}
|
||||
|
||||
ValueReconstructState {
|
||||
records: state.records,
|
||||
img: state.img,
|
||||
impl OnDiskValueIoWaiter {
|
||||
pub(crate) async fn wait_completion(self) -> Result<OnDiskValueIoResult, WaitCompletionError> {
|
||||
// NB: for Unnecessary IOs, this method never gets called because we don't add them to `on_disk_values`.
|
||||
self.rx.await.map_err(|_| WaitCompletionError::IoDropped)
|
||||
}
|
||||
}
|
||||
|
||||
impl VectoredValueReconstructState {
|
||||
/// # Cancel-Safety
|
||||
///
|
||||
/// Technically fine to stop polling this future, but, the IOs will still
|
||||
/// be executed to completion by the sidecar task and hold on to / consume resources.
|
||||
/// Better not do it to make reasonsing about the system easier.
|
||||
pub(crate) async fn collect_pending_ios(
|
||||
self,
|
||||
) -> Result<ValueReconstructState, PageReconstructError> {
|
||||
use utils::bin_ser::BeSer;
|
||||
|
||||
let mut res = Ok(ValueReconstructState::default());
|
||||
|
||||
// We should try hard not to bail early, so that by the time we return from this
|
||||
// function, all IO for this value is done. It's not required -- we could totally
|
||||
// stop polling the IO futures in the sidecar task, they need to support that,
|
||||
// but just stopping to poll doesn't reduce the IO load on the disk. It's easier
|
||||
// to reason about the system if we just wait for all IO to complete, even if
|
||||
// we're no longer interested in the result.
|
||||
//
|
||||
// Revisit this when IO futures are replaced with a more sophisticated IO system
|
||||
// and an IO scheduler, where we know which IOs were submitted and which ones
|
||||
// just queued. Cf the comment on IoConcurrency::spawn_io.
|
||||
for (lsn, waiter) in self.on_disk_values {
|
||||
let value_recv_res = waiter
|
||||
.wait_completion()
|
||||
// we rely on the caller to poll us to completion, so this is not a bail point
|
||||
.await;
|
||||
// Force not bailing early by wrapping the code into a closure.
|
||||
#[allow(clippy::redundant_closure_call)]
|
||||
let _: () = (|| {
|
||||
match (&mut res, value_recv_res) {
|
||||
(Err(_), _) => {
|
||||
// We've already failed, no need to process more.
|
||||
}
|
||||
(Ok(_), Err(wait_err)) => {
|
||||
// This shouldn't happen - likely the sidecar task panicked.
|
||||
res = Err(PageReconstructError::Other(wait_err.into()));
|
||||
}
|
||||
(Ok(_), Ok(Err(err))) => {
|
||||
let err: std::io::Error = err;
|
||||
// TODO: returning IO error here will fail a compute query.
|
||||
// Probably not what we want, we're not doing `maybe_fatal_err`
|
||||
// in the IO futures.
|
||||
// But it's been like that for a long time, not changing it
|
||||
// as part of concurrent IO.
|
||||
// => https://github.com/neondatabase/neon/issues/10454
|
||||
res = Err(PageReconstructError::Other(err.into()));
|
||||
}
|
||||
(Ok(ok), Ok(Ok(OnDiskValue::RawImage(img)))) => {
|
||||
assert!(ok.img.is_none());
|
||||
ok.img = Some((lsn, img));
|
||||
}
|
||||
(Ok(ok), Ok(Ok(OnDiskValue::WalRecordOrImage(buf)))) => {
|
||||
match Value::des(&buf) {
|
||||
Ok(Value::WalRecord(rec)) => {
|
||||
ok.records.push((lsn, rec));
|
||||
}
|
||||
Ok(Value::Image(img)) => {
|
||||
assert!(ok.img.is_none());
|
||||
ok.img = Some((lsn, img));
|
||||
}
|
||||
Err(err) => {
|
||||
res = Err(PageReconstructError::Other(err.into()));
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
})();
|
||||
}
|
||||
|
||||
res
|
||||
}
|
||||
}
|
||||
|
||||
@@ -109,7 +238,7 @@ impl From<VectoredValueReconstructState> for ValueReconstructState {
|
||||
pub(crate) struct ValuesReconstructState {
|
||||
/// The keys will be removed after `get_vectored` completes. The caller outside `Timeline`
|
||||
/// should not expect to get anything from this hashmap.
|
||||
pub(crate) keys: HashMap<Key, Result<VectoredValueReconstructState, PageReconstructError>>,
|
||||
pub(crate) keys: HashMap<Key, VectoredValueReconstructState>,
|
||||
/// The keys which are already retrieved
|
||||
keys_done: KeySpaceRandomAccum,
|
||||
|
||||
@@ -119,27 +248,365 @@ pub(crate) struct ValuesReconstructState {
|
||||
// Statistics that are still accessible as a caller of `get_vectored_impl`.
|
||||
layers_visited: u32,
|
||||
delta_layers_visited: u32,
|
||||
|
||||
pub(crate) io_concurrency: IoConcurrency,
|
||||
num_active_ios: Arc<AtomicUsize>,
|
||||
}
|
||||
|
||||
/// The level of IO concurrency to be used on the read path
|
||||
///
|
||||
/// The desired end state is that we always do parallel IO.
|
||||
/// This struct and the dispatching in the impl will be removed once
|
||||
/// we've built enough confidence.
|
||||
pub(crate) enum IoConcurrency {
|
||||
Sequential,
|
||||
SidecarTask {
|
||||
task_id: usize,
|
||||
ios_tx: tokio::sync::mpsc::UnboundedSender<IoFuture>,
|
||||
},
|
||||
}
|
||||
|
||||
type IoFuture = Pin<Box<dyn Send + Future<Output = ()>>>;
|
||||
|
||||
pub(crate) enum SelectedIoConcurrency {
|
||||
Sequential,
|
||||
SidecarTask(GateGuard),
|
||||
}
|
||||
|
||||
impl std::fmt::Debug for IoConcurrency {
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||
match self {
|
||||
IoConcurrency::Sequential => write!(f, "Sequential"),
|
||||
IoConcurrency::SidecarTask { .. } => write!(f, "SidecarTask"),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl std::fmt::Debug for SelectedIoConcurrency {
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||
match self {
|
||||
SelectedIoConcurrency::Sequential => write!(f, "Sequential"),
|
||||
SelectedIoConcurrency::SidecarTask(_) => write!(f, "SidecarTask"),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl IoConcurrency {
|
||||
/// Force sequential IO. This is a temporary workaround until we have
|
||||
/// moved plumbing-through-the-call-stack
|
||||
/// of IoConcurrency into `RequestContextq.
|
||||
///
|
||||
/// DO NOT USE for new code.
|
||||
///
|
||||
/// Tracking issue: <https://github.com/neondatabase/neon/issues/10460>.
|
||||
pub(crate) fn sequential() -> Self {
|
||||
Self::spawn(SelectedIoConcurrency::Sequential)
|
||||
}
|
||||
|
||||
pub(crate) fn spawn_from_conf(
|
||||
conf: &'static PageServerConf,
|
||||
gate_guard: GateGuard,
|
||||
) -> IoConcurrency {
|
||||
use pageserver_api::config::GetVectoredConcurrentIo;
|
||||
let selected = match conf.get_vectored_concurrent_io {
|
||||
GetVectoredConcurrentIo::Sequential => SelectedIoConcurrency::Sequential,
|
||||
GetVectoredConcurrentIo::SidecarTask => SelectedIoConcurrency::SidecarTask(gate_guard),
|
||||
};
|
||||
Self::spawn(selected)
|
||||
}
|
||||
|
||||
pub(crate) fn spawn(io_concurrency: SelectedIoConcurrency) -> Self {
|
||||
match io_concurrency {
|
||||
SelectedIoConcurrency::Sequential => IoConcurrency::Sequential,
|
||||
SelectedIoConcurrency::SidecarTask(gate_guard) => {
|
||||
let (ios_tx, ios_rx) = tokio::sync::mpsc::unbounded_channel();
|
||||
static TASK_ID: AtomicUsize = AtomicUsize::new(0);
|
||||
let task_id = TASK_ID.fetch_add(1, std::sync::atomic::Ordering::Relaxed);
|
||||
// TODO: enrich the span with more context (tenant,shard,timeline) + (basebackup|pagestream|...)
|
||||
let span =
|
||||
tracing::info_span!(parent: None, "IoConcurrency_sidecar", task_id = task_id);
|
||||
trace!(task_id, "spawning sidecar task");
|
||||
tokio::spawn(async move {
|
||||
trace!("start");
|
||||
scopeguard::defer!{ trace!("end") };
|
||||
type IosRx = tokio::sync::mpsc::UnboundedReceiver<IoFuture>;
|
||||
enum State {
|
||||
Waiting {
|
||||
// invariant: is_empty(), but we recycle the allocation
|
||||
empty_futures: FuturesUnordered<IoFuture>,
|
||||
ios_rx: IosRx,
|
||||
},
|
||||
Executing {
|
||||
futures: FuturesUnordered<IoFuture>,
|
||||
ios_rx: IosRx,
|
||||
},
|
||||
ShuttingDown {
|
||||
futures: FuturesUnordered<IoFuture>,
|
||||
},
|
||||
}
|
||||
let mut state = State::Waiting {
|
||||
empty_futures: FuturesUnordered::new(),
|
||||
ios_rx,
|
||||
};
|
||||
loop {
|
||||
match state {
|
||||
State::Waiting {
|
||||
empty_futures,
|
||||
mut ios_rx,
|
||||
} => {
|
||||
assert!(empty_futures.is_empty());
|
||||
tokio::select! {
|
||||
fut = ios_rx.recv() => {
|
||||
if let Some(fut) = fut {
|
||||
trace!("received new io future");
|
||||
empty_futures.push(fut);
|
||||
state = State::Executing { futures: empty_futures, ios_rx };
|
||||
} else {
|
||||
state = State::ShuttingDown { futures: empty_futures }
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
State::Executing {
|
||||
mut futures,
|
||||
mut ios_rx,
|
||||
} => {
|
||||
tokio::select! {
|
||||
res = futures.next() => {
|
||||
trace!("io future completed");
|
||||
assert!(res.is_some());
|
||||
if futures.is_empty() {
|
||||
state = State::Waiting { empty_futures: futures, ios_rx};
|
||||
} else {
|
||||
state = State::Executing { futures, ios_rx };
|
||||
}
|
||||
}
|
||||
fut = ios_rx.recv() => {
|
||||
if let Some(fut) = fut {
|
||||
trace!("received new io future");
|
||||
futures.push(fut);
|
||||
state = State::Executing { futures, ios_rx};
|
||||
} else {
|
||||
state = State::ShuttingDown { futures };
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
State::ShuttingDown {
|
||||
mut futures,
|
||||
} => {
|
||||
trace!("shutting down");
|
||||
while let Some(()) = futures.next().await {
|
||||
trace!("io future completed (shutdown)");
|
||||
// drain
|
||||
}
|
||||
trace!("shutdown complete");
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
drop(gate_guard); // drop it right before we exit
|
||||
}.instrument(span));
|
||||
IoConcurrency::SidecarTask { task_id, ios_tx }
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub(crate) fn clone(&self) -> Self {
|
||||
match self {
|
||||
IoConcurrency::Sequential => IoConcurrency::Sequential,
|
||||
IoConcurrency::SidecarTask { task_id, ios_tx } => IoConcurrency::SidecarTask {
|
||||
task_id: *task_id,
|
||||
ios_tx: ios_tx.clone(),
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
/// Submit an IO to be executed in the background. DEADLOCK RISK, read the full doc string.
|
||||
///
|
||||
/// The IO is represented as an opaque future.
|
||||
/// IO completion must be handled inside the future, e.g., through a oneshot channel.
|
||||
///
|
||||
/// The API seems simple but there are multiple **pitfalls** involving
|
||||
/// DEADLOCK RISK.
|
||||
///
|
||||
/// First, there are no guarantees about the exexecution of the IO.
|
||||
/// It may be `await`ed in-place before this function returns.
|
||||
/// It may be polled partially by this task and handed off to another task to be finished.
|
||||
/// It may be polled and then dropped before returning ready.
|
||||
///
|
||||
/// This means that submitted IOs must not be interedependent.
|
||||
/// Interdependence may be through shared limited resources, e.g.,
|
||||
/// - VirtualFile file descriptor cache slot acquisition
|
||||
/// - tokio-epoll-uring slot
|
||||
///
|
||||
/// # Why current usage is safe from deadlocks
|
||||
///
|
||||
/// Textbook condition for a deadlock is that _all_ of the following be given
|
||||
/// - Mutual exclusion
|
||||
/// - Hold and wait
|
||||
/// - No preemption
|
||||
/// - Circular wait
|
||||
///
|
||||
/// The current usage is safe because:
|
||||
/// - Mutual exclusion: IO futures definitely use mutexes, no way around that for now
|
||||
/// - Hold and wait: IO futures currently hold two kinds of locks/resources while waiting
|
||||
/// for acquisition of other resources:
|
||||
/// - VirtualFile file descriptor cache slot tokio mutex
|
||||
/// - tokio-epoll-uring slot (uses tokio notify => wait queue, much like mutex)
|
||||
/// - No preemption: there's no taking-away of acquired locks/resources => given
|
||||
/// - Circular wait: this is the part of the condition that isn't met: all IO futures
|
||||
/// first acquire VirtualFile mutex, then tokio-epoll-uring slot.
|
||||
/// There is no IO future that acquires slot before VirtualFile.
|
||||
/// Hence there can be no circular waiting.
|
||||
/// Hence there cannot be a deadlock.
|
||||
///
|
||||
/// This is a very fragile situation and must be revisited whenver any code called from
|
||||
/// inside the IO futures is changed.
|
||||
///
|
||||
/// We will move away from opaque IO futures towards well-defined IOs at some point in
|
||||
/// the future when we have shipped this first version of concurrent IO to production
|
||||
/// and are ready to retire the Sequential mode which runs the futures in place.
|
||||
/// Right now, while brittle, the opaque IO approach allows us to ship the feature
|
||||
/// with minimal changes to the code and minimal changes to existing behavior in Sequential mode.
|
||||
///
|
||||
/// Also read the comment in `collect_pending_ios`.
|
||||
pub(crate) async fn spawn_io<F>(&mut self, fut: F)
|
||||
where
|
||||
F: std::future::Future<Output = ()> + Send + 'static,
|
||||
{
|
||||
match self {
|
||||
IoConcurrency::Sequential => fut.await,
|
||||
IoConcurrency::SidecarTask { ios_tx, .. } => {
|
||||
let fut = Box::pin(fut);
|
||||
// NB: experiments showed that doing an opportunistic poll of `fut` here was bad for throughput
|
||||
// while insignificant for latency.
|
||||
// It would make sense to revisit the tokio-epoll-uring API in the future such that we can try
|
||||
// a submission here, but never poll the future. That way, io_uring can make proccess while
|
||||
// the future sits in the ios_tx queue.
|
||||
match ios_tx.send(fut) {
|
||||
Ok(()) => {}
|
||||
Err(_) => {
|
||||
unreachable!("the io task must have exited, likely it panicked")
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
pub(crate) fn spawn_for_test() -> impl std::ops::DerefMut<Target = Self> {
|
||||
use std::ops::{Deref, DerefMut};
|
||||
use tracing::info;
|
||||
use utils::sync::gate::Gate;
|
||||
|
||||
// Spawn needs a Gate, give it one.
|
||||
struct Wrapper {
|
||||
inner: IoConcurrency,
|
||||
#[allow(dead_code)]
|
||||
gate: Box<Gate>,
|
||||
}
|
||||
impl Deref for Wrapper {
|
||||
type Target = IoConcurrency;
|
||||
|
||||
fn deref(&self) -> &Self::Target {
|
||||
&self.inner
|
||||
}
|
||||
}
|
||||
impl DerefMut for Wrapper {
|
||||
fn deref_mut(&mut self) -> &mut Self::Target {
|
||||
&mut self.inner
|
||||
}
|
||||
}
|
||||
let gate = Box::new(Gate::default());
|
||||
|
||||
// The default behavior when running Rust unit tests without any further
|
||||
// flags is to use the new behavior.
|
||||
// The CI uses the following environment variable to unit test both old
|
||||
// and new behavior.
|
||||
// NB: the Python regression & perf tests take the `else` branch
|
||||
// below and have their own defaults management.
|
||||
let selected = {
|
||||
// The pageserver_api::config type is unsuitable because it's internally tagged.
|
||||
#[derive(serde::Deserialize)]
|
||||
#[serde(rename_all = "kebab-case")]
|
||||
enum TestOverride {
|
||||
Sequential,
|
||||
SidecarTask,
|
||||
}
|
||||
use once_cell::sync::Lazy;
|
||||
static TEST_OVERRIDE: Lazy<TestOverride> = Lazy::new(|| {
|
||||
utils::env::var_serde_json_string(
|
||||
"NEON_PAGESERVER_UNIT_TEST_GET_VECTORED_CONCURRENT_IO",
|
||||
)
|
||||
.unwrap_or(TestOverride::SidecarTask)
|
||||
});
|
||||
|
||||
match *TEST_OVERRIDE {
|
||||
TestOverride::Sequential => SelectedIoConcurrency::Sequential,
|
||||
TestOverride::SidecarTask => {
|
||||
SelectedIoConcurrency::SidecarTask(gate.enter().expect("just created it"))
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
info!(?selected, "get_vectored_concurrent_io test");
|
||||
|
||||
Wrapper {
|
||||
inner: Self::spawn(selected),
|
||||
gate,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Make noise in case the [`ValuesReconstructState`] gets dropped while
|
||||
/// there are still IOs in flight.
|
||||
/// Refer to `collect_pending_ios` for why we prefer not to do that.
|
||||
//
|
||||
/// We log from here instead of from the sidecar task because the [`ValuesReconstructState`]
|
||||
/// gets dropped in a tracing span with more context.
|
||||
/// We repeat the sidecar tasks's `task_id` so we can correlate what we emit here with
|
||||
/// the logs / panic handler logs from the sidecar task, which also logs the `task_id`.
|
||||
impl Drop for ValuesReconstructState {
|
||||
fn drop(&mut self) {
|
||||
let num_active_ios = self
|
||||
.num_active_ios
|
||||
.load(std::sync::atomic::Ordering::Acquire);
|
||||
if num_active_ios == 0 {
|
||||
return;
|
||||
}
|
||||
let sidecar_task_id = match &self.io_concurrency {
|
||||
IoConcurrency::Sequential => None,
|
||||
IoConcurrency::SidecarTask { task_id, .. } => Some(*task_id),
|
||||
};
|
||||
tracing::warn!(
|
||||
num_active_ios,
|
||||
?sidecar_task_id,
|
||||
backtrace=%std::backtrace::Backtrace::force_capture(),
|
||||
"dropping ValuesReconstructState while some IOs have not been completed",
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
impl ValuesReconstructState {
|
||||
pub(crate) fn new() -> Self {
|
||||
pub(crate) fn new(io_concurrency: IoConcurrency) -> Self {
|
||||
Self {
|
||||
keys: HashMap::new(),
|
||||
keys_done: KeySpaceRandomAccum::new(),
|
||||
keys_with_image_coverage: None,
|
||||
layers_visited: 0,
|
||||
delta_layers_visited: 0,
|
||||
io_concurrency,
|
||||
num_active_ios: Arc::new(AtomicUsize::new(0)),
|
||||
}
|
||||
}
|
||||
|
||||
/// Associate a key with the error which it encountered and mark it as done
|
||||
pub(crate) fn on_key_error(&mut self, key: Key, err: PageReconstructError) {
|
||||
let previous = self.keys.insert(key, Err(err));
|
||||
if let Some(Ok(state)) = previous {
|
||||
if state.situation == ValueReconstructSituation::Continue {
|
||||
self.keys_done.add_key(key);
|
||||
}
|
||||
}
|
||||
/// Absolutely read [`IoConcurrency::spawn_io`] to learn about assumptions & pitfalls.
|
||||
pub(crate) async fn spawn_io<F>(&mut self, fut: F)
|
||||
where
|
||||
F: std::future::Future<Output = ()> + Send + 'static,
|
||||
{
|
||||
self.io_concurrency.spawn_io(fut).await;
|
||||
}
|
||||
|
||||
pub(crate) fn on_layer_visited(&mut self, layer: &ReadableLayer) {
|
||||
@@ -159,29 +626,6 @@ impl ValuesReconstructState {
|
||||
self.layers_visited
|
||||
}
|
||||
|
||||
/// This function is called after reading a keyspace from a layer.
|
||||
/// It checks if the read path has now moved past the cached Lsn for any keys.
|
||||
///
|
||||
/// Implementation note: We intentionally iterate over the keys for which we've
|
||||
/// already collected some reconstruct data. This avoids scaling complexity with
|
||||
/// the size of the search space.
|
||||
pub(crate) fn on_lsn_advanced(&mut self, keyspace: &KeySpace, advanced_to: Lsn) {
|
||||
for (key, value) in self.keys.iter_mut() {
|
||||
if !keyspace.contains(key) {
|
||||
continue;
|
||||
}
|
||||
|
||||
if let Ok(state) = value {
|
||||
if state.situation != ValueReconstructSituation::Complete
|
||||
&& state.get_cached_lsn() >= Some(advanced_to)
|
||||
{
|
||||
state.situation = ValueReconstructSituation::Complete;
|
||||
self.keys_done.add_key(*key);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// On hitting image layer, we can mark all keys in this range as done, because
|
||||
/// if the image layer does not contain a key, it is deleted/never added.
|
||||
pub(crate) fn on_image_layer_visited(&mut self, key_range: &Range<Key>) {
|
||||
@@ -199,70 +643,42 @@ impl ValuesReconstructState {
|
||||
///
|
||||
/// If the key is in the sparse keyspace (i.e., aux files), we do not track them in
|
||||
/// `key_done`.
|
||||
pub(crate) fn update_key(
|
||||
&mut self,
|
||||
key: &Key,
|
||||
lsn: Lsn,
|
||||
value: Value,
|
||||
) -> ValueReconstructSituation {
|
||||
let state = self
|
||||
.keys
|
||||
.entry(*key)
|
||||
.or_insert(Ok(VectoredValueReconstructState::default()));
|
||||
// TODO: rename this method & update description.
|
||||
pub(crate) fn update_key(&mut self, key: &Key, lsn: Lsn, completes: bool) -> OnDiskValueIo {
|
||||
let state = self.keys.entry(*key).or_default();
|
||||
|
||||
let is_sparse_key = key.is_sparse();
|
||||
if let Ok(state) = state {
|
||||
let key_done = match state.situation {
|
||||
ValueReconstructSituation::Complete => {
|
||||
if is_sparse_key {
|
||||
// Sparse keyspace might be visited multiple times because
|
||||
// we don't track unmapped keyspaces.
|
||||
return ValueReconstructSituation::Complete;
|
||||
} else {
|
||||
unreachable!()
|
||||
}
|
||||
}
|
||||
ValueReconstructSituation::Continue => match value {
|
||||
Value::Image(img) => {
|
||||
state.img = Some((lsn, img));
|
||||
true
|
||||
}
|
||||
Value::WalRecord(rec) => {
|
||||
debug_assert!(
|
||||
Some(lsn) > state.get_cached_lsn(),
|
||||
"Attempt to collect a record below cached LSN for walredo: {} < {}",
|
||||
lsn,
|
||||
state
|
||||
.get_cached_lsn()
|
||||
.expect("Assertion can only fire if a cached lsn is present")
|
||||
);
|
||||
|
||||
let will_init = rec.will_init();
|
||||
state.records.push((lsn, rec));
|
||||
will_init
|
||||
}
|
||||
},
|
||||
};
|
||||
|
||||
if key_done && state.situation == ValueReconstructSituation::Continue {
|
||||
state.situation = ValueReconstructSituation::Complete;
|
||||
if !is_sparse_key {
|
||||
self.keys_done.add_key(*key);
|
||||
let required_io = match state.situation {
|
||||
ValueReconstructSituation::Complete => {
|
||||
if is_sparse_key {
|
||||
// Sparse keyspace might be visited multiple times because
|
||||
// we don't track unmapped keyspaces.
|
||||
return OnDiskValueIo::Unnecessary;
|
||||
} else {
|
||||
unreachable!()
|
||||
}
|
||||
}
|
||||
ValueReconstructSituation::Continue => {
|
||||
self.num_active_ios
|
||||
.fetch_add(1, std::sync::atomic::Ordering::Release);
|
||||
let (tx, rx) = tokio::sync::oneshot::channel();
|
||||
state.on_disk_values.push((lsn, OnDiskValueIoWaiter { rx }));
|
||||
OnDiskValueIo::Required {
|
||||
tx,
|
||||
num_active_ios: Arc::clone(&self.num_active_ios),
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
state.situation
|
||||
} else {
|
||||
ValueReconstructSituation::Complete
|
||||
if completes && state.situation == ValueReconstructSituation::Continue {
|
||||
state.situation = ValueReconstructSituation::Complete;
|
||||
if !is_sparse_key {
|
||||
self.keys_done.add_key(*key);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Returns the Lsn at which this key is cached if one exists.
|
||||
/// The read path should go no further than this Lsn for the given key.
|
||||
pub(crate) fn get_cached_lsn(&self, key: &Key) -> Option<Lsn> {
|
||||
self.keys
|
||||
.get(key)
|
||||
.and_then(|k| k.as_ref().ok())
|
||||
.and_then(|state| state.get_cached_lsn())
|
||||
required_io
|
||||
}
|
||||
|
||||
/// Returns the key space describing the keys that have
|
||||
@@ -276,12 +692,6 @@ impl ValuesReconstructState {
|
||||
}
|
||||
}
|
||||
|
||||
impl Default for ValuesReconstructState {
|
||||
fn default() -> Self {
|
||||
Self::new()
|
||||
}
|
||||
}
|
||||
|
||||
/// A key that uniquely identifies a layer in a timeline
|
||||
#[derive(Debug, PartialEq, Eq, Clone, Hash)]
|
||||
pub(crate) enum LayerId {
|
||||
@@ -720,3 +1130,78 @@ impl<T: std::fmt::Display> std::fmt::Debug for RangeDisplayDebug<'_, T> {
|
||||
write!(f, "{}..{}", self.0.start, self.0.end)
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests2 {
|
||||
use pageserver_api::key::DBDIR_KEY;
|
||||
use tracing::info;
|
||||
|
||||
use super::*;
|
||||
use crate::tenant::storage_layer::IoConcurrency;
|
||||
|
||||
/// TODO: currently this test relies on manual visual inspection of the --no-capture output.
|
||||
/// Should look like so:
|
||||
/// ```text
|
||||
/// RUST_LOG=trace cargo nextest run --features testing --no-capture test_io_concurrency_noise
|
||||
/// running 1 test
|
||||
/// 2025-01-21T17:42:01.335679Z INFO get_vectored_concurrent_io test selected=SidecarTask
|
||||
/// 2025-01-21T17:42:01.335680Z TRACE spawning sidecar task task_id=0
|
||||
/// 2025-01-21T17:42:01.335937Z TRACE IoConcurrency_sidecar{task_id=0}: start
|
||||
/// 2025-01-21T17:42:01.335972Z TRACE IoConcurrency_sidecar{task_id=0}: received new io future
|
||||
/// 2025-01-21T17:42:01.335999Z INFO IoConcurrency_sidecar{task_id=0}: waiting for signal to complete IO
|
||||
/// 2025-01-21T17:42:01.336229Z WARN dropping ValuesReconstructState while some IOs have not been completed num_active_ios=1 sidecar_task_id=Some(0) backtrace= 0: <pageserver::tenant::storage_layer::ValuesReconstructState as core::ops::drop::Drop>::drop
|
||||
/// at ./src/tenant/storage_layer.rs:553:24
|
||||
/// 1: core::ptr::drop_in_place<pageserver::tenant::storage_layer::ValuesReconstructState>
|
||||
/// at /home/christian/.rustup/toolchains/1.84.0-x86_64-unknown-linux-gnu/lib/rustlib/src/rust/library/core/src/ptr/mod.rs:521:1
|
||||
/// 2: core::mem::drop
|
||||
/// at /home/christian/.rustup/toolchains/1.84.0-x86_64-unknown-linux-gnu/lib/rustlib/src/rust/library/core/src/mem/mod.rs:942:24
|
||||
/// 3: pageserver::tenant::storage_layer::tests2::test_io_concurrency_noise::{{closure}}
|
||||
/// at ./src/tenant/storage_layer.rs:1159:9
|
||||
/// ...
|
||||
/// 49: <unknown>
|
||||
/// 2025-01-21T17:42:01.452293Z INFO IoConcurrency_sidecar{task_id=0}: completing IO
|
||||
/// 2025-01-21T17:42:01.452357Z TRACE IoConcurrency_sidecar{task_id=0}: io future completed
|
||||
/// 2025-01-21T17:42:01.452473Z TRACE IoConcurrency_sidecar{task_id=0}: end
|
||||
/// test tenant::storage_layer::tests2::test_io_concurrency_noise ... ok
|
||||
///
|
||||
/// ```
|
||||
#[tokio::test]
|
||||
async fn test_io_concurrency_noise() {
|
||||
crate::tenant::harness::setup_logging();
|
||||
|
||||
let io_concurrency = IoConcurrency::spawn_for_test();
|
||||
match *io_concurrency {
|
||||
IoConcurrency::Sequential => {
|
||||
// This test asserts behavior in sidecar mode, doesn't make sense in sequential mode.
|
||||
return;
|
||||
}
|
||||
IoConcurrency::SidecarTask { .. } => {}
|
||||
}
|
||||
let mut reconstruct_state = ValuesReconstructState::new(io_concurrency.clone());
|
||||
|
||||
let (io_fut_is_waiting_tx, io_fut_is_waiting) = tokio::sync::oneshot::channel();
|
||||
let (do_complete_io, should_complete_io) = tokio::sync::oneshot::channel();
|
||||
let (io_fut_exiting_tx, io_fut_exiting) = tokio::sync::oneshot::channel();
|
||||
|
||||
let io = reconstruct_state.update_key(&DBDIR_KEY, Lsn(8), true);
|
||||
reconstruct_state
|
||||
.spawn_io(async move {
|
||||
info!("waiting for signal to complete IO");
|
||||
io_fut_is_waiting_tx.send(()).unwrap();
|
||||
should_complete_io.await.unwrap();
|
||||
info!("completing IO");
|
||||
io.complete(Ok(OnDiskValue::RawImage(Bytes::new())));
|
||||
io_fut_exiting_tx.send(()).unwrap();
|
||||
})
|
||||
.await;
|
||||
|
||||
io_fut_is_waiting.await.unwrap();
|
||||
|
||||
// this is what makes the noise
|
||||
drop(reconstruct_state);
|
||||
|
||||
do_complete_io.send(()).unwrap();
|
||||
|
||||
io_fut_exiting.await.unwrap();
|
||||
}
|
||||
}
|
||||
|
||||
@@ -41,13 +41,12 @@ use crate::tenant::vectored_blob_io::{
|
||||
BlobFlag, BufView, StreamingVectoredReadPlanner, VectoredBlobReader, VectoredRead,
|
||||
VectoredReadPlanner,
|
||||
};
|
||||
use crate::tenant::PageReconstructError;
|
||||
use crate::virtual_file::owned_buffers_io::io_buf_ext::{FullSlice, IoBufExt};
|
||||
use crate::virtual_file::IoBufferMut;
|
||||
use crate::virtual_file::{self, MaybeFatalIo, VirtualFile};
|
||||
use crate::TEMP_FILE_SUFFIX;
|
||||
use crate::{DELTA_FILE_MAGIC, STORAGE_FORMAT_VERSION};
|
||||
use anyhow::{anyhow, bail, ensure, Context, Result};
|
||||
use anyhow::{bail, ensure, Context, Result};
|
||||
use camino::{Utf8Path, Utf8PathBuf};
|
||||
use futures::StreamExt;
|
||||
use itertools::Itertools;
|
||||
@@ -60,7 +59,7 @@ use pageserver_api::shard::TenantShardId;
|
||||
use pageserver_api::value::Value;
|
||||
use rand::{distributions::Alphanumeric, Rng};
|
||||
use serde::{Deserialize, Serialize};
|
||||
use std::collections::VecDeque;
|
||||
use std::collections::{HashMap, VecDeque};
|
||||
use std::fs::File;
|
||||
use std::io::SeekFrom;
|
||||
use std::ops::Range;
|
||||
@@ -77,7 +76,10 @@ use utils::{
|
||||
lsn::Lsn,
|
||||
};
|
||||
|
||||
use super::{AsLayerDesc, LayerName, PersistentLayerDesc, ValuesReconstructState};
|
||||
use super::{
|
||||
AsLayerDesc, LayerName, OnDiskValue, OnDiskValueIo, PersistentLayerDesc, ResidentLayer,
|
||||
ValuesReconstructState,
|
||||
};
|
||||
|
||||
///
|
||||
/// Header stored in the beginning of the file
|
||||
@@ -226,7 +228,7 @@ pub struct DeltaLayerInner {
|
||||
index_start_blk: u32,
|
||||
index_root_blk: u32,
|
||||
|
||||
file: VirtualFile,
|
||||
file: Arc<VirtualFile>,
|
||||
file_id: FileId,
|
||||
|
||||
layer_key_range: Range<Key>,
|
||||
@@ -795,9 +797,11 @@ impl DeltaLayerInner {
|
||||
max_vectored_read_bytes: Option<MaxVectoredReadBytes>,
|
||||
ctx: &RequestContext,
|
||||
) -> anyhow::Result<Self> {
|
||||
let file = VirtualFile::open_v2(path, ctx)
|
||||
.await
|
||||
.context("open layer file")?;
|
||||
let file = Arc::new(
|
||||
VirtualFile::open_v2(path, ctx)
|
||||
.await
|
||||
.context("open layer file")?,
|
||||
);
|
||||
|
||||
let file_id = page_cache::next_file_id();
|
||||
|
||||
@@ -842,12 +846,11 @@ impl DeltaLayerInner {
|
||||
// Look up the keys in the provided keyspace and update
|
||||
// the reconstruct state with whatever is found.
|
||||
//
|
||||
// If the key is cached, go no further than the cached Lsn.
|
||||
//
|
||||
// Currently, the index is visited for each range, but this
|
||||
// can be further optimised to visit the index only once.
|
||||
pub(super) async fn get_values_reconstruct_data(
|
||||
&self,
|
||||
this: ResidentLayer,
|
||||
keyspace: KeySpace,
|
||||
lsn_range: Range<Lsn>,
|
||||
reconstruct_state: &mut ValuesReconstructState,
|
||||
@@ -875,17 +878,14 @@ impl DeltaLayerInner {
|
||||
data_end_offset,
|
||||
index_reader,
|
||||
planner,
|
||||
reconstruct_state,
|
||||
ctx,
|
||||
)
|
||||
.await
|
||||
.map_err(GetVectoredError::Other)?;
|
||||
|
||||
self.do_reads_and_update_state(reads, reconstruct_state, ctx)
|
||||
self.do_reads_and_update_state(this, reads, reconstruct_state, ctx)
|
||||
.await;
|
||||
|
||||
reconstruct_state.on_lsn_advanced(&keyspace, lsn_range.start);
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
@@ -895,7 +895,6 @@ impl DeltaLayerInner {
|
||||
data_end_offset: u64,
|
||||
index_reader: DiskBtreeReader<Reader, DELTA_KEY_SIZE>,
|
||||
mut planner: VectoredReadPlanner,
|
||||
reconstruct_state: &mut ValuesReconstructState,
|
||||
ctx: &RequestContext,
|
||||
) -> anyhow::Result<Vec<VectoredRead>>
|
||||
where
|
||||
@@ -922,10 +921,9 @@ impl DeltaLayerInner {
|
||||
assert!(key >= range.start);
|
||||
|
||||
let outside_lsn_range = !lsn_range.contains(&lsn);
|
||||
let below_cached_lsn = reconstruct_state.get_cached_lsn(&key) >= Some(lsn);
|
||||
|
||||
let flag = {
|
||||
if outside_lsn_range || below_cached_lsn {
|
||||
if outside_lsn_range {
|
||||
BlobFlag::Ignore
|
||||
} else if blob_ref.will_init() {
|
||||
BlobFlag::ReplaceAll
|
||||
@@ -994,98 +992,78 @@ impl DeltaLayerInner {
|
||||
|
||||
async fn do_reads_and_update_state(
|
||||
&self,
|
||||
this: ResidentLayer,
|
||||
reads: Vec<VectoredRead>,
|
||||
reconstruct_state: &mut ValuesReconstructState,
|
||||
ctx: &RequestContext,
|
||||
) {
|
||||
let vectored_blob_reader = VectoredBlobReader::new(&self.file);
|
||||
let mut ignore_key_with_err = None;
|
||||
|
||||
let max_vectored_read_bytes = self
|
||||
.max_vectored_read_bytes
|
||||
.expect("Layer is loaded with max vectored bytes config")
|
||||
.0
|
||||
.into();
|
||||
let buf_size = Self::get_min_read_buffer_size(&reads, max_vectored_read_bytes);
|
||||
let mut buf = Some(IoBufferMut::with_capacity(buf_size));
|
||||
|
||||
// Note that reads are processed in reverse order (from highest key+lsn).
|
||||
// This is the order that `ReconstructState` requires such that it can
|
||||
// track when a key is done.
|
||||
for read in reads.into_iter().rev() {
|
||||
let res = vectored_blob_reader
|
||||
.read_blobs(&read, buf.take().expect("Should have a buffer"), ctx)
|
||||
.await;
|
||||
|
||||
let blobs_buf = match res {
|
||||
Ok(blobs_buf) => blobs_buf,
|
||||
Err(err) => {
|
||||
let kind = err.kind();
|
||||
for (_, blob_meta) in read.blobs_at.as_slice() {
|
||||
reconstruct_state.on_key_error(
|
||||
blob_meta.key,
|
||||
PageReconstructError::Other(anyhow!(
|
||||
"Failed to read blobs from virtual file {}: {}",
|
||||
self.file.path(),
|
||||
kind
|
||||
)),
|
||||
);
|
||||
}
|
||||
|
||||
// We have "lost" the buffer since the lower level IO api
|
||||
// doesn't return the buffer on error. Allocate a new one.
|
||||
buf = Some(IoBufferMut::with_capacity(buf_size));
|
||||
|
||||
continue;
|
||||
}
|
||||
};
|
||||
let view = BufView::new_slice(&blobs_buf.buf);
|
||||
for meta in blobs_buf.blobs.iter().rev() {
|
||||
if Some(meta.meta.key) == ignore_key_with_err {
|
||||
continue;
|
||||
}
|
||||
let blob_read = meta.read(&view).await;
|
||||
let blob_read = match blob_read {
|
||||
Ok(buf) => buf,
|
||||
Err(e) => {
|
||||
reconstruct_state.on_key_error(
|
||||
meta.meta.key,
|
||||
PageReconstructError::Other(anyhow!(e).context(format!(
|
||||
"Failed to decompress blob from virtual file {}",
|
||||
self.file.path(),
|
||||
))),
|
||||
);
|
||||
|
||||
ignore_key_with_err = Some(meta.meta.key);
|
||||
continue;
|
||||
}
|
||||
};
|
||||
|
||||
let value = Value::des(&blob_read);
|
||||
|
||||
let value = match value {
|
||||
Ok(v) => v,
|
||||
Err(e) => {
|
||||
reconstruct_state.on_key_error(
|
||||
meta.meta.key,
|
||||
PageReconstructError::Other(anyhow!(e).context(format!(
|
||||
"Failed to deserialize blob from virtual file {}",
|
||||
self.file.path(),
|
||||
))),
|
||||
);
|
||||
|
||||
ignore_key_with_err = Some(meta.meta.key);
|
||||
continue;
|
||||
}
|
||||
};
|
||||
|
||||
// Invariant: once a key reaches [`ValueReconstructSituation::Complete`]
|
||||
// state, no further updates shall be made to it. The call below will
|
||||
// panic if the invariant is violated.
|
||||
reconstruct_state.update_key(&meta.meta.key, meta.meta.lsn, value);
|
||||
let mut ios: HashMap<(Key, Lsn), OnDiskValueIo> = Default::default();
|
||||
for (_, blob_meta) in read.blobs_at.as_slice().iter().rev() {
|
||||
let io = reconstruct_state.update_key(
|
||||
&blob_meta.key,
|
||||
blob_meta.lsn,
|
||||
blob_meta.will_init,
|
||||
);
|
||||
ios.insert((blob_meta.key, blob_meta.lsn), io);
|
||||
}
|
||||
|
||||
buf = Some(blobs_buf.buf);
|
||||
let read_extend_residency = this.clone();
|
||||
let read_from = self.file.clone();
|
||||
let read_ctx = ctx.attached_child();
|
||||
reconstruct_state
|
||||
.spawn_io(async move {
|
||||
let vectored_blob_reader = VectoredBlobReader::new(&read_from);
|
||||
let buf = IoBufferMut::with_capacity(buf_size);
|
||||
|
||||
let res = vectored_blob_reader.read_blobs(&read, buf, &read_ctx).await;
|
||||
match res {
|
||||
Ok(blobs_buf) => {
|
||||
let view = BufView::new_slice(&blobs_buf.buf);
|
||||
for meta in blobs_buf.blobs.iter().rev() {
|
||||
let io = ios.remove(&(meta.meta.key, meta.meta.lsn)).unwrap();
|
||||
|
||||
let blob_read = meta.read(&view).await;
|
||||
let blob_read = match blob_read {
|
||||
Ok(buf) => buf,
|
||||
Err(e) => {
|
||||
io.complete(Err(e));
|
||||
continue;
|
||||
}
|
||||
};
|
||||
|
||||
io.complete(Ok(OnDiskValue::WalRecordOrImage(
|
||||
blob_read.into_bytes(),
|
||||
)));
|
||||
}
|
||||
|
||||
assert!(ios.is_empty());
|
||||
}
|
||||
Err(err) => {
|
||||
for (_, sender) in ios {
|
||||
sender.complete(Err(std::io::Error::new(
|
||||
err.kind(),
|
||||
"vec read failed",
|
||||
)));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// keep layer resident until this IO is done; this spawned IO future generally outlives the
|
||||
// call to `self` / the `Arc<DownloadedLayer>` / the `ResidentLayer` that guarantees residency
|
||||
drop(read_extend_residency);
|
||||
})
|
||||
.await;
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1224,7 +1202,14 @@ impl DeltaLayerInner {
|
||||
let actionable = if let Some((key, lsn, start_offset)) = prev.take() {
|
||||
let end_offset = offset;
|
||||
|
||||
Some((BlobMeta { key, lsn }, start_offset..end_offset))
|
||||
Some((
|
||||
BlobMeta {
|
||||
key,
|
||||
lsn,
|
||||
will_init: false,
|
||||
},
|
||||
start_offset..end_offset,
|
||||
))
|
||||
} else {
|
||||
None
|
||||
};
|
||||
@@ -1560,7 +1545,9 @@ impl DeltaLayerIterator<'_> {
|
||||
let lsn = DeltaKey::extract_lsn_from_buf(&raw_key);
|
||||
let blob_ref = BlobRef(value);
|
||||
let offset = blob_ref.pos();
|
||||
if let Some(batch_plan) = self.planner.handle(key, lsn, offset) {
|
||||
if let Some(batch_plan) =
|
||||
self.planner.handle(key, lsn, offset, blob_ref.will_init())
|
||||
{
|
||||
break batch_plan;
|
||||
}
|
||||
} else {
|
||||
@@ -1673,7 +1660,6 @@ pub(crate) mod test {
|
||||
.expect("In memory disk finish should never fail");
|
||||
let reader = DiskBtreeReader::<_, DELTA_KEY_SIZE>::new(0, root_offset, disk);
|
||||
let planner = VectoredReadPlanner::new(100);
|
||||
let mut reconstruct_state = ValuesReconstructState::new();
|
||||
let ctx = RequestContext::new(TaskKind::UnitTest, DownloadBehavior::Error);
|
||||
|
||||
let keyspace = KeySpace {
|
||||
@@ -1691,7 +1677,6 @@ pub(crate) mod test {
|
||||
disk_offset,
|
||||
reader,
|
||||
planner,
|
||||
&mut reconstruct_state,
|
||||
&ctx,
|
||||
)
|
||||
.await
|
||||
@@ -1935,7 +1920,6 @@ pub(crate) mod test {
|
||||
);
|
||||
|
||||
let planner = VectoredReadPlanner::new(constants::MAX_VECTORED_READ_BYTES);
|
||||
let mut reconstruct_state = ValuesReconstructState::new();
|
||||
let keyspace = pick_random_keyspace(rng, &entries_meta.key_range);
|
||||
let data_end_offset = inner.index_start_blk as u64 * PAGE_SZ as u64;
|
||||
|
||||
@@ -1945,7 +1929,6 @@ pub(crate) mod test {
|
||||
data_end_offset,
|
||||
index_reader,
|
||||
planner,
|
||||
&mut reconstruct_state,
|
||||
&ctx,
|
||||
)
|
||||
.await?;
|
||||
|
||||
@@ -38,12 +38,11 @@ use crate::tenant::vectored_blob_io::{
|
||||
BlobFlag, BufView, StreamingVectoredReadPlanner, VectoredBlobReader, VectoredRead,
|
||||
VectoredReadPlanner,
|
||||
};
|
||||
use crate::tenant::PageReconstructError;
|
||||
use crate::virtual_file::owned_buffers_io::io_buf_ext::IoBufExt;
|
||||
use crate::virtual_file::IoBufferMut;
|
||||
use crate::virtual_file::{self, MaybeFatalIo, VirtualFile};
|
||||
use crate::{IMAGE_FILE_MAGIC, STORAGE_FORMAT_VERSION, TEMP_FILE_SUFFIX};
|
||||
use anyhow::{anyhow, bail, ensure, Context, Result};
|
||||
use anyhow::{bail, ensure, Context, Result};
|
||||
use bytes::Bytes;
|
||||
use camino::{Utf8Path, Utf8PathBuf};
|
||||
use hex;
|
||||
@@ -56,12 +55,13 @@ use pageserver_api::shard::{ShardIdentity, TenantShardId};
|
||||
use pageserver_api::value::Value;
|
||||
use rand::{distributions::Alphanumeric, Rng};
|
||||
use serde::{Deserialize, Serialize};
|
||||
use std::collections::VecDeque;
|
||||
use std::collections::{HashMap, VecDeque};
|
||||
use std::fs::File;
|
||||
use std::io::SeekFrom;
|
||||
use std::ops::Range;
|
||||
use std::os::unix::prelude::FileExt;
|
||||
use std::str::FromStr;
|
||||
use std::sync::Arc;
|
||||
use tokio::sync::OnceCell;
|
||||
use tokio_stream::StreamExt;
|
||||
use tracing::*;
|
||||
@@ -73,7 +73,10 @@ use utils::{
|
||||
};
|
||||
|
||||
use super::layer_name::ImageLayerName;
|
||||
use super::{AsLayerDesc, LayerName, PersistentLayerDesc, ValuesReconstructState};
|
||||
use super::{
|
||||
AsLayerDesc, LayerName, OnDiskValue, OnDiskValueIo, PersistentLayerDesc, ResidentLayer,
|
||||
ValuesReconstructState,
|
||||
};
|
||||
|
||||
///
|
||||
/// Header stored in the beginning of the file
|
||||
@@ -164,7 +167,7 @@ pub struct ImageLayerInner {
|
||||
key_range: Range<Key>,
|
||||
lsn: Lsn,
|
||||
|
||||
file: VirtualFile,
|
||||
file: Arc<VirtualFile>,
|
||||
file_id: FileId,
|
||||
|
||||
max_vectored_read_bytes: Option<MaxVectoredReadBytes>,
|
||||
@@ -391,9 +394,11 @@ impl ImageLayerInner {
|
||||
max_vectored_read_bytes: Option<MaxVectoredReadBytes>,
|
||||
ctx: &RequestContext,
|
||||
) -> anyhow::Result<Self> {
|
||||
let file = VirtualFile::open_v2(path, ctx)
|
||||
.await
|
||||
.context("open layer file")?;
|
||||
let file = Arc::new(
|
||||
VirtualFile::open_v2(path, ctx)
|
||||
.await
|
||||
.context("open layer file")?,
|
||||
);
|
||||
let file_id = page_cache::next_file_id();
|
||||
let block_reader = FileBlockReader::new(&file, file_id);
|
||||
let summary_blk = block_reader
|
||||
@@ -439,6 +444,7 @@ impl ImageLayerInner {
|
||||
// the reconstruct state with whatever is found.
|
||||
pub(super) async fn get_values_reconstruct_data(
|
||||
&self,
|
||||
this: ResidentLayer,
|
||||
keyspace: KeySpace,
|
||||
reconstruct_state: &mut ValuesReconstructState,
|
||||
ctx: &RequestContext,
|
||||
@@ -448,7 +454,7 @@ impl ImageLayerInner {
|
||||
.await
|
||||
.map_err(GetVectoredError::Other)?;
|
||||
|
||||
self.do_reads_and_update_state(reads, reconstruct_state, ctx)
|
||||
self.do_reads_and_update_state(this, reads, reconstruct_state, ctx)
|
||||
.await;
|
||||
|
||||
reconstruct_state.on_image_layer_visited(&self.key_range);
|
||||
@@ -570,6 +576,7 @@ impl ImageLayerInner {
|
||||
|
||||
async fn do_reads_and_update_state(
|
||||
&self,
|
||||
this: ResidentLayer,
|
||||
reads: Vec<VectoredRead>,
|
||||
reconstruct_state: &mut ValuesReconstructState,
|
||||
ctx: &RequestContext,
|
||||
@@ -580,8 +587,13 @@ impl ImageLayerInner {
|
||||
.0
|
||||
.into();
|
||||
|
||||
let vectored_blob_reader = VectoredBlobReader::new(&self.file);
|
||||
for read in reads.into_iter() {
|
||||
let mut ios: HashMap<(Key, Lsn), OnDiskValueIo> = Default::default();
|
||||
for (_, blob_meta) in read.blobs_at.as_slice() {
|
||||
let io = reconstruct_state.update_key(&blob_meta.key, blob_meta.lsn, true);
|
||||
ios.insert((blob_meta.key, blob_meta.lsn), io);
|
||||
}
|
||||
|
||||
let buf_size = read.size();
|
||||
|
||||
if buf_size > max_vectored_read_bytes {
|
||||
@@ -611,50 +623,51 @@ impl ImageLayerInner {
|
||||
}
|
||||
}
|
||||
|
||||
let buf = IoBufferMut::with_capacity(buf_size);
|
||||
let res = vectored_blob_reader.read_blobs(&read, buf, ctx).await;
|
||||
let read_extend_residency = this.clone();
|
||||
let read_from = self.file.clone();
|
||||
let read_ctx = ctx.attached_child();
|
||||
reconstruct_state
|
||||
.spawn_io(async move {
|
||||
let buf = IoBufferMut::with_capacity(buf_size);
|
||||
let vectored_blob_reader = VectoredBlobReader::new(&read_from);
|
||||
let res = vectored_blob_reader.read_blobs(&read, buf, &read_ctx).await;
|
||||
|
||||
match res {
|
||||
Ok(blobs_buf) => {
|
||||
let view = BufView::new_slice(&blobs_buf.buf);
|
||||
for meta in blobs_buf.blobs.iter() {
|
||||
let img_buf = meta.read(&view).await;
|
||||
match res {
|
||||
Ok(blobs_buf) => {
|
||||
let view = BufView::new_slice(&blobs_buf.buf);
|
||||
for meta in blobs_buf.blobs.iter() {
|
||||
let io: OnDiskValueIo =
|
||||
ios.remove(&(meta.meta.key, meta.meta.lsn)).unwrap();
|
||||
let img_buf = meta.read(&view).await;
|
||||
|
||||
let img_buf = match img_buf {
|
||||
Ok(img_buf) => img_buf,
|
||||
Err(e) => {
|
||||
reconstruct_state.on_key_error(
|
||||
meta.meta.key,
|
||||
PageReconstructError::Other(anyhow!(e).context(format!(
|
||||
"Failed to decompress blob from virtual file {}",
|
||||
self.file.path(),
|
||||
))),
|
||||
);
|
||||
let img_buf = match img_buf {
|
||||
Ok(img_buf) => img_buf,
|
||||
Err(e) => {
|
||||
io.complete(Err(e));
|
||||
continue;
|
||||
}
|
||||
};
|
||||
|
||||
continue;
|
||||
io.complete(Ok(OnDiskValue::RawImage(img_buf.into_bytes())));
|
||||
}
|
||||
};
|
||||
reconstruct_state.update_key(
|
||||
&meta.meta.key,
|
||||
self.lsn,
|
||||
Value::Image(img_buf.into_bytes()),
|
||||
);
|
||||
|
||||
assert!(ios.is_empty());
|
||||
}
|
||||
Err(err) => {
|
||||
for (_, io) in ios {
|
||||
io.complete(Err(std::io::Error::new(
|
||||
err.kind(),
|
||||
"vec read failed",
|
||||
)));
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
Err(err) => {
|
||||
let kind = err.kind();
|
||||
for (_, blob_meta) in read.blobs_at.as_slice() {
|
||||
reconstruct_state.on_key_error(
|
||||
blob_meta.key,
|
||||
PageReconstructError::from(anyhow!(
|
||||
"Failed to read blobs from virtual file {}: {}",
|
||||
self.file.path(),
|
||||
kind
|
||||
)),
|
||||
);
|
||||
}
|
||||
}
|
||||
};
|
||||

// keep layer resident until this IO is done; this spawned IO future generally outlives the
// call to `self` / the `Arc<DownloadedLayer>` / the `ResidentLayer` that guarantees residency
drop(read_extend_residency);
})
.await;
}
}

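The rewritten do_reads_and_update_state above registers one IO handle per (key, lsn) via update_key, then hands the actual vectored read to reconstruct_state.spawn_io() as a future that completes each handle and finally drops the ResidentLayer clone keeping the layer on disk. The sketch below only models that handoff with tokio oneshot channels; OnDiskValueIo, spawn_io and collect_pending_ios are the real pageserver names referenced in the hunk, while everything else here (IoHandle, IoCompletion, register_io) is invented for illustration.

use std::collections::HashMap;
use tokio::sync::oneshot;

type KeyLsn = (u64, u64); // stand-in for (Key, Lsn)

// Stand-in for OnDiskValueIo: the caller awaits the handle, the IO future completes it.
struct IoHandle(oneshot::Receiver<std::io::Result<Vec<u8>>>);
struct IoCompletion(oneshot::Sender<std::io::Result<Vec<u8>>>);

fn register_io() -> (IoHandle, IoCompletion) {
    let (tx, rx) = oneshot::channel();
    (IoHandle(rx), IoCompletion(tx))
}

#[tokio::main]
async fn main() {
    // 1. Register one handle per key up front, as update_key() does in the hunk above.
    let keys: Vec<KeyLsn> = vec![(1, 10), (2, 10)];
    let mut handles = Vec::new();
    let mut completions: HashMap<KeyLsn, IoCompletion> = HashMap::new();
    for k in &keys {
        let (handle, completion) = register_io();
        handles.push((*k, handle));
        completions.insert(*k, completion);
    }

    // 2. Spawn the IO; it owns the completions plus whatever keeps the layer resident.
    tokio::spawn(async move {
        for (k, completion) in completions {
            // A real implementation performs one vectored read and slices the buffer.
            let _ = completion.0.send(Ok(format!("blob for {k:?}").into_bytes()));
        }
        // A residency guard (e.g. a ResidentLayer clone) would be dropped here.
    });

    // 3. The caller later awaits the handles, analogous to collect_pending_ios().
    for (k, handle) in handles {
        let blob = handle.0.await.expect("io task completed").expect("read ok");
        println!("{k:?}: {} bytes", blob.len());
    }
}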
@@ -1069,6 +1082,7 @@ impl ImageLayerIterator<'_> {
|
||||
Key::from_slice(&raw_key[..KEY_SIZE]),
|
||||
self.image_layer.lsn,
|
||||
offset,
|
||||
true,
|
||||
) {
|
||||
break batch_plan;
|
||||
}
|
||||
|
||||
@@ -8,23 +8,22 @@ use crate::assert_u64_eq_usize::{u64_to_usize, U64IsUsize, UsizeIsU64};
|
||||
use crate::config::PageServerConf;
|
||||
use crate::context::{PageContentKind, RequestContext, RequestContextBuilder};
|
||||
use crate::tenant::ephemeral_file::EphemeralFile;
|
||||
use crate::tenant::storage_layer::{OnDiskValue, OnDiskValueIo};
|
||||
use crate::tenant::timeline::GetVectoredError;
|
||||
use crate::tenant::PageReconstructError;
|
||||
use crate::virtual_file::owned_buffers_io::io_buf_ext::IoBufExt;
|
||||
use crate::{l0_flush, page_cache};
|
||||
use anyhow::{anyhow, Result};
|
||||
use anyhow::Result;
|
||||
use camino::Utf8PathBuf;
|
||||
use pageserver_api::key::CompactKey;
|
||||
use pageserver_api::key::Key;
|
||||
use pageserver_api::keyspace::KeySpace;
|
||||
use pageserver_api::models::InMemoryLayerInfo;
|
||||
use pageserver_api::shard::TenantShardId;
|
||||
use pageserver_api::value::Value;
|
||||
use std::collections::{BTreeMap, HashMap};
|
||||
use std::sync::{Arc, OnceLock};
|
||||
use std::time::Instant;
|
||||
use tracing::*;
|
||||
use utils::{bin_ser::BeSer, id::TimelineId, lsn::Lsn, vec_map::VecMap};
|
||||
use utils::{id::TimelineId, lsn::Lsn, vec_map::VecMap};
|
||||
use wal_decoder::serialized_batch::{SerializedValueBatch, SerializedValueMeta, ValueMeta};
|
||||
// avoid binding to Write (conflicts with std::io::Write)
|
||||
// while being able to use std::fmt::Write's methods
|
||||
@@ -36,9 +35,7 @@ use std::sync::atomic::Ordering as AtomicOrdering;
|
||||
use std::sync::atomic::{AtomicU64, AtomicUsize};
|
||||
use tokio::sync::RwLock;
|
||||
|
||||
use super::{
|
||||
DeltaLayerWriter, PersistentLayerDesc, ValueReconstructSituation, ValuesReconstructState,
|
||||
};
|
||||
use super::{DeltaLayerWriter, PersistentLayerDesc, ValuesReconstructState};
|
||||
|
||||
pub(crate) mod vectored_dio_read;
|
||||
|
||||
@@ -415,10 +412,8 @@ impl InMemoryLayer {
|
||||
|
||||
// Look up the keys in the provided keyspace and update
|
||||
// the reconstruct state with whatever is found.
|
||||
//
|
||||
// If the key is cached, go no further than the cached Lsn.
|
||||
pub(crate) async fn get_values_reconstruct_data(
|
||||
&self,
|
||||
self: &Arc<InMemoryLayer>,
|
||||
keyspace: KeySpace,
|
||||
end_lsn: Lsn,
|
||||
reconstruct_state: &mut ValuesReconstructState,
|
||||
@@ -435,6 +430,9 @@ impl InMemoryLayer {
|
||||
read: vectored_dio_read::LogicalRead<Vec<u8>>,
|
||||
}
|
||||
let mut reads: HashMap<Key, Vec<ValueRead>> = HashMap::new();
|
||||
let mut ios: HashMap<(Key, Lsn), OnDiskValueIo> = Default::default();
|
||||
|
||||
let lsn_range = self.start_lsn..end_lsn;
|
||||
|
||||
for range in keyspace.ranges.iter() {
|
||||
for (key, vec_map) in inner
|
||||
@@ -442,12 +440,7 @@ impl InMemoryLayer {
|
||||
.range(range.start.to_compact()..range.end.to_compact())
|
||||
{
|
||||
let key = Key::from_compact(*key);
|
||||
let lsn_range = match reconstruct_state.get_cached_lsn(&key) {
|
||||
Some(cached_lsn) => (cached_lsn + 1)..end_lsn,
|
||||
None => self.start_lsn..end_lsn,
|
||||
};
|
||||
|
||||
let slice = vec_map.slice_range(lsn_range);
|
||||
let slice = vec_map.slice_range(lsn_range.clone());
|
||||
|
||||
for (entry_lsn, index_entry) in slice.iter().rev() {
|
||||
let IndexEntryUnpacked {
|
||||
@@ -463,55 +456,59 @@ impl InMemoryLayer {
|
||||
Vec::with_capacity(len as usize),
|
||||
),
|
||||
});
|
||||
|
||||
let io = reconstruct_state.update_key(&key, *entry_lsn, will_init);
|
||||
ios.insert((key, *entry_lsn), io);
|
||||
|
||||
if will_init {
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
drop(inner); // release the lock before we spawn the IO; if it's serial-mode IO we will deadlock on the read().await below
|
||||
let read_from = Arc::clone(self);
|
||||
let read_ctx = ctx.attached_child();
|
||||
reconstruct_state
|
||||
.spawn_io(async move {
|
||||
let inner = read_from.inner.read().await;
|
||||
let f = vectored_dio_read::execute(
|
||||
&inner.file,
|
||||
reads
|
||||
.iter()
|
||||
.flat_map(|(_, value_reads)| value_reads.iter().map(|v| &v.read)),
|
||||
&read_ctx,
|
||||
);
|
||||
send_future::SendFuture::send(f) // https://github.com/rust-lang/rust/issues/96865
|
||||
.await;
|
||||
|
||||
// Execute the reads.
|
||||
|
||||
let f = vectored_dio_read::execute(
|
||||
&inner.file,
|
||||
reads
|
||||
.iter()
|
||||
.flat_map(|(_, value_reads)| value_reads.iter().map(|v| &v.read)),
|
||||
&ctx,
|
||||
);
|
||||
send_future::SendFuture::send(f) // https://github.com/rust-lang/rust/issues/96865
|
||||
.await;
|
||||
|
||||
// Process results into the reconstruct state
|
||||
'next_key: for (key, value_reads) in reads {
|
||||
for ValueRead { entry_lsn, read } in value_reads {
|
||||
match read.into_result().expect("we run execute() above") {
|
||||
Err(e) => {
|
||||
reconstruct_state.on_key_error(key, PageReconstructError::from(anyhow!(e)));
|
||||
continue 'next_key;
|
||||
}
|
||||
Ok(value_buf) => {
|
||||
let value = Value::des(&value_buf);
|
||||
if let Err(e) = value {
|
||||
reconstruct_state
|
||||
.on_key_error(key, PageReconstructError::from(anyhow!(e)));
|
||||
continue 'next_key;
|
||||
for (key, value_reads) in reads {
|
||||
for ValueRead { entry_lsn, read } in value_reads {
|
||||
let io = ios.remove(&(key, entry_lsn)).expect("sender must exist");
|
||||
match read.into_result().expect("we run execute() above") {
|
||||
Err(e) => {
|
||||
io.complete(Err(std::io::Error::new(
|
||||
e.kind(),
|
||||
"dio vec read failed",
|
||||
)));
|
||||
}
|
||||
Ok(value_buf) => {
|
||||
io.complete(Ok(OnDiskValue::WalRecordOrImage(value_buf.into())));
|
||||
}
|
||||
}
|
||||
|
||||
let key_situation =
|
||||
reconstruct_state.update_key(&key, entry_lsn, value.unwrap());
|
||||
if key_situation == ValueReconstructSituation::Complete {
|
||||
// TODO: metric to see if we fetched more values than necessary
|
||||
continue 'next_key;
|
||||
}
|
||||
|
||||
// process the next value in the next iteration of the loop
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||

reconstruct_state.on_lsn_advanced(&keyspace, self.start_lsn);
assert!(ios.is_empty());

// Keep layer existent until this IO is done;
// This is kinda forced for InMemoryLayer because we need to inner.read() anyway,
// but it's less obvious for DeltaLayer and ImageLayer. So, keep this explicit
// drop for consistency among all three layer types.
drop(inner);
drop(read_from);
})
.await;

Ok(())
}
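One detail worth calling out in the hunk above is the lock ordering: the in-memory layer's inner lock is released before the read future is handed to spawn_io, and the future re-acquires it, because with sequential-mode IO the future runs inline and would otherwise deadlock on the read().await. A minimal sketch of that drop-then-reacquire shape with a tokio RwLock follows; the Layer struct and read_values function are stand-ins, not pageserver types.

use std::sync::Arc;
use tokio::sync::RwLock;

struct Layer {
    inner: RwLock<Vec<u8>>, // stand-in for the ephemeral file plus its index
}

async fn read_values(layer: Arc<Layer>) -> usize {
    // Plan the reads under the lock...
    let planned = {
        let inner = layer.inner.read().await;
        inner.len()
    }; // ...and release it here, before the IO future is built.

    // With sequential-mode IO the future below is awaited inline. Holding the first read
    // guard across that await could deadlock once a writer queues up, because tokio's
    // RwLock blocks new readers while a writer is waiting; hence the guard is dropped first.
    let read_from = Arc::clone(&layer);
    let io = async move {
        let inner = read_from.inner.read().await;
        inner.iter().take(planned).count()
    };
    io.await
}

#[tokio::main]
async fn main() {
    let layer = Arc::new(Layer {
        inner: RwLock::new(vec![1, 2, 3]),
    });
    assert_eq!(read_values(layer).await, 3);
}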
@@ -606,6 +603,7 @@ impl InMemoryLayer {
|
||||
// Write the batch to the file
|
||||
inner.file.write_raw(&raw, ctx).await?;
|
||||
let new_size = inner.file.len();
|
||||
|
||||
let expected_new_len = base_offset
|
||||
.checked_add(raw.len().into_u64())
|
||||
// write_raw would error if we were to overflow u64.
|
||||
|
||||
@@ -308,7 +308,7 @@ impl Layer {
|
||||
reconstruct_data: &mut ValuesReconstructState,
|
||||
ctx: &RequestContext,
|
||||
) -> Result<(), GetVectoredError> {
|
||||
let layer = self
|
||||
let downloaded = self
|
||||
.0
|
||||
.get_or_maybe_download(true, Some(ctx))
|
||||
.await
|
||||
@@ -318,11 +318,15 @@ impl Layer {
|
||||
}
|
||||
other => GetVectoredError::Other(anyhow::anyhow!(other)),
|
||||
})?;
|
||||
let this = ResidentLayer {
|
||||
downloaded: downloaded.clone(),
|
||||
owner: self.clone(),
|
||||
};
|
||||
|
||||
self.record_access(ctx);
|
||||
|
||||
layer
|
||||
.get_values_reconstruct_data(keyspace, lsn_range, reconstruct_data, &self.0, ctx)
|
||||
downloaded
|
||||
.get_values_reconstruct_data(this, keyspace, lsn_range, reconstruct_data, ctx)
|
||||
.instrument(tracing::debug_span!("get_values_reconstruct_data", layer=%self))
|
||||
.await
|
||||
.map_err(|err| match err {
|
||||
@@ -1768,25 +1772,25 @@ impl DownloadedLayer {
|
||||
|
||||
async fn get_values_reconstruct_data(
|
||||
&self,
|
||||
this: ResidentLayer,
|
||||
keyspace: KeySpace,
|
||||
lsn_range: Range<Lsn>,
|
||||
reconstruct_data: &mut ValuesReconstructState,
|
||||
owner: &Arc<LayerInner>,
|
||||
ctx: &RequestContext,
|
||||
) -> Result<(), GetVectoredError> {
|
||||
use LayerKind::*;
|
||||
|
||||
match self
|
||||
.get(owner, ctx)
|
||||
.get(&this.owner.0, ctx)
|
||||
.await
|
||||
.map_err(GetVectoredError::Other)?
|
||||
{
|
||||
Delta(d) => {
|
||||
d.get_values_reconstruct_data(keyspace, lsn_range, reconstruct_data, ctx)
|
||||
d.get_values_reconstruct_data(this, keyspace, lsn_range, reconstruct_data, ctx)
|
||||
.await
|
||||
}
|
||||
Image(i) => {
|
||||
i.get_values_reconstruct_data(keyspace, reconstruct_data, ctx)
|
||||
i.get_values_reconstruct_data(this, keyspace, reconstruct_data, ctx)
|
||||
.await
|
||||
}
|
||||
}
|
||||
|
||||
@@ -11,7 +11,10 @@ use super::failpoints::{Failpoint, FailpointKind};
|
||||
use super::*;
|
||||
use crate::{
|
||||
context::DownloadBehavior,
|
||||
tenant::{harness::test_img, storage_layer::LayerVisibilityHint},
|
||||
tenant::{
|
||||
harness::test_img,
|
||||
storage_layer::{IoConcurrency, LayerVisibilityHint},
|
||||
},
|
||||
};
|
||||
use crate::{task_mgr::TaskKind, tenant::harness::TenantHarness};
|
||||
|
||||
@@ -31,6 +34,7 @@ async fn smoke_test() {
|
||||
let span = h.span();
|
||||
let download_span = span.in_scope(|| tracing::info_span!("downloading", timeline_id = 1));
|
||||
let (tenant, _) = h.load().await;
|
||||
let io_concurrency = IoConcurrency::spawn_for_test();
|
||||
|
||||
let ctx = RequestContext::new(TaskKind::UnitTest, DownloadBehavior::Download);
|
||||
|
||||
@@ -89,7 +93,7 @@ async fn smoke_test() {
|
||||
};
|
||||
|
||||
let img_before = {
|
||||
let mut data = ValuesReconstructState::default();
|
||||
let mut data = ValuesReconstructState::new(io_concurrency.clone());
|
||||
layer
|
||||
.get_values_reconstruct_data(
|
||||
controlfile_keyspace.clone(),
|
||||
@@ -99,10 +103,13 @@ async fn smoke_test() {
|
||||
)
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
data.keys
|
||||
.remove(&CONTROLFILE_KEY)
|
||||
.expect("must be present")
|
||||
.expect("should not error")
|
||||
.collect_pending_ios()
|
||||
.await
|
||||
.expect("must not error")
|
||||
.img
|
||||
.take()
|
||||
.expect("tenant harness writes the control file")
|
||||
@@ -121,7 +128,7 @@ async fn smoke_test() {
|
||||
|
||||
// on accesses when the layer is evicted, it will automatically be downloaded.
|
||||
let img_after = {
|
||||
let mut data = ValuesReconstructState::default();
|
||||
let mut data = ValuesReconstructState::new(io_concurrency.clone());
|
||||
layer
|
||||
.get_values_reconstruct_data(
|
||||
controlfile_keyspace.clone(),
|
||||
@@ -135,7 +142,9 @@ async fn smoke_test() {
|
||||
data.keys
|
||||
.remove(&CONTROLFILE_KEY)
|
||||
.expect("must be present")
|
||||
.expect("should not error")
|
||||
.collect_pending_ios()
|
||||
.await
|
||||
.expect("must not error")
|
||||
.img
|
||||
.take()
|
||||
.expect("tenant harness writes the control file")
|
||||
|
||||
@@ -67,10 +67,9 @@ pub(crate) async fn concurrent_background_tasks_rate_limit_permit(
|
||||
) -> tokio::sync::SemaphorePermit<'static> {
|
||||
let _guard = crate::metrics::BACKGROUND_LOOP_SEMAPHORE.measure_acquisition(loop_kind);
|
||||
|
||||
pausable_failpoint!(
|
||||
"initial-size-calculation-permit-pause",
|
||||
loop_kind == BackgroundLoopKind::InitialLogicalSizeCalculation
|
||||
);
|
||||
if loop_kind == BackgroundLoopKind::InitialLogicalSizeCalculation {
|
||||
pausable_failpoint!("initial-size-calculation-permit-pause");
|
||||
}
|
||||
|
||||
// TODO: assert that we run on BACKGROUND_RUNTIME; requires tokio_unstable Handle::id();
|
||||
match CONCURRENT_BACKGROUND_TASKS.acquire().await {
|
||||
|
||||
@@ -20,12 +20,13 @@ use camino::Utf8Path;
|
||||
use chrono::{DateTime, Utc};
|
||||
use enumset::EnumSet;
|
||||
use fail::fail_point;
|
||||
use futures::{stream::FuturesUnordered, StreamExt};
|
||||
use handle::ShardTimelineId;
|
||||
use layer_manager::Shutdown;
|
||||
use offload::OffloadError;
|
||||
use once_cell::sync::Lazy;
|
||||
use pageserver_api::models::PageTraceEvent;
|
||||
use pageserver_api::{
|
||||
config::tenant_conf_defaults::DEFAULT_COMPACTION_THRESHOLD,
|
||||
key::{
|
||||
KEY_SIZE, METADATA_KEY_BEGIN_PREFIX, METADATA_KEY_END_PREFIX, NON_INHERITED_RANGE,
|
||||
SPARSE_RANGE,
|
||||
@@ -59,21 +60,16 @@ use utils::{
|
||||
};
|
||||
use wal_decoder::serialized_batch::{SerializedValueBatch, ValueMeta};
|
||||
|
||||
use std::sync::atomic::Ordering as AtomicOrdering;
|
||||
use std::sync::{Arc, Mutex, RwLock, Weak};
|
||||
use std::array;
|
||||
use std::cmp::{max, min};
|
||||
use std::collections::btree_map::Entry;
|
||||
use std::collections::{BTreeMap, HashMap, HashSet};
|
||||
use std::ops::{ControlFlow, Deref, Range};
|
||||
use std::sync::atomic::{AtomicBool, AtomicU64, Ordering as AtomicOrdering};
|
||||
use std::sync::{Arc, Mutex, OnceLock, RwLock, Weak};
|
||||
use std::time::{Duration, Instant, SystemTime};
|
||||
use std::{
|
||||
array,
|
||||
collections::{BTreeMap, HashMap, HashSet},
|
||||
sync::atomic::AtomicU64,
|
||||
};
|
||||
use std::{cmp::min, ops::ControlFlow};
|
||||
use std::{
|
||||
collections::btree_map::Entry,
|
||||
ops::{Deref, Range},
|
||||
};
|
||||
use std::{pin::pin, sync::OnceLock};
|
||||
|
||||
use crate::l0_flush::{self, L0FlushGlobalState};
|
||||
use crate::{
|
||||
aux_file::AuxFileSizeEstimator,
|
||||
page_service::TenantManagerTypes,
|
||||
@@ -81,7 +77,10 @@ use crate::{
|
||||
config::AttachmentMode,
|
||||
layer_map::{LayerMap, SearchResult},
|
||||
metadata::TimelineMetadata,
|
||||
storage_layer::{inmemory_layer::IndexEntry, PersistentLayerDesc},
|
||||
storage_layer::{
|
||||
inmemory_layer::IndexEntry, IoConcurrency, PersistentLayerDesc,
|
||||
ValueReconstructSituation,
|
||||
},
|
||||
},
|
||||
walingest::WalLagCooldown,
|
||||
walredo,
|
||||
@@ -102,10 +101,6 @@ use crate::{
|
||||
use crate::{
|
||||
disk_usage_eviction_task::EvictionCandidate, tenant::storage_layer::delta_layer::DeltaEntry,
|
||||
};
|
||||
use crate::{
|
||||
l0_flush::{self, L0FlushGlobalState},
|
||||
metrics::GetKind,
|
||||
};
|
||||
use crate::{
|
||||
metrics::ScanLatencyOngoingRecording, tenant::timeline::logical_size::CurrentLogicalSize,
|
||||
};
|
||||
@@ -149,19 +144,15 @@ use self::layer_manager::LayerManager;
|
||||
use self::logical_size::LogicalSize;
|
||||
use self::walreceiver::{WalReceiver, WalReceiverConf};
|
||||
|
||||
use super::config::TenantConf;
|
||||
use super::remote_timeline_client::index::IndexPart;
|
||||
use super::remote_timeline_client::RemoteTimelineClient;
|
||||
use super::secondary::heatmap::{HeatMapLayer, HeatMapTimeline};
|
||||
use super::storage_layer::{LayerFringe, LayerVisibilityHint, ReadableLayer};
|
||||
use super::upload_queue::NotInitialized;
|
||||
use super::GcError;
|
||||
use super::{
|
||||
config::TenantConf, storage_layer::LayerVisibilityHint, upload_queue::NotInitialized,
|
||||
MaybeOffloaded,
|
||||
};
|
||||
use super::{debug_assert_current_span_has_tenant_and_timeline_id, AttachedTenantConf};
|
||||
use super::{remote_timeline_client::index::IndexPart, storage_layer::LayerFringe};
|
||||
use super::{
|
||||
remote_timeline_client::RemoteTimelineClient, remote_timeline_client::WaitCompletionError,
|
||||
storage_layer::ReadableLayer,
|
||||
};
|
||||
use super::{
|
||||
secondary::heatmap::{HeatMapLayer, HeatMapTimeline},
|
||||
GcError,
|
||||
debug_assert_current_span_has_tenant_and_timeline_id, AttachedTenantConf, MaybeOffloaded,
|
||||
};
|
||||
|
||||
#[cfg(test)]
|
||||
@@ -407,6 +398,9 @@ pub struct Timeline {
|
||||
/// Timeline deletion will acquire both compaction and gc locks in whatever order.
|
||||
compaction_lock: tokio::sync::Mutex<()>,
|
||||
|
||||
/// If true, the last compaction failed.
|
||||
compaction_failed: AtomicBool,
|
||||
|
||||
/// Make sure we only have one running gc at a time.
|
||||
///
|
||||
/// Must only be taken in two places:
|
||||
@@ -1005,9 +999,7 @@ impl Timeline {
|
||||
ranges: vec![key..key.next()],
|
||||
};
|
||||
|
||||
// Initialise the reconstruct state for the key with the cache
|
||||
// entry returned above.
|
||||
let mut reconstruct_state = ValuesReconstructState::new();
|
||||
let mut reconstruct_state = ValuesReconstructState::new(IoConcurrency::sequential());
|
||||
|
||||
let vectored_res = self
|
||||
.get_vectored_impl(keyspace.clone(), lsn, &mut reconstruct_state, ctx)
|
||||
@@ -1050,6 +1042,7 @@ impl Timeline {
|
||||
&self,
|
||||
keyspace: KeySpace,
|
||||
lsn: Lsn,
|
||||
io_concurrency: super::storage_layer::IoConcurrency,
|
||||
ctx: &RequestContext,
|
||||
) -> Result<BTreeMap<Key, Result<Bytes, PageReconstructError>>, GetVectoredError> {
|
||||
if !lsn.is_valid() {
|
||||
@@ -1084,7 +1077,7 @@ impl Timeline {
|
||||
.get_vectored_impl(
|
||||
keyspace.clone(),
|
||||
lsn,
|
||||
&mut ValuesReconstructState::new(),
|
||||
&mut ValuesReconstructState::new(io_concurrency),
|
||||
ctx,
|
||||
)
|
||||
.await;
|
||||
@@ -1109,6 +1102,7 @@ impl Timeline {
|
||||
keyspace: KeySpace,
|
||||
lsn: Lsn,
|
||||
ctx: &RequestContext,
|
||||
io_concurrency: super::storage_layer::IoConcurrency,
|
||||
) -> Result<BTreeMap<Key, Result<Bytes, PageReconstructError>>, GetVectoredError> {
|
||||
if !lsn.is_valid() {
|
||||
return Err(GetVectoredError::InvalidLsn(lsn));
|
||||
@@ -1140,7 +1134,7 @@ impl Timeline {
|
||||
.get_vectored_impl(
|
||||
keyspace.clone(),
|
||||
lsn,
|
||||
&mut ValuesReconstructState::default(),
|
||||
&mut ValuesReconstructState::new(io_concurrency),
|
||||
ctx,
|
||||
)
|
||||
.await;
|
||||
@@ -1159,39 +1153,56 @@ impl Timeline {
|
||||
reconstruct_state: &mut ValuesReconstructState,
|
||||
ctx: &RequestContext,
|
||||
) -> Result<BTreeMap<Key, Result<Bytes, PageReconstructError>>, GetVectoredError> {
|
||||
let get_kind = if keyspace.total_raw_size() == 1 {
|
||||
GetKind::Singular
|
||||
} else {
|
||||
GetKind::Vectored
|
||||
let traversal_res: Result<(), _> = self
|
||||
.get_vectored_reconstruct_data(keyspace.clone(), lsn, reconstruct_state, ctx)
|
||||
.await;
|
||||
if let Err(err) = traversal_res {
|
||||
// Wait for all the spawned IOs to complete.
|
||||
// See comments on `spawn_io` inside `storage_layer` for more details.
|
||||
let mut collect_futs = std::mem::take(&mut reconstruct_state.keys)
|
||||
.into_values()
|
||||
.map(|state| state.collect_pending_ios())
|
||||
.collect::<FuturesUnordered<_>>();
|
||||
while collect_futs.next().await.is_some() {}
|
||||
return Err(err);
|
||||
};
|
||||
|
||||
let get_data_timer = crate::metrics::GET_RECONSTRUCT_DATA_TIME
|
||||
.for_get_kind(get_kind)
|
||||
.start_timer();
|
||||
self.get_vectored_reconstruct_data(keyspace.clone(), lsn, reconstruct_state, ctx)
|
||||
.await?;
|
||||
get_data_timer.stop_and_record();
|
||||
|
||||
let reconstruct_timer = crate::metrics::RECONSTRUCT_TIME
|
||||
.for_get_kind(get_kind)
|
||||
.start_timer();
|
||||
let mut results: BTreeMap<Key, Result<Bytes, PageReconstructError>> = BTreeMap::new();
|
||||
let layers_visited = reconstruct_state.get_layers_visited();
|
||||
|
||||
for (key, res) in std::mem::take(&mut reconstruct_state.keys) {
|
||||
match res {
|
||||
Err(err) => {
|
||||
results.insert(key, Err(err));
|
||||
}
|
||||
Ok(state) => {
|
||||
let state = ValueReconstructState::from(state);
|
||||
let futs = FuturesUnordered::new();
|
||||
for (key, state) in std::mem::take(&mut reconstruct_state.keys) {
|
||||
futs.push({
|
||||
let walredo_self = self.myself.upgrade().expect("&self method holds the arc");
|
||||
async move {
|
||||
assert_eq!(state.situation, ValueReconstructSituation::Complete);
|
||||
|
||||
let reconstruct_res = self.reconstruct_value(key, lsn, state).await;
|
||||
results.insert(key, reconstruct_res);
|
||||
let converted = match state.collect_pending_ios().await {
|
||||
Ok(ok) => ok,
|
||||
Err(err) => {
|
||||
return (key, Err(err));
|
||||
}
|
||||
};
|
||||
|
||||
// The walredo module expects the records to be descending in terms of Lsn.
|
||||
// And we submit the IOs in that order, so there should be no need to sort here.
|
||||
debug_assert!(
|
||||
converted
|
||||
.records
|
||||
.is_sorted_by_key(|(lsn, _)| std::cmp::Reverse(*lsn)),
|
||||
"{converted:?}"
|
||||
);
|
||||
|
||||
(
|
||||
key,
|
||||
walredo_self.reconstruct_value(key, lsn, converted).await,
|
||||
)
|
||||
}
|
||||
}
|
||||
});
|
||||
}
|
||||
reconstruct_timer.stop_and_record();

let results = futs
.collect::<BTreeMap<Key, Result<Bytes, PageReconstructError>>>()
.await;

// For aux file keys (v1 or v2) the vectored read path does not return an error
// when they're missing. Instead they are omitted from the resulting btree
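After get_vectored_reconstruct_data returns, the rewritten get_vectored_impl above awaits each key's pending IOs and runs walredo per key concurrently, collecting the unordered completions into a BTreeMap keyed by page key. A small self-contained sketch of that collect pattern; the per-key future body is a placeholder rather than the real collect_pending_ios/reconstruct_value calls.

use std::collections::BTreeMap;

use futures::stream::{FuturesUnordered, StreamExt};

#[tokio::main]
async fn main() {
    let keys = vec![3u64, 1, 2];

    let futs = FuturesUnordered::new();
    for key in keys {
        futs.push(async move {
            // Placeholder for collect_pending_ios() followed by reconstruct_value().
            let value: Result<String, String> = Ok(format!("page {key}"));
            (key, value)
        });
    }

    // Futures complete in any order; collecting into a BTreeMap keeps results keyed and sorted.
    let results: BTreeMap<u64, Result<String, String>> = futs.collect().await;
    assert_eq!(results.len(), 3);
    assert!(results.keys().copied().eq(1..=3));
}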
@@ -1684,13 +1695,27 @@ impl Timeline {
|
||||
return Ok(false);
|
||||
}
|
||||
|
||||
match self.get_compaction_algorithm_settings().kind {
|
||||
let result = match self.get_compaction_algorithm_settings().kind {
|
||||
CompactionAlgorithm::Tiered => {
|
||||
self.compact_tiered(cancel, ctx).await?;
|
||||
Ok(false)
|
||||
}
|
||||
CompactionAlgorithm::Legacy => self.compact_legacy(cancel, options, ctx).await,
|
||||
}
|
||||
};
|
||||
|
||||
// Signal compaction failure to avoid L0 flush stalls when it's broken.
|
||||
match result {
|
||||
Ok(_) => self.compaction_failed.store(false, AtomicOrdering::Relaxed),
|
||||
Err(CompactionError::Other(_)) => {
|
||||
self.compaction_failed.store(true, AtomicOrdering::Relaxed)
|
||||
}
|
||||
// Don't change the current value on offload failure or shutdown. We don't want to
|
||||
// abruptly stall nor resume L0 flushes in these cases.
|
||||
Err(CompactionError::Offload(_)) => {}
|
||||
Err(CompactionError::ShuttingDown) => {}
|
||||
};
|
||||
|
||||
result
|
||||
}
|
||||
|
||||
/// Mutate the timeline with a [`TimelineWriter`].
|
||||
@@ -2119,6 +2144,13 @@ impl Timeline {
|
||||
.unwrap_or(self.conf.default_tenant_conf.checkpoint_timeout)
|
||||
}
|
||||
|
||||
fn get_compaction_period(&self) -> Duration {
|
||||
let tenant_conf = self.tenant_conf.load().tenant_conf.clone();
|
||||
tenant_conf
|
||||
.compaction_period
|
||||
.unwrap_or(self.conf.default_tenant_conf.compaction_period)
|
||||
}
|
||||
|
||||
fn get_compaction_target_size(&self) -> u64 {
|
||||
let tenant_conf = self.tenant_conf.load();
|
||||
tenant_conf
|
||||
@@ -2135,6 +2167,91 @@ impl Timeline {
|
||||
.unwrap_or(self.conf.default_tenant_conf.compaction_threshold)
|
||||
}
|
||||
|
||||
fn get_l0_flush_delay_threshold(&self) -> Option<usize> {
|
||||
// Default to delay L0 flushes at 3x compaction threshold.
|
||||
const DEFAULT_L0_FLUSH_DELAY_FACTOR: usize = 3;
|
||||
|
||||
// If compaction is disabled, don't delay.
|
||||
if self.get_compaction_period() == Duration::ZERO {
|
||||
return None;
|
||||
}
|
||||
|
||||
let compaction_threshold = self.get_compaction_threshold();
|
||||
let tenant_conf = self.tenant_conf.load();
|
||||
let l0_flush_delay_threshold = tenant_conf
|
||||
.tenant_conf
|
||||
.l0_flush_delay_threshold
|
||||
.or(self.conf.default_tenant_conf.l0_flush_delay_threshold)
|
||||
.unwrap_or(DEFAULT_L0_FLUSH_DELAY_FACTOR * compaction_threshold);
|
||||
|
||||
// 0 disables backpressure.
|
||||
if l0_flush_delay_threshold == 0 {
|
||||
return None;
|
||||
}
|
||||
|
||||
// Clamp the flush delay threshold to the compaction threshold; it doesn't make sense to
|
||||
// backpressure flushes below this.
|
||||
// TODO: the tenant config should have validation to prevent this instead.
|
||||
debug_assert!(l0_flush_delay_threshold >= compaction_threshold);
|
||||
Some(max(l0_flush_delay_threshold, compaction_threshold))
|
||||
}
|
||||
|
||||
fn get_l0_flush_stall_threshold(&self) -> Option<usize> {
|
||||
// Default to stall L0 flushes at 5x compaction threshold.
|
||||
// TODO: stalls are temporarily disabled by default, see below.
|
||||
#[allow(unused)]
|
||||
const DEFAULT_L0_FLUSH_STALL_FACTOR: usize = 5;
|
||||
|
||||
// If compaction is disabled, don't stall.
|
||||
if self.get_compaction_period() == Duration::ZERO {
|
||||
return None;
|
||||
}
|
||||
|
||||
// If compaction is failing, don't stall and try to keep the tenant alive. This may not be a
|
||||
// good idea: read amp can grow unbounded, leading to terrible performance, and we may take
|
||||
// on unbounded compaction debt that can take a long time to fix once compaction comes back
|
||||
// online. At least we'll delay flushes, slowing down the growth and buying some time.
|
||||
if self.compaction_failed.load(AtomicOrdering::Relaxed) {
|
||||
return None;
|
||||
}
|
||||
|
||||
let compaction_threshold = self.get_compaction_threshold();
|
||||
let tenant_conf = self.tenant_conf.load();
|
||||
let l0_flush_stall_threshold = tenant_conf
|
||||
.tenant_conf
|
||||
.l0_flush_stall_threshold
|
||||
.or(self.conf.default_tenant_conf.l0_flush_stall_threshold);
|
||||
|
||||
// Tests sometimes set compaction_threshold=1 to generate lots of layer files, and don't
|
||||
// handle the 20-second compaction delay. Some (e.g. `test_backward_compatibility`) can't
|
||||
// easily adjust the L0 backpressure settings, so just disable stalls in this case.
|
||||
if cfg!(feature = "testing")
|
||||
&& compaction_threshold == 1
|
||||
&& l0_flush_stall_threshold.is_none()
|
||||
{
|
||||
return None;
|
||||
}
|
||||
|
||||
// Disable stalls by default. In ingest benchmarks, we see image compaction take >10
|
||||
// minutes, blocking L0 compaction, and we can't stall L0 flushes for that long.
|
||||
//
|
||||
// TODO: fix this.
|
||||
// let l0_flush_stall_threshold = l0_flush_stall_threshold
|
||||
// .unwrap_or(DEFAULT_L0_FLUSH_STALL_FACTOR * compaction_threshold);
|
||||
let l0_flush_stall_threshold = l0_flush_stall_threshold?;
|
||||

// 0 disables backpressure.
if l0_flush_stall_threshold == 0 {
return None;
}

// Clamp the flush stall threshold to the compaction threshold; it doesn't make sense to
// backpressure flushes below this.
// TODO: the tenant config should have validation to prevent this instead.
debug_assert!(l0_flush_stall_threshold >= compaction_threshold);
Some(max(l0_flush_stall_threshold, compaction_threshold))
}

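To make the two new knobs concrete: with the default compaction_threshold of 10, the delay threshold defaults to 3 × 10 = 30 L0 layers, the stall threshold stays disabled unless l0_flush_stall_threshold is set explicitly, a value of 0 turns either off, and both are clamped to at least the compaction threshold. The sketch below restates only that arithmetic as free functions; it deliberately omits the compaction-period and compaction-failed checks shown above.

// Free-standing restatement of the threshold math; not the real Timeline methods.
fn l0_flush_delay_threshold(compaction_threshold: usize, configured: Option<usize>) -> Option<usize> {
    const DEFAULT_L0_FLUSH_DELAY_FACTOR: usize = 3;
    let threshold = configured.unwrap_or(DEFAULT_L0_FLUSH_DELAY_FACTOR * compaction_threshold);
    if threshold == 0 {
        return None; // 0 disables backpressure
    }
    Some(threshold.max(compaction_threshold)) // never backpressure below the compaction threshold
}

fn l0_flush_stall_threshold(compaction_threshold: usize, configured: Option<usize>) -> Option<usize> {
    // Stalls stay disabled unless explicitly configured (see the TODO above).
    let threshold = configured?;
    if threshold == 0 {
        return None;
    }
    Some(threshold.max(compaction_threshold))
}

fn main() {
    assert_eq!(l0_flush_delay_threshold(10, None), Some(30)); // default: 3x compaction threshold
    assert_eq!(l0_flush_delay_threshold(10, Some(5)), Some(10)); // clamped up to the compaction threshold
    assert_eq!(l0_flush_stall_threshold(10, None), None); // stalls off by default
    assert_eq!(l0_flush_stall_threshold(10, Some(50)), Some(50));
}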
fn get_image_creation_threshold(&self) -> usize {
|
||||
let tenant_conf = self.tenant_conf.load();
|
||||
tenant_conf
|
||||
@@ -2371,6 +2488,7 @@ impl Timeline {
|
||||
gate: Gate::default(),
|
||||
|
||||
compaction_lock: tokio::sync::Mutex::default(),
|
||||
compaction_failed: AtomicBool::default(),
|
||||
gc_lock: tokio::sync::Mutex::default(),
|
||||
|
||||
standby_horizon: AtomicLsn::new(0),
|
||||
@@ -2804,12 +2922,10 @@ impl Timeline {
|
||||
"initial size calculation",
|
||||
// NB: don't log errors here, task_mgr will do that.
|
||||
async move {
|
||||
let cancel = task_mgr::shutdown_token();
|
||||
self_clone
|
||||
.initial_logical_size_calculation_task(
|
||||
initial_part_end,
|
||||
cancel_wait_for_background_loop_concurrency_limit_semaphore,
|
||||
cancel,
|
||||
background_ctx,
|
||||
)
|
||||
.await;
|
||||
@@ -2819,11 +2935,21 @@ impl Timeline {
|
||||
);
|
||||
}
|
||||
|
||||
/// # Cancellation
|
||||
///
|
||||
/// This method is sensitive to `Timeline::cancel`.
|
||||
///
|
||||
/// It is _not_ sensitive to task_mgr::shutdown_token().
|
||||
///
|
||||
/// # Cancel-Safety
|
||||
///
|
||||
/// It does Timeline IO, hence this should be polled to completion because
|
||||
/// we could be leaving in-flight IOs behind, which is safe, but annoying
|
||||
/// to reason about.
|
||||
async fn initial_logical_size_calculation_task(
|
||||
self: Arc<Self>,
|
||||
initial_part_end: Lsn,
|
||||
skip_concurrency_limiter: CancellationToken,
|
||||
cancel: CancellationToken,
|
||||
background_ctx: RequestContext,
|
||||
) {
|
||||
scopeguard::defer! {
|
||||
@@ -2836,7 +2962,6 @@ impl Timeline {
|
||||
let self_ref = &self;
|
||||
let skip_concurrency_limiter = &skip_concurrency_limiter;
|
||||
async move {
|
||||
let cancel = task_mgr::shutdown_token();
|
||||
let wait_for_permit = super::tasks::concurrent_background_tasks_rate_limit_permit(
|
||||
BackgroundLoopKind::InitialLogicalSizeCalculation,
|
||||
background_ctx,
|
||||
@@ -2850,9 +2975,6 @@ impl Timeline {
|
||||
_ = self_ref.cancel.cancelled() => {
|
||||
return Err(CalculateLogicalSizeError::Cancelled);
|
||||
}
|
||||
_ = cancel.cancelled() => {
|
||||
return Err(CalculateLogicalSizeError::Cancelled);
|
||||
},
|
||||
() = skip_concurrency_limiter.cancelled() => {
|
||||
// Some action that is part of an end user interaction requested logical size
|
||||
// => break out of the rate limit
|
||||
@@ -2869,6 +2991,14 @@ impl Timeline {
|
||||
crate::metrics::initial_logical_size::START_CALCULATION.retry(circumstances)
|
||||
};
|
||||
|
||||
let io_concurrency = IoConcurrency::spawn_from_conf(
|
||||
self_ref.conf,
|
||||
self_ref
|
||||
.gate
|
||||
.enter()
|
||||
.map_err(|_| CalculateLogicalSizeError::Cancelled)?,
|
||||
);
|
||||
|
||||
let calculated_size = self_ref
|
||||
.logical_size_calculation_task(
|
||||
initial_part_end,
|
||||
@@ -2878,7 +3008,11 @@ impl Timeline {
|
||||
.await?;
|
||||
|
||||
self_ref
|
||||
.trigger_aux_file_size_computation(initial_part_end, background_ctx)
|
||||
.trigger_aux_file_size_computation(
|
||||
initial_part_end,
|
||||
background_ctx,
|
||||
io_concurrency,
|
||||
)
|
||||
.await?;
|
||||
|
||||
// TODO: add aux file size to logical size
|
||||
@@ -2911,22 +3045,18 @@ impl Timeline {
|
||||
)
|
||||
.expect("10min < 1hour"),
|
||||
);
|
||||
tokio::time::sleep(sleep_duration).await;
|
||||
tokio::select! {
|
||||
_ = tokio::time::sleep(sleep_duration) => {}
|
||||
_ = self.cancel.cancelled() => return ControlFlow::Break(()),
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
let (calculated_size, metrics_guard) = tokio::select! {
|
||||
res = retrying => {
|
||||
match res {
|
||||
ControlFlow::Continue(calculated_size) => calculated_size,
|
||||
ControlFlow::Break(()) => return,
|
||||
}
|
||||
}
|
||||
_ = cancel.cancelled() => {
|
||||
return;
|
||||
}
|
||||
let (calculated_size, metrics_guard) = match retrying.await {
|
||||
ControlFlow::Continue(calculated_size) => calculated_size,
|
||||
ControlFlow::Break(()) => return,
|
||||
};
|
||||
|
||||
// we cannot query current_logical_size.current_size() to know the current
|
||||
@@ -2982,9 +3112,6 @@ impl Timeline {
|
||||
receiver
|
||||
}
|
||||
|
||||
/// # Cancel-Safety
|
||||
///
|
||||
/// This method is cancellation-safe.
|
||||
#[instrument(skip_all)]
|
||||
async fn logical_size_calculation_task(
|
||||
self: &Arc<Self>,
|
||||
@@ -3002,32 +3129,13 @@ impl Timeline {
|
||||
.enter()
|
||||
.map_err(|_| CalculateLogicalSizeError::Cancelled)?;
|
||||
|
||||
let self_calculation = Arc::clone(self);
|
||||
|
||||
let mut calculation = pin!(async {
|
||||
let ctx = ctx.attached_child();
|
||||
self_calculation
|
||||
.calculate_logical_size(lsn, cause, &guard, &ctx)
|
||||
.await
|
||||
});
|
||||
|
||||
tokio::select! {
|
||||
res = &mut calculation => { res }
|
||||
_ = self.cancel.cancelled() => {
|
||||
debug!("cancelling logical size calculation for timeline shutdown");
|
||||
calculation.await
|
||||
}
|
||||
}
|
||||
self.calculate_logical_size(lsn, cause, &guard, ctx).await
|
||||
}
|
||||
|
||||
/// Calculate the logical size of the database at the latest LSN.
|
||||
///
|
||||
/// NOTE: counted incrementally, includes ancestors. This can be a slow operation,
|
||||
/// especially if we need to download remote layers.
|
||||
///
|
||||
/// # Cancel-Safety
|
||||
///
|
||||
/// This method is cancellation-safe.
|
||||
async fn calculate_logical_size(
|
||||
&self,
|
||||
up_to_lsn: Lsn,
|
||||
@@ -3040,7 +3148,10 @@ impl Timeline {
|
||||
self.timeline_id, up_to_lsn
|
||||
);
|
||||
|
||||
pausable_failpoint!("timeline-calculate-logical-size-pause");
|
||||
if let Err(()) = pausable_failpoint!("timeline-calculate-logical-size-pause", &self.cancel)
|
||||
{
|
||||
return Err(CalculateLogicalSizeError::Cancelled);
|
||||
}
|
||||
|
||||
// See if we've already done the work for initial size calculation.
|
||||
// This is a short-cut for timelines that are mostly unused.
|
||||
@@ -3593,6 +3704,12 @@ impl Timeline {
|
||||
mut layer_flush_start_rx: tokio::sync::watch::Receiver<(u64, Lsn)>,
|
||||
ctx: &RequestContext,
|
||||
) {
|
||||
// Subscribe to L0 delta layer updates, for compaction backpressure.
|
||||
let mut watch_l0 = match self.layers.read().await.layer_map() {
|
||||
Ok(lm) => lm.watch_level0_deltas(),
|
||||
Err(Shutdown) => return,
|
||||
};
|
||||
|
||||
info!("started flush loop");
|
||||
loop {
|
||||
tokio::select! {
|
||||
@@ -3617,43 +3734,68 @@ impl Timeline {
|
||||
return;
|
||||
}
|
||||
|
||||
let timer = self.metrics.flush_time_histo.start_timer();
|
||||
// Break to notify potential waiters as soon as we've flushed the requested LSN. If
|
||||
// more requests have arrived in the meanwhile, we'll resume flushing afterwards.
|
||||
if flushed_to_lsn >= frozen_to_lsn {
|
||||
break Ok(());
|
||||
}
|
||||
|
||||
let num_frozen_layers;
|
||||
let frozen_layer_total_size;
|
||||
let layer_to_flush = {
|
||||
let guard = self.layers.read().await;
|
||||
let Ok(lm) = guard.layer_map() else {
|
||||
// Fetch the next layer to flush, if any.
|
||||
let (layer, l0_count, frozen_count, frozen_size) = {
|
||||
let layers = self.layers.read().await;
|
||||
let Ok(lm) = layers.layer_map() else {
|
||||
info!("dropping out of flush loop for timeline shutdown");
|
||||
return;
|
||||
};
|
||||
num_frozen_layers = lm.frozen_layers.len();
|
||||
frozen_layer_total_size = lm
|
||||
let l0_count = lm.level0_deltas().len();
|
||||
let frozen_count = lm.frozen_layers.len();
|
||||
let frozen_size: u64 = lm
|
||||
.frozen_layers
|
||||
.iter()
|
||||
.map(|l| l.estimated_in_mem_size())
|
||||
.sum::<u64>();
|
||||
lm.frozen_layers.front().cloned()
|
||||
// drop 'layers' lock to allow concurrent reads and writes
|
||||
.sum();
|
||||
let layer = lm.frozen_layers.front().cloned();
|
||||
(layer, l0_count, frozen_count, frozen_size)
|
||||
// drop 'layers' lock
|
||||
};
|
||||
let Some(layer_to_flush) = layer_to_flush else {
|
||||
let Some(layer) = layer else {
|
||||
break Ok(());
|
||||
};
|
||||
if num_frozen_layers
|
||||
> std::cmp::max(
|
||||
self.get_compaction_threshold(),
|
||||
DEFAULT_COMPACTION_THRESHOLD,
|
||||
)
|
||||
&& frozen_layer_total_size >= /* 128 MB */ 128000000
|
||||
{
|
||||
tracing::warn!(
|
||||
"too many frozen layers: {num_frozen_layers} layers with estimated in-mem size of {frozen_layer_total_size} bytes",
|
||||
);
|
||||
}
|
||||
match self.flush_frozen_layer(layer_to_flush, ctx).await {
|
||||
Ok(this_layer_to_lsn) => {
|
||||
flushed_to_lsn = std::cmp::max(flushed_to_lsn, this_layer_to_lsn);
|
||||
|
||||
// Stall flushes to backpressure if compaction can't keep up. This is propagated up
|
||||
// to WAL ingestion by having ephemeral layer rolls wait for flushes.
|
||||
//
|
||||
// NB: the compaction loop only checks `compaction_threshold` every 20 seconds, so
|
||||
// we can end up stalling before compaction even starts. Consider making it more
|
||||
// responsive (e.g. via `watch_level0_deltas`).
|
||||
if let Some(stall_threshold) = self.get_l0_flush_stall_threshold() {
|
||||
if l0_count >= stall_threshold {
|
||||
warn!(
|
||||
"stalling layer flushes for compaction backpressure at {l0_count} \
|
||||
L0 layers ({frozen_count} frozen layers with {frozen_size} bytes)"
|
||||
);
|
||||
let stall_timer = self
|
||||
.metrics
|
||||
.flush_delay_histo
|
||||
.start_timer()
|
||||
.record_on_drop();
|
||||
tokio::select! {
|
||||
result = watch_l0.wait_for(|l0| *l0 < stall_threshold) => {
|
||||
if let Ok(l0) = result.as_deref() {
|
||||
let delay = stall_timer.elapsed().as_secs_f64();
|
||||
info!("resuming layer flushes at {l0} L0 layers after {delay:.3}s");
|
||||
}
|
||||
},
|
||||
_ = self.cancel.cancelled() => {},
|
||||
}
|
||||
continue; // check again
|
||||
}
|
||||
}
|
||||
|
||||
// Flush the layer.
|
||||
let flush_timer = self.metrics.flush_time_histo.start_timer();
|
||||
match self.flush_frozen_layer(layer, ctx).await {
|
||||
Ok(layer_lsn) => flushed_to_lsn = max(flushed_to_lsn, layer_lsn),
|
||||
Err(FlushLayerError::Cancelled) => {
|
||||
info!("dropping out of flush loop for timeline shutdown");
|
||||
return;
|
||||
@@ -3667,7 +3809,30 @@ impl Timeline {
|
||||
break err.map(|_| ());
|
||||
}
|
||||
}
|
||||
timer.stop_and_record();
|
||||
let flush_duration = flush_timer.stop_and_record();
|
||||
|
||||
// Delay the next flush to backpressure if compaction can't keep up. We delay by the
|
||||
// flush duration such that the flush takes 2x as long. This is propagated up to WAL
|
||||
// ingestion by having ephemeral layer rolls wait for flushes.
|
||||
if let Some(delay_threshold) = self.get_l0_flush_delay_threshold() {
|
||||
if l0_count >= delay_threshold {
|
||||
let delay = flush_duration.as_secs_f64();
|
||||
info!(
|
||||
"delaying layer flush by {delay:.3}s for compaction backpressure at \
|
||||
{l0_count} L0 layers ({frozen_count} frozen layers with {frozen_size} bytes)"
|
||||
);
|
||||
let _delay_timer = self
|
||||
.metrics
|
||||
.flush_delay_histo
|
||||
.start_timer()
|
||||
.record_on_drop();
|
||||
tokio::select! {
_ = tokio::time::sleep(flush_duration) => {},
_ = watch_l0.wait_for(|l0| *l0 < delay_threshold) => {},
_ = self.cancel.cancelled() => {},
}
}
}
};

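The flush loop above threads backpressure through a watch channel on the L0 delta count: when the count is at or above the delay threshold it sleeps for the previous flush duration, but resumes early if the count drops below the threshold or the timeline is cancelled. A stripped-down sketch of that select; the channel here is fed by a dummy task, whereas the real receiver comes from LayerMap::watch_level0_deltas().

use std::time::Duration;

use tokio::sync::watch;

#[tokio::main]
async fn main() {
    // Dummy L0-count feed; the real receiver comes from the layer map.
    let (tx, mut watch_l0) = watch::channel(40usize);
    tokio::spawn(async move {
        tokio::time::sleep(Duration::from_millis(50)).await;
        let _ = tx.send(10); // "compaction" catches up
    });

    let delay_threshold = 30usize;
    let flush_duration = Duration::from_secs(5);

    let l0_count = *watch_l0.borrow();
    if l0_count >= delay_threshold {
        // Delay the next flush, but resume early if the L0 count drops below the threshold.
        tokio::select! {
            _ = tokio::time::sleep(flush_duration) => println!("resumed after the full delay"),
            res = watch_l0.wait_for(|l0| *l0 < delay_threshold) => {
                if let Ok(l0) = res.as_deref() {
                    println!("resumed early at {l0} L0 layers");
                }
            }
        }
    }
}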
// Unsharded tenants should never advance their LSN beyond the end of the
|
||||
@@ -3869,24 +4034,6 @@ impl Timeline {
|
||||
// release lock on 'layers'
|
||||
};
|
||||
|
||||
// Backpressure mechanism: wait with continuation of the flush loop until we have uploaded all layer files.
|
||||
// This makes us refuse ingest until the new layers have been persisted to the remote
|
||||
let start = Instant::now();
|
||||
self.remote_client
|
||||
.wait_completion()
|
||||
.await
|
||||
.map_err(|e| match e {
|
||||
WaitCompletionError::UploadQueueShutDownOrStopped
|
||||
| WaitCompletionError::NotInitialized(
|
||||
NotInitialized::ShuttingDown | NotInitialized::Stopped,
|
||||
) => FlushLayerError::Cancelled,
|
||||
WaitCompletionError::NotInitialized(NotInitialized::Uninitialized) => {
|
||||
FlushLayerError::Other(anyhow!(e).into())
|
||||
}
|
||||
})?;
|
||||
let duration = start.elapsed().as_secs_f64();
|
||||
self.metrics.flush_wait_upload_time_gauge_add(duration);
|
||||
|
||||
// FIXME: between create_delta_layer and the scheduling of the upload in `update_metadata_file`,
|
||||
// a compaction can delete the file and then it won't be available for uploads any more.
|
||||
// We still schedule the upload, resulting in an error, but ideally we'd somehow avoid this
|
||||
@@ -4128,6 +4275,7 @@ impl Timeline {
|
||||
|
||||
/// Create image layers for Postgres data. Assumes the caller passes a partition that is not too large,
|
||||
/// so that at most one image layer will be produced from this function.
|
||||
#[allow(clippy::too_many_arguments)]
|
||||
async fn create_image_layer_for_rel_blocks(
|
||||
self: &Arc<Self>,
|
||||
partition: &KeySpace,
|
||||
@@ -4136,6 +4284,7 @@ impl Timeline {
|
||||
ctx: &RequestContext,
|
||||
img_range: Range<Key>,
|
||||
start: Key,
|
||||
io_concurrency: IoConcurrency,
|
||||
) -> Result<ImageLayerCreationOutcome, CreateImageLayersError> {
|
||||
let mut wrote_keys = false;
|
||||
|
||||
@@ -4164,7 +4313,12 @@ impl Timeline {
|
||||
|| (last_key_in_range && key_request_accum.raw_size() > 0)
|
||||
{
|
||||
let results = self
|
||||
.get_vectored(key_request_accum.consume_keyspace(), lsn, ctx)
|
||||
.get_vectored(
|
||||
key_request_accum.consume_keyspace(),
|
||||
lsn,
|
||||
io_concurrency.clone(),
|
||||
ctx,
|
||||
)
|
||||
.await?;
|
||||
|
||||
if self.cancel.is_cancelled() {
|
||||
@@ -4243,9 +4397,10 @@ impl Timeline {
|
||||
img_range: Range<Key>,
|
||||
mode: ImageLayerCreationMode,
|
||||
start: Key,
|
||||
io_concurrency: IoConcurrency,
|
||||
) -> Result<ImageLayerCreationOutcome, CreateImageLayersError> {
|
||||
// Metadata keys image layer creation.
|
||||
let mut reconstruct_state = ValuesReconstructState::default();
|
||||
let mut reconstruct_state = ValuesReconstructState::new(io_concurrency);
|
||||
let begin = Instant::now();
|
||||
let data = self
|
||||
.get_vectored_impl(partition.clone(), lsn, &mut reconstruct_state, ctx)
|
||||
@@ -4462,6 +4617,13 @@ impl Timeline {
|
||||
)))
|
||||
});
|
||||
|
||||
let io_concurrency = IoConcurrency::spawn_from_conf(
|
||||
self.conf,
|
||||
self.gate
|
||||
.enter()
|
||||
.map_err(|_| CreateImageLayersError::Cancelled)?,
|
||||
);
|
||||
|
||||
if !compact_metadata {
|
||||
let ImageLayerCreationOutcome {
|
||||
image,
|
||||
@@ -4474,6 +4636,7 @@ impl Timeline {
|
||||
ctx,
|
||||
img_range,
|
||||
start,
|
||||
io_concurrency,
|
||||
)
|
||||
.await?;
|
||||
|
||||
@@ -4492,6 +4655,7 @@ impl Timeline {
|
||||
img_range,
|
||||
mode,
|
||||
start,
|
||||
io_concurrency,
|
||||
)
|
||||
.await?;
|
||||
start = next_start_key;
|
||||
@@ -5759,13 +5923,14 @@ impl Timeline {
|
||||
self: &Arc<Timeline>,
|
||||
lsn: Lsn,
|
||||
ctx: &RequestContext,
|
||||
io_concurrency: IoConcurrency,
|
||||
) -> anyhow::Result<Vec<(Key, Bytes)>> {
|
||||
let mut all_data = Vec::new();
|
||||
let guard = self.layers.read().await;
|
||||
for layer in guard.layer_map()?.iter_historic_layers() {
|
||||
if !layer.is_delta() && layer.image_layer_lsn() == lsn {
|
||||
let layer = guard.get_from_desc(&layer);
|
||||
let mut reconstruct_data = ValuesReconstructState::default();
|
||||
let mut reconstruct_data = ValuesReconstructState::new(io_concurrency.clone());
|
||||
layer
|
||||
.get_values_reconstruct_data(
|
||||
KeySpace::single(Key::MIN..Key::MAX),
|
||||
@@ -5774,8 +5939,9 @@ impl Timeline {
|
||||
ctx,
|
||||
)
|
||||
.await?;
|
||||
for (k, v) in reconstruct_data.keys {
|
||||
all_data.push((k, v?.img.unwrap().1));
|
||||
for (k, v) in std::mem::take(&mut reconstruct_data.keys) {
|
||||
let v = v.collect_pending_ios().await?;
|
||||
all_data.push((k, v.img.unwrap().1));
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -5896,13 +6062,37 @@ impl TimelineWriter<'_> {
|
||||
async fn roll_layer(&mut self, freeze_at: Lsn) -> Result<(), FlushLayerError> {
|
||||
let current_size = self.write_guard.as_ref().unwrap().current_size;
|
||||
|
||||
// If layer flushes are backpressured due to compaction not keeping up, wait for the flush
|
||||
// to propagate the backpressure up into WAL ingestion.
|
||||
let l0_count = self
|
||||
.tl
|
||||
.layers
|
||||
.read()
|
||||
.await
|
||||
.layer_map()?
|
||||
.level0_deltas()
|
||||
.len();
|
||||
let wait_thresholds = [
|
||||
self.get_l0_flush_delay_threshold(),
|
||||
self.get_l0_flush_stall_threshold(),
|
||||
];
|
||||
let wait_threshold = wait_thresholds.into_iter().flatten().min();
|
||||
|
||||
// self.write_guard will be taken by the freezing
|
||||
self.tl
|
||||
let flush_id = self
|
||||
.tl
|
||||
.freeze_inmem_layer_at(freeze_at, &mut self.write_guard)
|
||||
.await?;
|
||||

assert!(self.write_guard.is_none());

if let Some(wait_threshold) = wait_threshold {
if l0_count >= wait_threshold {
info!("layer roll waiting for flush due to compaction backpressure at {l0_count} L0 layers");
self.tl.wait_flush_completion(flush_id).await?;
}
}

if current_size >= self.get_checkpoint_distance() * 2 {
warn!("Flushed oversized open layer with size {}", current_size)
}

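roll_layer above picks the smaller of the delay and stall thresholds, either of which may be unset, as the L0 count at which a layer roll waits for its flush to complete. The flatten-then-min idiom behaves like this (values are illustrative only):

fn wait_threshold(delay: Option<usize>, stall: Option<usize>) -> Option<usize> {
    // None entries drop out; take the minimum of whatever thresholds remain.
    [delay, stall].into_iter().flatten().min()
}

fn main() {
    assert_eq!(wait_threshold(Some(30), Some(50)), Some(30));
    assert_eq!(wait_threshold(None, Some(50)), Some(50));
    assert_eq!(wait_threshold(None, None), None);
}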
@@ -42,8 +42,8 @@ use crate::tenant::storage_layer::merge_iterator::MergeIterator;
|
||||
use crate::tenant::storage_layer::{
|
||||
AsLayerDesc, PersistentLayerDesc, PersistentLayerKey, ValueReconstructState,
|
||||
};
|
||||
use crate::tenant::timeline::ImageLayerCreationOutcome;
|
||||
use crate::tenant::timeline::{drop_rlock, DeltaLayerWriter, ImageLayerWriter};
|
||||
use crate::tenant::timeline::{ImageLayerCreationOutcome, IoConcurrency};
|
||||
use crate::tenant::timeline::{Layer, ResidentLayer};
|
||||
use crate::tenant::{gc_block, DeltaLayer, MaybeOffloaded};
|
||||
use crate::virtual_file::{MaybeFatalIo, VirtualFile};
|
||||
@@ -436,12 +436,24 @@ impl KeyHistoryRetention {
|
||||
if dry_run {
|
||||
return true;
|
||||
}
|
||||
let guard = tline.layers.read().await;
|
||||
if !guard.contains_key(key) {
|
||||
return false;
|
||||
if key.key_range.end == Key::MAX {
|
||||
warn!(
|
||||
key=%key,
|
||||
"discard layer due to layer key range being Key::MAX, which should not happen",
|
||||
);
|
||||
if cfg!(feature = "testing") {
|
||||
panic!("Key::MAX should not be part of the key space");
|
||||
}
|
||||
return true;
|
||||
}
|
||||
let layer_generation;
|
||||
{
|
||||
let guard = tline.layers.read().await;
|
||||
if !guard.contains_key(key) {
|
||||
return false;
|
||||
}
|
||||
layer_generation = guard.get_from_key(key).metadata().generation;
|
||||
}
|
||||
let layer_generation = guard.get_from_key(key).metadata().generation;
|
||||
drop(guard);
|
||||
if layer_generation == tline.generation {
|
||||
info!(
|
||||
key=%key,
|
||||
@@ -2138,6 +2150,11 @@ impl Timeline {
|
||||
self.get_gc_compaction_watermark()
|
||||
};
|
||||
|
||||
if compact_below_lsn == Lsn::INVALID {
|
||||
tracing::warn!("no layers to compact with gc: gc_cutoff not generated yet, skipping gc bottom-most compaction");
|
||||
return Ok(vec![]);
|
||||
}
|
||||
|
||||
// Split compaction job to about 4GB each
|
||||
const GC_COMPACT_MAX_SIZE_MB: u64 = 4 * 1024;
|
||||
let sub_compaction_max_job_size_mb =
|
||||
@@ -2212,6 +2229,12 @@ impl Timeline {
|
||||
} else {
|
||||
end
|
||||
};
|
||||
let end = if ranges_num == idx + 1 {
|
||||
// extend the compaction range to the end of the key range if it's the last partition
|
||||
end.max(job.compact_key_range.end)
|
||||
} else {
|
||||
end
|
||||
};
|
||||
info!(
|
||||
"splitting compaction job: {}..{}, estimated_size={}",
|
||||
start, end, total_size
|
||||
@@ -2332,6 +2355,11 @@ impl Timeline {
|
||||
// each of the retain_lsn. Therefore, if the user-provided `compact_lsn_range.end` is larger than the real gc cutoff, we will use
|
||||
// the real cutoff.
|
||||
let mut gc_cutoff = if compact_lsn_range.end == Lsn::MAX {
|
||||
if real_gc_cutoff == Lsn::INVALID {
|
||||
// If the gc_cutoff is not generated yet, we should not compact anything.
|
||||
tracing::warn!("no layers to compact with gc: gc_cutoff not generated yet, skipping gc bottom-most compaction");
|
||||
return Ok(());
|
||||
}
|
||||
real_gc_cutoff
|
||||
} else {
|
||||
compact_lsn_range.end
|
||||
@@ -2742,7 +2770,15 @@ impl Timeline {
|
||||
|
||||
let produced_image_layers = if let Some(writer) = image_layer_writer {
|
||||
if !dry_run {
|
||||
let end_key = job_desc.compaction_key_range.end;
|
||||
let mut end_key = job_desc.compaction_key_range.end;
|
||||
if end_key == Key::MAX {
|
||||
// There's a potential, yet-to-be-resolved bug when end_key is Key::MAX, so we need to subtract 1 to work around it.
|
||||
// Note that we never write Key::MAX into the image layer, as this is not part of the key space.
|
||||
end_key = Key {
|
||||
field6: u32::MAX - 1,
|
||||
..Key::MAX
|
||||
};
|
||||
}
|
||||
writer
|
||||
.finish_with_discard_fn(self, ctx, end_key, discard)
|
||||
.await?
|
||||
@@ -2863,7 +2899,7 @@ impl Timeline {
|
||||
"produced {} delta layers and {} image layers, {} layers are kept",
|
||||
produced_delta_layers_len,
|
||||
produced_image_layers_len,
|
||||
layer_selection.len()
|
||||
keep_layers.len()
|
||||
);
|
||||
|
||||
// Step 3: Place back to the layer map.
|
||||
@@ -2909,8 +2945,28 @@ impl Timeline {
|
||||
// be batched into `schedule_compaction_update`.
|
||||
let disk_consistent_lsn = self.disk_consistent_lsn.load();
|
||||
self.schedule_uploads(disk_consistent_lsn, None)?;
|
||||
// If a layer gets rewritten throughout gc-compaction, we need to keep that layer only in `compact_to` instead
|
||||
// of `compact_from`.
|
||||
let compact_from = {
|
||||
let mut compact_from = Vec::new();
|
||||
let mut compact_to_set = HashMap::new();
|
||||
for layer in &compact_to {
|
||||
compact_to_set.insert(layer.layer_desc().key(), layer);
|
||||
}
|
||||
for layer in &layer_selection {
|
||||
if let Some(to) = compact_to_set.get(&layer.layer_desc().key()) {
|
||||
tracing::info!(
|
||||
"skipping delete {} because found same layer key at different generation {}",
|
||||
layer, to
|
||||
);
|
||||
} else {
|
||||
compact_from.push(layer.clone());
|
||||
}
|
||||
}
|
||||
compact_from
|
||||
};
|
||||
self.remote_client
|
||||
.schedule_compaction_update(&layer_selection, &compact_to)?;
|
||||
.schedule_compaction_update(&compact_from, &compact_to)?;
|
||||
|
||||
drop(gc_lock);
|
||||
|
||||
@@ -3170,6 +3226,7 @@ impl TimelineAdaptor {
|
||||
ctx,
|
||||
key_range.clone(),
|
||||
start,
|
||||
IoConcurrency::sequential(),
|
||||
)
|
||||
.await?;
|
||||
|
||||
|
||||
@@ -337,16 +337,45 @@ impl OpenLayerManager {
|
||||
compact_to: &[ResidentLayer],
|
||||
metrics: &TimelineMetrics,
|
||||
) {
|
||||
// We can simply reuse compact l0 logic. Use a different function name to indicate a different type of layer map modification.
|
||||
self.finish_compact_l0(compact_from, compact_to, metrics)
|
||||
// gc-compaction could contain layer rewrites. We need to delete the old layers and insert the new ones.
|
||||
|
||||
// Match the old layers with the new layers
|
||||
let mut add_layers = HashMap::new();
|
||||
let mut rewrite_layers = HashMap::new();
|
||||
let mut drop_layers = HashMap::new();
|
||||
for layer in compact_from {
|
||||
drop_layers.insert(layer.layer_desc().key(), layer.clone());
|
||||
}
|
||||
for layer in compact_to {
|
||||
if let Some(old_layer) = drop_layers.remove(&layer.layer_desc().key()) {
|
||||
rewrite_layers.insert(layer.layer_desc().key(), (old_layer.clone(), layer.clone()));
|
||||
} else {
|
||||
add_layers.insert(layer.layer_desc().key(), layer.clone());
|
||||
}
|
||||
}
|
||||
let add_layers = add_layers.values().cloned().collect::<Vec<_>>();
|
||||
let drop_layers = drop_layers.values().cloned().collect::<Vec<_>>();
|
||||
let rewrite_layers = rewrite_layers.values().cloned().collect::<Vec<_>>();
|
||||
|
||||
self.rewrite_layers_inner(&rewrite_layers, &drop_layers, &add_layers, metrics);
|
||||
}
|
||||
|
||||
/// Called post-compaction when some previous generation image layers were trimmed.
|
||||
pub(crate) fn rewrite_layers(
|
||||
pub fn rewrite_layers(
|
||||
&mut self,
|
||||
rewrite_layers: &[(Layer, ResidentLayer)],
|
||||
drop_layers: &[Layer],
|
||||
metrics: &TimelineMetrics,
|
||||
) {
|
||||
self.rewrite_layers_inner(rewrite_layers, drop_layers, &[], metrics);
|
||||
}
|
||||
|
||||
fn rewrite_layers_inner(
|
||||
&mut self,
|
||||
rewrite_layers: &[(Layer, ResidentLayer)],
|
||||
drop_layers: &[Layer],
|
||||
add_layers: &[ResidentLayer],
|
||||
metrics: &TimelineMetrics,
|
||||
) {
|
||||
let mut updates = self.layer_map.batch_update();
|
||||
for (old_layer, new_layer) in rewrite_layers {
|
||||
@@ -382,6 +411,10 @@ impl OpenLayerManager {
|
||||
for l in drop_layers {
|
||||
Self::delete_historic_layer(l, &mut updates, &mut self.layer_fmgr);
|
||||
}
|
||||
for l in add_layers {
Self::insert_historic_layer(l.as_ref().clone(), &mut updates, &mut self.layer_fmgr);
metrics.record_new_file_metrics(l.layer_desc().file_size);
}
updates.flush();
}


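The gc-compaction path above matches old and new layers by their layer key so that a rewritten layer (same key, different generation) is treated as a rewrite rather than a delete plus an insert. A minimal sketch of that partitioning with plain strings standing in for layer keys; the real code keys by the layer descriptor's key() and carries Layer/ResidentLayer handles.

use std::collections::HashMap;

fn partition<'a>(
    compact_from: &'a [&'a str],
    compact_to: &'a [&'a str],
) -> (Vec<&'a str>, Vec<(&'a str, &'a str)>, Vec<&'a str>) {
    let mut drop_layers: HashMap<&str, &str> = compact_from.iter().map(|l| (*l, *l)).collect();
    let mut rewrite_layers = Vec::new();
    let mut add_layers = Vec::new();
    for layer in compact_to {
        if let Some(old) = drop_layers.remove(layer) {
            rewrite_layers.push((old, *layer)); // same key on both sides: a rewrite
        } else {
            add_layers.push(*layer); // only in the new set: an insert
        }
    }
    let drop_layers = drop_layers.into_values().collect(); // only in the old set: a delete
    (drop_layers, rewrite_layers, add_layers)
}

fn main() {
    let (drops, rewrites, adds) = partition(&["a", "b"], &["b", "c"]);
    assert_eq!(drops, vec!["a"]);
    assert_eq!(rewrites, vec![("b", "b")]);
    assert_eq!(adds, vec!["c"]);
}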
@@ -35,6 +35,7 @@ use crate::virtual_file::{self, VirtualFile};
|
||||
pub struct BlobMeta {
|
||||
pub key: Key,
|
||||
pub lsn: Lsn,
|
||||
pub will_init: bool,
|
||||
}
|
||||
|
||||
/// A view into the vectored blobs read buffer.
|
||||
@@ -310,7 +311,15 @@ pub enum BlobFlag {
|
||||
/// * Iterate over the collected blobs and coalesce them into reads at the end
|
||||
pub struct VectoredReadPlanner {
|
||||
// Track all the blob offsets. Start offsets must be ordered.
|
||||
blobs: BTreeMap<Key, Vec<(Lsn, u64, u64)>>,
|
||||
// Values in the value tuples are:
|
||||
// (
|
||||
// lsn of the blob,
|
||||
// start offset of the blob in the underlying file,
|
||||
// end offset of the blob in the underlying file,
|
||||
// whether the blob initializes the page image or not
|
||||
// see [`pageserver_api::record::NeonWalRecord::will_init`]
|
||||
// )
|
||||
blobs: BTreeMap<Key, Vec<(Lsn, u64, u64, bool)>>,
|
||||
// Arguments for previous blob passed into [`VectoredReadPlanner::handle`]
|
||||
prev: Option<(Key, Lsn, u64, BlobFlag)>,
|
||||
|
||||
@@ -371,12 +380,12 @@ impl VectoredReadPlanner {
|
||||
match flag {
BlobFlag::None => {
let blobs_for_key = self.blobs.entry(key).or_default();
blobs_for_key.push((lsn, start_offset, end_offset));
blobs_for_key.push((lsn, start_offset, end_offset, false));
}
BlobFlag::ReplaceAll => {
let blobs_for_key = self.blobs.entry(key).or_default();
blobs_for_key.clear();
blobs_for_key.push((lsn, start_offset, end_offset));
blobs_for_key.push((lsn, start_offset, end_offset, true));
}
BlobFlag::Ignore => {}
}
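With will_init now carried per planned blob, BlobFlag::ReplaceAll clears the blobs previously planned for the key and records the surviving blob as page-initializing, while BlobFlag::None appends a non-initializing blob. A tiny sketch of just that bookkeeping over a simplified blob tuple; offsets and the LSN are plain integers here and the types are not the pageserver ones.

use std::collections::BTreeMap;

type Lsn = u64;
// (lsn, start_offset, end_offset, will_init)
type PlannedBlob = (Lsn, u64, u64, bool);

#[derive(Clone, Copy)]
enum BlobFlag {
    None,
    ReplaceAll,
    Ignore,
}

fn handle(
    blobs: &mut BTreeMap<u64, Vec<PlannedBlob>>,
    key: u64,
    (lsn, start, end): (Lsn, u64, u64),
    flag: BlobFlag,
) {
    match flag {
        BlobFlag::None => blobs.entry(key).or_default().push((lsn, start, end, false)),
        BlobFlag::ReplaceAll => {
            let for_key = blobs.entry(key).or_default();
            for_key.clear(); // a replacing value (e.g. an image) supersedes older blobs
            for_key.push((lsn, start, end, true));
        }
        BlobFlag::Ignore => {}
    }
}

fn main() {
    let mut blobs = BTreeMap::new();
    handle(&mut blobs, 7, (10, 0, 100), BlobFlag::None);
    handle(&mut blobs, 7, (20, 100, 300), BlobFlag::ReplaceAll);
    handle(&mut blobs, 7, (30, 300, 350), BlobFlag::None);
    handle(&mut blobs, 7, (40, 350, 360), BlobFlag::Ignore);
    assert_eq!(blobs[&7], vec![(20, 100, 300, true), (30, 300, 350, false)]);
}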
@@ -387,11 +396,17 @@ impl VectoredReadPlanner {
|
||||
let mut reads = Vec::new();
|
||||
|
||||
for (key, blobs_for_key) in self.blobs {
|
||||
for (lsn, start_offset, end_offset) in blobs_for_key {
|
||||
for (lsn, start_offset, end_offset, will_init) in blobs_for_key {
|
||||
let extended = match &mut current_read_builder {
|
||||
Some(read_builder) => {
|
||||
read_builder.extend(start_offset, end_offset, BlobMeta { key, lsn })
|
||||
}
|
||||
Some(read_builder) => read_builder.extend(
|
||||
start_offset,
|
||||
end_offset,
|
||||
BlobMeta {
|
||||
key,
|
||||
lsn,
|
||||
will_init,
|
||||
},
|
||||
),
|
||||
None => VectoredReadExtended::No,
|
||||
};
|
||||
|
||||
@@ -399,7 +414,11 @@ impl VectoredReadPlanner {
|
||||
let next_read_builder = ChunkedVectoredReadBuilder::new(
|
||||
start_offset,
|
||||
end_offset,
|
||||
BlobMeta { key, lsn },
|
||||
BlobMeta {
|
||||
key,
|
||||
lsn,
|
||||
will_init,
|
||||
},
|
||||
self.max_read_size,
|
||||
);
|
||||
|
||||
@@ -527,7 +546,7 @@ impl<'a> VectoredBlobReader<'a> {
 pub struct StreamingVectoredReadPlanner {
     read_builder: Option<ChunkedVectoredReadBuilder>,
     // Arguments for previous blob passed into [`StreamingVectoredReadPlanner::handle`]
-    prev: Option<(Key, Lsn, u64)>,
+    prev: Option<(Key, Lsn, u64, bool)>,
     /// Max read size per batch. This is not a strict limit. If there are [0, 100) and [100, 200), while the `max_read_size` is 150,
     /// we will produce a single batch instead of split them.
     max_read_size: u64,
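The `max_read_size` doc comment above describes a soft cap: judging by the [0, 100) / [100, 200) example, a contiguous blob is still merged while the current batch is below the cap, so a batch can end up larger than `max_read_size`. A small self-contained sketch of that reading; the batching helper is hypothetical and not the pageserver's `ChunkedVectoredReadBuilder`:

/// Hypothetical batch of contiguous file ranges, half-open [start, end).
struct Batch {
    start: u64,
    end: u64,
}

/// Coalesce contiguous (start, end) blobs under a soft size cap: a new blob is
/// merged as long as the current batch has not yet reached `max_read_size`,
/// even if the merge overshoots the cap.
fn plan(blobs: &[(u64, u64)], max_read_size: u64) -> Vec<Batch> {
    let mut batches: Vec<Batch> = Vec::new();
    for &(start, end) in blobs {
        let extend = match batches.last() {
            Some(b) => b.end == start && (b.end - b.start) < max_read_size,
            None => false,
        };
        if extend {
            batches.last_mut().unwrap().end = end;
        } else {
            batches.push(Batch { start, end });
        }
    }
    batches
}

fn main() {
    // [0, 100) and [100, 200) with a cap of 150 coalesce into a single
    // [0, 200) batch, matching the example in the doc comment.
    let batches = plan(&[(0, 100), (100, 200)], 150);
    assert_eq!(batches.len(), 1);
    assert_eq!((batches[0].start, batches[0].end), (0, 200));
}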
@@ -550,27 +569,47 @@ impl StreamingVectoredReadPlanner {
         }
     }

-    pub fn handle(&mut self, key: Key, lsn: Lsn, offset: u64) -> Option<VectoredRead> {
+    pub fn handle(
+        &mut self,
+        key: Key,
+        lsn: Lsn,
+        offset: u64,
+        will_init: bool,
+    ) -> Option<VectoredRead> {
         // Implementation note: internally lag behind by one blob such that
         // we have a start and end offset when initialising [`VectoredRead`]
-        let (prev_key, prev_lsn, prev_offset) = match self.prev {
+        let (prev_key, prev_lsn, prev_offset, prev_will_init) = match self.prev {
             None => {
-                self.prev = Some((key, lsn, offset));
+                self.prev = Some((key, lsn, offset, will_init));
                 return None;
             }
             Some(prev) => prev,
         };

-        let res = self.add_blob(prev_key, prev_lsn, prev_offset, offset, false);
+        let res = self.add_blob(
+            prev_key,
+            prev_lsn,
+            prev_offset,
+            offset,
+            false,
+            prev_will_init,
+        );

-        self.prev = Some((key, lsn, offset));
+        self.prev = Some((key, lsn, offset, will_init));

         res
     }

     pub fn handle_range_end(&mut self, offset: u64) -> Option<VectoredRead> {
-        let res = if let Some((prev_key, prev_lsn, prev_offset)) = self.prev {
-            self.add_blob(prev_key, prev_lsn, prev_offset, offset, true)
+        let res = if let Some((prev_key, prev_lsn, prev_offset, prev_will_init)) = self.prev {
+            self.add_blob(
+                prev_key,
+                prev_lsn,
+                prev_offset,
+                offset,
+                true,
+                prev_will_init,
+            )
         } else {
             None
         };
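The "lag behind by one blob" implementation note above is a generic streaming pattern: a blob's end offset is only known once the next blob's start offset (or the end of the range) arrives, so the planner buffers one blob and emits the previous one. A stripped-down sketch of the pattern with keys, LSNs and will_init omitted; this is illustrative only, not the crate's code:

/// Emits half-open ranges [start, end) one step behind the incoming offsets,
/// mirroring the planner's lag-by-one technique.
struct LagByOne {
    prev_start: Option<u64>,
}

impl LagByOne {
    fn handle(&mut self, offset: u64) -> Option<(u64, u64)> {
        // The previous blob's end offset is only known now, when the next
        // blob's start offset arrives.
        let emitted = self.prev_start.map(|start| (start, offset));
        self.prev_start = Some(offset);
        emitted
    }

    fn handle_range_end(&mut self, end: u64) -> Option<(u64, u64)> {
        self.prev_start.take().map(|start| (start, end))
    }
}

fn main() {
    let mut planner = LagByOne { prev_start: None };
    let mut ranges = Vec::new();
    for offset in [0u64, 100, 250] {
        ranges.extend(planner.handle(offset));
    }
    ranges.extend(planner.handle_range_end(300));
    assert_eq!(ranges, vec![(0, 100), (100, 250), (250, 300)]);
}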
@@ -587,10 +626,19 @@ impl StreamingVectoredReadPlanner {
         start_offset: u64,
         end_offset: u64,
         is_last_blob_in_read: bool,
+        will_init: bool,
     ) -> Option<VectoredRead> {
         match &mut self.read_builder {
             Some(read_builder) => {
-                let extended = read_builder.extend(start_offset, end_offset, BlobMeta { key, lsn });
+                let extended = read_builder.extend(
+                    start_offset,
+                    end_offset,
+                    BlobMeta {
+                        key,
+                        lsn,
+                        will_init,
+                    },
+                );
                 assert_eq!(extended, VectoredReadExtended::Yes);
             }
             None => {
@@ -598,7 +646,11 @@ impl StreamingVectoredReadPlanner {
                 Some(ChunkedVectoredReadBuilder::new_streaming(
                     start_offset,
                     end_offset,
-                    BlobMeta { key, lsn },
+                    BlobMeta {
+                        key,
+                        lsn,
+                        will_init,
+                    },
                 ))
             };
         }
@@ -812,7 +864,7 @@ mod tests {
         let mut planner = StreamingVectoredReadPlanner::new(max_read_size, 1000);
         let mut reads = Vec::new();
         for (key, lsn, offset, _) in blob_descriptions.clone() {
-            reads.extend(planner.handle(key, lsn, offset));
+            reads.extend(planner.handle(key, lsn, offset, false));
         }
         reads.extend(planner.handle_range_end(652 * 1024));

@@ -850,7 +902,7 @@ mod tests {
         let mut planner = StreamingVectoredReadPlanner::new(max_read_size, 2);
         let mut reads = Vec::new();
         for (key, lsn, offset, _) in blob_descriptions.clone() {
-            reads.extend(planner.handle(key, lsn, offset));
+            reads.extend(planner.handle(key, lsn, offset, false));
         }
         reads.extend(planner.handle_range_end(652 * 1024));

@@ -875,7 +927,7 @@ mod tests {
         {
             let mut planner = StreamingVectoredReadPlanner::new(max_read_size, 1);
             let mut reads = Vec::new();
-            reads.extend(planner.handle(key, lsn, 0));
+            reads.extend(planner.handle(key, lsn, 0, false));
             reads.extend(planner.handle_range_end(652 * 1024));
             assert_eq!(reads.len(), 1);
             validate_read(&reads[0], &[(key, lsn, 0, BlobFlag::None)]);
@@ -883,8 +935,8 @@ mod tests {
         {
             let mut planner = StreamingVectoredReadPlanner::new(max_read_size, 1);
             let mut reads = Vec::new();
-            reads.extend(planner.handle(key, lsn, 0));
-            reads.extend(planner.handle(key, lsn, 128 * 1024));
+            reads.extend(planner.handle(key, lsn, 0, false));
+            reads.extend(planner.handle(key, lsn, 128 * 1024, false));
             reads.extend(planner.handle_range_end(652 * 1024));
             assert_eq!(reads.len(), 2);
             validate_read(&reads[0], &[(key, lsn, 0, BlobFlag::None)]);
@@ -893,8 +945,8 @@ mod tests {
         {
             let mut planner = StreamingVectoredReadPlanner::new(max_read_size, 2);
             let mut reads = Vec::new();
-            reads.extend(planner.handle(key, lsn, 0));
-            reads.extend(planner.handle(key, lsn, 128 * 1024));
+            reads.extend(planner.handle(key, lsn, 0, false));
+            reads.extend(planner.handle(key, lsn, 128 * 1024, false));
             reads.extend(planner.handle_range_end(652 * 1024));
             assert_eq!(reads.len(), 1);
             validate_read(
@@ -923,6 +975,7 @@ mod tests {
         let meta = BlobMeta {
             key: Key::MIN,
             lsn: Lsn(0),
+            will_init: false,
         };

         for (idx, (blob, offset)) in blobs.iter().zip(offsets.iter()).enumerate() {

@@ -499,7 +499,13 @@ impl WalIngest {

             let content = modification
                 .tline
-                .get_rel_page_at_lsn(src_rel, blknum, Version::Modified(modification), ctx)
+                .get_rel_page_at_lsn(
+                    src_rel,
+                    blknum,
+                    Version::Modified(modification),
+                    ctx,
+                    crate::tenant::storage_layer::IoConcurrency::sequential(),
+                )
                 .await?;
             modification.put_rel_page_image(dst_rel, blknum, content)?;
             num_blocks_copied += 1;
@@ -1489,6 +1495,7 @@ mod tests {
     use super::*;
     use crate::tenant::harness::*;
     use crate::tenant::remote_timeline_client::{remote_initdb_archive_path, INITDB_PATH};
+    use crate::tenant::storage_layer::IoConcurrency;
     use postgres_ffi::RELSEG_SIZE;

     use crate::DEFAULT_PG_VERSION;
@@ -1532,6 +1539,7 @@ mod tests {
     #[tokio::test]
     async fn test_relsize() -> Result<()> {
         let (tenant, ctx) = TenantHarness::create("test_relsize").await?.load().await;
+        let io_concurrency = IoConcurrency::spawn_for_test();
         let tline = tenant
             .create_test_timeline(TIMELINE_ID, Lsn(8), DEFAULT_PG_VERSION, &ctx)
             .await?;
@@ -1599,7 +1607,13 @@ mod tests {
|
||||
// Check page contents at each LSN
|
||||
assert_eq!(
|
||||
tline
|
||||
.get_rel_page_at_lsn(TESTREL_A, 0, Version::Lsn(Lsn(0x20)), &ctx)
|
||||
.get_rel_page_at_lsn(
|
||||
TESTREL_A,
|
||||
0,
|
||||
Version::Lsn(Lsn(0x20)),
|
||||
&ctx,
|
||||
io_concurrency.clone()
|
||||
)
|
||||
.instrument(test_span.clone())
|
||||
.await?,
|
||||
test_img("foo blk 0 at 2")
|
||||
@@ -1607,7 +1621,13 @@ mod tests {
|
||||
|
||||
assert_eq!(
|
||||
tline
|
||||
.get_rel_page_at_lsn(TESTREL_A, 0, Version::Lsn(Lsn(0x30)), &ctx)
|
||||
.get_rel_page_at_lsn(
|
||||
TESTREL_A,
|
||||
0,
|
||||
Version::Lsn(Lsn(0x30)),
|
||||
&ctx,
|
||||
io_concurrency.clone()
|
||||
)
|
||||
.instrument(test_span.clone())
|
||||
.await?,
|
||||
test_img("foo blk 0 at 3")
|
||||
@@ -1615,14 +1635,26 @@ mod tests {
|
||||
|
||||
assert_eq!(
|
||||
tline
|
||||
.get_rel_page_at_lsn(TESTREL_A, 0, Version::Lsn(Lsn(0x40)), &ctx)
|
||||
.get_rel_page_at_lsn(
|
||||
TESTREL_A,
|
||||
0,
|
||||
Version::Lsn(Lsn(0x40)),
|
||||
&ctx,
|
||||
io_concurrency.clone()
|
||||
)
|
||||
.instrument(test_span.clone())
|
||||
.await?,
|
||||
test_img("foo blk 0 at 3")
|
||||
);
|
||||
assert_eq!(
|
||||
tline
|
||||
.get_rel_page_at_lsn(TESTREL_A, 1, Version::Lsn(Lsn(0x40)), &ctx)
|
||||
.get_rel_page_at_lsn(
|
||||
TESTREL_A,
|
||||
1,
|
||||
Version::Lsn(Lsn(0x40)),
|
||||
&ctx,
|
||||
io_concurrency.clone()
|
||||
)
|
||||
.instrument(test_span.clone())
|
||||
.await?,
|
||||
test_img("foo blk 1 at 4")
|
||||
@@ -1630,21 +1662,39 @@ mod tests {
|
||||
|
||||
assert_eq!(
|
||||
tline
|
||||
.get_rel_page_at_lsn(TESTREL_A, 0, Version::Lsn(Lsn(0x50)), &ctx)
|
||||
.get_rel_page_at_lsn(
|
||||
TESTREL_A,
|
||||
0,
|
||||
Version::Lsn(Lsn(0x50)),
|
||||
&ctx,
|
||||
io_concurrency.clone()
|
||||
)
|
||||
.instrument(test_span.clone())
|
||||
.await?,
|
||||
test_img("foo blk 0 at 3")
|
||||
);
|
||||
assert_eq!(
|
||||
tline
|
||||
.get_rel_page_at_lsn(TESTREL_A, 1, Version::Lsn(Lsn(0x50)), &ctx)
|
||||
.get_rel_page_at_lsn(
|
||||
TESTREL_A,
|
||||
1,
|
||||
Version::Lsn(Lsn(0x50)),
|
||||
&ctx,
|
||||
io_concurrency.clone()
|
||||
)
|
||||
.instrument(test_span.clone())
|
||||
.await?,
|
||||
test_img("foo blk 1 at 4")
|
||||
);
|
||||
assert_eq!(
|
||||
tline
|
||||
.get_rel_page_at_lsn(TESTREL_A, 2, Version::Lsn(Lsn(0x50)), &ctx)
|
||||
.get_rel_page_at_lsn(
|
||||
TESTREL_A,
|
||||
2,
|
||||
Version::Lsn(Lsn(0x50)),
|
||||
&ctx,
|
||||
io_concurrency.clone()
|
||||
)
|
||||
.instrument(test_span.clone())
|
||||
.await?,
|
||||
test_img("foo blk 2 at 5")
|
||||
@@ -1667,14 +1717,26 @@ mod tests {
|
||||
);
|
||||
assert_eq!(
|
||||
tline
|
||||
.get_rel_page_at_lsn(TESTREL_A, 0, Version::Lsn(Lsn(0x60)), &ctx)
|
||||
.get_rel_page_at_lsn(
|
||||
TESTREL_A,
|
||||
0,
|
||||
Version::Lsn(Lsn(0x60)),
|
||||
&ctx,
|
||||
io_concurrency.clone()
|
||||
)
|
||||
.instrument(test_span.clone())
|
||||
.await?,
|
||||
test_img("foo blk 0 at 3")
|
||||
);
|
||||
assert_eq!(
|
||||
tline
|
||||
.get_rel_page_at_lsn(TESTREL_A, 1, Version::Lsn(Lsn(0x60)), &ctx)
|
||||
.get_rel_page_at_lsn(
|
||||
TESTREL_A,
|
||||
1,
|
||||
Version::Lsn(Lsn(0x60)),
|
||||
&ctx,
|
||||
io_concurrency.clone()
|
||||
)
|
||||
.instrument(test_span.clone())
|
||||
.await?,
|
||||
test_img("foo blk 1 at 4")
|
||||
@@ -1689,7 +1751,13 @@ mod tests {
|
||||
);
|
||||
assert_eq!(
|
||||
tline
|
||||
.get_rel_page_at_lsn(TESTREL_A, 2, Version::Lsn(Lsn(0x50)), &ctx)
|
||||
.get_rel_page_at_lsn(
|
||||
TESTREL_A,
|
||||
2,
|
||||
Version::Lsn(Lsn(0x50)),
|
||||
&ctx,
|
||||
io_concurrency.clone()
|
||||
)
|
||||
.instrument(test_span.clone())
|
||||
.await?,
|
||||
test_img("foo blk 2 at 5")
|
||||
@@ -1722,14 +1790,26 @@ mod tests {
|
||||
);
|
||||
assert_eq!(
|
||||
tline
|
||||
.get_rel_page_at_lsn(TESTREL_A, 0, Version::Lsn(Lsn(0x70)), &ctx)
|
||||
.get_rel_page_at_lsn(
|
||||
TESTREL_A,
|
||||
0,
|
||||
Version::Lsn(Lsn(0x70)),
|
||||
&ctx,
|
||||
io_concurrency.clone()
|
||||
)
|
||||
.instrument(test_span.clone())
|
||||
.await?,
|
||||
ZERO_PAGE
|
||||
);
|
||||
assert_eq!(
|
||||
tline
|
||||
.get_rel_page_at_lsn(TESTREL_A, 1, Version::Lsn(Lsn(0x70)), &ctx)
|
||||
.get_rel_page_at_lsn(
|
||||
TESTREL_A,
|
||||
1,
|
||||
Version::Lsn(Lsn(0x70)),
|
||||
&ctx,
|
||||
io_concurrency.clone()
|
||||
)
|
||||
.instrument(test_span.clone())
|
||||
.await?,
|
||||
test_img("foo blk 1")
|
||||
@@ -1750,7 +1830,13 @@ mod tests {
|
||||
for blk in 2..1500 {
|
||||
assert_eq!(
|
||||
tline
|
||||
.get_rel_page_at_lsn(TESTREL_A, blk, Version::Lsn(Lsn(0x80)), &ctx)
|
||||
.get_rel_page_at_lsn(
|
||||
TESTREL_A,
|
||||
blk,
|
||||
Version::Lsn(Lsn(0x80)),
|
||||
&ctx,
|
||||
io_concurrency.clone()
|
||||
)
|
||||
.instrument(test_span.clone())
|
||||
.await?,
|
||||
ZERO_PAGE
|
||||
@@ -1758,7 +1844,13 @@ mod tests {
|
||||
}
|
||||
assert_eq!(
|
||||
tline
|
||||
.get_rel_page_at_lsn(TESTREL_A, 1500, Version::Lsn(Lsn(0x80)), &ctx)
|
||||
.get_rel_page_at_lsn(
|
||||
TESTREL_A,
|
||||
1500,
|
||||
Version::Lsn(Lsn(0x80)),
|
||||
&ctx,
|
||||
io_concurrency.clone()
|
||||
)
|
||||
.instrument(test_span.clone())
|
||||
.await?,
|
||||
test_img("foo blk 1500")
|
||||
@@ -1851,6 +1943,7 @@ mod tests {
|
||||
.await?
|
||||
.load()
|
||||
.await;
|
||||
let io_concurrency = IoConcurrency::spawn_for_test();
|
||||
let tline = tenant
|
||||
.create_test_timeline(TIMELINE_ID, Lsn(8), DEFAULT_PG_VERSION, &ctx)
|
||||
.await?;
|
||||
@@ -1903,7 +1996,13 @@ mod tests {
|
||||
let data = format!("foo blk {} at {}", blkno, lsn);
|
||||
assert_eq!(
|
||||
tline
|
||||
.get_rel_page_at_lsn(TESTREL_A, blkno, Version::Lsn(lsn), &ctx)
|
||||
.get_rel_page_at_lsn(
|
||||
TESTREL_A,
|
||||
blkno,
|
||||
Version::Lsn(lsn),
|
||||
&ctx,
|
||||
io_concurrency.clone()
|
||||
)
|
||||
.instrument(test_span.clone())
|
||||
.await?,
|
||||
test_img(&data)
|
||||
@@ -1931,7 +2030,13 @@ mod tests {
|
||||
let data = format!("foo blk {} at {}", blkno, lsn);
|
||||
assert_eq!(
|
||||
tline
|
||||
.get_rel_page_at_lsn(TESTREL_A, blkno, Version::Lsn(Lsn(0x60)), &ctx)
|
||||
.get_rel_page_at_lsn(
|
||||
TESTREL_A,
|
||||
blkno,
|
||||
Version::Lsn(Lsn(0x60)),
|
||||
&ctx,
|
||||
io_concurrency.clone()
|
||||
)
|
||||
.instrument(test_span.clone())
|
||||
.await?,
|
||||
test_img(&data)
|
||||
@@ -1950,7 +2055,13 @@ mod tests {
|
||||
let data = format!("foo blk {} at {}", blkno, lsn);
|
||||
assert_eq!(
|
||||
tline
|
||||
.get_rel_page_at_lsn(TESTREL_A, blkno, Version::Lsn(Lsn(0x50)), &ctx)
|
||||
.get_rel_page_at_lsn(
|
||||
TESTREL_A,
|
||||
blkno,
|
||||
Version::Lsn(Lsn(0x50)),
|
||||
&ctx,
|
||||
io_concurrency.clone()
|
||||
)
|
||||
.instrument(test_span.clone())
|
||||
.await?,
|
||||
test_img(&data)
|
||||
@@ -1987,7 +2098,13 @@ mod tests {
|
||||
let data = format!("foo blk {} at {}", blkno, lsn);
|
||||
assert_eq!(
|
||||
tline
|
||||
.get_rel_page_at_lsn(TESTREL_A, blkno, Version::Lsn(Lsn(0x80)), &ctx)
|
||||
.get_rel_page_at_lsn(
|
||||
TESTREL_A,
|
||||
blkno,
|
||||
Version::Lsn(Lsn(0x80)),
|
||||
&ctx,
|
||||
io_concurrency.clone()
|
||||
)
|
||||
.instrument(test_span.clone())
|
||||
.await?,
|
||||
test_img(&data)
|
||||
|
||||
@@ -19,6 +19,7 @@
 #include "access/xlogrecovery.h"
 #endif
 #include "replication/logical.h"
 #include "replication/logicallauncher.h"
 #include "replication/slot.h"
 #include "replication/walsender.h"
 #include "storage/proc.h"
@@ -434,6 +435,15 @@ _PG_init(void)

 	restore_running_xacts_callback = RestoreRunningXactsFromClog;

+	DefineCustomBoolVariable(
+		"neon.disable_logical_replication_subscribers",
+		"Disables incomming logical replication",
+		NULL,
+		&disable_logical_replication_subscribers,
+		false,
+		PGC_SIGHUP,
+		0,
+		NULL, NULL, NULL);

 	DefineCustomBoolVariable(
 		"neon.allow_replica_misconfig",

190	poetry.lock (generated)
@@ -1,4 +1,4 @@
-# This file is automatically @generated by Poetry 1.8.5 and should not be changed by hand.
+# This file is automatically @generated by Poetry 2.0.1 and should not be changed by hand.

 [[package]]
|
||||
name = "aiohappyeyeballs"
|
||||
@@ -6,6 +6,7 @@ version = "2.3.5"
|
||||
description = "Happy Eyeballs for asyncio"
|
||||
optional = false
|
||||
python-versions = ">=3.8"
|
||||
groups = ["main"]
|
||||
files = [
|
||||
{file = "aiohappyeyeballs-2.3.5-py3-none-any.whl", hash = "sha256:4d6dea59215537dbc746e93e779caea8178c866856a721c9c660d7a5a7b8be03"},
|
||||
{file = "aiohappyeyeballs-2.3.5.tar.gz", hash = "sha256:6fa48b9f1317254f122a07a131a86b71ca6946ca989ce6326fff54a99a920105"},
|
||||
@@ -17,6 +18,7 @@ version = "3.10.11"
|
||||
description = "Async http client/server framework (asyncio)"
|
||||
optional = false
|
||||
python-versions = ">=3.8"
|
||||
groups = ["main"]
|
||||
files = [
|
||||
{file = "aiohttp-3.10.11-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:5077b1a5f40ffa3ba1f40d537d3bec4383988ee51fbba6b74aa8fb1bc466599e"},
|
||||
{file = "aiohttp-3.10.11-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:8d6a14a4d93b5b3c2891fca94fa9d41b2322a68194422bef0dd5ec1e57d7d298"},
|
||||
@@ -128,6 +130,7 @@ version = "1.4.0"
|
||||
description = "Postgres integration with asyncio."
|
||||
optional = false
|
||||
python-versions = ">=3.7"
|
||||
groups = ["main"]
|
||||
files = [
|
||||
{file = "aiopg-1.4.0-py3-none-any.whl", hash = "sha256:aea46e8aff30b039cfa818e6db4752c97656e893fc75e5a5dc57355a9e9dedbd"},
|
||||
{file = "aiopg-1.4.0.tar.gz", hash = "sha256:116253bef86b4d954116716d181e9a0294037f266718b2e1c9766af995639d71"},
|
||||
@@ -146,6 +149,7 @@ version = "1.3.1"
|
||||
description = "aiosignal: a list of registered asynchronous callbacks"
|
||||
optional = false
|
||||
python-versions = ">=3.7"
|
||||
groups = ["main"]
|
||||
files = [
|
||||
{file = "aiosignal-1.3.1-py3-none-any.whl", hash = "sha256:f8376fb07dd1e86a584e4fcdec80b36b7f81aac666ebc724e2c090300dd83b17"},
|
||||
{file = "aiosignal-1.3.1.tar.gz", hash = "sha256:54cd96e15e1649b75d6c87526a6ff0b6c1b0dd3459f43d9ca11d48c339b68cfc"},
|
||||
@@ -160,6 +164,7 @@ version = "2.13.2"
|
||||
description = "Allure pytest integration"
|
||||
optional = false
|
||||
python-versions = "*"
|
||||
groups = ["main"]
|
||||
files = [
|
||||
{file = "allure-pytest-2.13.2.tar.gz", hash = "sha256:22243159e8ec81ce2b5254b4013802198821b1b42f118f69d4a289396607c7b3"},
|
||||
{file = "allure_pytest-2.13.2-py3-none-any.whl", hash = "sha256:17de9dbee7f61c8e66a5b5e818b00e419dbcea44cb55c24319401ba813220690"},
|
||||
@@ -175,6 +180,7 @@ version = "2.13.2"
|
||||
description = "Common module for integrate allure with python-based frameworks"
|
||||
optional = false
|
||||
python-versions = ">=3.6"
|
||||
groups = ["main"]
|
||||
files = [
|
||||
{file = "allure-python-commons-2.13.2.tar.gz", hash = "sha256:8a03681330231b1deadd86b97ff68841c6591320114ae638570f1ed60d7a2033"},
|
||||
{file = "allure_python_commons-2.13.2-py3-none-any.whl", hash = "sha256:2bb3646ec3fbf5b36d178a5e735002bc130ae9f9ba80f080af97d368ba375051"},
|
||||
@@ -190,6 +196,7 @@ version = "0.6.0"
|
||||
description = "Reusable constraint types to use with typing.Annotated"
|
||||
optional = false
|
||||
python-versions = ">=3.8"
|
||||
groups = ["main"]
|
||||
files = [
|
||||
{file = "annotated_types-0.6.0-py3-none-any.whl", hash = "sha256:0641064de18ba7a25dee8f96403ebc39113d0cb953a01429249d5c7564666a43"},
|
||||
{file = "annotated_types-0.6.0.tar.gz", hash = "sha256:563339e807e53ffd9c267e99fc6d9ea23eb8443c08f112651963e24e22f84a5d"},
|
||||
@@ -201,6 +208,7 @@ version = "4.13.1"
|
||||
description = "ANTLR 4.13.1 runtime for Python 3"
|
||||
optional = false
|
||||
python-versions = "*"
|
||||
groups = ["main"]
|
||||
files = [
|
||||
{file = "antlr4-python3-runtime-4.13.1.tar.gz", hash = "sha256:3cd282f5ea7cfb841537fe01f143350fdb1c0b1ce7981443a2fa8513fddb6d1a"},
|
||||
{file = "antlr4_python3_runtime-4.13.1-py3-none-any.whl", hash = "sha256:78ec57aad12c97ac039ca27403ad61cb98aaec8a3f9bb8144f889aa0fa28b943"},
|
||||
@@ -212,6 +220,7 @@ version = "4.3.0"
|
||||
description = "High level compatibility layer for multiple asynchronous event loop implementations"
|
||||
optional = false
|
||||
python-versions = ">=3.8"
|
||||
groups = ["main"]
|
||||
files = [
|
||||
{file = "anyio-4.3.0-py3-none-any.whl", hash = "sha256:048e05d0f6caeed70d731f3db756d35dcc1f35747c8c403364a8332c630441b8"},
|
||||
{file = "anyio-4.3.0.tar.gz", hash = "sha256:f75253795a87df48568485fd18cdd2a3fa5c4f7c5be8e5e36637733fce06fed6"},
|
||||
@@ -232,6 +241,7 @@ version = "4.0.3"
|
||||
description = "Timeout context manager for asyncio programs"
|
||||
optional = false
|
||||
python-versions = ">=3.7"
|
||||
groups = ["main"]
|
||||
files = [
|
||||
{file = "async-timeout-4.0.3.tar.gz", hash = "sha256:4640d96be84d82d02ed59ea2b7105a0f7b33abe8703703cd0ab0bf87c427522f"},
|
||||
{file = "async_timeout-4.0.3-py3-none-any.whl", hash = "sha256:7405140ff1230c310e51dc27b3145b9092d659ce68ff733fb0cefe3ee42be028"},
|
||||
@@ -243,6 +253,7 @@ version = "0.30.0"
|
||||
description = "An asyncio PostgreSQL driver"
|
||||
optional = false
|
||||
python-versions = ">=3.8.0"
|
||||
groups = ["main"]
|
||||
files = [
|
||||
{file = "asyncpg-0.30.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:bfb4dd5ae0699bad2b233672c8fc5ccbd9ad24b89afded02341786887e37927e"},
|
||||
{file = "asyncpg-0.30.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:dc1f62c792752a49f88b7e6f774c26077091b44caceb1983509edc18a2222ec0"},
|
||||
@@ -306,6 +317,7 @@ version = "21.4.0"
|
||||
description = "Classes Without Boilerplate"
|
||||
optional = false
|
||||
python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*"
|
||||
groups = ["main"]
|
||||
files = [
|
||||
{file = "attrs-21.4.0-py2.py3-none-any.whl", hash = "sha256:2d27e3784d7a565d36ab851fe94887c5eccd6a463168875832a1be79c82828b4"},
|
||||
{file = "attrs-21.4.0.tar.gz", hash = "sha256:626ba8234211db98e869df76230a137c4c40a12d72445c45d5f5b716f076e2fd"},
|
||||
@@ -323,6 +335,7 @@ version = "1.88.0"
|
||||
description = "AWS SAM Translator is a library that transform SAM templates into AWS CloudFormation templates"
|
||||
optional = false
|
||||
python-versions = "!=4.0,<=4.0,>=3.8"
|
||||
groups = ["main"]
|
||||
files = [
|
||||
{file = "aws_sam_translator-1.88.0-py3-none-any.whl", hash = "sha256:aa93d498d8de3fb3d485c316155b1628144b823bbc176099a20de06df666fcac"},
|
||||
{file = "aws_sam_translator-1.88.0.tar.gz", hash = "sha256:e77c65f3488566122277accd44a0f1ec018e37403e0d5fe25120d96e537e91a7"},
|
||||
@@ -343,6 +356,7 @@ version = "2.10.0"
|
||||
description = "The AWS X-Ray SDK for Python (the SDK) enables Python developers to record and emit information from within their applications to the AWS X-Ray service."
|
||||
optional = false
|
||||
python-versions = "*"
|
||||
groups = ["main"]
|
||||
files = [
|
||||
{file = "aws-xray-sdk-2.10.0.tar.gz", hash = "sha256:9b14924fd0628cf92936055864655354003f0b1acc3e1c3ffde6403d0799dd7a"},
|
||||
{file = "aws_xray_sdk-2.10.0-py2.py3-none-any.whl", hash = "sha256:7551e81a796e1a5471ebe84844c40e8edf7c218db33506d046fec61f7495eda4"},
|
||||
@@ -358,6 +372,7 @@ version = "2.2.1"
|
||||
description = "Function decoration for backoff and retry"
|
||||
optional = false
|
||||
python-versions = ">=3.7,<4.0"
|
||||
groups = ["main"]
|
||||
files = [
|
||||
{file = "backoff-2.2.1-py3-none-any.whl", hash = "sha256:63579f9a0628e06278f7e47b7d7d5b6ce20dc65c5e96a6f3ca99a6adca0396e8"},
|
||||
{file = "backoff-2.2.1.tar.gz", hash = "sha256:03f829f5bb1923180821643f8753b0502c3b682293992485b0eef2807afa5cba"},
|
||||
@@ -369,6 +384,7 @@ version = "1.34.11"
|
||||
description = "The AWS SDK for Python"
|
||||
optional = false
|
||||
python-versions = ">= 3.8"
|
||||
groups = ["main"]
|
||||
files = [
|
||||
{file = "boto3-1.34.11-py3-none-any.whl", hash = "sha256:1af021e0c6e3040e8de66d403e963566476235bb70f9a8e3f6784813ac2d8026"},
|
||||
{file = "boto3-1.34.11.tar.gz", hash = "sha256:31c130a40ec0631059b77d7e87f67ad03ff1685a5b37638ac0c4687026a3259d"},
|
||||
@@ -388,6 +404,7 @@ version = "1.26.16"
|
||||
description = "Type annotations for boto3 1.26.16 generated with mypy-boto3-builder 7.11.11"
|
||||
optional = false
|
||||
python-versions = ">=3.7"
|
||||
groups = ["main"]
|
||||
files = [
|
||||
{file = "boto3-stubs-1.26.16.tar.gz", hash = "sha256:618253ae19f1480785759bcaee8c8b10ed3fc037027247c26a3461a50f58406d"},
|
||||
{file = "boto3_stubs-1.26.16-py3-none-any.whl", hash = "sha256:8cf2925bc3e1349c93eb0f49c1061affc5ca314d69eeb335349037969d0787ed"},
|
||||
@@ -732,6 +749,7 @@ version = "1.34.11"
|
||||
description = "Low-level, data-driven core of boto 3."
|
||||
optional = false
|
||||
python-versions = ">= 3.8"
|
||||
groups = ["main"]
|
||||
files = [
|
||||
{file = "botocore-1.34.11-py3-none-any.whl", hash = "sha256:1ff1398b6ea670e1c01ac67a33af3da854f8e700d3528289c04f319c330d8250"},
|
||||
{file = "botocore-1.34.11.tar.gz", hash = "sha256:51905c3d623c60df5dc5794387de7caf886d350180a01a3dfa762e903edb45a9"},
|
||||
@@ -751,6 +769,7 @@ version = "1.27.38"
|
||||
description = "Type annotations for botocore 1.27.38 generated with mypy-boto3-builder 7.10.1"
|
||||
optional = false
|
||||
python-versions = ">=3.7"
|
||||
groups = ["main"]
|
||||
files = [
|
||||
{file = "botocore-stubs-1.27.38.tar.gz", hash = "sha256:408e8b86b5d171b58f81c74ca9d3b5317a5a8e2d3bc2073aa841ac13b8939e56"},
|
||||
{file = "botocore_stubs-1.27.38-py3-none-any.whl", hash = "sha256:7add7641e9a479a9c8366893bb522fd9ca3d58714201e43662a200a148a1bc38"},
|
||||
@@ -765,6 +784,7 @@ version = "2024.7.4"
|
||||
description = "Python package for providing Mozilla's CA Bundle."
|
||||
optional = false
|
||||
python-versions = ">=3.6"
|
||||
groups = ["main"]
|
||||
files = [
|
||||
{file = "certifi-2024.7.4-py3-none-any.whl", hash = "sha256:c198e21b1289c2ab85ee4e67bb4b4ef3ead0892059901a8d5b622f24a1101e90"},
|
||||
{file = "certifi-2024.7.4.tar.gz", hash = "sha256:5a1e7645bc0ec61a09e26c36f6106dd4cf40c6db3a1fb6352b0244e7fb057c7b"},
|
||||
@@ -776,6 +796,7 @@ version = "1.17.1"
|
||||
description = "Foreign Function Interface for Python calling C code."
|
||||
optional = false
|
||||
python-versions = ">=3.8"
|
||||
groups = ["main"]
|
||||
files = [
|
||||
{file = "cffi-1.17.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:df8b1c11f177bc2313ec4b2d46baec87a5f3e71fc8b45dab2ee7cae86d9aba14"},
|
||||
{file = "cffi-1.17.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:8f2cdc858323644ab277e9bb925ad72ae0e67f69e804f4898c070998d50b1a67"},
|
||||
@@ -855,6 +876,7 @@ version = "0.87.1"
|
||||
description = "Checks CloudFormation templates for practices and behaviour that could potentially be improved"
|
||||
optional = false
|
||||
python-versions = "!=4.0,<=4.0,>=3.8"
|
||||
groups = ["main"]
|
||||
files = [
|
||||
{file = "cfn_lint-0.87.1-py3-none-any.whl", hash = "sha256:d450f450635fc223b6f66880ccac52a5fd1a52966fa1705f1ba52b88dfed3071"},
|
||||
{file = "cfn_lint-0.87.1.tar.gz", hash = "sha256:b3ce9d3e5e0eadcea5d584c8ccaa00bf2a990a36a64d7ffd8683bc60b7e4f06f"},
|
||||
@@ -878,6 +900,7 @@ version = "2.1.0"
|
||||
description = "The Real First Universal Charset Detector. Open, modern and actively maintained alternative to Chardet."
|
||||
optional = false
|
||||
python-versions = ">=3.6.0"
|
||||
groups = ["main"]
|
||||
files = [
|
||||
{file = "charset-normalizer-2.1.0.tar.gz", hash = "sha256:575e708016ff3a5e3681541cb9d79312c416835686d054a23accb873b254f413"},
|
||||
{file = "charset_normalizer-2.1.0-py3-none-any.whl", hash = "sha256:5189b6f22b01957427f35b6a08d9a0bc45b46d3788ef5a92e978433c7a35f8a5"},
|
||||
@@ -892,6 +915,7 @@ version = "8.1.3"
|
||||
description = "Composable command line interface toolkit"
|
||||
optional = false
|
||||
python-versions = ">=3.7"
|
||||
groups = ["main"]
|
||||
files = [
|
||||
{file = "click-8.1.3-py3-none-any.whl", hash = "sha256:bb4d8133cb15a609f44e8213d9b391b0809795062913b383c62be0ee95b1db48"},
|
||||
{file = "click-8.1.3.tar.gz", hash = "sha256:7682dc8afb30297001674575ea00d1814d808d6a36af415a82bd481d37ba7b8e"},
|
||||
@@ -906,6 +930,7 @@ version = "0.7.17"
|
||||
description = "ClickHouse Database Core Driver for Python, Pandas, and Superset"
|
||||
optional = false
|
||||
python-versions = "~=3.8"
|
||||
groups = ["main"]
|
||||
files = [
|
||||
{file = "clickhouse-connect-0.7.17.tar.gz", hash = "sha256:854f1f9f3e024e7f89ae5d57cd3289d7a4c3dc91a9f24c4d233014f0ea19cb2d"},
|
||||
{file = "clickhouse_connect-0.7.17-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:aca36f5f28be1ada2981fce87724bbf451f267c918015baec59e527de3c9c882"},
|
||||
@@ -996,6 +1021,8 @@ version = "0.4.5"
|
||||
description = "Cross-platform colored terminal text."
|
||||
optional = false
|
||||
python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*"
|
||||
groups = ["main"]
|
||||
markers = "sys_platform == \"win32\" or platform_system == \"Windows\""
|
||||
files = [
|
||||
{file = "colorama-0.4.5-py2.py3-none-any.whl", hash = "sha256:854bf444933e37f5824ae7bfc1e98d5bce2ebe4160d46b5edf346a89358e99da"},
|
||||
{file = "colorama-0.4.5.tar.gz", hash = "sha256:e6c6b4334fc50988a639d9b98aa429a0b57da6e17b9a44f0451f930b6967b7a4"},
|
||||
@@ -1007,6 +1034,7 @@ version = "43.0.1"
|
||||
description = "cryptography is a package which provides cryptographic recipes and primitives to Python developers."
|
||||
optional = false
|
||||
python-versions = ">=3.7"
|
||||
groups = ["main"]
|
||||
files = [
|
||||
{file = "cryptography-43.0.1-cp37-abi3-macosx_10_9_universal2.whl", hash = "sha256:8385d98f6a3bf8bb2d65a73e17ed87a3ba84f6991c155691c51112075f9ffc5d"},
|
||||
{file = "cryptography-43.0.1-cp37-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:27e613d7077ac613e399270253259d9d53872aaf657471473ebfc9a52935c062"},
|
||||
@@ -1056,6 +1084,7 @@ version = "7.1.0"
|
||||
description = "A Python library for the Docker Engine API."
|
||||
optional = false
|
||||
python-versions = ">=3.8"
|
||||
groups = ["main"]
|
||||
files = [
|
||||
{file = "docker-7.1.0-py3-none-any.whl", hash = "sha256:c96b93b7f0a746f9e77d325bcfb87422a3d8bd4f03136ae8a85b37f1898d5fc0"},
|
||||
{file = "docker-7.1.0.tar.gz", hash = "sha256:ad8c70e6e3f8926cb8a92619b832b4ea5299e2831c14284663184e200546fa6c"},
|
||||
@@ -1078,6 +1107,7 @@ version = "1.9.0"
|
||||
description = "execnet: rapid multi-Python deployment"
|
||||
optional = false
|
||||
python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*"
|
||||
groups = ["main"]
|
||||
files = [
|
||||
{file = "execnet-1.9.0-py2.py3-none-any.whl", hash = "sha256:a295f7cc774947aac58dde7fdc85f4aa00c42adf5d8f5468fc630c1acf30a142"},
|
||||
{file = "execnet-1.9.0.tar.gz", hash = "sha256:8f694f3ba9cc92cab508b152dcfe322153975c29bda272e2fd7f3f00f36e47c5"},
|
||||
@@ -1092,6 +1122,7 @@ version = "2.2.5"
|
||||
description = "A simple framework for building complex web applications."
|
||||
optional = false
|
||||
python-versions = ">=3.7"
|
||||
groups = ["main"]
|
||||
files = [
|
||||
{file = "Flask-2.2.5-py3-none-any.whl", hash = "sha256:58107ed83443e86067e41eff4631b058178191a355886f8e479e347fa1285fdf"},
|
||||
{file = "Flask-2.2.5.tar.gz", hash = "sha256:edee9b0a7ff26621bd5a8c10ff484ae28737a2410d99b0bb9a6850c7fb977aa0"},
|
||||
@@ -1113,6 +1144,7 @@ version = "5.0.0"
|
||||
description = "A Flask extension adding a decorator for CORS support"
|
||||
optional = false
|
||||
python-versions = "*"
|
||||
groups = ["main"]
|
||||
files = [
|
||||
{file = "Flask_Cors-5.0.0-py2.py3-none-any.whl", hash = "sha256:b9e307d082a9261c100d8fb0ba909eec6a228ed1b60a8315fd85f783d61910bc"},
|
||||
{file = "flask_cors-5.0.0.tar.gz", hash = "sha256:5aadb4b950c4e93745034594d9f3ea6591f734bb3662e16e255ffbf5e89c88ef"},
|
||||
@@ -1127,6 +1159,7 @@ version = "1.5.0"
|
||||
description = "A list-like structure which implements collections.abc.MutableSequence"
|
||||
optional = false
|
||||
python-versions = ">=3.8"
|
||||
groups = ["main"]
|
||||
files = [
|
||||
{file = "frozenlist-1.5.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:5b6a66c18b5b9dd261ca98dffcb826a525334b2f29e7caa54e182255c5f6a65a"},
|
||||
{file = "frozenlist-1.5.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:d1b3eb7b05ea246510b43a7e53ed1653e55c2121019a97e60cad7efb881a97bb"},
|
||||
@@ -1228,6 +1261,7 @@ version = "3.2.1"
|
||||
description = "GraphQL implementation for Python, a port of GraphQL.js, the JavaScript reference implementation for GraphQL."
|
||||
optional = false
|
||||
python-versions = ">=3.6,<4"
|
||||
groups = ["main"]
|
||||
files = [
|
||||
{file = "graphql-core-3.2.1.tar.gz", hash = "sha256:9d1bf141427b7d54be944587c8349df791ce60ade2e3cccaf9c56368c133c201"},
|
||||
{file = "graphql_core-3.2.1-py3-none-any.whl", hash = "sha256:f83c658e4968998eed1923a2e3e3eddd347e005ac0315fbb7ca4d70ea9156323"},
|
||||
@@ -1239,6 +1273,7 @@ version = "0.14.0"
|
||||
description = "A pure-Python, bring-your-own-I/O implementation of HTTP/1.1"
|
||||
optional = false
|
||||
python-versions = ">=3.7"
|
||||
groups = ["main"]
|
||||
files = [
|
||||
{file = "h11-0.14.0-py3-none-any.whl", hash = "sha256:e3fe4ac4b851c468cc8363d500db52c2ead036020723024a109d37346efaa761"},
|
||||
{file = "h11-0.14.0.tar.gz", hash = "sha256:8f19fbbe99e72420ff35c00b27a34cb9937e902a8b810e2c88300c6f0a3b699d"},
|
||||
@@ -1247,27 +1282,33 @@ files = [
|
||||
[[package]]
|
||||
name = "h2"
|
||||
version = "4.1.0"
|
||||
description = "HTTP/2 State-Machine based protocol implementation"
|
||||
description = "Pure-Python HTTP/2 protocol implementation"
|
||||
optional = false
|
||||
python-versions = ">=3.6.1"
|
||||
files = [
|
||||
{file = "h2-4.1.0-py3-none-any.whl", hash = "sha256:03a46bcf682256c95b5fd9e9a99c1323584c3eec6440d379b9903d709476bc6d"},
|
||||
{file = "h2-4.1.0.tar.gz", hash = "sha256:a83aca08fbe7aacb79fec788c9c0bac936343560ed9ec18b82a13a12c28d2abb"},
|
||||
]
|
||||
python-versions = ">=3.9"
|
||||
groups = ["main"]
|
||||
files = []
|
||||
develop = false
|
||||
|
||||
[package.dependencies]
|
||||
hpack = ">=4.0,<5"
|
||||
hyperframe = ">=6.0,<7"
|
||||
hpack = ">=4.1,<5"
|
||||
hyperframe = ">=6.1,<7"
|
||||
|
||||
[package.source]
|
||||
type = "git"
|
||||
url = "https://github.com/python-hyper/h2"
|
||||
reference = "HEAD"
|
||||
resolved_reference = "0b98b244b5fd1fe96100ac14905417a3b70a4286"
|
||||
|
||||
[[package]]
|
||||
name = "hpack"
|
||||
version = "4.0.0"
|
||||
description = "Pure-Python HPACK header compression"
|
||||
version = "4.1.0"
|
||||
description = "Pure-Python HPACK header encoding"
|
||||
optional = false
|
||||
python-versions = ">=3.6.1"
|
||||
python-versions = ">=3.9"
|
||||
groups = ["main"]
|
||||
files = [
|
||||
{file = "hpack-4.0.0-py3-none-any.whl", hash = "sha256:84a076fad3dc9a9f8063ccb8041ef100867b1878b25ef0ee63847a5d53818a6c"},
|
||||
{file = "hpack-4.0.0.tar.gz", hash = "sha256:fc41de0c63e687ebffde81187a948221294896f6bdc0ae2312708df339430095"},
|
||||
{file = "hpack-4.1.0-py3-none-any.whl", hash = "sha256:157ac792668d995c657d93111f46b4535ed114f0c9c8d672271bbec7eae1b496"},
|
||||
{file = "hpack-4.1.0.tar.gz", hash = "sha256:ec5eca154f7056aa06f196a557655c5b009b382873ac8d1e66e79e87535f1dca"},
|
||||
]
|
||||
|
||||
[[package]]
|
||||
@@ -1276,6 +1317,7 @@ version = "1.0.3"
|
||||
description = "A minimal low-level HTTP client."
|
||||
optional = false
|
||||
python-versions = ">=3.8"
|
||||
groups = ["main"]
|
||||
files = [
|
||||
{file = "httpcore-1.0.3-py3-none-any.whl", hash = "sha256:9a6a501c3099307d9fd76ac244e08503427679b1e81ceb1d922485e2f2462ad2"},
|
||||
{file = "httpcore-1.0.3.tar.gz", hash = "sha256:5c0f9546ad17dac4d0772b0808856eb616eb8b48ce94f49ed819fd6982a8a544"},
|
||||
@@ -1297,6 +1339,7 @@ version = "0.26.0"
|
||||
description = "The next generation HTTP client."
|
||||
optional = false
|
||||
python-versions = ">=3.8"
|
||||
groups = ["main"]
|
||||
files = [
|
||||
{file = "httpx-0.26.0-py3-none-any.whl", hash = "sha256:8915f5a3627c4d47b73e8202457cb28f1266982d1159bd5779d86a80c0eab1cd"},
|
||||
{file = "httpx-0.26.0.tar.gz", hash = "sha256:451b55c30d5185ea6b23c2c793abf9bb237d2a7dfb901ced6ff69ad37ec1dfaf"},
|
||||
@@ -1318,13 +1361,14 @@ socks = ["socksio (==1.*)"]
|
||||
|
||||
[[package]]
|
||||
name = "hyperframe"
|
||||
version = "6.0.1"
|
||||
description = "HTTP/2 framing layer for Python"
|
||||
version = "6.1.0"
|
||||
description = "Pure-Python HTTP/2 framing"
|
||||
optional = false
|
||||
python-versions = ">=3.6.1"
|
||||
python-versions = ">=3.9"
|
||||
groups = ["main"]
|
||||
files = [
|
||||
{file = "hyperframe-6.0.1-py3-none-any.whl", hash = "sha256:0ec6bafd80d8ad2195c4f03aacba3a8265e57bc4cff261e802bf39970ed02a15"},
|
||||
{file = "hyperframe-6.0.1.tar.gz", hash = "sha256:ae510046231dc8e9ecb1a6586f63d2347bf4c8905914aa84ba585ae85f28a914"},
|
||||
{file = "hyperframe-6.1.0-py3-none-any.whl", hash = "sha256:b03380493a519fce58ea5af42e4a42317bf9bd425596f7a0835ffce80f1a42e5"},
|
||||
{file = "hyperframe-6.1.0.tar.gz", hash = "sha256:f630908a00854a7adeabd6382b43923a4c4cd4b821fcb527e6ab9e15382a3b08"},
|
||||
]
|
||||
|
||||
[[package]]
|
||||
@@ -1333,6 +1377,7 @@ version = "3.7"
|
||||
description = "Internationalized Domain Names in Applications (IDNA)"
|
||||
optional = false
|
||||
python-versions = ">=3.5"
|
||||
groups = ["main"]
|
||||
files = [
|
||||
{file = "idna-3.7-py3-none-any.whl", hash = "sha256:82fee1fc78add43492d3a1898bfa6d8a904cc97d8427f683ed8e798d07761aa0"},
|
||||
{file = "idna-3.7.tar.gz", hash = "sha256:028ff3aadf0609c1fd278d8ea3089299412a7a8b9bd005dd08b9f8285bcb5cfc"},
|
||||
@@ -1344,6 +1389,7 @@ version = "1.1.1"
|
||||
description = "iniconfig: brain-dead simple config-ini parsing"
|
||||
optional = false
|
||||
python-versions = "*"
|
||||
groups = ["main"]
|
||||
files = [
|
||||
{file = "iniconfig-1.1.1-py2.py3-none-any.whl", hash = "sha256:011e24c64b7f47f6ebd835bb12a743f2fbe9a26d4cecaa7f53bc4f35ee9da8b3"},
|
||||
{file = "iniconfig-1.1.1.tar.gz", hash = "sha256:bc3af051d7d14b2ee5ef9969666def0cd1a000e121eaea580d4a313df4b37f32"},
|
||||
@@ -1355,6 +1401,7 @@ version = "2.1.2"
|
||||
description = "Safely pass data to untrusted environments and back."
|
||||
optional = false
|
||||
python-versions = ">=3.7"
|
||||
groups = ["main"]
|
||||
files = [
|
||||
{file = "itsdangerous-2.1.2-py3-none-any.whl", hash = "sha256:2c2349112351b88699d8d4b6b075022c0808887cb7ad10069318a8b0bc88db44"},
|
||||
{file = "itsdangerous-2.1.2.tar.gz", hash = "sha256:5dbbc68b317e5e42f327f9021763545dc3fc3bfe22e6deb96aaf1fc38874156a"},
|
||||
@@ -1366,6 +1413,7 @@ version = "3.1.5"
|
||||
description = "A very fast and expressive template engine."
|
||||
optional = false
|
||||
python-versions = ">=3.7"
|
||||
groups = ["main"]
|
||||
files = [
|
||||
{file = "jinja2-3.1.5-py3-none-any.whl", hash = "sha256:aba0f4dc9ed8013c424088f68a5c226f7d6097ed89b246d7749c2ec4175c6adb"},
|
||||
{file = "jinja2-3.1.5.tar.gz", hash = "sha256:8fefff8dc3034e27bb80d67c671eb8a9bc424c0ef4c0826edbff304cceff43bb"},
|
||||
@@ -1383,6 +1431,7 @@ version = "1.0.1"
|
||||
description = "JSON Matching Expressions"
|
||||
optional = false
|
||||
python-versions = ">=3.7"
|
||||
groups = ["main"]
|
||||
files = [
|
||||
{file = "jmespath-1.0.1-py3-none-any.whl", hash = "sha256:02e2e4cc71b5bcab88332eebf907519190dd9e6e82107fa7f83b1003a6252980"},
|
||||
{file = "jmespath-1.0.1.tar.gz", hash = "sha256:90261b206d6defd58fdd5e85f478bf633a2901798906be2ad389150c5c60edbe"},
|
||||
@@ -1394,6 +1443,7 @@ version = "0.9.0"
|
||||
description = "The ultimate Python library for JOSE RFCs, including JWS, JWE, JWK, JWA, JWT"
|
||||
optional = false
|
||||
python-versions = ">=3.8"
|
||||
groups = ["main"]
|
||||
files = [
|
||||
{file = "joserfc-0.9.0-py3-none-any.whl", hash = "sha256:4026bdbe2c196cd40574e916fa1e28874d99649412edaab0e373dec3077153fb"},
|
||||
{file = "joserfc-0.9.0.tar.gz", hash = "sha256:eebca7f587b1761ce43a98ffd5327f2b600b9aa5bb0a77b947687f503ad43bc0"},
|
||||
@@ -1411,6 +1461,7 @@ version = "1.2.3"
|
||||
description = "Generate source code for Python classes from a JSON schema."
|
||||
optional = false
|
||||
python-versions = ">= 2.7"
|
||||
groups = ["main"]
|
||||
files = [
|
||||
{file = "jschema_to_python-1.2.3-py3-none-any.whl", hash = "sha256:8a703ca7604d42d74b2815eecf99a33359a8dccbb80806cce386d5e2dd992b05"},
|
||||
{file = "jschema_to_python-1.2.3.tar.gz", hash = "sha256:76ff14fe5d304708ccad1284e4b11f96a658949a31ee7faed9e0995279549b91"},
|
||||
@@ -1427,6 +1478,7 @@ version = "2.0.0"
|
||||
description = "Diff JSON and JSON-like structures in Python"
|
||||
optional = false
|
||||
python-versions = "*"
|
||||
groups = ["main"]
|
||||
files = [
|
||||
{file = "jsondiff-2.0.0-py3-none-any.whl", hash = "sha256:689841d66273fc88fc79f7d33f4c074774f4f214b6466e3aff0e5adaf889d1e0"},
|
||||
{file = "jsondiff-2.0.0.tar.gz", hash = "sha256:2795844ef075ec8a2b8d385c4d59f5ea48b08e7180fce3cb2787be0db00b1fb4"},
|
||||
@@ -1438,6 +1490,8 @@ version = "0.20.0"
|
||||
description = "Python bindings for Jsonnet - The data templating language"
|
||||
optional = false
|
||||
python-versions = "*"
|
||||
groups = ["main"]
|
||||
markers = "python_version < \"3.13\""
|
||||
files = [
|
||||
{file = "jsonnet-0.20.0.tar.gz", hash = "sha256:7e770c7bf3a366b97b650a39430450f77612e74406731eb75c5bd59f3f104d4f"},
|
||||
]
|
||||
@@ -1448,6 +1502,7 @@ version = "1.32"
|
||||
description = "Apply JSON-Patches (RFC 6902)"
|
||||
optional = false
|
||||
python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*"
|
||||
groups = ["main"]
|
||||
files = [
|
||||
{file = "jsonpatch-1.32-py2.py3-none-any.whl", hash = "sha256:26ac385719ac9f54df8a2f0827bb8253aa3ea8ab7b3368457bcdb8c14595a397"},
|
||||
{file = "jsonpatch-1.32.tar.gz", hash = "sha256:b6ddfe6c3db30d81a96aaeceb6baf916094ffa23d7dd5fa2c13e13f8b6e600c2"},
|
||||
@@ -1462,6 +1517,7 @@ version = "1.6.1"
|
||||
description = "A final implementation of JSONPath for Python that aims to be standard compliant, including arithmetic and binary comparison operators and providing clear AST for metaprogramming."
|
||||
optional = false
|
||||
python-versions = "*"
|
||||
groups = ["main"]
|
||||
files = [
|
||||
{file = "jsonpath-ng-1.6.1.tar.gz", hash = "sha256:086c37ba4917304850bd837aeab806670224d3f038fe2833ff593a672ef0a5fa"},
|
||||
{file = "jsonpath_ng-1.6.1-py3-none-any.whl", hash = "sha256:8f22cd8273d7772eea9aaa84d922e0841aa36fdb8a2c6b7f6c3791a16a9bc0be"},
|
||||
@@ -1476,6 +1532,7 @@ version = "2.2.0"
|
||||
description = "Python library for serializing any arbitrary object graph into JSON"
|
||||
optional = false
|
||||
python-versions = ">=2.7"
|
||||
groups = ["main"]
|
||||
files = [
|
||||
{file = "jsonpickle-2.2.0-py2.py3-none-any.whl", hash = "sha256:de7f2613818aa4f234138ca11243d6359ff83ae528b2185efdd474f62bcf9ae1"},
|
||||
{file = "jsonpickle-2.2.0.tar.gz", hash = "sha256:7b272918b0554182e53dc340ddd62d9b7f902fec7e7b05620c04f3ccef479a0e"},
|
||||
@@ -1492,6 +1549,7 @@ version = "2.3"
|
||||
description = "Identify specific nodes in a JSON document (RFC 6901)"
|
||||
optional = false
|
||||
python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*"
|
||||
groups = ["main"]
|
||||
files = [
|
||||
{file = "jsonpointer-2.3-py2.py3-none-any.whl", hash = "sha256:51801e558539b4e9cd268638c078c6c5746c9ac96bc38152d443400e4f3793e9"},
|
||||
{file = "jsonpointer-2.3.tar.gz", hash = "sha256:97cba51526c829282218feb99dab1b1e6bdf8efd1c43dc9d57be093c0d69c99a"},
|
||||
@@ -1503,6 +1561,7 @@ version = "4.17.3"
|
||||
description = "An implementation of JSON Schema validation for Python"
|
||||
optional = false
|
||||
python-versions = ">=3.7"
|
||||
groups = ["main"]
|
||||
files = [
|
||||
{file = "jsonschema-4.17.3-py3-none-any.whl", hash = "sha256:a870ad254da1a8ca84b6a2905cac29d265f805acc57af304784962a2aa6508f6"},
|
||||
{file = "jsonschema-4.17.3.tar.gz", hash = "sha256:0f864437ab8b6076ba6707453ef8f98a6a0d512a80e93f8abdb676f737ecb60d"},
|
||||
@@ -1522,6 +1581,7 @@ version = "0.1.6"
|
||||
description = "JSONSchema Spec with object-oriented paths"
|
||||
optional = false
|
||||
python-versions = ">=3.7.0,<4.0.0"
|
||||
groups = ["main"]
|
||||
files = [
|
||||
{file = "jsonschema_spec-0.1.6-py3-none-any.whl", hash = "sha256:f2206d18c89d1824c1f775ba14ed039743b41a9167bd2c5bdb774b66b3ca0bbf"},
|
||||
{file = "jsonschema_spec-0.1.6.tar.gz", hash = "sha256:90215863b56e212086641956b20127ccbf6d8a3a38343dad01d6a74d19482f76"},
|
||||
@@ -1539,6 +1599,7 @@ version = "1.9"
|
||||
description = "Creates JUnit XML test result documents that can be read by tools such as Jenkins"
|
||||
optional = false
|
||||
python-versions = "*"
|
||||
groups = ["main"]
|
||||
files = [
|
||||
{file = "junit-xml-1.9.tar.gz", hash = "sha256:de16a051990d4e25a3982b2dd9e89d671067548718866416faec14d9de56db9f"},
|
||||
{file = "junit_xml-1.9-py2.py3-none-any.whl", hash = "sha256:ec5ca1a55aefdd76d28fcc0b135251d156c7106fa979686a4b48d62b761b4732"},
|
||||
@@ -1553,6 +1614,7 @@ version = "1.5.6"
|
||||
description = "Implementation of JOSE Web standards"
|
||||
optional = false
|
||||
python-versions = ">= 3.8"
|
||||
groups = ["main"]
|
||||
files = [
|
||||
{file = "jwcrypto-1.5.6-py3-none-any.whl", hash = "sha256:150d2b0ebbdb8f40b77f543fb44ffd2baeff48788be71f67f03566692fd55789"},
|
||||
{file = "jwcrypto-1.5.6.tar.gz", hash = "sha256:771a87762a0c081ae6166958a954f80848820b2ab066937dc8b8379d65b1b039"},
|
||||
@@ -1568,6 +1630,7 @@ version = "2.0.2"
|
||||
description = "Pure Python client for Apache Kafka"
|
||||
optional = false
|
||||
python-versions = "*"
|
||||
groups = ["main"]
|
||||
files = [
|
||||
{file = "kafka-python-2.0.2.tar.gz", hash = "sha256:04dfe7fea2b63726cd6f3e79a2d86e709d608d74406638c5da33a01d45a9d7e3"},
|
||||
{file = "kafka_python-2.0.2-py2.py3-none-any.whl", hash = "sha256:2d92418c7cb1c298fa6c7f0fb3519b520d0d7526ac6cb7ae2a4fc65a51a94b6e"},
|
||||
@@ -1582,6 +1645,7 @@ version = "1.10.0"
|
||||
description = "A fast and thorough lazy object proxy."
|
||||
optional = false
|
||||
python-versions = ">=3.8"
|
||||
groups = ["main"]
|
||||
files = [
|
||||
{file = "lazy-object-proxy-1.10.0.tar.gz", hash = "sha256:78247b6d45f43a52ef35c25b5581459e85117225408a4128a3daf8bf9648ac69"},
|
||||
{file = "lazy_object_proxy-1.10.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:855e068b0358ab916454464a884779c7ffa312b8925c6f7401e952dcf3b89977"},
|
||||
@@ -1628,6 +1692,7 @@ version = "4.3.3"
|
||||
description = "LZ4 Bindings for Python"
|
||||
optional = false
|
||||
python-versions = ">=3.8"
|
||||
groups = ["main"]
|
||||
files = [
|
||||
{file = "lz4-4.3.3-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:b891880c187e96339474af2a3b2bfb11a8e4732ff5034be919aa9029484cd201"},
|
||||
{file = "lz4-4.3.3-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:222a7e35137d7539c9c33bb53fcbb26510c5748779364014235afc62b0ec797f"},
|
||||
@@ -1678,6 +1743,7 @@ version = "2.1.1"
|
||||
description = "Safely add untrusted strings to HTML/XML markup."
|
||||
optional = false
|
||||
python-versions = ">=3.7"
|
||||
groups = ["main"]
|
||||
files = [
|
||||
{file = "MarkupSafe-2.1.1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:86b1f75c4e7c2ac2ccdaec2b9022845dbb81880ca318bb7a0a01fbf7813e3812"},
|
||||
{file = "MarkupSafe-2.1.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:f121a1420d4e173a5d96e47e9a0c0dcff965afdf1626d28de1460815f7c4ee7a"},
|
||||
@@ -1727,6 +1793,7 @@ version = "5.0.6"
|
||||
description = ""
|
||||
optional = false
|
||||
python-versions = ">=3.8"
|
||||
groups = ["main"]
|
||||
files = [
|
||||
{file = "moto-5.0.6-py2.py3-none-any.whl", hash = "sha256:ca1e22831a741733b581ff2ef4d6ae2e1c6db1eab97af1b78b86ca2c6e88c609"},
|
||||
{file = "moto-5.0.6.tar.gz", hash = "sha256:ad8b23f2b555ad694da8b2432a42b6d96beaaf67a4e7d932196a72193a2eee2c"},
|
||||
@@ -1786,6 +1853,7 @@ version = "1.3.0"
|
||||
description = "Python library for arbitrary-precision floating-point arithmetic"
|
||||
optional = false
|
||||
python-versions = "*"
|
||||
groups = ["main"]
|
||||
files = [
|
||||
{file = "mpmath-1.3.0-py3-none-any.whl", hash = "sha256:a0b2b9fe80bbcd81a6647ff13108738cfb482d481d826cc0e02f5b35e5c88d2c"},
|
||||
{file = "mpmath-1.3.0.tar.gz", hash = "sha256:7a28eb2a9774d00c7bc92411c19a89209d5da7c4c9a9e227be8330a23a25b91f"},
|
||||
@@ -1803,6 +1871,7 @@ version = "6.0.5"
|
||||
description = "multidict implementation"
|
||||
optional = false
|
||||
python-versions = ">=3.7"
|
||||
groups = ["main"]
|
||||
files = [
|
||||
{file = "multidict-6.0.5-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:228b644ae063c10e7f324ab1ab6b548bdf6f8b47f3ec234fef1093bc2735e5f9"},
|
||||
{file = "multidict-6.0.5-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:896ebdcf62683551312c30e20614305f53125750803b614e9e6ce74a96232604"},
|
||||
@@ -1902,6 +1971,7 @@ version = "1.13.0"
|
||||
description = "Optional static typing for Python"
|
||||
optional = false
|
||||
python-versions = ">=3.8"
|
||||
groups = ["dev"]
|
||||
files = [
|
||||
{file = "mypy-1.13.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:6607e0f1dd1fb7f0aca14d936d13fd19eba5e17e1cd2a14f808fa5f8f6d8f60a"},
|
||||
{file = "mypy-1.13.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:8a21be69bd26fa81b1f80a61ee7ab05b076c674d9b18fb56239d72e21d9f4c80"},
|
||||
@@ -1954,6 +2024,7 @@ version = "1.26.0.post1"
|
||||
description = "Type annotations for boto3.S3 1.26.0 service generated with mypy-boto3-builder 7.11.10"
|
||||
optional = false
|
||||
python-versions = ">=3.7"
|
||||
groups = ["main"]
|
||||
files = [
|
||||
{file = "mypy-boto3-s3-1.26.0.post1.tar.gz", hash = "sha256:6d7079f8c739dc993cbedad0736299c413b297814b73795a3855a79169ecc938"},
|
||||
{file = "mypy_boto3_s3-1.26.0.post1-py3-none-any.whl", hash = "sha256:7de2792ff0cc541b84cd46ff3a6aa2b6e5f267217f2203f27f6e4016bddc644d"},
|
||||
@@ -1968,6 +2039,7 @@ version = "1.0.0"
|
||||
description = "Type system extensions for programs checked with the mypy type checker."
|
||||
optional = false
|
||||
python-versions = ">=3.5"
|
||||
groups = ["dev"]
|
||||
files = [
|
||||
{file = "mypy_extensions-1.0.0-py3-none-any.whl", hash = "sha256:4392f6c0eb8a5668a69e23d168ffa70f0be9ccfd32b5cc2d26a34ae5b844552d"},
|
||||
{file = "mypy_extensions-1.0.0.tar.gz", hash = "sha256:75dbf8955dc00442a438fc4d0666508a9a97b6bd41aa2f0ffe9d2f2725af0782"},
|
||||
@@ -1979,6 +2051,7 @@ version = "2.8.5"
|
||||
description = "Python package for creating and manipulating graphs and networks"
|
||||
optional = false
|
||||
python-versions = ">=3.8"
|
||||
groups = ["main"]
|
||||
files = [
|
||||
{file = "networkx-2.8.5-py3-none-any.whl", hash = "sha256:a762f4b385692d9c3a6f2912d058d76d29a827deaedf9e63ed14d397b8030687"},
|
||||
{file = "networkx-2.8.5.tar.gz", hash = "sha256:15a7b81a360791c458c55a417418ea136c13378cfdc06a2dcdc12bd2f9cf09c1"},
|
||||
@@ -1997,6 +2070,7 @@ version = "0.4.4"
|
||||
description = "OpenAPI schema validation for Python"
|
||||
optional = false
|
||||
python-versions = ">=3.7.0,<4.0.0"
|
||||
groups = ["main"]
|
||||
files = [
|
||||
{file = "openapi_schema_validator-0.4.4-py3-none-any.whl", hash = "sha256:79f37f38ef9fd5206b924ed7a6f382cea7b649b3b56383c47f1906082b7b9015"},
|
||||
{file = "openapi_schema_validator-0.4.4.tar.gz", hash = "sha256:c573e2be2c783abae56c5a1486ab716ca96e09d1c3eab56020d1dc680aa57bf8"},
|
||||
@@ -2015,6 +2089,7 @@ version = "0.5.7"
|
||||
description = "OpenAPI 2.0 (aka Swagger) and OpenAPI 3 spec validator"
|
||||
optional = false
|
||||
python-versions = ">=3.7.0,<4.0.0"
|
||||
groups = ["main"]
|
||||
files = [
|
||||
{file = "openapi_spec_validator-0.5.7-py3-none-any.whl", hash = "sha256:8712d2879db7692974ef89c47a3ebfc79436442921ec3a826ac0ce80cde8c549"},
|
||||
{file = "openapi_spec_validator-0.5.7.tar.gz", hash = "sha256:6c2d42180045a80fd6314de848b94310bdb0fa4949f4b099578b69f79d9fa5ac"},
|
||||
@@ -2032,6 +2107,7 @@ version = "24.2"
|
||||
description = "Core utilities for Python packages"
|
||||
optional = false
|
||||
python-versions = ">=3.8"
|
||||
groups = ["main"]
|
||||
files = [
|
||||
{file = "packaging-24.2-py3-none-any.whl", hash = "sha256:09abb1bccd265c01f4a3aa3f7a7db064b36514d2cba19a2f694fe6150451a759"},
|
||||
{file = "packaging-24.2.tar.gz", hash = "sha256:c228a6dc5e932d346bc5739379109d49e8853dd8223571c7c5b55260edc0b97f"},
|
||||
@@ -2043,6 +2119,7 @@ version = "0.4.3"
|
||||
description = "Object-oriented paths"
|
||||
optional = false
|
||||
python-versions = ">=3.7.0,<4.0.0"
|
||||
groups = ["main"]
|
||||
files = [
|
||||
{file = "pathable-0.4.3-py3-none-any.whl", hash = "sha256:cdd7b1f9d7d5c8b8d3315dbf5a86b2596053ae845f056f57d97c0eefff84da14"},
|
||||
{file = "pathable-0.4.3.tar.gz", hash = "sha256:5c869d315be50776cc8a993f3af43e0c60dc01506b399643f919034ebf4cdcab"},
|
||||
@@ -2054,6 +2131,7 @@ version = "5.9.0"
|
||||
description = "Python Build Reasonableness"
|
||||
optional = false
|
||||
python-versions = ">=2.6"
|
||||
groups = ["main"]
|
||||
files = [
|
||||
{file = "pbr-5.9.0-py2.py3-none-any.whl", hash = "sha256:e547125940bcc052856ded43be8e101f63828c2d94239ffbe2b327ba3d5ccf0a"},
|
||||
{file = "pbr-5.9.0.tar.gz", hash = "sha256:e8dca2f4b43560edef58813969f52a56cef023146cbb8931626db80e6c1c4308"},
|
||||
@@ -2065,6 +2143,7 @@ version = "1.0.0"
|
||||
description = "plugin and hook calling mechanisms for python"
|
||||
optional = false
|
||||
python-versions = ">=3.6"
|
||||
groups = ["main"]
|
||||
files = [
|
||||
{file = "pluggy-1.0.0-py2.py3-none-any.whl", hash = "sha256:74134bbf457f031a36d68416e1509f34bd5ccc019f0bcc952c7b909d06b37bd3"},
|
||||
{file = "pluggy-1.0.0.tar.gz", hash = "sha256:4224373bacce55f955a878bf9cfa763c1e360858e330072059e10bad68531159"},
|
||||
@@ -2080,6 +2159,7 @@ version = "3.11"
|
||||
description = "Python Lex & Yacc"
|
||||
optional = false
|
||||
python-versions = "*"
|
||||
groups = ["main"]
|
||||
files = [
|
||||
{file = "ply-3.11-py2.py3-none-any.whl", hash = "sha256:096f9b8350b65ebd2fd1346b12452efe5b9607f7482813ffca50c22722a807ce"},
|
||||
{file = "ply-3.11.tar.gz", hash = "sha256:00c7c1aaa88358b9c765b6d3000c6eec0ba42abca5351b095321aef446081da3"},
|
||||
@@ -2091,6 +2171,7 @@ version = "0.14.1"
|
||||
description = "Python client for the Prometheus monitoring system."
|
||||
optional = false
|
||||
python-versions = ">=3.6"
|
||||
groups = ["main"]
|
||||
files = [
|
||||
{file = "prometheus_client-0.14.1-py3-none-any.whl", hash = "sha256:522fded625282822a89e2773452f42df14b5a8e84a86433e3f8a189c1d54dc01"},
|
||||
{file = "prometheus_client-0.14.1.tar.gz", hash = "sha256:5459c427624961076277fdc6dc50540e2bacb98eebde99886e59ec55ed92093a"},
|
||||
@@ -2105,6 +2186,7 @@ version = "0.2.0"
|
||||
description = "Accelerated property cache"
|
||||
optional = false
|
||||
python-versions = ">=3.8"
|
||||
groups = ["main"]
|
||||
files = [
|
||||
{file = "propcache-0.2.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:c5869b8fd70b81835a6f187c5fdbe67917a04d7e52b6e7cc4e5fe39d55c39d58"},
|
||||
{file = "propcache-0.2.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:952e0d9d07609d9c5be361f33b0d6d650cd2bae393aabb11d9b719364521984b"},
|
||||
@@ -2212,6 +2294,7 @@ version = "5.9.4"
|
||||
description = "Cross-platform lib for process and system monitoring in Python."
|
||||
optional = false
|
||||
python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*"
|
||||
groups = ["main"]
|
||||
files = [
|
||||
{file = "psutil-5.9.4-cp27-cp27m-macosx_10_9_x86_64.whl", hash = "sha256:c1ca331af862803a42677c120aff8a814a804e09832f166f226bfd22b56feee8"},
|
||||
{file = "psutil-5.9.4-cp27-cp27m-manylinux2010_i686.whl", hash = "sha256:68908971daf802203f3d37e78d3f8831b6d1014864d7a85937941bb35f09aefe"},
|
||||
@@ -2238,6 +2321,7 @@ version = "2.9.10"
|
||||
description = "psycopg2 - Python-PostgreSQL Database Adapter"
|
||||
optional = false
|
||||
python-versions = ">=3.8"
|
||||
groups = ["main"]
|
||||
files = [
|
||||
{file = "psycopg2-binary-2.9.10.tar.gz", hash = "sha256:4b3df0e6990aa98acda57d983942eff13d824135fe2250e6522edaa782a06de2"},
|
||||
{file = "psycopg2_binary-2.9.10-cp310-cp310-macosx_12_0_x86_64.whl", hash = "sha256:0ea8e3d0ae83564f2fc554955d327fa081d065c8ca5cc6d2abb643e2c9c1200f"},
|
||||
@@ -2286,6 +2370,7 @@ files = [
|
||||
{file = "psycopg2_binary-2.9.10-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:bb89f0a835bcfc1d42ccd5f41f04870c1b936d8507c6df12b7737febc40f0909"},
|
||||
{file = "psycopg2_binary-2.9.10-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:f0c2d907a1e102526dd2986df638343388b94c33860ff3bbe1384130828714b1"},
|
||||
{file = "psycopg2_binary-2.9.10-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:f8157bed2f51db683f31306aa497311b560f2265998122abe1dce6428bd86567"},
|
||||
{file = "psycopg2_binary-2.9.10-cp313-cp313-win_amd64.whl", hash = "sha256:27422aa5f11fbcd9b18da48373eb67081243662f9b46e6fd07c3eb46e4535142"},
|
||||
{file = "psycopg2_binary-2.9.10-cp38-cp38-macosx_12_0_x86_64.whl", hash = "sha256:eb09aa7f9cecb45027683bb55aebaaf45a0df8bf6de68801a6afdc7947bb09d4"},
|
||||
{file = "psycopg2_binary-2.9.10-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b73d6d7f0ccdad7bc43e6d34273f70d587ef62f824d7261c4ae9b8b1b6af90e8"},
|
||||
{file = "psycopg2_binary-2.9.10-cp38-cp38-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ce5ab4bf46a211a8e924d307c1b1fcda82368586a19d0a24f8ae166f5c784864"},
|
||||
@@ -2314,6 +2399,7 @@ version = "0.5.4"
|
||||
description = "Pure Python PartiQL Parser"
|
||||
optional = false
|
||||
python-versions = "*"
|
||||
groups = ["main"]
|
||||
files = [
|
||||
{file = "py_partiql_parser-0.5.4-py2.py3-none-any.whl", hash = "sha256:3dc4295a47da9587681a96b35c6e151886fdbd0a4acbe0d97c4c68e5f689d315"},
|
||||
{file = "py_partiql_parser-0.5.4.tar.gz", hash = "sha256:72e043919538fa63edae72fb59afc7e3fd93adbde656718a7d2b4666f23dd114"},
|
||||
@@ -2328,6 +2414,7 @@ version = "2.21"
|
||||
description = "C parser in Python"
|
||||
optional = false
|
||||
python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*"
|
||||
groups = ["main"]
|
||||
files = [
|
||||
{file = "pycparser-2.21-py2.py3-none-any.whl", hash = "sha256:8ee45429555515e1f6b185e78100aea234072576aa43ab53aefcae078162fca9"},
|
||||
{file = "pycparser-2.21.tar.gz", hash = "sha256:e644fdec12f7872f86c58ff790da456218b10f863970249516d60a5eaca77206"},
|
||||
@@ -2339,6 +2426,7 @@ version = "2.10.4"
|
||||
description = "Data validation using Python type hints"
|
||||
optional = false
|
||||
python-versions = ">=3.8"
|
||||
groups = ["main"]
|
||||
files = [
|
||||
{file = "pydantic-2.10.4-py3-none-any.whl", hash = "sha256:597e135ea68be3a37552fb524bc7d0d66dcf93d395acd93a00682f1efcb8ee3d"},
|
||||
{file = "pydantic-2.10.4.tar.gz", hash = "sha256:82f12e9723da6de4fe2ba888b5971157b3be7ad914267dea8f05f82b28254f06"},
|
||||
@@ -2359,6 +2447,7 @@ version = "2.27.2"
|
||||
description = "Core functionality for Pydantic validation and serialization"
|
||||
optional = false
|
||||
python-versions = ">=3.8"
|
||||
groups = ["main"]
|
||||
files = [
|
||||
{file = "pydantic_core-2.27.2-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:2d367ca20b2f14095a8f4fa1210f5a7b78b8a20009ecced6b12818f455b1e9fa"},
|
||||
{file = "pydantic_core-2.27.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:491a2b73db93fab69731eaee494f320faa4e093dbed776be1a829c2eb222c34c"},
|
||||
@@ -2471,6 +2560,7 @@ version = "2.4.0"
|
||||
description = "JSON Web Token implementation in Python"
|
||||
optional = false
|
||||
python-versions = ">=3.6"
|
||||
groups = ["main"]
|
||||
files = [
|
||||
{file = "PyJWT-2.4.0-py3-none-any.whl", hash = "sha256:72d1d253f32dbd4f5c88eaf1fdc62f3a19f676ccbadb9dbc5d07e951b2b26daf"},
|
||||
{file = "PyJWT-2.4.0.tar.gz", hash = "sha256:d42908208c699b3b973cbeb01a969ba6a96c821eefb1c5bfe4c390c01d67abba"},
|
||||
@@ -2491,6 +2581,7 @@ version = "3.0.9"
|
||||
description = "pyparsing module - Classes and methods to define and execute parsing grammars"
|
||||
optional = false
|
||||
python-versions = ">=3.6.8"
|
||||
groups = ["main"]
|
||||
files = [
|
||||
{file = "pyparsing-3.0.9-py3-none-any.whl", hash = "sha256:5026bae9a10eeaefb61dab2f09052b9f4307d44aee4eda64b309723d8d206bbc"},
|
||||
{file = "pyparsing-3.0.9.tar.gz", hash = "sha256:2b020ecf7d21b687f219b71ecad3631f644a47f01403fa1d1036b0c6416d70fb"},
|
||||
@@ -2505,6 +2596,7 @@ version = "0.18.1"
|
||||
description = "Persistent/Functional/Immutable data structures"
|
||||
optional = false
|
||||
python-versions = ">=3.7"
|
||||
groups = ["main"]
|
||||
files = [
|
||||
{file = "pyrsistent-0.18.1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:df46c854f490f81210870e509818b729db4488e1f30f2a1ce1698b2295a878d1"},
|
||||
{file = "pyrsistent-0.18.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5d45866ececf4a5fff8742c25722da6d4c9e180daa7b405dc0a2a2790d668c26"},
|
||||
@@ -2535,6 +2627,7 @@ version = "7.4.4"
|
||||
description = "pytest: simple powerful testing with Python"
|
||||
optional = false
|
||||
python-versions = ">=3.7"
|
||||
groups = ["main"]
|
||||
files = [
|
||||
{file = "pytest-7.4.4-py3-none-any.whl", hash = "sha256:b090cdf5ed60bf4c45261be03239c2c1c22df034fbffe691abe93cd80cea01d8"},
|
||||
{file = "pytest-7.4.4.tar.gz", hash = "sha256:2cf0005922c6ace4a3e2ec8b4080eb0d9753fdc93107415332f50ce9e7994280"},
|
||||
@@ -2555,6 +2648,7 @@ version = "0.21.0"
|
||||
description = "Pytest support for asyncio"
|
||||
optional = false
|
||||
python-versions = ">=3.7"
|
||||
groups = ["main"]
|
||||
files = [
|
||||
{file = "pytest-asyncio-0.21.0.tar.gz", hash = "sha256:2b38a496aef56f56b0e87557ec313e11e1ab9276fc3863f6a7be0f1d0e415e1b"},
|
||||
{file = "pytest_asyncio-0.21.0-py3-none-any.whl", hash = "sha256:f2b3366b7cd501a4056858bd39349d5af19742aed2d81660b7998b6341c7eb9c"},
|
||||
@@ -2573,6 +2667,7 @@ version = "1.0.8"
|
||||
description = "pytest-httpserver is a httpserver for pytest"
|
||||
optional = false
|
||||
python-versions = ">=3.8,<4.0"
|
||||
groups = ["main"]
|
||||
files = [
|
||||
{file = "pytest_httpserver-1.0.8-py3-none-any.whl", hash = "sha256:24cd3d9f6a0b927c7bfc400d0b3fda7442721b8267ce29942bf307b190f0bb09"},
|
||||
{file = "pytest_httpserver-1.0.8.tar.gz", hash = "sha256:e052f69bc8a9073db02484681e8e47004dd1fb3763b0ae833bd899e5895c559a"},
|
||||
@@ -2587,6 +2682,7 @@ version = "0.6.3"
|
||||
description = "It helps to use fixtures in pytest.mark.parametrize"
|
||||
optional = false
|
||||
python-versions = "*"
|
||||
groups = ["main"]
|
||||
files = [
|
||||
{file = "pytest-lazy-fixture-0.6.3.tar.gz", hash = "sha256:0e7d0c7f74ba33e6e80905e9bfd81f9d15ef9a790de97993e34213deb5ad10ac"},
|
||||
{file = "pytest_lazy_fixture-0.6.3-py3-none-any.whl", hash = "sha256:e0b379f38299ff27a653f03eaa69b08a6fd4484e46fd1c9907d984b9f9daeda6"},
|
||||
@@ -2601,6 +2697,7 @@ version = "1.1.0"
|
||||
description = "pytest plugin to run your tests in a specific order"
|
||||
optional = false
|
||||
python-versions = ">=3.6"
|
||||
groups = ["main"]
|
||||
files = [
|
||||
{file = "pytest-order-1.1.0.tar.gz", hash = "sha256:139d25b30826b78eebb42722f747eab14c44b88059d7a71d4f79d14a057269a5"},
|
||||
{file = "pytest_order-1.1.0-py3-none-any.whl", hash = "sha256:3b3730969c97900fa5cd31ecff80847680ed56b2490954565c14949ba60d9371"},
|
||||
@@ -2615,6 +2712,7 @@ version = "0.9.3"
|
||||
description = "pytest plugin for repeating tests"
|
||||
optional = false
|
||||
python-versions = ">=3.7"
|
||||
groups = ["main"]
|
||||
files = [
|
||||
{file = "pytest_repeat-0.9.3-py3-none-any.whl", hash = "sha256:26ab2df18226af9d5ce441c858f273121e92ff55f5bb311d25755b8d7abdd8ed"},
|
||||
{file = "pytest_repeat-0.9.3.tar.gz", hash = "sha256:ffd3836dfcd67bb270bec648b330e20be37d2966448c4148c4092d1e8aba8185"},
|
||||
@@ -2629,6 +2727,7 @@ version = "15.0"
|
||||
description = "pytest plugin to re-run tests to eliminate flaky failures"
|
||||
optional = false
|
||||
python-versions = ">=3.9"
|
||||
groups = ["main"]
|
||||
files = [
|
||||
{file = "pytest-rerunfailures-15.0.tar.gz", hash = "sha256:2d9ac7baf59f4c13ac730b47f6fa80e755d1ba0581da45ce30b72fb3542b4474"},
|
||||
{file = "pytest_rerunfailures-15.0-py3-none-any.whl", hash = "sha256:dd150c4795c229ef44320adc9a0c0532c51b78bb7a6843a8c53556b9a611df1a"},
|
||||
@@ -2644,6 +2743,7 @@ version = "0.8.1"
|
||||
description = "Pytest plugin which splits the test suite to equally sized sub suites based on test execution time."
|
||||
optional = false
|
||||
python-versions = ">=3.7.1,<4.0"
|
||||
groups = ["main"]
|
||||
files = [
|
||||
{file = "pytest_split-0.8.1-py3-none-any.whl", hash = "sha256:74b110ea091bd147cc1c5f9665a59506e5cedfa66f96a89fb03e4ab447c2c168"},
|
||||
{file = "pytest_split-0.8.1.tar.gz", hash = "sha256:2d88bd3dc528689a7a3f58fc12ea165c3aa62e90795e420dfad920afe5612d6d"},
|
||||
@@ -2658,6 +2758,7 @@ version = "2.1.0"
|
||||
description = "pytest plugin to abort hanging tests"
|
||||
optional = false
|
||||
python-versions = ">=3.6"
|
||||
groups = ["main"]
|
||||
files = [
|
||||
{file = "pytest-timeout-2.1.0.tar.gz", hash = "sha256:c07ca07404c612f8abbe22294b23c368e2e5104b521c1790195561f37e1ac3d9"},
|
||||
{file = "pytest_timeout-2.1.0-py3-none-any.whl", hash = "sha256:f6f50101443ce70ad325ceb4473c4255e9d74e3c7cd0ef827309dfa4c0d975c6"},
|
||||
@@ -2672,6 +2773,7 @@ version = "3.3.1"
|
||||
description = "pytest xdist plugin for distributed testing, most importantly across multiple CPUs"
|
||||
optional = false
|
||||
python-versions = ">=3.7"
|
||||
groups = ["main"]
|
||||
files = [
|
||||
{file = "pytest-xdist-3.3.1.tar.gz", hash = "sha256:d5ee0520eb1b7bcca50a60a518ab7a7707992812c578198f8b44fdfac78e8c93"},
|
||||
{file = "pytest_xdist-3.3.1-py3-none-any.whl", hash = "sha256:ff9daa7793569e6a68544850fd3927cd257cc03a7ef76c95e86915355e82b5f2"},
|
||||
@@ -2692,6 +2794,7 @@ version = "2.8.2"
|
||||
description = "Extensions to the standard Python datetime module"
|
||||
optional = false
|
||||
python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,>=2.7"
|
||||
groups = ["main"]
|
||||
files = [
|
||||
{file = "python-dateutil-2.8.2.tar.gz", hash = "sha256:0123cacc1627ae19ddf3c27a5de5bd67ee4586fbdd6440d9748f8abb483d3e86"},
|
||||
{file = "python_dateutil-2.8.2-py2.py3-none-any.whl", hash = "sha256:961d03dc3453ebbc59dbdea9e4e11c5651520a876d0f4db161e8674aae935da9"},
|
||||
@@ -2706,6 +2809,7 @@ version = "1.0.1"
|
||||
description = "Read key-value pairs from a .env file and set them as environment variables"
|
||||
optional = false
|
||||
python-versions = ">=3.8"
|
||||
groups = ["main"]
|
||||
files = [
|
||||
{file = "python-dotenv-1.0.1.tar.gz", hash = "sha256:e324ee90a023d808f1959c46bcbc04446a10ced277783dc6ee09987c37ec10ca"},
|
||||
{file = "python_dotenv-1.0.1-py3-none-any.whl", hash = "sha256:f7b63ef50f1b690dddf550d03497b66d609393b40b564ed0d674909a68ebf16a"},
|
||||
@@ -2720,6 +2824,7 @@ version = "2024.1"
|
||||
description = "World timezone definitions, modern and historical"
|
||||
optional = false
|
||||
python-versions = "*"
|
||||
groups = ["main"]
|
||||
files = [
|
||||
{file = "pytz-2024.1-py2.py3-none-any.whl", hash = "sha256:328171f4e3623139da4983451950b28e95ac706e13f3f2630a879749e7a8b319"},
|
||||
{file = "pytz-2024.1.tar.gz", hash = "sha256:2a29735ea9c18baf14b448846bde5a48030ed267578472d8955cd0e7443a9812"},
|
||||
@@ -2731,6 +2836,8 @@ version = "308"
|
||||
description = "Python for Window Extensions"
|
||||
optional = false
|
||||
python-versions = "*"
|
||||
groups = ["main"]
|
||||
markers = "sys_platform == \"win32\""
|
||||
files = [
|
||||
{file = "pywin32-308-cp310-cp310-win32.whl", hash = "sha256:796ff4426437896550d2981b9c2ac0ffd75238ad9ea2d3bfa67a1abd546d262e"},
|
||||
{file = "pywin32-308-cp310-cp310-win_amd64.whl", hash = "sha256:4fc888c59b3c0bef905ce7eb7e2106a07712015ea1c8234b703a088d46110e8e"},
|
||||
@@ -2758,6 +2865,7 @@ version = "6.0.2"
|
||||
description = "YAML parser and emitter for Python"
|
||||
optional = false
|
||||
python-versions = ">=3.8"
|
||||
groups = ["main"]
|
||||
files = [
|
||||
{file = "PyYAML-6.0.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:0a9a2848a5b7feac301353437eb7d5957887edbf81d56e903999a75a3d743086"},
|
||||
{file = "PyYAML-6.0.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:29717114e51c84ddfba879543fb232a6ed60086602313ca38cce623c1d62cfbf"},
|
||||
@@ -2820,6 +2928,7 @@ version = "2024.4.28"
|
||||
description = "Alternative regular expression module, to replace re."
|
||||
optional = false
|
||||
python-versions = ">=3.8"
|
||||
groups = ["main"]
|
||||
files = [
|
||||
{file = "regex-2024.4.28-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:cd196d056b40af073d95a2879678585f0b74ad35190fac04ca67954c582c6b61"},
|
||||
{file = "regex-2024.4.28-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:8bb381f777351bd534462f63e1c6afb10a7caa9fa2a421ae22c26e796fe31b1f"},
|
||||
@@ -2908,6 +3017,7 @@ version = "2.32.3"
|
||||
description = "Python HTTP for Humans."
|
||||
optional = false
|
||||
python-versions = ">=3.8"
|
||||
groups = ["main"]
|
||||
files = [
|
||||
{file = "requests-2.32.3-py3-none-any.whl", hash = "sha256:70761cfe03c773ceb22aa2f671b4757976145175cdfca038c02654d061d6dcc6"},
|
||||
{file = "requests-2.32.3.tar.gz", hash = "sha256:55365417734eb18255590a9ff9eb97e9e1da868d4ccd6402399eaf68af20a760"},
|
||||
@@ -2929,6 +3039,7 @@ version = "0.25.3"
|
||||
description = "A utility library for mocking out the `requests` Python library."
|
||||
optional = false
|
||||
python-versions = ">=3.8"
|
||||
groups = ["main"]
|
||||
files = [
|
||||
{file = "responses-0.25.3-py3-none-any.whl", hash = "sha256:521efcbc82081ab8daa588e08f7e8a64ce79b91c39f6e62199b19159bea7dbcb"},
|
||||
{file = "responses-0.25.3.tar.gz", hash = "sha256:617b9247abd9ae28313d57a75880422d55ec63c29d33d629697590a034358dba"},
|
||||
@@ -2948,6 +3059,7 @@ version = "0.1.4"
|
||||
description = "A pure python RFC3339 validator"
|
||||
optional = false
|
||||
python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*"
|
||||
groups = ["main"]
|
||||
files = [
|
||||
{file = "rfc3339_validator-0.1.4-py2.py3-none-any.whl", hash = "sha256:24f6ec1eda14ef823da9e36ec7113124b39c04d50a4d3d3a3c2859577e7791fa"},
|
||||
{file = "rfc3339_validator-0.1.4.tar.gz", hash = "sha256:138a2abdf93304ad60530167e51d2dfb9549521a836871b88d7f4695d0022f6b"},
|
||||
@@ -2962,6 +3074,7 @@ version = "0.7.0"
|
||||
description = "An extremely fast Python linter and code formatter, written in Rust."
|
||||
optional = false
|
||||
python-versions = ">=3.7"
|
||||
groups = ["dev"]
|
||||
files = [
|
||||
{file = "ruff-0.7.0-py3-none-linux_armv6l.whl", hash = "sha256:0cdf20c2b6ff98e37df47b2b0bd3a34aaa155f59a11182c1303cce79be715628"},
|
||||
{file = "ruff-0.7.0-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:496494d350c7fdeb36ca4ef1c9f21d80d182423718782222c29b3e72b3512737"},
|
||||
@@ -2989,6 +3102,7 @@ version = "0.10.0"
|
||||
description = "An Amazon S3 Transfer Manager"
|
||||
optional = false
|
||||
python-versions = ">= 3.8"
|
||||
groups = ["main"]
|
||||
files = [
|
||||
{file = "s3transfer-0.10.0-py3-none-any.whl", hash = "sha256:3cdb40f5cfa6966e812209d0994f2a4709b561c88e90cf00c2696d2df4e56b2e"},
|
||||
{file = "s3transfer-0.10.0.tar.gz", hash = "sha256:d0c8bbf672d5eebbe4e57945e23b972d963f07d82f661cabf678a5c88831595b"},
|
||||
@@ -3006,6 +3120,7 @@ version = "1.0.4"
|
||||
description = "Classes implementing the SARIF 2.1.0 object model."
|
||||
optional = false
|
||||
python-versions = ">= 2.7"
|
||||
groups = ["main"]
|
||||
files = [
|
||||
{file = "sarif_om-1.0.4-py3-none-any.whl", hash = "sha256:539ef47a662329b1c8502388ad92457425e95dc0aaaf995fe46f4984c4771911"},
|
||||
{file = "sarif_om-1.0.4.tar.gz", hash = "sha256:cd5f416b3083e00d402a92e449a7ff67af46f11241073eea0461802a3b5aef98"},
|
||||
@@ -3021,6 +3136,7 @@ version = "70.0.0"
|
||||
description = "Easily download, build, install, upgrade, and uninstall Python packages"
|
||||
optional = false
|
||||
python-versions = ">=3.8"
|
||||
groups = ["main"]
|
||||
files = [
|
||||
{file = "setuptools-70.0.0-py3-none-any.whl", hash = "sha256:54faa7f2e8d2d11bcd2c07bed282eef1046b5c080d1c32add737d7b5817b1ad4"},
|
||||
{file = "setuptools-70.0.0.tar.gz", hash = "sha256:f211a66637b8fa059bb28183da127d4e86396c991a942b028c6650d4319c3fd0"},
|
||||
@@ -3036,6 +3152,7 @@ version = "1.16.0"
|
||||
description = "Python 2 and 3 compatibility utilities"
|
||||
optional = false
|
||||
python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*"
|
||||
groups = ["main"]
|
||||
files = [
|
||||
{file = "six-1.16.0-py2.py3-none-any.whl", hash = "sha256:8abb2f1d86890a2dfb989f9a77cfcfd3e47c2a354b01111771326f8aa26e0254"},
|
||||
{file = "six-1.16.0.tar.gz", hash = "sha256:1e61c37477a1626458e36f7b1d82aa5c9b094fa4802892072e49de9c60c4c926"},
|
||||
@@ -3047,6 +3164,7 @@ version = "1.3.0"
|
||||
description = "Sniff out which async library your code is running under"
|
||||
optional = false
|
||||
python-versions = ">=3.7"
|
||||
groups = ["main"]
|
||||
files = [
|
||||
{file = "sniffio-1.3.0-py3-none-any.whl", hash = "sha256:eecefdce1e5bbfb7ad2eeaabf7c1eeb404d7757c379bd1f7e5cce9d8bf425384"},
|
||||
{file = "sniffio-1.3.0.tar.gz", hash = "sha256:e60305c5e5d314f5389259b7f22aaa33d8f7dee49763119234af3755c55b9101"},
|
||||
@@ -3058,6 +3176,7 @@ version = "1.12"
|
||||
description = "Computer algebra system (CAS) in Python"
|
||||
optional = false
|
||||
python-versions = ">=3.8"
|
||||
groups = ["main"]
|
||||
files = [
|
||||
{file = "sympy-1.12-py3-none-any.whl", hash = "sha256:c3588cd4295d0c0f603d0f2ae780587e64e2efeedb3521e46b9bb1d08d184fa5"},
|
||||
{file = "sympy-1.12.tar.gz", hash = "sha256:ebf595c8dac3e0fdc4152c51878b498396ec7f30e7a914d6071e674d49420fb8"},
|
||||
@@ -3072,6 +3191,7 @@ version = "4.9.0"
|
||||
description = "Python library for throwaway instances of anything that can run in a Docker container"
|
||||
optional = false
|
||||
python-versions = "<4.0,>=3.9"
|
||||
groups = ["main"]
|
||||
files = [
|
||||
{file = "testcontainers-4.9.0-py3-none-any.whl", hash = "sha256:c6fee929990972c40bf6b91b7072c94064ff3649b405a14fde0274c8b2479d32"},
|
||||
{file = "testcontainers-4.9.0.tar.gz", hash = "sha256:2cd6af070109ff68c1ab5389dc89c86c2dc3ab30a21ca734b2cb8f0f80ad479e"},
|
||||
@@ -3125,6 +3245,7 @@ version = "0.10.2"
|
||||
description = "Python Library for Tom's Obvious, Minimal Language"
|
||||
optional = false
|
||||
python-versions = ">=2.6, !=3.0.*, !=3.1.*, !=3.2.*"
|
||||
groups = ["main"]
|
||||
files = [
|
||||
{file = "toml-0.10.2-py2.py3-none-any.whl", hash = "sha256:806143ae5bfb6a3c6e736a764057db0e6a0e05e338b5630894a5f779cabb4f9b"},
|
||||
{file = "toml-0.10.2.tar.gz", hash = "sha256:b3bda1d108d5dd99f4a20d24d9c348e91c4db7ab1b749200bded2f839ccbe68f"},
|
||||
@@ -3136,6 +3257,7 @@ version = "1.5.0.20240925"
|
||||
description = "Typing stubs for jwcrypto"
|
||||
optional = false
|
||||
python-versions = ">=3.8"
|
||||
groups = ["main"]
|
||||
files = [
|
||||
{file = "types-jwcrypto-1.5.0.20240925.tar.gz", hash = "sha256:50e17b790378c96239344476c7bd13b52d0c7eeb6d16c2d53723e48cc6bbf4fe"},
|
||||
{file = "types_jwcrypto-1.5.0.20240925-py3-none-any.whl", hash = "sha256:2d12a2d528240d326075e896aafec7056b9136bf3207fa6ccf3fcb8fbf9e11a1"},
|
||||
@@ -3150,6 +3272,7 @@ version = "5.9.5.12"
|
||||
description = "Typing stubs for psutil"
|
||||
optional = false
|
||||
python-versions = "*"
|
||||
groups = ["main"]
|
||||
files = [
|
||||
{file = "types-psutil-5.9.5.12.tar.gz", hash = "sha256:61a91679d3fe737250013b624dca09375e7cc3ad77dcc734553746c429c02aca"},
|
||||
{file = "types_psutil-5.9.5.12-py3-none-any.whl", hash = "sha256:e9a147b8561235c6afcce5aa1adb973fad9ab2c50cf89820697687f53510358f"},
|
||||
@@ -3161,6 +3284,7 @@ version = "2.9.21.20241019"
|
||||
description = "Typing stubs for psycopg2"
|
||||
optional = false
|
||||
python-versions = ">=3.8"
|
||||
groups = ["main"]
|
||||
files = [
|
||||
{file = "types-psycopg2-2.9.21.20241019.tar.gz", hash = "sha256:bca89b988d2ebd19bcd08b177d22a877ea8b841decb10ed130afcf39404612fa"},
|
||||
{file = "types_psycopg2-2.9.21.20241019-py3-none-any.whl", hash = "sha256:44d091e67732d16a941baae48cd7b53bf91911bc36888652447cf1ef0c1fb3f6"},
|
||||
@@ -3172,6 +3296,7 @@ version = "0.6.3.3"
|
||||
description = "Typing stubs for pytest-lazy-fixture"
|
||||
optional = false
|
||||
python-versions = "*"
|
||||
groups = ["main"]
|
||||
files = [
|
||||
{file = "types-pytest-lazy-fixture-0.6.3.3.tar.gz", hash = "sha256:2ef79d66bcde0e50acdac8dc55074b9ae0d4cfaeabdd638f5522f4cac7c8a2c7"},
|
||||
{file = "types_pytest_lazy_fixture-0.6.3.3-py3-none-any.whl", hash = "sha256:a56a55649147ff960ff79d4b2c781a4f769351abc1876873f3116d0bd0c96353"},
|
||||
@@ -3183,6 +3308,7 @@ version = "6.0.12.20240917"
|
||||
description = "Typing stubs for PyYAML"
|
||||
optional = false
|
||||
python-versions = ">=3.8"
|
||||
groups = ["main"]
|
||||
files = [
|
||||
{file = "types-PyYAML-6.0.12.20240917.tar.gz", hash = "sha256:d1405a86f9576682234ef83bcb4e6fff7c9305c8b1fbad5e0bcd4f7dbdc9c587"},
|
||||
{file = "types_PyYAML-6.0.12.20240917-py3-none-any.whl", hash = "sha256:392b267f1c0fe6022952462bf5d6523f31e37f6cea49b14cee7ad634b6301570"},
|
||||
@@ -3194,6 +3320,7 @@ version = "2.31.0.0"
|
||||
description = "Typing stubs for requests"
|
||||
optional = false
|
||||
python-versions = "*"
|
||||
groups = ["main"]
|
||||
files = [
|
||||
{file = "types-requests-2.31.0.0.tar.gz", hash = "sha256:c1c29d20ab8d84dff468d7febfe8e0cb0b4664543221b386605e14672b44ea25"},
|
||||
{file = "types_requests-2.31.0.0-py3-none-any.whl", hash = "sha256:7c5cea7940f8e92ec560bbc468f65bf684aa3dcf0554a6f8c4710f5f708dc598"},
|
||||
@@ -3208,6 +3335,7 @@ version = "0.6.0.post3"
|
||||
description = "Type annotations and code completion for s3transfer"
|
||||
optional = false
|
||||
python-versions = ">=3.7,<4.0"
|
||||
groups = ["main"]
|
||||
files = [
|
||||
{file = "types-s3transfer-0.6.0.post3.tar.gz", hash = "sha256:92c3704e5d041202bfb5ddb79d083fd1a02de2c5dfec6a91576823e6b5c93993"},
|
||||
{file = "types_s3transfer-0.6.0.post3-py3-none-any.whl", hash = "sha256:eedc5117275565b3c83662c0ccc81662a34da5dda8bd502b89d296b6d5cb091d"},
|
||||
@@ -3219,6 +3347,7 @@ version = "0.10.8.6"
|
||||
description = "Typing stubs for toml"
|
||||
optional = false
|
||||
python-versions = "*"
|
||||
groups = ["main"]
|
||||
files = [
|
||||
{file = "types-toml-0.10.8.6.tar.gz", hash = "sha256:6d3ac79e36c9ee593c5d4fb33a50cca0e3adceb6ef5cff8b8e5aef67b4c4aaf2"},
|
||||
{file = "types_toml-0.10.8.6-py3-none-any.whl", hash = "sha256:de7b2bb1831d6f7a4b554671ffe5875e729753496961b3e9b202745e4955dafa"},
|
||||
@@ -3230,6 +3359,7 @@ version = "1.26.17"
|
||||
description = "Typing stubs for urllib3"
|
||||
optional = false
|
||||
python-versions = "*"
|
||||
groups = ["main"]
|
||||
files = [
|
||||
{file = "types-urllib3-1.26.17.tar.gz", hash = "sha256:73fd274524c3fc7cd8cd9ceb0cb67ed99b45f9cb2831013e46d50c1451044800"},
|
||||
{file = "types_urllib3-1.26.17-py3-none-any.whl", hash = "sha256:0d027fcd27dbb3cb532453b4d977e05bc1e13aefd70519866af211b3003d895d"},
|
||||
@@ -3241,6 +3371,7 @@ version = "4.12.2"
|
||||
description = "Backported and Experimental Type Hints for Python 3.8+"
|
||||
optional = false
|
||||
python-versions = ">=3.8"
|
||||
groups = ["main", "dev"]
|
||||
files = [
|
||||
{file = "typing_extensions-4.12.2-py3-none-any.whl", hash = "sha256:04e5ca0351e0f3f85c6853954072df659d0d13fac324d0072316b67d7794700d"},
|
||||
{file = "typing_extensions-4.12.2.tar.gz", hash = "sha256:1a7ead55c7e559dd4dee8856e3a88b41225abfe1ce8df57b7c13915fe121ffb8"},
|
||||
@@ -3252,6 +3383,7 @@ version = "1.26.19"
|
||||
description = "HTTP library with thread-safe connection pooling, file post, and more."
|
||||
optional = false
|
||||
python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,>=2.7"
|
||||
groups = ["main"]
|
||||
files = [
|
||||
{file = "urllib3-1.26.19-py2.py3-none-any.whl", hash = "sha256:37a0344459b199fce0e80b0d3569837ec6b6937435c5244e7fd73fa6006830f3"},
|
||||
{file = "urllib3-1.26.19.tar.gz", hash = "sha256:3e3d753a8618b86d7de333b4223005f68720bcd6a7d2bcb9fbd2229ec7c1e429"},
|
||||
@@ -3268,6 +3400,7 @@ version = "12.0"
|
||||
description = "An implementation of the WebSocket Protocol (RFC 6455 & 7692)"
|
||||
optional = false
|
||||
python-versions = ">=3.8"
|
||||
groups = ["main"]
|
||||
files = [
|
||||
{file = "websockets-12.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:d554236b2a2006e0ce16315c16eaa0d628dab009c33b63ea03f41c6107958374"},
|
||||
{file = "websockets-12.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:2d225bb6886591b1746b17c0573e29804619c8f755b5598d875bb4235ea639be"},
|
||||
@@ -3349,6 +3482,7 @@ version = "3.0.6"
|
||||
description = "The comprehensive WSGI web application library."
|
||||
optional = false
|
||||
python-versions = ">=3.8"
|
||||
groups = ["main"]
|
||||
files = [
|
||||
{file = "werkzeug-3.0.6-py3-none-any.whl", hash = "sha256:1bc0c2310d2fbb07b1dd1105eba2f7af72f322e1e455f2f93c993bee8c8a5f17"},
|
||||
{file = "werkzeug-3.0.6.tar.gz", hash = "sha256:a8dd59d4de28ca70471a34cba79bed5f7ef2e036a76b3ab0835474246eb41f8d"},
|
||||
@@ -3366,6 +3500,7 @@ version = "1.14.1"
|
||||
description = "Module for decorators, wrappers and monkey patching."
|
||||
optional = false
|
||||
python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,>=2.7"
|
||||
groups = ["main"]
|
||||
files = [
|
||||
{file = "wrapt-1.14.1-cp27-cp27m-macosx_10_9_x86_64.whl", hash = "sha256:1b376b3f4896e7930f1f772ac4b064ac12598d1c38d04907e696cc4d794b43d3"},
|
||||
{file = "wrapt-1.14.1-cp27-cp27m-manylinux1_i686.whl", hash = "sha256:903500616422a40a98a5a3c4ff4ed9d0066f3b4c951fa286018ecdf0750194ef"},
|
||||
@@ -3386,6 +3521,16 @@ files = [
|
||||
{file = "wrapt-1.14.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:8ad85f7f4e20964db4daadcab70b47ab05c7c1cf2a7c1e51087bfaa83831854c"},
|
||||
{file = "wrapt-1.14.1-cp310-cp310-win32.whl", hash = "sha256:a9a52172be0b5aae932bef82a79ec0a0ce87288c7d132946d645eba03f0ad8a8"},
|
||||
{file = "wrapt-1.14.1-cp310-cp310-win_amd64.whl", hash = "sha256:6d323e1554b3d22cfc03cd3243b5bb815a51f5249fdcbb86fda4bf62bab9e164"},
|
||||
{file = "wrapt-1.14.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:ecee4132c6cd2ce5308e21672015ddfed1ff975ad0ac8d27168ea82e71413f55"},
|
||||
{file = "wrapt-1.14.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:2020f391008ef874c6d9e208b24f28e31bcb85ccff4f335f15a3251d222b92d9"},
|
||||
{file = "wrapt-1.14.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2feecf86e1f7a86517cab34ae6c2f081fd2d0dac860cb0c0ded96d799d20b335"},
|
||||
{file = "wrapt-1.14.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:240b1686f38ae665d1b15475966fe0472f78e71b1b4903c143a842659c8e4cb9"},
|
||||
{file = "wrapt-1.14.1-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a9008dad07d71f68487c91e96579c8567c98ca4c3881b9b113bc7b33e9fd78b8"},
|
||||
{file = "wrapt-1.14.1-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:6447e9f3ba72f8e2b985a1da758767698efa72723d5b59accefd716e9e8272bf"},
|
||||
{file = "wrapt-1.14.1-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:acae32e13a4153809db37405f5eba5bac5fbe2e2ba61ab227926a22901051c0a"},
|
||||
{file = "wrapt-1.14.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:49ef582b7a1152ae2766557f0550a9fcbf7bbd76f43fbdc94dd3bf07cc7168be"},
|
||||
{file = "wrapt-1.14.1-cp311-cp311-win32.whl", hash = "sha256:358fe87cc899c6bb0ddc185bf3dbfa4ba646f05b1b0b9b5a27c2cb92c2cea204"},
|
||||
{file = "wrapt-1.14.1-cp311-cp311-win_amd64.whl", hash = "sha256:26046cd03936ae745a502abf44dac702a5e6880b2b01c29aea8ddf3353b68224"},
|
||||
{file = "wrapt-1.14.1-cp35-cp35m-manylinux1_i686.whl", hash = "sha256:43ca3bbbe97af00f49efb06e352eae40434ca9d915906f77def219b88e85d907"},
|
||||
{file = "wrapt-1.14.1-cp35-cp35m-manylinux1_x86_64.whl", hash = "sha256:6b1a564e6cb69922c7fe3a678b9f9a3c54e72b469875aa8018f18b4d1dd1adf3"},
|
||||
{file = "wrapt-1.14.1-cp35-cp35m-manylinux2010_i686.whl", hash = "sha256:00b6d4ea20a906c0ca56d84f93065b398ab74b927a7a3dbd470f6fc503f95dc3"},
|
||||
@@ -3439,6 +3584,7 @@ version = "0.13.0"
|
||||
description = "Makes working with XML feel like you are working with JSON"
|
||||
optional = false
|
||||
python-versions = ">=3.4"
|
||||
groups = ["main"]
|
||||
files = [
|
||||
{file = "xmltodict-0.13.0-py2.py3-none-any.whl", hash = "sha256:aa89e8fd76320154a40d19a0df04a4695fb9dc5ba977cbb68ab3e4eb225e7852"},
|
||||
{file = "xmltodict-0.13.0.tar.gz", hash = "sha256:341595a488e3e01a85a9d8911d8912fd922ede5fecc4dce437eb4b6c8d037e56"},
|
||||
@@ -3450,6 +3596,7 @@ version = "1.17.2"
|
||||
description = "Yet another URL library"
|
||||
optional = false
|
||||
python-versions = ">=3.9"
|
||||
groups = ["main"]
|
||||
files = [
|
||||
{file = "yarl-1.17.2-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:93771146ef048b34201bfa382c2bf74c524980870bb278e6df515efaf93699ff"},
|
||||
{file = "yarl-1.17.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:8281db240a1616af2f9c5f71d355057e73a1409c4648c8949901396dc0a3c151"},
|
||||
@@ -3546,6 +3693,7 @@ version = "0.23.0"
|
||||
description = "Zstandard bindings for Python"
|
||||
optional = false
|
||||
python-versions = ">=3.8"
|
||||
groups = ["main"]
|
||||
files = [
|
||||
{file = "zstandard-0.23.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:bf0a05b6059c0528477fba9054d09179beb63744355cab9f38059548fedd46a9"},
|
||||
{file = "zstandard-0.23.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:fc9ca1c9718cb3b06634c7c8dec57d24e9438b2aa9a0f02b8bb36bf478538880"},
|
||||
@@ -3653,6 +3801,6 @@ cffi = {version = ">=1.11", markers = "platform_python_implementation == \"PyPy\
cffi = ["cffi (>=1.11)"]

[metadata]
lock-version = "2.0"
lock-version = "2.1"
python-versions = "^3.11"
content-hash = "e6904aca09abc6c805604b21a5702a97e0056406f9ec7469b091d35ee10a6b16"
content-hash = "4dc3165fe22c0e0f7a030ea0f8a680ae2ff74561d8658c393abbe9112caaf5d7"

@@ -43,7 +43,7 @@ websockets = "^12.0"
clickhouse-connect = "^0.7.16"
kafka-python = "^2.0.2"
jwcrypto = "^1.5.6"
h2 = "^4.1.0"
h2 = {git = "https://github.com/python-hyper/h2"}
types-jwcrypto = "^1.5.0.20240925"
pyyaml = "^6.0.2"
types-pyyaml = "^6.0.12.20240917"
@@ -94,6 +94,7 @@ target-version = "py311"
extend-exclude = [
    "vendor/",
    "target/",
    "test_runner/stubs/", # Autogenerated by mypy's stubgen
]
line-length = 100 # this setting is rather guidance, it won't fail if it can't make the shorter

@@ -4,7 +4,7 @@
//! united.

use reqwest::{IntoUrl, Method, StatusCode};
use safekeeper_api::models::TimelineStatus;
use safekeeper_api::models::{TimelineCreateRequest, TimelineStatus};
use std::error::Error as _;
use utils::{
    http::error::HttpErrorBody,
@@ -76,6 +76,28 @@ impl Client {
        }
    }

    pub async fn create_timeline(&self, req: &TimelineCreateRequest) -> Result<TimelineStatus> {
        let uri = format!(
            "{}/v1/tenant/{}/timeline/{}",
            self.mgmt_api_endpoint, req.tenant_id, req.timeline_id
        );
        let resp = self.post(&uri, req).await?;
        resp.json().await.map_err(Error::ReceiveBody)
    }

    pub async fn delete_timeline(
        &self,
        tenant_id: TenantId,
        timeline_id: TimelineId,
    ) -> Result<TimelineStatus> {
        let uri = format!(
            "{}/v1/tenant/{}/timeline/{}",
            self.mgmt_api_endpoint, tenant_id, timeline_id
        );
        let resp = self.request(Method::DELETE, &uri, ()).await?;
        resp.json().await.map_err(Error::ReceiveBody)
    }

    pub async fn timeline_status(
        &self,
        tenant_id: TenantId,
@@ -107,6 +129,14 @@ impl Client {
        self.get(&uri).await
    }

    async fn post<B: serde::Serialize, U: IntoUrl>(
        &self,
        uri: U,
        body: B,
    ) -> Result<reqwest::Response> {
        self.request(Method::POST, uri, body).await
    }

    async fn get<U: IntoUrl>(&self, uri: U) -> Result<reqwest::Response> {
        self.request(Method::GET, uri, ()).await
    }

@@ -84,6 +84,12 @@ const parseReportJson = async ({ reportJsonUrl, fetch }) => {
        } else {
            arch = "unknown"
        }
        let lfcState = ""
        if (test.parameters.includes("'with-lfc'")) {
            lfcState = "with-lfc"
        } else {
            lfcState = "without-lfc"
        }

        // Removing build type and PostgreSQL version from the test name to make it shorter
        const testName = test.name.replace(new RegExp(`${buildType}-pg${pgVersion}-?`), "").replace("[]", "")
@@ -91,6 +97,7 @@ const parseReportJson = async ({ reportJsonUrl, fetch }) => {
        test.pgVersion = pgVersion
        test.buildType = buildType
        test.arch = arch
        test.lfcState = lfcState

        if (test.status === "passed") {
            passedTests[pgVersion][testName].push(test)
@@ -157,7 +164,7 @@ const reportSummary = async (params) => {
        const links = []
        for (const test of tests) {
            const allureLink = `${reportUrl}#suites/${test.parentUid}/${test.uid}`
            links.push(`[${test.buildType}-${test.arch}](${allureLink})`)
            links.push(`[${test.buildType}-${test.arch}-${test.lfcState}](${allureLink})`)
        }
        summary += `- \`${testName}\`: ${links.join(", ")}\n`
    }
@@ -188,7 +195,7 @@ const reportSummary = async (params) => {
        const links = []
        for (const test of tests) {
            const allureLink = `${reportUrl}#suites/${test.parentUid}/${test.uid}/retries`
            links.push(`[${test.buildType}-${test.arch}](${allureLink})`)
            links.push(`[${test.buildType}-${test.arch}-${test.lfcState}](${allureLink})`)
        }
        summary += `- \`${testName}\`: ${links.join(", ")}\n`
    }

@@ -134,7 +134,7 @@ def ingest_test_result(
        if p["name"].startswith("__")
    }
    arch = parameters.get("arch", "UNKNOWN").strip("'")
    lfc = parameters.get("lfc", "False") == "True"
    lfc = parameters.get("lfc", "without-lfc").strip("'") == "with-lfc"

    build_type, pg_version, unparametrized_name = parse_test_name(test["name"])
    labels = {label["name"]: label["value"] for label in test["labels"]}

@@ -32,7 +32,6 @@ use tokio::sync::broadcast::error::RecvError;
use tokio::time;
use tonic::body::{self, empty_body, BoxBody};
use tonic::codegen::Service;
use tonic::transport::server::Connected;
use tonic::Code;
use tonic::{Request, Response, Status};
use tracing::*;
@@ -459,9 +458,10 @@ impl BrokerService for Broker {
        &self,
        request: Request<tonic::Streaming<SafekeeperTimelineInfo>>,
    ) -> Result<Response<()>, Status> {
        let remote_addr = request
            .remote_addr()
            .expect("TCPConnectInfo inserted by handler");
        let &RemoteAddr(remote_addr) = request
            .extensions()
            .get()
            .expect("RemoteAddr inserted by handler");
        let mut publisher = self.registry.register_publisher(remote_addr);

        let mut stream = request.into_inner();
@@ -484,9 +484,10 @@ impl BrokerService for Broker {
        &self,
        request: Request<SubscribeSafekeeperInfoRequest>,
    ) -> Result<Response<Self::SubscribeSafekeeperInfoStream>, Status> {
        let remote_addr = request
            .remote_addr()
            .expect("TCPConnectInfo inserted by handler");
        let &RemoteAddr(remote_addr) = request
            .extensions()
            .get()
            .expect("RemoteAddr inserted by handler");
        let proto_key = request
            .into_inner()
            .subscription_key
@@ -537,9 +538,10 @@ impl BrokerService for Broker {
        &self,
        request: Request<SubscribeByFilterRequest>,
    ) -> std::result::Result<Response<Self::SubscribeByFilterStream>, Status> {
        let remote_addr = request
            .remote_addr()
            .expect("TCPConnectInfo inserted by handler");
        let &RemoteAddr(remote_addr) = request
            .extensions()
            .get()
            .expect("RemoteAddr inserted by handler");
        let proto_filter = request.into_inner();
        let ttid_filter = proto_filter.tenant_timeline_id.as_ref();

@@ -628,6 +630,9 @@ async fn http1_handler(
    Ok(resp)
}

#[derive(Clone, Copy)]
struct RemoteAddr(SocketAddr);

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    let args = Args::parse();
@@ -687,13 +692,13 @@ async fn main() -> Result<(), Box<dyn std::error::Error>> {
        .max_concurrent_streams(None);

    let storage_broker_server_cloned = storage_broker_server.clone();
    let connect_info = stream.connect_info();
    let remote_addr = RemoteAddr(addr);
    let service_fn_ = async move {
        service_fn(move |mut req| {
            // That's what tonic's MakeSvc.call does to pass conninfo to
            // the request handler (and where its request.remote_addr()
            // expects it to find).
            req.extensions_mut().insert(connect_info.clone());
            req.extensions_mut().insert(remote_addr);

            // Technically this second clone is not needed, but consume
            // by async block is apparently unavoidable. BTW, error

@@ -45,12 +45,11 @@ strum_macros.workspace = true

diesel = { version = "2.2.6", features = [
    "serde_json",
    "postgres",
    "r2d2",
    "chrono",
] }
diesel-async = { version = "0.5.2", features = ["postgres", "bb8", "async-connection-wrapper"] }
diesel_migrations = { version = "2.2.0" }
r2d2 = { version = "0.8.10" }
scoped-futures = "0.1.4"

utils = { path = "../libs/utils/" }
metrics = { path = "../libs/metrics/" }

@@ -308,7 +308,7 @@ async fn async_main() -> anyhow::Result<()> {
    // Validate that we can connect to the database
    Persistence::await_connection(&secrets.database_url, args.db_connect_timeout.into()).await?;

    let persistence = Arc::new(Persistence::new(secrets.database_url));
    let persistence = Arc::new(Persistence::new(secrets.database_url).await);

    let service = Service::spawn(config, persistence.clone()).await?;

File diff suppressed because it is too large
@@ -208,6 +208,10 @@ class ShardIndex:
            shard_count=int(input[2:4], 16),
        )

    @property
    def is_sharded(self) -> bool:
        return self.shard_count != 0


class TenantShardId:
    def __init__(self, tenant_id: TenantId, shard_number: int, shard_count: int):

@@ -126,12 +126,8 @@ PAGESERVER_GLOBAL_METRICS: tuple[str, ...] = (
|
||||
"pageserver_page_cache_read_accesses_total",
|
||||
"pageserver_page_cache_size_current_bytes",
|
||||
"pageserver_page_cache_size_max_bytes",
|
||||
"pageserver_getpage_reconstruct_seconds_bucket",
|
||||
"pageserver_getpage_reconstruct_seconds_count",
|
||||
"pageserver_getpage_reconstruct_seconds_sum",
|
||||
*[f"pageserver_basebackup_query_seconds_{x}" for x in ["bucket", "count", "sum"]],
|
||||
*histogram("pageserver_smgr_query_seconds_global"),
|
||||
*histogram("pageserver_getpage_get_reconstruct_data_seconds"),
|
||||
*histogram("pageserver_wait_lsn_seconds"),
|
||||
*histogram("pageserver_remote_operation_seconds"),
|
||||
*histogram("pageserver_io_operations_seconds"),
|
||||
@@ -169,7 +165,6 @@ PAGESERVER_PER_TENANT_METRICS: tuple[str, ...] = (
|
||||
"pageserver_evictions_with_low_residence_duration_total",
|
||||
"pageserver_aux_file_estimated_size",
|
||||
"pageserver_valid_lsn_lease_count",
|
||||
"pageserver_flush_wait_upload_seconds",
|
||||
counter("pageserver_tenant_throttling_count_accounted_start"),
|
||||
counter("pageserver_tenant_throttling_count_accounted_finish"),
|
||||
counter("pageserver_tenant_throttling_wait_usecs_sum"),
|
||||
|
||||
@@ -523,6 +523,7 @@ class NeonLocalCli(AbstractNeonCli):
|
||||
remote_ext_config: str | None = None,
|
||||
pageserver_id: int | None = None,
|
||||
allow_multiple: bool = False,
|
||||
create_test_user: bool = False,
|
||||
basebackup_request_tries: int | None = None,
|
||||
env: dict[str, str] | None = None,
|
||||
) -> subprocess.CompletedProcess[str]:
|
||||
@@ -544,6 +545,8 @@ class NeonLocalCli(AbstractNeonCli):
|
||||
args.extend(["--pageserver-id", str(pageserver_id)])
|
||||
if allow_multiple:
|
||||
args.extend(["--allow-multiple"])
|
||||
if create_test_user:
|
||||
args.extend(["--create-test-user"])
|
||||
|
||||
res = self.raw_cli(args, extra_env_vars)
|
||||
res.check_returncode()
|
||||
|
||||
@@ -313,6 +313,10 @@ class PgProtocol:
|
||||
"""
|
||||
return self.safe_psql(query, log_query=log_query)[0][0]
|
||||
|
||||
def show_timeline_id(self) -> TimelineId:
|
||||
"""SHOW neon.timeline_id"""
|
||||
return TimelineId(cast("str", self.safe_psql("show neon.timeline_id")[0][0]))
|
||||
|
||||
|
||||
class PageserverWalReceiverProtocol(StrEnum):
|
||||
VANILLA = "vanilla"
|
||||
@@ -387,6 +391,7 @@ class NeonEnvBuilder:
|
||||
storage_controller_port_override: int | None = None,
|
||||
pageserver_virtual_file_io_mode: str | None = None,
|
||||
pageserver_wal_receiver_protocol: PageserverWalReceiverProtocol | None = None,
|
||||
pageserver_get_vectored_concurrent_io: str | None = None,
|
||||
):
|
||||
self.repo_dir = repo_dir
|
||||
self.rust_log_override = rust_log_override
|
||||
@@ -426,6 +431,9 @@ class NeonEnvBuilder:
|
||||
self.storage_controller_config: dict[Any, Any] | None = None
|
||||
|
||||
self.pageserver_virtual_file_io_engine: str | None = pageserver_virtual_file_io_engine
|
||||
self.pageserver_get_vectored_concurrent_io: str | None = (
|
||||
pageserver_get_vectored_concurrent_io
|
||||
)
|
||||
|
||||
self.pageserver_default_tenant_config_compaction_algorithm: dict[str, Any] | None = (
|
||||
pageserver_default_tenant_config_compaction_algorithm
|
||||
@@ -452,6 +460,7 @@ class NeonEnvBuilder:
|
||||
self.test_name = test_name
|
||||
self.compatibility_neon_binpath = compatibility_neon_binpath
|
||||
self.compatibility_pg_distrib_dir = compatibility_pg_distrib_dir
|
||||
self.test_may_use_compatibility_snapshot_binaries = False
|
||||
self.version_combination = combination
|
||||
self.mixdir = self.test_output_dir / "mixdir_neon"
|
||||
if self.version_combination is not None:
|
||||
@@ -463,6 +472,7 @@ class NeonEnvBuilder:
|
||||
), "the environment variable COMPATIBILITY_POSTGRES_DISTRIB_DIR is required when using mixed versions"
|
||||
self.mixdir.mkdir(mode=0o755, exist_ok=True)
|
||||
self._mix_versions()
|
||||
self.test_may_use_compatibility_snapshot_binaries = True
|
||||
|
||||
def init_configs(self, default_remote_storage_if_missing: bool = True) -> NeonEnv:
|
||||
# Cannot create more than one environment from one builder
|
||||
@@ -1062,6 +1072,7 @@ class NeonEnv:
|
||||
self.pageserver_virtual_file_io_engine = config.pageserver_virtual_file_io_engine
|
||||
self.pageserver_virtual_file_io_mode = config.pageserver_virtual_file_io_mode
|
||||
self.pageserver_wal_receiver_protocol = config.pageserver_wal_receiver_protocol
|
||||
self.pageserver_get_vectored_concurrent_io = config.pageserver_get_vectored_concurrent_io
|
||||
|
||||
# Create the neon_local's `NeonLocalInitConf`
|
||||
cfg: dict[str, Any] = {
|
||||
@@ -1115,12 +1126,24 @@ class NeonEnv:

        # Batching (https://github.com/neondatabase/neon/issues/9377):
        # enable batching by default in tests and benchmarks.
        ps_cfg["page_service_pipelining"] = {
            "mode": "pipelined",
            "execution": "concurrent-futures",
            "max_batch_size": 32,
        }

        # Concurrent IO (https://github.com/neondatabase/neon/issues/9378):
        # enable concurrent IO by default in tests and benchmarks.
        # Compat tests are exempt because old versions fail to parse the new config.
        if not config.compatibility_neon_binpath:
            ps_cfg["page_service_pipelining"] = {
                "mode": "pipelined",
                "execution": "concurrent-futures",
                "max_batch_size": 32,
        get_vectored_concurrent_io = self.pageserver_get_vectored_concurrent_io
        if config.test_may_use_compatibility_snapshot_binaries:
            log.info(
                "Forcing use of binary-built-in default to avoid forward-compatibility related test failures"
            )
            get_vectored_concurrent_io = None
        if get_vectored_concurrent_io is not None:
            ps_cfg["get_vectored_concurrent_io"] = {
                "mode": self.pageserver_get_vectored_concurrent_io,
            }

        if self.pageserver_virtual_file_io_engine is not None:
@@ -1457,6 +1480,7 @@ def neon_simple_env(
|
||||
pageserver_virtual_file_io_engine: str,
|
||||
pageserver_default_tenant_config_compaction_algorithm: dict[str, Any] | None,
|
||||
pageserver_virtual_file_io_mode: str | None,
|
||||
pageserver_get_vectored_concurrent_io: str | None,
|
||||
) -> Iterator[NeonEnv]:
|
||||
"""
|
||||
Simple Neon environment, with 1 safekeeper and 1 pageserver. No authentication, no fsync.
|
||||
@@ -1489,6 +1513,7 @@ def neon_simple_env(
|
||||
pageserver_virtual_file_io_engine=pageserver_virtual_file_io_engine,
|
||||
pageserver_default_tenant_config_compaction_algorithm=pageserver_default_tenant_config_compaction_algorithm,
|
||||
pageserver_virtual_file_io_mode=pageserver_virtual_file_io_mode,
|
||||
pageserver_get_vectored_concurrent_io=pageserver_get_vectored_concurrent_io,
|
||||
combination=combination,
|
||||
) as builder:
|
||||
env = builder.init_start()
|
||||
@@ -1515,6 +1540,7 @@ def neon_env_builder(
|
||||
pageserver_default_tenant_config_compaction_algorithm: dict[str, Any] | None,
|
||||
record_property: Callable[[str, object], None],
|
||||
pageserver_virtual_file_io_mode: str | None,
|
||||
pageserver_get_vectored_concurrent_io: str | None,
|
||||
) -> Iterator[NeonEnvBuilder]:
|
||||
"""
|
||||
Fixture to create a Neon environment for test.
|
||||
@@ -1557,6 +1583,7 @@ def neon_env_builder(
|
||||
test_overlay_dir=test_overlay_dir,
|
||||
pageserver_default_tenant_config_compaction_algorithm=pageserver_default_tenant_config_compaction_algorithm,
|
||||
pageserver_virtual_file_io_mode=pageserver_virtual_file_io_mode,
|
||||
pageserver_get_vectored_concurrent_io=pageserver_get_vectored_concurrent_io,
|
||||
) as builder:
|
||||
yield builder
|
||||
# Propogate `preserve_database_files` to make it possible to use in other fixtures,
|
||||
@@ -3891,6 +3918,7 @@ class Endpoint(PgProtocol, LogUtils):
|
||||
pageserver_id: int | None = None,
|
||||
safekeepers: list[int] | None = None,
|
||||
allow_multiple: bool = False,
|
||||
create_test_user: bool = False,
|
||||
basebackup_request_tries: int | None = None,
|
||||
env: dict[str, str] | None = None,
|
||||
) -> Self:
|
||||
@@ -3912,6 +3940,7 @@ class Endpoint(PgProtocol, LogUtils):
|
||||
remote_ext_config=remote_ext_config,
|
||||
pageserver_id=pageserver_id,
|
||||
allow_multiple=allow_multiple,
|
||||
create_test_user=create_test_user,
|
||||
basebackup_request_tries=basebackup_request_tries,
|
||||
env=env,
|
||||
)
|
||||
@@ -4361,6 +4390,7 @@ class Safekeeper(LogUtils):
|
||||
"1s",
|
||||
"--eviction-min-resident",
|
||||
"10s",
|
||||
"--wal-reader-fanout",
|
||||
]
|
||||
|
||||
self.extra_opts = extra_opts
|
||||
|
||||
@@ -99,8 +99,11 @@ DEFAULT_PAGESERVER_ALLOWED_ERRORS = (
|
||||
".*WARN.*path=/v1/utilization .*request was dropped before completing",
|
||||
# Can happen during shutdown
|
||||
".*scheduling deletion on drop failed: queue is in state Stopped.*",
|
||||
# Too many frozen layers error is normal during intensive benchmarks
|
||||
".*too many frozen layers.*",
|
||||
# L0 flush backpressure delays are expected under heavy ingest load. We want to exercise
|
||||
# this backpressure in tests.
|
||||
".*delaying layer flush by \\S+ for compaction backpressure.*",
|
||||
".*stalling layer flushes for compaction backpressure.*",
|
||||
".*layer roll waiting for flush due to compaction backpressure.*",
|
||||
)
|
||||
|
||||
|
||||
|
||||
@@ -44,6 +44,11 @@ def pageserver_virtual_file_io_mode() -> str | None:
    return os.getenv("PAGESERVER_VIRTUAL_FILE_IO_MODE")


@pytest.fixture(scope="function", autouse=True)
def pageserver_get_vectored_concurrent_io() -> str | None:
    return os.getenv("PAGESERVER_GET_VECTORED_CONCURRENT_IO")


def get_pageserver_default_tenant_config_compaction_algorithm() -> dict[str, Any] | None:
    toml_table = os.getenv("PAGESERVER_DEFAULT_TENANT_CONFIG_COMPACTION_ALGORITHM")
    if toml_table is None:
@@ -116,6 +121,8 @@ def pytest_runtest_makereport(*args, **kwargs):
    }.get(os.uname().machine, "UNKNOWN")
    arch = os.getenv("RUNNER_ARCH", uname_m)
    allure.dynamic.parameter("__arch", arch)
    allure.dynamic.parameter("__lfc", os.getenv("USE_LFC") != "false")
    allure.dynamic.parameter(
        "__lfc", "with-lfc" if os.getenv("USE_LFC") != "false" else "without-lfc"
    )

    yield

@@ -23,6 +23,8 @@ def test_layer_map(neon_env_builder: NeonEnvBuilder, zenbenchmark):
|
||||
"checkpoint_distance": "16384",
|
||||
"compaction_period": "1 s",
|
||||
"compaction_threshold": "1",
|
||||
"l0_flush_delay_threshold": "0",
|
||||
"l0_flush_stall_threshold": "0",
|
||||
"compaction_target_size": "16384",
|
||||
}
|
||||
)
|
||||
|
||||
@@ -139,6 +139,8 @@ def test_fully_custom_config(positive_env: NeonEnv):
|
||||
fully_custom_config = {
|
||||
"compaction_period": "1h",
|
||||
"compaction_threshold": 13,
|
||||
"l0_flush_delay_threshold": 25,
|
||||
"l0_flush_stall_threshold": 42,
|
||||
"compaction_target_size": 1048576,
|
||||
"checkpoint_distance": 10000,
|
||||
"checkpoint_timeout": "13m",
|
||||
@@ -177,6 +179,9 @@ def test_fully_custom_config(positive_env: NeonEnv):
|
||||
"args": {"format": "bincode", "compression": {"zstd": {"level": 1}}},
|
||||
},
|
||||
"rel_size_v2_enabled": True,
|
||||
"gc_compaction_enabled": True,
|
||||
"gc_compaction_initial_threshold_kb": 1024000,
|
||||
"gc_compaction_ratio_percent": 200,
|
||||
}
|
||||
|
||||
vps_http = env.storage_controller.pageserver_api()
|
||||
|
||||
@@ -64,6 +64,8 @@ def test_branch_and_gc(neon_simple_env: NeonEnv):
|
||||
# tweak the default settings to allow quickly create image layers and L1 layers
|
||||
"compaction_period": "1 s",
|
||||
"compaction_threshold": "2",
|
||||
"l0_flush_delay_threshold": "20",
|
||||
"l0_flush_stall_threshold": "40",
|
||||
"image_creation_threshold": "1",
|
||||
# Disable PITR, this test will set an explicit space-based GC limit
|
||||
"pitr_interval": "0 s",
|
||||
|
||||
@@ -19,6 +19,7 @@ from fixtures.pageserver.utils import wait_until_tenant_active
|
||||
from fixtures.utils import query_scalar
|
||||
from performance.test_perf_pgbench import get_scales_matrix
|
||||
from requests import RequestException
|
||||
from requests.exceptions import RetryError
|
||||
|
||||
|
||||
# Test branch creation
|
||||
@@ -176,11 +177,8 @@ def test_cannot_create_endpoint_on_non_uploaded_timeline(neon_env_builder: NeonE
|
||||
|
||||
env.neon_cli.mappings_map_branch(initial_branch, env.initial_tenant, env.initial_timeline)
|
||||
|
||||
with pytest.raises(RuntimeError, match="ERROR: Not found: Timeline"):
|
||||
env.endpoints.create_start(
|
||||
initial_branch, tenant_id=env.initial_tenant, basebackup_request_tries=2
|
||||
)
|
||||
ps_http.configure_failpoints(("before-upload-index-pausable", "off"))
|
||||
with pytest.raises(RuntimeError, match="is not active, state: Loading"):
|
||||
env.endpoints.create_start(initial_branch, tenant_id=env.initial_tenant)
|
||||
finally:
|
||||
env.pageserver.stop(immediate=True)
|
||||
|
||||
@@ -221,10 +219,7 @@ def test_cannot_branch_from_non_uploaded_branch(neon_env_builder: NeonEnvBuilder
|
||||
|
||||
branch_id = TimelineId.generate()
|
||||
|
||||
with pytest.raises(
|
||||
PageserverApiException,
|
||||
match="Cannot branch off the timeline that's not present in pageserver",
|
||||
):
|
||||
with pytest.raises(RetryError, match="too many 503 error responses"):
|
||||
ps_http.timeline_create(
|
||||
env.pg_version,
|
||||
env.initial_tenant,
|
||||
|
||||
@@ -1,6 +1,8 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import json
|
||||
import math
|
||||
import random
|
||||
import time
|
||||
from enum import StrEnum
|
||||
|
||||
@@ -128,11 +130,6 @@ def test_pageserver_gc_compaction_smoke(neon_env_builder: NeonEnvBuilder, with_b
|
||||
}
|
||||
|
||||
env = neon_env_builder.init_start(initial_tenant_conf=SMOKE_CONF)
|
||||
env.pageserver.allowed_errors.append(
|
||||
r".*failed to acquire partition lock during gc-compaction.*"
|
||||
)
|
||||
env.pageserver.allowed_errors.append(r".*repartition() called concurrently.*")
|
||||
|
||||
tenant_id = env.initial_tenant
|
||||
timeline_id = env.initial_timeline
|
||||
|
||||
@@ -147,11 +144,14 @@ def test_pageserver_gc_compaction_smoke(neon_env_builder: NeonEnvBuilder, with_b
|
||||
log.info("Writing initial data ...")
|
||||
workload.write_rows(row_count, env.pageserver.id)
|
||||
|
||||
ps_http.timeline_gc(
|
||||
tenant_id, timeline_id, None
|
||||
) # Force refresh gc info to have gc_cutoff generated
|
||||
|
||||
child_workloads: list[Workload] = []
|
||||
|
||||
for i in range(1, churn_rounds + 1):
|
||||
if i % 10 == 0:
|
||||
log.info(f"Running churn round {i}/{churn_rounds} ...")
|
||||
log.info(f"Running churn round {i}/{churn_rounds} ...")
|
||||
if i % 10 == 5 and with_branches == "with_branches":
|
||||
branch_name = f"child-{i}"
|
||||
branch_timeline_id = env.create_branch(branch_name)
|
||||
@@ -172,8 +172,10 @@ def test_pageserver_gc_compaction_smoke(neon_env_builder: NeonEnvBuilder, with_b
|
||||
"sub_compaction_max_job_size_mb": 16,
|
||||
},
|
||||
)
|
||||
|
||||
workload.churn_rows(row_count, env.pageserver.id)
|
||||
# do not wait for upload so that we can see if gc_compaction works well with data being ingested
|
||||
workload.churn_rows(row_count, env.pageserver.id, upload=False)
|
||||
time.sleep(1)
|
||||
workload.validate(env.pageserver.id)
|
||||
|
||||
def compaction_finished():
|
||||
queue_depth = len(ps_http.timeline_compact_info(tenant_id, timeline_id))
|
||||
@@ -197,6 +199,230 @@ def test_pageserver_gc_compaction_smoke(neon_env_builder: NeonEnvBuilder, with_b
|
||||
ps_http.timeline_gc(tenant_id, timeline_id, None)
|
||||
|
||||
|
||||
@pytest.mark.parametrize(
|
||||
"compaction_mode",
|
||||
["before_restart", "after_restart"],
|
||||
)
|
||||
def test_pageserver_gc_compaction_idempotent(
|
||||
neon_env_builder: NeonEnvBuilder, compaction_mode: str
|
||||
):
|
||||
"""
|
||||
Do gc-compaction twice without writing any new data and see if anything breaks.
|
||||
We run this test in two modes:
|
||||
- before_restart: run two gc-compactions before pageserver restart
|
||||
- after_restart: run one gc-compaction before and one after pageserver restart
|
||||
"""
|
||||
SMOKE_CONF = {
|
||||
# Run both gc and gc-compaction.
|
||||
"gc_period": "5s",
|
||||
"compaction_period": "5s",
|
||||
# No PiTR interval and small GC horizon
|
||||
"pitr_interval": "0s",
|
||||
"gc_horizon": 1024,
|
||||
"lsn_lease_length": "0s",
|
||||
}
|
||||
|
||||
env = neon_env_builder.init_start(initial_tenant_conf=SMOKE_CONF)
    tenant_id = env.initial_tenant
    timeline_id = env.initial_timeline

    # Only in testing mode: the warning is expected because we rewrite a layer file of different generations.
    # We could potentially patch the sanity-check code to not emit the warning in the future.
    env.pageserver.allowed_errors.append(".*was unlinked but was not dangling.*")

    row_count = 10000

    ps_http = env.pageserver.http_client()

    workload = Workload(env, tenant_id, timeline_id)
    workload.init(env.pageserver.id)

    workload.write_rows(row_count, env.pageserver.id)

    child_workloads: list[Workload] = []

    def compaction_finished():
        queue_depth = len(ps_http.timeline_compact_info(tenant_id, timeline_id))
        assert queue_depth == 0

    workload.churn_rows(row_count, env.pageserver.id)
    env.create_branch("child_branch")  # so that we have a retain_lsn
    workload.churn_rows(row_count, env.pageserver.id)
    # compact 3 times if mode is before_restart
    n_compactions = 3 if compaction_mode == "before_restart" else 1
    for _ in range(n_compactions):
        # Force refresh gc info to have gc_cutoff generated
        ps_http.timeline_gc(tenant_id, timeline_id, None)
        ps_http.timeline_compact(
            tenant_id,
            timeline_id,
            enhanced_gc_bottom_most_compaction=True,
            body={
                "scheduled": True,
                "sub_compaction": True,
                "compact_key_range": {
                    "start": "000000000000000000000000000000000000",
                    "end": "030000000000000000000000000000000000",
                },
                "sub_compaction_max_job_size_mb": 16,
            },
        )
        wait_until(compaction_finished, timeout=60)
    if compaction_mode == "after_restart":
        env.pageserver.restart(True)
        ps_http.timeline_gc(
            tenant_id, timeline_id, None
        )  # Force refresh gc info to have gc_cutoff generated
        for _ in range(3):
            ps_http.timeline_compact(
                tenant_id,
                timeline_id,
                enhanced_gc_bottom_most_compaction=True,
                body={
                    "scheduled": True,
                    "sub_compaction": True,
                    "compact_key_range": {
                        "start": "000000000000000000000000000000000000",
                        "end": "030000000000000000000000000000000000",
                    },
                    "sub_compaction_max_job_size_mb": 16,
                },
            )
            wait_until(compaction_finished, timeout=60)

    # ensure gc_compaction is scheduled and it's actually running (instead of skipping due to no layers picked)
    env.pageserver.assert_log_contains(
        "scheduled_compact_timeline.*picked .* layers for compaction"
    )

    # ensure we hit the duplicated layer key warning at least once: we did two compactions consecutively,
    # and the second one should have hit the duplicated layer key warning.
    if compaction_mode == "before_restart":
        env.pageserver.assert_log_contains("duplicated layer key in the same generation")
    else:
        env.pageserver.assert_log_contains("same layer key at different generation")

    log.info("Validating at workload end ...")
    workload.validate(env.pageserver.id)
    for child_workload in child_workloads:
        log.info(f"Validating at branch {child_workload.branch_name}")
        child_workload.validate(env.pageserver.id)

    # Run a legacy compaction+gc to ensure gc-compaction can coexist with legacy compaction.
    ps_http.timeline_checkpoint(tenant_id, timeline_id, wait_until_uploaded=True)
    ps_http.timeline_gc(tenant_id, timeline_id, None)

@skip_in_debug_build("only run with release build")
def test_pageserver_gc_compaction_interrupt(neon_env_builder: NeonEnvBuilder):
    """
    Force interrupt a gc-compaction and see if anything breaks.
    """
    SMOKE_CONF = {
        # Run both gc and gc-compaction.
        "gc_period": "5s",
        "compaction_period": "5s",
        # No PiTR interval and small GC horizon
        "pitr_interval": "0s",
        "gc_horizon": "1024",
        "lsn_lease_length": "0s",
    }

    env = neon_env_builder.init_start(initial_tenant_conf=SMOKE_CONF)
    tenant_id = env.initial_tenant
    timeline_id = env.initial_timeline

    # Only in testing mode: the warning is expected because we rewrite a layer file across different generations.
    # We could potentially patch the sanity-check code to not emit the warning in the future.
    env.pageserver.allowed_errors.append(".*was unlinked but was not dangling.*")

    row_count = 10000
    churn_rounds = 20

    ps_http = env.pageserver.http_client()

    workload = Workload(env, tenant_id, timeline_id)
    workload.init(env.pageserver.id)

    log.info("Writing initial data ...")
    workload.write_rows(row_count, env.pageserver.id)

    def compaction_finished():
        queue_depth = len(ps_http.timeline_compact_info(tenant_id, timeline_id))
        assert queue_depth == 0

    expected_compaction_time_seconds = 5.0
    ps_http.timeline_gc(
        tenant_id, timeline_id, None
    )  # Force refresh gc info to have gc_cutoff generated
    for i in range(1, churn_rounds + 1):
        log.info(f"Running churn round {i}/{churn_rounds} ...")
        workload.churn_rows(row_count, env.pageserver.id)
        ps_http.timeline_compact(
            tenant_id,
            timeline_id,
            enhanced_gc_bottom_most_compaction=True,
            body={
                "scheduled": True,
                "sub_compaction": True,
                "compact_key_range": {
                    "start": "000000000000000000000000000000000000",
                    "end": "030000000000000000000000000000000000",
                },
                "sub_compaction_max_job_size_mb": 16,
            },
        )
        # Sleep a random number of seconds between 0 and the expected compaction time; if the result
        # is 0, wait until the compaction completes. This should trigger the restart at different
        # phases of the compaction:
        # - while we are doing the compaction
        # - after we finished the compaction but have not yet uploaded the metadata
        # - after we uploaded the metadata
        time_to_sleep = random.randint(0, max(5, math.ceil(expected_compaction_time_seconds)))
        if time_to_sleep == 0 or i == 1:
            start = time.time()
            wait_until(compaction_finished, timeout=60)
            end = time.time()
            expected_compaction_time_seconds = end - start
            log.info(
                f"expected_compaction_time_seconds updated to {expected_compaction_time_seconds} seconds"
            )
        else:
            time.sleep(time_to_sleep)
        env.pageserver.restart(True)
        ps_http.timeline_gc(
            tenant_id, timeline_id, None
        )  # Force refresh gc info to have gc_cutoff generated
        ps_http.timeline_compact(
            tenant_id,
            timeline_id,
            enhanced_gc_bottom_most_compaction=True,
            body={
                "scheduled": True,
                "sub_compaction": True,
                "compact_key_range": {
                    "start": "000000000000000000000000000000000000",
                    "end": "030000000000000000000000000000000000",
                },
                "sub_compaction_max_job_size_mb": 16,
            },
        )
        workload.validate(env.pageserver.id)

    wait_until(compaction_finished, timeout=60)

    # ensure gc_compaction is scheduled and it's actually running (instead of skipping due to no layers picked)
    env.pageserver.assert_log_contains(
        "scheduled_compact_timeline.*picked .* layers for compaction"
    )

    log.info("Validating at workload end ...")
    workload.validate(env.pageserver.id)

    # Run a legacy compaction+gc to ensure gc-compaction can coexist with legacy compaction.
    ps_http.timeline_checkpoint(tenant_id, timeline_id, wait_until_uploaded=True)
    ps_http.timeline_gc(tenant_id, timeline_id, None)

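The two tests above drive gc-compaction through the same pair of calls: enqueue a scheduled job via `timeline_compact` and poll `timeline_compact_info` until the queue drains. A minimal sketch of that pattern as a standalone helper, using only the fixture methods already used in this file (the helper name itself is ours, not part of the diff):

def schedule_gc_compaction_and_wait(ps_http, tenant_id, timeline_id, timeout=60):
    # Enqueue one scheduled, sub-compacted gc-compaction over the whole key space.
    ps_http.timeline_compact(
        tenant_id,
        timeline_id,
        enhanced_gc_bottom_most_compaction=True,
        body={
            "scheduled": True,
            "sub_compaction": True,
            "compact_key_range": {
                "start": "000000000000000000000000000000000000",
                "end": "030000000000000000000000000000000000",
            },
        },
    )

    def queue_drained():
        # timeline_compact_info lists the jobs still waiting in the compaction queue.
        assert len(ps_http.timeline_compact_info(tenant_id, timeline_id)) == 0

    wait_until(queue_drained, timeout=timeout)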
# Stripe sizes in number of pages.
TINY_STRIPES = 16
LARGE_STRIPES = 32768

@@ -237,7 +463,9 @@ def test_sharding_compaction(
        "pitr_interval": "0s",
        # disable background compaction and GC. We invoke it manually when we want it to happen.
        "gc_period": "0s",
        "gc_horizon": f"{128 * 1024}",
        "compaction_period": "0s",
        "lsn_lease_length": "0s",
        # create image layers eagerly: we want to exercise image layer creation in this test.
        "image_creation_threshold": "1",
        "image_layer_creation_check_threshold": 0,
@@ -312,6 +540,8 @@ def test_sharding_compaction(
    for shard in env.storage_controller.locate(tenant_id):
        pageserver = env.get_pageserver(shard["node_id"])
        tenant_shard_id = shard["shard_id"]
        # Force refresh gc info to have gc_cutoff generated
        pageserver.http_client().timeline_gc(tenant_shard_id, timeline_id, None)
        pageserver.http_client().timeline_compact(
            tenant_shard_id,
            timeline_id,

@@ -143,7 +143,7 @@ def test_create_snapshot(
    env = neon_env_builder.init_start(
        initial_tenant_conf={
            # Miniature layers to enable generating non-trivial layer map without writing lots of data
            # Miniature layers to enable generating non-trivial layer map without writing lots of data.
            "checkpoint_distance": f"{128 * 1024}",
            "compaction_threshold": "1",
            "compaction_target_size": f"{128 * 1024}",
@@ -251,6 +251,8 @@ def test_forward_compatibility(
        os.environ.get("ALLOW_FORWARD_COMPATIBILITY_BREAKAGE", "false").lower() == "true"
    )

    neon_env_builder.test_may_use_compatibility_snapshot_binaries = True

    try:
        neon_env_builder.num_safekeepers = 3

@@ -11,10 +11,13 @@ from fixtures.neon_fixtures import NeonEnvBuilder
# Test pageserver recovery after crash
#
def test_pageserver_recovery(neon_env_builder: NeonEnvBuilder):
    # Override default checkpointer settings to run it more often
    # Override default checkpointer settings to run it more often.
    # This also creates a bunch more L0 layers, so disable backpressure.
    env = neon_env_builder.init_start(
        initial_tenant_conf={
            "checkpoint_distance": "1048576",
            "l0_flush_delay_threshold": "0",
            "l0_flush_stall_threshold": "0",
        }
    )
    env.pageserver.is_testing_enabled_or_skip()

@@ -539,6 +539,8 @@ def test_timeline_deletion_with_files_stuck_in_upload_queue(
        # small checkpointing and compaction targets to ensure we generate many operations
        "checkpoint_distance": f"{64 * 1024}",
        "compaction_threshold": "1",
        "l0_flush_delay_threshold": "0",
        "l0_flush_stall_threshold": "0",
        "compaction_target_size": f"{64 * 1024}",
        # large horizon to avoid automatic GC (our assert on gc_result below relies on that)
        "gc_horizon": f"{1024 ** 4}",
@@ -784,54 +786,6 @@ def test_empty_branch_remote_storage_upload_on_restart(neon_env_builder: NeonEnv
    create_thread.join()


def test_paused_upload_stalls_checkpoint(
    neon_env_builder: NeonEnvBuilder,
):
    """
    This test checks that checkpoints block on uploads to remote storage.
    """
    neon_env_builder.enable_pageserver_remote_storage(RemoteStorageKind.LOCAL_FS)

    env = neon_env_builder.init_start(
        initial_tenant_conf={
            # Set a small compaction threshold
            "compaction_threshold": "3",
            # Disable GC
            "gc_period": "0s",
            # disable PITR
            "pitr_interval": "0s",
        }
    )

    env.pageserver.allowed_errors.append(
        f".*PUT.* path=/v1/tenant/{env.initial_tenant}/timeline.* request was dropped before completing"
    )

    tenant_id = env.initial_tenant
    timeline_id = env.initial_timeline

    client = env.pageserver.http_client()
    layers_at_creation = client.layer_map_info(tenant_id, timeline_id)
    deltas_at_creation = len(layers_at_creation.delta_layers())
    assert (
        deltas_at_creation == 1
    ), "are you fixing #5863? make sure we end up with 2 deltas at the end of endpoint lifecycle"

    # Make new layer uploads get stuck.
    # Note that timeline creation waits for the initial layers to reach remote storage.
    # So at this point, the `layers_at_creation` are in remote storage.
    client.configure_failpoints(("before-upload-layer-pausable", "pause"))

    with env.endpoints.create_start("main", tenant_id=tenant_id) as endpoint:
        # Build two tables with some data inside
        endpoint.safe_psql("CREATE TABLE foo AS SELECT x FROM generate_series(1, 10000) g(x)")
        wait_for_last_flush_lsn(env, endpoint, tenant_id, timeline_id)

        with pytest.raises(ReadTimeout):
            client.timeline_checkpoint(tenant_id, timeline_id, timeout=5)
    client.configure_failpoints(("before-upload-layer-pausable", "off"))


def wait_upload_queue_empty(
    client: PageserverHttpClient, tenant_id: TenantId, timeline_id: TimelineId
):

@@ -271,8 +271,14 @@ def test_scrubber_physical_gc_ancestors(neon_env_builder: NeonEnvBuilder, shard_
        ps.http_client().timeline_compact(
            shard, timeline_id, force_image_layer_creation=True, wait_until_uploaded=True
        )
    # Add some WAL so that we don't gc at the latest remote consistent lsn
    workload.churn_rows(1)

    # Add some WAL so that we don't gc at the latest remote consistent lsn
    workload.churn_rows(10)

    # Now gc the old stuff away
    for shard in shards:
        ps = env.get_tenant_pageserver(shard)
        assert ps is not None
        ps.http_client().timeline_gc(shard, timeline_id, 0)

    # We will use a min_age_secs=1 threshold for deletion, let it pass

242  test_runner/regress/test_subscriber_branching.py  (new file)
@@ -0,0 +1,242 @@
from __future__ import annotations

import time

from fixtures.log_helper import log
from fixtures.neon_fixtures import NeonEnv, logical_replication_sync
from fixtures.utils import query_scalar, wait_until


# This test checks that branching a timeline with logical subscriptions
# does not affect logical replication for the parent.
# An endpoint on a new branch drops all existing subscriptions at start,
# so it will not receive any changes.
# If needed, the user can create new subscriptions on the child branch
# (see the condensed sketch after this file).
def test_subscriber_branching(neon_simple_env: NeonEnv):
    env = neon_simple_env
    env.create_branch("publisher")
    pub = env.endpoints.create("publisher")
    pub.respec(
        skip_pg_catalog_updates=False,
        create_test_user=True,
    )
    pub.start(create_test_user=True)

    env.create_branch("subscriber")
    sub = env.endpoints.create("subscriber")
    # Pass create_test_user flag to get properly filled spec.users and spec.databases fields.
    #
    # This test checks the per-database operations that happen at compute start,
    # and these operations are applied to the databases that are present in the spec.
    sub.respec(
        skip_pg_catalog_updates=False,
        create_test_user=True,
    )
    sub.start(create_test_user=True)

    pub.wait_for_migrations()
    sub.wait_for_migrations()

    n_records = 1000

    def check_that_changes_propagated():
        scur.execute("SELECT count(*) FROM t")
        res = scur.fetchall()
        assert res[0][0] == n_records

    def insert_data(pub, start):
        with pub.cursor(dbname="neondb", user="test", password="pubtestpwd") as pcur:
            for i in range(start, start + n_records):
                pcur.execute("INSERT into t values (%s,random()*100000)", (i,))

    # create_test_user creates a user without a password,
    # but psycopg2 execute() requires a password
    with sub.cursor() as scur:
        scur.execute("ALTER USER test WITH PASSWORD 'testpwd'")
    with pub.cursor() as pcur:
        # Create a test user to avoid using superuser
        pcur.execute("ALTER USER test WITH PASSWORD 'pubtestpwd'")
    # If we don't do this, creating the subscription will fail
    pub.edit_hba(["host all test 0.0.0.0/0 md5"])

    with pub.cursor(dbname="neondb", user="test", password="pubtestpwd") as pcur:
        pcur.execute("CREATE TABLE t (pk integer primary key, sk integer)")
        pcur.execute("CREATE PUBLICATION pub FOR TABLE t")

        with sub.cursor(dbname="neondb", user="test", password="testpwd") as scur:
            scur.execute("CREATE TABLE t (pk integer primary key, sk integer)")
            pub_conn = (
                f"host=localhost port={pub.pg_port} dbname=neondb user=test password=pubtestpwd"
            )
            query = f"CREATE SUBSCRIPTION sub CONNECTION '{pub_conn}' PUBLICATION pub"
            scur.execute(query)
            time.sleep(2)  # let initial table sync complete

        insert_data(pub, 0)

        with sub.cursor(dbname="neondb", user="test", password="testpwd") as scur:
            wait_until(check_that_changes_propagated)
            latest_end_lsn = query_scalar(
                scur, "select latest_end_lsn from pg_catalog.pg_stat_subscription; "
            )
            last_insert_lsn = query_scalar(scur, "select pg_current_wal_insert_lsn();")

        log.info(f"latest_end_lsn = {latest_end_lsn}")
        log.info(f"last_insert_lsn = {last_insert_lsn}")

        # stop the parent subscriber so that it doesn't interfere with the test
        sub.stop()

        # 1. good scenario:
        # create subscriber_child_1;
        # it will not get changes from the publisher, because drop_subscriptions_before_start is set to True
        sub_child_1_timeline_id = env.create_branch(
            "subscriber_child_1",
            ancestor_branch_name="subscriber",
            ancestor_start_lsn=last_insert_lsn,
        )
        sub_child_1 = env.endpoints.create("subscriber_child_1")
        # Pass drop_subscriptions_before_start flag
        sub_child_1.respec(
            skip_pg_catalog_updates=False,
            create_test_user=True,
            drop_subscriptions_before_start=True,
        )
        sub_child_1.start(create_test_user=True)

        # ensure that subscriber_child_1 sees all the data
        with sub_child_1.cursor(dbname="neondb", user="test", password="testpwd") as scur:
            scur.execute("SELECT count(*) FROM t")
            res = scur.fetchall()
            assert res[0][0] == n_records

            # ensure that there are no subscriptions in this database
            scur.execute("SELECT 1 FROM pg_catalog.pg_subscription WHERE subname = 'sub'")
            assert len(scur.fetchall()) == 0

        # ensure that drop_subscriptions_done happened on this timeline
        with sub_child_1.cursor() as scur_postgres:
            scur_postgres.execute("SELECT timeline_id from neon.drop_subscriptions_done")
            res = scur_postgres.fetchall()
            assert len(res) == 1
            assert str(sub_child_1_timeline_id) == res[0][0]

        old_n_records = n_records
        # insert more data on the publisher
        insert_data(pub, n_records)
        n_records += n_records

        pcur.execute("SELECT count(*) FROM t")
        res = pcur.fetchall()
        assert res[0][0] == n_records

        # ensure that subscriber_child_1 doesn't see the new data
        with sub_child_1.cursor(dbname="neondb", user="test", password="testpwd") as scur:
            scur.execute("SELECT count(*) FROM t")
            res = scur.fetchall()
            assert res[0][0] == old_n_records

        # re-enable logical replication on subscriber_child_1 using a new publication
        # and ensure that the new publication works as expected
        with sub_child_1.cursor(dbname="neondb", user="test", password="testpwd") as scur:
            scur.execute("TRUNCATE t")

            # create a new subscription with a new publication name
            pcur.execute("CREATE PUBLICATION pub_new FOR TABLE t")
            query = f"CREATE SUBSCRIPTION sub_new CONNECTION '{pub_conn}' PUBLICATION pub_new"
            scur.execute(query)

            wait_until(check_that_changes_propagated)

            scur.execute("SELECT count(*) FROM t")
            res = scur.fetchall()
            assert res[0][0] == n_records

        # ensure that the new publication works as expected after compute restart;
        # first restart with drop_subscriptions_before_start=True
        # to emulate the case when compute restarts within the VM with a stale spec
        sub_child_1.stop()
        sub_child_1.respec(
            skip_pg_catalog_updates=False,
            create_test_user=True,
            drop_subscriptions_before_start=True,
        )
        sub_child_1.start(create_test_user=True)

        with sub_child_1.cursor(dbname="neondb", user="test", password="testpwd") as scur:
            # ensure that even though the flag is set, we didn't drop the new subscription
            scur.execute("SELECT 1 FROM pg_catalog.pg_subscription WHERE subname = 'sub_new'")
            assert len(scur.fetchall()) == 1

        # ensure that drop_subscriptions_done happened on this timeline
        with sub_child_1.cursor() as scur_postgres:
            scur_postgres.execute("SELECT timeline_id from neon.drop_subscriptions_done")
            res = scur_postgres.fetchall()
            assert len(res) == 1
            assert str(sub_child_1_timeline_id) == res[0][0]

        sub_child_1.stop()
        sub_child_1.respec(
            skip_pg_catalog_updates=False,
            create_test_user=True,
            drop_subscriptions_before_start=False,
        )
        sub_child_1.start(create_test_user=True)

        # insert more data on the publisher
        insert_data(pub, n_records)
        n_records += n_records
        with sub_child_1.cursor(dbname="neondb", user="test", password="testpwd") as scur:
            # ensure that there is a subscription in this database
            scur.execute("SELECT 1 FROM pg_catalog.pg_subscription WHERE subname = 'sub_new'")
            assert len(scur.fetchall()) == 1

            wait_until(check_that_changes_propagated)
            scur.execute("SELECT count(*) FROM t")
            res = scur.fetchall()
            assert res[0][0] == n_records

        # ensure that drop_subscriptions_done happened on this timeline
        with sub_child_1.cursor() as scur_postgres:
            scur_postgres.execute("SELECT timeline_id from neon.drop_subscriptions_done")
            res = scur_postgres.fetchall()
            assert len(res) == 1
            assert str(sub_child_1_timeline_id) == res[0][0]

        # wake the sub and ensure that it catches up with the new data
        sub.start(create_test_user=True)
        with sub.cursor(dbname="neondb", user="test", password="testpwd") as scur:
            logical_replication_sync(sub, pub)
            wait_until(check_that_changes_propagated)
            scur.execute("SELECT count(*) FROM t")
            res = scur.fetchall()
            assert res[0][0] == n_records

        # test that we can create a branch of a branch
        sub_child_2_timeline_id = env.create_branch(
            "subscriber_child_2",
            ancestor_branch_name="subscriber_child_1",
        )
        sub_child_2 = env.endpoints.create("subscriber_child_2")
        # Pass drop_subscriptions_before_start flag
        sub_child_2.respec(
            skip_pg_catalog_updates=False,
            drop_subscriptions_before_start=True,
        )
        sub_child_2.start(create_test_user=True)

        # ensure that subscriber_child_2 does not inherit the subscription from child_1
        with sub_child_2.cursor(dbname="neondb", user="test", password="testpwd") as scur:
            # ensure that there are no subscriptions in this database
            scur.execute("SELECT count(*) FROM pg_catalog.pg_subscription")
            res = scur.fetchall()
            assert res[0][0] == 0

        # ensure that drop_subscriptions_done happened on this timeline
        with sub_child_2.cursor() as scur_postgres:
            scur_postgres.execute("SELECT timeline_id from neon.drop_subscriptions_done")
            res = scur_postgres.fetchall()
            assert len(res) == 1
            assert str(sub_child_2_timeline_id) == res[0][0]
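Condensed from the test above: on a child branch the inherited subscription is gone after drop_subscriptions_before_start, and replication is resumed by subscribing again under a new name. This sketch is not part of the diff; `child` and `publisher_dsn` are hypothetical stand-ins for the endpoint and connection string the test builds.

with child.cursor(dbname="neondb", user="test", password="testpwd") as cur:
    # The subscription inherited from the parent branch was dropped at compute start.
    cur.execute("SELECT count(*) FROM pg_catalog.pg_subscription")
    assert cur.fetchall()[0][0] == 0
    # Re-subscribe under a new name against the same publisher.
    cur.execute(f"CREATE SUBSCRIPTION sub_new CONNECTION '{publisher_dsn}' PUBLICATION pub_new")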
@@ -607,7 +607,7 @@ def test_timeline_ancestor_detach_idempotent_success(

    if shards_after > 1:
        # FIXME: should this be in the neon_env_builder.init_start?
        env.storage_controller.reconcile_until_idle()
        env.storage_controller.reconcile_until_idle(timeout_secs=120)
        client = env.storage_controller.pageserver_api()
    else:
        client = env.pageserver.http_client()
@@ -636,7 +636,7 @@ def test_timeline_ancestor_detach_idempotent_success(
    # Do a shard split
    # This is a reproducer for https://github.com/neondatabase/neon/issues/9667
    env.storage_controller.tenant_shard_split(env.initial_tenant, shards_after)
    env.storage_controller.reconcile_until_idle()
    env.storage_controller.reconcile_until_idle(timeout_secs=120)

    first_reparenting_response = client.detach_ancestor(env.initial_tenant, first_branch)
    assert set(first_reparenting_response) == {reparented1, reparented2}

@@ -440,7 +440,7 @@ def test_timeline_physical_size_post_compaction(neon_env_builder: NeonEnvBuilder
    env = neon_env_builder.init_start(
        initial_tenant_conf={
            "checkpoint_distance": "100000",
            "compaction_period": "10m",
            "compaction_period": "0s",
        }
    )
    pageserver_http = env.pageserver.http_client()

@@ -203,6 +203,9 @@ def test_vm_bit_clear_on_heap_lock_blackbox(neon_env_builder: NeonEnvBuilder):
            "checkpoint_distance": f"{128 * 1024}",
            "compaction_target_size": f"{128 * 1024}",
            "compaction_threshold": "1",
            # disable L0 backpressure
            "l0_flush_delay_threshold": "0",
            "l0_flush_stall_threshold": "0",
            # create image layers eagerly, so that GC can remove some layers
            "image_creation_threshold": "1",
            # set PITR interval to be small, so we can do GC

@@ -0,0 +1 @@
__version__: str

@@ -1,11 +1,12 @@
from _typeshed import Incomplete
from typing import Any

class _BooleanConfigOption:
    name: Incomplete
    attr_name: Incomplete
    def __init__(self, name) -> None: ...
    def __get__(self, instance, owner): ...
    def __set__(self, instance, value) -> None: ...
    def __init__(self, name: str) -> None: ...
    def __get__(self, instance: Any, owner: Any) -> bool: ...
    def __set__(self, instance: Any, value: bool) -> None: ...

class DummyLogger:
    def __init__(self, *vargs) -> None: ...
@@ -15,7 +16,7 @@ class DummyLogger:
class OutputLogger:
    file: Incomplete
    trace_level: Incomplete
    def __init__(self, file: Incomplete | None = ..., trace_level: bool = ...) -> None: ...
    def __init__(self, file: Incomplete | None = None, trace_level: bool = False) -> None: ...
    def debug(self, fmtstr, *args) -> None: ...
    def trace(self, fmtstr, *args) -> None: ...

@@ -23,20 +24,12 @@ class H2Configuration:
    client_side: Incomplete
    validate_outbound_headers: Incomplete
    normalize_outbound_headers: Incomplete
    split_outbound_cookies: Incomplete
    validate_inbound_headers: Incomplete
    normalize_inbound_headers: Incomplete
    logger: Incomplete
    def __init__(
        self,
        client_side: bool = ...,
        header_encoding: Incomplete | None = ...,
        validate_outbound_headers: bool = ...,
        normalize_outbound_headers: bool = ...,
        validate_inbound_headers: bool = ...,
        normalize_inbound_headers: bool = ...,
        logger: Incomplete | None = ...,
    ) -> None: ...
    def __init__(self, client_side: bool = True, header_encoding: bool | str | None = None, validate_outbound_headers: bool = True, normalize_outbound_headers: bool = True, split_outbound_cookies: bool = False, validate_inbound_headers: bool = True, normalize_inbound_headers: bool = True, logger: DummyLogger | OutputLogger | None = None) -> None: ...
    @property
    def header_encoding(self): ...
    def header_encoding(self) -> bool | str | None: ...
    @header_encoding.setter
    def header_encoding(self, value) -> None: ...
    def header_encoding(self, value: bool | str | None) -> None: ...

@@ -1,72 +1,55 @@
from enum import Enum, IntEnum

from _typeshed import Incomplete

from .config import H2Configuration as H2Configuration
from .errors import ErrorCodes as ErrorCodes
from .events import AlternativeServiceAvailable as AlternativeServiceAvailable
from .events import ConnectionTerminated as ConnectionTerminated
from .events import PingAckReceived as PingAckReceived
from .events import PingReceived as PingReceived
from .events import PriorityUpdated as PriorityUpdated
from .events import RemoteSettingsChanged as RemoteSettingsChanged
from .events import SettingsAcknowledged as SettingsAcknowledged
from .events import UnknownFrameReceived as UnknownFrameReceived
from .events import WindowUpdated as WindowUpdated
from .exceptions import DenialOfServiceError as DenialOfServiceError
from .exceptions import FlowControlError as FlowControlError
from .exceptions import FrameTooLargeError as FrameTooLargeError
from .exceptions import NoAvailableStreamIDError as NoAvailableStreamIDError
from .exceptions import NoSuchStreamError as NoSuchStreamError
from .exceptions import ProtocolError as ProtocolError
from .exceptions import RFC1122Error as RFC1122Error
from .exceptions import StreamClosedError as StreamClosedError
from .exceptions import StreamIDTooLowError as StreamIDTooLowError
from .exceptions import TooManyStreamsError as TooManyStreamsError
from .events import AlternativeServiceAvailable as AlternativeServiceAvailable, ConnectionTerminated as ConnectionTerminated, Event as Event, InformationalResponseReceived as InformationalResponseReceived, PingAckReceived as PingAckReceived, PingReceived as PingReceived, PriorityUpdated as PriorityUpdated, RemoteSettingsChanged as RemoteSettingsChanged, RequestReceived as RequestReceived, ResponseReceived as ResponseReceived, SettingsAcknowledged as SettingsAcknowledged, TrailersReceived as TrailersReceived, UnknownFrameReceived as UnknownFrameReceived, WindowUpdated as WindowUpdated
from .exceptions import DenialOfServiceError as DenialOfServiceError, FlowControlError as FlowControlError, FrameTooLargeError as FrameTooLargeError, NoAvailableStreamIDError as NoAvailableStreamIDError, NoSuchStreamError as NoSuchStreamError, ProtocolError as ProtocolError, RFC1122Error as RFC1122Error, StreamClosedError as StreamClosedError, StreamIDTooLowError as StreamIDTooLowError, TooManyStreamsError as TooManyStreamsError
from .frame_buffer import FrameBuffer as FrameBuffer
from .settings import SettingCodes as SettingCodes
from .settings import Settings as Settings
from .stream import H2Stream as H2Stream
from .stream import StreamClosedBy as StreamClosedBy
from .utilities import guard_increment_window as guard_increment_window
from .settings import ChangedSetting as ChangedSetting, SettingCodes as SettingCodes, Settings as Settings
from .stream import H2Stream as H2Stream, StreamClosedBy as StreamClosedBy
from .utilities import SizeLimitDict as SizeLimitDict, guard_increment_window as guard_increment_window
from .windows import WindowManager as WindowManager
from _typeshed import Incomplete
from collections.abc import Iterable
from enum import Enum, IntEnum
from hpack.struct import Header as Header, HeaderWeaklyTyped as HeaderWeaklyTyped
from hyperframe.frame import Frame as Frame
from typing import Any

class ConnectionState(Enum):
    IDLE: int
    CLIENT_OPEN: int
    SERVER_OPEN: int
    CLOSED: int
    IDLE = 0
    CLIENT_OPEN = 1
    SERVER_OPEN = 2
    CLOSED = 3

class ConnectionInputs(Enum):
    SEND_HEADERS: int
    SEND_PUSH_PROMISE: int
    SEND_DATA: int
    SEND_GOAWAY: int
    SEND_WINDOW_UPDATE: int
    SEND_PING: int
    SEND_SETTINGS: int
    SEND_RST_STREAM: int
    SEND_PRIORITY: int
    RECV_HEADERS: int
    RECV_PUSH_PROMISE: int
    RECV_DATA: int
    RECV_GOAWAY: int
    RECV_WINDOW_UPDATE: int
    RECV_PING: int
    RECV_SETTINGS: int
    RECV_RST_STREAM: int
    RECV_PRIORITY: int
    SEND_ALTERNATIVE_SERVICE: int
    RECV_ALTERNATIVE_SERVICE: int
    SEND_HEADERS = 0
    SEND_PUSH_PROMISE = 1
    SEND_DATA = 2
    SEND_GOAWAY = 3
    SEND_WINDOW_UPDATE = 4
    SEND_PING = 5
    SEND_SETTINGS = 6
    SEND_RST_STREAM = 7
    SEND_PRIORITY = 8
    RECV_HEADERS = 9
    RECV_PUSH_PROMISE = 10
    RECV_DATA = 11
    RECV_GOAWAY = 12
    RECV_WINDOW_UPDATE = 13
    RECV_PING = 14
    RECV_SETTINGS = 15
    RECV_RST_STREAM = 16
    RECV_PRIORITY = 17
    SEND_ALTERNATIVE_SERVICE = 18
    RECV_ALTERNATIVE_SERVICE = 19

class AllowedStreamIDs(IntEnum):
    EVEN: int
    ODD: int
    EVEN = 0
    ODD = 1

class H2ConnectionStateMachine:
    state: Incomplete
    def __init__(self) -> None: ...
    def process_input(self, input_): ...
    def process_input(self, input_: ConnectionInputs) -> list[Event]: ...

class H2Connection:
    DEFAULT_MAX_OUTBOUND_FRAME_SIZE: int
@@ -88,55 +71,30 @@ class H2Connection:
    max_outbound_frame_size: Incomplete
    max_inbound_frame_size: Incomplete
    incoming_buffer: Incomplete
    def __init__(self, config: Incomplete | None = ...) -> None: ...
    def __init__(self, config: H2Configuration | None = None) -> None: ...
    @property
    def open_outbound_streams(self): ...
    def open_outbound_streams(self) -> int: ...
    @property
    def open_inbound_streams(self): ...
    def open_inbound_streams(self) -> int: ...
    @property
    def inbound_flow_control_window(self): ...
    def inbound_flow_control_window(self) -> int: ...
    def initiate_connection(self) -> None: ...
    def initiate_upgrade_connection(self, settings_header: Incomplete | None = ...): ...
    def get_next_available_stream_id(self): ...
    def send_headers(
        self,
        stream_id,
        headers,
        end_stream: bool = ...,
        priority_weight: Incomplete | None = ...,
        priority_depends_on: Incomplete | None = ...,
        priority_exclusive: Incomplete | None = ...,
    ) -> None: ...
    def send_data(
        self, stream_id, data, end_stream: bool = ..., pad_length: Incomplete | None = ...
    ) -> None: ...
    def end_stream(self, stream_id) -> None: ...
    def increment_flow_control_window(
        self, increment, stream_id: Incomplete | None = ...
    ) -> None: ...
    def push_stream(self, stream_id, promised_stream_id, request_headers) -> None: ...
    def ping(self, opaque_data) -> None: ...
    def reset_stream(self, stream_id, error_code: int = ...) -> None: ...
    def close_connection(
        self,
        error_code: int = ...,
        additional_data: Incomplete | None = ...,
        last_stream_id: Incomplete | None = ...,
    ) -> None: ...
    def update_settings(self, new_settings) -> None: ...
    def advertise_alternative_service(
        self, field_value, origin: Incomplete | None = ..., stream_id: Incomplete | None = ...
    ) -> None: ...
    def prioritize(
        self,
        stream_id,
        weight: Incomplete | None = ...,
        depends_on: Incomplete | None = ...,
        exclusive: Incomplete | None = ...,
    ) -> None: ...
    def local_flow_control_window(self, stream_id): ...
    def remote_flow_control_window(self, stream_id): ...
    def acknowledge_received_data(self, acknowledged_size, stream_id) -> None: ...
    def data_to_send(self, amount: Incomplete | None = ...): ...
    def initiate_upgrade_connection(self, settings_header: bytes | None = None) -> bytes | None: ...
    def get_next_available_stream_id(self) -> int: ...
    def send_headers(self, stream_id: int, headers: Iterable[HeaderWeaklyTyped], end_stream: bool = False, priority_weight: int | None = None, priority_depends_on: int | None = None, priority_exclusive: bool | None = None) -> None: ...
    def send_data(self, stream_id: int, data: bytes | memoryview, end_stream: bool = False, pad_length: Any = None) -> None: ...
    def end_stream(self, stream_id: int) -> None: ...
    def increment_flow_control_window(self, increment: int, stream_id: int | None = None) -> None: ...
    def push_stream(self, stream_id: int, promised_stream_id: int, request_headers: Iterable[HeaderWeaklyTyped]) -> None: ...
    def ping(self, opaque_data: bytes | str) -> None: ...
    def reset_stream(self, stream_id: int, error_code: ErrorCodes | int = 0) -> None: ...
    def close_connection(self, error_code: ErrorCodes | int = 0, additional_data: bytes | None = None, last_stream_id: int | None = None) -> None: ...
    def update_settings(self, new_settings: dict[SettingCodes | int, int]) -> None: ...
    def advertise_alternative_service(self, field_value: bytes | str, origin: bytes | None = None, stream_id: int | None = None) -> None: ...
    def prioritize(self, stream_id: int, weight: int | None = None, depends_on: int | None = None, exclusive: bool | None = None) -> None: ...
    def local_flow_control_window(self, stream_id: int) -> int: ...
    def remote_flow_control_window(self, stream_id: int) -> int: ...
    def acknowledge_received_data(self, acknowledged_size: int, stream_id: int) -> None: ...
    def data_to_send(self, amount: int | None = None) -> bytes: ...
    def clear_outbound_data_buffer(self) -> None: ...
    def receive_data(self, data): ...
    def receive_data(self, data: bytes) -> list[Event]: ...

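Not part of the diff: a small, self-contained illustration of how the h2 API annotated above is typically driven on the client side, so the typed signatures (`send_headers`, `data_to_send`, `receive_data`) can be read in context. This is a generic h2 usage sketch, not code from this repository.

import h2.config
import h2.connection

config = h2.config.H2Configuration(client_side=True)
conn = h2.connection.H2Connection(config=config)
conn.initiate_connection()

stream_id = conn.get_next_available_stream_id()
conn.send_headers(
    stream_id,
    [
        (":method", "GET"),
        (":path", "/"),
        (":scheme", "https"),
        (":authority", "example.com"),
    ],
    end_stream=True,
)

outbound = conn.data_to_send()  # bytes to write to the socket
events = conn.receive_data(b"")  # bytes read from the socket -> list[Event]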
@@ -1,17 +1,19 @@
import enum

__all__ = ['ErrorCodes']

class ErrorCodes(enum.IntEnum):
    NO_ERROR: int
    PROTOCOL_ERROR: int
    INTERNAL_ERROR: int
    FLOW_CONTROL_ERROR: int
    SETTINGS_TIMEOUT: int
    STREAM_CLOSED: int
    FRAME_SIZE_ERROR: int
    REFUSED_STREAM: int
    CANCEL: int
    COMPRESSION_ERROR: int
    CONNECT_ERROR: int
    ENHANCE_YOUR_CALM: int
    INADEQUATE_SECURITY: int
    HTTP_1_1_REQUIRED: int
    NO_ERROR = 0
    PROTOCOL_ERROR = 1
    INTERNAL_ERROR = 2
    FLOW_CONTROL_ERROR = 3
    SETTINGS_TIMEOUT = 4
    STREAM_CLOSED = 5
    FRAME_SIZE_ERROR = 6
    REFUSED_STREAM = 7
    CANCEL = 8
    COMPRESSION_ERROR = 9
    CONNECT_ERROR = 10
    ENHANCE_YOUR_CALM = 11
    INADEQUATE_SECURITY = 12
    HTTP_1_1_REQUIRED = 13

@@ -1,6 +1,8 @@
from .errors import ErrorCodes as ErrorCodes
from .settings import ChangedSetting as ChangedSetting, SettingCodes as SettingCodes, Settings as Settings
from _typeshed import Incomplete

from .settings import ChangedSetting as ChangedSetting
from hpack import HeaderTuple as HeaderTuple
from hyperframe.frame import Frame as Frame

class Event: ...

@@ -53,7 +55,7 @@ class RemoteSettingsChanged(Event):
    changed_settings: Incomplete
    def __init__(self) -> None: ...
    @classmethod
    def from_settings(cls, old_settings, new_settings): ...
    def from_settings(cls, old_settings: Settings | dict[int, int], new_settings: dict[int, int]) -> RemoteSettingsChanged: ...

class PingReceived(Event):
    ping_data: Incomplete

@@ -1,3 +1,4 @@
from .errors import ErrorCodes as ErrorCodes
from _typeshed import Incomplete

class H2Error(Exception): ...
@@ -19,27 +20,27 @@ class FlowControlError(ProtocolError):
class StreamIDTooLowError(ProtocolError):
    stream_id: Incomplete
    max_stream_id: Incomplete
    def __init__(self, stream_id, max_stream_id) -> None: ...
    def __init__(self, stream_id: int, max_stream_id: int) -> None: ...

class NoAvailableStreamIDError(ProtocolError): ...

class NoSuchStreamError(ProtocolError):
    stream_id: Incomplete
    def __init__(self, stream_id) -> None: ...
    def __init__(self, stream_id: int) -> None: ...

class StreamClosedError(NoSuchStreamError):
    stream_id: Incomplete
    error_code: Incomplete
    def __init__(self, stream_id) -> None: ...
    def __init__(self, stream_id: int) -> None: ...

class InvalidSettingsValueError(ProtocolError, ValueError):
    error_code: Incomplete
    def __init__(self, msg, error_code) -> None: ...
    def __init__(self, msg: str, error_code: ErrorCodes) -> None: ...

class InvalidBodyLengthError(ProtocolError):
    expected_length: Incomplete
    actual_length: Incomplete
    def __init__(self, expected, actual) -> None: ...
    def __init__(self, expected: int, actual: int) -> None: ...

class UnsupportedFrameError(ProtocolError): ...
class RFC1122Error(H2Error): ...

@@ -1,19 +1,12 @@
from .exceptions import (
    FrameDataMissingError as FrameDataMissingError,
)
from .exceptions import (
    FrameTooLargeError as FrameTooLargeError,
)
from .exceptions import (
    ProtocolError as ProtocolError,
)
from .exceptions import FrameDataMissingError as FrameDataMissingError, FrameTooLargeError as FrameTooLargeError, ProtocolError as ProtocolError
from hyperframe.frame import Frame

CONTINUATION_BACKLOG: int

class FrameBuffer:
    data: bytes
    max_frame_size: int
    def __init__(self, server: bool = ...) -> None: ...
    def add_data(self, data) -> None: ...
    def __iter__(self): ...
    def __next__(self): ...
    def __init__(self, server: bool = False) -> None: ...
    def add_data(self, data: bytes) -> None: ...
    def __iter__(self) -> FrameBuffer: ...
    def __next__(self) -> Frame: ...

@@ -1,61 +1,59 @@
import enum
from collections.abc import MutableMapping
from typing import Any

from .errors import ErrorCodes as ErrorCodes
from .exceptions import InvalidSettingsValueError as InvalidSettingsValueError
from _typeshed import Incomplete
from h2.errors import ErrorCodes as ErrorCodes
from h2.exceptions import InvalidSettingsValueError as InvalidSettingsValueError
from collections.abc import Iterator, MutableMapping

class SettingCodes(enum.IntEnum):
    HEADER_TABLE_SIZE: Incomplete
    ENABLE_PUSH: Incomplete
    MAX_CONCURRENT_STREAMS: Incomplete
    INITIAL_WINDOW_SIZE: Incomplete
    MAX_FRAME_SIZE: Incomplete
    MAX_HEADER_LIST_SIZE: Incomplete
    ENABLE_CONNECT_PROTOCOL: Incomplete
    HEADER_TABLE_SIZE = ...
    ENABLE_PUSH = ...
    MAX_CONCURRENT_STREAMS = ...
    INITIAL_WINDOW_SIZE = ...
    MAX_FRAME_SIZE = ...
    MAX_HEADER_LIST_SIZE = ...
    ENABLE_CONNECT_PROTOCOL = ...

class ChangedSetting:
    setting: Incomplete
    original_value: Incomplete
    new_value: Incomplete
    def __init__(self, setting, original_value, new_value) -> None: ...
    def __init__(self, setting: SettingCodes | int, original_value: int | None, new_value: int) -> None: ...

class Settings(MutableMapping[str, Any]):
    def __init__(self, client: bool = ..., initial_values: Incomplete | None = ...) -> None: ...
    def acknowledge(self): ...
class Settings(MutableMapping[SettingCodes | int, int]):
    def __init__(self, client: bool = True, initial_values: dict[SettingCodes, int] | None = None) -> None: ...
    def acknowledge(self) -> dict[SettingCodes | int, ChangedSetting]: ...
    @property
    def header_table_size(self): ...
    def header_table_size(self) -> int: ...
    @header_table_size.setter
    def header_table_size(self, value) -> None: ...
    def header_table_size(self, value: int) -> None: ...
    @property
    def enable_push(self): ...
    def enable_push(self) -> int: ...
    @enable_push.setter
    def enable_push(self, value) -> None: ...
    def enable_push(self, value: int) -> None: ...
    @property
    def initial_window_size(self): ...
    def initial_window_size(self) -> int: ...
    @initial_window_size.setter
    def initial_window_size(self, value) -> None: ...
    def initial_window_size(self, value: int) -> None: ...
    @property
    def max_frame_size(self): ...
    def max_frame_size(self) -> int: ...
    @max_frame_size.setter
    def max_frame_size(self, value) -> None: ...
    def max_frame_size(self, value: int) -> None: ...
    @property
    def max_concurrent_streams(self): ...
    def max_concurrent_streams(self) -> int: ...
    @max_concurrent_streams.setter
    def max_concurrent_streams(self, value) -> None: ...
    def max_concurrent_streams(self, value: int) -> None: ...
    @property
    def max_header_list_size(self): ...
    def max_header_list_size(self) -> int | None: ...
    @max_header_list_size.setter
    def max_header_list_size(self, value) -> None: ...
    def max_header_list_size(self, value: int) -> None: ...
    @property
    def enable_connect_protocol(self): ...
    def enable_connect_protocol(self) -> int: ...
    @enable_connect_protocol.setter
    def enable_connect_protocol(self, value) -> None: ...
    def __getitem__(self, key): ...
    def __setitem__(self, key, value) -> None: ...
    def __delitem__(self, key) -> None: ...
    def __iter__(self): ...
    def enable_connect_protocol(self, value: int) -> None: ...
    def __getitem__(self, key: SettingCodes | int) -> int: ...
    def __setitem__(self, key: SettingCodes | int, value: int) -> None: ...
    def __delitem__(self, key: SettingCodes | int) -> None: ...
    def __iter__(self) -> Iterator[SettingCodes | int]: ...
    def __len__(self) -> int: ...
    def __eq__(self, other): ...
    def __ne__(self, other): ...
    def __eq__(self, other: object) -> bool: ...
    def __ne__(self, other: object) -> bool: ...

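Again not part of the diff: a short sketch of the Settings mapping these annotations describe; the values are arbitrary examples chosen for illustration.

from h2.settings import SettingCodes, Settings

settings = Settings(client=True)
settings[SettingCodes.MAX_CONCURRENT_STREAMS] = 100  # __setitem__(SettingCodes | int, int)
window = settings.initial_window_size                # typed property -> int
changed = settings.acknowledge()                     # dict[SettingCodes | int, ChangedSetting]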
@@ -1,114 +1,52 @@
|
||||
from enum import Enum, IntEnum
|
||||
|
||||
from _typeshed import Incomplete
|
||||
|
||||
from .config import H2Configuration as H2Configuration
|
||||
from .errors import ErrorCodes as ErrorCodes
|
||||
from .events import (
|
||||
AlternativeServiceAvailable as AlternativeServiceAvailable,
|
||||
)
|
||||
from .events import (
|
||||
DataReceived as DataReceived,
|
||||
)
|
||||
from .events import (
|
||||
InformationalResponseReceived as InformationalResponseReceived,
|
||||
)
|
||||
from .events import (
|
||||
PushedStreamReceived as PushedStreamReceived,
|
||||
)
|
||||
from .events import (
|
||||
RequestReceived as RequestReceived,
|
||||
)
|
||||
from .events import (
|
||||
ResponseReceived as ResponseReceived,
|
||||
)
|
||||
from .events import (
|
||||
StreamEnded as StreamEnded,
|
||||
)
|
||||
from .events import (
|
||||
StreamReset as StreamReset,
|
||||
)
|
||||
from .events import (
|
||||
TrailersReceived as TrailersReceived,
|
||||
)
|
||||
from .events import (
|
||||
WindowUpdated as WindowUpdated,
|
||||
)
|
||||
from .exceptions import (
|
||||
FlowControlError as FlowControlError,
|
||||
)
|
||||
from .exceptions import (
|
||||
InvalidBodyLengthError as InvalidBodyLengthError,
|
||||
)
|
||||
from .exceptions import (
|
||||
ProtocolError as ProtocolError,
|
||||
)
|
||||
from .exceptions import (
|
||||
StreamClosedError as StreamClosedError,
|
||||
)
|
||||
from .utilities import (
|
||||
HeaderValidationFlags as HeaderValidationFlags,
|
||||
)
|
||||
from .utilities import (
|
||||
authority_from_headers as authority_from_headers,
|
||||
)
|
||||
from .utilities import (
|
||||
extract_method_header as extract_method_header,
|
||||
)
|
||||
from .utilities import (
|
||||
guard_increment_window as guard_increment_window,
|
||||
)
|
||||
from .utilities import (
|
||||
is_informational_response as is_informational_response,
|
||||
)
|
||||
from .utilities import (
|
||||
normalize_inbound_headers as normalize_inbound_headers,
|
||||
)
|
||||
from .utilities import (
|
||||
normalize_outbound_headers as normalize_outbound_headers,
|
||||
)
|
||||
from .utilities import (
|
||||
validate_headers as validate_headers,
|
||||
)
|
||||
from .utilities import (
|
||||
validate_outbound_headers as validate_outbound_headers,
|
||||
)
|
||||
from .events import AlternativeServiceAvailable as AlternativeServiceAvailable, DataReceived as DataReceived, Event as Event, InformationalResponseReceived as InformationalResponseReceived, PushedStreamReceived as PushedStreamReceived, RequestReceived as RequestReceived, ResponseReceived as ResponseReceived, StreamEnded as StreamEnded, StreamReset as StreamReset, TrailersReceived as TrailersReceived, WindowUpdated as WindowUpdated
|
||||
from .exceptions import FlowControlError as FlowControlError, InvalidBodyLengthError as InvalidBodyLengthError, ProtocolError as ProtocolError, StreamClosedError as StreamClosedError
|
||||
from .utilities import HeaderValidationFlags as HeaderValidationFlags, authority_from_headers as authority_from_headers, extract_method_header as extract_method_header, guard_increment_window as guard_increment_window, is_informational_response as is_informational_response, normalize_inbound_headers as normalize_inbound_headers, normalize_outbound_headers as normalize_outbound_headers, utf8_encode_headers as utf8_encode_headers, validate_headers as validate_headers, validate_outbound_headers as validate_outbound_headers
|
||||
from .windows import WindowManager as WindowManager
|
||||
from _typeshed import Incomplete
|
||||
from collections.abc import Iterable
|
||||
from enum import Enum, IntEnum
|
||||
from hpack.hpack import Encoder as Encoder
|
||||
from hpack.struct import Header as Header, HeaderWeaklyTyped as HeaderWeaklyTyped
|
||||
from hyperframe.frame import AltSvcFrame, ContinuationFrame, Frame as Frame, HeadersFrame, PushPromiseFrame, RstStreamFrame
|
||||
from typing import Any
|
||||
|
||||
class StreamState(IntEnum):
|
||||
IDLE: int
|
||||
RESERVED_REMOTE: int
|
||||
RESERVED_LOCAL: int
|
||||
OPEN: int
|
||||
HALF_CLOSED_REMOTE: int
|
||||
HALF_CLOSED_LOCAL: int
|
||||
CLOSED: int
|
||||
IDLE = 0
|
||||
RESERVED_REMOTE = 1
|
||||
RESERVED_LOCAL = 2
|
||||
OPEN = 3
|
||||
HALF_CLOSED_REMOTE = 4
|
||||
HALF_CLOSED_LOCAL = 5
|
||||
CLOSED = 6
|
||||
|
||||
class StreamInputs(Enum):
|
||||
SEND_HEADERS: int
|
||||
SEND_PUSH_PROMISE: int
|
||||
SEND_RST_STREAM: int
|
||||
SEND_DATA: int
|
||||
SEND_WINDOW_UPDATE: int
|
||||
SEND_END_STREAM: int
|
||||
RECV_HEADERS: int
|
||||
RECV_PUSH_PROMISE: int
|
||||
RECV_RST_STREAM: int
|
||||
RECV_DATA: int
|
||||
RECV_WINDOW_UPDATE: int
|
||||
RECV_END_STREAM: int
|
||||
RECV_CONTINUATION: int
|
||||
SEND_INFORMATIONAL_HEADERS: int
|
||||
RECV_INFORMATIONAL_HEADERS: int
|
||||
SEND_ALTERNATIVE_SERVICE: int
|
||||
RECV_ALTERNATIVE_SERVICE: int
|
||||
UPGRADE_CLIENT: int
|
||||
UPGRADE_SERVER: int
|
||||
SEND_HEADERS = 0
|
||||
SEND_PUSH_PROMISE = 1
|
||||
SEND_RST_STREAM = 2
|
||||
SEND_DATA = 3
|
||||
SEND_WINDOW_UPDATE = 4
|
||||
SEND_END_STREAM = 5
|
||||
RECV_HEADERS = 6
|
||||
RECV_PUSH_PROMISE = 7
|
||||
RECV_RST_STREAM = 8
|
||||
RECV_DATA = 9
|
||||
RECV_WINDOW_UPDATE = 10
|
||||
RECV_END_STREAM = 11
|
||||
RECV_CONTINUATION = 12
|
||||
SEND_INFORMATIONAL_HEADERS = 13
|
||||
RECV_INFORMATIONAL_HEADERS = 14
|
||||
SEND_ALTERNATIVE_SERVICE = 15
|
||||
RECV_ALTERNATIVE_SERVICE = 16
|
||||
UPGRADE_CLIENT = 17
|
||||
UPGRADE_SERVER = 18
|
||||
|
||||
class StreamClosedBy(Enum):
|
||||
SEND_END_STREAM: int
|
||||
RECV_END_STREAM: int
|
||||
SEND_RST_STREAM: int
|
||||
RECV_RST_STREAM: int
|
||||
SEND_END_STREAM = 0
|
||||
RECV_END_STREAM = 1
|
||||
SEND_RST_STREAM = 2
|
||||
RECV_RST_STREAM = 3
|
||||
|
||||
STREAM_OPEN: Incomplete
|
||||
|
||||
@@ -121,32 +59,32 @@ class H2StreamStateMachine:
|
||||
headers_received: Incomplete
|
||||
trailers_received: Incomplete
|
||||
stream_closed_by: Incomplete
|
||||
def __init__(self, stream_id) -> None: ...
|
||||
def process_input(self, input_): ...
|
||||
def request_sent(self, previous_state): ...
|
||||
def response_sent(self, previous_state): ...
|
||||
def request_received(self, previous_state): ...
|
||||
def response_received(self, previous_state): ...
|
||||
def data_received(self, previous_state): ...
|
||||
def window_updated(self, previous_state): ...
|
||||
def stream_half_closed(self, previous_state): ...
|
||||
def stream_ended(self, previous_state): ...
|
||||
def stream_reset(self, previous_state): ...
|
||||
def send_new_pushed_stream(self, previous_state): ...
|
||||
def recv_new_pushed_stream(self, previous_state): ...
|
||||
def send_push_promise(self, previous_state): ...
|
||||
def recv_push_promise(self, previous_state): ...
|
||||
def send_end_stream(self, previous_state) -> None: ...
|
||||
def send_reset_stream(self, previous_state) -> None: ...
|
||||
def reset_stream_on_error(self, previous_state) -> None: ...
|
||||
def recv_on_closed_stream(self, previous_state) -> None: ...
|
||||
def send_on_closed_stream(self, previous_state) -> None: ...
|
||||
def recv_push_on_closed_stream(self, previous_state) -> None: ...
|
||||
def send_push_on_closed_stream(self, previous_state) -> None: ...
|
||||
def send_informational_response(self, previous_state): ...
|
||||
def recv_informational_response(self, previous_state): ...
|
||||
def recv_alt_svc(self, previous_state): ...
|
||||
def send_alt_svc(self, previous_state) -> None: ...
|
||||
def __init__(self, stream_id: int) -> None: ...
|
||||
def process_input(self, input_: StreamInputs) -> Any: ...
|
||||
def request_sent(self, previous_state: StreamState) -> list[Event]: ...
|
||||
def response_sent(self, previous_state: StreamState) -> list[Event]: ...
|
||||
def request_received(self, previous_state: StreamState) -> list[Event]: ...
|
||||
def response_received(self, previous_state: StreamState) -> list[Event]: ...
|
||||
def data_received(self, previous_state: StreamState) -> list[Event]: ...
|
||||
def window_updated(self, previous_state: StreamState) -> list[Event]: ...
|
||||
def stream_half_closed(self, previous_state: StreamState) -> list[Event]: ...
|
||||
def stream_ended(self, previous_state: StreamState) -> list[Event]: ...
|
||||
def stream_reset(self, previous_state: StreamState) -> list[Event]: ...
|
||||
def send_new_pushed_stream(self, previous_state: StreamState) -> list[Event]: ...
|
||||
def recv_new_pushed_stream(self, previous_state: StreamState) -> list[Event]: ...
|
||||
def send_push_promise(self, previous_state: StreamState) -> list[Event]: ...
|
||||
def recv_push_promise(self, previous_state: StreamState) -> list[Event]: ...
|
||||
def send_end_stream(self, previous_state: StreamState) -> None: ...
|
||||
def send_reset_stream(self, previous_state: StreamState) -> None: ...
|
||||
def reset_stream_on_error(self, previous_state: StreamState) -> None: ...
|
||||
def recv_on_closed_stream(self, previous_state: StreamState) -> None: ...
|
||||
def send_on_closed_stream(self, previous_state: StreamState) -> None: ...
|
||||
def recv_push_on_closed_stream(self, previous_state: StreamState) -> None: ...
|
||||
def send_push_on_closed_stream(self, previous_state: StreamState) -> None: ...
|
||||
def send_informational_response(self, previous_state: StreamState) -> list[Event]: ...
|
||||
def recv_informational_response(self, previous_state: StreamState) -> list[Event]: ...
|
||||
def recv_alt_svc(self, previous_state: StreamState) -> list[Event]: ...
|
||||
def send_alt_svc(self, previous_state: StreamState) -> None: ...
|
||||
|
||||
class H2Stream:
|
||||
state_machine: Incomplete
|
||||
@@ -155,30 +93,30 @@ class H2Stream:
|
||||
request_method: Incomplete
|
outbound_flow_control_window: Incomplete
config: Incomplete
def __init__(self, stream_id, config, inbound_window_size, outbound_window_size) -> None: ...
def __init__(self, stream_id: int, config: H2Configuration, inbound_window_size: int, outbound_window_size: int) -> None: ...
@property
def inbound_flow_control_window(self): ...
def inbound_flow_control_window(self) -> int: ...
@property
def open(self): ...
def open(self) -> bool: ...
@property
def closed(self): ...
def closed(self) -> bool: ...
@property
def closed_by(self): ...
def upgrade(self, client_side) -> None: ...
def send_headers(self, headers, encoder, end_stream: bool = ...): ...
def push_stream_in_band(self, related_stream_id, headers, encoder): ...
def locally_pushed(self): ...
def send_data(self, data, end_stream: bool = ..., pad_length: Incomplete | None = ...): ...
def end_stream(self): ...
def advertise_alternative_service(self, field_value): ...
def increase_flow_control_window(self, increment): ...
def receive_push_promise_in_band(self, promised_stream_id, headers, header_encoding): ...
def remotely_pushed(self, pushed_headers): ...
def receive_headers(self, headers, end_stream, header_encoding): ...
def receive_data(self, data, end_stream, flow_control_len): ...
def receive_window_update(self, increment): ...
def closed_by(self) -> StreamClosedBy | None: ...
def upgrade(self, client_side: bool) -> None: ...
def send_headers(self, headers: Iterable[HeaderWeaklyTyped], encoder: Encoder, end_stream: bool = False) -> list[HeadersFrame | ContinuationFrame | PushPromiseFrame]: ...
def push_stream_in_band(self, related_stream_id: int, headers: Iterable[HeaderWeaklyTyped], encoder: Encoder) -> list[HeadersFrame | ContinuationFrame | PushPromiseFrame]: ...
def locally_pushed(self) -> list[Frame]: ...
def send_data(self, data: bytes | memoryview, end_stream: bool = False, pad_length: int | None = None) -> list[Frame]: ...
def end_stream(self) -> list[Frame]: ...
def advertise_alternative_service(self, field_value: bytes) -> list[Frame]: ...
def increase_flow_control_window(self, increment: int) -> list[Frame]: ...
def receive_push_promise_in_band(self, promised_stream_id: int, headers: Iterable[Header], header_encoding: bool | str | None) -> tuple[list[Frame], list[Event]]: ...
def remotely_pushed(self, pushed_headers: Iterable[Header]) -> tuple[list[Frame], list[Event]]: ...
def receive_headers(self, headers: Iterable[Header], end_stream: bool, header_encoding: bool | str | None) -> tuple[list[Frame], list[Event]]: ...
def receive_data(self, data: bytes, end_stream: bool, flow_control_len: int) -> tuple[list[Frame], list[Event]]: ...
def receive_window_update(self, increment: int) -> tuple[list[Frame], list[Event]]: ...
def receive_continuation(self) -> None: ...
def receive_alt_svc(self, frame): ...
def reset_stream(self, error_code: int = ...): ...
def stream_reset(self, frame): ...
def acknowledge_received_data(self, acknowledged_size): ...
def receive_alt_svc(self, frame: AltSvcFrame) -> tuple[list[Frame], list[Event]]: ...
def reset_stream(self, error_code: ErrorCodes | int = 0) -> list[Frame]: ...
def stream_reset(self, frame: RstStreamFrame) -> tuple[list[Frame], list[Event]]: ...
def acknowledge_received_data(self, acknowledged_size: int) -> list[Frame]: ...

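The hunk above replaces `Incomplete` placeholders on `H2Stream` with concrete signatures: send-path methods now return frame lists, and receive-path methods return `(frames, events)` tuples. For orientation only, the sketch below shows the kind of call site those annotations ultimately serve, using h2's public connection API (which drives `H2Stream` internally); it is an illustrative snippet, not code from this diff or this repository.

# Illustrative only: a minimal, type-checkable client handshake against h2's
# public connection API. Nothing below is taken from the stub files above.
from h2.config import H2Configuration
from h2.connection import H2Connection

config = H2Configuration(client_side=True)
conn = H2Connection(config=config)
conn.initiate_connection()

stream_id = conn.get_next_available_stream_id()
conn.send_headers(
    stream_id,
    [
        (":method", "GET"),
        (":path", "/"),
        (":scheme", "https"),
        (":authority", "example.com"),
    ],
    end_stream=True,
)

wire_bytes: bytes = conn.data_to_send()  # bytes ready to write to the transport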
@@ -1,25 +1,32 @@
from typing import NamedTuple

import collections
from .exceptions import FlowControlError as FlowControlError, ProtocolError as ProtocolError
from _typeshed import Incomplete

from .exceptions import FlowControlError as FlowControlError
from .exceptions import ProtocolError as ProtocolError
from collections.abc import Generator, Iterable
from hpack.struct import Header as Header, HeaderWeaklyTyped as HeaderWeaklyTyped
from typing import Any, NamedTuple

UPPER_RE: Incomplete
SIGIL: Incomplete
INFORMATIONAL_START: Incomplete
CONNECTION_HEADERS: Incomplete

def extract_method_header(headers): ...
def is_informational_response(headers): ...
def guard_increment_window(current, increment): ...
def authority_from_headers(headers): ...
def extract_method_header(headers: Iterable[Header]) -> bytes | None: ...
def is_informational_response(headers: Iterable[Header]) -> bool: ...
def guard_increment_window(current: int, increment: int) -> int: ...
def authority_from_headers(headers: Iterable[Header]) -> bytes | None: ...

class HeaderValidationFlags(NamedTuple):
    is_client: Incomplete
    is_trailer: Incomplete
    is_response_header: Incomplete
    is_push_promise: Incomplete
    is_client: bool
    is_trailer: bool
    is_response_header: bool
    is_push_promise: bool

def validate_headers(headers, hdr_validation_flags): ...
def normalize_outbound_headers(headers, hdr_validation_flags): ...
def normalize_inbound_headers(headers, hdr_validation_flags): ...
def validate_outbound_headers(headers, hdr_validation_flags): ...
def validate_headers(headers: Iterable[Header], hdr_validation_flags: HeaderValidationFlags) -> Iterable[Header]: ...
def utf8_encode_headers(headers: Iterable[HeaderWeaklyTyped]) -> list[Header]: ...
def normalize_outbound_headers(headers: Iterable[Header], hdr_validation_flags: HeaderValidationFlags | None, should_split_outbound_cookies: bool = False) -> Generator[Header, None, None]: ...
def normalize_inbound_headers(headers: Iterable[Header], hdr_validation_flags: HeaderValidationFlags) -> Generator[Header, None, None]: ...
def validate_outbound_headers(headers: Iterable[Header], hdr_validation_flags: HeaderValidationFlags) -> Generator[Header, None, None]: ...

class SizeLimitDict(collections.OrderedDict[int, Any]):
    def __init__(self, *args: dict[int, int], **kwargs: Any) -> None: ...
    def __setitem__(self, key: int, value: Any | int) -> None: ...

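`guard_increment_window` is now annotated as `(current: int, increment: int) -> int`, which matches HTTP/2's hard cap on flow-control windows (2**31 - 1). The sketch below restates that contract as an approximation for readers; it is not h2's actual implementation, which raises its own `FlowControlError`.

# Approximate contract of the guard annotated above; ValueError is used only
# to keep this sketch self-contained, h2 itself raises FlowControlError.
LARGEST_FLOW_CONTROL_WINDOW = 2**31 - 1  # HTTP/2 maximum window size

def guard_increment_window(current: int, increment: int) -> int:
    new_size = current + increment
    if new_size > LARGEST_FLOW_CONTROL_WINDOW:
        raise ValueError(
            f"Flow control window must not exceed {LARGEST_FLOW_CONTROL_WINDOW}"
        )
    return new_size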
@@ -1,13 +1,12 @@
from _typeshed import Incomplete

from .exceptions import FlowControlError as FlowControlError
from _typeshed import Incomplete

LARGEST_FLOW_CONTROL_WINDOW: Incomplete

class WindowManager:
    max_window_size: Incomplete
    current_window_size: Incomplete
    def __init__(self, max_window_size) -> None: ...
    def window_consumed(self, size) -> None: ...
    def window_opened(self, size) -> None: ...
    def process_bytes(self, size): ...
    def __init__(self, max_window_size: int) -> None: ...
    def window_consumed(self, size: int) -> None: ...
    def window_opened(self, size: int) -> None: ...
    def process_bytes(self, size: int) -> int | None: ...

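`WindowManager.process_bytes` is now typed as returning `int | None`: either a window increment worth advertising or nothing yet. A hedged usage sketch follows, assuming the usual pattern of charging received bytes against the local window and emitting a WINDOW_UPDATE only when the manager hands back an increment; the helper function is hypothetical and not part of h2 or this repository.

# Hypothetical receive-path helper around h2's internal WindowManager.
from h2.windows import WindowManager

def acknowledge(manager: WindowManager, flow_controlled_len: int) -> int | None:
    # Assumed flow: the received DATA shrank our local window ...
    manager.window_consumed(flow_controlled_len)
    # ... and once the application has processed those bytes, the manager may
    # return an increment to advertise in a WINDOW_UPDATE frame, or None.
    return manager.process_bytes(flow_controlled_len)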
Submodule vendor/postgres-v14 updated: 46082f2088...c0aedfd3ca
Submodule vendor/postgres-v15 updated: dd0b28d6fb...355a7c69d3
Submodule vendor/postgres-v16 updated: d674efd776...3cf7ce1afa
Submodule vendor/postgres-v17 updated: a8dd6e779d...46f9b96555

vendor/revisions.json (vendored)
@@ -1,18 +1,18 @@
{
    "v17": [
        "17.2",
        "a8dd6e779dde907778006adb436b557ad652fb97"
        "46f9b96555e084c35dd975da9485996db9e86181"
    ],
    "v16": [
        "16.6",
        "d674efd776f59d78e8fa1535bd2f95c3e6984fca"
        "3cf7ce1afab75027716d14223f95ddb300754162"
    ],
    "v15": [
        "15.10",
        "dd0b28d6fbad39e227f3b77296fcca879af8b3a9"
        "355a7c69d3f907f3612eb406cc7b9c2f55d59b59"
    ],
    "v14": [
        "14.15",
        "46082f20884f087a2d974b33ac65d63af26142bd"
        "c0aedfd3cac447510a2db843b561f0c52901b679"
    ]
}
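`vendor/revisions.json` pins each vendored Postgres major version to a release number and an exact commit, and the submodule bumps above move the checked-out trees to those same commits. As a hypothetical sanity check (not a script shipped in this repository), the two can be compared like this:

# Hypothetical check that each pinned SHA matches the submodule's checkout.
import json
import subprocess

with open("vendor/revisions.json") as f:
    revisions: dict[str, list[str]] = json.load(f)

for major, (pg_version, pinned_sha) in revisions.items():
    head = subprocess.check_output(
        ["git", "-C", f"vendor/postgres-{major}", "rev-parse", "HEAD"],
        text=True,
    ).strip()
    status = "ok" if head == pinned_sha else f"MISMATCH (submodule at {head})"
    print(f"{major} ({pg_version}): {status}")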
Some files were not shown because too many files have changed in this diff.