mirror of
https://github.com/neondatabase/neon.git
synced 2026-01-13 08:22:55 +00:00
## Problem

Storage controller restarts cause temporary unavailability from the control plane POV. See RFC for more details.

## Summary of changes

* A couple of small refactors of the storage controller start-up sequence to make extending it easier.
* A leader table is added to track the storage controller instance that's currently the leader (if any).
* A peer client is added such that storage controllers can send `step_down` requests to each other (implemented in https://github.com/neondatabase/neon/pull/8512).
* Implement the leader cut-over as described in the RFC.
* Add a `start-as-candidate` flag to the storage controller to gate the rolling restart behaviour. When the flag is `false` (the default), the only change from the current start-up sequence is persisting the leader entry to the database.
47 lines · 1.2 KiB · Rust
// @generated automatically by Diesel CLI.
|
|
|
|
diesel::table! {
|
|
controllers (address, started_at) {
|
|
address -> Varchar,
|
|
started_at -> Timestamptz,
|
|
}
|
|
}
|
|
|
|
diesel::table! {
|
|
metadata_health (tenant_id, shard_number, shard_count) {
|
|
tenant_id -> Varchar,
|
|
shard_number -> Int4,
|
|
shard_count -> Int4,
|
|
healthy -> Bool,
|
|
last_scrubbed_at -> Timestamptz,
|
|
}
|
|
}
|
|
|
|
diesel::table! {
|
|
nodes (node_id) {
|
|
node_id -> Int8,
|
|
scheduling_policy -> Varchar,
|
|
listen_http_addr -> Varchar,
|
|
listen_http_port -> Int4,
|
|
listen_pg_addr -> Varchar,
|
|
listen_pg_port -> Int4,
|
|
}
|
|
}
|
|
|
|
diesel::table! {
|
|
tenant_shards (tenant_id, shard_number, shard_count) {
|
|
tenant_id -> Varchar,
|
|
shard_number -> Int4,
|
|
shard_count -> Int4,
|
|
shard_stripe_size -> Int4,
|
|
generation -> Nullable<Int4>,
|
|
generation_pageserver -> Nullable<Int8>,
|
|
placement_policy -> Varchar,
|
|
splitting -> Int2,
|
|
config -> Text,
|
|
scheduling_policy -> Varchar,
|
|
}
|
|
}
|
|
|
|
diesel::allow_tables_to_appear_in_same_query!(controllers, metadata_health, nodes, tenant_shards,);
|