Mirror of https://github.com/neondatabase/neon.git
[compute_ctl] Do not create availability checker data on each start (#4019)
Initially, the idea was to ensure that by the time we come to check data availability, the special service table already contains one row, so that if we lose it for some reason, we error out. Yet to run the availability check we have to start compute first anyway, so this doesn't really add any value, while it does affect every compute start, since we update at least one row in the database. It also writes some WAL, so if the timeline is close to `neon.max_cluster_size`, it could prevent compute from starting up. Instead, do CREATE TABLE IF NOT EXISTS + UPSERT right in the `/check_writability` handler.
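Concretely, the handler now runs one idempotent statement pair, so the first call creates the service table and every call bumps the timestamp. A sketch of that SQL (the `updated_at` column definition falls between the hunks below and is reconstructed here from the UPSERT):

    CREATE TABLE IF NOT EXISTS health_check (
        id serial primary key,
        updated_at timestamptz default now()
    );
    INSERT INTO health_check VALUES (1, now())
    ON CONFLICT (id) DO UPDATE
     SET updated_at = now();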
@@ -1,12 +1,28 @@
 use anyhow::{anyhow, Result};
-use postgres::Client;
 use tokio_postgres::NoTls;
 use tracing::{error, instrument};
 
 use crate::compute::ComputeNode;
 
+/// Update timestamp in a row in a special service table to check
+/// that we can actually write some data in this particular timeline.
+/// Create table if it's missing.
 #[instrument(skip_all)]
-pub fn create_writability_check_data(client: &mut Client) -> Result<()> {
+pub async fn check_writability(compute: &ComputeNode) -> Result<()> {
+    // Connect to the database.
+    let (client, connection) = tokio_postgres::connect(compute.connstr.as_str(), NoTls).await?;
+    if client.is_closed() {
+        return Err(anyhow!("connection to postgres closed"));
+    }
+
+    // The connection object performs the actual communication with the database,
+    // so spawn it off to run on its own.
+    tokio::spawn(async move {
+        if let Err(e) = connection.await {
+            error!("connection error: {}", e);
+        }
+    });
+
     let query = "
         CREATE TABLE IF NOT EXISTS health_check (
             id serial primary key,
@@ -15,31 +31,15 @@ pub fn create_writability_check_data(client: &mut Client) -> Result<()> {
         INSERT INTO health_check VALUES (1, now())
         ON CONFLICT (id) DO UPDATE
          SET updated_at = now();";
-    let result = client.simple_query(query)?;
-    if result.len() < 2 {
-        return Err(anyhow::format_err!("executed {} queries", result.len()));
-    }
-    Ok(())
-}
-#[instrument(skip_all)]
-pub async fn check_writability(compute: &ComputeNode) -> Result<()> {
-    let (client, connection) = tokio_postgres::connect(compute.connstr.as_str(), NoTls).await?;
-    if client.is_closed() {
-        return Err(anyhow!("connection to postgres closed"));
-    }
-    tokio::spawn(async move {
-        if let Err(e) = connection.await {
-            error!("connection error: {}", e);
-        }
-    });
 
-    let result = client
-        .simple_query("UPDATE health_check SET updated_at = now() WHERE id = 1;")
-        .await?;
-
-    if result.len() != 1 {
-        return Err(anyhow!("statement can't be executed"));
-    }
+    let result = client.simple_query(query).await?;
+    if result.len() != 2 {
+        return Err(anyhow::format_err!(
+            "expected 2 query results, but got {}",
+            result.len()
+        ));
+    }
 
     Ok(())
 }
@@ -32,7 +32,6 @@ use utils::lsn::Lsn;
 use compute_api::responses::{ComputeMetrics, ComputeStatus};
 use compute_api::spec::ComputeSpec;
 
-use crate::checker::create_writability_check_data;
 use crate::config;
 use crate::pg_helpers::*;
 use crate::spec::*;
@@ -342,7 +341,6 @@ impl ComputeNode {
         handle_databases(spec, &mut client)?;
         handle_role_deletions(spec, self.connstr.as_str(), &mut client)?;
         handle_grants(spec, self.connstr.as_str(), &mut client)?;
-        create_writability_check_data(&mut client)?;
         handle_extensions(spec, &mut client)?;
 
         // 'Close' connection
@@ -85,7 +85,10 @@ async fn routes(req: Request<Body>, compute: &Arc<ComputeNode>) -> Response<Body
             let res = crate::checker::check_writability(compute).await;
             match res {
                 Ok(_) => Response::new(Body::from("true")),
-                Err(e) => Response::new(Body::from(e.to_string())),
+                Err(e) => {
+                    error!("check_writability failed: {}", e);
+                    Response::new(Body::from(e.to_string()))
+                }
             }
         }
 
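As a quick manual sanity check after calling the endpoint, one can watch the timestamp advance with a plain query against the same database the compute connection string points to (assuming a psql session; the table and column names come from the diff above):

    SELECT id, updated_at FROM health_check;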