Compare commits

...

13 Commits

Author SHA1 Message Date
Anastasia Lubennikova
263a3ea5e3 Add script export_import_betwen_pageservers.py to migrate projects between pageservers 2022-07-05 15:27:31 +03:00
Heikki Linnakangas
bb69e0920c Do not overwrite an existing image layer.
See github issues #1594 and #1690

Co-authored-by: Konstantin Knizhnik <knizhnik@neon.tech>
2022-07-05 14:45:31 +03:00
Alexander Bayandin
05f6a1394d Add tests for different Postgres client libraries (#2008)
* Add tests for different postgres clients
* test/fixtures: sanitize test name for test_output_dir
* test/fixtures: do not look for etcd before runtime
* Add workflow for testing Postgres client libraries
2022-07-05 12:22:58 +01:00
Heikki Linnakangas
844832ffe4 Bump vendor/postgres
Contains changes from two PRs in vendor/postgres:
- https://github.com/neondatabase/postgres/pull/163
- https://github.com/neondatabase/postgres/pull/176
2022-07-05 10:55:03 +03:00
bojanserafimov
d29c545b5d Gc/compaction thread pool, take 2 (#1933)
Decrease the number of pageserver threads by running gc and compaction in a blocking tokio thread pool
2022-07-05 02:06:40 -04:00
Kirill Bulatov
6abdb12724 Fix 1.62 Clippy errors 2022-07-04 23:46:37 +03:00
Alexander Bayandin
7898e72990 Remove duplicated checks from LocalEnv 2022-07-04 22:35:00 +03:00
Dmitry Rodionov
65704708fa remove unused imports, make more use of pathlib.Path 2022-07-01 18:56:51 +03:00
Arseny Sher
6100a02d0f Prefix WAL files in s3 with environment name.
It wasn't merged to prod yet, so safe to enable.
2022-07-01 19:21:28 +04:00
Arseny Sher
97fed38213 Fix cadaca010c for older ssh clients. 2022-07-01 19:20:59 +04:00
Arseny Sher
cadaca010c Make ansible to work with storage nodes through teleport from local box. 2022-07-01 16:58:34 +03:00
Bojan Serafimov
f09c09438a Fix gc after import 2022-07-01 11:10:49 +03:00
Dmitry Rodionov
00fc696606 replace extra urlencode dependency with already present url library 2022-06-30 14:32:15 +03:00
74 changed files with 1470 additions and 281 deletions

View File

@@ -6,5 +6,7 @@ timeout = 30
 [ssh_connection]
 ssh_args = -F ./ansible.ssh.cfg
-scp_if_ssh = True
+# teleport doesn't support sftp yet https://github.com/gravitational/teleport/issues/7127
+# and scp didn't work for me either
+transfer_method = piped
 pipelining = True

View File

@@ -1,3 +1,7 @@
+# Remove this once https://github.com/gravitational/teleport/issues/10918 is fixed
+# (use pre 8.5 option name to cope with old ssh in CI)
+PubkeyAcceptedKeyTypes +ssh-rsa-cert-v01@openssh.com
+
 Host tele.zenith.tech
     User admin
     Port 3023

View File

@@ -12,6 +12,7 @@ pageservers
 safekeepers

 [storage:vars]
+env_name = prod-1
 console_mgmt_base_url = http://console-release.local
 bucket_name = zenith-storage-oregon
 bucket_region = us-west-2

View File

@@ -13,6 +13,7 @@ pageservers
 safekeepers

 [storage:vars]
+env_name = us-stage
 console_mgmt_base_url = http://console-staging.local
 bucket_name = zenith-staging-storage-us-east-1
 bucket_region = us-east-1

View File

@@ -6,7 +6,7 @@ After=network.target auditd.service
 Type=simple
 User=safekeeper
 Environment=RUST_BACKTRACE=1 NEON_REPO_DIR=/storage/safekeeper/data LD_LIBRARY_PATH=/usr/local/lib
-ExecStart=/usr/local/bin/safekeeper -l {{ inventory_hostname }}.local:6500 --listen-http {{ inventory_hostname }}.local:7676 -p {{ first_pageserver }}:6400 -D /storage/safekeeper/data --broker-endpoints={{ etcd_endpoints }} --remote-storage='{bucket_name="{{bucket_name}}", bucket_region="{{bucket_region}}", prefix_in_bucket="wal"}'
+ExecStart=/usr/local/bin/safekeeper -l {{ inventory_hostname }}.local:6500 --listen-http {{ inventory_hostname }}.local:7676 -D /storage/safekeeper/data --broker-endpoints={{ etcd_endpoints }} --remote-storage='{bucket_name="{{bucket_name}}", bucket_region="{{bucket_region}}", prefix_in_bucket="{{ env_name }}/wal"}'
 ExecReload=/bin/kill -HUP $MAINPID
 KillMode=mixed
 KillSignal=SIGINT

.github/workflows/pg_clients.yml vendored Normal file
View File

@@ -0,0 +1,74 @@
name: Test Postgres client libraries
on:
  schedule:
    # * is a special character in YAML so you have to quote this string
    # ┌───────────── minute (0 - 59)
    # │ ┌───────────── hour (0 - 23)
    # │ │ ┌───────────── day of the month (1 - 31)
    # │ │ │ ┌───────────── month (1 - 12 or JAN-DEC)
    # │ │ │ │ ┌───────────── day of the week (0 - 6 or SUN-SAT)
    - cron: '23 02 * * *'  # run once a day, timezone is UTC
  workflow_dispatch:

concurrency:
  group: ${{ github.workflow }}-${{ github.ref }}
  cancel-in-progress: true

jobs:
  test-postgres-client-libs:
    runs-on: [ ubuntu-latest ]

    steps:
      - name: Checkout
        uses: actions/checkout@v3

      - uses: actions/setup-python@v4
        with:
          python-version: 3.9

      - name: Install Poetry
        uses: snok/install-poetry@v1

      - name: Cache poetry deps
        id: cache_poetry
        uses: actions/cache@v3
        with:
          path: ~/.cache/pypoetry/virtualenvs
          key: v1-${{ runner.os }}-python-deps-${{ hashFiles('poetry.lock') }}

      - name: Install Python deps
        shell: bash -ex {0}
        run: ./scripts/pysync

      - name: Run pytest
        env:
          REMOTE_ENV: 1
          BENCHMARK_CONNSTR: "${{ secrets.BENCHMARK_STAGING_CONNSTR }}"
          TEST_OUTPUT: /tmp/test_output
          POSTGRES_DISTRIB_DIR: /tmp/neon/pg_install
          # this variable will be embedded in the perf test report
          # and is needed to distinguish different environments
          PLATFORM: github-actions-selfhosted
        shell: bash -ex {0}
        run: |
          # The test framework expects a psql binary to be present;
          # since we don't actually need it in this test, mock it.
          mkdir -p "$POSTGRES_DISTRIB_DIR/bin" && touch "$POSTGRES_DISTRIB_DIR/bin/psql"
          ./scripts/pytest \
            --junitxml=$TEST_OUTPUT/junit.xml \
            --tb=short \
            --verbose \
            -m "remote_cluster" \
            -rA "test_runner/pg_clients"

      - name: Post to a Slack channel
        if: failure()
        id: slack
        uses: slackapi/slack-github-action@v1
        with:
          channel-id: "C033QLM5P7D"  # dev-staging-stream
          slack-message: "Testing Postgres clients: ${{ job.status }}\n${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}"
        env:
          SLACK_BOT_TOKEN: ${{ secrets.SLACK_BOT_TOKEN }}

Cargo.lock generated
View File

@@ -461,7 +461,7 @@ dependencies = [
  "tar",
  "tokio",
  "tokio-postgres",
- "urlencoding",
+ "url",
  "workspace_hack",
 ]

@@ -3685,12 +3685,6 @@ dependencies = [
  "percent-encoding",
 ]

-[[package]]
-name = "urlencoding"
-version = "2.1.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "68b90931029ab9b034b300b797048cf23723400aa757e8a2bfb9d748102f9821"
-
 [[package]]
 name = "utils"
 version = "0.1.0"

View File

@@ -18,5 +18,5 @@ serde_json = "1"
 tar = "0.4"
 tokio = { version = "1.17", features = ["macros", "rt", "rt-multi-thread"] }
 tokio-postgres = { git = "https://github.com/zenithdb/rust-postgres.git", rev="d052ee8b86fff9897c77b0fe89ea9daba0e1fa38" }
-urlencoding = "2.1.0"
+url = "2.2.2"
 workspace_hack = { version = "0.1", path = "../workspace_hack" }

View File

@@ -33,7 +33,7 @@ use std::process::exit;
 use std::sync::{Arc, RwLock};
 use std::{thread, time::Duration};

-use anyhow::Result;
+use anyhow::{Context, Result};
 use chrono::Utc;
 use clap::Arg;
 use log::{error, info};

@@ -45,6 +45,7 @@ use compute_tools::monitor::launch_monitor;
 use compute_tools::params::*;
 use compute_tools::pg_helpers::*;
 use compute_tools::spec::*;
+use url::Url;

 fn main() -> Result<()> {
     // TODO: re-use `utils::logging` later

@@ -131,7 +132,7 @@ fn main() -> Result<()> {
     let compute_state = ComputeNode {
         start_time: Utc::now(),
-        connstr: connstr.to_string(),
+        connstr: Url::parse(connstr).context("cannot parse connstr as a URL")?,
         pgdata: pgdata.to_string(),
         pgbin: pgbin.to_string(),
         spec,

View File

@@ -1,5 +1,3 @@
-use std::sync::Arc;
-
 use anyhow::{anyhow, Result};
 use log::error;
 use postgres::Client;

@@ -23,9 +21,8 @@ pub fn create_writablity_check_data(client: &mut Client) -> Result<()> {
     Ok(())
 }

-pub async fn check_writability(compute: &Arc<ComputeNode>) -> Result<()> {
-    let connstr = &compute.connstr;
-    let (client, connection) = tokio_postgres::connect(connstr, NoTls).await?;
+pub async fn check_writability(compute: &ComputeNode) -> Result<()> {
+    let (client, connection) = tokio_postgres::connect(compute.connstr.as_str(), NoTls).await?;
     if client.is_closed() {
         return Err(anyhow!("connection to postgres closed"));
     }

View File

@@ -35,7 +35,8 @@ use crate::spec::*;
 /// Compute node info shared across several `compute_ctl` threads.
 pub struct ComputeNode {
     pub start_time: DateTime<Utc>,
-    pub connstr: String,
+    // Url type maintains proper escaping
+    pub connstr: url::Url,
     pub pgdata: String,
     pub pgbin: String,
     pub spec: ComputeSpec,

@@ -268,21 +269,25 @@ impl ComputeNode {
         // In this case we need to connect with the old `zenith_admin` name
         // and create a new user. We cannot simply rename the connected user,
         // but we can create a new one and grant it all privileges.
-        let mut client = match Client::connect(&self.connstr, NoTls) {
+        let mut client = match Client::connect(self.connstr.as_str(), NoTls) {
             Err(e) => {
                 info!(
                     "cannot connect to postgres: {}, retrying with `zenith_admin` username",
                     e
                 );
-                let zenith_admin_connstr = self.connstr.replacen("cloud_admin", "zenith_admin", 1);
-                let mut client = Client::connect(&zenith_admin_connstr, NoTls)?;
+                let mut zenith_admin_connstr = self.connstr.clone();
+                zenith_admin_connstr
+                    .set_username("zenith_admin")
+                    .map_err(|_| anyhow::anyhow!("invalid connstr"))?;
+                let mut client = Client::connect(zenith_admin_connstr.as_str(), NoTls)?;
                 client.simple_query("CREATE USER cloud_admin WITH SUPERUSER")?;
                 client.simple_query("GRANT zenith_admin TO cloud_admin")?;
                 drop(client);

                 // reconnect with connstring with the expected name
-                Client::connect(&self.connstr, NoTls)?
+                Client::connect(self.connstr.as_str(), NoTls)?
             }
             Ok(client) => client,
         };
View File

@@ -13,11 +13,11 @@ const MONITOR_CHECK_INTERVAL: u64 = 500; // milliseconds
 // Spin in a loop and figure out the last activity time in the Postgres.
 // Then update it in the shared state. This function never errors out.
 // XXX: the only expected panic is at `RwLock` unwrap().
-fn watch_compute_activity(compute: &Arc<ComputeNode>) {
+fn watch_compute_activity(compute: &ComputeNode) {
     // Suppose that `connstr` doesn't change
-    let connstr = compute.connstr.clone();
+    let connstr = compute.connstr.as_str();

     // Define `client` outside of the loop to reuse existing connection if it's active.
-    let mut client = Client::connect(&connstr, NoTls);
+    let mut client = Client::connect(connstr, NoTls);
     let timeout = time::Duration::from_millis(MONITOR_CHECK_INTERVAL);
     info!("watching Postgres activity at {}", connstr);

@@ -32,7 +32,7 @@ fn watch_compute_activity(compute: &ComputeNode) {
             info!("connection to postgres closed, trying to reconnect");

             // Connection is closed, reconnect and try again.
-            client = Client::connect(&connstr, NoTls);
+            client = Client::connect(connstr, NoTls);
             continue;
         }

@@ -93,7 +93,7 @@ fn watch_compute_activity(compute: &ComputeNode) {
             debug!("cannot connect to postgres: {}, retrying", e);

             // Establish a new connection and try again.
-            client = Client::connect(&connstr, NoTls);
+            client = Client::connect(connstr, NoTls);
         }
     }
 }

View File

@@ -1,3 +1,4 @@
+use std::fmt::Write;
 use std::fs::File;
 use std::io::{BufRead, BufReader};
 use std::net::{SocketAddr, TcpStream};

@@ -138,9 +139,11 @@ impl Role {
             // Now we also support SCRAM-SHA-256 and to preserve compatibility
             // we treat all encrypted_password as md5 unless they start with SCRAM-SHA-256.
             if pass.starts_with("SCRAM-SHA-256") {
-                params.push_str(&format!(" PASSWORD '{}'", pass));
+                write!(params, " PASSWORD '{pass}'")
+                    .expect("String is documented to not error during write operations");
             } else {
-                params.push_str(&format!(" PASSWORD 'md5{}'", pass));
+                write!(params, " PASSWORD 'md5{pass}'")
+                    .expect("String is documented to not error during write operations");
             }
         } else {
             params.push_str(" PASSWORD NULL");

@@ -158,7 +161,8 @@ impl Database {
     /// it may require proper quoting too.
     pub fn to_pg_options(&self) -> String {
         let mut params: String = self.options.as_pg_options();
-        params.push_str(&format!(" OWNER {}", &self.owner.quote()));
+        write!(params, " OWNER {}", &self.owner.quote())
+            .expect("String is documented to not error during write operations");
         params
     }
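Why `write!` instead of `push_str(&format!(...))`: formatting directly into the existing `String` skips the temporary `String` that `format!` allocates on every call, and the `fmt::Write` implementation for `String` is infallible, so the `expect` can never fire. A tiny standalone illustration (the role name and password here are invented):

use std::fmt::Write;

fn main() {
    let mut params = String::from("CREATE ROLE admin");
    // Appends in place; no intermediate String is allocated.
    write!(params, " PASSWORD '{}'", "md5abc123").expect("writing to a String cannot fail");
    assert_eq!(params, "CREATE ROLE admin PASSWORD 'md5abc123'");
}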

View File

@@ -4,7 +4,6 @@ use anyhow::Result;
 use log::{info, log_enabled, warn, Level};
 use postgres::{Client, NoTls};
 use serde::Deserialize;
-use urlencoding::encode;

 use crate::compute::ComputeNode;
 use crate::config;

@@ -231,9 +230,11 @@ pub fn handle_role_deletions(node: &ComputeNode, client: &mut Client) -> Result<
 fn reassign_owned_objects(node: &ComputeNode, role_name: &PgIdent) -> Result<()> {
     for db in &node.spec.cluster.databases {
         if db.owner != *role_name {
-            let db_name_encoded = format!("/{}", encode(&db.name));
-            let db_connstr = node.connstr.replacen("/postgres", &db_name_encoded, 1);
-            let mut client = Client::connect(&db_connstr, NoTls)?;
+            let mut connstr = node.connstr.clone();
+            // database name is always the last and the only component of the path
+            connstr.set_path(&db.name);
+            let mut client = Client::connect(connstr.as_str(), NoTls)?;

             // This will reassign all dependent objects to the db owner
             let reassign_query = format!(
View File

@@ -403,16 +403,6 @@ impl LocalEnv {
                 self.pg_distrib_dir.display()
             );
         }
-        for binary in ["pageserver", "safekeeper"] {
-            if !self.zenith_distrib_dir.join(binary).exists() {
-                bail!(
-                    "Can't find binary '{}' in zenith distrib dir '{}'",
-                    binary,
-                    self.zenith_distrib_dir.display()
-                );
-            }
-        }
         for binary in ["pageserver", "safekeeper"] {
             if !self.zenith_distrib_dir.join(binary).exists() {
                 bail!(

@@ -421,12 +411,6 @@ impl LocalEnv {
                 );
             }
         }
-        if !self.pg_distrib_dir.join("bin/postgres").exists() {
-            bail!(
-                "Can't find postgres binary at {}",
-                self.pg_distrib_dir.display()
-            );
-        }

         fs::create_dir(&base_path)?;

View File

@@ -263,6 +263,8 @@ fn start_pageserver(conf: &'static PageServerConf, daemonize: bool) -> Result<()
     // start profiler (if enabled)
     let profiler_guard = profiling::init_profiler(conf);

+    pageserver::tenant_tasks::init_tenant_task_pool()?;
+
     // initialize authentication for incoming connections
     let auth = match &conf.auth_type {
         AuthType::Trust | AuthType::MD5 => None,

View File

@@ -158,6 +158,18 @@ pub struct LayeredRepository {
     // Global pageserver config parameters
     pub conf: &'static PageServerConf,

+    // Allows us to gracefully cancel operations that edit the directory
+    // that backs this layered repository. Usage:
+    //
+    // Use `let _guard = file_lock.try_read()` while writing any files.
+    // Use `let _guard = file_lock.write().unwrap()` to wait for all writes to finish.
+    //
+    // TODO try_read this lock during checkpoint as well to prevent race
+    // between checkpoint and detach/delete.
+    // TODO try_read this lock for all gc/compaction operations, not just
+    // ones scheduled by the tenant task manager.
+    pub file_lock: RwLock<()>,
+
     // Overridden tenant-specific config parameters.
     // We keep TenantConfOpt struct here to preserve the information
     // about parameters that are not set.

@@ -685,6 +697,7 @@ impl LayeredRepository {
     ) -> LayeredRepository {
         LayeredRepository {
             tenant_id,
+            file_lock: RwLock::new(()),
             conf,
             tenant_conf: Arc::new(RwLock::new(tenant_conf)),
             timelines: Mutex::new(HashMap::new()),

@@ -1910,15 +1923,28 @@ impl LayeredTimeline {
             } else {
                 Lsn(0)
             };
-            let num_deltas = layers.count_deltas(&img_range, &(img_lsn..lsn))?;

-            debug!(
-                "range {}-{}, has {} deltas on this timeline",
-                img_range.start, img_range.end, num_deltas
-            );
-            if num_deltas >= self.get_image_creation_threshold() {
-                return Ok(true);
-            }
+            // Let's consider an example:
+            //
+            // delta layer with LSN range 71-81
+            // delta layer with LSN range 81-91
+            // delta layer with LSN range 91-101
+            // image layer at LSN 100
+            //
+            // If 'lsn' is still 100, i.e. no new WAL has been processed since the last image layer,
+            // there's no need to create a new one. We check this case explicitly, to avoid passing
+            // a bogus range to count_deltas below, with start > end. It's even possible that there
+            // are some delta layers *later* than the current 'lsn', if more WAL was processed and flushed
+            // after we read last_record_lsn, which is passed here in the 'lsn' argument.
+            if img_lsn < lsn {
+                let num_deltas = layers.count_deltas(&img_range, &(img_lsn..lsn))?;
+                debug!(
+                    "key range {}-{}, has {} deltas on this timeline in LSN range {}..{}",
+                    img_range.start, img_range.end, num_deltas, img_lsn, lsn
+                );
+                if num_deltas >= self.get_image_creation_threshold() {
+                    return Ok(true);
+                }
+            }
         }
     }

@@ -2210,6 +2236,9 @@ impl LayeredTimeline {
                 LsnForTimestamp::Past(lsn) => {
                     debug!("past({})", lsn);
                 }
+                LsnForTimestamp::NoData(lsn) => {
+                    debug!("nodata({})", lsn);
+                }
             }
             debug!("pitr_cutoff_lsn = {:?}", pitr_cutoff_lsn)
         }
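The `file_lock` comment above describes a compact idiom worth spelling out: background writers take the read side with `try_read()` (many can run at once, and they skip work rather than block once a shutdown has begun), while detach/stop takes the write side to wait for all in-flight writers to finish. A minimal sketch of the pattern with illustrative names, not the pageserver's actual API:

use std::sync::RwLock;

struct Repo {
    /// Guards the on-disk directory backing this repository.
    file_lock: RwLock<()>,
}

fn background_iteration(repo: &Repo) {
    // try_read() fails only while the write lock is held (or contended),
    // i.e. while the tenant is being stopped; bail out in that case.
    if let Ok(_guard) = repo.file_lock.try_read() {
        // ... safe to write layer files here ...
    }
}

fn stop_tenant(repo: &Repo) {
    // Blocks until every in-flight background iteration drops its read guard.
    let _exclusive = repo.file_lock.write().unwrap();
    // ... now nothing else is writing to the tenant directory ...
}

fn main() {
    let repo = Repo { file_lock: RwLock::new(()) };
    background_iteration(&repo);
    stop_tenant(&repo);
}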

View File

@@ -34,7 +34,7 @@ pub trait BlobCursor {
     ) -> Result<(), std::io::Error>;
 }

-impl<'a, R> BlobCursor for BlockCursor<R>
+impl<R> BlobCursor for BlockCursor<R>
 where
     R: BlockReader,
 {

View File

@@ -445,7 +445,10 @@ impl ImageLayerWriter {
             },
         );
         info!("new image layer {}", path.display());
-        let mut file = VirtualFile::create(&path)?;
+        let mut file = VirtualFile::open_with_options(
+            &path,
+            std::fs::OpenOptions::new().write(true).create_new(true),
+        )?;

         // make room for the header block
         file.seek(SeekFrom::Start(PAGE_SZ as u64))?;
         let blob_writer = WriteBlobWriter::new(file, PAGE_SZ as u64);
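This hunk is the heart of "Do not overwrite an existing image layer" (issues #1594 and #1690): `create_new(true)` asks the OS to create the file and fail with `AlreadyExists` if it is already there, where a plain create would silently truncate the existing layer. The standard-library behavior in isolation (the file path is made up for the demo):

use std::fs::OpenOptions;
use std::io::ErrorKind;

fn main() -> std::io::Result<()> {
    let path = "/tmp/example_image_layer";
    let _ = std::fs::remove_file(path); // clean slate for the demo

    let open = || {
        OpenOptions::new()
            .write(true)
            .create_new(true) // create-or-fail: never truncate an existing file
            .open(path)
    };
    let _first = open()?; // creates the file
    // A second attempt fails instead of clobbering the existing "layer".
    assert_eq!(open().unwrap_err().kind(), ErrorKind::AlreadyExists);
    Ok(())
}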

View File

@@ -13,7 +13,7 @@ pub mod repository;
 pub mod storage_sync;
 pub mod tenant_config;
 pub mod tenant_mgr;
-pub mod tenant_threads;
+pub mod tenant_tasks;
 pub mod thread_mgr;
 pub mod timelines;
 pub mod virtual_file;

View File

@@ -554,7 +554,7 @@ impl PageServerHandler {
         // Create empty timeline
         info!("creating new timeline");
         let repo = tenant_mgr::get_repository_for_tenant(tenant_id)?;
-        let timeline = repo.create_empty_timeline(timeline_id, Lsn(0))?;
+        let timeline = repo.create_empty_timeline(timeline_id, base_lsn)?;
         let repartition_distance = repo.get_checkpoint_distance();
         let mut datadir_timeline =
             DatadirTimeline::<LayeredRepository>::new(timeline, repartition_distance);

@@ -1151,6 +1151,7 @@ impl postgres_backend::Handler for PageServerHandler {
             LsnForTimestamp::Present(lsn) => format!("{}", lsn),
             LsnForTimestamp::Future(_lsn) => "future".into(),
             LsnForTimestamp::Past(_lsn) => "past".into(),
+            LsnForTimestamp::NoData(_lsn) => "nodata".into(),
         };
         pgb.write_message_noflush(&BeMessage::DataRow(&[Some(result.as_bytes())]))?;
         pgb.write_message(&BeMessage::CommandComplete(b"SELECT 1"))?;

View File

@@ -51,6 +51,7 @@ pub enum LsnForTimestamp {
     Present(Lsn),
     Future(Lsn),
     Past(Lsn),
+    NoData(Lsn),
 }

 impl<R: Repository> DatadirTimeline<R> {

@@ -263,7 +264,7 @@ impl<R: Repository> DatadirTimeline<R> {
             (false, false) => {
                 // This can happen if no commit records have been processed yet, e.g.
                 // just after importing a cluster.
-                bail!("no commit timestamps found");
+                Ok(LsnForTimestamp::NoData(max_lsn))
             }
             (true, false) => {
                 // Didn't find any commit timestamps larger than the request

View File

@@ -81,6 +81,12 @@ mod profiling_impl {
     pub struct DummyProfilerGuard;

+    impl Drop for DummyProfilerGuard {
+        fn drop(&mut self) {
+            // do nothing, this exists to calm Clippy down
+        }
+    }
+
     pub fn profpoint_start(
         _conf: &PageServerConf,
         _point: ProfilingConfig,

View File

@@ -230,8 +230,6 @@ pub fn shutdown_all_tenants() {
     drop(m);

     thread_mgr::shutdown_threads(Some(ThreadKind::WalReceiverManager), None, None);
-    thread_mgr::shutdown_threads(Some(ThreadKind::GarbageCollector), None, None);
-    thread_mgr::shutdown_threads(Some(ThreadKind::Compactor), None, None);

     // Ok, no background threads running anymore. Flush any remaining data in
     // memory to disk.

@@ -330,44 +328,12 @@ pub fn set_tenant_state(tenant_id: ZTenantId, new_state: TenantState) -> anyhow:
         }
         (TenantState::Idle, TenantState::Active) => {
             info!("activating tenant {tenant_id}");
-            let compactor_spawn_result = thread_mgr::spawn(
-                ThreadKind::Compactor,
-                Some(tenant_id),
-                None,
-                "Compactor thread",
-                false,
-                move || crate::tenant_threads::compact_loop(tenant_id),
-            );
-            if compactor_spawn_result.is_err() {
-                let mut m = tenants_state::write_tenants();
-                m.get_mut(&tenant_id)
-                    .with_context(|| format!("Tenant not found for id {tenant_id}"))?
-                    .state = old_state;
-                drop(m);
-            }
-            compactor_spawn_result?;
-            let gc_spawn_result = thread_mgr::spawn(
-                ThreadKind::GarbageCollector,
-                Some(tenant_id),
-                None,
-                "GC thread",
-                false,
-                move || crate::tenant_threads::gc_loop(tenant_id),
-            )
-            .map(|_thread_id| ()) // update the `Result::Ok` type to match the outer function's return signature
-            .with_context(|| format!("Failed to launch GC thread for tenant {tenant_id}"));
-            if let Err(e) = &gc_spawn_result {
-                let mut m = tenants_state::write_tenants();
-                m.get_mut(&tenant_id)
-                    .with_context(|| format!("Tenant not found for id {tenant_id}"))?
-                    .state = old_state;
-                drop(m);
-                error!("Failed to start GC thread for tenant {tenant_id}, stopping its checkpointer thread: {e:?}");
-                thread_mgr::shutdown_threads(Some(ThreadKind::Compactor), Some(tenant_id), None);
-                return gc_spawn_result;
-            }
+
+            // Spawn gc and compaction loops. The loops will shut themselves
+            // down when they notice that the tenant is inactive.
+            // TODO maybe use tokio::sync::watch instead?
+            crate::tenant_tasks::start_compaction_loop(tenant_id)?;
+            crate::tenant_tasks::start_gc_loop(tenant_id)?;
         }
         (TenantState::Idle, TenantState::Stopping) => {
             info!("stopping idle tenant {tenant_id}");

@@ -379,8 +345,10 @@ pub fn set_tenant_state(tenant_id: ZTenantId, new_state: TenantState) -> anyhow:
                 Some(tenant_id),
                 None,
             );
-            thread_mgr::shutdown_threads(Some(ThreadKind::GarbageCollector), Some(tenant_id), None);
-            thread_mgr::shutdown_threads(Some(ThreadKind::Compactor), Some(tenant_id), None);
+
+            // Wait until all gc/compaction tasks finish
+            let repo = get_repository_for_tenant(tenant_id)?;
+            let _guard = repo.file_lock.write().unwrap();
         }
     }

View File

@@ -0,0 +1,288 @@
//! This module contains functions to serve per-tenant background processes,
//! such as compaction and GC

use std::collections::HashMap;
use std::ops::ControlFlow;
use std::time::Duration;

use crate::repository::Repository;
use crate::tenant_mgr::TenantState;
use crate::thread_mgr::ThreadKind;
use crate::{tenant_mgr, thread_mgr};
use anyhow::{self, Context};
use futures::stream::FuturesUnordered;
use futures::StreamExt;
use metrics::{register_int_counter_vec, IntCounterVec};
use once_cell::sync::{Lazy, OnceCell};
use tokio::sync::mpsc;
use tokio::sync::watch;
use tracing::*;
use utils::zid::ZTenantId;

static TENANT_TASK_EVENTS: Lazy<IntCounterVec> = Lazy::new(|| {
    register_int_counter_vec!(
        "pageserver_tenant_task_events",
        "Number of task start/stop/fail events.",
        &["event"],
    )
    .expect("Failed to register tenant_task_events metric")
});

///
/// Compaction task's main loop
///
async fn compaction_loop(tenantid: ZTenantId, mut cancel: watch::Receiver<()>) {
    loop {
        trace!("waking up");

        // Run blocking part of the task
        let period: Result<Result<_, anyhow::Error>, _> = tokio::task::spawn_blocking(move || {
            // Break if tenant is not active
            if tenant_mgr::get_tenant_state(tenantid) != Some(TenantState::Active) {
                return Ok(ControlFlow::Break(()));
            }

            // Break if we're not allowed to write to disk
            let repo = tenant_mgr::get_repository_for_tenant(tenantid)?;
            // TODO do this inside repo.compaction_iteration instead.
            let _guard = match repo.file_lock.try_read() {
                Ok(g) => g,
                Err(_) => return Ok(ControlFlow::Break(())),
            };

            // Run compaction
            let compaction_period = repo.get_compaction_period();
            repo.compaction_iteration()?;
            Ok(ControlFlow::Continue(compaction_period))
        })
        .await;

        // Decide whether to sleep or break
        let sleep_duration = match period {
            Ok(Ok(ControlFlow::Continue(period))) => period,
            Ok(Ok(ControlFlow::Break(()))) => break,
            Ok(Err(e)) => {
                error!("Compaction failed, retrying: {}", e);
                Duration::from_secs(2)
            }
            Err(e) => {
                error!("Compaction join error, retrying: {}", e);
                Duration::from_secs(2)
            }
        };

        // Sleep
        tokio::select! {
            _ = cancel.changed() => {
                trace!("received cancellation request");
                break;
            },
            _ = tokio::time::sleep(sleep_duration) => {},
        }
    }

    trace!(
        "compaction loop stopped. State is {:?}",
        tenant_mgr::get_tenant_state(tenantid)
    );
}

static START_GC_LOOP: OnceCell<mpsc::Sender<ZTenantId>> = OnceCell::new();
static START_COMPACTION_LOOP: OnceCell<mpsc::Sender<ZTenantId>> = OnceCell::new();

/// Spawn a task that will periodically schedule garbage collection until
/// the tenant becomes inactive. This should be called on tenant
/// activation.
pub fn start_gc_loop(tenantid: ZTenantId) -> anyhow::Result<()> {
    START_GC_LOOP
        .get()
        .context("Failed to get START_GC_LOOP")?
        .blocking_send(tenantid)
        .context("Failed to send to START_GC_LOOP channel")?;
    Ok(())
}

/// Spawn a task that will periodically schedule compaction until
/// the tenant becomes inactive. This should be called on tenant
/// activation.
pub fn start_compaction_loop(tenantid: ZTenantId) -> anyhow::Result<()> {
    START_COMPACTION_LOOP
        .get()
        .context("failed to get START_COMPACTION_LOOP")?
        .blocking_send(tenantid)
        .context("failed to send to START_COMPACTION_LOOP")?;
    Ok(())
}

/// Spawn the TenantTaskManager
/// This needs to be called before start_gc_loop or start_compaction_loop
pub fn init_tenant_task_pool() -> anyhow::Result<()> {
    let runtime = tokio::runtime::Builder::new_multi_thread()
        .thread_name("tenant-task-worker")
        .worker_threads(40) // Way more than necessary
        .max_blocking_threads(100) // Way more than necessary
        .enable_all()
        .build()?;

    let (gc_send, mut gc_recv) = mpsc::channel::<ZTenantId>(100);
    START_GC_LOOP
        .set(gc_send)
        .expect("Failed to set START_GC_LOOP");

    let (compaction_send, mut compaction_recv) = mpsc::channel::<ZTenantId>(100);
    START_COMPACTION_LOOP
        .set(compaction_send)
        .expect("Failed to set START_COMPACTION_LOOP");

    // TODO this is getting repetitive
    let mut gc_loops = HashMap::<ZTenantId, watch::Sender<()>>::new();
    let mut compaction_loops = HashMap::<ZTenantId, watch::Sender<()>>::new();

    thread_mgr::spawn(
        ThreadKind::TenantTaskManager,
        None,
        None,
        "Tenant task manager main thread",
        true,
        move || {
            runtime.block_on(async move {
                let mut futures = FuturesUnordered::new();
                loop {
                    tokio::select! {
                        _ = thread_mgr::shutdown_watcher() => {
                            // Send cancellation to all tasks
                            for (_, cancel) in gc_loops.drain() {
                                cancel.send(()).ok();
                            }
                            for (_, cancel) in compaction_loops.drain() {
                                cancel.send(()).ok();
                            }

                            // Exit after all tasks finish
                            while let Some(result) = futures.next().await {
                                match result {
                                    Ok(()) => {
                                        TENANT_TASK_EVENTS.with_label_values(&["stop"]).inc();
                                    },
                                    Err(e) => {
                                        TENANT_TASK_EVENTS.with_label_values(&["panic"]).inc();
                                        error!("loop join error {}", e)
                                    },
                                }
                            }
                            break;
                        },
                        tenantid = gc_recv.recv() => {
                            let tenantid = tenantid.expect("Gc task channel closed unexpectedly");

                            // Spawn new task, request cancellation of the old one if exists
                            let (cancel_send, cancel_recv) = watch::channel(());
                            let handle = tokio::spawn(gc_loop(tenantid, cancel_recv)
                                .instrument(info_span!("gc loop", tenant = %tenantid)));
                            if let Some(old_cancel_send) = gc_loops.insert(tenantid, cancel_send) {
                                old_cancel_send.send(()).ok();
                            }

                            // Update metrics, remember handle
                            TENANT_TASK_EVENTS.with_label_values(&["start"]).inc();
                            futures.push(handle);
                        },
                        tenantid = compaction_recv.recv() => {
                            let tenantid = tenantid.expect("Compaction task channel closed unexpectedly");

                            // Spawn new task, request cancellation of the old one if exists
                            let (cancel_send, cancel_recv) = watch::channel(());
                            let handle = tokio::spawn(compaction_loop(tenantid, cancel_recv)
                                .instrument(info_span!("compaction loop", tenant = %tenantid)));
                            if let Some(old_cancel_send) = compaction_loops.insert(tenantid, cancel_send) {
                                old_cancel_send.send(()).ok();
                            }

                            // Update metrics, remember handle
                            TENANT_TASK_EVENTS.with_label_values(&["start"]).inc();
                            futures.push(handle);
                        },
                        result = futures.next() => {
                            // Log and count any unhandled panics
                            match result {
                                Some(Ok(())) => {
                                    TENANT_TASK_EVENTS.with_label_values(&["stop"]).inc();
                                },
                                Some(Err(e)) => {
                                    TENANT_TASK_EVENTS.with_label_values(&["panic"]).inc();
                                    error!("loop join error {}", e)
                                },
                                None => {},
                            };
                        },
                    }
                }
            });
            Ok(())
        },
    )?;
    Ok(())
}

///
/// GC task's main loop
///
async fn gc_loop(tenantid: ZTenantId, mut cancel: watch::Receiver<()>) {
    loop {
        trace!("waking up");

        // Run blocking part of the task
        let period: Result<Result<_, anyhow::Error>, _> = tokio::task::spawn_blocking(move || {
            // Break if tenant is not active
            if tenant_mgr::get_tenant_state(tenantid) != Some(TenantState::Active) {
                return Ok(ControlFlow::Break(()));
            }

            // Break if we're not allowed to write to disk
            let repo = tenant_mgr::get_repository_for_tenant(tenantid)?;
            // TODO do this inside repo.gc_iteration instead.
            let _guard = match repo.file_lock.try_read() {
                Ok(g) => g,
                Err(_) => return Ok(ControlFlow::Break(())),
            };

            // Run gc
            let gc_period = repo.get_gc_period();
            let gc_horizon = repo.get_gc_horizon();
            if gc_horizon > 0 {
                repo.gc_iteration(None, gc_horizon, repo.get_pitr_interval(), false)?;
            }

            Ok(ControlFlow::Continue(gc_period))
        })
        .await;

        // Decide whether to sleep or break
        let sleep_duration = match period {
            Ok(Ok(ControlFlow::Continue(period))) => period,
            Ok(Ok(ControlFlow::Break(()))) => break,
            Ok(Err(e)) => {
                error!("Gc failed, retrying: {}", e);
                Duration::from_secs(2)
            }
            Err(e) => {
                error!("Gc join error, retrying: {}", e);
                Duration::from_secs(2)
            }
        };

        // Sleep
        tokio::select! {
            _ = cancel.changed() => {
                trace!("received cancellation request");
                break;
            },
            _ = tokio::time::sleep(sleep_duration) => {},
        }
    }
    trace!(
        "GC loop stopped. State is {:?}",
        tenant_mgr::get_tenant_state(tenantid)
    );
}

View File

@@ -1,79 +0,0 @@
//! This module contains functions to serve per-tenant background processes,
//! such as compaction and GC

use crate::repository::Repository;
use crate::tenant_mgr;
use crate::tenant_mgr::TenantState;
use anyhow::Result;
use std::time::Duration;
use tracing::*;
use utils::zid::ZTenantId;

///
/// Compaction thread's main loop
///
pub fn compact_loop(tenantid: ZTenantId) -> Result<()> {
    if let Err(err) = compact_loop_ext(tenantid) {
        error!("compact loop terminated with error: {:?}", err);
        Err(err)
    } else {
        Ok(())
    }
}

fn compact_loop_ext(tenantid: ZTenantId) -> Result<()> {
    loop {
        if tenant_mgr::get_tenant_state(tenantid) != Some(TenantState::Active) {
            break;
        }

        let repo = tenant_mgr::get_repository_for_tenant(tenantid)?;
        let compaction_period = repo.get_compaction_period();
        std::thread::sleep(compaction_period);
        trace!("compaction thread for tenant {} waking up", tenantid);

        // Compact timelines
        let repo = tenant_mgr::get_repository_for_tenant(tenantid)?;
        repo.compaction_iteration()?;
    }

    trace!(
        "compaction thread stopped for tenant {} state is {:?}",
        tenantid,
        tenant_mgr::get_tenant_state(tenantid)
    );
    Ok(())
}

///
/// GC thread's main loop
///
pub fn gc_loop(tenantid: ZTenantId) -> Result<()> {
    loop {
        if tenant_mgr::get_tenant_state(tenantid) != Some(TenantState::Active) {
            break;
        }

        trace!("gc thread for tenant {} waking up", tenantid);
        let repo = tenant_mgr::get_repository_for_tenant(tenantid)?;
        let gc_horizon = repo.get_gc_horizon();

        // Garbage collect old files that are not needed for PITR anymore
        if gc_horizon > 0 {
            repo.gc_iteration(None, gc_horizon, repo.get_pitr_interval(), false)?;
        }

        // TODO Write it in a more adequate way using
        // condvar.wait_timeout() or something
        let mut sleep_time = repo.get_gc_period().as_secs();
        while sleep_time > 0 && tenant_mgr::get_tenant_state(tenantid) == Some(TenantState::Active)
        {
            sleep_time -= 1;
            std::thread::sleep(Duration::from_secs(1));
        }
    }
    trace!(
        "GC thread stopped for tenant {} state is {:?}",
        tenantid,
        tenant_mgr::get_tenant_state(tenantid)
    );
    Ok(())
}

View File

@@ -94,11 +94,8 @@ pub enum ThreadKind {
     // Main walreceiver manager thread that ensures that every timeline spawns a connection to safekeeper, to fetch WAL.
     WalReceiverManager,

-    // Thread that handles compaction of all timelines for a tenant.
-    Compactor,
-
-    // Thread that handles GC of a tenant
-    GarbageCollector,
+    // Thread that schedules new compaction and gc jobs
+    TenantTaskManager,

     // Thread that flushes frozen in-memory layers to disk
     LayerFlushThread,

View File

@@ -115,7 +115,7 @@ mod tests {
             Ok(())
         });

-        let () = waiter.await?;
+        waiter.await?;
         notifier.await?
     }
 }
} }

View File

@@ -0,0 +1,222 @@
#
# Simple script to export nodes from one pageserver
# and import them into another pageserver
#
from os import path
import os
import requests
import uuid
import subprocess
import argparse
from pathlib import Path

# directory to save exported tar files to
basepath = path.dirname(path.abspath(__file__))


class NeonPageserverApiException(Exception):
    pass


class NeonPageserverHttpClient(requests.Session):
    def __init__(self, host, port):
        super().__init__()
        self.host = host
        self.port = port

    def verbose_error(self, res: requests.Response):
        try:
            res.raise_for_status()
        except requests.RequestException as e:
            try:
                msg = res.json()['msg']
            except:
                msg = ''
            raise NeonPageserverApiException(msg) from e

    def check_status(self):
        self.get(f"http://{self.host}:{self.port}/v1/status").raise_for_status()

    def tenant_list(self):
        res = self.get(f"http://{self.host}:{self.port}/v1/tenant")
        self.verbose_error(res)
        res_json = res.json()
        assert isinstance(res_json, list)
        return res_json

    def tenant_create(self, new_tenant_id: uuid.UUID, ok_if_exists):
        res = self.post(
            f"http://{self.host}:{self.port}/v1/tenant",
            json={
                'new_tenant_id': new_tenant_id.hex,
            },
        )

        if res.status_code == 409:
            if ok_if_exists:
                print(f'could not create tenant: already exists for id {new_tenant_id}')
            else:
                res.raise_for_status()
        elif res.status_code == 201:
            print(f'created tenant {new_tenant_id}')
        else:
            self.verbose_error(res)

        return new_tenant_id

    def timeline_list(self, tenant_id: uuid.UUID):
        res = self.get(f"http://{self.host}:{self.port}/v1/tenant/{tenant_id.hex}/timeline")
        self.verbose_error(res)
        res_json = res.json()
        assert isinstance(res_json, list)
        return res_json


def main(args: argparse.Namespace):
    old_pageserver_host = args.old_pageserver_host
    new_pageserver_host = args.new_pageserver_host
    tenants = args.tenants

    old_http_client = NeonPageserverHttpClient(old_pageserver_host, args.old_pageserver_http_port)
    old_http_client.check_status()
    old_pageserver_connstr = f"postgresql://{old_pageserver_host}:{args.old_pageserver_pg_port}"

    new_http_client = NeonPageserverHttpClient(new_pageserver_host, args.new_pageserver_http_port)
    new_http_client.check_status()
    new_pageserver_connstr = f"postgresql://{new_pageserver_host}:{args.new_pageserver_pg_port}"

    psql_env = {**os.environ, 'LD_LIBRARY_PATH': '/usr/local/lib/'}

    for tenant_id in tenants:
        print(f"Tenant: {tenant_id}")
        timelines = old_http_client.timeline_list(uuid.UUID(tenant_id))
        print(f"Timelines: {timelines}")

        # Create tenant in new pageserver
        if args.only_import is False:
            new_http_client.tenant_create(uuid.UUID(tenant_id), args.ok_if_exists)

        for timeline in timelines:
            # Export timelines from old pageserver
            if args.only_import is False:
                query = f"fullbackup {timeline['tenant_id']} {timeline['timeline_id']} {timeline['local']['last_record_lsn']}"
                cmd = ["psql", "--no-psqlrc", old_pageserver_connstr, "-c", query]
                print(f"Running: {cmd}")

                tar_filename = path.join(basepath,
                                         f"{timeline['tenant_id']}_{timeline['timeline_id']}.tar")
                stderr_filename = path.join(
                    basepath, f"{timeline['tenant_id']}_{timeline['timeline_id']}.stderr")

                with open(tar_filename, 'w') as stdout_f:
                    with open(stderr_filename, 'w') as stderr_f:
                        print(f"(capturing output to {tar_filename})")
                        subprocess.run(cmd, stdout=stdout_f, stderr=stderr_f, env=psql_env)

                print(f"Done export: {tar_filename}")

            # Import timelines to new pageserver
            psql_path = Path(args.psql_path)
            import_cmd = f"import basebackup {timeline['tenant_id']} {timeline['timeline_id']} {timeline['local']['last_record_lsn']} {timeline['local']['last_record_lsn']}"
            tar_filename = path.join(basepath,
                                     f"{timeline['tenant_id']}_{timeline['timeline_id']}.tar")
            full_cmd = rf"""cat {tar_filename} | {psql_path} {new_pageserver_connstr} -c '{import_cmd}' """

            stderr_filename2 = path.join(
                basepath, f"import_{timeline['tenant_id']}_{timeline['timeline_id']}.stderr")
            stdout_filename = path.join(
                basepath, f"import_{timeline['tenant_id']}_{timeline['timeline_id']}.stdout")

            print(f"Running: {full_cmd}")
            with open(stdout_filename, 'w') as stdout_f:
                with open(stderr_filename2, 'w') as stderr_f:
                    print(f"(capturing output to {stdout_filename})")
                    subprocess.run(full_cmd,
                                   stdout=stdout_f,
                                   stderr=stderr_f,
                                   env=psql_env,
                                   shell=True)
            print("Done import")


if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--tenant-id',
        dest='tenants',
        required=True,
        nargs='+',
        help='Id of the tenant to migrate. You can pass multiple arguments',
    )
    parser.add_argument(
        '--from-host',
        dest='old_pageserver_host',
        required=True,
        help='Host of the pageserver to migrate data from',
    )
    parser.add_argument(
        '--from-http-port',
        dest='old_pageserver_http_port',
        required=False,
        type=int,
        default=9898,
        help='HTTP port of the pageserver to migrate data from. Default: 9898',
    )
    parser.add_argument(
        '--from-pg-port',
        dest='old_pageserver_pg_port',
        required=False,
        type=int,
        default=6400,
        help='pg port of the pageserver to migrate data from. Default: 6400',
    )
    parser.add_argument(
        '--to-host',
        dest='new_pageserver_host',
        required=True,
        help='Host of the pageserver to migrate data to',
    )
    parser.add_argument(
        '--to-http-port',
        dest='new_pageserver_http_port',
        required=False,
        default=9898,
        type=int,
        help='HTTP port of the pageserver to migrate data to. Default: 9898',
    )
    parser.add_argument(
        '--to-pg-port',
        dest='new_pageserver_pg_port',
        required=False,
        default=6400,
        type=int,
        help='pg port of the pageserver to migrate data to. Default: 6400',
    )
    parser.add_argument(
        '--ignore-tenant-exists',
        dest='ok_if_exists',
        required=False,
        help=
        'Ignore error if we are trying to create the tenant that already exists. It can be dangerous if existing tenant already contains some data.',
    )
    parser.add_argument(
        '--psql-path',
        dest='psql_path',
        required=False,
        default='/usr/local/bin/psql',
        help='Path to the psql binary. Default: /usr/local/bin/psql',
    )
    parser.add_argument(
        '--only-import',
        dest='only_import',
        required=False,
        default=False,
        action='store_true',
        help='Skip export and tenant creation part',
    )
    args = parser.parse_args()
    main(args)

View File

@@ -28,6 +28,10 @@ strict = true
 # There is some work in progress, though: https://github.com/MagicStack/asyncpg/pull/577
 ignore_missing_imports = true

+[mypy-pg8000.*]
+# Used only in testing clients
+ignore_missing_imports = true
+
 [mypy-cached_property.*]
 ignore_missing_imports = true

View File

@@ -1,6 +1,3 @@
-from contextlib import closing
-
-import psycopg2.extras
 import pytest
 from fixtures.log_helper import log
 from fixtures.neon_fixtures import NeonEnv, NeonEnvBuilder, NeonPageserverApiException

View File

@@ -1,8 +1,6 @@
 from contextlib import closing
-from typing import Iterator
-from uuid import UUID, uuid4
+from uuid import uuid4

 from fixtures.neon_fixtures import NeonEnvBuilder, NeonPageserverApiException
-from requests.exceptions import HTTPError
 import pytest

View File

@@ -1,11 +1,9 @@
 from contextlib import closing, contextmanager
 import psycopg2.extras
 import pytest
-from fixtures.neon_fixtures import PgProtocol, NeonEnvBuilder
+from fixtures.neon_fixtures import NeonEnvBuilder
 from fixtures.log_helper import log
-import os
 import time
-import asyncpg
 from fixtures.neon_fixtures import Postgres
 import threading

View File

@@ -1,8 +1,6 @@
 import pytest
-from contextlib import closing
 from fixtures.neon_fixtures import NeonEnv
-from fixtures.log_helper import log

 #

View File

@@ -1,4 +1,3 @@
-import subprocess
 from contextlib import closing

 import psycopg2.extras

View File

@@ -1,16 +1,10 @@
-import subprocess
 from contextlib import closing
-import psycopg2.extras
-import pytest
 from fixtures.log_helper import log
 from fixtures.neon_fixtures import NeonEnvBuilder, PgBin, PortDistributor, VanillaPostgres
 from fixtures.neon_fixtures import pg_distrib_dir
 import os
-from fixtures.utils import mkdir_if_needed, subprocess_capture
-import shutil
-import getpass
-import pwd
+from fixtures.utils import subprocess_capture

 num_rows = 1000

@@ -46,19 +40,20 @@ def test_fullbackup(neon_env_builder: NeonEnvBuilder,
     psql_env = {'LD_LIBRARY_PATH': os.path.join(str(pg_distrib_dir), 'lib')}

     # Get and unpack fullbackup from pageserver
-    restored_dir_path = os.path.join(env.repo_dir, "restored_datadir")
+    restored_dir_path = env.repo_dir / "restored_datadir"
     os.mkdir(restored_dir_path, 0o750)
     query = f"fullbackup {env.initial_tenant.hex} {timeline} {lsn}"
     cmd = ["psql", "--no-psqlrc", env.pageserver.connstr(), "-c", query]
     result_basepath = pg_bin.run_capture(cmd, env=psql_env)
     tar_output_file = result_basepath + ".stdout"
-    subprocess_capture(str(env.repo_dir), ["tar", "-xf", tar_output_file, "-C", restored_dir_path])
+    subprocess_capture(str(env.repo_dir),
+                       ["tar", "-xf", tar_output_file, "-C", str(restored_dir_path)])

     # HACK
     # fullbackup returns neon specific pg_control and first WAL segment
     # use resetwal to overwrite it
     pg_resetwal_path = os.path.join(pg_bin.pg_bin_path, 'pg_resetwal')
-    cmd = [pg_resetwal_path, "-D", restored_dir_path]
+    cmd = [pg_resetwal_path, "-D", str(restored_dir_path)]
     pg_bin.run_capture(cmd, env=psql_env)

     # Restore from the backup and find the data we inserted

View File

@@ -191,3 +191,8 @@ def test_import_from_pageserver(test_output_dir, pg_bin, vanilla_pg, neon_env_bu
     # Check it's the same as the first fullbackup
     # TODO pageserver should be checking checksum
     assert os.path.getsize(tar_output_file) == os.path.getsize(new_tar_output_file)
+
+    # Check that gc works
+    psconn = env.pageserver.connect()
+    pscur = psconn.cursor()
+    pscur.execute(f"do_gc {tenant.hex} {timeline} 0")

View File

@@ -0,0 +1,70 @@
from fixtures.neon_fixtures import NeonEnvBuilder, wait_until
from uuid import UUID
import time


def get_only_element(l):
    assert len(l) == 1
    return l[0]


# Test that gc and compaction tenant tasks start and stop correctly
def test_tenant_tasks(neon_env_builder: NeonEnvBuilder):
    # The gc and compaction loops don't bother to watch for tenant state
    # changes while sleeping, so we use small periods to make this test
    # run faster. With default settings we'd have to wait longer for tasks
    # to notice state changes and shut down.
    # TODO fix this behavior in the pageserver
    tenant_config = "{gc_period = '1 s', compaction_period = '1 s'}"
    neon_env_builder.pageserver_config_override = f"tenant_config={tenant_config}"
    name = "test_tenant_tasks"
    env = neon_env_builder.init_start()
    client = env.pageserver.http_client()

    def get_state(tenant):
        all_states = client.tenant_list()
        matching = [t for t in all_states if t["id"] == tenant.hex]
        return get_only_element(matching)["state"]

    def get_metric_value(name):
        metrics = client.get_metrics()
        relevant = [line for line in metrics.splitlines() if line.startswith(name)]
        if len(relevant) == 0:
            return 0
        line = get_only_element(relevant)
        value = line.lstrip(name).strip()
        return int(value)

    def detach_all_timelines(tenant):
        timelines = [UUID(t["timeline_id"]) for t in client.timeline_list(tenant)]
        for t in timelines:
            client.timeline_detach(tenant, t)

    def assert_idle(tenant):
        assert get_state(tenant) == "Idle"

    # Create tenant, start compute
    tenant, _ = env.neon_cli.create_tenant()
    timeline = env.neon_cli.create_timeline(name, tenant_id=tenant)
    pg = env.postgres.create_start(name, tenant_id=tenant)
    assert get_state(tenant) == "Active"

    # Stop compute
    pg.stop()

    # Detach all tenants and wait for them to go idle
    # TODO they should be already idle since there are no active computes
    for tenant_info in client.tenant_list():
        tenant_id = UUID(tenant_info["id"])
        detach_all_timelines(tenant_id)
        wait_until(10, 0.2, lambda: assert_idle(tenant_id))

    # Assert that all tasks finish quickly after tenants go idle
    def assert_tasks_finish():
        tasks_started = get_metric_value('pageserver_tenant_task_events{event="start"}')
        tasks_ended = get_metric_value('pageserver_tenant_task_events{event="stop"}')
        tasks_panicked = get_metric_value('pageserver_tenant_task_events{event="panic"}')
        assert tasks_started == tasks_ended
        assert tasks_panicked == 0

    wait_until(10, 0.2, assert_tasks_finish)

View File

@@ -1,3 +1,4 @@
+import pathlib
 import pytest
 import random
 import time

@@ -14,7 +15,7 @@ from dataclasses import dataclass, field
 from multiprocessing import Process, Value
 from pathlib import Path
 from fixtures.neon_fixtures import PgBin, Etcd, Postgres, RemoteStorageUsers, Safekeeper, NeonEnv, NeonEnvBuilder, PortDistributor, SafekeeperPort, neon_binpath, PgProtocol
-from fixtures.utils import get_dir_size, lsn_to_hex, mkdir_if_needed, lsn_from_hex
+from fixtures.utils import get_dir_size, lsn_to_hex, lsn_from_hex
 from fixtures.log_helper import log
 from typing import List, Optional, Any
 from uuid import uuid4

@@ -645,7 +646,7 @@ class ProposerPostgres(PgProtocol):
     def create_dir_config(self, safekeepers: str):
         """ Create dir and config for running --sync-safekeepers """
-        mkdir_if_needed(self.pg_data_dir_path())
+        pathlib.Path(self.pg_data_dir_path()).mkdir(exist_ok=True)
         with open(self.config_file_path(), "w") as f:
             cfg = [
                 "synchronous_standby_names = 'walproposer'\n",

@@ -828,7 +829,7 @@ class SafekeeperEnv:
         self.timeline_id = uuid.uuid4()
         self.tenant_id = uuid.uuid4()
-        mkdir_if_needed(str(self.repo_dir))
+        self.repo_dir.mkdir(exist_ok=True)

         # Create config and a Safekeeper object for each safekeeper
         self.safekeepers = []

@@ -847,8 +848,8 @@ class SafekeeperEnv:
                 http=self.port_distributor.get_port(),
             )
-            safekeeper_dir = os.path.join(self.repo_dir, f"sk{i}")
-            mkdir_if_needed(safekeeper_dir)
+            safekeeper_dir = self.repo_dir / f"sk{i}"
+            safekeeper_dir.mkdir(exist_ok=True)

             args = [
                 self.bin_safekeeper,

@@ -857,7 +858,7 @@ class SafekeeperEnv:
                 "--listen-http",
                 f"127.0.0.1:{port.http}",
                 "-D",
-                safekeeper_dir,
+                str(safekeeper_dir),
                 "--id",
                 str(i),
                 "--broker-endpoints",

View File

@@ -1,19 +1,17 @@
 import os
-import subprocess
+from pathlib import Path

 from fixtures.neon_fixtures import (NeonEnvBuilder,
                                     VanillaPostgres,
                                     PortDistributor,
                                     PgBin,
                                     base_dir,
-                                    vanilla_pg,
                                     pg_distrib_dir)
-from fixtures.log_helper import log


 def test_wal_restore(neon_env_builder: NeonEnvBuilder,
                      pg_bin: PgBin,
-                     test_output_dir,
+                     test_output_dir: Path,
                      port_distributor: PortDistributor):
     env = neon_env_builder.init_start()
     env.neon_cli.create_branch("test_wal_restore")

@@ -22,13 +20,13 @@ def test_wal_restore(neon_env_builder: NeonEnvBuilder,
     tenant_id = pg.safe_psql("show neon.tenant_id")[0][0]
     env.neon_cli.pageserver_stop()
     port = port_distributor.get_port()
-    data_dir = os.path.join(test_output_dir, 'pgsql.restored')
+    data_dir = test_output_dir / 'pgsql.restored'
     with VanillaPostgres(data_dir, PgBin(test_output_dir), port) as restored:
         pg_bin.run_capture([
             os.path.join(base_dir, 'libs/utils/scripts/restore_from_wal.sh'),
             os.path.join(pg_distrib_dir, 'bin'),
-            os.path.join(test_output_dir, 'repo/safekeepers/sk1/{}/*'.format(tenant_id)),
-            data_dir,
+            str(test_output_dir / 'repo' / 'safekeepers' / 'sk1' / str(tenant_id) / '*'),
+            str(data_dir),
             str(port)
         ])
         restored.start()

View File

@@ -1,13 +1,13 @@
 import os
+from pathlib import Path

 import pytest
-from fixtures.utils import mkdir_if_needed
 from fixtures.neon_fixtures import NeonEnv, base_dir, pg_distrib_dir


 # The isolation tests run for a long time, especially in debug mode,
 # so use a larger-than-default timeout.
 @pytest.mark.timeout(1800)
-def test_isolation(neon_simple_env: NeonEnv, test_output_dir, pg_bin, capsys):
+def test_isolation(neon_simple_env: NeonEnv, test_output_dir: Path, pg_bin, capsys):
     env = neon_simple_env
     env.neon_cli.create_branch("test_isolation", "empty")

@@ -17,9 +17,8 @@ def test_isolation(neon_simple_env: NeonEnv, test_output_dir, pg_bin, capsys):
     pg.safe_psql('CREATE DATABASE isolation_regression')

     # Create some local directories for pg_isolation_regress to run in.
-    runpath = os.path.join(test_output_dir, 'regress')
-    mkdir_if_needed(runpath)
-    mkdir_if_needed(os.path.join(runpath, 'testtablespace'))
+    runpath = test_output_dir / 'regress'
+    (runpath / 'testtablespace').mkdir(parents=True)

     # Compute all the file locations that pg_isolation_regress will need.
     build_path = os.path.join(pg_distrib_dir, 'build/src/test/isolation')


@@ -1,6 +1,6 @@
import os
+ from pathlib import Path
- from fixtures.utils import mkdir_if_needed
from fixtures.neon_fixtures import (NeonEnv,
check_restored_datadir_content,
base_dir,
@@ -8,7 +8,7 @@ from fixtures.neon_fixtures import (NeonEnv,
from fixtures.log_helper import log
- def test_neon_regress(neon_simple_env: NeonEnv, test_output_dir, pg_bin, capsys):
+ def test_neon_regress(neon_simple_env: NeonEnv, test_output_dir: Path, pg_bin, capsys):
env = neon_simple_env
env.neon_cli.create_branch("test_neon_regress", "empty")
@@ -17,9 +17,8 @@ def test_neon_regress(neon_simple_env: NeonEnv, test_output_dir, pg_bin, capsys)
pg.safe_psql('CREATE DATABASE regression')
# Create some local directories for pg_regress to run in.
- runpath = os.path.join(test_output_dir, 'regress')
- mkdir_if_needed(runpath)
- mkdir_if_needed(os.path.join(runpath, 'testtablespace'))
+ runpath = test_output_dir / 'regress'
+ (runpath / 'testtablespace').mkdir(parents=True)
# Compute all the file locations that pg_regress will need.
# This test runs neon specific tests


@@ -1,13 +1,13 @@
import os
+ import pathlib
import pytest
- from fixtures.utils import mkdir_if_needed
from fixtures.neon_fixtures import NeonEnv, check_restored_datadir_content, base_dir, pg_distrib_dir
# The pg_regress tests run for a long time, especially in debug mode,
# so use a larger-than-default timeout.
@pytest.mark.timeout(1800)
- def test_pg_regress(neon_simple_env: NeonEnv, test_output_dir: str, pg_bin, capsys):
+ def test_pg_regress(neon_simple_env: NeonEnv, test_output_dir: pathlib.Path, pg_bin, capsys):
env = neon_simple_env
env.neon_cli.create_branch("test_pg_regress", "empty")
@@ -16,9 +16,8 @@ def test_pg_regress(neon_simple_env: NeonEnv, test_output_dir: str, pg_bin, caps
pg.safe_psql('CREATE DATABASE regression')
# Create some local directories for pg_regress to run in.
- runpath = os.path.join(test_output_dir, 'regress')
- mkdir_if_needed(runpath)
- mkdir_if_needed(os.path.join(runpath, 'testtablespace'))
+ runpath = test_output_dir / 'regress'
+ (runpath / 'testtablespace').mkdir(parents=True)
# Compute all the file locations that pg_regress will need.
build_path = os.path.join(pg_distrib_dir, 'build/src/test/regress')
@@ -51,7 +50,7 @@ def test_pg_regress(neon_simple_env: NeonEnv, test_output_dir: str, pg_bin, caps
# checkpoint one more time to ensure that the lsn we get is the latest one
pg.safe_psql('CHECKPOINT')
- lsn = pg.safe_psql('select pg_current_wal_insert_lsn()')[0][0]
+ pg.safe_psql('select pg_current_wal_insert_lsn()')[0][0]
# Check that we restore the content of the datadir correctly
check_restored_datadir_content(test_output_dir, env, pg)


@@ -35,12 +35,7 @@ from typing_extensions import Literal
import requests
import backoff  # type: ignore
- from .utils import (etcd_path,
-                     get_self_dir,
-                     mkdir_if_needed,
-                     subprocess_capture,
-                     lsn_from_hex,
-                     lsn_to_hex)
+ from .utils import (etcd_path, get_self_dir, subprocess_capture, lsn_from_hex, lsn_to_hex)
from fixtures.log_helper import log
"""
This file contains pytest fixtures. A fixture is a test resource that can be
@@ -127,7 +122,7 @@ def pytest_configure(config):
top_output_dir = env_test_output
else:
top_output_dir = os.path.join(base_dir, DEFAULT_OUTPUT_DIR)
- mkdir_if_needed(top_output_dir)
+ pathlib.Path(top_output_dir).mkdir(exist_ok=True)
# Find the postgres installation.
global pg_distrib_dir
@@ -1316,7 +1311,7 @@ def append_pageserver_param_overrides(
class PgBin:
""" A helper class for executing postgres binaries """
- def __init__(self, log_dir: str):
+ def __init__(self, log_dir: Path):
self.log_dir = log_dir
self.pg_bin_path = os.path.join(str(pg_distrib_dir), 'bin')
self.env = os.environ.copy()
@@ -1367,22 +1362,27 @@ class PgBin:
self._fixpath(command)
log.info('Running command "{}"'.format(' '.join(command)))
env = self._build_env(env)
- return subprocess_capture(self.log_dir, command, env=env, cwd=cwd, check=True, **kwargs)
+ return subprocess_capture(str(self.log_dir),
+                           command,
+                           env=env,
+                           cwd=cwd,
+                           check=True,
+                           **kwargs)
@pytest.fixture(scope='function')
- def pg_bin(test_output_dir: str) -> PgBin:
+ def pg_bin(test_output_dir: Path) -> PgBin:
return PgBin(test_output_dir)
class VanillaPostgres(PgProtocol):
- def __init__(self, pgdatadir: str, pg_bin: PgBin, port: int, init=True):
+ def __init__(self, pgdatadir: Path, pg_bin: PgBin, port: int, init=True):
super().__init__(host='localhost', port=port, dbname='postgres')
self.pgdatadir = pgdatadir
self.pg_bin = pg_bin
self.running = False
if init:
- self.pg_bin.run_capture(['initdb', '-D', pgdatadir])
+ self.pg_bin.run_capture(['initdb', '-D', str(pgdatadir)])
self.configure([f"port = {port}\n"])
def configure(self, options: List[str]):
@@ -1398,12 +1398,13 @@ class VanillaPostgres(PgProtocol):
if log_path is None:
log_path = os.path.join(self.pgdatadir, "pg.log")
- self.pg_bin.run_capture(['pg_ctl', '-w', '-D', self.pgdatadir, '-l', log_path, 'start'])
+ self.pg_bin.run_capture(
+     ['pg_ctl', '-w', '-D', str(self.pgdatadir), '-l', log_path, 'start'])
def stop(self):
assert self.running
self.running = False
- self.pg_bin.run_capture(['pg_ctl', '-w', '-D', self.pgdatadir, 'stop'])
+ self.pg_bin.run_capture(['pg_ctl', '-w', '-D', str(self.pgdatadir), 'stop'])
def get_subdir_size(self, subdir) -> int:
"""Return size of pgdatadir subdirectory in bytes."""
@@ -1418,9 +1419,9 @@ class VanillaPostgres(PgProtocol):
@pytest.fixture(scope='function')
- def vanilla_pg(test_output_dir: str,
+ def vanilla_pg(test_output_dir: Path,
port_distributor: PortDistributor) -> Iterator[VanillaPostgres]:
- pgdatadir = os.path.join(test_output_dir, "pgdata-vanilla")
+ pgdatadir = test_output_dir / "pgdata-vanilla"
pg_bin = PgBin(test_output_dir)
port = port_distributor.get_port()
with VanillaPostgres(pgdatadir, pg_bin, port) as vanilla_pg:
@@ -1457,7 +1458,7 @@ class RemotePostgres(PgProtocol):
@pytest.fixture(scope='function')
- def remote_pg(test_output_dir: str) -> Iterator[RemotePostgres]:
+ def remote_pg(test_output_dir: Path) -> Iterator[RemotePostgres]:
pg_bin = PgBin(test_output_dir)
connstr = os.getenv("BENCHMARK_CONNSTR")
@@ -1924,9 +1925,12 @@ class Etcd:
datadir: str
port: int
peer_port: int
- binary_path: Path = etcd_path()
+ binary_path: Path = field(init=False)
handle: Optional[subprocess.Popen[Any]] = None  # handle of running daemon
+ def __post_init__(self):
+     self.binary_path = etcd_path()
def client_url(self):
return f'http://127.0.0.1:{self.port}'
@@ -1980,11 +1984,13 @@ class Etcd:
self.handle.wait()
- def get_test_output_dir(request: Any) -> str:
+ def get_test_output_dir(request: Any) -> pathlib.Path:
""" Compute the working directory for an individual test. """
test_name = request.node.name
- test_dir = os.path.join(str(top_output_dir), test_name)
+ test_dir = pathlib.Path(top_output_dir) / test_name.replace("/", "-")
log.info(f'get_test_output_dir is {test_dir}')
+ # make mypy happy
+ assert isinstance(test_dir, pathlib.Path)
return test_dir
@@ -1998,14 +2004,14 @@ def get_test_output_dir(request: Any) -> str:
# this fixture ensures that the directory exists. That works because
# 'autouse' fixtures are run before other fixtures.
@pytest.fixture(scope='function', autouse=True)
- def test_output_dir(request: Any) -> str:
+ def test_output_dir(request: Any) -> pathlib.Path:
""" Create the working directory for an individual test. """
# one directory per test
test_dir = get_test_output_dir(request)
log.info(f'test_output_dir is {test_dir}')
shutil.rmtree(test_dir, ignore_errors=True)
- mkdir_if_needed(test_dir)
+ test_dir.mkdir()
return test_dir
@@ -2051,7 +2057,7 @@ def should_skip_file(filename: str) -> bool:
#
# Test helpers
#
- def list_files_to_compare(pgdata_dir: str):
+ def list_files_to_compare(pgdata_dir: pathlib.Path):
pgdata_files = []
for root, _file, filenames in os.walk(pgdata_dir):
for filename in filenames:
@@ -2068,7 +2074,7 @@ def list_files_to_compare(pgdata_dir: str):
# pg is the existing and running compute node, that we want to compare with a basebackup
- def check_restored_datadir_content(test_output_dir: str, env: NeonEnv, pg: Postgres):
+ def check_restored_datadir_content(test_output_dir: Path, env: NeonEnv, pg: Postgres):
# Get the timeline ID. We need it for the 'basebackup' command
with closing(pg.connect()) as conn:
@@ -2080,8 +2086,8 @@ def check_restored_datadir_content(test_output_dir: str, env: NeonEnv, pg: Postg
pg.stop()
# Take a basebackup from pageserver
- restored_dir_path = os.path.join(env.repo_dir, f"{pg.node_name}_restored_datadir")
- mkdir_if_needed(restored_dir_path)
+ restored_dir_path = env.repo_dir / f"{pg.node_name}_restored_datadir"
+ restored_dir_path.mkdir(exist_ok=True)
pg_bin = PgBin(test_output_dir)
psql_path = os.path.join(pg_bin.pg_bin_path, 'psql')
@@ -2108,7 +2114,7 @@ def check_restored_datadir_content(test_output_dir: str, env: NeonEnv, pg: Postg
# list files we're going to compare
assert pg.pgdata_dir
- pgdata_files = list_files_to_compare(pg.pgdata_dir)
+ pgdata_files = list_files_to_compare(pathlib.Path(pg.pgdata_dir))
restored_files = list_files_to_compare(restored_dir_path)
# check that file sets are equal
@@ -2140,7 +2146,7 @@ def check_restored_datadir_content(test_output_dir: str, env: NeonEnv, pg: Postg
assert (mismatch, error) == ([], [])
- def wait_until(number_of_iterations: int, interval: int, func):
+ def wait_until(number_of_iterations: int, interval: float, func):
"""
Wait until 'func' returns successfully, without exception. Returns the last return value
from the function.
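
Two notes on the fixture changes above. The Etcd dataclass used to evaluate etcd_path() as a class-body default, so the etcd binary had to be resolvable at class-definition (import) time even for tests that never start etcd; field(init=False) plus __post_init__ defers the lookup until an instance is created. A minimal sketch of the pattern, with a hypothetical stand-in for etcd_path():

    import shutil
    from dataclasses import dataclass, field
    from pathlib import Path

    def etcd_path() -> Path:
        # Hypothetical stand-in: resolve the etcd binary at call time.
        binary = shutil.which("etcd")
        if binary is None:
            raise RuntimeError("etcd not found in PATH")
        return Path(binary)

    @dataclass
    class Etcd:
        datadir: str
        port: int
        peer_port: int
        # init=False keeps the field out of the generated __init__; a class-body
        # default of etcd_path() would run once, at class-definition time.
        binary_path: Path = field(init=False)

        def __post_init__(self):
            self.binary_path = etcd_path()

Second, get_test_output_dir now sanitizes the test name with test_name.replace("/", "-"), so parametrized tests whose IDs contain slashes (such as the pg_clients cases below) map to valid directory names.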


@@ -12,18 +12,6 @@ def get_self_dir() -> str:
return os.path.dirname(os.path.abspath(__file__))
- def mkdir_if_needed(path: str) -> None:
-     """ Create a directory if it doesn't already exist
-     Note this won't try to create intermediate directories.
-     """
-     try:
-         os.mkdir(path)
-     except FileExistsError:
-         pass
-     assert os.path.isdir(path)
def subprocess_capture(capture_dir: str, cmd: List[str], **kwargs: Any) -> str:
""" Run a process and capture its output


@@ -0,0 +1,2 @@
bin/
obj/


@@ -0,0 +1,2 @@
bin/
obj/


@@ -0,0 +1,14 @@
FROM mcr.microsoft.com/dotnet/sdk:6.0 AS build
WORKDIR /source
COPY *.csproj .
RUN dotnet restore
COPY . .
RUN dotnet publish -c release -o /app --no-restore
FROM mcr.microsoft.com/dotnet/runtime:6.0
WORKDIR /app
COPY --from=build /app .
ENTRYPOINT ["dotnet", "csharp-npgsql.dll"]


@@ -0,0 +1,19 @@
using Npgsql;
var host = Environment.GetEnvironmentVariable("NEON_HOST");
var database = Environment.GetEnvironmentVariable("NEON_DATABASE");
var user = Environment.GetEnvironmentVariable("NEON_USER");
var password = Environment.GetEnvironmentVariable("NEON_PASSWORD");
var connString = $"Host={host};Username={user};Password={password};Database={database}";
await using var conn = new NpgsqlConnection(connString);
await conn.OpenAsync();
await using (var cmd = new NpgsqlCommand("SELECT 1", conn))
await using (var reader = await cmd.ExecuteReaderAsync())
{
while (await reader.ReadAsync())
Console.WriteLine(reader.GetInt32(0));
}
await conn.CloseAsync();


@@ -0,0 +1,14 @@
<Project Sdk="Microsoft.NET.Sdk">
<PropertyGroup>
<OutputType>Exe</OutputType>
<TargetFramework>net6.0</TargetFramework>
<ImplicitUsings>enable</ImplicitUsings>
<Nullable>enable</Nullable>
</PropertyGroup>
<ItemGroup>
<PackageReference Include="Npgsql" Version="6.0.5" />
</ItemGroup>
</Project>

View File

@@ -0,0 +1 @@

View File

@@ -0,0 +1,10 @@
FROM openjdk:17
WORKDIR /source
COPY . .
WORKDIR /app
RUN curl --output postgresql.jar https://jdbc.postgresql.org/download/postgresql-42.4.0.jar && \
javac -d /app /source/Example.java
CMD ["java", "-cp", "/app/postgresql.jar:.", "Example"]


@@ -0,0 +1,31 @@
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.ResultSet;
import java.sql.Statement;
import java.util.Properties;
public class Example
{
public static void main( String[] args ) throws Exception
{
String host = System.getenv("NEON_HOST");
String database = System.getenv("NEON_DATABASE");
String user = System.getenv("NEON_USER");
String password = System.getenv("NEON_PASSWORD");
String url = "jdbc:postgresql://%s/%s".formatted(host, database);
Properties props = new Properties();
props.setProperty("user", user);
props.setProperty("password", password);
Connection conn = DriverManager.getConnection(url, props);
Statement st = conn.createStatement();
ResultSet rs = st.executeQuery("SELECT 1");
while (rs.next())
{
System.out.println(rs.getString(1));
}
rs.close();
st.close();
}
}


@@ -0,0 +1,8 @@
FROM python:3.10
WORKDIR /source
COPY . .
RUN python3 -m pip install --no-cache-dir -r requirements.txt
CMD ["python3", "asyncpg_example.py"]


@@ -0,0 +1,30 @@
#! /usr/bin/env python3
import asyncio
import os
import asyncpg
async def run(**kwargs) -> asyncpg.Record:
conn = await asyncpg.connect(
**kwargs,
statement_cache_size=0,  # Prepared statements don't work with PgBouncer
)
rv = await conn.fetchrow("SELECT 1")
await conn.close()
return rv
if __name__ == "__main__":
kwargs = {
k.lstrip("NEON_").lower(): v
for k in ("NEON_HOST", "NEON_DATABASE", "NEON_USER", "NEON_PASSWORD")
if (v := os.environ.get(k, None)) is not None
}
loop = asyncio.new_event_loop()
row = loop.run_until_complete(run(**kwargs))
print(row[0])
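
The comprehension above collects connection kwargs from whichever NEON_* variables are set, using the walrus operator to bind and filter each value in one step. A small self-contained illustration with assumed values:

    env = {"NEON_HOST": "example.host", "NEON_USER": "alice"}  # assumed values

    kwargs = {
        k.removeprefix("NEON_").lower(): v
        for k in ("NEON_HOST", "NEON_DATABASE", "NEON_USER", "NEON_PASSWORD")
        if (v := env.get(k)) is not None  # walrus: bind and test in one expression
    }
    assert kwargs == {"host": "example.host", "user": "alice"}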


@@ -0,0 +1 @@
asyncpg==0.25.0

View File

@@ -0,0 +1,8 @@
FROM python:3.10
WORKDIR /source
COPY . .
RUN python3 -m pip install --no-cache-dir -r requirements.txt
CMD ["python3", "pg8000_example.py"]


@@ -0,0 +1,23 @@
#! /usr/bin/env python3
import os
import ssl
import pg8000.dbapi
if __name__ == "__main__":
kwargs = {
k.lstrip("NEON_").lower(): v
for k in ("NEON_HOST", "NEON_DATABASE", "NEON_USER", "NEON_PASSWORD")
if (v := os.environ.get(k, None)) is not None
}
conn = pg8000.dbapi.connect(
**kwargs,
ssl_context=True,
)
cursor = conn.cursor()
cursor.execute("SELECT 1")
row = cursor.fetchone()
print(row[0])
conn.close()


@@ -0,0 +1 @@
pg8000==1.29.1


@@ -0,0 +1 @@
.build/

View File

@@ -0,0 +1 @@
.build/


@@ -0,0 +1,11 @@
FROM swift:5.6 AS build
RUN apt-get -q update && apt-get -q install -y libssl-dev
WORKDIR /source
COPY . .
RUN swift build --configuration release
FROM swift:5.6
WORKDIR /app
COPY --from=build /source/.build/release/release .
CMD ["/app/PostgresClientKitExample"]


@@ -0,0 +1,41 @@
{
"pins" : [
{
"identity" : "bluesocket",
"kind" : "remoteSourceControl",
"location" : "https://github.com/IBM-Swift/BlueSocket.git",
"state" : {
"revision" : "dd924c3bc2c1c144c42b8dda3896f1a03115ded4",
"version" : "2.0.2"
}
},
{
"identity" : "bluesslservice",
"kind" : "remoteSourceControl",
"location" : "https://github.com/IBM-Swift/BlueSSLService",
"state" : {
"revision" : "c249988fb748749739144e7f554710552acdc0bd",
"version" : "2.0.1"
}
},
{
"identity" : "postgresclientkit",
"kind" : "remoteSourceControl",
"location" : "https://github.com/codewinsdotcom/PostgresClientKit.git",
"state" : {
"branch" : "v1.4.3",
"revision" : "beafedaea6dc9f04712e9a8547b77f47c406a47e"
}
},
{
"identity" : "swift-argument-parser",
"kind" : "remoteSourceControl",
"location" : "https://github.com/apple/swift-argument-parser",
"state" : {
"revision" : "6b2aa2748a7881eebb9f84fb10c01293e15b52ca",
"version" : "0.5.0"
}
}
],
"version" : 2
}


@@ -0,0 +1,17 @@
// swift-tools-version:5.6
import PackageDescription
let package = Package(
name: "PostgresClientKitExample",
dependencies: [
.package(
url: "https://github.com/codewinsdotcom/PostgresClientKit.git",
revision: "v1.4.3"
)
],
targets: [
.target(
name: "PostgresClientKitExample",
dependencies: [ "PostgresClientKit" ])
]
)


@@ -0,0 +1,38 @@
import Foundation
import PostgresClientKit
do {
var configuration = PostgresClientKit.ConnectionConfiguration()
let env = ProcessInfo.processInfo.environment
if let host = env["NEON_HOST"] {
configuration.host = host
}
if let database = env["NEON_DATABASE"] {
configuration.database = database
}
if let user = env["NEON_USER"] {
configuration.user = user
}
if let password = env["NEON_PASSWORD"] {
configuration.credential = .scramSHA256(password: password)
}
let connection = try PostgresClientKit.Connection(configuration: configuration)
defer { connection.close() }
let text = "SELECT 1;"
let statement = try connection.prepareStatement(text: text)
defer { statement.close() }
let cursor = try statement.execute(parameterValues: [ ])
defer { cursor.close() }
for row in cursor {
let columns = try row.get().columns
print(columns[0])
}
} catch {
print(error)
}


@@ -0,0 +1,54 @@
import os
import shutil
import subprocess
from pathlib import Path
from tempfile import NamedTemporaryFile
from urllib.parse import urlparse
import pytest
from fixtures.neon_fixtures import RemotePostgres
@pytest.mark.remote_cluster
@pytest.mark.parametrize(
"client",
[
"csharp/npgsql",
"java/jdbc",
"python/asyncpg",
pytest.param(
"python/pg8000", # See https://github.com/neondatabase/neon/pull/2008#discussion_r912264281
marks=pytest.mark.xfail(reason="Handles SSL in incompatible with Neon way")),
pytest.param(
"swift/PostgresClientKit", # See https://github.com/neondatabase/neon/pull/2008#discussion_r911896592
marks=pytest.mark.xfail(reason="Neither SNI nor parameters is supported")),
"typescript/postgresql-client",
],
)
def test_pg_clients(remote_pg: RemotePostgres, client: str):
conn_options = remote_pg.conn_options()
env_file = None
with NamedTemporaryFile(mode="w", delete=False) as f:
env_file = f.name
f.write(f"""
NEON_HOST={conn_options["host"]}
NEON_DATABASE={conn_options["dbname"]}
NEON_USER={conn_options["user"]}
NEON_PASSWORD={conn_options["password"]}
""")
image_tag = client.lower()
docker_bin = shutil.which("docker")
if docker_bin is None:
raise RuntimeError("docker is required for running this test")
build_cmd = [
docker_bin, "build", "--quiet", "--tag", image_tag, f"{Path(__file__).parent / client}"
]
run_cmd = [docker_bin, "run", "--rm", "--env-file", env_file, image_tag]
subprocess.run(build_cmd, check=True)
result = subprocess.run(run_cmd, check=True, capture_output=True, text=True)
assert result.stdout.strip() == "1"


@@ -0,0 +1 @@
node_modules/


@@ -0,0 +1 @@
node_modules/


@@ -0,0 +1,7 @@
FROM node:16
WORKDIR /source
COPY . .
RUN npm clean-install
CMD ["/source/index.js"]


@@ -0,0 +1,25 @@
#! /usr/bin/env node
import {Connection} from 'postgresql-client';
const params = {
"host": process.env.NEON_HOST,
"database": process.env.NEON_DATABASE,
"user": process.env.NEON_USER,
"password": process.env.NEON_PASSWORD,
"ssl": true,
}
for (const key in params) {
if (params[key] === undefined) {
delete params[key];
}
}
const connection = new Connection(params);
await connection.connect();
const result = await connection.query(
'select 1'
);
const rows = result.rows;
await connection.close();
console.log(rows[0][0]);


@@ -0,0 +1,262 @@
{
"name": "typescript",
"lockfileVersion": 2,
"requires": true,
"packages": {
"": {
"dependencies": {
"postgresql-client": "^2.1.3"
}
},
"node_modules/debug": {
"version": "4.3.4",
"resolved": "https://registry.npmjs.org/debug/-/debug-4.3.4.tgz",
"integrity": "sha512-PRWFHuSU3eDtQJPvnNY7Jcket1j0t5OuOsFzPPzsekD52Zl8qUfFIPEiswXqIvHWGVHOgX+7G/vCNNhehwxfkQ==",
"dependencies": {
"ms": "2.1.2"
},
"engines": {
"node": ">=6.0"
},
"peerDependenciesMeta": {
"supports-color": {
"optional": true
}
}
},
"node_modules/doublylinked": {
"version": "2.5.1",
"resolved": "https://registry.npmjs.org/doublylinked/-/doublylinked-2.5.1.tgz",
"integrity": "sha512-Lpqb+qyHpR5Bew8xfKsxVYdjXEYAQ7HLp1IX47kHKmVCZeXErInytonjkL+kE+L4yaKSYEmDNR9MJYr5zwuAKA==",
"engines": {
"node": ">= 10.0"
}
},
"node_modules/lightning-pool": {
"version": "3.1.3",
"resolved": "https://registry.npmjs.org/lightning-pool/-/lightning-pool-3.1.3.tgz",
"integrity": "sha512-OgWuoh0BBrikWx/mc/XwIKwC9HHTe/GU3XODLMBPibv7jv8u0o2gQFS7KVEg5U8Oufg6N7mkm8Y1RoiLER0zeQ==",
"dependencies": {
"doublylinked": "^2.4.3",
"putil-promisify": "^1.8.2"
},
"engines": {
"node": ">= 10.0"
}
},
"node_modules/ms": {
"version": "2.1.2",
"resolved": "https://registry.npmjs.org/ms/-/ms-2.1.2.tgz",
"integrity": "sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w=="
},
"node_modules/obuf": {
"version": "1.1.2",
"resolved": "https://registry.npmjs.org/obuf/-/obuf-1.1.2.tgz",
"integrity": "sha512-PX1wu0AmAdPqOL1mWhqmlOd8kOIZQwGZw6rh7uby9fTc5lhaOWFLX3I6R1hrF9k3zUY40e6igsLGkDXK92LJNg=="
},
"node_modules/postgres-bytea": {
"version": "3.0.0",
"resolved": "https://registry.npmjs.org/postgres-bytea/-/postgres-bytea-3.0.0.tgz",
"integrity": "sha512-CNd4jim9RFPkObHSjVHlVrxoVQXz7quwNFpz7RY1okNNme49+sVyiTvTRobiLV548Hx/hb1BG+iE7h9493WzFw==",
"dependencies": {
"obuf": "~1.1.2"
},
"engines": {
"node": ">= 6"
}
},
"node_modules/postgresql-client": {
"version": "2.1.3",
"resolved": "https://registry.npmjs.org/postgresql-client/-/postgresql-client-2.1.3.tgz",
"integrity": "sha512-36Ga6JzhydsRzcCRcA/Y2hrX9C9sI0wS6sgRNBlOGkOwACXQVybmhDM7mAUbi9cT00N39Ee7btR0eMCyD//5Xg==",
"dependencies": {
"debug": "^4.3.4",
"doublylinked": "^2.5.1",
"lightning-pool": "^3.1.3",
"postgres-bytea": "^3.0.0",
"power-tasks": "^0.8.0",
"putil-merge": "^3.8.0",
"putil-promisify": "^1.8.5",
"putil-varhelpers": "^1.6.4"
},
"engines": {
"node": ">=14.0",
"npm": ">=7.0.0"
}
},
"node_modules/power-tasks": {
"version": "0.8.0",
"resolved": "https://registry.npmjs.org/power-tasks/-/power-tasks-0.8.0.tgz",
"integrity": "sha512-HhMcx+y5UkzlEmKslruz8uAU2Yq8CODJsFEMFsYMrGp5EzKpkNHGu0RNvBqyewKJDZHPNKtBSULsEAxMqQIBVQ==",
"dependencies": {
"debug": "^4.3.4",
"doublylinked": "^2.5.1",
"strict-typed-events": "^2.2.0"
},
"engines": {
"node": ">=14.0",
"npm": ">=7.0.0"
}
},
"node_modules/putil-merge": {
"version": "3.8.0",
"resolved": "https://registry.npmjs.org/putil-merge/-/putil-merge-3.8.0.tgz",
"integrity": "sha512-5tXPafJawWFoYZWLhkYXZ7IC/qkI45HgJsgv36lJBeq3qjFZfUITZE01CmWUBIlIn9f1yDiikqgYERARhVmgrg==",
"engines": {
"node": ">= 10.0"
}
},
"node_modules/putil-promisify": {
"version": "1.8.5",
"resolved": "https://registry.npmjs.org/putil-promisify/-/putil-promisify-1.8.5.tgz",
"integrity": "sha512-DItclasWWZokvpq3Aiaq0iV7WC8isP/0o/8mhC0yV6CQ781N/7NQHA1VyOm6hfpeFEwIQoo1C4Yjc5eH0q6Jbw==",
"engines": {
"node": ">= 6.0"
}
},
"node_modules/putil-varhelpers": {
"version": "1.6.4",
"resolved": "https://registry.npmjs.org/putil-varhelpers/-/putil-varhelpers-1.6.4.tgz",
"integrity": "sha512-nM2nO1HS2yJUyPgz0grd2XZAM0Spr6/tt6F4xXeNDjByV00BV2mq6lZ+sDff8WIfQBI9Hn1Czh93H1xBvKESxw==",
"engines": {
"node": ">= 6.0"
}
},
"node_modules/strict-typed-events": {
"version": "2.2.0",
"resolved": "https://registry.npmjs.org/strict-typed-events/-/strict-typed-events-2.2.0.tgz",
"integrity": "sha512-yvHRtEfRRV7TJWi9cLhMt4Mb12JtAwXXONltUlLCA3fRB0LRy94B4E4e2gIlXzT5nZHTZVpOjJNOshri3LZ5bw==",
"dependencies": {
"putil-promisify": "^1.8.5",
"ts-gems": "^2.0.0"
},
"engines": {
"node": ">=14.0"
}
},
"node_modules/ts-gems": {
"version": "2.1.0",
"resolved": "https://registry.npmjs.org/ts-gems/-/ts-gems-2.1.0.tgz",
"integrity": "sha512-5IqiG4nq1tsOhYPc4CwxA6bsE+TfU6uAABzf6bH4sdElgXpt/mlStvIYedvvtU7BM1+RRJxCaTLaaVFcCqNaiA==",
"peerDependencies": {
"typescript": ">=4.0.0"
}
},
"node_modules/typescript": {
"version": "4.7.4",
"resolved": "https://registry.npmjs.org/typescript/-/typescript-4.7.4.tgz",
"integrity": "sha512-C0WQT0gezHuw6AdY1M2jxUO83Rjf0HP7Sk1DtXj6j1EwkQNZrHAg2XPWlq62oqEhYvONq5pkC2Y9oPljWToLmQ==",
"peer": true,
"bin": {
"tsc": "bin/tsc",
"tsserver": "bin/tsserver"
},
"engines": {
"node": ">=4.2.0"
}
}
},
"dependencies": {
"debug": {
"version": "4.3.4",
"resolved": "https://registry.npmjs.org/debug/-/debug-4.3.4.tgz",
"integrity": "sha512-PRWFHuSU3eDtQJPvnNY7Jcket1j0t5OuOsFzPPzsekD52Zl8qUfFIPEiswXqIvHWGVHOgX+7G/vCNNhehwxfkQ==",
"requires": {
"ms": "2.1.2"
}
},
"doublylinked": {
"version": "2.5.1",
"resolved": "https://registry.npmjs.org/doublylinked/-/doublylinked-2.5.1.tgz",
"integrity": "sha512-Lpqb+qyHpR5Bew8xfKsxVYdjXEYAQ7HLp1IX47kHKmVCZeXErInytonjkL+kE+L4yaKSYEmDNR9MJYr5zwuAKA=="
},
"lightning-pool": {
"version": "3.1.3",
"resolved": "https://registry.npmjs.org/lightning-pool/-/lightning-pool-3.1.3.tgz",
"integrity": "sha512-OgWuoh0BBrikWx/mc/XwIKwC9HHTe/GU3XODLMBPibv7jv8u0o2gQFS7KVEg5U8Oufg6N7mkm8Y1RoiLER0zeQ==",
"requires": {
"doublylinked": "^2.4.3",
"putil-promisify": "^1.8.2"
}
},
"ms": {
"version": "2.1.2",
"resolved": "https://registry.npmjs.org/ms/-/ms-2.1.2.tgz",
"integrity": "sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w=="
},
"obuf": {
"version": "1.1.2",
"resolved": "https://registry.npmjs.org/obuf/-/obuf-1.1.2.tgz",
"integrity": "sha512-PX1wu0AmAdPqOL1mWhqmlOd8kOIZQwGZw6rh7uby9fTc5lhaOWFLX3I6R1hrF9k3zUY40e6igsLGkDXK92LJNg=="
},
"postgres-bytea": {
"version": "3.0.0",
"resolved": "https://registry.npmjs.org/postgres-bytea/-/postgres-bytea-3.0.0.tgz",
"integrity": "sha512-CNd4jim9RFPkObHSjVHlVrxoVQXz7quwNFpz7RY1okNNme49+sVyiTvTRobiLV548Hx/hb1BG+iE7h9493WzFw==",
"requires": {
"obuf": "~1.1.2"
}
},
"postgresql-client": {
"version": "2.1.3",
"resolved": "https://registry.npmjs.org/postgresql-client/-/postgresql-client-2.1.3.tgz",
"integrity": "sha512-36Ga6JzhydsRzcCRcA/Y2hrX9C9sI0wS6sgRNBlOGkOwACXQVybmhDM7mAUbi9cT00N39Ee7btR0eMCyD//5Xg==",
"requires": {
"debug": "^4.3.4",
"doublylinked": "^2.5.1",
"lightning-pool": "^3.1.3",
"postgres-bytea": "^3.0.0",
"power-tasks": "^0.8.0",
"putil-merge": "^3.8.0",
"putil-promisify": "^1.8.5",
"putil-varhelpers": "^1.6.4"
}
},
"power-tasks": {
"version": "0.8.0",
"resolved": "https://registry.npmjs.org/power-tasks/-/power-tasks-0.8.0.tgz",
"integrity": "sha512-HhMcx+y5UkzlEmKslruz8uAU2Yq8CODJsFEMFsYMrGp5EzKpkNHGu0RNvBqyewKJDZHPNKtBSULsEAxMqQIBVQ==",
"requires": {
"debug": "^4.3.4",
"doublylinked": "^2.5.1",
"strict-typed-events": "^2.2.0"
}
},
"putil-merge": {
"version": "3.8.0",
"resolved": "https://registry.npmjs.org/putil-merge/-/putil-merge-3.8.0.tgz",
"integrity": "sha512-5tXPafJawWFoYZWLhkYXZ7IC/qkI45HgJsgv36lJBeq3qjFZfUITZE01CmWUBIlIn9f1yDiikqgYERARhVmgrg=="
},
"putil-promisify": {
"version": "1.8.5",
"resolved": "https://registry.npmjs.org/putil-promisify/-/putil-promisify-1.8.5.tgz",
"integrity": "sha512-DItclasWWZokvpq3Aiaq0iV7WC8isP/0o/8mhC0yV6CQ781N/7NQHA1VyOm6hfpeFEwIQoo1C4Yjc5eH0q6Jbw=="
},
"putil-varhelpers": {
"version": "1.6.4",
"resolved": "https://registry.npmjs.org/putil-varhelpers/-/putil-varhelpers-1.6.4.tgz",
"integrity": "sha512-nM2nO1HS2yJUyPgz0grd2XZAM0Spr6/tt6F4xXeNDjByV00BV2mq6lZ+sDff8WIfQBI9Hn1Czh93H1xBvKESxw=="
},
"strict-typed-events": {
"version": "2.2.0",
"resolved": "https://registry.npmjs.org/strict-typed-events/-/strict-typed-events-2.2.0.tgz",
"integrity": "sha512-yvHRtEfRRV7TJWi9cLhMt4Mb12JtAwXXONltUlLCA3fRB0LRy94B4E4e2gIlXzT5nZHTZVpOjJNOshri3LZ5bw==",
"requires": {
"putil-promisify": "^1.8.5",
"ts-gems": "^2.0.0"
}
},
"ts-gems": {
"version": "2.1.0",
"resolved": "https://registry.npmjs.org/ts-gems/-/ts-gems-2.1.0.tgz",
"integrity": "sha512-5IqiG4nq1tsOhYPc4CwxA6bsE+TfU6uAABzf6bH4sdElgXpt/mlStvIYedvvtU7BM1+RRJxCaTLaaVFcCqNaiA==",
"requires": {}
},
"typescript": {
"version": "4.7.4",
"resolved": "https://registry.npmjs.org/typescript/-/typescript-4.7.4.tgz",
"integrity": "sha512-C0WQT0gezHuw6AdY1M2jxUO83Rjf0HP7Sk1DtXj6j1EwkQNZrHAg2XPWlq62oqEhYvONq5pkC2Y9oPljWToLmQ==",
"peer": true
}
}
}


@@ -0,0 +1,6 @@
{
"type": "module",
"dependencies": {
"postgresql-client": "^2.1.3"
}
}