From b8eb908a3df34f437b4f123461b14b599be4a8b4 Mon Sep 17 00:00:00 2001 From: Kirill Bulatov Date: Tue, 13 Sep 2022 15:43:53 +0300 Subject: [PATCH] Rename old project name references --- Cargo.lock | 8 +- Cargo.toml | 2 +- Dockerfile | 16 +- compute_tools/Cargo.toml | 4 +- control_plane/Cargo.toml | 2 +- control_plane/simple.conf | 2 +- control_plane/src/bin/neon_local.rs | 34 +- control_plane/src/compute.rs | 20 +- control_plane/src/local_env.rs | 54 +-- control_plane/src/postgresql_conf.rs | 2 +- control_plane/src/safekeeper.rs | 8 +- control_plane/src/storage.rs | 36 +- docs/authentication.md | 4 +- docs/multitenancy.md | 18 +- docs/pageserver-services.md | 2 +- docs/pageserver-storage.md | 10 +- docs/pageserver-tenant-migration.md | 4 +- docs/rfcs/013-term-history.md | 2 +- docs/rfcs/cluster-size-limits.md | 8 +- docs/sourcetree.md | 11 +- libs/etcd_broker/src/subscription_key.rs | 26 +- libs/postgres_ffi/Cargo.toml | 2 +- libs/postgres_ffi/wal_craft/Cargo.toml | 2 +- libs/utils/Cargo.toml | 4 +- libs/utils/benches/benchmarks.rs | 4 +- libs/utils/src/auth.rs | 14 +- libs/utils/src/http/endpoint.rs | 6 +- libs/utils/src/http/mod.rs | 2 +- libs/utils/src/{zid.rs => id.rs} | 88 ++-- libs/utils/src/lib.rs | 2 +- libs/utils/src/postgres_backend.rs | 12 +- libs/utils/src/postgres_backend_async.rs | 4 +- pageserver/Cargo.toml | 8 +- pageserver/src/bin/dump_layerfile.rs | 2 +- pageserver/src/bin/pageserver.rs | 4 +- pageserver/src/bin/update_metadata.rs | 2 +- pageserver/src/config.rs | 16 +- pageserver/src/http/models.rs | 24 +- pageserver/src/http/routes.rs | 42 +- pageserver/src/import_datadir.rs | 2 +- pageserver/src/lib.rs | 10 +- pageserver/src/metrics.rs | 6 +- pageserver/src/page_cache.rs | 14 +- pageserver/src/page_service.rs | 76 ++-- pageserver/src/pgdatadir_mapping.rs | 8 +- pageserver/src/repository.rs | 4 +- pageserver/src/storage_sync.rs | 72 ++-- pageserver/src/storage_sync/delete.rs | 8 +- pageserver/src/storage_sync/download.rs | 40 +- pageserver/src/storage_sync/index.rs | 42 +- pageserver/src/storage_sync/upload.rs | 12 +- pageserver/src/task_mgr.rs | 18 +- pageserver/src/tenant.rs | 100 ++--- pageserver/src/tenant/delta_layer.rs | 84 ++-- pageserver/src/tenant/ephemeral_file.rs | 36 +- pageserver/src/tenant/image_layer.rs | 84 ++-- pageserver/src/tenant/inmemory_layer.rs | 38 +- pageserver/src/tenant/layer_map.rs | 2 +- pageserver/src/tenant/metadata.rs | 20 +- pageserver/src/tenant/storage_layer.rs | 10 +- pageserver/src/tenant/timeline.rs | 16 +- pageserver/src/tenant_config.rs | 6 +- pageserver/src/tenant_mgr.rs | 36 +- pageserver/src/tenant_tasks.rs | 12 +- pageserver/src/timelines.rs | 14 +- pageserver/src/virtual_file.rs | 38 +- pageserver/src/walingest.rs | 26 +- .../src/walreceiver/connection_manager.rs | 18 +- .../src/walreceiver/walreceiver_connection.rs | 8 +- pageserver/src/walrecord.rs | 16 +- pageserver/src/walredo.rs | 80 ++-- pgxn/neon/inmem_smgr.c | 2 +- pgxn/neon/libpagestore.c | 49 ++- pgxn/neon/neon.c | 2 - pgxn/neon/pagestore_client.h | 153 ++++--- pgxn/neon/pagestore_smgr.c | 408 +++++++++--------- pgxn/neon/relsize_cache.c | 6 +- pgxn/neon/walproposer.c | 114 ++--- pgxn/neon/walproposer.h | 38 +- pgxn/neon_test_utils/neontest.c | 32 +- proxy/Cargo.toml | 2 +- pyproject.toml | 2 +- safekeeper/Cargo.toml | 6 +- safekeeper/src/bin/safekeeper.rs | 6 +- safekeeper/src/broker.rs | 10 +- safekeeper/src/control_file.rs | 18 +- safekeeper/src/control_file_upgrade.rs | 25 +- safekeeper/src/handler.rs | 30 +- safekeeper/src/http/models.rs | 4 +- 
safekeeper/src/http/routes.rs | 14 +- safekeeper/src/json_ctrl.rs | 4 +- safekeeper/src/lib.rs | 6 +- safekeeper/src/metrics.rs | 4 +- safekeeper/src/receive_wal.rs | 2 +- safekeeper/src/safekeeper.rs | 36 +- safekeeper/src/send_wal.rs | 8 +- safekeeper/src/timeline.rs | 49 ++- safekeeper/src/wal_backup.rs | 14 +- safekeeper/src/wal_storage.rs | 8 +- scripts/generate_and_push_perf_report.sh | 8 +- scripts/perf_report_template.html | 4 +- test_runner/README.md | 2 +- test_runner/fixtures/benchmark_fixture.py | 6 +- test_runner/fixtures/neon_fixtures.py | 132 +++--- test_runner/fixtures/types.py | 14 +- test_runner/performance/README.md | 2 +- test_runner/regress/test_ancestor_branch.py | 8 +- test_runner/regress/test_auth.py | 4 +- test_runner/regress/test_branch_behind.py | 4 +- test_runner/regress/test_broken_timeline.py | 4 +- test_runner/regress/test_fullbackup.py | 4 +- test_runner/regress/test_gc_aggressive.py | 8 +- test_runner/regress/test_import.py | 12 +- test_runner/regress/test_neon_cli.py | 8 +- test_runner/regress/test_old_request_lsn.py | 4 +- test_runner/regress/test_pageserver_api.py | 24 +- test_runner/regress/test_pitr_gc.py | 4 +- test_runner/regress/test_remote_storage.py | 8 +- test_runner/regress/test_tenant_detach.py | 8 +- test_runner/regress/test_tenant_relocation.py | 22 +- test_runner/regress/test_tenant_tasks.py | 10 +- test_runner/regress/test_tenants.py | 4 +- .../test_tenants_with_remote_storage.py | 8 +- test_runner/regress/test_timeline_delete.py | 6 +- test_runner/regress/test_timeline_size.py | 8 +- test_runner/regress/test_wal_acceptor.py | 64 ++- .../regress/test_wal_acceptor_async.py | 10 +- test_runner/regress/test_wal_restore.py | 4 +- 128 files changed, 1428 insertions(+), 1495 deletions(-) rename libs/utils/src/{zid.rs => id.rs} (76%) diff --git a/Cargo.lock b/Cargo.lock index e9ebcdc5ac..d4234d2b00 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2048,7 +2048,7 @@ dependencies = [ [[package]] name = "postgres" version = "0.19.2" -source = "git+https://github.com/zenithdb/rust-postgres.git?rev=d052ee8b86fff9897c77b0fe89ea9daba0e1fa38#d052ee8b86fff9897c77b0fe89ea9daba0e1fa38" +source = "git+https://github.com/neondatabase/rust-postgres.git?rev=d052ee8b86fff9897c77b0fe89ea9daba0e1fa38#d052ee8b86fff9897c77b0fe89ea9daba0e1fa38" dependencies = [ "bytes", "fallible-iterator", @@ -2061,7 +2061,7 @@ dependencies = [ [[package]] name = "postgres-protocol" version = "0.6.4" -source = "git+https://github.com/zenithdb/rust-postgres.git?rev=d052ee8b86fff9897c77b0fe89ea9daba0e1fa38#d052ee8b86fff9897c77b0fe89ea9daba0e1fa38" +source = "git+https://github.com/neondatabase/rust-postgres.git?rev=d052ee8b86fff9897c77b0fe89ea9daba0e1fa38#d052ee8b86fff9897c77b0fe89ea9daba0e1fa38" dependencies = [ "base64", "byteorder", @@ -2079,7 +2079,7 @@ dependencies = [ [[package]] name = "postgres-types" version = "0.2.3" -source = "git+https://github.com/zenithdb/rust-postgres.git?rev=d052ee8b86fff9897c77b0fe89ea9daba0e1fa38#d052ee8b86fff9897c77b0fe89ea9daba0e1fa38" +source = "git+https://github.com/neondatabase/rust-postgres.git?rev=d052ee8b86fff9897c77b0fe89ea9daba0e1fa38#d052ee8b86fff9897c77b0fe89ea9daba0e1fa38" dependencies = [ "bytes", "fallible-iterator", @@ -3295,7 +3295,7 @@ dependencies = [ [[package]] name = "tokio-postgres" version = "0.7.6" -source = "git+https://github.com/zenithdb/rust-postgres.git?rev=d052ee8b86fff9897c77b0fe89ea9daba0e1fa38#d052ee8b86fff9897c77b0fe89ea9daba0e1fa38" +source = 
"git+https://github.com/neondatabase/rust-postgres.git?rev=d052ee8b86fff9897c77b0fe89ea9daba0e1fa38#d052ee8b86fff9897c77b0fe89ea9daba0e1fa38" dependencies = [ "async-trait", "byteorder", diff --git a/Cargo.toml b/Cargo.toml index 1936b261f7..bc2a705558 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -70,4 +70,4 @@ lto = true # This is only needed for proxy's tests. # TODO: we should probably fork `tokio-postgres-rustls` instead. [patch.crates-io] -tokio-postgres = { git = "https://github.com/zenithdb/rust-postgres.git", rev="d052ee8b86fff9897c77b0fe89ea9daba0e1fa38" } +tokio-postgres = { git = "https://github.com/neondatabase/rust-postgres.git", rev="d052ee8b86fff9897c77b0fe89ea9daba0e1fa38" } diff --git a/Dockerfile b/Dockerfile index 3e173f4d5b..eacb88d168 100644 --- a/Dockerfile +++ b/Dockerfile @@ -22,7 +22,7 @@ RUN set -e \ && rm -rf pg_install/v15/build \ && tar -C pg_install/v14 -czf /home/nonroot/postgres_install.tar.gz . -# Build zenith binaries +# Build neon binaries FROM $REPOSITORY/$IMAGE:$TAG AS build WORKDIR /home/nonroot ARG GIT_VERSION=local @@ -60,12 +60,12 @@ RUN set -e \ openssl \ ca-certificates \ && rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/* \ - && useradd -d /data zenith \ - && chown -R zenith:zenith /data + && useradd -d /data neon \ + && chown -R neon:neon /data -COPY --from=build --chown=zenith:zenith /home/nonroot/target/release/pageserver /usr/local/bin -COPY --from=build --chown=zenith:zenith /home/nonroot/target/release/safekeeper /usr/local/bin -COPY --from=build --chown=zenith:zenith /home/nonroot/target/release/proxy /usr/local/bin +COPY --from=build --chown=neon:neon /home/nonroot/target/release/pageserver /usr/local/bin +COPY --from=build --chown=neon:neon /home/nonroot/target/release/safekeeper /usr/local/bin +COPY --from=build --chown=neon:neon /home/nonroot/target/release/proxy /usr/local/bin # v14 is default for now COPY --from=pg-build /home/nonroot/pg_install/v14 /usr/local/ @@ -73,7 +73,7 @@ COPY --from=pg-build /home/nonroot/postgres_install.tar.gz /data/ # By default, pageserver uses `.neon/` working directory in WORKDIR, so create one and fill it with the dummy config. # Now, when `docker run ... pageserver` is run, it can start without errors, yet will have some default dummy values. 
-RUN mkdir -p /data/.neon/ && chown -R zenith:zenith /data/.neon/ \ +RUN mkdir -p /data/.neon/ && chown -R neon:neon /data/.neon/ \ && /usr/local/bin/pageserver -D /data/.neon/ --init \ -c "id=1234" \ -c "broker_endpoints=['http://etcd:2379']" \ @@ -82,7 +82,7 @@ RUN mkdir -p /data/.neon/ && chown -R zenith:zenith /data/.neon/ \ -c "listen_http_addr='0.0.0.0:9898'" VOLUME ["/data"] -USER zenith +USER neon EXPOSE 6400 EXPOSE 9898 CMD ["/bin/bash"] diff --git a/compute_tools/Cargo.toml b/compute_tools/Cargo.toml index 78b85d0e79..b13f7f191d 100644 --- a/compute_tools/Cargo.toml +++ b/compute_tools/Cargo.toml @@ -10,12 +10,12 @@ clap = "3.0" env_logger = "0.9" hyper = { version = "0.14", features = ["full"] } log = { version = "0.4", features = ["std", "serde"] } -postgres = { git = "https://github.com/zenithdb/rust-postgres.git", rev="d052ee8b86fff9897c77b0fe89ea9daba0e1fa38" } +postgres = { git = "https://github.com/neondatabase/rust-postgres.git", rev="d052ee8b86fff9897c77b0fe89ea9daba0e1fa38" } regex = "1" serde = { version = "1.0", features = ["derive"] } serde_json = "1" tar = "0.4" tokio = { version = "1.17", features = ["macros", "rt", "rt-multi-thread"] } -tokio-postgres = { git = "https://github.com/zenithdb/rust-postgres.git", rev="d052ee8b86fff9897c77b0fe89ea9daba0e1fa38" } +tokio-postgres = { git = "https://github.com/neondatabase/rust-postgres.git", rev="d052ee8b86fff9897c77b0fe89ea9daba0e1fa38" } url = "2.2.2" workspace_hack = { version = "0.1", path = "../workspace_hack" } diff --git a/control_plane/Cargo.toml b/control_plane/Cargo.toml index 8a79a6e566..ab9df8534c 100644 --- a/control_plane/Cargo.toml +++ b/control_plane/Cargo.toml @@ -8,7 +8,7 @@ clap = "3.0" comfy-table = "5.0.1" git-version = "0.3.5" tar = "0.4.38" -postgres = { git = "https://github.com/zenithdb/rust-postgres.git", rev="d052ee8b86fff9897c77b0fe89ea9daba0e1fa38" } +postgres = { git = "https://github.com/neondatabase/rust-postgres.git", rev="d052ee8b86fff9897c77b0fe89ea9daba0e1fa38" } serde = { version = "1.0", features = ["derive"] } serde_with = "1.12.0" toml = "0.5" diff --git a/control_plane/simple.conf b/control_plane/simple.conf index 925e2f14ee..ae60657400 100644 --- a/control_plane/simple.conf +++ b/control_plane/simple.conf @@ -1,4 +1,4 @@ -# Minimal zenith environment with one safekeeper. This is equivalent to the built-in +# Minimal neon environment with one safekeeper. This is equivalent to the built-in # defaults that you get with no --config [pageserver] listen_pg_addr = '127.0.0.1:64000' diff --git a/control_plane/src/bin/neon_local.rs b/control_plane/src/bin/neon_local.rs index e3160db53b..e16fd8764a 100644 --- a/control_plane/src/bin/neon_local.rs +++ b/control_plane/src/bin/neon_local.rs @@ -27,10 +27,10 @@ use std::process::exit; use std::str::FromStr; use utils::{ auth::{Claims, Scope}, + id::{NodeId, TenantId, TenantTimelineId, TimelineId}, lsn::Lsn, postgres_backend::AuthType, project_git_version, - zid::{NodeId, ZTenantId, ZTenantTimelineId, ZTimelineId}, }; // Default id of a safekeeper node, if not specified on the command line. @@ -72,7 +72,7 @@ struct TimelineTreeEl { /// Name, recovered from neon config mappings pub name: Option, /// Holds all direct children of this timeline referenced using `timeline_id`. 
- pub children: BTreeSet, + pub children: BTreeSet, } // Main entry point for the 'neon_local' CLI utility @@ -321,7 +321,7 @@ fn main() -> Result<()> { /// fn print_timelines_tree( timelines: Vec, - mut timeline_name_mappings: HashMap, + mut timeline_name_mappings: HashMap, ) -> Result<()> { let mut timelines_hash = timelines .iter() @@ -332,7 +332,7 @@ fn print_timelines_tree( info: t.clone(), children: BTreeSet::new(), name: timeline_name_mappings - .remove(&ZTenantTimelineId::new(t.tenant_id, t.timeline_id)), + .remove(&TenantTimelineId::new(t.tenant_id, t.timeline_id)), }, ) }) @@ -374,7 +374,7 @@ fn print_timeline( nesting_level: usize, is_last: &[bool], timeline: &TimelineTreeEl, - timelines: &HashMap, + timelines: &HashMap, ) -> Result<()> { let local_remote = match (timeline.info.local.as_ref(), timeline.info.remote.as_ref()) { (None, None) => unreachable!("in this case no info for a timeline is found"), @@ -452,8 +452,8 @@ fn print_timeline( /// Connects to the pageserver to query this information. fn get_timeline_infos( env: &local_env::LocalEnv, - tenant_id: &ZTenantId, -) -> Result> { + tenant_id: &TenantId, +) -> Result> { Ok(PageServerNode::from_env(env) .timeline_list(tenant_id)? .into_iter() @@ -462,7 +462,7 @@ fn get_timeline_infos( } // Helper function to parse --tenant_id option, or get the default from config file -fn get_tenant_id(sub_match: &ArgMatches, env: &local_env::LocalEnv) -> anyhow::Result { +fn get_tenant_id(sub_match: &ArgMatches, env: &local_env::LocalEnv) -> anyhow::Result { if let Some(tenant_id_from_arguments) = parse_tenant_id(sub_match).transpose() { tenant_id_from_arguments } else if let Some(default_id) = env.default_tenant_id { @@ -472,18 +472,18 @@ fn get_tenant_id(sub_match: &ArgMatches, env: &local_env::LocalEnv) -> anyhow::R } } -fn parse_tenant_id(sub_match: &ArgMatches) -> anyhow::Result> { +fn parse_tenant_id(sub_match: &ArgMatches) -> anyhow::Result> { sub_match .value_of("tenant-id") - .map(ZTenantId::from_str) + .map(TenantId::from_str) .transpose() .context("Failed to parse tenant id from the argument string") } -fn parse_timeline_id(sub_match: &ArgMatches) -> anyhow::Result> { +fn parse_timeline_id(sub_match: &ArgMatches) -> anyhow::Result> { sub_match .value_of("timeline-id") - .map(ZTimelineId::from_str) + .map(TimelineId::from_str) .transpose() .context("Failed to parse timeline id from the argument string") } @@ -504,9 +504,9 @@ fn handle_init(init_match: &ArgMatches) -> anyhow::Result { let mut env = LocalEnv::parse_config(&toml_file).context("Failed to create neon configuration")?; env.init().context("Failed to initialize neon repository")?; - - // default_tenantid was generated by the `env.init()` call above - let initial_tenant_id = env.default_tenant_id.unwrap(); + let initial_tenant_id = env + .default_tenant_id + .expect("default_tenant_id should be generated by the `env.init()` call above"); // Initialize pageserver, create initial tenant and timeline. 
let pageserver = PageServerNode::from_env(&env); @@ -759,7 +759,7 @@ fn handle_pg(pg_match: &ArgMatches, env: &local_env::LocalEnv) -> Result<()> { }; let branch_name = timeline_name_mappings - .get(&ZTenantTimelineId::new(tenant_id, node.timeline_id)) + .get(&TenantTimelineId::new(tenant_id, node.timeline_id)) .map(|name| name.as_str()) .unwrap_or("?"); @@ -810,7 +810,7 @@ fn handle_pg(pg_match: &ArgMatches, env: &local_env::LocalEnv) -> Result<()> { let node = cplane.nodes.get(&(tenant_id, node_name.to_owned())); - let auth_token = if matches!(env.pageserver.auth_type, AuthType::ZenithJWT) { + let auth_token = if matches!(env.pageserver.auth_type, AuthType::NeonJWT) { let claims = Claims::new(Some(tenant_id), Scope::Tenant); Some(env.generate_auth_token(&claims)?) diff --git a/control_plane/src/compute.rs b/control_plane/src/compute.rs index 57b5e1e10a..b678d620df 100644 --- a/control_plane/src/compute.rs +++ b/control_plane/src/compute.rs @@ -13,9 +13,9 @@ use std::time::Duration; use anyhow::{Context, Result}; use utils::{ connstring::connection_host_port, + id::{TenantId, TimelineId}, lsn::Lsn, postgres_backend::AuthType, - zid::{ZTenantId, ZTimelineId}, }; use crate::local_env::LocalEnv; @@ -28,7 +28,7 @@ use crate::storage::PageServerNode; pub struct ComputeControlPlane { base_port: u16, pageserver: Arc, - pub nodes: BTreeMap<(ZTenantId, String), Arc>, + pub nodes: BTreeMap<(TenantId, String), Arc>, env: LocalEnv, } @@ -76,9 +76,9 @@ impl ComputeControlPlane { pub fn new_node( &mut self, - tenant_id: ZTenantId, + tenant_id: TenantId, name: &str, - timeline_id: ZTimelineId, + timeline_id: TimelineId, lsn: Option, port: Option, ) -> Result> { @@ -114,9 +114,9 @@ pub struct PostgresNode { pub env: LocalEnv, pageserver: Arc, is_test: bool, - pub timeline_id: ZTimelineId, + pub timeline_id: TimelineId, pub lsn: Option, // if it's a read-only node. None for primary - pub tenant_id: ZTenantId, + pub tenant_id: TenantId, uses_wal_proposer: bool, } @@ -148,8 +148,8 @@ impl PostgresNode { // Read a few options from the config file let context = format!("in config file {}", cfg_path_str); let port: u16 = conf.parse_field("port", &context)?; - let timeline_id: ZTimelineId = conf.parse_field("neon.timeline_id", &context)?; - let tenant_id: ZTenantId = conf.parse_field("neon.tenant_id", &context)?; + let timeline_id: TimelineId = conf.parse_field("neon.timeline_id", &context)?; + let tenant_id: TenantId = conf.parse_field("neon.tenant_id", &context)?; let uses_wal_proposer = conf.get("neon.safekeepers").is_some(); // parse recovery_target_lsn, if any @@ -292,7 +292,7 @@ impl PostgresNode { // variable during compute pg startup. It is done this way because // otherwise user will be able to retrieve the value using SHOW // command or pg_settings - let password = if let AuthType::ZenithJWT = auth_type { + let password = if let AuthType::NeonJWT = auth_type { "$ZENITH_AUTH_TOKEN" } else { "" @@ -301,7 +301,7 @@ impl PostgresNode { // Also note that not all parameters are supported here. Because in compute we substitute $ZENITH_AUTH_TOKEN // We parse this string and build it back with token from env var, and for simplicity rebuild // uses only needed variables namely host, port, user, password. 
- format!("postgresql://no_user:{}@{}:{}", password, host, port) + format!("postgresql://no_user:{password}@{host}:{port}") }; conf.append("shared_preload_libraries", "neon"); conf.append_line(""); diff --git a/control_plane/src/local_env.rs b/control_plane/src/local_env.rs index c4a61dbd7b..7afaad26dc 100644 --- a/control_plane/src/local_env.rs +++ b/control_plane/src/local_env.rs @@ -14,8 +14,8 @@ use std::path::{Path, PathBuf}; use std::process::{Command, Stdio}; use utils::{ auth::{encode_from_key_file, Claims, Scope}, + id::{NodeId, TenantId, TenantTimelineId, TimelineId}, postgres_backend::AuthType, - zid::{NodeId, ZTenantId, ZTenantTimelineId, ZTimelineId}, }; use crate::safekeeper::SafekeeperNode; @@ -48,13 +48,13 @@ pub struct LocalEnv { // Path to pageserver binary. #[serde(default)] - pub zenith_distrib_dir: PathBuf, + pub neon_distrib_dir: PathBuf, - // Default tenant ID to use with the 'zenith' command line utility, when - // --tenantid is not explicitly specified. + // Default tenant ID to use with the 'neon_local' command line utility, when + // --tenant_id is not explicitly specified. #[serde(default)] #[serde_as(as = "Option")] - pub default_tenant_id: Option, + pub default_tenant_id: Option, // used to issue tokens during e.g pg start #[serde(default)] @@ -69,11 +69,11 @@ pub struct LocalEnv { /// Keep human-readable aliases in memory (and persist them to config), to hide ZId hex strings from the user. #[serde(default)] - // A `HashMap>` would be more appropriate here, + // A `HashMap>` would be more appropriate here, // but deserialization into a generic toml object as `toml::Value::try_from` fails with an error. // https://toml.io/en/v1.0.0 does not contain a concept of "a table inside another table". #[serde_as(as = "HashMap<_, Vec<(DisplayFromStr, DisplayFromStr)>>")] - branch_name_mappings: HashMap>, + branch_name_mappings: HashMap>, } /// Etcd broker config for cluster internal communication. @@ -204,20 +204,20 @@ impl LocalEnv { } pub fn pageserver_bin(&self) -> anyhow::Result { - Ok(self.zenith_distrib_dir.join("pageserver")) + Ok(self.neon_distrib_dir.join("pageserver")) } pub fn safekeeper_bin(&self) -> anyhow::Result { - Ok(self.zenith_distrib_dir.join("safekeeper")) + Ok(self.neon_distrib_dir.join("safekeeper")) } pub fn pg_data_dirs_path(&self) -> PathBuf { self.base_data_dir.join("pgdatadirs").join("tenants") } - pub fn pg_data_dir(&self, tenantid: &ZTenantId, branch_name: &str) -> PathBuf { + pub fn pg_data_dir(&self, tenant_id: &TenantId, branch_name: &str) -> PathBuf { self.pg_data_dirs_path() - .join(tenantid.to_string()) + .join(tenant_id.to_string()) .join(branch_name) } @@ -233,8 +233,8 @@ impl LocalEnv { pub fn register_branch_mapping( &mut self, branch_name: String, - tenant_id: ZTenantId, - timeline_id: ZTimelineId, + tenant_id: TenantId, + timeline_id: TimelineId, ) -> anyhow::Result<()> { let existing_values = self .branch_name_mappings @@ -260,22 +260,22 @@ impl LocalEnv { pub fn get_branch_timeline_id( &self, branch_name: &str, - tenant_id: ZTenantId, - ) -> Option { + tenant_id: TenantId, + ) -> Option { self.branch_name_mappings .get(branch_name)? 
.iter() .find(|(mapped_tenant_id, _)| mapped_tenant_id == &tenant_id) .map(|&(_, timeline_id)| timeline_id) - .map(ZTimelineId::from) + .map(TimelineId::from) } - pub fn timeline_name_mappings(&self) -> HashMap { + pub fn timeline_name_mappings(&self) -> HashMap { self.branch_name_mappings .iter() .flat_map(|(name, tenant_timelines)| { tenant_timelines.iter().map(|&(tenant_id, timeline_id)| { - (ZTenantTimelineId::new(tenant_id, timeline_id), name.clone()) + (TenantTimelineId::new(tenant_id, timeline_id), name.clone()) }) }) .collect() @@ -299,14 +299,14 @@ impl LocalEnv { } } - // Find zenith binaries. - if env.zenith_distrib_dir == Path::new("") { - env.zenith_distrib_dir = env::current_exe()?.parent().unwrap().to_owned(); + // Find neon binaries. + if env.neon_distrib_dir == Path::new("") { + env.neon_distrib_dir = env::current_exe()?.parent().unwrap().to_owned(); } // If no initial tenant ID was given, generate it. if env.default_tenant_id.is_none() { - env.default_tenant_id = Some(ZTenantId::generate()); + env.default_tenant_id = Some(TenantId::generate()); } env.base_data_dir = base_path(); @@ -320,12 +320,12 @@ impl LocalEnv { if !repopath.exists() { bail!( - "Zenith config is not found in {}. You need to run 'neon_local init' first", + "Neon config is not found in {}. You need to run 'neon_local init' first", repopath.to_str().unwrap() ); } - // TODO: check that it looks like a zenith repository + // TODO: check that it looks like a neon repository // load and parse file let config = fs::read_to_string(repopath.join("config"))?; @@ -404,10 +404,10 @@ impl LocalEnv { ); } for binary in ["pageserver", "safekeeper"] { - if !self.zenith_distrib_dir.join(binary).exists() { + if !self.neon_distrib_dir.join(binary).exists() { bail!( - "Can't find binary '{binary}' in zenith distrib dir '{}'", - self.zenith_distrib_dir.display() + "Can't find binary '{binary}' in neon distrib dir '{}'", + self.neon_distrib_dir.display() ); } } diff --git a/control_plane/src/postgresql_conf.rs b/control_plane/src/postgresql_conf.rs index a71108da01..34dc769e78 100644 --- a/control_plane/src/postgresql_conf.rs +++ b/control_plane/src/postgresql_conf.rs @@ -2,7 +2,7 @@ /// Module for parsing postgresql.conf file. /// /// NOTE: This doesn't implement the full, correct postgresql.conf syntax. Just -/// enough to extract a few settings we need in Zenith, assuming you don't do +/// enough to extract a few settings we need in Neon, assuming you don't do /// funny stuff like include-directives or funny escaping. 
use anyhow::{bail, Context, Result}; use once_cell::sync::Lazy; diff --git a/control_plane/src/safekeeper.rs b/control_plane/src/safekeeper.rs index 2cc1ae7853..600a9ffe05 100644 --- a/control_plane/src/safekeeper.rs +++ b/control_plane/src/safekeeper.rs @@ -17,7 +17,7 @@ use thiserror::Error; use utils::{ connstring::connection_address, http::error::HttpErrorBody, - zid::{NodeId, ZTenantId, ZTimelineId}, + id::{NodeId, TenantId, TimelineId}, }; use crate::local_env::{LocalEnv, SafekeeperConf}; @@ -269,7 +269,7 @@ impl SafekeeperNode { fn http_request(&self, method: Method, url: U) -> RequestBuilder { // TODO: authentication - //if self.env.auth_type == AuthType::ZenithJWT { + //if self.env.auth_type == AuthType::NeonJWT { // builder = builder.bearer_auth(&self.env.safekeeper_auth_token) //} self.http_client.request(method, url) @@ -284,8 +284,8 @@ impl SafekeeperNode { pub fn timeline_create( &self, - tenant_id: ZTenantId, - timeline_id: ZTimelineId, + tenant_id: TenantId, + timeline_id: TimelineId, peer_ids: Vec, ) -> Result<()> { Ok(self diff --git a/control_plane/src/storage.rs b/control_plane/src/storage.rs index 9fdab5f88c..d2cc5e096c 100644 --- a/control_plane/src/storage.rs +++ b/control_plane/src/storage.rs @@ -21,9 +21,9 @@ use thiserror::Error; use utils::{ connstring::connection_address, http::error::HttpErrorBody, + id::{TenantId, TimelineId}, lsn::Lsn, postgres_backend::AuthType, - zid::{ZTenantId, ZTimelineId}, }; use crate::local_env::LocalEnv; @@ -83,7 +83,7 @@ pub struct PageServerNode { impl PageServerNode { pub fn from_env(env: &LocalEnv) -> PageServerNode { - let password = if env.pageserver.auth_type == AuthType::ZenithJWT { + let password = if env.pageserver.auth_type == AuthType::NeonJWT { &env.pageserver.auth_token } else { "" @@ -109,10 +109,10 @@ impl PageServerNode { pub fn initialize( &self, - create_tenant: Option, - initial_timeline_id: Option, + create_tenant: Option, + initial_timeline_id: Option, config_overrides: &[&str], - ) -> anyhow::Result { + ) -> anyhow::Result { let id = format!("id={}", self.env.pageserver.id); // FIXME: the paths should be shell-escaped to handle paths with spaces, quotas etc. 
let pg_distrib_dir_param = @@ -173,9 +173,9 @@ impl PageServerNode { fn try_init_timeline( &self, - new_tenant_id: Option, - new_timeline_id: Option, - ) -> anyhow::Result { + new_tenant_id: Option, + new_timeline_id: Option, + ) -> anyhow::Result { let initial_tenant_id = self.tenant_create(new_tenant_id, HashMap::new())?; let initial_timeline_info = self.timeline_create(initial_tenant_id, new_timeline_id, None, None)?; @@ -345,7 +345,7 @@ impl PageServerNode { fn http_request(&self, method: Method, url: U) -> RequestBuilder { let mut builder = self.http_client.request(method, url); - if self.env.pageserver.auth_type == AuthType::ZenithJWT { + if self.env.pageserver.auth_type == AuthType::NeonJWT { builder = builder.bearer_auth(&self.env.pageserver.auth_token) } builder @@ -368,9 +368,9 @@ impl PageServerNode { pub fn tenant_create( &self, - new_tenant_id: Option, + new_tenant_id: Option, settings: HashMap<&str, &str>, - ) -> anyhow::Result { + ) -> anyhow::Result { self.http_request(Method::POST, format!("{}/tenant", self.http_base_url)) .json(&TenantCreateRequest { new_tenant_id, @@ -422,7 +422,7 @@ impl PageServerNode { }) } - pub fn tenant_config(&self, tenant_id: ZTenantId, settings: HashMap<&str, &str>) -> Result<()> { + pub fn tenant_config(&self, tenant_id: TenantId, settings: HashMap<&str, &str>) -> Result<()> { self.http_request(Method::PUT, format!("{}/tenant/config", self.http_base_url)) .json(&TenantConfigRequest { tenant_id, @@ -471,7 +471,7 @@ impl PageServerNode { Ok(()) } - pub fn timeline_list(&self, tenant_id: &ZTenantId) -> anyhow::Result> { + pub fn timeline_list(&self, tenant_id: &TenantId) -> anyhow::Result> { let timeline_infos: Vec = self .http_request( Method::GET, @@ -486,10 +486,10 @@ impl PageServerNode { pub fn timeline_create( &self, - tenant_id: ZTenantId, - new_timeline_id: Option, + tenant_id: TenantId, + new_timeline_id: Option, ancestor_start_lsn: Option, - ancestor_timeline_id: Option, + ancestor_timeline_id: Option, ) -> anyhow::Result { self.http_request( Method::POST, @@ -524,8 +524,8 @@ impl PageServerNode { /// * `pg_wal` - if there's any wal to import: (end lsn, path to `pg_wal.tar`) pub fn timeline_import( &self, - tenant_id: ZTenantId, - timeline_id: ZTimelineId, + tenant_id: TenantId, + timeline_id: TimelineId, base: (Lsn, PathBuf), pg_wal: Option<(Lsn, PathBuf)>, ) -> anyhow::Result<()> { diff --git a/docs/authentication.md b/docs/authentication.md index 7200ffc62f..9748a7ab0d 100644 --- a/docs/authentication.md +++ b/docs/authentication.md @@ -2,14 +2,14 @@ ### Overview -Current state of authentication includes usage of JWT tokens in communication between compute and pageserver and between CLI and pageserver. JWT token is signed using RSA keys. CLI generates a key pair during call to `zenith init`. Using following openssl commands: +Current state of authentication includes usage of JWT tokens in communication between compute and pageserver and between CLI and pageserver. JWT token is signed using RSA keys. CLI generates a key pair during call to `neon_local init`. Using following openssl commands: ```bash openssl genrsa -out private_key.pem 2048 openssl rsa -in private_key.pem -pubout -outform PEM -out public_key.pem ``` -CLI also generates signed token and saves it in the config for later access to pageserver. Now authentication is optional. Pageserver has two variables in config: `auth_validation_public_key_path` and `auth_type`, so when auth type present and set to `ZenithJWT` pageserver will require authentication for connections. 
Actual JWT is passed in password field of connection string. There is a caveat for psql, it silently truncates passwords to 100 symbols, so to correctly pass JWT via psql you have to either use PGPASSWORD environment variable, or store password in psql config file.
+The CLI also generates a signed token and saves it in the config for later access to the pageserver. Authentication is currently optional. The pageserver has two config variables, `auth_validation_public_key_path` and `auth_type`: when `auth_type` is present and set to `NeonJWT`, the pageserver requires authentication for connections. The actual JWT is passed in the password field of the connection string. There is a caveat for psql: it silently truncates passwords to 100 symbols, so to pass a JWT via psql correctly, either use the PGPASSWORD environment variable or store the password in the psql config file.

Currently there is no authentication between compute and safekeepers, because this communication layer is under heavy refactoring. After this refactoring support for authentication will be added there too. Now safekeeper supports "hardcoded" token passed via environment variable to be able to use callmemaybe command in pageserver.

diff --git a/docs/multitenancy.md b/docs/multitenancy.md
index c697ae93cd..35c69e69a1 100644
--- a/docs/multitenancy.md
+++ b/docs/multitenancy.md
@@ -2,26 +2,26 @@

### Overview

-Zenith supports multitenancy. One pageserver can serve multiple tenants at once. Tenants can be managed via zenith CLI. During page server setup tenant can be created using ```zenith init --create-tenant``` Also tenants can be added into the system on the fly without pageserver restart. This can be done using the following cli command: ```zenith tenant create``` Tenants use random identifiers which can be represented as a 32 symbols hexadecimal string. So zenith tenant create accepts desired tenant id as an optional argument. The concept of timelines/branches is working independently per tenant.
+Neon supports multitenancy: one pageserver can serve multiple tenants at once. Tenants can be managed via the neon_local CLI. During pageserver setup, a tenant can be created using ```neon_local init --create-tenant```. Tenants can also be added into the system on the fly, without a pageserver restart, using the cli command ```neon_local tenant create```. Tenants use random identifiers which can be represented as 32-symbol hexadecimal strings, so ```neon_local tenant create``` accepts the desired tenant id as an optional argument. The concept of timelines/branches works independently per tenant.

### Tenants in other commands

-By default during `zenith init` new tenant is created on the pageserver. Newly created tenant's id is saved to cli config, so other commands can use it automatically if no direct argument `--tenantid=` is provided. So generally tenantid more frequently appears in internal pageserver interface. Its commands take tenantid argument to distinguish to which tenant operation should be applied. CLI support creation of new tenants.
+By default, `neon_local init` creates a new tenant on the pageserver. The newly created tenant's id is saved to the cli config, so other commands can use it automatically when no explicit `--tenant_id=` argument is provided. The tenant_id appears more often in the internal pageserver interface, whose commands take a tenant_id argument to identify which tenant an operation applies to. The CLI supports creating new tenants.
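As a quick illustration of those 32-symbol hexadecimal ids: they are the `Display` form of the renamed `TenantId` newtype from `libs/utils/src/id.rs` later in this patch. A minimal round-trip sketch, assuming only the public API visible in these hunks (`generate`, the `FromStr` impl, and `Display`):

```rust
use std::str::FromStr;
use utils::id::TenantId;

fn main() {
    // `neon_local tenant create` without an argument generates a random 128-bit id...
    let generated = TenantId::generate();
    // ...whose Display form is a 32-symbol hex string.
    assert_eq!(generated.to_string().len(), 32);

    // An id supplied explicitly, as in the CLI examples below, parses back losslessly.
    let parsed = TenantId::from_str("ee6016ec31116c1b7c33dfdfca38892f").unwrap();
    assert_eq!(parsed.to_string(), "ee6016ec31116c1b7c33dfdfca38892f");
}
```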
Examples for cli:

```sh
-zenith tenant list
+neon_local tenant list

-zenith tenant create // generates new id
+neon_local tenant create // generates new id

-zenith tenant create ee6016ec31116c1b7c33dfdfca38892f
+neon_local tenant create ee6016ec31116c1b7c33dfdfca38892f

-zenith pg create main // default tenant from zenith init
+neon_local pg create main // default tenant from neon_local init

-zenith pg create main --tenantid=ee6016ec31116c1b7c33dfdfca38892f
+neon_local pg create main --tenant_id=ee6016ec31116c1b7c33dfdfca38892f

-zenith branch --tenantid=ee6016ec31116c1b7c33dfdfca38892f
+neon_local branch --tenant_id=ee6016ec31116c1b7c33dfdfca38892f
```

### Data layout

@@ -56,4 +56,4 @@ Tenant id is passed to postgres via GUC the same way as the timeline. Tenant id

### Safety

-For now particular tenant can only appear on a particular pageserver. Set of safekeepers are also pinned to particular (tenantid, timeline) pair so there can only be one writer for particular (tenantid, timeline).
+For now, a particular tenant can only appear on a particular pageserver. The set of safekeepers is also pinned to a particular (tenant_id, timeline_id) pair, so there can be only one writer for a given (tenant_id, timeline_id).

diff --git a/docs/pageserver-services.md b/docs/pageserver-services.md
index 07a91f543d..fc259c8a5f 100644
--- a/docs/pageserver-services.md
+++ b/docs/pageserver-services.md
@@ -109,7 +109,7 @@ Repository

The repository stores all the page versions, or WAL records needed to
reconstruct them. Each tenant has a separate Repository, which is
-stored in the .neon/tenants/<tenantid> directory.
+stored in the .neon/tenants/<tenant_id> directory.

Repository is an abstract trait, defined in `repository.rs`. It is
implemented by the LayeredRepository object in

diff --git a/docs/pageserver-storage.md b/docs/pageserver-storage.md
index 8d03e68ac7..77e7ff35bc 100644
--- a/docs/pageserver-storage.md
+++ b/docs/pageserver-storage.md
@@ -123,7 +123,7 @@ The files are called "layer files". Each layer file covers a range of
keys, and a range of LSNs (or a single LSN, in case of image layers). You
can think of it as a rectangle in the two-dimensional key-LSN space. The
layer files for each timeline are stored in the timeline's subdirectory under
-`.neon/tenants/<tenantid>/timelines`.
+`.neon/tenants/<tenant_id>/timelines`.

There are two kind of layer files: images, and delta layers. An image file
contains a snapshot of all keys at a particular LSN, whereas a delta file

@@ -351,7 +351,7 @@ branch. Note: It doesn't make any difference if the child branch is
created when the end of the main branch was at LSN 250, or later when
the tip of the main branch had already moved on. The latter case, creating a
-branch at a historic LSN, is how we support PITR in Zenith.
+branch at a historic LSN, is how we support PITR in Neon.

# Garbage collection

@@ -396,9 +396,9 @@ table:
main/orders_200_300 DELETE
main/orders_300 STILL NEEDED BY orders_300_400
main/orders_300_400 KEEP, NEWER THAN GC HORIZON
- main/orders_400 ..
- main/orders_400_500 ..
- main/orders_500 ..
+ main/orders_400 ..
+ main/orders_400_500 ..
+ main/orders_500 ..
main/customers_100 DELETE
main/customers_100_200 DELETE
main/customers_200 KEEP, NO NEWER VERSION

diff --git a/docs/pageserver-tenant-migration.md b/docs/pageserver-tenant-migration.md
index a846213ab2..5fb2097030 100644
--- a/docs/pageserver-tenant-migration.md
+++ b/docs/pageserver-tenant-migration.md
@@ -9,7 +9,7 @@ This feature allows to migrate a timeline from one pageserver to another by util
Pageserver implements two new http handlers: timeline attach and timeline detach.
Timeline migration is performed in a following way:
1. Timeline attach is called on a target pageserver. This asks pageserver to download latest checkpoint uploaded to s3.
-2. For now it is necessary to manually initialize replication stream via callmemaybe call so target pageserver initializes replication from safekeeper (it is desired to avoid this and initialize replication directly in attach handler, but this requires some refactoring (probably [#997](https://github.com/zenithdb/zenith/issues/997)/[#1049](https://github.com/zenithdb/zenith/issues/1049))
+2. For now it is necessary to manually initialize the replication stream via a callmemaybe call, so that the target pageserver starts replication from the safekeeper. (It is desirable to avoid this and initialize replication directly in the attach handler, but that requires some refactoring; probably [#997](https://github.com/neondatabase/neon/issues/997)/[#1049](https://github.com/neondatabase/neon/issues/1049).)
3. Replication state can be tracked via timeline detail pageserver call.
4. Compute node should be restarted with new pageserver connection string. Issue with multiple compute nodes for one timeline is handled on the safekeeper consensus level. So this is not a problem here. Currently responsibility for rescheduling the compute with updated config lies on external coordinator (console).
5. Timeline is detached from old pageserver. On disk data is removed.
@@ -18,5 +18,5 @@
### Implementation details

Now safekeeper needs to track which pageserver it is replicating to. This introduces complications into replication code:
-* We need to distinguish different pageservers (now this is done by connection string which is imperfect and is covered here: https://github.com/zenithdb/zenith/issues/1105). Callmemaybe subscription management also needs to track that (this is already implemented).
+* We need to distinguish different pageservers (now this is done by connection string, which is imperfect and is covered here: https://github.com/neondatabase/neon/issues/1105). Callmemaybe subscription management also needs to track that (this is already implemented).
* We need to track which pageserver is the primary. This is needed to avoid reconnections to non primary pageservers. Because we shouldn't reconnect to them when they decide to stop their walreceiver. I e this can appear when there is a load on the compute and we are trying to detach timeline from old pageserver. In this case callmemaybe will try to reconnect to it because replication termination condition is not met (page server with active compute could never catch up to the latest lsn, so there is always some wal tail)

diff --git a/docs/rfcs/013-term-history.md b/docs/rfcs/013-term-history.md
index 59833526c5..7e815abf73 100644
--- a/docs/rfcs/013-term-history.md
+++ b/docs/rfcs/013-term-history.md
@@ -70,7 +70,7 @@ two options.

...start sending WAL conservatively since the horizon (1.1), and truncate
obsolete part of WAL only when recovery is finished, i.e. epochStartLsn (4) is
-reached, i.e. 2.3 transferred -- that's what https://github.com/zenithdb/zenith/pull/505 proposes.
+reached, i.e. 2.3 transferred -- that's what https://github.com/neondatabase/neon/pull/505 proposes.

Then the following is possible:

diff --git a/docs/rfcs/cluster-size-limits.md b/docs/rfcs/cluster-size-limits.md
index bd4cb9ef32..4ef006d9a6 100644
--- a/docs/rfcs/cluster-size-limits.md
+++ b/docs/rfcs/cluster-size-limits.md
@@ -15,7 +15,7 @@ The stateless compute node that performs validation is separate from the storage
Limit the maximum size of a PostgreSQL instance to limit free tier users (and other tiers in the future).
First of all, this is needed to control our free tier production costs.
-Another reason to limit resources is risk management — we haven't (fully) tested and optimized zenith for big clusters,
+Another reason to limit resources is risk management — we haven't (fully) tested and optimized neon for big clusters,
so we don't want to give users access to the functionality that we don't think is ready.

## Components

@@ -43,20 +43,20 @@ Then this size should be reported to compute node.
`current_timeline_size` value is included in the walreceiver's custom feedback message: `ReplicationFeedback.`
-(PR about protocol changes https://github.com/zenithdb/zenith/pull/1037).
+(PR about protocol changes https://github.com/neondatabase/neon/pull/1037).
This message is received by the safekeeper and propagated to compute node as a part of `AppendResponse`.

Finally, when compute node receives the `current_timeline_size` from safekeeper (or from pageserver directly), it updates the global variable.
-And then every zenith_extend() operation checks if limit is reached `(current_timeline_size > neon.max_cluster_size)` and throws `ERRCODE_DISK_FULL` error if so.
+And then every neon_extend() operation checks if limit is reached `(current_timeline_size > neon.max_cluster_size)` and throws `ERRCODE_DISK_FULL` error if so.
(see Postgres error codes [https://www.postgresql.org/docs/devel/errcodes-appendix.html](https://www.postgresql.org/docs/devel/errcodes-appendix.html))

TODO: We can allow autovacuum processes to bypass this check, simply checking `IsAutoVacuumWorkerProcess()`.
It would be nice to allow manual VACUUM and VACUUM FULL to bypass the check, but it's uneasy to distinguish these operations at the low level.

See issues
https://github.com/neondatabase/neon/issues/1245
-https://github.com/zenithdb/zenith/issues/1445
+https://github.com/neondatabase/neon/issues/1445

TODO: We should warn users if the limit is soon to be reached.

diff --git a/docs/sourcetree.md b/docs/sourcetree.md
index 339a90e0ba..c1a860f126 100644
--- a/docs/sourcetree.md
+++ b/docs/sourcetree.md
@@ -10,7 +10,7 @@ Intended to be used in integration tests and in CLI tools for local installation
`/docs`:

-Documentation of the Zenith features and concepts.
+Documentation of the Neon features and concepts.
Now it is mostly dev documentation.

`/monitoring`:

TODO

`/pageserver`:

-Zenith storage service.
+Neon storage service.
The pageserver has a few different duties:

- Store and manage the data.
@@ -54,7 +54,7 @@ PostgreSQL extension that contains functions needed for testing and debugging.

`/safekeeper`:

-The zenith WAL service that receives WAL from a primary compute nodes and streams it to the pageserver.
+The neon WAL service that receives WAL from primary compute nodes and streams it to the pageserver.
It acts as a holding area and redistribution center for recently generated WAL. For more detailed info, see [walservice.md](./walservice.md) @@ -64,11 +64,6 @@ The workspace_hack crate exists only to pin down some dependencies. We use [cargo-hakari](https://crates.io/crates/cargo-hakari) for automation. -`/zenith` - -Main entry point for the 'zenith' CLI utility. -TODO: Doesn't it belong to control_plane? - `/libs`: Unites granular neon helper crates under the hood. diff --git a/libs/etcd_broker/src/subscription_key.rs b/libs/etcd_broker/src/subscription_key.rs index 8f8579f4e5..a11d2ab106 100644 --- a/libs/etcd_broker/src/subscription_key.rs +++ b/libs/etcd_broker/src/subscription_key.rs @@ -11,7 +11,7 @@ use std::{fmt::Display, str::FromStr}; use once_cell::sync::Lazy; use regex::{Captures, Regex}; -use utils::zid::{NodeId, ZTenantId, ZTenantTimelineId}; +use utils::id::{NodeId, TenantId, TenantTimelineId}; /// The subscription kind to the timeline updates from safekeeper. #[derive(Debug, Clone, PartialEq, Eq, Hash)] @@ -30,13 +30,13 @@ pub enum SubscriptionKind { /// Get every update in etcd. All, /// Get etcd updates for any timeiline of a certain tenant, affected by any operation from any node kind. - TenantTimelines(ZTenantId), + TenantTimelines(TenantId), /// Get etcd updates for a certain timeline of a tenant, affected by any operation from any node kind. - Timeline(ZTenantTimelineId), + Timeline(TenantTimelineId), /// Get etcd timeline updates, specific to a certain node kind. - Node(ZTenantTimelineId, NodeKind), + Node(TenantTimelineId, NodeKind), /// Get etcd timeline updates for a certain operation on specific nodes. - Operation(ZTenantTimelineId, NodeKind, OperationKind), + Operation(TenantTimelineId, NodeKind, OperationKind), } /// All kinds of nodes, able to write into etcd. @@ -67,7 +67,7 @@ static SUBSCRIPTION_FULL_KEY_REGEX: Lazy = Lazy::new(|| { /// No other etcd keys are considered during system's work. #[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)] pub struct SubscriptionFullKey { - pub id: ZTenantTimelineId, + pub id: TenantTimelineId, pub node_kind: NodeKind, pub operation: OperationKind, pub node_id: NodeId, @@ -83,7 +83,7 @@ impl SubscriptionKey { } /// Subscribes to a given timeline info updates from safekeepers. - pub fn sk_timeline_info(cluster_prefix: String, timeline: ZTenantTimelineId) -> Self { + pub fn sk_timeline_info(cluster_prefix: String, timeline: TenantTimelineId) -> Self { Self { cluster_prefix, kind: SubscriptionKind::Operation( @@ -97,7 +97,7 @@ impl SubscriptionKey { /// Subscribes to all timeine updates during specific operations, running on the corresponding nodes. 
pub fn operation( cluster_prefix: String, - timeline: ZTenantTimelineId, + timeline: TenantTimelineId, node_kind: NodeKind, operation: OperationKind, ) -> Self { @@ -175,7 +175,7 @@ impl FromStr for SubscriptionFullKey { }; Ok(Self { - id: ZTenantTimelineId::new( + id: TenantTimelineId::new( parse_capture(&key_captures, 1)?, parse_capture(&key_captures, 2)?, ), @@ -247,7 +247,7 @@ impl FromStr for SkOperationKind { #[cfg(test)] mod tests { - use utils::zid::ZTimelineId; + use utils::id::TimelineId; use super::*; @@ -256,9 +256,9 @@ mod tests { let prefix = "neon"; let node_kind = NodeKind::Safekeeper; let operation_kind = OperationKind::Safekeeper(SkOperationKind::WalBackup); - let tenant_id = ZTenantId::generate(); - let timeline_id = ZTimelineId::generate(); - let id = ZTenantTimelineId::new(tenant_id, timeline_id); + let tenant_id = TenantId::generate(); + let timeline_id = TimelineId::generate(); + let id = TenantTimelineId::new(tenant_id, timeline_id); let node_id = NodeId(1); let timeline_subscription_keys = [ diff --git a/libs/postgres_ffi/Cargo.toml b/libs/postgres_ffi/Cargo.toml index 5b9ecb7394..2b453fa0dc 100644 --- a/libs/postgres_ffi/Cargo.toml +++ b/libs/postgres_ffi/Cargo.toml @@ -21,7 +21,7 @@ workspace_hack = { version = "0.1", path = "../../workspace_hack" } [dev-dependencies] env_logger = "0.9" -postgres = { git = "https://github.com/zenithdb/rust-postgres.git", rev="d052ee8b86fff9897c77b0fe89ea9daba0e1fa38" } +postgres = { git = "https://github.com/neondatabase/rust-postgres.git", rev="d052ee8b86fff9897c77b0fe89ea9daba0e1fa38" } wal_craft = { path = "wal_craft" } [build-dependencies] diff --git a/libs/postgres_ffi/wal_craft/Cargo.toml b/libs/postgres_ffi/wal_craft/Cargo.toml index 114f08113b..f848ac1273 100644 --- a/libs/postgres_ffi/wal_craft/Cargo.toml +++ b/libs/postgres_ffi/wal_craft/Cargo.toml @@ -11,6 +11,6 @@ clap = "3.0" env_logger = "0.9" log = "0.4" once_cell = "1.13.0" -postgres = { git = "https://github.com/zenithdb/rust-postgres.git", rev="d052ee8b86fff9897c77b0fe89ea9daba0e1fa38" } +postgres = { git = "https://github.com/neondatabase/rust-postgres.git", rev="d052ee8b86fff9897c77b0fe89ea9daba0e1fa38" } postgres_ffi = { path = "../" } tempfile = "3.2" diff --git a/libs/utils/Cargo.toml b/libs/utils/Cargo.toml index ce55277f29..ef2aa8b305 100644 --- a/libs/utils/Cargo.toml +++ b/libs/utils/Cargo.toml @@ -10,8 +10,8 @@ bincode = "1.3" bytes = "1.0.1" hyper = { version = "0.14.7", features = ["full"] } pin-project-lite = "0.2.7" -postgres = { git = "https://github.com/zenithdb/rust-postgres.git", rev="d052ee8b86fff9897c77b0fe89ea9daba0e1fa38" } -postgres-protocol = { git = "https://github.com/zenithdb/rust-postgres.git", rev="d052ee8b86fff9897c77b0fe89ea9daba0e1fa38" } +postgres = { git = "https://github.com/neondatabase/rust-postgres.git", rev="d052ee8b86fff9897c77b0fe89ea9daba0e1fa38" } +postgres-protocol = { git = "https://github.com/neondatabase/rust-postgres.git", rev="d052ee8b86fff9897c77b0fe89ea9daba0e1fa38" } routerify = "3" serde = { version = "1.0", features = ["derive"] } serde_json = "1" diff --git a/libs/utils/benches/benchmarks.rs b/libs/utils/benches/benchmarks.rs index 0339939934..badcb5774e 100644 --- a/libs/utils/benches/benchmarks.rs +++ b/libs/utils/benches/benchmarks.rs @@ -1,11 +1,11 @@ #![allow(unused)] use criterion::{criterion_group, criterion_main, Criterion}; -use utils::zid; +use utils::id; pub fn bench_zid_stringify(c: &mut Criterion) { // Can only use public methods. 
- let ztl = zid::ZTenantTimelineId::generate(); + let ztl = id::TenantTimelineId::generate(); c.bench_function("zid.to_string", |b| { b.iter(|| { diff --git a/libs/utils/src/auth.rs b/libs/utils/src/auth.rs index 3bdabacad4..b190b0d1c5 100644 --- a/libs/utils/src/auth.rs +++ b/libs/utils/src/auth.rs @@ -14,7 +14,7 @@ use jsonwebtoken::{ use serde::{Deserialize, Serialize}; use serde_with::{serde_as, DisplayFromStr}; -use crate::zid::ZTenantId; +use crate::id::TenantId; const JWT_ALGORITHM: Algorithm = Algorithm::RS256; @@ -30,23 +30,23 @@ pub enum Scope { pub struct Claims { #[serde(default)] #[serde_as(as = "Option")] - pub tenant_id: Option, + pub tenant_id: Option, pub scope: Scope, } impl Claims { - pub fn new(tenant_id: Option, scope: Scope) -> Self { + pub fn new(tenant_id: Option, scope: Scope) -> Self { Self { tenant_id, scope } } } -pub fn check_permission(claims: &Claims, tenantid: Option) -> Result<()> { - match (&claims.scope, tenantid) { +pub fn check_permission(claims: &Claims, tenant_id: Option) -> Result<()> { + match (&claims.scope, tenant_id) { (Scope::Tenant, None) => { bail!("Attempt to access management api with tenant scope. Permission denied") } - (Scope::Tenant, Some(tenantid)) => { - if claims.tenant_id.unwrap() != tenantid { + (Scope::Tenant, Some(tenant_id)) => { + if claims.tenant_id.unwrap() != tenant_id { bail!("Tenant id mismatch. Permission denied") } Ok(()) diff --git a/libs/utils/src/http/endpoint.rs b/libs/utils/src/http/endpoint.rs index 69bf5ef87a..4066791e2b 100644 --- a/libs/utils/src/http/endpoint.rs +++ b/libs/utils/src/http/endpoint.rs @@ -1,6 +1,6 @@ use crate::auth::{self, Claims, JwtAuth}; use crate::http::error; -use crate::zid::ZTenantId; +use crate::id::TenantId; use anyhow::anyhow; use hyper::header::AUTHORIZATION; use hyper::{header::CONTENT_TYPE, Body, Request, Response, Server}; @@ -137,9 +137,9 @@ pub fn auth_middleware( }) } -pub fn check_permission(req: &Request, tenantid: Option) -> Result<(), ApiError> { +pub fn check_permission(req: &Request, tenant_id: Option) -> Result<(), ApiError> { match req.context::() { - Some(claims) => Ok(auth::check_permission(&claims, tenantid) + Some(claims) => Ok(auth::check_permission(&claims, tenant_id) .map_err(|err| ApiError::Forbidden(err.to_string()))?), None => Ok(()), // claims is None because auth is disabled } diff --git a/libs/utils/src/http/mod.rs b/libs/utils/src/http/mod.rs index 0bb53ef51d..74ed6bb5b2 100644 --- a/libs/utils/src/http/mod.rs +++ b/libs/utils/src/http/mod.rs @@ -3,6 +3,6 @@ pub mod error; pub mod json; pub mod request; -/// Current fast way to apply simple http routing in various Zenith binaries. +/// Current fast way to apply simple http routing in various Neon binaries. /// Re-exported for sake of uniform approach, that could be later replaced with better alternatives, if needed. pub use routerify::{ext::RequestExt, RouterBuilder, RouterService}; diff --git a/libs/utils/src/zid.rs b/libs/utils/src/id.rs similarity index 76% rename from libs/utils/src/zid.rs rename to libs/utils/src/id.rs index 6da5355f61..059ce69ca4 100644 --- a/libs/utils/src/zid.rs +++ b/libs/utils/src/id.rs @@ -4,7 +4,7 @@ use hex::FromHex; use rand::Rng; use serde::{Deserialize, Serialize}; -/// Zenith ID is a 128-bit random ID. +/// Neon ID is a 128-bit random ID. /// Used to represent various identifiers. Provides handy utility methods and impls. 
/// /// NOTE: It (de)serializes as an array of hex bytes, so the string representation would look @@ -13,13 +13,13 @@ use serde::{Deserialize, Serialize}; /// Use `#[serde_as(as = "DisplayFromStr")]` to (de)serialize it as hex string instead: `ad50847381e248feaac9876cc71ae418`. /// Check the `serde_with::serde_as` documentation for options for more complex types. #[derive(Clone, Copy, PartialEq, Eq, Hash, Serialize, Deserialize, PartialOrd, Ord)] -struct ZId([u8; 16]); +struct Id([u8; 16]); -impl ZId { - pub fn get_from_buf(buf: &mut dyn bytes::Buf) -> ZId { +impl Id { + pub fn get_from_buf(buf: &mut dyn bytes::Buf) -> Id { let mut arr = [0u8; 16]; buf.copy_to_slice(&mut arr); - ZId::from(arr) + Id::from(arr) } pub fn as_arr(&self) -> [u8; 16] { @@ -29,7 +29,7 @@ impl ZId { pub fn generate() -> Self { let mut tli_buf = [0u8; 16]; rand::thread_rng().fill(&mut tli_buf); - ZId::from(tli_buf) + Id::from(tli_buf) } fn hex_encode(&self) -> String { @@ -44,54 +44,54 @@ impl ZId { } } -impl FromStr for ZId { +impl FromStr for Id { type Err = hex::FromHexError; - fn from_str(s: &str) -> Result { + fn from_str(s: &str) -> Result { Self::from_hex(s) } } -// this is needed for pretty serialization and deserialization of ZId's using serde integration with hex crate -impl FromHex for ZId { +// this is needed for pretty serialization and deserialization of Id's using serde integration with hex crate +impl FromHex for Id { type Error = hex::FromHexError; fn from_hex>(hex: T) -> Result { let mut buf: [u8; 16] = [0u8; 16]; hex::decode_to_slice(hex, &mut buf)?; - Ok(ZId(buf)) + Ok(Id(buf)) } } -impl AsRef<[u8]> for ZId { +impl AsRef<[u8]> for Id { fn as_ref(&self) -> &[u8] { &self.0 } } -impl From<[u8; 16]> for ZId { +impl From<[u8; 16]> for Id { fn from(b: [u8; 16]) -> Self { - ZId(b) + Id(b) } } -impl fmt::Display for ZId { +impl fmt::Display for Id { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.write_str(&self.hex_encode()) } } -impl fmt::Debug for ZId { +impl fmt::Debug for Id { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.write_str(&self.hex_encode()) } } -macro_rules! zid_newtype { +macro_rules! id_newtype { ($t:ident) => { impl $t { pub fn get_from_buf(buf: &mut dyn bytes::Buf) -> $t { - $t(ZId::get_from_buf(buf)) + $t(Id::get_from_buf(buf)) } pub fn as_arr(&self) -> [u8; 16] { @@ -99,11 +99,11 @@ macro_rules! zid_newtype { } pub fn generate() -> Self { - $t(ZId::generate()) + $t(Id::generate()) } pub const fn from_array(b: [u8; 16]) -> Self { - $t(ZId(b)) + $t(Id(b)) } } @@ -111,14 +111,14 @@ macro_rules! zid_newtype { type Err = hex::FromHexError; fn from_str(s: &str) -> Result<$t, Self::Err> { - let value = ZId::from_str(s)?; + let value = Id::from_str(s)?; Ok($t(value)) } } impl From<[u8; 16]> for $t { fn from(b: [u8; 16]) -> Self { - $t(ZId::from(b)) + $t(Id::from(b)) } } @@ -126,7 +126,7 @@ macro_rules! zid_newtype { type Error = hex::FromHexError; fn from_hex>(hex: T) -> Result { - Ok($t(ZId::from_hex(hex)?)) + Ok($t(Id::from_hex(hex)?)) } } @@ -150,7 +150,7 @@ macro_rules! zid_newtype { }; } -/// Zenith timeline IDs are different from PostgreSQL timeline +/// Neon timeline IDs are different from PostgreSQL timeline /// IDs. They serve a similar purpose though: they differentiate /// between different "histories" of the same cluster. However, /// PostgreSQL timeline IDs are a bit cumbersome, because they are only @@ -158,7 +158,7 @@ macro_rules! zid_newtype { /// timeline history. 
/// Those limitations mean that we cannot generate a
/// new PostgreSQL timeline ID by just generating a random number. And
/// that in turn is problematic for the "pull/push" workflow, where you
-/// have a local copy of a zenith repository, and you periodically sync
+/// have a local copy of a Neon repository, and you periodically sync
/// the local changes with a remote server. When you work "detached"
/// from the remote server, you cannot create a PostgreSQL timeline ID
/// that's guaranteed to be different from all existing timelines in
/// the remote server.
@@ -168,55 +168,55 @@
/// branches? If they pick the same one, and later try to push the
/// branches to the same remote server, they will get mixed up.
///
-/// To avoid those issues, Zenith has its own concept of timelines that
+/// To avoid those issues, Neon has its own concept of timelines that
/// is separate from PostgreSQL timelines, and doesn't have those
-/// limitations. A zenith timeline is identified by a 128-bit ID, which
+/// limitations. A Neon timeline is identified by a 128-bit ID, which
/// is usually printed out as a hex string.
///
/// NOTE: It (de)serializes as an array of hex bytes, so the string representation would look
/// like `[173,80,132,115,129,226,72,254,170,201,135,108,199,26,228,24]`.
-/// See [`ZId`] for alternative ways to serialize it.
+/// See [`Id`] for alternative ways to serialize it.
#[derive(Clone, Copy, PartialEq, Eq, Hash, Ord, PartialOrd, Serialize, Deserialize)]
-pub struct ZTimelineId(ZId);
+pub struct TimelineId(Id);

-zid_newtype!(ZTimelineId);
+id_newtype!(TimelineId);

-/// Zenith Tenant Id represents identifiar of a particular tenant.
+/// Neon Tenant Id represents the identifier of a particular tenant.
/// Is used for distinguishing requests and data belonging to different users.
///
/// NOTE: It (de)serializes as an array of hex bytes, so the string representation would look
/// like `[173,80,132,115,129,226,72,254,170,201,135,108,199,26,228,24]`.
-/// See [`ZId`] for alternative ways to serialize it.
+/// See [`Id`] for alternative ways to serialize it.
#[derive(Clone, Copy, PartialEq, Eq, Hash, Serialize, Deserialize, PartialOrd, Ord)]
-pub struct ZTenantId(ZId);
+pub struct TenantId(Id);

-zid_newtype!(ZTenantId);
+id_newtype!(TenantId);

-// A pair uniquely identifying Zenith instance.
+// A pair uniquely identifying Neon instance.
#[derive(Debug, Clone, Copy, PartialOrd, Ord, PartialEq, Eq, Hash, Serialize, Deserialize)] -pub struct ZTenantTimelineId { - pub tenant_id: ZTenantId, - pub timeline_id: ZTimelineId, +pub struct TenantTimelineId { + pub tenant_id: TenantId, + pub timeline_id: TimelineId, } -impl ZTenantTimelineId { - pub fn new(tenant_id: ZTenantId, timeline_id: ZTimelineId) -> Self { - ZTenantTimelineId { +impl TenantTimelineId { + pub fn new(tenant_id: TenantId, timeline_id: TimelineId) -> Self { + TenantTimelineId { tenant_id, timeline_id, } } pub fn generate() -> Self { - Self::new(ZTenantId::generate(), ZTimelineId::generate()) + Self::new(TenantId::generate(), TimelineId::generate()) } pub fn empty() -> Self { - Self::new(ZTenantId::from([0u8; 16]), ZTimelineId::from([0u8; 16])) + Self::new(TenantId::from([0u8; 16]), TimelineId::from([0u8; 16])) } } -impl fmt::Display for ZTenantTimelineId { +impl fmt::Display for TenantTimelineId { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}/{}", self.tenant_id, self.timeline_id) } diff --git a/libs/utils/src/lib.rs b/libs/utils/src/lib.rs index caa7ac6c09..2c80556446 100644 --- a/libs/utils/src/lib.rs +++ b/libs/utils/src/lib.rs @@ -29,7 +29,7 @@ pub mod crashsafe_dir; pub mod auth; // utility functions and helper traits for unified unique id generation/serialization etc. -pub mod zid; +pub mod id; // http endpoint utils pub mod http; diff --git a/libs/utils/src/postgres_backend.rs b/libs/utils/src/postgres_backend.rs index 604eb75aaf..0498e0887b 100644 --- a/libs/utils/src/postgres_backend.rs +++ b/libs/utils/src/postgres_backend.rs @@ -63,7 +63,7 @@ pub enum AuthType { Trust, MD5, // This mimics postgres's AuthenticationCleartextPassword but instead of password expects JWT - ZenithJWT, + NeonJWT, } impl FromStr for AuthType { @@ -73,8 +73,8 @@ impl FromStr for AuthType { match s { "Trust" => Ok(Self::Trust), "MD5" => Ok(Self::MD5), - "ZenithJWT" => Ok(Self::ZenithJWT), - _ => bail!("invalid value \"{}\" for auth type", s), + "NeonJWT" => Ok(Self::NeonJWT), + _ => bail!("invalid value \"{s}\" for auth type"), } } } @@ -84,7 +84,7 @@ impl fmt::Display for AuthType { f.write_str(match self { AuthType::Trust => "Trust", AuthType::MD5 => "MD5", - AuthType::ZenithJWT => "ZenithJWT", + AuthType::NeonJWT => "NeonJWT", }) } } @@ -376,7 +376,7 @@ impl PostgresBackend { ))?; self.state = ProtoState::Authentication; } - AuthType::ZenithJWT => { + AuthType::NeonJWT => { self.write_message(&BeMessage::AuthenticationCleartextPassword)?; self.state = ProtoState::Authentication; } @@ -403,7 +403,7 @@ impl PostgresBackend { bail!("auth failed: {}", e); } } - AuthType::ZenithJWT => { + AuthType::NeonJWT => { let (_, jwt_response) = m.split_last().context("protocol violation")?; if let Err(e) = handler.check_auth_jwt(self, jwt_response) { diff --git a/libs/utils/src/postgres_backend_async.rs b/libs/utils/src/postgres_backend_async.rs index 383ad3742f..87e4478a99 100644 --- a/libs/utils/src/postgres_backend_async.rs +++ b/libs/utils/src/postgres_backend_async.rs @@ -346,7 +346,7 @@ impl PostgresBackend { ))?; self.state = ProtoState::Authentication; } - AuthType::ZenithJWT => { + AuthType::NeonJWT => { self.write_message(&BeMessage::AuthenticationCleartextPassword)?; self.state = ProtoState::Authentication; } @@ -374,7 +374,7 @@ impl PostgresBackend { bail!("auth failed: {}", e); } } - AuthType::ZenithJWT => { + AuthType::NeonJWT => { let (_, jwt_response) = m.split_last().context("protocol violation")?; if let Err(e) = handler.check_auth_jwt(self, 
jwt_response) { diff --git a/pageserver/Cargo.toml b/pageserver/Cargo.toml index e73c73bd9c..11d2d94906 100644 --- a/pageserver/Cargo.toml +++ b/pageserver/Cargo.toml @@ -27,10 +27,10 @@ clap = "3.0" daemonize = "0.4.1" tokio = { version = "1.17", features = ["process", "sync", "macros", "fs", "rt", "io-util", "time"] } tokio-util = { version = "0.7.3", features = ["io", "io-util"] } -postgres-types = { git = "https://github.com/zenithdb/rust-postgres.git", rev="d052ee8b86fff9897c77b0fe89ea9daba0e1fa38" } -postgres-protocol = { git = "https://github.com/zenithdb/rust-postgres.git", rev="d052ee8b86fff9897c77b0fe89ea9daba0e1fa38" } -postgres = { git = "https://github.com/zenithdb/rust-postgres.git", rev="d052ee8b86fff9897c77b0fe89ea9daba0e1fa38" } -tokio-postgres = { git = "https://github.com/zenithdb/rust-postgres.git", rev="d052ee8b86fff9897c77b0fe89ea9daba0e1fa38" } +postgres-types = { git = "https://github.com/neondatabase/rust-postgres.git", rev="d052ee8b86fff9897c77b0fe89ea9daba0e1fa38" } +postgres-protocol = { git = "https://github.com/neondatabase/rust-postgres.git", rev="d052ee8b86fff9897c77b0fe89ea9daba0e1fa38" } +postgres = { git = "https://github.com/neondatabase/rust-postgres.git", rev="d052ee8b86fff9897c77b0fe89ea9daba0e1fa38" } +tokio-postgres = { git = "https://github.com/neondatabase/rust-postgres.git", rev="d052ee8b86fff9897c77b0fe89ea9daba0e1fa38" } anyhow = { version = "1.0", features = ["backtrace"] } crc32c = "0.6.0" thiserror = "1.0" diff --git a/pageserver/src/bin/dump_layerfile.rs b/pageserver/src/bin/dump_layerfile.rs index 7e766ce859..f5247ee609 100644 --- a/pageserver/src/bin/dump_layerfile.rs +++ b/pageserver/src/bin/dump_layerfile.rs @@ -12,7 +12,7 @@ use utils::project_git_version; project_git_version!(GIT_VERSION); fn main() -> Result<()> { - let arg_matches = App::new("Zenith dump_layerfile utility") + let arg_matches = App::new("Neon dump_layerfile utility") .about("Dump contents of one layer file, for debugging") .version(GIT_VERSION) .arg( diff --git a/pageserver/src/bin/pageserver.rs b/pageserver/src/bin/pageserver.rs index 679c6f76e7..92d5eab379 100644 --- a/pageserver/src/bin/pageserver.rs +++ b/pageserver/src/bin/pageserver.rs @@ -40,7 +40,7 @@ fn version() -> String { } fn main() -> anyhow::Result<()> { - let arg_matches = App::new("Zenith page server") + let arg_matches = App::new("Neon page server") .about("Materializes WAL stream to pages and serves them to the postgres") .version(&*version()) .arg( @@ -293,7 +293,7 @@ fn start_pageserver(conf: &'static PageServerConf, daemonize: bool) -> Result<() // initialize authentication for incoming connections let auth = match &conf.auth_type { AuthType::Trust | AuthType::MD5 => None, - AuthType::ZenithJWT => { + AuthType::NeonJWT => { // unwrap is ok because check is performed when creating config, so path is set and file exists let key_path = conf.auth_validation_public_key_path.as_ref().unwrap(); Some(JwtAuth::from_key_path(key_path)?.into()) diff --git a/pageserver/src/bin/update_metadata.rs b/pageserver/src/bin/update_metadata.rs index 3339564b0f..16359c2532 100644 --- a/pageserver/src/bin/update_metadata.rs +++ b/pageserver/src/bin/update_metadata.rs @@ -11,7 +11,7 @@ use utils::{lsn::Lsn, project_git_version}; project_git_version!(GIT_VERSION); fn main() -> Result<()> { - let arg_matches = App::new("Zenith update metadata utility") + let arg_matches = App::new("Neon update metadata utility") .about("Dump or update metadata file") .version(GIT_VERSION) .arg( diff --git a/pageserver/src/config.rs 
b/pageserver/src/config.rs index 56171f46e3..75c71b09d2 100644 --- a/pageserver/src/config.rs +++ b/pageserver/src/config.rs @@ -15,8 +15,8 @@ use toml_edit; use toml_edit::{Document, Item}; use url::Url; use utils::{ + id::{NodeId, TenantId, TimelineId}, postgres_backend::AuthType, - zid::{NodeId, ZTenantId, ZTimelineId}, }; use crate::tenant::TIMELINES_SEGMENT_NAME; @@ -342,16 +342,16 @@ impl PageServerConf { self.workdir.join("tenants") } - pub fn tenant_path(&self, tenantid: &ZTenantId) -> PathBuf { - self.tenants_path().join(tenantid.to_string()) + pub fn tenant_path(&self, tenant_id: &TenantId) -> PathBuf { + self.tenants_path().join(tenant_id.to_string()) } - pub fn timelines_path(&self, tenantid: &ZTenantId) -> PathBuf { - self.tenant_path(tenantid).join(TIMELINES_SEGMENT_NAME) + pub fn timelines_path(&self, tenant_id: &TenantId) -> PathBuf { + self.tenant_path(tenant_id).join(TIMELINES_SEGMENT_NAME) } - pub fn timeline_path(&self, timelineid: &ZTimelineId, tenantid: &ZTenantId) -> PathBuf { - self.timelines_path(tenantid).join(timelineid.to_string()) + pub fn timeline_path(&self, timeline_id: &TimelineId, tenant_id: &TenantId) -> PathBuf { + self.timelines_path(tenant_id).join(timeline_id.to_string()) } // @@ -419,7 +419,7 @@ impl PageServerConf { let mut conf = builder.build().context("invalid config")?; - if conf.auth_type == AuthType::ZenithJWT { + if conf.auth_type == AuthType::NeonJWT { let auth_validation_public_key_path = conf .auth_validation_public_key_path .get_or_insert_with(|| workdir.join("auth_public_key.pem")); diff --git a/pageserver/src/http/models.rs b/pageserver/src/http/models.rs index 0ccf23776c..c0dc5b9677 100644 --- a/pageserver/src/http/models.rs +++ b/pageserver/src/http/models.rs @@ -3,8 +3,8 @@ use std::num::NonZeroU64; use serde::{Deserialize, Serialize}; use serde_with::{serde_as, DisplayFromStr}; use utils::{ + id::{NodeId, TenantId, TimelineId}, lsn::Lsn, - zid::{NodeId, ZTenantId, ZTimelineId}, }; use crate::tenant::TenantState; @@ -14,10 +14,10 @@ use crate::tenant::TenantState; pub struct TimelineCreateRequest { #[serde(default)] #[serde_as(as = "Option")] - pub new_timeline_id: Option, + pub new_timeline_id: Option, #[serde(default)] #[serde_as(as = "Option")] - pub ancestor_timeline_id: Option, + pub ancestor_timeline_id: Option, #[serde(default)] #[serde_as(as = "Option")] pub ancestor_start_lsn: Option, @@ -28,7 +28,7 @@ pub struct TimelineCreateRequest { pub struct TenantCreateRequest { #[serde(default)] #[serde_as(as = "Option")] - pub new_tenant_id: Option, + pub new_tenant_id: Option, pub checkpoint_distance: Option, pub checkpoint_timeout: Option, pub compaction_target_size: Option, @@ -46,7 +46,7 @@ pub struct TenantCreateRequest { #[serde_as] #[derive(Serialize, Deserialize)] #[serde(transparent)] -pub struct TenantCreateResponse(#[serde_as(as = "DisplayFromStr")] pub ZTenantId); +pub struct TenantCreateResponse(#[serde_as(as = "DisplayFromStr")] pub TenantId); #[derive(Serialize)] pub struct StatusResponse { @@ -54,7 +54,7 @@ pub struct StatusResponse { } impl TenantCreateRequest { - pub fn new(new_tenant_id: Option) -> TenantCreateRequest { + pub fn new(new_tenant_id: Option) -> TenantCreateRequest { TenantCreateRequest { new_tenant_id, ..Default::default() @@ -65,7 +65,7 @@ impl TenantCreateRequest { #[serde_as] #[derive(Serialize, Deserialize)] pub struct TenantConfigRequest { - pub tenant_id: ZTenantId, + pub tenant_id: TenantId, #[serde(default)] #[serde_as(as = "Option")] pub checkpoint_distance: Option, @@ -83,7 +83,7 @@ pub 
struct TenantConfigRequest { } impl TenantConfigRequest { - pub fn new(tenant_id: ZTenantId) -> TenantConfigRequest { + pub fn new(tenant_id: TenantId) -> TenantConfigRequest { TenantConfigRequest { tenant_id, checkpoint_distance: None, @@ -106,7 +106,7 @@ impl TenantConfigRequest { #[derive(Serialize, Deserialize, Clone)] pub struct TenantInfo { #[serde_as(as = "DisplayFromStr")] - pub id: ZTenantId, + pub id: TenantId, pub state: TenantState, pub current_physical_size: Option, // physical size is only included in `tenant_status` endpoint pub has_in_progress_downloads: Option, @@ -116,7 +116,7 @@ pub struct TenantInfo { #[derive(Debug, Serialize, Deserialize, Clone)] pub struct LocalTimelineInfo { #[serde_as(as = "Option")] - pub ancestor_timeline_id: Option, + pub ancestor_timeline_id: Option, #[serde_as(as = "Option")] pub ancestor_lsn: Option, #[serde_as(as = "DisplayFromStr")] @@ -154,9 +154,9 @@ pub struct RemoteTimelineInfo { #[derive(Debug, Serialize, Deserialize, Clone)] pub struct TimelineInfo { #[serde_as(as = "DisplayFromStr")] - pub tenant_id: ZTenantId, + pub tenant_id: TenantId, #[serde_as(as = "DisplayFromStr")] - pub timeline_id: ZTimelineId, + pub timeline_id: TimelineId, pub local: Option, pub remote: Option, } diff --git a/pageserver/src/http/routes.rs b/pageserver/src/http/routes.rs index 36ba2e9b66..2e49429f38 100644 --- a/pageserver/src/http/routes.rs +++ b/pageserver/src/http/routes.rs @@ -25,8 +25,8 @@ use utils::{ request::parse_request_param, RequestExt, RouterBuilder, }, + id::{TenantId, TenantTimelineId, TimelineId}, lsn::Lsn, - zid::{ZTenantId, ZTenantTimelineId, ZTimelineId}, }; struct State { @@ -128,10 +128,10 @@ fn local_timeline_info_from_timeline( } fn list_local_timelines( - tenant_id: ZTenantId, + tenant_id: TenantId, include_non_incremental_logical_size: bool, include_non_incremental_physical_size: bool, -) -> Result> { +) -> Result> { let tenant = tenant_mgr::get_tenant(tenant_id, true)?; let timelines = tenant.list_timelines(); @@ -156,7 +156,7 @@ async fn status_handler(request: Request) -> Result, ApiErr } async fn timeline_create_handler(mut request: Request) -> Result, ApiError> { - let tenant_id: ZTenantId = parse_request_param(&request, "tenant_id")?; + let tenant_id: TenantId = parse_request_param(&request, "tenant_id")?; let request_data: TimelineCreateRequest = json_request(&mut request).await?; check_permission(&request, Some(tenant_id))?; @@ -164,8 +164,8 @@ async fn timeline_create_handler(mut request: Request) -> Result { @@ -193,7 +193,7 @@ async fn timeline_create_handler(mut request: Request) -> Result) -> Result, ApiError> { - let tenant_id: ZTenantId = parse_request_param(&request, "tenant_id")?; + let tenant_id: TenantId = parse_request_param(&request, "tenant_id")?; let include_non_incremental_logical_size = query_param_present(&request, "include-non-incremental-logical-size"); let include_non_incremental_physical_size = @@ -229,7 +229,7 @@ async fn timeline_list_handler(request: Request) -> Result, .remote_index .read() .await - .timeline_entry(&ZTenantTimelineId { + .timeline_entry(&TenantTimelineId { tenant_id, timeline_id, }) @@ -257,8 +257,8 @@ fn query_param_present(request: &Request, param: &str) -> bool { } async fn timeline_detail_handler(request: Request) -> Result, ApiError> { - let tenant_id: ZTenantId = parse_request_param(&request, "tenant_id")?; - let timeline_id: ZTimelineId = parse_request_param(&request, "timeline_id")?; + let tenant_id: TenantId = parse_request_param(&request, "tenant_id")?; + let timeline_id: 
TimelineId = parse_request_param(&request, "timeline_id")?; let include_non_incremental_logical_size = query_param_present(&request, "include-non-incremental-logical-size"); let include_non_incremental_physical_size = @@ -289,7 +289,7 @@ async fn timeline_detail_handler(request: Request) -> Result) -> Result) -> Result, ApiError> { - let tenant_id: ZTenantId = parse_request_param(&request, "tenant_id")?; + let tenant_id: TenantId = parse_request_param(&request, "tenant_id")?; check_permission(&request, Some(tenant_id))?; info!("Handling tenant attach {tenant_id}"); @@ -402,8 +402,8 @@ async fn tenant_attach_handler(request: Request) -> Result, /// for details see comment to `storage_sync::gather_tenant_timelines_index_parts` async fn gather_tenant_timelines_index_parts( state: &State, - tenant_id: ZTenantId, -) -> anyhow::Result>> { + tenant_id: TenantId, +) -> anyhow::Result>> { let index_parts = match state.remote_storage.as_ref() { Some(storage) => { storage_sync::gather_tenant_timelines_index_parts(state.conf, storage, tenant_id).await @@ -425,8 +425,8 @@ async fn gather_tenant_timelines_index_parts( } async fn timeline_delete_handler(request: Request) -> Result, ApiError> { - let tenant_id: ZTenantId = parse_request_param(&request, "tenant_id")?; - let timeline_id: ZTimelineId = parse_request_param(&request, "timeline_id")?; + let tenant_id: TenantId = parse_request_param(&request, "tenant_id")?; + let timeline_id: TimelineId = parse_request_param(&request, "timeline_id")?; check_permission(&request, Some(tenant_id))?; let state = get_state(&request); @@ -436,7 +436,7 @@ async fn timeline_delete_handler(request: Request) -> Result) -> Result) -> Result, ApiError> { - let tenant_id: ZTenantId = parse_request_param(&request, "tenant_id")?; + let tenant_id: TenantId = parse_request_param(&request, "tenant_id")?; check_permission(&request, Some(tenant_id))?; let state = get_state(&request); @@ -479,7 +479,7 @@ async fn tenant_list_handler(request: Request) -> Result, A } async fn tenant_status(request: Request) -> Result, ApiError> { - let tenant_id: ZTenantId = parse_request_param(&request, "tenant_id")?; + let tenant_id: TenantId = parse_request_param(&request, "tenant_id")?; check_permission(&request, Some(tenant_id))?; // if tenant is in progress of downloading it can be absent in global tenant map @@ -588,8 +588,8 @@ async fn tenant_create_handler(mut request: Request) -> Result(HashMap>); +pub struct TenantTimelineValues(HashMap>); impl TenantTimelineValues { fn new() -> Self { @@ -187,8 +187,8 @@ mod tests { #[test] fn tenant_timeline_value_mapping() { - let first_tenant = ZTenantId::generate(); - let second_tenant = ZTenantId::generate(); + let first_tenant = TenantId::generate(); + let second_tenant = TenantId::generate(); assert_ne!(first_tenant, second_tenant); let mut initial = TenantTimelineValues::new(); diff --git a/pageserver/src/metrics.rs b/pageserver/src/metrics.rs index ada0bbd359..2f03943429 100644 --- a/pageserver/src/metrics.rs +++ b/pageserver/src/metrics.rs @@ -5,7 +5,7 @@ use metrics::{ IntCounter, IntCounterVec, IntGauge, IntGaugeVec, UIntGauge, UIntGaugeVec, }; use once_cell::sync::Lazy; -use utils::zid::{ZTenantId, ZTimelineId}; +use utils::id::{TenantId, TimelineId}; /// Prometheus histogram buckets (in seconds) that capture the majority of /// latencies in the microsecond range but also extend far enough up to distinguish @@ -327,7 +327,7 @@ pub struct TimelineMetrics { } impl TimelineMetrics { - pub fn new(tenant_id: &ZTenantId, timeline_id: &ZTimelineId) 
-> Self { + pub fn new(tenant_id: &TenantId, timeline_id: &TimelineId) -> Self { let tenant_id = tenant_id.to_string(); let timeline_id = timeline_id.to_string(); let reconstruct_time_histo = RECONSTRUCT_TIME @@ -414,6 +414,6 @@ impl Drop for TimelineMetrics { } } -pub fn remove_tenant_metrics(tenant_id: &ZTenantId) { +pub fn remove_tenant_metrics(tenant_id: &TenantId) { let _ = STORAGE_TIME.remove_label_values(&["gc", &tenant_id.to_string(), "-"]); } diff --git a/pageserver/src/page_cache.rs b/pageserver/src/page_cache.rs index 15c3c22dd6..d2fe06697e 100644 --- a/pageserver/src/page_cache.rs +++ b/pageserver/src/page_cache.rs @@ -49,8 +49,8 @@ use anyhow::Context; use once_cell::sync::OnceCell; use tracing::error; use utils::{ + id::{TenantId, TimelineId}, lsn::Lsn, - zid::{ZTenantId, ZTimelineId}, }; use crate::repository::Key; @@ -109,8 +109,8 @@ enum CacheKey { #[derive(Debug, PartialEq, Eq, Hash, Clone)] struct MaterializedPageHashKey { - tenant_id: ZTenantId, - timeline_id: ZTimelineId, + tenant_id: TenantId, + timeline_id: TimelineId, key: Key, } @@ -308,8 +308,8 @@ impl PageCache { /// returned page. pub fn lookup_materialized_page( &self, - tenant_id: ZTenantId, - timeline_id: ZTimelineId, + tenant_id: TenantId, + timeline_id: TimelineId, key: &Key, lsn: Lsn, ) -> Option<(Lsn, PageReadGuard)> { @@ -338,8 +338,8 @@ impl PageCache { /// pub fn memorize_materialized_page( &self, - tenant_id: ZTenantId, - timeline_id: ZTimelineId, + tenant_id: TenantId, + timeline_id: TimelineId, key: Key, lsn: Lsn, img: &[u8], diff --git a/pageserver/src/page_service.rs b/pageserver/src/page_service.rs index 388f40f916..b06814c557 100644 --- a/pageserver/src/page_service.rs +++ b/pageserver/src/page_service.rs @@ -23,12 +23,12 @@ use tokio_util::io::SyncIoBridge; use tracing::*; use utils::{ auth::{self, Claims, JwtAuth, Scope}, + id::{TenantId, TimelineId}, lsn::Lsn, postgres_backend::AuthType, postgres_backend_async::{self, PostgresBackend}, pq_proto::{BeMessage, FeMessage, RowDescriptor, SINGLE_COL_ROWDESC}, simple_rcu::RcuReadGuard, - zid::{ZTenantId, ZTimelineId}, }; use crate::basebackup; @@ -123,7 +123,7 @@ impl PagestreamFeMessage { fn parse(mut body: Bytes) -> anyhow::Result { // TODO these gets can fail - // these correspond to the ZenithMessageTag enum in pagestore_client.h + // these correspond to the NeonMessageTag enum in pagestore_client.h // // TODO: consider using protobuf or serde bincode for less error prone // serialization. 
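A sketch of the tag-byte framing that `parse` implements. The tag values below are placeholders for illustration; the authoritative numbers live in the C-side message tag enum in pgxn/neon/pagestore_client.h and must stay in sync with it:

```rust
use bytes::{Buf, Bytes};

// Placeholder tag values; real ones come from pgxn/neon/pagestore_client.h.
const TAG_EXISTS: u8 = 0;
const TAG_NBLOCKS: u8 = 1;

#[derive(Debug)]
enum ToyFeMessage {
    Exists { rel: u32, blkno: u32 },
    Nblocks { rel: u32 },
}

fn parse(mut body: Bytes) -> anyhow::Result<ToyFeMessage> {
    anyhow::ensure!(body.remaining() >= 1, "empty pagestream message");
    let tag = body.get_u8();
    // NB: like the original, the following `get_*` calls panic on short
    // input; a production parser would check `remaining()` first (the TODO).
    match tag {
        TAG_EXISTS => Ok(ToyFeMessage::Exists {
            rel: body.get_u32(),
            blkno: body.get_u32(),
        }),
        TAG_NBLOCKS => Ok(ToyFeMessage::Nblocks { rel: body.get_u32() }),
        other => anyhow::bail!("unknown message tag: {other}"),
    }
}

fn main() -> anyhow::Result<()> {
    let msg = Bytes::from_static(&[TAG_NBLOCKS, 0, 0, 0, 42]);
    println!("{:?}", parse(msg)?); // Nblocks { rel: 42 }
    Ok(())
}
```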
@@ -370,7 +370,7 @@ struct PageRequestMetrics { } impl PageRequestMetrics { - fn new(tenant_id: &ZTenantId, timeline_id: &ZTimelineId) -> Self { + fn new(tenant_id: &TenantId, timeline_id: &TimelineId) -> Self { let tenant_id = tenant_id.to_string(); let timeline_id = timeline_id.to_string(); @@ -415,8 +415,8 @@ impl PageServerHandler { async fn handle_pagerequests( &self, pgb: &mut PostgresBackend, - tenant_id: ZTenantId, - timeline_id: ZTimelineId, + tenant_id: TenantId, + timeline_id: TimelineId, ) -> anyhow::Result<()> { // NOTE: pagerequests handler exits when connection is closed, // so there is no need to reset the association @@ -452,11 +452,11 @@ impl PageServerHandler { None => break, // client disconnected }; - trace!("query: {:?}", copy_data_bytes); + trace!("query: {copy_data_bytes:?}"); - let zenith_fe_msg = PagestreamFeMessage::parse(copy_data_bytes)?; + let neon_fe_msg = PagestreamFeMessage::parse(copy_data_bytes)?; - let response = match zenith_fe_msg { + let response = match neon_fe_msg { PagestreamFeMessage::Exists(req) => { let _timer = metrics.get_rel_exists.start_timer(); self.handle_get_rel_exists_request(&timeline, &req).await @@ -494,8 +494,8 @@ impl PageServerHandler { async fn handle_import_basebackup( &self, pgb: &mut PostgresBackend, - tenant_id: ZTenantId, - timeline_id: ZTimelineId, + tenant_id: TenantId, + timeline_id: TimelineId, base_lsn: Lsn, _end_lsn: Lsn, ) -> anyhow::Result<()> { @@ -557,8 +557,8 @@ impl PageServerHandler { async fn handle_import_wal( &self, pgb: &mut PostgresBackend, - tenant_id: ZTenantId, - timeline_id: ZTimelineId, + tenant_id: TenantId, + timeline_id: TimelineId, start_lsn: Lsn, end_lsn: Lsn, ) -> anyhow::Result<()> { @@ -750,8 +750,8 @@ impl PageServerHandler { async fn handle_basebackup_request( &self, pgb: &mut PostgresBackend, - tenant_id: ZTenantId, - timeline_id: ZTimelineId, + tenant_id: TenantId, + timeline_id: TimelineId, lsn: Option, prev_lsn: Option, full_backup: bool, @@ -792,7 +792,7 @@ impl PageServerHandler { // when accessing management api supply None as an argument // when using to authorize tenant pass corresponding tenant id - fn check_permission(&self, tenant_id: Option) -> Result<()> { + fn check_permission(&self, tenant_id: Option) -> Result<()> { if self.auth.is_none() { // auth is set to Trust, nothing to check so just return ok return Ok(()); @@ -815,7 +815,7 @@ impl postgres_backend_async::Handler for PageServerHandler { _pgb: &mut PostgresBackend, jwt_response: &[u8], ) -> anyhow::Result<()> { - // this unwrap is never triggered, because check_auth_jwt only called when auth_type is ZenithJWT + // this unwrap is never triggered, because check_auth_jwt only called when auth_type is NeonJWT // which requires auth to be present let data = self .auth @@ -853,8 +853,8 @@ impl postgres_backend_async::Handler for PageServerHandler { params.len() == 2, "invalid param number for pagestream command" ); - let tenant_id = ZTenantId::from_str(params[0])?; - let timeline_id = ZTimelineId::from_str(params[1])?; + let tenant_id = TenantId::from_str(params[0])?; + let timeline_id = TimelineId::from_str(params[1])?; self.check_permission(Some(tenant_id))?; @@ -869,8 +869,8 @@ impl postgres_backend_async::Handler for PageServerHandler { "invalid param number for basebackup command" ); - let tenant_id = ZTenantId::from_str(params[0])?; - let timeline_id = ZTimelineId::from_str(params[1])?; + let tenant_id = TenantId::from_str(params[0])?; + let timeline_id = TimelineId::from_str(params[1])?; 
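For orientation, the libpq query commands handled in this file follow a simple space-separated grammar (`pagestream <tenant_id> <timeline_id>`, `basebackup <tenant_id> <timeline_id> [lsn]`, and so on). A trimmed, hypothetical dispatcher over just two of them; the real handler also parses the IDs via `TenantId::from_str`/`TimelineId::from_str` and covers import, fullbackup, show, do_gc and more:

```rust
// Toy dispatcher for the space-separated query grammar; command names and
// behavior are simplified stand-ins, not the full page_service handler.
fn dispatch(query: &str) -> anyhow::Result<String> {
    let parts: Vec<&str> = query.split_whitespace().collect();
    match parts.as_slice() {
        ["pagestream", tenant, timeline] => {
            Ok(format!("stream pages for {tenant}/{timeline}"))
        }
        ["basebackup", tenant, timeline, rest @ ..] => {
            // The optional trailing parameter is the LSN to back up at.
            let lsn = rest.first().copied().unwrap_or("latest");
            Ok(format!("basebackup {tenant}/{timeline} at {lsn}"))
        }
        _ => anyhow::bail!("unsupported query: {query}"),
    }
}

fn main() -> anyhow::Result<()> {
    assert!(dispatch("pagestream ad50 7788").is_ok());
    assert!(dispatch("basebackup ad50 7788 0/169C3C8").is_ok());
    assert!(dispatch("do_something_else").is_err());
    Ok(())
}
```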
self.check_permission(Some(tenant_id))?; @@ -895,8 +895,8 @@ impl postgres_backend_async::Handler for PageServerHandler { "invalid param number for get_last_record_rlsn command" ); - let tenant_id = ZTenantId::from_str(params[0])?; - let timeline_id = ZTimelineId::from_str(params[1])?; + let tenant_id = TenantId::from_str(params[0])?; + let timeline_id = TimelineId::from_str(params[1])?; self.check_permission(Some(tenant_id))?; let timeline = get_local_timeline(tenant_id, timeline_id)?; @@ -923,8 +923,8 @@ impl postgres_backend_async::Handler for PageServerHandler { "invalid param number for fullbackup command" ); - let tenant_id = ZTenantId::from_str(params[0])?; - let timeline_id = ZTimelineId::from_str(params[1])?; + let tenant_id = TenantId::from_str(params[0])?; + let timeline_id = TimelineId::from_str(params[1])?; // The caller is responsible for providing correct lsn and prev_lsn. let lsn = if params.len() > 2 { @@ -959,8 +959,8 @@ impl postgres_backend_async::Handler for PageServerHandler { let (_, params_raw) = query_string.split_at("import basebackup ".len()); let params = params_raw.split_whitespace().collect::>(); ensure!(params.len() == 4); - let tenant_id = ZTenantId::from_str(params[0])?; - let timeline_id = ZTimelineId::from_str(params[1])?; + let tenant_id = TenantId::from_str(params[0])?; + let timeline_id = TimelineId::from_str(params[1])?; let base_lsn = Lsn::from_str(params[2])?; let end_lsn = Lsn::from_str(params[3])?; @@ -984,8 +984,8 @@ impl postgres_backend_async::Handler for PageServerHandler { let (_, params_raw) = query_string.split_at("import wal ".len()); let params = params_raw.split_whitespace().collect::>(); ensure!(params.len() == 4); - let tenant_id = ZTenantId::from_str(params[0])?; - let timeline_id = ZTimelineId::from_str(params[1])?; + let tenant_id = TenantId::from_str(params[0])?; + let timeline_id = TimelineId::from_str(params[1])?; let start_lsn = Lsn::from_str(params[2])?; let end_lsn = Lsn::from_str(params[3])?; @@ -1035,7 +1035,7 @@ impl postgres_backend_async::Handler for PageServerHandler { let (_, params_raw) = query_string.split_at("show ".len()); let params = params_raw.split(' ').collect::>(); ensure!(params.len() == 1, "invalid param number for config command"); - let tenant_id = ZTenantId::from_str(params[0])?; + let tenant_id = TenantId::from_str(params[0])?; let tenant = tenant_mgr::get_tenant(tenant_id, true)?; pgb.write_message(&BeMessage::RowDescription(&[ RowDescriptor::int8_col(b"checkpoint_distance"), @@ -1087,8 +1087,8 @@ impl postgres_backend_async::Handler for PageServerHandler { .captures(query_string) .with_context(|| format!("invalid do_gc: '{}'", query_string))?; - let tenant_id = ZTenantId::from_str(caps.get(1).unwrap().as_str())?; - let timeline_id = ZTimelineId::from_str(caps.get(2).unwrap().as_str())?; + let tenant_id = TenantId::from_str(caps.get(1).unwrap().as_str())?; + let timeline_id = TimelineId::from_str(caps.get(2).unwrap().as_str())?; let tenant = tenant_mgr::get_tenant(tenant_id, true)?; @@ -1131,8 +1131,8 @@ impl postgres_backend_async::Handler for PageServerHandler { .captures(query_string) .with_context(|| format!("Invalid compact: '{}'", query_string))?; - let tenant_id = ZTenantId::from_str(caps.get(1).unwrap().as_str())?; - let timeline_id = ZTimelineId::from_str(caps.get(2).unwrap().as_str())?; + let tenant_id = TenantId::from_str(caps.get(1).unwrap().as_str())?; + let timeline_id = TimelineId::from_str(caps.get(2).unwrap().as_str())?; let timeline = get_local_timeline(tenant_id, timeline_id)?; 
timeline.compact()?; @@ -1148,8 +1148,8 @@ impl postgres_backend_async::Handler for PageServerHandler { .captures(query_string) .with_context(|| format!("invalid checkpoint command: '{}'", query_string))?; - let tenant_id = ZTenantId::from_str(caps.get(1).unwrap().as_str())?; - let timeline_id = ZTimelineId::from_str(caps.get(2).unwrap().as_str())?; + let tenant_id = TenantId::from_str(caps.get(1).unwrap().as_str())?; + let timeline_id = TimelineId::from_str(caps.get(2).unwrap().as_str())?; let timeline = get_local_timeline(tenant_id, timeline_id)?; // Checkpoint the timeline and also compact it (due to `CheckpointConfig::Forced`). @@ -1166,8 +1166,8 @@ impl postgres_backend_async::Handler for PageServerHandler { .captures(query_string) .with_context(|| format!("invalid get_lsn_by_timestamp: '{}'", query_string))?; - let tenant_id = ZTenantId::from_str(caps.get(1).unwrap().as_str())?; - let timeline_id = ZTimelineId::from_str(caps.get(2).unwrap().as_str())?; + let tenant_id = TenantId::from_str(caps.get(1).unwrap().as_str())?; + let timeline_id = TimelineId::from_str(caps.get(2).unwrap().as_str())?; let timeline = get_local_timeline(tenant_id, timeline_id)?; let timestamp = humantime::parse_rfc3339(caps.get(3).unwrap().as_str())?; @@ -1192,7 +1192,7 @@ impl postgres_backend_async::Handler for PageServerHandler { } } -fn get_local_timeline(tenant_id: ZTenantId, timeline_id: ZTimelineId) -> Result> { +fn get_local_timeline(tenant_id: TenantId, timeline_id: TimelineId) -> Result> { tenant_mgr::get_tenant(tenant_id, true).and_then(|tenant| tenant.get_timeline(timeline_id)) } diff --git a/pageserver/src/pgdatadir_mapping.rs b/pageserver/src/pgdatadir_mapping.rs index 2454b6f54f..9d4b438dc4 100644 --- a/pageserver/src/pgdatadir_mapping.rs +++ b/pageserver/src/pgdatadir_mapping.rs @@ -10,7 +10,7 @@ use crate::keyspace::{KeySpace, KeySpaceAccum}; use crate::reltag::{RelTag, SlruKind}; use crate::repository::*; use crate::tenant::Timeline; -use crate::walrecord::ZenithWalRecord; +use crate::walrecord::NeonWalRecord; use anyhow::{bail, ensure, Result}; use bytes::{Buf, Bytes}; use postgres_ffi::v14::pg_constants; @@ -570,7 +570,7 @@ impl<'a> DatadirModification<'a> { &mut self, rel: RelTag, blknum: BlockNumber, - rec: ZenithWalRecord, + rec: NeonWalRecord, ) -> Result<()> { ensure!(rel.relnode != 0, "invalid relnode"); self.put(rel_block_to_key(rel, blknum), Value::WalRecord(rec)); @@ -583,7 +583,7 @@ impl<'a> DatadirModification<'a> { kind: SlruKind, segno: u32, blknum: BlockNumber, - rec: ZenithWalRecord, + rec: NeonWalRecord, ) -> Result<()> { self.put( slru_block_to_key(kind, segno, blknum), @@ -1401,7 +1401,7 @@ fn is_slru_block_key(key: Key) -> bool { #[cfg(test)] pub fn create_test_timeline( tenant: &crate::tenant::Tenant, - timeline_id: utils::zid::ZTimelineId, + timeline_id: utils::id::TimelineId, ) -> Result> { let tline = tenant.create_empty_timeline(timeline_id, Lsn(8))?; let mut m = tline.begin_modification(Lsn(8)); diff --git a/pageserver/src/repository.rs b/pageserver/src/repository.rs index c3b08c93de..f6ea9d8c5d 100644 --- a/pageserver/src/repository.rs +++ b/pageserver/src/repository.rs @@ -1,4 +1,4 @@ -use crate::walrecord::ZenithWalRecord; +use crate::walrecord::NeonWalRecord; use anyhow::{bail, Result}; use byteorder::{ByteOrder, BE}; use bytes::Bytes; @@ -157,7 +157,7 @@ pub enum Value { /// replayed get the full value. Replaying the WAL record /// might need a previous version of the value (if will_init() /// returns false), or it may be replayed stand-alone (true). 
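The `will_init` contract described above drives page reconstruction: walk the history back to the most recent image or self-initializing record, then replay forward. A toy version with stand-in types; the real redo step is performed by the WAL redo process, not the append used here to keep the sketch runnable:

```rust
#[derive(Clone)]
enum ToyValue {
    Image(Vec<u8>),
    WalRecord { will_init: bool, payload: Vec<u8> },
}

fn reconstruct(history: &[ToyValue]) -> Option<Vec<u8>> {
    // Find the latest point we can start from: an image, or a record
    // that can be replayed stand-alone (will_init == true).
    let start = history.iter().rposition(|v| match v {
        ToyValue::Image(_) => true,
        ToyValue::WalRecord { will_init, .. } => *will_init,
    })?;
    let mut page = match &history[start] {
        ToyValue::Image(img) => img.clone(),
        ToyValue::WalRecord { payload, .. } => apply(Vec::new(), payload),
    };
    // Replay everything newer than the starting point, in order.
    for v in &history[start + 1..] {
        match v {
            ToyValue::Image(img) => page = img.clone(),
            ToyValue::WalRecord { payload, .. } => page = apply(page, payload),
        }
    }
    Some(page)
}

fn apply(mut page: Vec<u8>, payload: &[u8]) -> Vec<u8> {
    // Stand-in for WAL redo: just append.
    page.extend_from_slice(payload);
    page
}

fn main() {
    let history = vec![
        ToyValue::WalRecord { will_init: true, payload: b"base".to_vec() },
        ToyValue::WalRecord { will_init: false, payload: b"+d1".to_vec() },
    ];
    assert_eq!(reconstruct(&history).unwrap(), b"base+d1".to_vec());
}
```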
- WalRecord(ZenithWalRecord), + WalRecord(NeonWalRecord), } impl Value { diff --git a/pageserver/src/storage_sync.rs b/pageserver/src/storage_sync.rs index c104dba298..9d259bf1e2 100644 --- a/pageserver/src/storage_sync.rs +++ b/pageserver/src/storage_sync.rs @@ -68,7 +68,7 @@ //! Pageserver maintains similar to the local file structure remotely: all layer files are uploaded with the same names under the same directory structure. //! Yet instead of keeping the `metadata` file remotely, we wrap it with more data in [`IndexPart`], containing the list of remote files. //! This file gets read to populate the cache, if the remote timeline data is missing from it and gets updated after every successful download. -//! This way, we optimize S3 storage access by not running the `S3 list` command that could be expencive and slow: knowing both [`ZTenantId`] and [`ZTimelineId`], +//! This way, we optimize S3 storage access by not running the `S3 list` command that could be expensive and slow: knowing both [`TenantId`] and [`TimelineId`], //! we can always reconstruct the path to the timeline, use this to get the same path on the remote storage and retrieve its shard contents, if needed, same as any layer files. //! //! By default, pageserver reads the remote storage index data only for timelines located locally, to synchronize those, if needed. @@ -183,7 +183,7 @@ use crate::{ TenantTimelineValues, }; -use utils::zid::{ZTenantId, ZTenantTimelineId, ZTimelineId}; +use utils::id::{TenantId, TenantTimelineId, TimelineId}; use self::download::download_index_parts; pub use self::download::gather_tenant_timelines_index_parts; @@ -227,7 +227,7 @@ pub struct SyncStartupData { struct SyncQueue { max_timelines_per_batch: NonZeroUsize, - queue: Mutex>, + queue: Mutex>, condvar: Condvar, } @@ -241,7 +241,7 @@ impl SyncQueue { } /// Queue a new task - fn push(&self, sync_id: ZTenantTimelineId, new_task: SyncTask) { + fn push(&self, sync_id: TenantTimelineId, new_task: SyncTask) { let mut q = self.queue.lock().unwrap(); q.push_back((sync_id, new_task)); @@ -254,7 +254,7 @@ impl SyncQueue { /// A timeline has to care to not to delete certain layers from the remote storage before the corresponding uploads happen. /// Other than that, due to "immutable" nature of the layers, the order of their deletion/uploading/downloading does not matter. /// Hence, we merge the layers together into single task per timeline and run those concurrently (with the deletion happening only after successful uploading). - fn next_task_batch(&self) -> (HashMap, usize) { + fn next_task_batch(&self) -> (HashMap, usize) { // Wait for the first task in blocking fashion let mut q = self.queue.lock().unwrap(); while q.is_empty() { @@ -488,8 +488,8 @@ struct LayersDeletion { /// /// Ensure that the loop is started otherwise the task is never processed. pub fn schedule_layer_upload( - tenant_id: ZTenantId, - timeline_id: ZTimelineId, + tenant_id: TenantId, + timeline_id: TimelineId, layers_to_upload: HashSet, metadata: Option, ) { @@ -501,7 +501,7 @@ pub fn schedule_layer_upload( } }; sync_queue.push( - ZTenantTimelineId { + TenantTimelineId { tenant_id, timeline_id, }, @@ -519,8 +519,8 @@ pub fn schedule_layer_upload( /// /// Ensure that the loop is started otherwise the task is never processed.
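A toy rendering of the queue-and-batch scheme from the comments above, where `SyncId` stands in for `TenantTimelineId` and tasks for the same timeline coalesce into a single batch entry (the real `SyncQueue` adds blocking, a condvar and typed `SyncTask`s):

```rust
use std::collections::{HashMap, VecDeque};

type SyncId = (u128, u128); // stand-in for TenantTimelineId

#[derive(Default)]
struct ToySyncQueue {
    queue: VecDeque<(SyncId, String)>,
}

impl ToySyncQueue {
    fn push(&mut self, id: SyncId, task: String) {
        self.queue.push_back((id, task));
    }

    // Drain up to `max_timelines` distinct timelines, merging their tasks.
    fn next_batch(&mut self, max_timelines: usize) -> HashMap<SyncId, Vec<String>> {
        let mut batch: HashMap<SyncId, Vec<String>> = HashMap::new();
        let mut rest = VecDeque::new();
        while let Some((id, task)) = self.queue.pop_front() {
            if batch.contains_key(&id) || batch.len() < max_timelines {
                batch.entry(id).or_default().push(task);
            } else {
                rest.push_back((id, task)); // over budget: next batch
            }
        }
        self.queue = rest;
        batch
    }
}

fn main() {
    let mut q = ToySyncQueue::default();
    q.push((1, 1), "upload L1".into());
    q.push((2, 1), "upload L2".into());
    q.push((1, 1), "delete L0".into());
    let batch = q.next_batch(1);
    // Only timeline (1, 1) is drained, with both of its tasks merged.
    assert_eq!(batch.len(), 1);
    assert_eq!(batch[&(1, 1)].len(), 2);
    assert_eq!(q.queue.len(), 1); // (2, 1) waits for the next batch
}
```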
pub fn schedule_layer_delete( - tenant_id: ZTenantId, - timeline_id: ZTimelineId, + tenant_id: TenantId, + timeline_id: TimelineId, layers_to_delete: HashSet, ) { let sync_queue = match SYNC_QUEUE.get() { @@ -531,7 +531,7 @@ pub fn schedule_layer_delete( } }; sync_queue.push( - ZTenantTimelineId { + TenantTimelineId { tenant_id, timeline_id, }, @@ -551,7 +551,7 @@ pub fn schedule_layer_delete( /// On any failure, the task gets retried, omitting already downloaded layers. /// /// Ensure that the loop is started otherwise the task is never processed. -pub fn schedule_layer_download(tenant_id: ZTenantId, timeline_id: ZTimelineId) { +pub fn schedule_layer_download(tenant_id: TenantId, timeline_id: TimelineId) { debug!("Scheduling layer download for tenant {tenant_id}, timeline {timeline_id}"); let sync_queue = match SYNC_QUEUE.get() { Some(queue) => queue, @@ -561,7 +561,7 @@ pub fn schedule_layer_download(tenant_id: ZTenantId, timeline_id: ZTimelineId) { } }; sync_queue.push( - ZTenantTimelineId { + TenantTimelineId { tenant_id, timeline_id, }, @@ -604,7 +604,7 @@ pub fn spawn_storage_sync_task( let _ = empty_tenants.0.entry(tenant_id).or_default(); } else { for (timeline_id, timeline_data) in timeline_data { - let id = ZTenantTimelineId::new(tenant_id, timeline_id); + let id = TenantTimelineId::new(tenant_id, timeline_id); keys_for_index_part_downloads.insert(id); timelines_to_sync.insert(id, timeline_data); } @@ -766,9 +766,9 @@ async fn process_batches( max_sync_errors: NonZeroU32, storage: GenericRemoteStorage, index: &RemoteIndex, - batched_tasks: HashMap, + batched_tasks: HashMap, sync_queue: &SyncQueue, -) -> HashSet { +) -> HashSet { let mut sync_results = batched_tasks .into_iter() .map(|(sync_id, batch)| { @@ -808,7 +808,7 @@ async fn process_sync_task_batch( conf: &'static PageServerConf, (storage, index, sync_queue): (GenericRemoteStorage, RemoteIndex, &SyncQueue), max_sync_errors: NonZeroU32, - sync_id: ZTenantTimelineId, + sync_id: TenantTimelineId, batch: SyncTaskBatch, ) -> DownloadStatus { let sync_start = Instant::now(); @@ -949,7 +949,7 @@ async fn download_timeline_data( conf: &'static PageServerConf, (storage, index, sync_queue): (&GenericRemoteStorage, &RemoteIndex, &SyncQueue), current_remote_timeline: Option<&RemoteTimeline>, - sync_id: ZTenantTimelineId, + sync_id: TenantTimelineId, new_download_data: SyncData, sync_start: Instant, task_name: &str, @@ -999,7 +999,7 @@ async fn download_timeline_data( async fn update_local_metadata( conf: &'static PageServerConf, - sync_id: ZTenantTimelineId, + sync_id: TenantTimelineId, remote_timeline: Option<&RemoteTimeline>, ) -> anyhow::Result<()> { let remote_metadata = match remote_timeline { @@ -1031,7 +1031,7 @@ async fn update_local_metadata( info!("Updating local timeline metadata from remote timeline: local disk_consistent_lsn={local_lsn:?}, remote disk_consistent_lsn={remote_lsn}"); // clone because spawn_blocking requires static lifetime let cloned_metadata = remote_metadata.to_owned(); - let ZTenantTimelineId { + let TenantTimelineId { tenant_id, timeline_id, } = sync_id; @@ -1061,7 +1061,7 @@ async fn update_local_metadata( async fn delete_timeline_data( conf: &'static PageServerConf, (storage, index, sync_queue): (&GenericRemoteStorage, &RemoteIndex, &SyncQueue), - sync_id: ZTenantTimelineId, + sync_id: TenantTimelineId, mut new_delete_data: SyncData, sync_start: Instant, task_name: &str, @@ -1104,7 +1104,7 @@ async fn upload_timeline_data( conf: &'static PageServerConf, (storage, index, sync_queue): 
(&GenericRemoteStorage, &RemoteIndex, &SyncQueue), current_remote_timeline: Option<&RemoteTimeline>, - sync_id: ZTenantTimelineId, + sync_id: TenantTimelineId, new_upload_data: SyncData, sync_start: Instant, task_name: &str, @@ -1163,7 +1163,7 @@ async fn update_remote_data( conf: &'static PageServerConf, storage: &GenericRemoteStorage, index: &RemoteIndex, - sync_id: ZTenantTimelineId, + sync_id: TenantTimelineId, update: RemoteDataUpdate<'_>, ) -> anyhow::Result<()> { let updated_remote_timeline = { @@ -1261,7 +1261,7 @@ async fn validate_task_retries( fn schedule_first_sync_tasks( index: &mut RemoteTimelineIndex, sync_queue: &SyncQueue, - local_timeline_files: HashMap)>, + local_timeline_files: HashMap)>, ) -> TenantTimelineValues { let mut local_timeline_init_statuses = TenantTimelineValues::new(); @@ -1331,8 +1331,8 @@ fn schedule_first_sync_tasks( /// bool in return value stands for awaits_download fn compare_local_and_remote_timeline( - new_sync_tasks: &mut VecDeque<(ZTenantTimelineId, SyncTask)>, - sync_id: ZTenantTimelineId, + new_sync_tasks: &mut VecDeque<(TenantTimelineId, SyncTask)>, + sync_id: TenantTimelineId, local_metadata: TimelineMetadata, local_files: HashSet, remote_entry: &RemoteTimeline, @@ -1377,7 +1377,7 @@ fn compare_local_and_remote_timeline( } fn register_sync_status( - sync_id: ZTenantTimelineId, + sync_id: TenantTimelineId, sync_start: Instant, sync_name: &str, sync_status: Option, @@ -1409,7 +1409,7 @@ mod test_utils { pub(super) async fn create_local_timeline( harness: &TenantHarness<'_>, - timeline_id: ZTimelineId, + timeline_id: TimelineId, filenames: &[&str], metadata: TimelineMetadata, ) -> anyhow::Result { @@ -1454,8 +1454,8 @@ mod tests { use super::*; - const TEST_SYNC_ID: ZTenantTimelineId = ZTenantTimelineId { - tenant_id: ZTenantId::from_array(hex!("11223344556677881122334455667788")), + const TEST_SYNC_ID: TenantTimelineId = TenantTimelineId { + tenant_id: TenantId::from_array(hex!("11223344556677881122334455667788")), timeline_id: TIMELINE_ID, }; @@ -1464,12 +1464,12 @@ mod tests { let sync_queue = SyncQueue::new(NonZeroUsize::new(100).unwrap()); assert_eq!(sync_queue.len(), 0); - let sync_id_2 = ZTenantTimelineId { - tenant_id: ZTenantId::from_array(hex!("22223344556677881122334455667788")), + let sync_id_2 = TenantTimelineId { + tenant_id: TenantId::from_array(hex!("22223344556677881122334455667788")), timeline_id: TIMELINE_ID, }; - let sync_id_3 = ZTenantTimelineId { - tenant_id: ZTenantId::from_array(hex!("33223344556677881122334455667788")), + let sync_id_3 = TenantTimelineId { + tenant_id: TenantId::from_array(hex!("33223344556677881122334455667788")), timeline_id: TIMELINE_ID, }; assert!(sync_id_2 != TEST_SYNC_ID); @@ -1591,8 +1591,8 @@ mod tests { layers_to_skip: HashSet::from([PathBuf::from("sk4")]), }; - let sync_id_2 = ZTenantTimelineId { - tenant_id: ZTenantId::from_array(hex!("22223344556677881122334455667788")), + let sync_id_2 = TenantTimelineId { + tenant_id: TenantId::from_array(hex!("22223344556677881122334455667788")), timeline_id: TIMELINE_ID, }; assert!(sync_id_2 != TEST_SYNC_ID); diff --git a/pageserver/src/storage_sync/delete.rs b/pageserver/src/storage_sync/delete.rs index 945f5fded8..21a3372e70 100644 --- a/pageserver/src/storage_sync/delete.rs +++ b/pageserver/src/storage_sync/delete.rs @@ -8,7 +8,7 @@ use tracing::{debug, error, info}; use crate::storage_sync::{SyncQueue, SyncTask}; use remote_storage::GenericRemoteStorage; -use utils::zid::ZTenantTimelineId; +use utils::id::TenantTimelineId; use super::{LayersDeletion, 
SyncData}; @@ -17,7 +17,7 @@ use super::{LayersDeletion, SyncData}; pub(super) async fn delete_timeline_layers( storage: &GenericRemoteStorage, sync_queue: &SyncQueue, - sync_id: ZTenantTimelineId, + sync_id: TenantTimelineId, mut delete_data: SyncData, ) -> bool { if !delete_data.data.deletion_registered { @@ -123,7 +123,7 @@ mod tests { async fn delete_timeline_negative() -> anyhow::Result<()> { let harness = TenantHarness::create("delete_timeline_negative")?; let sync_queue = SyncQueue::new(NonZeroUsize::new(100).unwrap()); - let sync_id = ZTenantTimelineId::new(harness.tenant_id, TIMELINE_ID); + let sync_id = TenantTimelineId::new(harness.tenant_id, TIMELINE_ID); let storage = GenericRemoteStorage::new(LocalFs::new( tempdir()?.path().to_path_buf(), harness.conf.workdir.clone(), @@ -157,7 +157,7 @@ mod tests { let harness = TenantHarness::create("delete_timeline")?; let sync_queue = SyncQueue::new(NonZeroUsize::new(100).unwrap()); - let sync_id = ZTenantTimelineId::new(harness.tenant_id, TIMELINE_ID); + let sync_id = TenantTimelineId::new(harness.tenant_id, TIMELINE_ID); let layer_files = ["a", "b", "c", "d"]; let storage = GenericRemoteStorage::new(LocalFs::new( tempdir()?.path().to_path_buf(), diff --git a/pageserver/src/storage_sync/download.rs b/pageserver/src/storage_sync/download.rs index 32f228b447..80d5ca5994 100644 --- a/pageserver/src/storage_sync/download.rs +++ b/pageserver/src/storage_sync/download.rs @@ -20,7 +20,7 @@ use crate::{ config::PageServerConf, storage_sync::SyncTask, tenant::metadata::metadata_path, TEMP_FILE_SUFFIX, }; -use utils::zid::{ZTenantId, ZTenantTimelineId, ZTimelineId}; +use utils::id::{TenantId, TenantTimelineId, TimelineId}; use super::{ index::{IndexPart, RemoteTimeline}, @@ -33,14 +33,14 @@ use super::{ // When data is received succesfully without errors Present variant is used. pub enum TenantIndexParts { Poisoned { - present: HashMap, - missing: HashSet, + present: HashMap, + missing: HashSet, }, - Present(HashMap), + Present(HashMap), } impl TenantIndexParts { - fn add_poisoned(&mut self, timeline_id: ZTimelineId) { + fn add_poisoned(&mut self, timeline_id: TimelineId) { match self { TenantIndexParts::Poisoned { missing, .. 
} => { missing.insert(timeline_id); @@ -64,9 +64,9 @@ impl Default for TenantIndexParts { pub async fn download_index_parts( conf: &'static PageServerConf, storage: &GenericRemoteStorage, - keys: HashSet, -) -> HashMap { - let mut index_parts: HashMap = HashMap::new(); + keys: HashSet, +) -> HashMap { + let mut index_parts: HashMap = HashMap::new(); let mut part_downloads = keys .into_iter() @@ -112,8 +112,8 @@ pub async fn download_index_parts( pub async fn gather_tenant_timelines_index_parts( conf: &'static PageServerConf, storage: &GenericRemoteStorage, - tenant_id: ZTenantId, -) -> anyhow::Result> { + tenant_id: TenantId, +) -> anyhow::Result> { let tenant_path = conf.timelines_path(&tenant_id); let timeline_sync_ids = get_timeline_sync_ids(storage, &tenant_path, tenant_id) .await @@ -135,7 +135,7 @@ pub async fn gather_tenant_timelines_index_parts( async fn download_index_part( conf: &'static PageServerConf, storage: &GenericRemoteStorage, - sync_id: ZTenantTimelineId, + sync_id: TenantTimelineId, ) -> Result { let index_part_path = metadata_path(conf, sync_id.timeline_id, sync_id.tenant_id) .with_file_name(IndexPart::FILE_NAME); @@ -197,7 +197,7 @@ pub(super) async fn download_timeline_layers<'a>( storage: &'a GenericRemoteStorage, sync_queue: &'a SyncQueue, remote_timeline: Option<&'a RemoteTimeline>, - sync_id: ZTenantTimelineId, + sync_id: TenantTimelineId, mut download_data: SyncData, ) -> DownloadedTimeline { let remote_timeline = match remote_timeline { @@ -335,7 +335,7 @@ pub(super) async fn download_timeline_layers<'a>( } // fsync timeline directory which is a parent directory for downloaded files - let ZTenantTimelineId { + let TenantTimelineId { tenant_id, timeline_id, } = &sync_id; @@ -366,8 +366,8 @@ pub(super) async fn download_timeline_layers<'a>( async fn get_timeline_sync_ids( storage: &GenericRemoteStorage, tenant_path: &Path, - tenant_id: ZTenantId, -) -> anyhow::Result> { + tenant_id: TenantId, +) -> anyhow::Result> { let tenant_storage_path = storage.remote_object_id(tenant_path).with_context(|| { format!( "Failed to get tenant storage path for local path '{}'", @@ -395,11 +395,11 @@ async fn get_timeline_sync_ids( anyhow::anyhow!("failed to get timeline id for remote tenant {tenant_id}") })?; - let timeline_id: ZTimelineId = object_name.parse().with_context(|| { + let timeline_id: TimelineId = object_name.parse().with_context(|| { format!("failed to parse object name into timeline id '{object_name}'") })?; - sync_ids.insert(ZTenantTimelineId { + sync_ids.insert(TenantTimelineId { tenant_id, timeline_id, }); @@ -439,7 +439,7 @@ mod tests { let harness = TenantHarness::create("download_timeline")?; let sync_queue = SyncQueue::new(NonZeroUsize::new(100).unwrap()); - let sync_id = ZTenantTimelineId::new(harness.tenant_id, TIMELINE_ID); + let sync_id = TenantTimelineId::new(harness.tenant_id, TIMELINE_ID); let layer_files = ["a", "b", "layer_to_skip", "layer_to_keep_locally"]; let storage = GenericRemoteStorage::new(LocalFs::new( tempdir()?.path().to_owned(), @@ -539,7 +539,7 @@ mod tests { async fn download_timeline_negatives() -> anyhow::Result<()> { let harness = TenantHarness::create("download_timeline_negatives")?; let sync_queue = SyncQueue::new(NonZeroUsize::new(100).unwrap()); - let sync_id = ZTenantTimelineId::new(harness.tenant_id, TIMELINE_ID); + let sync_id = TenantTimelineId::new(harness.tenant_id, TIMELINE_ID); let storage = GenericRemoteStorage::new(LocalFs::new( tempdir()?.path().to_owned(), harness.conf.workdir.clone(), @@ -597,7 +597,7 @@ mod tests { 
#[tokio::test] async fn test_download_index_part() -> anyhow::Result<()> { let harness = TenantHarness::create("test_download_index_part")?; - let sync_id = ZTenantTimelineId::new(harness.tenant_id, TIMELINE_ID); + let sync_id = TenantTimelineId::new(harness.tenant_id, TIMELINE_ID); let storage = GenericRemoteStorage::new(LocalFs::new( tempdir()?.path().to_owned(), diff --git a/pageserver/src/storage_sync/index.rs b/pageserver/src/storage_sync/index.rs index cff14cde49..13495ffefe 100644 --- a/pageserver/src/storage_sync/index.rs +++ b/pageserver/src/storage_sync/index.rs @@ -17,8 +17,8 @@ use tracing::log::warn; use crate::{config::PageServerConf, tenant::metadata::TimelineMetadata}; use utils::{ + id::{TenantId, TenantTimelineId, TimelineId}, lsn::Lsn, - zid::{ZTenantId, ZTenantTimelineId, ZTimelineId}, }; use super::download::TenantIndexParts; @@ -49,7 +49,7 @@ impl RelativePath { } #[derive(Debug, Clone, Default)] -pub struct TenantEntry(HashMap); +pub struct TenantEntry(HashMap); impl TenantEntry { pub fn has_in_progress_downloads(&self) -> bool { @@ -59,7 +59,7 @@ impl TenantEntry { } impl Deref for TenantEntry { - type Target = HashMap; + type Target = HashMap; fn deref(&self) -> &Self::Target { &self.0 @@ -72,8 +72,8 @@ impl DerefMut for TenantEntry { } } -impl From> for TenantEntry { - fn from(inner: HashMap) -> Self { +impl From> for TenantEntry { + fn from(inner: HashMap) -> Self { Self(inner) } } @@ -81,7 +81,7 @@ impl From> for TenantEntry { /// An index to track tenant files that exist on the remote storage. #[derive(Debug, Clone, Default)] pub struct RemoteTimelineIndex { - entries: HashMap, + entries: HashMap, } /// A wrapper to synchronize the access to the index, should be created and used before dealing with any [`RemoteTimelineIndex`]. 
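Since `RemoteIndex` wraps the index in `Arc<tokio::sync::RwLock<...>>` (readers such as the HTTP handlers take `.read().await`, the sync loop takes `.write().await`), shared use looks roughly like this sketch with stand-in types, assuming tokio with the `rt` and `macros` features:

```rust
use std::{collections::HashMap, sync::Arc};
use tokio::sync::RwLock;

// Stand-ins for the real index types, to keep the sketch self-contained.
type TenantId = u128;
type TimelineId = u128;

#[derive(Default)]
struct ToyIndex {
    entries: HashMap<TenantId, HashMap<TimelineId, String>>,
}

// Cloning the wrapper clones the Arc, so all holders share one index.
#[derive(Clone, Default)]
struct ToyRemoteIndex(Arc<RwLock<ToyIndex>>);

#[tokio::main]
async fn main() {
    let index = ToyRemoteIndex::default();
    {
        // Writers take a short-lived exclusive guard to update...
        let mut guard = index.0.write().await;
        guard.entries.entry(1).or_default().insert(7, "index_part.json".into());
    }
    // ...while readers only need a shared guard.
    let guard = index.0.read().await;
    assert!(guard.entries.get(&1).and_then(|t| t.get(&7)).is_some());
}
```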
@@ -91,9 +91,9 @@ pub struct RemoteIndex(Arc>); impl RemoteIndex { pub fn from_parts( conf: &'static PageServerConf, - index_parts: HashMap, + index_parts: HashMap, ) -> anyhow::Result { - let mut entries: HashMap = HashMap::new(); + let mut entries: HashMap = HashMap::new(); for (tenant_id, index_parts) in index_parts { match index_parts { @@ -136,30 +136,30 @@ impl Clone for RemoteIndex { impl RemoteTimelineIndex { pub fn timeline_entry( &self, - ZTenantTimelineId { + TenantTimelineId { tenant_id, timeline_id, - }: &ZTenantTimelineId, + }: &TenantTimelineId, ) -> Option<&RemoteTimeline> { self.entries.get(tenant_id)?.get(timeline_id) } pub fn timeline_entry_mut( &mut self, - ZTenantTimelineId { + TenantTimelineId { tenant_id, timeline_id, - }: &ZTenantTimelineId, + }: &TenantTimelineId, ) -> Option<&mut RemoteTimeline> { self.entries.get_mut(tenant_id)?.get_mut(timeline_id) } pub fn add_timeline_entry( &mut self, - ZTenantTimelineId { + TenantTimelineId { tenant_id, timeline_id, - }: ZTenantTimelineId, + }: TenantTimelineId, entry: RemoteTimeline, ) { self.entries @@ -170,10 +170,10 @@ impl RemoteTimelineIndex { pub fn remove_timeline_entry( &mut self, - ZTenantTimelineId { + TenantTimelineId { tenant_id, timeline_id, - }: ZTenantTimelineId, + }: TenantTimelineId, ) -> Option { self.entries .entry(tenant_id) @@ -181,25 +181,25 @@ impl RemoteTimelineIndex { .remove(&timeline_id) } - pub fn tenant_entry(&self, tenant_id: &ZTenantId) -> Option<&TenantEntry> { + pub fn tenant_entry(&self, tenant_id: &TenantId) -> Option<&TenantEntry> { self.entries.get(tenant_id) } - pub fn tenant_entry_mut(&mut self, tenant_id: &ZTenantId) -> Option<&mut TenantEntry> { + pub fn tenant_entry_mut(&mut self, tenant_id: &TenantId) -> Option<&mut TenantEntry> { self.entries.get_mut(tenant_id) } - pub fn add_tenant_entry(&mut self, tenant_id: ZTenantId) -> &mut TenantEntry { + pub fn add_tenant_entry(&mut self, tenant_id: TenantId) -> &mut TenantEntry { self.entries.entry(tenant_id).or_default() } - pub fn remove_tenant_entry(&mut self, tenant_id: &ZTenantId) -> Option { + pub fn remove_tenant_entry(&mut self, tenant_id: &TenantId) -> Option { self.entries.remove(tenant_id) } pub fn set_awaits_download( &mut self, - id: &ZTenantTimelineId, + id: &TenantTimelineId, awaits_download: bool, ) -> anyhow::Result<()> { self.timeline_entry_mut(id) diff --git a/pageserver/src/storage_sync/upload.rs b/pageserver/src/storage_sync/upload.rs index bd09e6b898..aa5a2232cf 100644 --- a/pageserver/src/storage_sync/upload.rs +++ b/pageserver/src/storage_sync/upload.rs @@ -8,7 +8,7 @@ use remote_storage::GenericRemoteStorage; use tokio::fs; use tracing::{debug, error, info, warn}; -use utils::zid::ZTenantTimelineId; +use utils::id::TenantTimelineId; use super::{ index::{IndexPart, RemoteTimeline}, @@ -21,7 +21,7 @@ use crate::{config::PageServerConf, storage_sync::SyncTask, tenant::metadata::me pub(super) async fn upload_index_part( conf: &'static PageServerConf, storage: &GenericRemoteStorage, - sync_id: ZTenantTimelineId, + sync_id: TenantTimelineId, index_part: IndexPart, ) -> anyhow::Result<()> { let index_part_bytes = serde_json::to_vec(&index_part) @@ -58,7 +58,7 @@ pub(super) async fn upload_timeline_layers<'a>( storage: &'a GenericRemoteStorage, sync_queue: &SyncQueue, remote_timeline: Option<&'a RemoteTimeline>, - sync_id: ZTenantTimelineId, + sync_id: TenantTimelineId, mut upload_data: SyncData, ) -> UploadedTimeline { let upload = &mut upload_data.data; @@ -213,7 +213,7 @@ mod tests { async fn regular_layer_upload() -> 
anyhow::Result<()> { let harness = TenantHarness::create("regular_layer_upload")?; let sync_queue = SyncQueue::new(NonZeroUsize::new(100).unwrap()); - let sync_id = ZTenantTimelineId::new(harness.tenant_id, TIMELINE_ID); + let sync_id = TenantTimelineId::new(harness.tenant_id, TIMELINE_ID); let layer_files = ["a", "b"]; let storage = GenericRemoteStorage::new(LocalFs::new( @@ -301,7 +301,7 @@ mod tests { async fn layer_upload_after_local_fs_update() -> anyhow::Result<()> { let harness = TenantHarness::create("layer_upload_after_local_fs_update")?; let sync_queue = SyncQueue::new(NonZeroUsize::new(100).unwrap()); - let sync_id = ZTenantTimelineId::new(harness.tenant_id, TIMELINE_ID); + let sync_id = TenantTimelineId::new(harness.tenant_id, TIMELINE_ID); let layer_files = ["a1", "b1"]; let storage = GenericRemoteStorage::new(LocalFs::new( @@ -395,7 +395,7 @@ mod tests { #[tokio::test] async fn test_upload_index_part() -> anyhow::Result<()> { let harness = TenantHarness::create("test_upload_index_part")?; - let sync_id = ZTenantTimelineId::new(harness.tenant_id, TIMELINE_ID); + let sync_id = TenantTimelineId::new(harness.tenant_id, TIMELINE_ID); let storage = GenericRemoteStorage::new(LocalFs::new( tempdir()?.path().to_owned(), diff --git a/pageserver/src/task_mgr.rs b/pageserver/src/task_mgr.rs index 2aa803d119..dad6e0039d 100644 --- a/pageserver/src/task_mgr.rs +++ b/pageserver/src/task_mgr.rs @@ -51,7 +51,7 @@ use tracing::{debug, error, info, warn}; use once_cell::sync::Lazy; -use utils::zid::{ZTenantId, ZTimelineId}; +use utils::id::{TenantId, TimelineId}; use crate::shutdown_pageserver; @@ -210,8 +210,8 @@ pub enum TaskKind { #[derive(Default)] struct MutableTaskState { /// Tenant and timeline that this task is associated with. - tenant_id: Option, - timeline_id: Option, + tenant_id: Option, + timeline_id: Option, /// Handle for waiting for the task to exit. It can be None, if the /// the task has already exited. @@ -238,8 +238,8 @@ struct PageServerTask { pub fn spawn( runtime: &tokio::runtime::Handle, kind: TaskKind, - tenant_id: Option, - timeline_id: Option, + tenant_id: Option, + timeline_id: Option, name: &str, shutdown_process_on_error: bool, future: F, @@ -371,7 +371,7 @@ async fn task_finish( } // expected to be called from the task of the given id. -pub fn associate_with(tenant_id: Option, timeline_id: Option) { +pub fn associate_with(tenant_id: Option, timeline_id: Option) { CURRENT_TASK.with(|ct| { let mut task_mut = ct.mutable.lock().unwrap(); task_mut.tenant_id = tenant_id; @@ -391,12 +391,12 @@ pub fn associate_with(tenant_id: Option, timeline_id: Option, - tenant_id: Option, - timeline_id: Option, + tenant_id: Option, + timeline_id: Option, ) { let mut victim_tasks = Vec::new(); diff --git a/pageserver/src/tenant.rs b/pageserver/src/tenant.rs index 4ef810faba..41fd98ec07 100644 --- a/pageserver/src/tenant.rs +++ b/pageserver/src/tenant.rs @@ -4,7 +4,7 @@ //! The functions here are responsible for locating the correct layer for the //! get/put call, walking back the timeline branching history as needed. //! -//! The files are stored in the .neon/tenants//timelines/ +//! The files are stored in the .neon/tenants//timelines/ //! directory. See docs/pageserver-storage.md for how the files are managed. //! In addition to the layer files, there is a metadata file in the same //! 
directory that contains information about the timeline, in particular its @@ -48,8 +48,8 @@ use crate::CheckpointConfig; use toml_edit; use utils::{ crashsafe_dir, + id::{TenantId, TimelineId}, lsn::{Lsn, RecordLsn}, - zid::{ZTenantId, ZTimelineId}, }; mod blob_io; @@ -80,7 +80,7 @@ pub use crate::tenant::metadata::save_metadata; // re-export for use in walreceiver pub use crate::tenant::timeline::WalReceiverInfo; -/// Parts of the `.neon/tenants//timelines/` directory prefix. +/// Parts of the `.neon/tenants//timelines/` directory prefix. pub const TIMELINES_SEGMENT_NAME: &str = "timelines"; /// @@ -98,8 +98,8 @@ pub struct Tenant { // This is necessary to allow global config updates. tenant_conf: Arc>, - tenant_id: ZTenantId, - timelines: Mutex>>, + tenant_id: TenantId, + timelines: Mutex>>, // This mutex prevents creation of new timelines during GC. // Adding yet another mutex (in addition to `timelines`) is needed because holding // `timelines` mutex during all GC iteration (especially with enforced checkpoint) @@ -134,7 +134,7 @@ pub enum TenantState { impl Tenant { /// Get Timeline handle for given zenith timeline ID. /// This function is idempotent. It doesn't change internal state in any way. - pub fn get_timeline(&self, timeline_id: ZTimelineId) -> anyhow::Result> { + pub fn get_timeline(&self, timeline_id: TimelineId) -> anyhow::Result> { self.timelines .lock() .unwrap() @@ -151,7 +151,7 @@ impl Tenant { /// Lists timelines the tenant contains. /// Up to tenant's implementation to omit certain timelines that ar not considered ready for use. - pub fn list_timelines(&self) -> Vec<(ZTimelineId, Arc)> { + pub fn list_timelines(&self) -> Vec<(TimelineId, Arc)> { self.timelines .lock() .unwrap() @@ -164,7 +164,7 @@ impl Tenant { /// Initdb lsn is provided for timeline impl to be able to perform checks for some operations against it. pub fn create_empty_timeline( &self, - new_timeline_id: ZTimelineId, + new_timeline_id: TimelineId, initdb_lsn: Lsn, ) -> Result> { // XXX: keep the lock to avoid races during timeline creation @@ -207,8 +207,8 @@ impl Tenant { /// Branch a timeline pub fn branch_timeline( &self, - src: ZTimelineId, - dst: ZTimelineId, + src: TimelineId, + dst: TimelineId, start_lsn: Option, ) -> Result> { // We need to hold this lock to prevent GC from starting at the same time. GC scans the directory to learn @@ -302,14 +302,14 @@ impl Tenant { /// this function is periodically called by gc task. /// also it can be explicitly requested through page server api 'do_gc' command. /// - /// 'timelineid' specifies the timeline to GC, or None for all. + /// 'target_timeline_id' specifies the timeline to GC, or None for all. /// `horizon` specifies delta from last lsn to preserve all object versions (pitr interval). /// `checkpoint_before_gc` parameter is used to force compaction of storage before GC /// to make tests more deterministic. /// TODO Do we still need it or we can call checkpoint explicitly in tests where needed? 
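A toy rendering of the `horizon` arithmetic described above, with `Lsn` reduced to a bare u64 (the real type is `utils::lsn::Lsn`, and the saturating behavior for young timelines is an assumption of this sketch):

```rust
type Lsn = u64; // stand-in for utils::lsn::Lsn

// Keep every version newer than `last_lsn - horizon`; saturate so a
// timeline with little WAL so far keeps everything.
fn gc_cutoff(last_lsn: Lsn, horizon: u64) -> Lsn {
    last_lsn.saturating_sub(horizon)
}

fn main() {
    assert_eq!(gc_cutoff(1_000_000, 64_000), 936_000);
    assert_eq!(gc_cutoff(10, 64_000), 0); // nothing to trim yet
}
```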
pub fn gc_iteration( &self, - target_timeline_id: Option, + target_timeline_id: Option, horizon: u64, pitr: Duration, checkpoint_before_gc: bool, @@ -337,13 +337,13 @@ impl Tenant { let timelines = self.timelines.lock().unwrap(); let timelines_to_compact = timelines .iter() - .map(|(timelineid, timeline)| (*timelineid, timeline.clone())) + .map(|(timeline_id, timeline)| (*timeline_id, timeline.clone())) .collect::>(); drop(timelines); - for (timelineid, timeline) in &timelines_to_compact { + for (timeline_id, timeline) in &timelines_to_compact { let _entered = - info_span!("compact", timeline = %timelineid, tenant = %self.tenant_id).entered(); + info_span!("compact", timeline = %timeline_id, tenant = %self.tenant_id).entered(); timeline.compact()?; } @@ -362,13 +362,13 @@ impl Tenant { let timelines = self.timelines.lock().unwrap(); let timelines_to_compact = timelines .iter() - .map(|(timelineid, timeline)| (*timelineid, Arc::clone(timeline))) + .map(|(timeline_id, timeline)| (*timeline_id, Arc::clone(timeline))) .collect::>(); drop(timelines); - for (timelineid, timeline) in &timelines_to_compact { + for (timeline_id, timeline) in &timelines_to_compact { let _entered = - info_span!("checkpoint", timeline = %timelineid, tenant = %self.tenant_id) + info_span!("checkpoint", timeline = %timeline_id, tenant = %self.tenant_id) .entered(); timeline.checkpoint(CheckpointConfig::Flush)?; } @@ -377,7 +377,7 @@ impl Tenant { } /// Removes timeline-related in-memory data - pub fn delete_timeline(&self, timeline_id: ZTimelineId) -> anyhow::Result<()> { + pub fn delete_timeline(&self, timeline_id: TimelineId) -> anyhow::Result<()> { // in order to be retriable detach needs to be idempotent // (or at least to a point that each time the detach is called it can make progress) let mut timelines = self.timelines.lock().unwrap(); @@ -416,7 +416,7 @@ impl Tenant { pub fn init_attach_timelines( &self, - timelines: HashMap, + timelines: HashMap, ) -> anyhow::Result<()> { let sorted_timelines = if timelines.len() == 1 { timelines.into_iter().collect() @@ -505,13 +505,13 @@ impl Tenant { /// perform a topological sort, so that the parent of each timeline comes /// before the children. fn tree_sort_timelines( - timelines: HashMap, -) -> Result> { + timelines: HashMap, +) -> Result> { let mut result = Vec::with_capacity(timelines.len()); let mut now = Vec::with_capacity(timelines.len()); // (ancestor, children) - let mut later: HashMap> = + let mut later: HashMap> = HashMap::with_capacity(timelines.len()); for (timeline_id, metadata) in timelines { @@ -636,9 +636,9 @@ impl Tenant { fn initialize_new_timeline( &self, - new_timeline_id: ZTimelineId, + new_timeline_id: TimelineId, new_metadata: TimelineMetadata, - timelines: &mut MutexGuard>>, + timelines: &mut MutexGuard>>, ) -> anyhow::Result> { let ancestor = match new_metadata.ancestor_timeline() { Some(ancestor_timeline_id) => Some( @@ -680,7 +680,7 @@ impl Tenant { conf: &'static PageServerConf, tenant_conf: TenantConfOpt, walredo_mgr: Arc, - tenant_id: ZTenantId, + tenant_id: TenantId, remote_index: RemoteIndex, upload_layers: bool, ) -> Tenant { @@ -701,7 +701,7 @@ impl Tenant { /// Locate and load config pub fn load_tenant_config( conf: &'static PageServerConf, - tenant_id: ZTenantId, + tenant_id: TenantId, ) -> anyhow::Result { let target_config_path = TenantConf::path(conf, tenant_id); let target_config_display = target_config_path.display(); @@ -830,7 +830,7 @@ impl Tenant { // we do. 
fn gc_iteration_internal( &self, - target_timeline_id: Option, + target_timeline_id: Option, horizon: u64, pitr: Duration, checkpoint_before_gc: bool, @@ -848,7 +848,7 @@ impl Tenant { // Scan all timelines. For each timeline, remember the timeline ID and // the branch point where it was created. - let mut all_branchpoints: BTreeSet<(ZTimelineId, Lsn)> = BTreeSet::new(); + let mut all_branchpoints: BTreeSet<(TimelineId, Lsn)> = BTreeSet::new(); let timeline_ids = { if let Some(target_timeline_id) = target_timeline_id.as_ref() { if timelines.get(target_timeline_id).is_none() { @@ -861,11 +861,11 @@ impl Tenant { .map(|(timeline_id, timeline_entry)| { // This is unresolved question for now, how to do gc in presence of remote timelines // especially when this is combined with branching. - // Somewhat related: https://github.com/zenithdb/zenith/issues/999 + // Somewhat related: https://github.com/neondatabase/neon/issues/999 if let Some(ancestor_timeline_id) = &timeline_entry.get_ancestor_timeline_id() { // If target_timeline is specified, we only need to know branchpoints of its children - if let Some(timelineid) = target_timeline_id { - if ancestor_timeline_id == &timelineid { + if let Some(timeline_id) = target_timeline_id { + if ancestor_timeline_id == &timeline_id { all_branchpoints.insert(( *ancestor_timeline_id, timeline_entry.get_ancestor_lsn(), @@ -895,8 +895,8 @@ impl Tenant { .with_context(|| format!("Timeline {timeline_id} was not found"))?; // If target_timeline is specified, ignore all other timelines - if let Some(target_timelineid) = target_timeline_id { - if timeline_id != target_timelineid { + if let Some(target_timeline_id) = target_timeline_id { + if timeline_id != target_timeline_id { continue; } } @@ -952,7 +952,7 @@ impl Tenant { Ok(totals) } - pub fn tenant_id(&self) -> ZTenantId { + pub fn tenant_id(&self) -> TenantId { self.tenant_id } } @@ -998,7 +998,7 @@ pub mod harness { config::PageServerConf, repository::Key, tenant::Tenant, - walrecord::ZenithWalRecord, + walrecord::NeonWalRecord, walredo::{WalRedoError, WalRedoManager}, }; @@ -1006,12 +1006,12 @@ pub mod harness { use super::*; use crate::tenant_config::{TenantConf, TenantConfOpt}; use hex_literal::hex; - use utils::zid::{ZTenantId, ZTimelineId}; + use utils::id::{TenantId, TimelineId}; - pub const TIMELINE_ID: ZTimelineId = - ZTimelineId::from_array(hex!("11223344556677881122334455667788")); - pub const NEW_TIMELINE_ID: ZTimelineId = - ZTimelineId::from_array(hex!("AA223344556677881122334455667788")); + pub const TIMELINE_ID: TimelineId = + TimelineId::from_array(hex!("11223344556677881122334455667788")); + pub const NEW_TIMELINE_ID: TimelineId = + TimelineId::from_array(hex!("AA223344556677881122334455667788")); /// Convenience function to create a page image with given string as the only content #[allow(non_snake_case)] @@ -1047,7 +1047,7 @@ pub mod harness { pub struct TenantHarness<'a> { pub conf: &'static PageServerConf, pub tenant_conf: TenantConf, - pub tenant_id: ZTenantId, + pub tenant_id: TenantId, pub lock_guard: ( Option>, @@ -1080,7 +1080,7 @@ pub mod harness { let tenant_conf = TenantConf::dummy_conf(); - let tenant_id = ZTenantId::generate(); + let tenant_id = TenantId::generate(); fs::create_dir_all(conf.tenant_path(&tenant_id))?; fs::create_dir_all(conf.timelines_path(&tenant_id))?; @@ -1113,7 +1113,7 @@ pub mod harness { .expect("should be able to read timelines dir") { let timeline_dir_entry = timeline_dir_entry?; - let timeline_id: ZTimelineId = timeline_dir_entry + let timeline_id: 
TimelineId = timeline_dir_entry .path() .file_name() .unwrap() @@ -1128,15 +1128,15 @@ pub mod harness { Ok(tenant) } - pub fn timeline_path(&self, timeline_id: &ZTimelineId) -> PathBuf { + pub fn timeline_path(&self, timeline_id: &TimelineId) -> PathBuf { self.conf.timeline_path(timeline_id, &self.tenant_id) } } fn load_metadata( conf: &'static PageServerConf, - timeline_id: ZTimelineId, - tenant_id: ZTenantId, + timeline_id: TimelineId, + tenant_id: TenantId, ) -> anyhow::Result { let metadata_path = metadata_path(conf, timeline_id, tenant_id); let metadata_bytes = std::fs::read(&metadata_path).with_context(|| { @@ -1162,7 +1162,7 @@ pub mod harness { key: Key, lsn: Lsn, base_img: Option, - records: Vec<(Lsn, ZenithWalRecord)>, + records: Vec<(Lsn, NeonWalRecord)>, ) -> Result { let s = format!( "redo for {} to get to {}, with {} and {} records", @@ -1747,7 +1747,7 @@ mod tests { let mut tline_id = TIMELINE_ID; for _ in 0..50 { - let new_tline_id = ZTimelineId::generate(); + let new_tline_id = TimelineId::generate(); tenant.branch_timeline(tline_id, new_tline_id, Some(lsn))?; tline = tenant .get_timeline(new_tline_id) @@ -1808,7 +1808,7 @@ mod tests { #[allow(clippy::needless_range_loop)] for idx in 0..NUM_TLINES { - let new_tline_id = ZTimelineId::generate(); + let new_tline_id = TimelineId::generate(); tenant.branch_timeline(tline_id, new_tline_id, Some(lsn))?; tline = tenant .get_timeline(new_tline_id) diff --git a/pageserver/src/tenant/delta_layer.rs b/pageserver/src/tenant/delta_layer.rs index ff6d3652f9..892000c20b 100644 --- a/pageserver/src/tenant/delta_layer.rs +++ b/pageserver/src/tenant/delta_layer.rs @@ -7,7 +7,7 @@ //! must be page images or WAL records with the 'will_init' flag set, so that //! they can be replayed without referring to an older page version. //! -//! The delta files are stored in timelines/ directory. Currently, +//! The delta files are stored in timelines/ directory. Currently, //! there are no subdirectories, and each delta file is named like this: //! //! 
-__-, lsn_range: Range, @@ -81,8 +81,8 @@ impl From<&DeltaLayer> for Summary { magic: DELTA_FILE_MAGIC, format_version: STORAGE_FORMAT_VERSION, - tenantid: layer.tenantid, - timelineid: layer.timelineid, + tenant_id: layer.tenant_id, + timeline_id: layer.timeline_id, key_range: layer.key_range.clone(), lsn_range: layer.lsn_range.clone(), @@ -173,8 +173,8 @@ impl DeltaKey { pub struct DeltaLayer { path_or_conf: PathOrConf, - pub tenantid: ZTenantId, - pub timelineid: ZTimelineId, + pub tenant_id: TenantId, + pub timeline_id: TimelineId, pub key_range: Range, pub lsn_range: Range, @@ -194,12 +194,12 @@ pub struct DeltaLayerInner { } impl Layer for DeltaLayer { - fn get_tenant_id(&self) -> ZTenantId { - self.tenantid + fn get_tenant_id(&self) -> TenantId { + self.tenant_id } - fn get_timeline_id(&self) -> ZTimelineId { - self.timelineid + fn get_timeline_id(&self) -> TimelineId { + self.timeline_id } fn get_key_range(&self) -> Range { @@ -344,8 +344,8 @@ impl Layer for DeltaLayer { fn dump(&self, verbose: bool) -> Result<()> { println!( "----- delta layer for ten {} tli {} keys {}-{} lsn {}-{} ----", - self.tenantid, - self.timelineid, + self.tenant_id, + self.timeline_id, self.key_range.start, self.key_range.end, self.lsn_range.start, @@ -419,22 +419,22 @@ impl Layer for DeltaLayer { impl DeltaLayer { fn path_for( path_or_conf: &PathOrConf, - timelineid: ZTimelineId, - tenantid: ZTenantId, + timeline_id: TimelineId, + tenant_id: TenantId, fname: &DeltaFileName, ) -> PathBuf { match path_or_conf { PathOrConf::Path(path) => path.clone(), PathOrConf::Conf(conf) => conf - .timeline_path(&timelineid, &tenantid) + .timeline_path(&timeline_id, &tenant_id) .join(fname.to_string()), } } fn temp_path_for( conf: &PageServerConf, - timelineid: ZTimelineId, - tenantid: ZTenantId, + timeline_id: TimelineId, + tenant_id: TenantId, key_start: Key, lsn_range: &Range, ) -> PathBuf { @@ -444,7 +444,7 @@ impl DeltaLayer { .map(char::from) .collect(); - conf.timeline_path(&timelineid, &tenantid).join(format!( + conf.timeline_path(&timeline_id, &tenant_id).join(format!( "{}-XXX__{:016X}-{:016X}.{}.{}", key_start, u64::from(lsn_range.start), @@ -535,14 +535,14 @@ impl DeltaLayer { /// Create a DeltaLayer struct representing an existing file on disk. 
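    // Note on naming, for readers of this hunk: a delta layer's on-disk file name
    // encodes its key and LSN ranges as
    //
    //     <key start>-<key end>__<start LSN>-<end LSN>
    //
    // (the temp_path_for() format string above shows the same shape, plus a random
    // suffix and TEMP_FILE_SUFFIX while the file is still being written).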
pub fn new( conf: &'static PageServerConf, - timelineid: ZTimelineId, - tenantid: ZTenantId, + timeline_id: TimelineId, + tenant_id: TenantId, filename: &DeltaFileName, ) -> DeltaLayer { DeltaLayer { path_or_conf: PathOrConf::Conf(conf), - timelineid, - tenantid, + timeline_id, + tenant_id, key_range: filename.key_range.clone(), lsn_range: filename.lsn_range.clone(), inner: RwLock::new(DeltaLayerInner { @@ -568,8 +568,8 @@ impl DeltaLayer { Ok(DeltaLayer { path_or_conf: PathOrConf::Path(path.to_path_buf()), - timelineid: summary.timelineid, - tenantid: summary.tenantid, + timeline_id: summary.timeline_id, + tenant_id: summary.tenant_id, key_range: summary.key_range, lsn_range: summary.lsn_range, inner: RwLock::new(DeltaLayerInner { @@ -592,8 +592,8 @@ impl DeltaLayer { pub fn path(&self) -> PathBuf { Self::path_for( &self.path_or_conf, - self.timelineid, - self.tenantid, + self.timeline_id, + self.tenant_id, &self.layer_name(), ) } @@ -613,8 +613,8 @@ impl DeltaLayer { pub struct DeltaLayerWriter { conf: &'static PageServerConf, path: PathBuf, - timelineid: ZTimelineId, - tenantid: ZTenantId, + timeline_id: TimelineId, + tenant_id: TenantId, key_start: Key, lsn_range: Range, @@ -630,8 +630,8 @@ impl DeltaLayerWriter { /// pub fn new( conf: &'static PageServerConf, - timelineid: ZTimelineId, - tenantid: ZTenantId, + timeline_id: TimelineId, + tenant_id: TenantId, key_start: Key, lsn_range: Range, ) -> Result { @@ -641,7 +641,7 @@ impl DeltaLayerWriter { // // Note: This overwrites any existing file. There shouldn't be any. // FIXME: throw an error instead? - let path = DeltaLayer::temp_path_for(conf, timelineid, tenantid, key_start, &lsn_range); + let path = DeltaLayer::temp_path_for(conf, timeline_id, tenant_id, key_start, &lsn_range); let mut file = VirtualFile::create(&path)?; // make room for the header block @@ -656,8 +656,8 @@ impl DeltaLayerWriter { Ok(DeltaLayerWriter { conf, path, - timelineid, - tenantid, + timeline_id, + tenant_id, key_start, lsn_range, tree: tree_builder, @@ -718,8 +718,8 @@ impl DeltaLayerWriter { let summary = Summary { magic: DELTA_FILE_MAGIC, format_version: STORAGE_FORMAT_VERSION, - tenantid: self.tenantid, - timelineid: self.timelineid, + tenant_id: self.tenant_id, + timeline_id: self.timeline_id, key_range: self.key_start..key_end, lsn_range: self.lsn_range.clone(), index_start_blk, @@ -733,8 +733,8 @@ impl DeltaLayerWriter { // set inner.file here. The first read will have to re-open it. let layer = DeltaLayer { path_or_conf: PathOrConf::Conf(self.conf), - tenantid: self.tenantid, - timelineid: self.timelineid, + tenant_id: self.tenant_id, + timeline_id: self.timeline_id, key_range: self.key_start..key_end, lsn_range: self.lsn_range.clone(), inner: RwLock::new(DeltaLayerInner { @@ -753,8 +753,8 @@ impl DeltaLayerWriter { // FIXME: throw an error instead? 
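        // The writer builds the layer under a temporary name; the final_path computed
        // below is what the finished file gets renamed to, so readers never observe a
        // partially written layer file (the rename itself follows outside this hunk).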
let final_path = DeltaLayer::path_for( &PathOrConf::Conf(self.conf), - self.timelineid, - self.tenantid, + self.timeline_id, + self.tenant_id, &DeltaFileName { key_range: self.key_start..key_end, lsn_range: self.lsn_range, diff --git a/pageserver/src/tenant/ephemeral_file.rs b/pageserver/src/tenant/ephemeral_file.rs index c675e4e778..0774fa42a6 100644 --- a/pageserver/src/tenant/ephemeral_file.rs +++ b/pageserver/src/tenant/ephemeral_file.rs @@ -17,7 +17,7 @@ use std::ops::DerefMut; use std::path::PathBuf; use std::sync::{Arc, RwLock}; use tracing::*; -use utils::zid::{ZTenantId, ZTimelineId}; +use utils::id::{TenantId, TimelineId}; use std::os::unix::fs::FileExt; @@ -39,8 +39,8 @@ pub struct EphemeralFiles { pub struct EphemeralFile { file_id: u64, - _tenantid: ZTenantId, - _timelineid: ZTimelineId, + _tenant_id: TenantId, + _timeline_id: TimelineId, file: Arc, pub size: u64, @@ -49,15 +49,15 @@ pub struct EphemeralFile { impl EphemeralFile { pub fn create( conf: &PageServerConf, - tenantid: ZTenantId, - timelineid: ZTimelineId, + tenant_id: TenantId, + timeline_id: TimelineId, ) -> Result { let mut l = EPHEMERAL_FILES.write().unwrap(); let file_id = l.next_file_id; l.next_file_id += 1; let filename = conf - .timeline_path(&timelineid, &tenantid) + .timeline_path(&timeline_id, &tenant_id) .join(PathBuf::from(format!("ephemeral-{}", file_id))); let file = VirtualFile::open_with_options( @@ -69,8 +69,8 @@ impl EphemeralFile { Ok(EphemeralFile { file_id, - _tenantid: tenantid, - _timelineid: timelineid, + _tenant_id: tenant_id, + _timeline_id: timeline_id, file: file_rc, size: 0, }) @@ -338,7 +338,7 @@ mod tests { fn harness( test_name: &str, - ) -> Result<(&'static PageServerConf, ZTenantId, ZTimelineId), io::Error> { + ) -> Result<(&'static PageServerConf, TenantId, TimelineId), io::Error> { let repo_dir = PageServerConf::test_repo_dir(test_name); let _ = fs::remove_dir_all(&repo_dir); let conf = PageServerConf::dummy_conf(repo_dir); @@ -346,11 +346,11 @@ mod tests { // OK in a test. let conf: &'static PageServerConf = Box::leak(Box::new(conf)); - let tenantid = ZTenantId::from_str("11000000000000000000000000000000").unwrap(); - let timelineid = ZTimelineId::from_str("22000000000000000000000000000000").unwrap(); - fs::create_dir_all(conf.timeline_path(&timelineid, &tenantid))?; + let tenant_id = TenantId::from_str("11000000000000000000000000000000").unwrap(); + let timeline_id = TimelineId::from_str("22000000000000000000000000000000").unwrap(); + fs::create_dir_all(conf.timeline_path(&timeline_id, &tenant_id))?; - Ok((conf, tenantid, timelineid)) + Ok((conf, tenant_id, timeline_id)) } // Helper function to slurp contents of a file, starting at the current position, @@ -368,9 +368,9 @@ mod tests { #[test] fn test_ephemeral_files() -> Result<(), io::Error> { - let (conf, tenantid, timelineid) = harness("ephemeral_files")?; + let (conf, tenant_id, timeline_id) = harness("ephemeral_files")?; - let file_a = EphemeralFile::create(conf, tenantid, timelineid)?; + let file_a = EphemeralFile::create(conf, tenant_id, timeline_id)?; file_a.write_all_at(b"foo", 0)?; assert_eq!("foo", read_string(&file_a, 0, 20)?); @@ -381,7 +381,7 @@ mod tests { // Open a lot of files, enough to cause some page evictions. 
let mut efiles = Vec::new(); for fileno in 0..100 { - let efile = EphemeralFile::create(conf, tenantid, timelineid)?; + let efile = EphemeralFile::create(conf, tenant_id, timeline_id)?; efile.write_all_at(format!("file {}", fileno).as_bytes(), 0)?; assert_eq!(format!("file {}", fileno), read_string(&efile, 0, 10)?); efiles.push((fileno, efile)); @@ -399,9 +399,9 @@ mod tests { #[test] fn test_ephemeral_blobs() -> Result<(), io::Error> { - let (conf, tenantid, timelineid) = harness("ephemeral_blobs")?; + let (conf, tenant_id, timeline_id) = harness("ephemeral_blobs")?; - let mut file = EphemeralFile::create(conf, tenantid, timelineid)?; + let mut file = EphemeralFile::create(conf, tenant_id, timeline_id)?; let pos_foo = file.write_blob(b"foo")?; assert_eq!(b"foo", file.block_cursor().read_blob(pos_foo)?.as_slice()); diff --git a/pageserver/src/tenant/image_layer.rs b/pageserver/src/tenant/image_layer.rs index 518643241d..92bf022fee 100644 --- a/pageserver/src/tenant/image_layer.rs +++ b/pageserver/src/tenant/image_layer.rs @@ -4,7 +4,7 @@ //! but does not exist in the layer, does not exist. //! //! An image layer is stored in a file on disk. The file is stored in -//! timelines/ directory. Currently, there are no +//! timelines/ directory. Currently, there are no //! subdirectories, and each image layer file is named like this: //! //! -__ @@ -44,8 +44,8 @@ use tracing::*; use utils::{ bin_ser::BeSer, + id::{TenantId, TimelineId}, lsn::Lsn, - zid::{ZTenantId, ZTimelineId}, }; /// @@ -56,12 +56,12 @@ use utils::{ /// #[derive(Debug, Serialize, Deserialize, PartialEq, Eq)] struct Summary { - /// Magic value to identify this as a zenith image file. Always IMAGE_FILE_MAGIC. + /// Magic value to identify this as a neon image file. Always IMAGE_FILE_MAGIC. 
magic: u16, format_version: u16, - tenantid: ZTenantId, - timelineid: ZTimelineId, + tenant_id: TenantId, + timeline_id: TimelineId, key_range: Range, lsn: Lsn, @@ -77,8 +77,8 @@ impl From<&ImageLayer> for Summary { Self { magic: IMAGE_FILE_MAGIC, format_version: STORAGE_FORMAT_VERSION, - tenantid: layer.tenantid, - timelineid: layer.timelineid, + tenant_id: layer.tenant_id, + timeline_id: layer.timeline_id, key_range: layer.key_range.clone(), lsn: layer.lsn, @@ -97,8 +97,8 @@ impl From<&ImageLayer> for Summary { /// pub struct ImageLayer { path_or_conf: PathOrConf, - pub tenantid: ZTenantId, - pub timelineid: ZTimelineId, + pub tenant_id: TenantId, + pub timeline_id: TimelineId, pub key_range: Range, // This entry contains an image of all pages as of this LSN @@ -128,12 +128,12 @@ impl Layer for ImageLayer { Some(self.path()) } - fn get_tenant_id(&self) -> ZTenantId { - self.tenantid + fn get_tenant_id(&self) -> TenantId { + self.tenant_id } - fn get_timeline_id(&self) -> ZTimelineId { - self.timelineid + fn get_timeline_id(&self) -> TimelineId { + self.timeline_id } fn get_key_range(&self) -> Range { @@ -202,7 +202,7 @@ impl Layer for ImageLayer { fn dump(&self, verbose: bool) -> Result<()> { println!( "----- image layer for ten {} tli {} key {}-{} at {} ----", - self.tenantid, self.timelineid, self.key_range.start, self.key_range.end, self.lsn + self.tenant_id, self.timeline_id, self.key_range.start, self.key_range.end, self.lsn ); if !verbose { @@ -228,22 +228,22 @@ impl Layer for ImageLayer { impl ImageLayer { fn path_for( path_or_conf: &PathOrConf, - timelineid: ZTimelineId, - tenantid: ZTenantId, + timeline_id: TimelineId, + tenant_id: TenantId, fname: &ImageFileName, ) -> PathBuf { match path_or_conf { PathOrConf::Path(path) => path.to_path_buf(), PathOrConf::Conf(conf) => conf - .timeline_path(&timelineid, &tenantid) + .timeline_path(&timeline_id, &tenant_id) .join(fname.to_string()), } } fn temp_path_for( conf: &PageServerConf, - timelineid: ZTimelineId, - tenantid: ZTenantId, + timeline_id: TimelineId, + tenant_id: TenantId, fname: &ImageFileName, ) -> PathBuf { let rand_string: String = rand::thread_rng() @@ -252,7 +252,7 @@ impl ImageLayer { .map(char::from) .collect(); - conf.timeline_path(&timelineid, &tenantid) + conf.timeline_path(&timeline_id, &tenant_id) .join(format!("{fname}.{rand_string}.{TEMP_FILE_SUFFIX}")) } @@ -336,14 +336,14 @@ impl ImageLayer { /// Create an ImageLayer struct representing an existing file on disk pub fn new( conf: &'static PageServerConf, - timelineid: ZTimelineId, - tenantid: ZTenantId, + timeline_id: TimelineId, + tenant_id: TenantId, filename: &ImageFileName, ) -> ImageLayer { ImageLayer { path_or_conf: PathOrConf::Conf(conf), - timelineid, - tenantid, + timeline_id, + tenant_id, key_range: filename.key_range.clone(), lsn: filename.lsn, inner: RwLock::new(ImageLayerInner { @@ -369,8 +369,8 @@ impl ImageLayer { Ok(ImageLayer { path_or_conf: PathOrConf::Path(path.to_path_buf()), - timelineid: summary.timelineid, - tenantid: summary.tenantid, + timeline_id: summary.timeline_id, + tenant_id: summary.tenant_id, key_range: summary.key_range, lsn: summary.lsn, inner: RwLock::new(ImageLayerInner { @@ -393,8 +393,8 @@ impl ImageLayer { pub fn path(&self) -> PathBuf { Self::path_for( &self.path_or_conf, - self.timelineid, - self.tenantid, + self.timeline_id, + self.tenant_id, &self.layer_name(), ) } @@ -414,8 +414,8 @@ impl ImageLayer { pub struct ImageLayerWriter { conf: &'static PageServerConf, path: PathBuf, - timelineid: ZTimelineId, - tenantid: 
ZTenantId, + timeline_id: TimelineId, + tenant_id: TenantId, key_range: Range, lsn: Lsn, @@ -426,8 +426,8 @@ pub struct ImageLayerWriter { impl ImageLayerWriter { pub fn new( conf: &'static PageServerConf, - timelineid: ZTimelineId, - tenantid: ZTenantId, + timeline_id: TimelineId, + tenant_id: TenantId, key_range: &Range, lsn: Lsn, ) -> anyhow::Result { @@ -435,8 +435,8 @@ impl ImageLayerWriter { // We'll atomically rename it to the final name when we're done. let path = ImageLayer::temp_path_for( conf, - timelineid, - tenantid, + timeline_id, + tenant_id, &ImageFileName { key_range: key_range.clone(), lsn, @@ -458,8 +458,8 @@ impl ImageLayerWriter { let writer = ImageLayerWriter { conf, path, - timelineid, - tenantid, + timeline_id, + tenant_id, key_range: key_range.clone(), lsn, tree: tree_builder, @@ -502,8 +502,8 @@ impl ImageLayerWriter { let summary = Summary { magic: IMAGE_FILE_MAGIC, format_version: STORAGE_FORMAT_VERSION, - tenantid: self.tenantid, - timelineid: self.timelineid, + tenant_id: self.tenant_id, + timeline_id: self.timeline_id, key_range: self.key_range.clone(), lsn: self.lsn, index_start_blk, @@ -517,8 +517,8 @@ impl ImageLayerWriter { // set inner.file here. The first read will have to re-open it. let layer = ImageLayer { path_or_conf: PathOrConf::Conf(self.conf), - timelineid: self.timelineid, - tenantid: self.tenantid, + timeline_id: self.timeline_id, + tenant_id: self.tenant_id, key_range: self.key_range.clone(), lsn: self.lsn, inner: RwLock::new(ImageLayerInner { @@ -538,8 +538,8 @@ impl ImageLayerWriter { // FIXME: throw an error instead? let final_path = ImageLayer::path_for( &PathOrConf::Conf(self.conf), - self.timelineid, - self.tenantid, + self.timeline_id, + self.tenant_id, &ImageFileName { key_range: self.key_range.clone(), lsn: self.lsn, diff --git a/pageserver/src/tenant/inmemory_layer.rs b/pageserver/src/tenant/inmemory_layer.rs index 0e7b215b1e..9aa33a72ca 100644 --- a/pageserver/src/tenant/inmemory_layer.rs +++ b/pageserver/src/tenant/inmemory_layer.rs @@ -18,9 +18,9 @@ use std::collections::HashMap; use tracing::*; use utils::{ bin_ser::BeSer, + id::{TenantId, TimelineId}, lsn::Lsn, vec_map::VecMap, - zid::{ZTenantId, ZTimelineId}, }; // avoid binding to Write (conflicts with std::io::Write) // while being able to use std::fmt::Write's methods @@ -37,8 +37,8 @@ thread_local! { pub struct InMemoryLayer { conf: &'static PageServerConf, - tenantid: ZTenantId, - timelineid: ZTimelineId, + tenant_id: TenantId, + timeline_id: TimelineId, /// /// This layer contains all the changes from 'start_lsn'. 
The @@ -94,12 +94,12 @@ impl Layer for InMemoryLayer { None } - fn get_tenant_id(&self) -> ZTenantId { - self.tenantid + fn get_tenant_id(&self) -> TenantId { + self.tenant_id } - fn get_timeline_id(&self) -> ZTimelineId { - self.timelineid + fn get_timeline_id(&self) -> TimelineId { + self.timeline_id } fn get_key_range(&self) -> Range { @@ -197,7 +197,7 @@ impl Layer for InMemoryLayer { println!( "----- in-memory layer for tli {} LSNs {}-{} ----", - self.timelineid, self.start_lsn, end_str, + self.timeline_id, self.start_lsn, end_str, ); if !verbose { @@ -251,22 +251,18 @@ impl InMemoryLayer { /// pub fn create( conf: &'static PageServerConf, - timelineid: ZTimelineId, - tenantid: ZTenantId, + timeline_id: TimelineId, + tenant_id: TenantId, start_lsn: Lsn, ) -> Result { - trace!( - "initializing new empty InMemoryLayer for writing on timeline {} at {}", - timelineid, - start_lsn - ); + trace!("initializing new empty InMemoryLayer for writing on timeline {timeline_id} at {start_lsn}"); - let file = EphemeralFile::create(conf, tenantid, timelineid)?; + let file = EphemeralFile::create(conf, tenant_id, timeline_id)?; Ok(InMemoryLayer { conf, - timelineid, - tenantid, + timeline_id, + tenant_id, start_lsn, inner: RwLock::new(InMemoryLayerInner { end_lsn: None, @@ -281,7 +277,7 @@ impl InMemoryLayer { /// Common subroutine of the public put_wal_record() and put_page_image() functions. /// Adds the page version to the in-memory tree pub fn put_value(&self, key: Key, lsn: Lsn, val: &Value) -> Result<()> { - trace!("put_value key {} at {}/{}", key, self.timelineid, lsn); + trace!("put_value key {} at {}/{}", key, self.timeline_id, lsn); let mut inner = self.inner.write().unwrap(); inner.assert_writeable(); @@ -344,8 +340,8 @@ impl InMemoryLayer { let mut delta_layer_writer = DeltaLayerWriter::new( self.conf, - self.timelineid, - self.tenantid, + self.timeline_id, + self.tenant_id, Key::MIN, self.start_lsn..inner.end_lsn.unwrap(), )?; diff --git a/pageserver/src/tenant/layer_map.rs b/pageserver/src/tenant/layer_map.rs index c24e3976fb..8abeebf54c 100644 --- a/pageserver/src/tenant/layer_map.rs +++ b/pageserver/src/tenant/layer_map.rs @@ -2,7 +2,7 @@ //! The layer map tracks what layers exist in a timeline. //! //! When the timeline is first accessed, the server lists of all layer files -//! in the timelines/ directory, and populates this map with +//! in the timelines/ directory, and populates this map with //! ImageLayer and DeltaLayer structs corresponding to each file. When the first //! new WAL record is received, we create an InMemoryLayer to hold the incoming //! records. 
Now and then, in the checkpoint() function, the in-memory layer is diff --git a/pageserver/src/tenant/metadata.rs b/pageserver/src/tenant/metadata.rs index 4ea2b7d55b..ace4dc91e9 100644 --- a/pageserver/src/tenant/metadata.rs +++ b/pageserver/src/tenant/metadata.rs @@ -15,8 +15,8 @@ use serde::{Deserialize, Serialize}; use tracing::info_span; use utils::{ bin_ser::BeSer, + id::{TenantId, TimelineId}, lsn::Lsn, - zid::{ZTenantId, ZTimelineId}, }; use crate::config::PageServerConf; @@ -63,7 +63,7 @@ struct TimelineMetadataBody { // doing a clean shutdown, so that there is no more WAL beyond // 'disk_consistent_lsn' prev_record_lsn: Option, - ancestor_timeline: Option, + ancestor_timeline: Option, ancestor_lsn: Lsn, latest_gc_cutoff_lsn: Lsn, initdb_lsn: Lsn, @@ -73,7 +73,7 @@ impl TimelineMetadata { pub fn new( disk_consistent_lsn: Lsn, prev_record_lsn: Option, - ancestor_timeline: Option, + ancestor_timeline: Option, ancestor_lsn: Lsn, latest_gc_cutoff_lsn: Lsn, initdb_lsn: Lsn, @@ -149,7 +149,7 @@ impl TimelineMetadata { self.body.prev_record_lsn } - pub fn ancestor_timeline(&self) -> Option { + pub fn ancestor_timeline(&self) -> Option { self.body.ancestor_timeline } @@ -170,23 +170,23 @@ impl TimelineMetadata { /// where certain timeline's metadata file should be located. pub fn metadata_path( conf: &'static PageServerConf, - timelineid: ZTimelineId, - tenantid: ZTenantId, + timeline_id: TimelineId, + tenant_id: TenantId, ) -> PathBuf { - conf.timeline_path(&timelineid, &tenantid) + conf.timeline_path(&timeline_id, &tenant_id) .join(METADATA_FILE_NAME) } /// Save timeline metadata to file pub fn save_metadata( conf: &'static PageServerConf, - timelineid: ZTimelineId, - tenantid: ZTenantId, + timeline_id: TimelineId, + tenant_id: TenantId, data: &TimelineMetadata, first_save: bool, ) -> anyhow::Result<()> { let _enter = info_span!("saving metadata").entered(); - let path = metadata_path(conf, timelineid, tenantid); + let path = metadata_path(conf, timeline_id, tenant_id); // use OpenOptions to ensure file presence is consistent with first_save let mut file = VirtualFile::open_with_options( &path, diff --git a/pageserver/src/tenant/storage_layer.rs b/pageserver/src/tenant/storage_layer.rs index e10330bdd3..8dafcab124 100644 --- a/pageserver/src/tenant/storage_layer.rs +++ b/pageserver/src/tenant/storage_layer.rs @@ -3,15 +3,15 @@ //! 
use crate::repository::{Key, Value}; -use crate::walrecord::ZenithWalRecord; +use crate::walrecord::NeonWalRecord; use anyhow::Result; use bytes::Bytes; use std::ops::Range; use std::path::PathBuf; use utils::{ + id::{TenantId, TimelineId}, lsn::Lsn, - zid::{ZTenantId, ZTimelineId}, }; pub fn range_overlaps(a: &Range, b: &Range) -> bool @@ -50,7 +50,7 @@ where /// #[derive(Debug)] pub struct ValueReconstructState { - pub records: Vec<(Lsn, ZenithWalRecord)>, + pub records: Vec<(Lsn, NeonWalRecord)>, pub img: Option<(Lsn, Bytes)>, } @@ -84,10 +84,10 @@ pub enum ValueReconstructResult { /// LSN /// pub trait Layer: Send + Sync { - fn get_tenant_id(&self) -> ZTenantId; + fn get_tenant_id(&self) -> TenantId; /// Identify the timeline this layer belongs to - fn get_timeline_id(&self) -> ZTimelineId; + fn get_timeline_id(&self) -> TimelineId; /// Range of keys that this layer covers fn get_key_range(&self) -> Range; diff --git a/pageserver/src/tenant/timeline.rs b/pageserver/src/tenant/timeline.rs index c96ad99909..e821ef1b9a 100644 --- a/pageserver/src/tenant/timeline.rs +++ b/pageserver/src/tenant/timeline.rs @@ -39,10 +39,10 @@ use crate::tenant_config::TenantConfOpt; use postgres_ffi::v14::xlog_utils::to_pg_timestamp; use utils::{ + id::{TenantId, TimelineId}, lsn::{AtomicLsn, Lsn, RecordLsn}, seqwait::SeqWait, simple_rcu::{Rcu, RcuReadGuard}, - zid::{ZTenantId, ZTimelineId}, }; use crate::repository::GcResult; @@ -58,8 +58,8 @@ pub struct Timeline { conf: &'static PageServerConf, tenant_conf: Arc>, - pub tenant_id: ZTenantId, - pub timeline_id: ZTimelineId, + pub tenant_id: TenantId, + pub timeline_id: TimelineId, pub layers: RwLock, @@ -312,7 +312,7 @@ impl Timeline { } /// Get the ancestor's timeline id - pub fn get_ancestor_timeline_id(&self) -> Option { + pub fn get_ancestor_timeline_id(&self) -> Option { self.ancestor_timeline .as_ref() .map(|ancestor| ancestor.timeline_id) @@ -531,8 +531,8 @@ impl Timeline { tenant_conf: Arc>, metadata: TimelineMetadata, ancestor: Option>, - timeline_id: ZTimelineId, - tenant_id: ZTenantId, + timeline_id: TimelineId, + tenant_id: TenantId, walredo_mgr: Arc, upload_layers: bool, ) -> Timeline { @@ -1250,7 +1250,7 @@ impl Timeline { None }; - let ancestor_timelineid = self + let ancestor_timeline_id = self .ancestor_timeline .as_ref() .map(|ancestor| ancestor.timeline_id); @@ -1258,7 +1258,7 @@ impl Timeline { let metadata = TimelineMetadata::new( disk_consistent_lsn, ondisk_prev_record_lsn, - ancestor_timelineid, + ancestor_timeline_id, self.ancestor_lsn, *self.latest_gc_cutoff_lsn.read(), self.initdb_lsn, diff --git a/pageserver/src/tenant_config.rs b/pageserver/src/tenant_config.rs index 73bf3636d2..4448ffc456 100644 --- a/pageserver/src/tenant_config.rs +++ b/pageserver/src/tenant_config.rs @@ -13,7 +13,7 @@ use serde::{Deserialize, Serialize}; use std::num::NonZeroU64; use std::path::PathBuf; use std::time::Duration; -use utils::zid::ZTenantId; +use utils::id::TenantId; pub const TENANT_CONFIG_NAME: &str = "config"; @@ -217,8 +217,8 @@ impl TenantConf { /// Points to a place in pageserver's local directory, /// where certain tenant's tenantconf file should be located. 
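    // Given TENANT_CONFIG_NAME = "config" above, the resulting on-disk location is
    //
    //     <pageserver workdir>/tenants/<tenant_id>/config
    //
    // (layout inferred from tenant_path(); shown here for orientation only).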
- pub fn path(conf: &'static PageServerConf, tenantid: ZTenantId) -> PathBuf { - conf.tenant_path(&tenantid).join(TENANT_CONFIG_NAME) + pub fn path(conf: &'static PageServerConf, tenant_id: TenantId) -> PathBuf { + conf.tenant_path(&tenant_id).join(TENANT_CONFIG_NAME) } #[cfg(test)] diff --git a/pageserver/src/tenant_mgr.rs b/pageserver/src/tenant_mgr.rs index a8a9926c77..d6fa843305 100644 --- a/pageserver/src/tenant_mgr.rs +++ b/pageserver/src/tenant_mgr.rs @@ -27,7 +27,7 @@ use crate::walredo::PostgresRedoManager; use crate::{TenantTimelineValues, TEMP_FILE_SUFFIX}; use utils::crashsafe_dir; -use utils::zid::{ZTenantId, ZTimelineId}; +use utils::id::{TenantId, TimelineId}; mod tenants_state { use once_cell::sync::Lazy; @@ -35,20 +35,20 @@ mod tenants_state { collections::HashMap, sync::{Arc, RwLock, RwLockReadGuard, RwLockWriteGuard}, }; - use utils::zid::ZTenantId; + use utils::id::TenantId; use crate::tenant::Tenant; - static TENANTS: Lazy>>> = + static TENANTS: Lazy>>> = Lazy::new(|| RwLock::new(HashMap::new())); - pub(super) fn read_tenants() -> RwLockReadGuard<'static, HashMap>> { + pub(super) fn read_tenants() -> RwLockReadGuard<'static, HashMap>> { TENANTS .read() .expect("Failed to read() tenants lock, it got poisoned") } - pub(super) fn write_tenants() -> RwLockWriteGuard<'static, HashMap>> { + pub(super) fn write_tenants() -> RwLockWriteGuard<'static, HashMap>> { TENANTS .write() .expect("Failed to write() tenants lock, it got poisoned") @@ -159,7 +159,7 @@ pub fn attach_local_tenants( fn load_local_tenant( conf: &'static PageServerConf, - tenant_id: ZTenantId, + tenant_id: TenantId, remote_index: &RemoteIndex, ) -> Arc { let tenant = Arc::new(Tenant::new( @@ -225,7 +225,7 @@ pub async fn shutdown_all_tenants() { fn create_tenant_files( conf: &'static PageServerConf, tenant_conf: TenantConfOpt, - tenant_id: ZTenantId, + tenant_id: TenantId, ) -> anyhow::Result<()> { let target_tenant_directory = conf.tenant_path(&tenant_id); anyhow::ensure!( @@ -310,9 +310,9 @@ fn rebase_directory(original_path: &Path, base: &Path, new_base: &Path) -> anyho pub fn create_tenant( conf: &'static PageServerConf, tenant_conf: TenantConfOpt, - tenant_id: ZTenantId, + tenant_id: TenantId, remote_index: RemoteIndex, -) -> anyhow::Result> { +) -> anyhow::Result> { match tenants_state::write_tenants().entry(tenant_id) { hash_map::Entry::Occupied(_) => { debug!("tenant {tenant_id} already exists"); @@ -339,7 +339,7 @@ pub fn create_tenant( pub fn update_tenant_config( conf: &'static PageServerConf, tenant_conf: TenantConfOpt, - tenant_id: ZTenantId, + tenant_id: TenantId, ) -> anyhow::Result<()> { info!("configuring tenant {tenant_id}"); get_tenant(tenant_id, true)?.update_tenant_config(tenant_conf); @@ -349,7 +349,7 @@ pub fn update_tenant_config( /// Gets the tenant from the in-memory data, erroring if it's absent or is not fitting to the query. /// `active_only = true` allows to query only tenants that are ready for operations, erroring on other kinds of tenants. 
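// Illustrative call site for the renamed API (a sketch, assuming an active
// tenant and the usual module paths; not part of this patch):
//
//     let tenant = tenant_mgr::get_tenant(tenant_id, /* active_only */ true)?;
//     let timeline = tenant.get_timeline(timeline_id)?;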
-pub fn get_tenant(tenant_id: ZTenantId, active_only: bool) -> anyhow::Result> { +pub fn get_tenant(tenant_id: TenantId, active_only: bool) -> anyhow::Result> { let m = tenants_state::read_tenants(); let tenant = m .get(&tenant_id) @@ -361,7 +361,7 @@ pub fn get_tenant(tenant_id: ZTenantId, active_only: bool) -> anyhow::Result anyhow::Result<()> { +pub async fn delete_timeline(tenant_id: TenantId, timeline_id: TimelineId) -> anyhow::Result<()> { // Start with the shutdown of timeline tasks (this shuts down the walreceiver) // It is important that we do not take locks here, and do not check whether the timeline exists // because if we hold tenants_state::write_tenants() while awaiting for the tasks to join @@ -398,7 +398,7 @@ pub async fn delete_timeline(tenant_id: ZTenantId, timeline_id: ZTimelineId) -> pub async fn detach_tenant( conf: &'static PageServerConf, - tenant_id: ZTenantId, + tenant_id: TenantId, ) -> anyhow::Result<()> { let tenant = match { let mut tenants_accessor = tenants_state::write_tenants(); @@ -565,14 +565,14 @@ fn collect_timelines_for_tenant( config: &'static PageServerConf, tenant_path: &Path, ) -> anyhow::Result<( - ZTenantId, - HashMap)>, + TenantId, + HashMap)>, )> { let tenant_id = tenant_path .file_name() .and_then(OsStr::to_str) .unwrap_or_default() - .parse::() + .parse::() .context("Could not parse tenant id out of the tenant dir name")?; let timelines_dir = config.timelines_path(&tenant_id); @@ -644,7 +644,7 @@ fn collect_timelines_for_tenant( // NOTE: ephemeral files are excluded from the list fn collect_timeline_files( timeline_dir: &Path, -) -> anyhow::Result<(ZTimelineId, TimelineMetadata, HashSet)> { +) -> anyhow::Result<(TimelineId, TimelineMetadata, HashSet)> { let mut timeline_files = HashSet::new(); let mut timeline_metadata_path = None; @@ -652,7 +652,7 @@ fn collect_timeline_files( .file_name() .and_then(OsStr::to_str) .unwrap_or_default() - .parse::() + .parse::() .context("Could not parse timeline id out of the timeline dir name")?; let timeline_dir_entries = fs::read_dir(&timeline_dir).context("Failed to list timeline dir contents")?; diff --git a/pageserver/src/tenant_tasks.rs b/pageserver/src/tenant_tasks.rs index 3ef54838af..c543a0ecb1 100644 --- a/pageserver/src/tenant_tasks.rs +++ b/pageserver/src/tenant_tasks.rs @@ -10,9 +10,9 @@ use crate::task_mgr::{self, TaskKind, BACKGROUND_RUNTIME}; use crate::tenant::{Tenant, TenantState}; use crate::tenant_mgr; use tracing::*; -use utils::zid::ZTenantId; +use utils::id::TenantId; -pub fn start_background_loops(tenant_id: ZTenantId) { +pub fn start_background_loops(tenant_id: TenantId) { task_mgr::spawn( BACKGROUND_RUNTIME.handle(), TaskKind::Compaction, @@ -42,9 +42,8 @@ pub fn start_background_loops(tenant_id: ZTenantId) { /// /// Compaction task's main loop /// -async fn compaction_loop(tenant_id: ZTenantId) { +async fn compaction_loop(tenant_id: TenantId) { let wait_duration = Duration::from_secs(2); - info!("starting compaction loop for {tenant_id}"); TENANT_TASK_EVENTS.with_label_values(&["start"]).inc(); async { @@ -90,9 +89,8 @@ async fn compaction_loop(tenant_id: ZTenantId) { /// /// GC task's main loop /// -async fn gc_loop(tenant_id: ZTenantId) { +async fn gc_loop(tenant_id: TenantId) { let wait_duration = Duration::from_secs(2); - info!("starting gc loop for {tenant_id}"); TENANT_TASK_EVENTS.with_label_values(&["start"]).inc(); async { @@ -138,7 +136,7 @@ async fn gc_loop(tenant_id: ZTenantId) { } async fn wait_for_active_tenant( - tenant_id: ZTenantId, + tenant_id: TenantId, wait: 
Duration, ) -> ControlFlow<(), Arc> { let tenant = loop { diff --git a/pageserver/src/timelines.rs b/pageserver/src/timelines.rs index 69d14babf0..88b26e18f4 100644 --- a/pageserver/src/timelines.rs +++ b/pageserver/src/timelines.rs @@ -14,8 +14,8 @@ use tracing::*; use remote_storage::path_with_suffix_extension; use utils::{ + id::{TenantId, TimelineId}, lsn::Lsn, - zid::{ZTenantId, ZTimelineId}, }; use crate::config::PageServerConf; @@ -61,8 +61,8 @@ fn run_initdb(conf: &'static PageServerConf, initdbpath: &Path) -> Result<()> { // fn bootstrap_timeline( conf: &'static PageServerConf, - tenant_id: ZTenantId, - timeline_id: ZTimelineId, + tenant_id: TenantId, + timeline_id: TimelineId, tenant: &Tenant, ) -> Result> { // create a `tenant/{tenant_id}/timelines/basebackup-{timeline_id}.{TEMP_FILE_SUFFIX}/` @@ -115,12 +115,12 @@ fn bootstrap_timeline( /// pub(crate) async fn create_timeline( conf: &'static PageServerConf, - tenant_id: ZTenantId, - new_timeline_id: Option, - ancestor_timeline_id: Option, + tenant_id: TenantId, + new_timeline_id: Option, + ancestor_timeline_id: Option, mut ancestor_start_lsn: Option, ) -> Result>> { - let new_timeline_id = new_timeline_id.unwrap_or_else(ZTimelineId::generate); + let new_timeline_id = new_timeline_id.unwrap_or_else(TimelineId::generate); let tenant = tenant_mgr::get_tenant(tenant_id, true)?; if conf.timeline_path(&new_timeline_id, &tenant_id).exists() { diff --git a/pageserver/src/virtual_file.rs b/pageserver/src/virtual_file.rs index 7a2c699b44..896c2603a2 100644 --- a/pageserver/src/virtual_file.rs +++ b/pageserver/src/virtual_file.rs @@ -53,8 +53,8 @@ pub struct VirtualFile { pub path: PathBuf, open_options: OpenOptions, - tenantid: String, - timelineid: String, + tenant_id: String, + timeline_id: String, } #[derive(Debug, PartialEq, Clone, Copy)] @@ -149,7 +149,7 @@ impl OpenFiles { // old file. // if let Some(old_file) = slot_guard.file.take() { - // We do not have information about tenantid/timelineid of evicted file. + // We do not have information about tenant_id/timeline_id of evicted file. // It is possible to store path together with file or use filepath crate, // but as far as close() is not expected to be fast, it is not so critical to gather // precise per-tenant statistic here. @@ -197,18 +197,18 @@ impl VirtualFile { ) -> Result { let path_str = path.to_string_lossy(); let parts = path_str.split('/').collect::>(); - let tenantid; - let timelineid; + let tenant_id; + let timeline_id; if parts.len() > 5 && parts[parts.len() - 5] == "tenants" { - tenantid = parts[parts.len() - 4].to_string(); - timelineid = parts[parts.len() - 2].to_string(); + tenant_id = parts[parts.len() - 4].to_string(); + timeline_id = parts[parts.len() - 2].to_string(); } else { - tenantid = "*".to_string(); - timelineid = "*".to_string(); + tenant_id = "*".to_string(); + timeline_id = "*".to_string(); } let (handle, mut slot_guard) = get_open_files().find_victim_slot(); let file = STORAGE_IO_TIME - .with_label_values(&["open", &tenantid, &timelineid]) + .with_label_values(&["open", &tenant_id, &timeline_id]) .observe_closure_duration(|| open_options.open(path))?; // Strip all options other than read and write. @@ -226,8 +226,8 @@ impl VirtualFile { pos: 0, path: path.to_path_buf(), open_options: reopen_options, - tenantid, - timelineid, + tenant_id, + timeline_id, }; slot_guard.file.replace(file); @@ -267,7 +267,7 @@ impl VirtualFile { // Found a cached file descriptor. 
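                // Descriptive note (inferred from the surrounding code): setting
                // recently_used here gives the slot a second chance in
                // find_victim_slot(), so frequently hit file descriptors survive
                // eviction rounds in this clock-like replacement scheme.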
                slot.recently_used.store(true, Ordering::Relaxed);
                return Ok(STORAGE_IO_TIME
-                    .with_label_values(&[op, &self.tenantid, &self.timelineid])
+                    .with_label_values(&[op, &self.tenant_id, &self.timeline_id])
                    .observe_closure_duration(|| func(file)));
            }
        }
@@ -294,7 +294,7 @@ impl VirtualFile {
         // Open the physical file
         let file = STORAGE_IO_TIME
-            .with_label_values(&["open", &self.tenantid, &self.timelineid])
+            .with_label_values(&["open", &self.tenant_id, &self.timeline_id])
             .observe_closure_duration(|| self.open_options.open(&self.path))?;
         // Perform the requested operation on it
@@ -308,7 +308,7 @@ impl VirtualFile {
         // may deadlock on subsequent read calls.
         // Simply replacing all `RwLock` in project causes deadlocks, so use it sparingly.
         let result = STORAGE_IO_TIME
-            .with_label_values(&[op, &self.tenantid, &self.timelineid])
+            .with_label_values(&[op, &self.tenant_id, &self.timeline_id])
             .observe_closure_duration(|| func(&file));
         // Store the File in the slot and update the handle in the VirtualFile
@@ -333,11 +333,11 @@ impl Drop for VirtualFile {
             if slot_guard.tag == handle.tag {
                 slot.recently_used.store(false, Ordering::Relaxed);
                 // Unlike files evicted by the replacement algorithm, here
-                // we group close time by tenantid/timelineid.
+                // we group close time by tenant_id/timeline_id.
                 // That allows comparing the number/time of "normal" file closes
                 // with file evictions.
                 STORAGE_IO_TIME
-                    .with_label_values(&["close", &self.tenantid, &self.timelineid])
+                    .with_label_values(&["close", &self.tenant_id, &self.timeline_id])
                    .observe_closure_duration(|| slot_guard.file.take());
            }
        }
@@ -399,7 +399,7 @@ impl FileExt for VirtualFile {
         let result = self.with_file("read", |file| file.read_at(buf, offset))?;
         if let Ok(size) = result {
             STORAGE_IO_SIZE
-                .with_label_values(&["read", &self.tenantid, &self.timelineid])
+                .with_label_values(&["read", &self.tenant_id, &self.timeline_id])
                 .add(size as i64);
         }
         result
@@ -409,7 +409,7 @@ impl FileExt for VirtualFile {
         let result = self.with_file("write", |file| file.write_at(buf, offset))?;
         if let Ok(size) = result {
             STORAGE_IO_SIZE
-                .with_label_values(&["write", &self.tenantid, &self.timelineid])
+                .with_label_values(&["write", &self.tenant_id, &self.timeline_id])
                 .add(size as i64);
         }
         result
diff --git a/pageserver/src/walingest.rs b/pageserver/src/walingest.rs
index 45d0916dec..bede4ac13e 100644
--- a/pageserver/src/walingest.rs
+++ b/pageserver/src/walingest.rs
@@ -1,5 +1,5 @@
 //!
-//! Parse PostgreSQL WAL records and store them in a zenith Timeline.
+//! Parse PostgreSQL WAL records and store them in a neon Timeline.
 //!
 //! The pipeline for ingesting WAL looks like this:
 //!
@@ -9,7 +9,7 @@
 //! and decodes it to individual WAL records. It feeds the WAL records
 //! to WalIngest, which parses them and stores them in the Repository.
 //!
-//! The zenith Repository can store page versions in two formats: as
+//! The neon Repository can store page versions in two formats: as
 //! page images, or as WAL records. WalIngest::ingest_record() extracts
 //! page images out of some WAL records, but stores most of them as WAL
 //! records.
If a WAL record modifies multiple pages, WalIngest @@ -315,7 +315,7 @@ impl<'a> WalIngest<'a> { assert_eq!(image.len(), BLCKSZ as usize); self.put_rel_page_image(modification, rel, blk.blkno, image.freeze())?; } else { - let rec = ZenithWalRecord::Postgres { + let rec = NeonWalRecord::Postgres { will_init: blk.will_init || blk.apply_image, rec: decoded.record.clone(), }; @@ -428,7 +428,7 @@ impl<'a> WalIngest<'a> { modification, vm_rel, new_vm_blk.unwrap(), - ZenithWalRecord::ClearVisibilityMapFlags { + NeonWalRecord::ClearVisibilityMapFlags { new_heap_blkno, old_heap_blkno, flags: pg_constants::VISIBILITYMAP_VALID_BITS, @@ -442,7 +442,7 @@ impl<'a> WalIngest<'a> { modification, vm_rel, new_vm_blk, - ZenithWalRecord::ClearVisibilityMapFlags { + NeonWalRecord::ClearVisibilityMapFlags { new_heap_blkno, old_heap_blkno: None, flags: pg_constants::VISIBILITYMAP_VALID_BITS, @@ -454,7 +454,7 @@ impl<'a> WalIngest<'a> { modification, vm_rel, old_vm_blk, - ZenithWalRecord::ClearVisibilityMapFlags { + NeonWalRecord::ClearVisibilityMapFlags { new_heap_blkno: None, old_heap_blkno, flags: pg_constants::VISIBILITYMAP_VALID_BITS, @@ -642,12 +642,12 @@ impl<'a> WalIngest<'a> { segno, rpageno, if is_commit { - ZenithWalRecord::ClogSetCommitted { + NeonWalRecord::ClogSetCommitted { xids: page_xids, timestamp: parsed.xact_time, } } else { - ZenithWalRecord::ClogSetAborted { xids: page_xids } + NeonWalRecord::ClogSetAborted { xids: page_xids } }, )?; page_xids = Vec::new(); @@ -662,12 +662,12 @@ impl<'a> WalIngest<'a> { segno, rpageno, if is_commit { - ZenithWalRecord::ClogSetCommitted { + NeonWalRecord::ClogSetCommitted { xids: page_xids, timestamp: parsed.xact_time, } } else { - ZenithWalRecord::ClogSetAborted { xids: page_xids } + NeonWalRecord::ClogSetAborted { xids: page_xids } }, )?; @@ -760,7 +760,7 @@ impl<'a> WalIngest<'a> { SlruKind::MultiXactOffsets, segno, rpageno, - ZenithWalRecord::MultixactOffsetCreate { + NeonWalRecord::MultixactOffsetCreate { mid: xlrec.mid, moff: xlrec.moff, }, @@ -794,7 +794,7 @@ impl<'a> WalIngest<'a> { SlruKind::MultiXactMembers, pageno / pg_constants::SLRU_PAGES_PER_SEGMENT, pageno % pg_constants::SLRU_PAGES_PER_SEGMENT, - ZenithWalRecord::MultixactMembersCreate { + NeonWalRecord::MultixactMembersCreate { moff: offset, members: this_page_members, }, @@ -901,7 +901,7 @@ impl<'a> WalIngest<'a> { modification: &mut DatadirModification, rel: RelTag, blknum: BlockNumber, - rec: ZenithWalRecord, + rec: NeonWalRecord, ) -> Result<()> { self.handle_rel_extend(modification, rel, blknum)?; modification.put_rel_wal_record(rel, blknum, rec)?; diff --git a/pageserver/src/walreceiver/connection_manager.rs b/pageserver/src/walreceiver/connection_manager.rs index 69e400f291..1e4b4e7d52 100644 --- a/pageserver/src/walreceiver/connection_manager.rs +++ b/pageserver/src/walreceiver/connection_manager.rs @@ -34,8 +34,8 @@ use crate::{ DEFAULT_MAX_BACKOFF_SECONDS, }; use utils::{ + id::{NodeId, TenantTimelineId}, lsn::Lsn, - zid::{NodeId, ZTenantTimelineId}, }; use super::{walreceiver_connection::WalConnectionStatus, TaskEvent, TaskHandle}; @@ -101,7 +101,7 @@ async fn connection_manager_loop_step( etcd_client: &mut Client, walreceiver_state: &mut WalreceiverState, ) { - let id = ZTenantTimelineId { + let id = TenantTimelineId { tenant_id: walreceiver_state.timeline.tenant_id, timeline_id: walreceiver_state.timeline.timeline_id, }; @@ -230,7 +230,7 @@ fn cleanup_broker_connection( async fn subscribe_for_timeline_updates( etcd_client: &mut Client, broker_prefix: &str, - id: ZTenantTimelineId, 
+ id: TenantTimelineId, ) -> BrokerSubscription { let mut attempt = 0; loop { @@ -266,7 +266,7 @@ const WALCONNECTION_RETRY_BACKOFF_MULTIPLIER: f64 = 1.5; /// All data that's needed to run endless broker loop and keep the WAL streaming connection alive, if possible. struct WalreceiverState { - id: ZTenantTimelineId, + id: TenantTimelineId, /// Use pageserver data about the timeline to filter out some of the safekeepers. timeline: Arc, @@ -331,7 +331,7 @@ impl WalreceiverState { lagging_wal_timeout: Duration, max_lsn_wal_lag: NonZeroU64, ) -> Self { - let id = ZTenantTimelineId { + let id = TenantTimelineId { tenant_id: timeline.tenant_id, timeline_id: timeline.timeline_id, }; @@ -746,10 +746,10 @@ enum ReconnectReason { } fn wal_stream_connection_string( - ZTenantTimelineId { + TenantTimelineId { tenant_id, timeline_id, - }: ZTenantTimelineId, + }: TenantTimelineId, listen_pg_addr_str: &str, ) -> anyhow::Result { let sk_connstr = format!("postgresql://no_user@{listen_pg_addr_str}/no_db"); @@ -760,7 +760,7 @@ fn wal_stream_connection_string( })?; let (host, port) = utils::connstring::connection_host_port(&me_conf); Ok(format!( - "host={host} port={port} options='-c ztimelineid={timeline_id} ztenantid={tenant_id}'" + "host={host} port={port} options='-c timeline_id={timeline_id} tenant_id={tenant_id}'" )) } @@ -1355,7 +1355,7 @@ mod tests { fn dummy_state(harness: &TenantHarness) -> WalreceiverState { WalreceiverState { - id: ZTenantTimelineId { + id: TenantTimelineId { tenant_id: harness.tenant_id, timeline_id: TIMELINE_ID, }, diff --git a/pageserver/src/walreceiver/walreceiver_connection.rs b/pageserver/src/walreceiver/walreceiver_connection.rs index 6f1fbc2c9d..29c4cea882 100644 --- a/pageserver/src/walreceiver/walreceiver_connection.rs +++ b/pageserver/src/walreceiver/walreceiver_connection.rs @@ -30,7 +30,7 @@ use crate::{ walrecord::DecodedWALRecord, }; use postgres_ffi::v14::waldecoder::WalStreamDecoder; -use utils::zid::ZTenantTimelineId; +use utils::id::TenantTimelineId; use utils::{lsn::Lsn, pq_proto::ReplicationFeedback}; /// Status of the connection. @@ -288,7 +288,7 @@ pub async fn handle_walreceiver_connection( .await // here we either do not have this timeline in remote index // or there were no checkpoints for it yet - .timeline_entry(&ZTenantTimelineId { + .timeline_entry(&TenantTimelineId { tenant_id, timeline_id, }) @@ -316,7 +316,7 @@ pub async fn handle_walreceiver_connection( }; *timeline.last_received_wal.lock().unwrap() = Some(last_received_wal); - // Send zenith feedback message. + // Send the replication feedback message. // Regular standby_status_update fields are put into this message. let status_update = ReplicationFeedback { current_timeline_size: timeline @@ -328,7 +328,7 @@ pub async fn handle_walreceiver_connection( ps_replytime: ts, }; - debug!("zenith_status_update {status_update:?}"); + debug!("neon_status_update {status_update:?}"); let mut data = BytesMut::new(); status_update.serialize(&mut data)?; diff --git a/pageserver/src/walrecord.rs b/pageserver/src/walrecord.rs index c718a4c30c..dbf9bf9d33 100644 --- a/pageserver/src/walrecord.rs +++ b/pageserver/src/walrecord.rs @@ -13,10 +13,10 @@ use serde::{Deserialize, Serialize}; use tracing::*; use utils::bin_ser::DeserializeError; -/// Each update to a page is represented by a ZenithWalRecord. It can be a wrapper -/// around a PostgreSQL WAL record, or a custom zenith-specific "record". +/// Each update to a page is represented by a NeonWalRecord. 
It can be a wrapper
+/// around a PostgreSQL WAL record, or a custom neon-specific "record".
 #[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)]
-pub enum ZenithWalRecord {
+pub enum NeonWalRecord {
     /// Native PostgreSQL WAL record
     Postgres { will_init: bool, rec: Bytes },
@@ -45,14 +45,14 @@ pub enum NeonWalRecord {
     },
 }
-impl ZenithWalRecord {
+impl NeonWalRecord {
     /// Does replaying this WAL record initialize the page from scratch, or does
     /// it need to be applied over the previous image of the page?
     pub fn will_init(&self) -> bool {
         match self {
-            ZenithWalRecord::Postgres { will_init, rec: _ } => *will_init,
+            NeonWalRecord::Postgres { will_init, rec: _ } => *will_init,
-            // None of the special zenith record types currently initialize the page
+            // None of the special neon record types currently initialize the page
             _ => false,
         }
     }
@@ -767,9 +767,9 @@ pub fn decode_wal_record(
 /// Build a human-readable string to describe a WAL record
 ///
 /// For debugging purposes
-pub fn describe_wal_record(rec: &ZenithWalRecord) -> Result<String, DeserializeError> {
+pub fn describe_wal_record(rec: &NeonWalRecord) -> Result<String, DeserializeError> {
     match rec {
-        ZenithWalRecord::Postgres { will_init, rec } => Ok(format!(
+        NeonWalRecord::Postgres { will_init, rec } => Ok(format!(
            "will_init: {}, {}",
            will_init,
            describe_postgres_wal_record(rec)?
diff --git a/pageserver/src/walredo.rs b/pageserver/src/walredo.rs
index dd946659bb..9faabfebda 100644
--- a/pageserver/src/walredo.rs
+++ b/pageserver/src/walredo.rs
@@ -36,7 +36,7 @@ use std::sync::Mutex;
 use std::time::Duration;
 use std::time::Instant;
 use tracing::*;
-use utils::{bin_ser::BeSer, lsn::Lsn, nonblock::set_nonblock, zid::ZTenantId};
+use utils::{bin_ser::BeSer, id::TenantId, lsn::Lsn, nonblock::set_nonblock};
 use crate::metrics::{
     WAL_REDO_RECORDS_HISTOGRAM, WAL_REDO_RECORD_COUNTER, WAL_REDO_TIME, WAL_REDO_WAIT_TIME,
 };
 use crate::pgdatadir_mapping::{key_to_rel_block, key_to_slru_block};
 use crate::reltag::{RelTag, SlruKind};
 use crate::repository::Key;
-use crate::walrecord::ZenithWalRecord;
+use crate::walrecord::NeonWalRecord;
 use crate::{config::PageServerConf, TEMP_FILE_SUFFIX};
 use postgres_ffi::v14::nonrelfile_utils::{
     mx_offset_to_flags_bitshift, mx_offset_to_flags_offset, mx_offset_to_member_offset,
@@ -81,7 +81,7 @@ pub trait WalRedoManager: Send + Sync {
         key: Key,
         lsn: Lsn,
         base_img: Option<Bytes>,
-        records: Vec<(Lsn, ZenithWalRecord)>,
+        records: Vec<(Lsn, NeonWalRecord)>,
     ) -> Result<Bytes, WalRedoError>;
 }
@@ -93,20 +93,20 @@ pub trait WalRedoManager: Send + Sync {
 /// records.
 ///
 pub struct PostgresRedoManager {
-    tenantid: ZTenantId,
+    tenant_id: TenantId,
     conf: &'static PageServerConf,
     process: Mutex<Option<PostgresRedoProcess>>,
 }
-/// Can this request be served by zenith redo functions
+/// Can this request be served by neon redo functions
 /// or do we need to pass it to the wal-redo postgres process?
-fn can_apply_in_zenith(rec: &ZenithWalRecord) -> bool {
+fn can_apply_in_neon(rec: &NeonWalRecord) -> bool {
     // Currently, we don't have bespoke Rust code to replay any
-    // Postgres WAL records. But everything else is handled in zenith.
+    // Postgres WAL records. But everything else is handled in neon.
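    // Put differently: NeonWalRecord::Postgres is the only variant that must be
    // shipped to the external wal-redo postgres process; the bespoke variants
    // (CLOG, multixact and visibility-map updates) are replayed in-process by
    // apply_batch_neon() below.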
#[allow(clippy::match_like_matches_macro)] match rec { - ZenithWalRecord::Postgres { + NeonWalRecord::Postgres { will_init: _, rec: _, } => false, @@ -143,7 +143,7 @@ impl WalRedoManager for PostgresRedoManager { key: Key, lsn: Lsn, base_img: Option, - records: Vec<(Lsn, ZenithWalRecord)>, + records: Vec<(Lsn, NeonWalRecord)>, ) -> Result { if records.is_empty() { error!("invalid WAL redo request with no records"); @@ -151,14 +151,14 @@ impl WalRedoManager for PostgresRedoManager { } let mut img: Option = base_img; - let mut batch_zenith = can_apply_in_zenith(&records[0].1); + let mut batch_neon = can_apply_in_neon(&records[0].1); let mut batch_start = 0; for i in 1..records.len() { - let rec_zenith = can_apply_in_zenith(&records[i].1); + let rec_neon = can_apply_in_neon(&records[i].1); - if rec_zenith != batch_zenith { - let result = if batch_zenith { - self.apply_batch_zenith(key, lsn, img, &records[batch_start..i]) + if rec_neon != batch_neon { + let result = if batch_neon { + self.apply_batch_neon(key, lsn, img, &records[batch_start..i]) } else { self.apply_batch_postgres( key, @@ -170,13 +170,13 @@ impl WalRedoManager for PostgresRedoManager { }; img = Some(result?); - batch_zenith = rec_zenith; + batch_neon = rec_neon; batch_start = i; } } // last batch - if batch_zenith { - self.apply_batch_zenith(key, lsn, img, &records[batch_start..]) + if batch_neon { + self.apply_batch_neon(key, lsn, img, &records[batch_start..]) } else { self.apply_batch_postgres( key, @@ -193,10 +193,10 @@ impl PostgresRedoManager { /// /// Create a new PostgresRedoManager. /// - pub fn new(conf: &'static PageServerConf, tenantid: ZTenantId) -> PostgresRedoManager { + pub fn new(conf: &'static PageServerConf, tenant_id: TenantId) -> PostgresRedoManager { // The actual process is launched lazily, on first request. PostgresRedoManager { - tenantid, + tenant_id, conf, process: Mutex::new(None), } @@ -210,7 +210,7 @@ impl PostgresRedoManager { key: Key, lsn: Lsn, base_img: Option, - records: &[(Lsn, ZenithWalRecord)], + records: &[(Lsn, NeonWalRecord)], wal_redo_timeout: Duration, ) -> Result { let (rel, blknum) = key_to_rel_block(key).or(Err(WalRedoError::InvalidRecord))?; @@ -222,7 +222,7 @@ impl PostgresRedoManager { // launch the WAL redo process on first use if process_guard.is_none() { - let p = PostgresRedoProcess::launch(self.conf, &self.tenantid)?; + let p = PostgresRedoProcess::launch(self.conf, &self.tenant_id)?; *process_guard = Some(p); } let process = process_guard.as_mut().unwrap(); @@ -263,14 +263,14 @@ impl PostgresRedoManager { } /// - /// Process a batch of WAL records using bespoken Zenith code. + /// Process a batch of WAL records using bespoken Neon code. /// - fn apply_batch_zenith( + fn apply_batch_neon( &self, key: Key, lsn: Lsn, base_img: Option, - records: &[(Lsn, ZenithWalRecord)], + records: &[(Lsn, NeonWalRecord)], ) -> Result { let start_time = Instant::now(); @@ -280,13 +280,13 @@ impl PostgresRedoManager { page.extend_from_slice(&fpi[..]); } else { // All the current WAL record types that we can handle require a base image. - error!("invalid zenith WAL redo request with no base image"); + error!("invalid neon WAL redo request with no base image"); return Err(WalRedoError::InvalidRequest); } // Apply all the WAL records in the batch for (record_lsn, record) in records.iter() { - self.apply_record_zenith(key, &mut page, *record_lsn, record)?; + self.apply_record_neon(key, &mut page, *record_lsn, record)?; } // Success! 
         let end_time = Instant::now();
@@ -294,7 +294,7 @@ impl PostgresRedoManager {
         WAL_REDO_TIME.observe(duration.as_secs_f64());
         debug!(
-            "zenith applied {} WAL records in {} ms to reconstruct page image at LSN {}",
+            "neon applied {} WAL records in {} us to reconstruct page image at LSN {}",
             records.len(),
             duration.as_micros(),
             lsn
@@ -303,22 +303,22 @@ impl PostgresRedoManager {
         Ok(page.freeze())
     }
-    fn apply_record_zenith(
+    fn apply_record_neon(
         &self,
         key: Key,
         page: &mut BytesMut,
         _record_lsn: Lsn,
-        record: &ZenithWalRecord,
+        record: &NeonWalRecord,
     ) -> Result<(), WalRedoError> {
         match record {
-            ZenithWalRecord::Postgres {
+            NeonWalRecord::Postgres {
                 will_init: _,
                 rec: _,
             } => {
-                error!("tried to pass postgres wal record to zenith WAL redo");
+                error!("tried to pass postgres wal record to neon WAL redo");
                 return Err(WalRedoError::InvalidRequest);
             }
-            ZenithWalRecord::ClearVisibilityMapFlags {
+            NeonWalRecord::ClearVisibilityMapFlags {
                 new_heap_blkno,
                 old_heap_blkno,
                 flags,
@@ -360,7 +360,7 @@ impl PostgresRedoManager {
             }
             // Non-relational WAL records are handled here, with custom code that has the
             // same effects as the corresponding Postgres WAL redo function.
-            ZenithWalRecord::ClogSetCommitted { xids, timestamp } => {
+            NeonWalRecord::ClogSetCommitted { xids, timestamp } => {
                 let (slru_kind, segno, blknum) = key_to_slru_block(key).or(Err(WalRedoError::InvalidRecord))?;
                 assert_eq!(
@@ -410,7 +410,7 @@ impl PostgresRedoManager {
                     );
                 }
             }
-            ZenithWalRecord::ClogSetAborted { xids } => {
+            NeonWalRecord::ClogSetAborted { xids } => {
                 let (slru_kind, segno, blknum) = key_to_slru_block(key).or(Err(WalRedoError::InvalidRecord))?;
                 assert_eq!(
@@ -441,7 +441,7 @@ impl PostgresRedoManager {
                     transaction_id_set_status(xid, pg_constants::TRANSACTION_STATUS_ABORTED, page);
                 }
             }
-            ZenithWalRecord::MultixactOffsetCreate { mid, moff } => {
+            NeonWalRecord::MultixactOffsetCreate { mid, moff } => {
                 let (slru_kind, segno, blknum) = key_to_slru_block(key).or(Err(WalRedoError::InvalidRecord))?;
                 assert_eq!(
@@ -474,7 +474,7 @@ impl PostgresRedoManager {
                 LittleEndian::write_u32(&mut page[offset..offset + 4], *moff);
             }
-            ZenithWalRecord::MultixactMembersCreate { moff, members } => {
+            NeonWalRecord::MultixactMembersCreate { moff, members } => {
                 let (slru_kind, segno, blknum) = key_to_slru_block(key).or(Err(WalRedoError::InvalidRecord))?;
                 assert_eq!(
@@ -570,7 +570,7 @@ impl PostgresRedoProcess {
     //
     // Start postgres binary in special WAL redo mode.
     //
-    fn launch(conf: &PageServerConf, tenant_id: &ZTenantId) -> Result<PostgresRedoProcess, Error> {
+    fn launch(conf: &PageServerConf, tenant_id: &TenantId) -> Result<PostgresRedoProcess, Error> {
         // FIXME: We need a dummy Postgres cluster to run the process in. Currently, we
         // just create one with constant name. That fails if you try to launch more than
         // one WAL redo manager concurrently.
@@ -686,7 +686,7 @@ impl PostgresRedoProcess {
         &mut self,
         tag: BufferTag,
         base_img: Option<Bytes>,
-        records: &[(Lsn, ZenithWalRecord)],
+        records: &[(Lsn, NeonWalRecord)],
         wal_redo_timeout: Duration,
     ) -> Result<Bytes, Error> {
         // Serialize all the messages to send the WAL redo process first.
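In the ClogSetCommitted and ClogSetAborted arms above, the pageserver edits transaction status bits in the SLRU page directly instead of running Postgres redo. A standalone sketch of the two-bits-per-xid CLOG layout those arms rely on; the constants mirror PostgreSQL's clog.c, and this helper is a simplified stand-in for the transaction_id_set_status imported from postgres_ffi:

    #include <stdint.h>

    #define BLCKSZ              8192
    #define CLOG_BITS_PER_XACT  2
    #define CLOG_XACTS_PER_BYTE 4
    #define CLOG_XACTS_PER_PAGE (BLCKSZ * CLOG_XACTS_PER_BYTE)

    #define TRANSACTION_STATUS_COMMITTED 0x01
    #define TRANSACTION_STATUS_ABORTED   0x02

    /* Overwrite the 2-bit status slot of one xid inside its CLOG page. */
    static void
    transaction_id_set_status(uint32_t xid, uint8_t status, uint8_t *page)
    {
        uint32_t entry  = xid % CLOG_XACTS_PER_PAGE;    /* slot within the page */
        uint32_t byteno = entry / CLOG_XACTS_PER_BYTE;  /* byte holding 4 slots */
        uint32_t bshift = (entry % CLOG_XACTS_PER_BYTE) * CLOG_BITS_PER_XACT;

        /* clear the old 2-bit value, then OR in the new status */
        page[byteno] = (uint8_t) ((page[byteno] & ~(0x03u << bshift))
                                  | ((uint32_t) status << bshift));
    }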
@@ -700,7 +700,7 @@ impl PostgresRedoProcess { build_push_page_msg(tag, &img, &mut writebuf); } for (lsn, rec) in records.iter() { - if let ZenithWalRecord::Postgres { + if let NeonWalRecord::Postgres { will_init: _, rec: postgres_rec, } = rec @@ -709,7 +709,7 @@ impl PostgresRedoProcess { } else { return Err(Error::new( ErrorKind::Other, - "tried to pass zenith wal record to postgres WAL redo", + "tried to pass neon wal record to postgres WAL redo", )); } } diff --git a/pgxn/neon/inmem_smgr.c b/pgxn/neon/inmem_smgr.c index 4926d759e8..bc0ee352b8 100644 --- a/pgxn/neon/inmem_smgr.c +++ b/pgxn/neon/inmem_smgr.c @@ -86,7 +86,7 @@ inmem_exists(SMgrRelation reln, ForkNumber forknum) } /* - * inmem_create() -- Create a new relation on zenithd storage + * inmem_create() -- Create a new relation on neon storage * * If isRedo is true, it's okay for the relation to exist already. */ diff --git a/pgxn/neon/libpagestore.c b/pgxn/neon/libpagestore.c index 55285a6345..296865838d 100644 --- a/pgxn/neon/libpagestore.c +++ b/pgxn/neon/libpagestore.c @@ -30,13 +30,12 @@ #include "walproposer.h" #include "walproposer_utils.h" - #define PageStoreTrace DEBUG5 #define NEON_TAG "[NEON_SMGR] " -#define neon_log(tag, fmt, ...) ereport(tag, \ - (errmsg(NEON_TAG fmt, ## __VA_ARGS__), \ - errhidestmt(true), errhidecontext(true))) +#define neon_log(tag, fmt, ...) ereport(tag, \ + (errmsg(NEON_TAG fmt, ##__VA_ARGS__), \ + errhidestmt(true), errhidecontext(true))) bool connected = false; PGconn *pageserver_conn = NULL; @@ -65,7 +64,7 @@ pageserver_connect() errdetail_internal("%s", msg))); } - query = psprintf("pagestream %s %s", zenith_tenant, zenith_timeline); + query = psprintf("pagestream %s %s", neon_tenant, neon_timeline); ret = PQsendQuery(pageserver_conn, query); if (ret != 1) { @@ -169,7 +168,7 @@ pageserver_disconnect(void) } static void -pageserver_send(ZenithRequest *request) +pageserver_send(NeonRequest * request) { StringInfoData req_buff; @@ -205,18 +204,18 @@ pageserver_send(ZenithRequest *request) if (message_level_is_interesting(PageStoreTrace)) { - char *msg = zm_to_string((ZenithMessage *) request); + char *msg = zm_to_string((NeonMessage *) request); neon_log(PageStoreTrace, "sent request: %s", msg); pfree(msg); } } -static ZenithResponse * +static NeonResponse * pageserver_receive(void) { StringInfoData resp_buff; - ZenithResponse *resp; + NeonResponse *resp; PG_TRY(); { @@ -236,7 +235,7 @@ pageserver_receive(void) if (message_level_is_interesting(PageStoreTrace)) { - char *msg = zm_to_string((ZenithMessage *) resp); + char *msg = zm_to_string((NeonMessage *) resp); neon_log(PageStoreTrace, "got response: %s", msg); pfree(msg); @@ -249,7 +248,7 @@ pageserver_receive(void) } PG_END_TRY(); - return (ZenithResponse *) resp; + return (NeonResponse *) resp; } @@ -265,8 +264,8 @@ pageserver_flush(void) } } -static ZenithResponse * -pageserver_call(ZenithRequest *request) +static NeonResponse * +pageserver_call(NeonRequest * request) { pageserver_send(request); pageserver_flush(); @@ -281,7 +280,7 @@ page_server_api api = { }; static bool -check_zenith_id(char **newval, void **extra, GucSource source) +check_neon_id(char **newval, void **extra, GucSource source) { uint8 zid[16]; @@ -403,22 +402,22 @@ pg_init_libpagestore(void) NULL, NULL, NULL); DefineCustomStringVariable("neon.timeline_id", - "Zenith timelineid the server is running on", + "Neon timeline_id the server is running on", NULL, - &zenith_timeline, + &neon_timeline, "", PGC_POSTMASTER, 0, /* no flags required */ - check_zenith_id, NULL, 
NULL); + check_neon_id, NULL, NULL); DefineCustomStringVariable("neon.tenant_id", - "Neon tenantid the server is running on", + "Neon tenant_id the server is running on", NULL, - &zenith_tenant, + &neon_tenant, "", PGC_POSTMASTER, 0, /* no flags required */ - check_zenith_id, NULL, NULL); + check_neon_id, NULL, NULL); DefineCustomBoolVariable("neon.wal_redo", "start in wal-redo mode", @@ -450,8 +449,8 @@ pg_init_libpagestore(void) page_server_connstring = substitute_pageserver_password(page_server_connstring_raw); /* Is there more correct way to pass CustomGUC to postgres code? */ - zenith_timeline_walproposer = zenith_timeline; - zenith_tenant_walproposer = zenith_tenant; + neon_timeline_walproposer = neon_timeline; + neon_tenant_walproposer = neon_tenant; if (wal_redo) { @@ -462,8 +461,8 @@ pg_init_libpagestore(void) else if (page_server_connstring && page_server_connstring[0]) { neon_log(PageStoreTrace, "set neon_smgr hook"); - smgr_hook = smgr_zenith; - smgr_init_hook = smgr_init_zenith; - dbsize_hook = zenith_dbsize; + smgr_hook = smgr_neon; + smgr_init_hook = smgr_init_neon; + dbsize_hook = neon_dbsize; } } diff --git a/pgxn/neon/neon.c b/pgxn/neon/neon.c index 5346680b0b..2a2a163ee8 100644 --- a/pgxn/neon/neon.c +++ b/pgxn/neon/neon.c @@ -28,7 +28,6 @@ PG_MODULE_MAGIC; void _PG_init(void); - void _PG_init(void) { @@ -56,7 +55,6 @@ pg_cluster_size(PG_FUNCTION_ARGS) PG_RETURN_INT64(size); } - Datum backpressure_lsns(PG_FUNCTION_ARGS) { diff --git a/pgxn/neon/pagestore_client.h b/pgxn/neon/pagestore_client.h index 7dc38c13fb..633c7b465c 100644 --- a/pgxn/neon/pagestore_client.h +++ b/pgxn/neon/pagestore_client.h @@ -28,31 +28,29 @@ typedef enum { /* pagestore_client -> pagestore */ - T_ZenithExistsRequest = 0, - T_ZenithNblocksRequest, - T_ZenithGetPageRequest, - T_ZenithDbSizeRequest, + T_NeonExistsRequest = 0, + T_NeonNblocksRequest, + T_NeonGetPageRequest, + T_NeonDbSizeRequest, /* pagestore -> pagestore_client */ - T_ZenithExistsResponse = 100, - T_ZenithNblocksResponse, - T_ZenithGetPageResponse, - T_ZenithErrorResponse, - T_ZenithDbSizeResponse, -} ZenithMessageTag; - - + T_NeonExistsResponse = 100, + T_NeonNblocksResponse, + T_NeonGetPageResponse, + T_NeonErrorResponse, + T_NeonDbSizeResponse, +} NeonMessageTag; /* base struct for c-style inheritance */ typedef struct { - ZenithMessageTag tag; -} ZenithMessage; + NeonMessageTag tag; +} NeonMessage; -#define messageTag(m) (((const ZenithMessage *)(m))->tag) +#define messageTag(m) (((const NeonMessage *)(m))->tag) /* - * supertype of all the Zenith*Request structs below + * supertype of all the Neon*Request structs below * * If 'latest' is true, we are requesting the latest page version, and 'lsn' * is just a hint to the server that we know there are no versions of the page @@ -60,81 +58,79 @@ typedef struct */ typedef struct { - ZenithMessageTag tag; + NeonMessageTag tag; bool latest; /* if true, request latest page version */ XLogRecPtr lsn; /* request page version @ this LSN */ -} ZenithRequest; +} NeonRequest; typedef struct { - ZenithRequest req; + NeonRequest req; RelFileNode rnode; ForkNumber forknum; -} ZenithExistsRequest; +} NeonExistsRequest; typedef struct { - ZenithRequest req; + NeonRequest req; RelFileNode rnode; ForkNumber forknum; -} ZenithNblocksRequest; - +} NeonNblocksRequest; typedef struct { - ZenithRequest req; + NeonRequest req; Oid dbNode; -} ZenithDbSizeRequest; - +} NeonDbSizeRequest; typedef struct { - ZenithRequest req; + NeonRequest req; RelFileNode rnode; ForkNumber forknum; BlockNumber blkno; -} 
ZenithGetPageRequest; +} NeonGetPageRequest; -/* supertype of all the Zenith*Response structs below */ +/* supertype of all the Neon*Response structs below */ typedef struct { - ZenithMessageTag tag; -} ZenithResponse; + NeonMessageTag tag; +} NeonResponse; typedef struct { - ZenithMessageTag tag; + NeonMessageTag tag; bool exists; -} ZenithExistsResponse; +} NeonExistsResponse; typedef struct { - ZenithMessageTag tag; + NeonMessageTag tag; uint32 n_blocks; -} ZenithNblocksResponse; +} NeonNblocksResponse; typedef struct { - ZenithMessageTag tag; + NeonMessageTag tag; char page[FLEXIBLE_ARRAY_MEMBER]; -} ZenithGetPageResponse; +} NeonGetPageResponse; typedef struct { - ZenithMessageTag tag; + NeonMessageTag tag; int64 db_size; -} ZenithDbSizeResponse; +} NeonDbSizeResponse; typedef struct { - ZenithMessageTag tag; + NeonMessageTag tag; char message[FLEXIBLE_ARRAY_MEMBER]; /* null-terminated error * message */ -} ZenithErrorResponse; +} NeonErrorResponse; -extern StringInfoData zm_pack_request(ZenithRequest *msg); -extern ZenithResponse *zm_unpack_response(StringInfo s); -extern char *zm_to_string(ZenithMessage *msg); +extern StringInfoData zm_pack_request(NeonRequest * msg); +extern NeonResponse * zm_unpack_response(StringInfo s); +extern char *zm_to_string(NeonMessage * msg); /* * API @@ -142,57 +138,57 @@ extern char *zm_to_string(ZenithMessage *msg); typedef struct { - ZenithResponse *(*request) (ZenithRequest *request); - void (*send) (ZenithRequest *request); - ZenithResponse *(*receive) (void); + NeonResponse *(*request) (NeonRequest * request); + void (*send) (NeonRequest * request); + NeonResponse *(*receive) (void); void (*flush) (void); } page_server_api; extern page_server_api * page_server; extern char *page_server_connstring; -extern char *zenith_timeline; -extern char *zenith_tenant; +extern char *neon_timeline; +extern char *neon_tenant; extern bool wal_redo; extern int32 max_cluster_size; -extern const f_smgr *smgr_zenith(BackendId backend, RelFileNode rnode); -extern void smgr_init_zenith(void); +extern const f_smgr *smgr_neon(BackendId backend, RelFileNode rnode); +extern void smgr_init_neon(void); extern const f_smgr *smgr_inmem(BackendId backend, RelFileNode rnode); extern void smgr_init_inmem(void); extern void smgr_shutdown_inmem(void); -/* zenith storage manager functionality */ +/* Neon storage manager functionality */ -extern void zenith_init(void); -extern void zenith_open(SMgrRelation reln); -extern void zenith_close(SMgrRelation reln, ForkNumber forknum); -extern void zenith_create(SMgrRelation reln, ForkNumber forknum, bool isRedo); -extern bool zenith_exists(SMgrRelation reln, ForkNumber forknum); -extern void zenith_unlink(RelFileNodeBackend rnode, ForkNumber forknum, bool isRedo); -extern void zenith_extend(SMgrRelation reln, ForkNumber forknum, - BlockNumber blocknum, char *buffer, bool skipFsync); -extern bool zenith_prefetch(SMgrRelation reln, ForkNumber forknum, - BlockNumber blocknum); -extern void zenith_reset_prefetch(SMgrRelation reln); -extern void zenith_read(SMgrRelation reln, ForkNumber forknum, BlockNumber blocknum, - char *buffer); +extern void neon_init(void); +extern void neon_open(SMgrRelation reln); +extern void neon_close(SMgrRelation reln, ForkNumber forknum); +extern void neon_create(SMgrRelation reln, ForkNumber forknum, bool isRedo); +extern bool neon_exists(SMgrRelation reln, ForkNumber forknum); +extern void neon_unlink(RelFileNodeBackend rnode, ForkNumber forknum, bool isRedo); +extern void neon_extend(SMgrRelation reln, ForkNumber 
forknum, + BlockNumber blocknum, char *buffer, bool skipFsync); +extern bool neon_prefetch(SMgrRelation reln, ForkNumber forknum, + BlockNumber blocknum); +extern void neon_reset_prefetch(SMgrRelation reln); +extern void neon_read(SMgrRelation reln, ForkNumber forknum, BlockNumber blocknum, + char *buffer); -extern void zenith_read_at_lsn(RelFileNode rnode, ForkNumber forkNum, BlockNumber blkno, - XLogRecPtr request_lsn, bool request_latest, char *buffer); +extern void neon_read_at_lsn(RelFileNode rnode, ForkNumber forkNum, BlockNumber blkno, + XLogRecPtr request_lsn, bool request_latest, char *buffer); -extern void zenith_write(SMgrRelation reln, ForkNumber forknum, - BlockNumber blocknum, char *buffer, bool skipFsync); -extern void zenith_writeback(SMgrRelation reln, ForkNumber forknum, - BlockNumber blocknum, BlockNumber nblocks); -extern BlockNumber zenith_nblocks(SMgrRelation reln, ForkNumber forknum); -extern int64 zenith_dbsize(Oid dbNode); -extern void zenith_truncate(SMgrRelation reln, ForkNumber forknum, - BlockNumber nblocks); -extern void zenith_immedsync(SMgrRelation reln, ForkNumber forknum); +extern void neon_write(SMgrRelation reln, ForkNumber forknum, + BlockNumber blocknum, char *buffer, bool skipFsync); +extern void neon_writeback(SMgrRelation reln, ForkNumber forknum, + BlockNumber blocknum, BlockNumber nblocks); +extern BlockNumber neon_nblocks(SMgrRelation reln, ForkNumber forknum); +extern int64 neon_dbsize(Oid dbNode); +extern void neon_truncate(SMgrRelation reln, ForkNumber forknum, + BlockNumber nblocks); +extern void neon_immedsync(SMgrRelation reln, ForkNumber forknum); -/* zenith wal-redo storage manager functionality */ +/* neon wal-redo storage manager functionality */ extern void inmem_init(void); extern void inmem_open(SMgrRelation reln); @@ -215,8 +211,7 @@ extern void inmem_truncate(SMgrRelation reln, ForkNumber forknum, BlockNumber nblocks); extern void inmem_immedsync(SMgrRelation reln, ForkNumber forknum); - -/* utils for zenith relsize cache */ +/* utils for neon relsize cache */ extern void relsize_hash_init(void); extern bool get_cached_relsize(RelFileNode rnode, ForkNumber forknum, BlockNumber *size); extern void set_cached_relsize(RelFileNode rnode, ForkNumber forknum, BlockNumber size); diff --git a/pgxn/neon/pagestore_smgr.c b/pgxn/neon/pagestore_smgr.c index 504ae60d4a..24adee019f 100644 --- a/pgxn/neon/pagestore_smgr.c +++ b/pgxn/neon/pagestore_smgr.c @@ -96,9 +96,9 @@ page_server_api *page_server; /* GUCs */ char *page_server_connstring; -//with substituted password -char *zenith_timeline; -char *zenith_tenant; +/*with substituted password*/ +char *neon_timeline; +char *neon_tenant; bool wal_redo = false; int32 max_cluster_size; @@ -143,7 +143,7 @@ consume_prefetch_responses(void) { for (int i = n_prefetched_buffers; i < n_prefetch_responses; i++) { - ZenithResponse *resp = page_server->receive(); + NeonResponse *resp = page_server->receive(); pfree(resp); } @@ -151,16 +151,16 @@ consume_prefetch_responses(void) n_prefetch_responses = 0; } -static ZenithResponse * +static NeonResponse * page_server_request(void const *req) { consume_prefetch_responses(); - return page_server->request((ZenithRequest *) req); + return page_server->request((NeonRequest *) req); } StringInfoData -zm_pack_request(ZenithRequest *msg) +zm_pack_request(NeonRequest * msg) { StringInfoData s; @@ -170,9 +170,9 @@ zm_pack_request(ZenithRequest *msg) switch (messageTag(msg)) { /* pagestore_client -> pagestore */ - case T_ZenithExistsRequest: + case T_NeonExistsRequest: { 
- ZenithExistsRequest *msg_req = (ZenithExistsRequest *) msg; + NeonExistsRequest *msg_req = (NeonExistsRequest *) msg; pq_sendbyte(&s, msg_req->req.latest); pq_sendint64(&s, msg_req->req.lsn); @@ -183,9 +183,9 @@ zm_pack_request(ZenithRequest *msg) break; } - case T_ZenithNblocksRequest: + case T_NeonNblocksRequest: { - ZenithNblocksRequest *msg_req = (ZenithNblocksRequest *) msg; + NeonNblocksRequest *msg_req = (NeonNblocksRequest *) msg; pq_sendbyte(&s, msg_req->req.latest); pq_sendint64(&s, msg_req->req.lsn); @@ -196,9 +196,9 @@ zm_pack_request(ZenithRequest *msg) break; } - case T_ZenithDbSizeRequest: + case T_NeonDbSizeRequest: { - ZenithDbSizeRequest *msg_req = (ZenithDbSizeRequest *) msg; + NeonDbSizeRequest *msg_req = (NeonDbSizeRequest *) msg; pq_sendbyte(&s, msg_req->req.latest); pq_sendint64(&s, msg_req->req.lsn); @@ -206,9 +206,9 @@ zm_pack_request(ZenithRequest *msg) break; } - case T_ZenithGetPageRequest: + case T_NeonGetPageRequest: { - ZenithGetPageRequest *msg_req = (ZenithGetPageRequest *) msg; + NeonGetPageRequest *msg_req = (NeonGetPageRequest *) msg; pq_sendbyte(&s, msg_req->req.latest); pq_sendint64(&s, msg_req->req.lsn); @@ -222,91 +222,91 @@ zm_pack_request(ZenithRequest *msg) } /* pagestore -> pagestore_client. We never need to create these. */ - case T_ZenithExistsResponse: - case T_ZenithNblocksResponse: - case T_ZenithGetPageResponse: - case T_ZenithErrorResponse: - case T_ZenithDbSizeResponse: + case T_NeonExistsResponse: + case T_NeonNblocksResponse: + case T_NeonGetPageResponse: + case T_NeonErrorResponse: + case T_NeonDbSizeResponse: default: - elog(ERROR, "unexpected zenith message tag 0x%02x", msg->tag); + elog(ERROR, "unexpected neon message tag 0x%02x", msg->tag); break; } return s; } -ZenithResponse * +NeonResponse * zm_unpack_response(StringInfo s) { - ZenithMessageTag tag = pq_getmsgbyte(s); - ZenithResponse *resp = NULL; + NeonMessageTag tag = pq_getmsgbyte(s); + NeonResponse *resp = NULL; switch (tag) { /* pagestore -> pagestore_client */ - case T_ZenithExistsResponse: + case T_NeonExistsResponse: { - ZenithExistsResponse *msg_resp = palloc0(sizeof(ZenithExistsResponse)); + NeonExistsResponse *msg_resp = palloc0(sizeof(NeonExistsResponse)); msg_resp->tag = tag; msg_resp->exists = pq_getmsgbyte(s); pq_getmsgend(s); - resp = (ZenithResponse *) msg_resp; + resp = (NeonResponse *) msg_resp; break; } - case T_ZenithNblocksResponse: + case T_NeonNblocksResponse: { - ZenithNblocksResponse *msg_resp = palloc0(sizeof(ZenithNblocksResponse)); + NeonNblocksResponse *msg_resp = palloc0(sizeof(NeonNblocksResponse)); msg_resp->tag = tag; msg_resp->n_blocks = pq_getmsgint(s, 4); pq_getmsgend(s); - resp = (ZenithResponse *) msg_resp; + resp = (NeonResponse *) msg_resp; break; } - case T_ZenithGetPageResponse: + case T_NeonGetPageResponse: { - ZenithGetPageResponse *msg_resp = palloc0(offsetof(ZenithGetPageResponse, page) + BLCKSZ); + NeonGetPageResponse *msg_resp = palloc0(offsetof(NeonGetPageResponse, page) + BLCKSZ); msg_resp->tag = tag; /* XXX: should be varlena */ memcpy(msg_resp->page, pq_getmsgbytes(s, BLCKSZ), BLCKSZ); pq_getmsgend(s); - resp = (ZenithResponse *) msg_resp; + resp = (NeonResponse *) msg_resp; break; } - case T_ZenithDbSizeResponse: + case T_NeonDbSizeResponse: { - ZenithDbSizeResponse *msg_resp = palloc0(sizeof(ZenithDbSizeResponse)); + NeonDbSizeResponse *msg_resp = palloc0(sizeof(NeonDbSizeResponse)); msg_resp->tag = tag; msg_resp->db_size = pq_getmsgint64(s); pq_getmsgend(s); - resp = (ZenithResponse *) msg_resp; + resp = (NeonResponse *) 
msg_resp; break; } - case T_ZenithErrorResponse: + case T_NeonErrorResponse: { - ZenithErrorResponse *msg_resp; + NeonErrorResponse *msg_resp; size_t msglen; const char *msgtext; msgtext = pq_getmsgrawstring(s); msglen = strlen(msgtext); - msg_resp = palloc0(sizeof(ZenithErrorResponse) + msglen + 1); + msg_resp = palloc0(sizeof(NeonErrorResponse) + msglen + 1); msg_resp->tag = tag; memcpy(msg_resp->message, msgtext, msglen + 1); pq_getmsgend(s); - resp = (ZenithResponse *) msg_resp; + resp = (NeonResponse *) msg_resp; break; } @@ -315,12 +315,12 @@ zm_unpack_response(StringInfo s) * * We create these ourselves, and don't need to decode them. */ - case T_ZenithExistsRequest: - case T_ZenithNblocksRequest: - case T_ZenithGetPageRequest: - case T_ZenithDbSizeRequest: + case T_NeonExistsRequest: + case T_NeonNblocksRequest: + case T_NeonGetPageRequest: + case T_NeonDbSizeRequest: default: - elog(ERROR, "unexpected zenith message tag 0x%02x", tag); + elog(ERROR, "unexpected neon message tag 0x%02x", tag); break; } @@ -329,7 +329,7 @@ zm_unpack_response(StringInfo s) /* dump to json for debugging / error reporting purposes */ char * -zm_to_string(ZenithMessage *msg) +zm_to_string(NeonMessage * msg) { StringInfoData s; @@ -338,11 +338,11 @@ zm_to_string(ZenithMessage *msg) switch (messageTag(msg)) { /* pagestore_client -> pagestore */ - case T_ZenithExistsRequest: + case T_NeonExistsRequest: { - ZenithExistsRequest *msg_req = (ZenithExistsRequest *) msg; + NeonExistsRequest *msg_req = (NeonExistsRequest *) msg; - appendStringInfoString(&s, "{\"type\": \"ZenithExistsRequest\""); + appendStringInfoString(&s, "{\"type\": \"NeonExistsRequest\""); appendStringInfo(&s, ", \"rnode\": \"%u/%u/%u\"", msg_req->rnode.spcNode, msg_req->rnode.dbNode, @@ -354,11 +354,11 @@ zm_to_string(ZenithMessage *msg) break; } - case T_ZenithNblocksRequest: + case T_NeonNblocksRequest: { - ZenithNblocksRequest *msg_req = (ZenithNblocksRequest *) msg; + NeonNblocksRequest *msg_req = (NeonNblocksRequest *) msg; - appendStringInfoString(&s, "{\"type\": \"ZenithNblocksRequest\""); + appendStringInfoString(&s, "{\"type\": \"NeonNblocksRequest\""); appendStringInfo(&s, ", \"rnode\": \"%u/%u/%u\"", msg_req->rnode.spcNode, msg_req->rnode.dbNode, @@ -370,11 +370,11 @@ zm_to_string(ZenithMessage *msg) break; } - case T_ZenithGetPageRequest: + case T_NeonGetPageRequest: { - ZenithGetPageRequest *msg_req = (ZenithGetPageRequest *) msg; + NeonGetPageRequest *msg_req = (NeonGetPageRequest *) msg; - appendStringInfoString(&s, "{\"type\": \"ZenithGetPageRequest\""); + appendStringInfoString(&s, "{\"type\": \"NeonGetPageRequest\""); appendStringInfo(&s, ", \"rnode\": \"%u/%u/%u\"", msg_req->rnode.spcNode, msg_req->rnode.dbNode, @@ -386,11 +386,11 @@ zm_to_string(ZenithMessage *msg) appendStringInfoChar(&s, '}'); break; } - case T_ZenithDbSizeRequest: + case T_NeonDbSizeRequest: { - ZenithDbSizeRequest *msg_req = (ZenithDbSizeRequest *) msg; + NeonDbSizeRequest *msg_req = (NeonDbSizeRequest *) msg; - appendStringInfoString(&s, "{\"type\": \"ZenithDbSizeRequest\""); + appendStringInfoString(&s, "{\"type\": \"NeonDbSizeRequest\""); appendStringInfo(&s, ", \"dbnode\": \"%u\"", msg_req->dbNode); appendStringInfo(&s, ", \"lsn\": \"%X/%X\"", LSN_FORMAT_ARGS(msg_req->req.lsn)); appendStringInfo(&s, ", \"latest\": %d", msg_req->req.latest); @@ -398,61 +398,57 @@ zm_to_string(ZenithMessage *msg) break; } - /* pagestore -> pagestore_client */ - case T_ZenithExistsResponse: + case T_NeonExistsResponse: { - ZenithExistsResponse *msg_resp = 
(ZenithExistsResponse *) msg; + NeonExistsResponse *msg_resp = (NeonExistsResponse *) msg; - appendStringInfoString(&s, "{\"type\": \"ZenithExistsResponse\""); + appendStringInfoString(&s, "{\"type\": \"NeonExistsResponse\""); appendStringInfo(&s, ", \"exists\": %d}", - msg_resp->exists - ); + msg_resp->exists); appendStringInfoChar(&s, '}'); break; } - case T_ZenithNblocksResponse: + case T_NeonNblocksResponse: { - ZenithNblocksResponse *msg_resp = (ZenithNblocksResponse *) msg; + NeonNblocksResponse *msg_resp = (NeonNblocksResponse *) msg; - appendStringInfoString(&s, "{\"type\": \"ZenithNblocksResponse\""); + appendStringInfoString(&s, "{\"type\": \"NeonNblocksResponse\""); appendStringInfo(&s, ", \"n_blocks\": %u}", - msg_resp->n_blocks - ); + msg_resp->n_blocks); appendStringInfoChar(&s, '}'); break; } - case T_ZenithGetPageResponse: + case T_NeonGetPageResponse: { #if 0 - ZenithGetPageResponse *msg_resp = (ZenithGetPageResponse *) msg; + NeonGetPageResponse *msg_resp = (NeonGetPageResponse *) msg; #endif - appendStringInfoString(&s, "{\"type\": \"ZenithGetPageResponse\""); + appendStringInfoString(&s, "{\"type\": \"NeonGetPageResponse\""); appendStringInfo(&s, ", \"page\": \"XXX\"}"); appendStringInfoChar(&s, '}'); break; } - case T_ZenithErrorResponse: + case T_NeonErrorResponse: { - ZenithErrorResponse *msg_resp = (ZenithErrorResponse *) msg; + NeonErrorResponse *msg_resp = (NeonErrorResponse *) msg; /* FIXME: escape double-quotes in the message */ - appendStringInfoString(&s, "{\"type\": \"ZenithErrorResponse\""); + appendStringInfoString(&s, "{\"type\": \"NeonErrorResponse\""); appendStringInfo(&s, ", \"message\": \"%s\"}", msg_resp->message); appendStringInfoChar(&s, '}'); break; } - case T_ZenithDbSizeResponse: + case T_NeonDbSizeResponse: { - ZenithDbSizeResponse *msg_resp = (ZenithDbSizeResponse *) msg; + NeonDbSizeResponse *msg_resp = (NeonDbSizeResponse *) msg; - appendStringInfoString(&s, "{\"type\": \"ZenithDbSizeResponse\""); + appendStringInfoString(&s, "{\"type\": \"NeonDbSizeResponse\""); appendStringInfo(&s, ", \"db_size\": %ld}", - msg_resp->db_size - ); + msg_resp->db_size); appendStringInfoChar(&s, '}'); break; @@ -494,7 +490,7 @@ PageIsEmptyHeapPage(char *buffer) } static void -zenith_wallog_page(SMgrRelation reln, ForkNumber forknum, BlockNumber blocknum, char *buffer) +neon_wallog_page(SMgrRelation reln, ForkNumber forknum, BlockNumber blocknum, char *buffer) { XLogRecPtr lsn = PageGetLSN(buffer); @@ -551,8 +547,8 @@ zenith_wallog_page(SMgrRelation reln, ForkNumber forknum, BlockNumber blocknum, { /* * When PostgreSQL extends a relation, it calls smgrextend() with an - * all-zeros pages, and we can just ignore that in Zenith. We do need - * to remember the new size, though, so that smgrnblocks() returns the + * all-zeros pages, and we can just ignore that in Neon. We do need to + * remember the new size, though, so that smgrnblocks() returns the * right answer after the rel has been extended. We rely on the * relsize cache for that. 
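For reference while reading the (de)serialization hunks above: zm_pack_request writes each request as a tag byte followed by the latest flag, the LSN, and the per-message payload in network byte order through the pq_send* primitives, and zm_unpack_response reverses the process. A standalone sketch of the exists-request encoding under that reading; plain buffers stand in for StringInfo, and the exact width of each trailing field is an assumption:

    #include <stddef.h>
    #include <stdint.h>

    /* Field order follows zm_pack_request's T_NeonExistsRequest case:
     * tag, latest, lsn, then the rnode triple and the fork number. */
    typedef struct
    {
        uint8_t  tag;       /* T_NeonExistsRequest == 0 */
        uint8_t  latest;    /* 1: latest version, lsn is only a hint */
        uint64_t lsn;       /* page version @ this LSN otherwise */
        uint32_t spcnode, dbnode, relnode;
        uint8_t  forknum;
    } ExistsRequest;

    static uint8_t *put_u32(uint8_t *p, uint32_t v)
    {
        p[0] = (uint8_t) (v >> 24);   /* network order, like pq_sendint32 */
        p[1] = (uint8_t) (v >> 16);
        p[2] = (uint8_t) (v >> 8);
        p[3] = (uint8_t) v;
        return p + 4;
    }

    static uint8_t *put_u64(uint8_t *p, uint64_t v)
    {
        p = put_u32(p, (uint32_t) (v >> 32));
        return put_u32(p, (uint32_t) v);
    }

    /* Returns the number of bytes written into buf (needs >= 23). */
    static size_t pack_exists_request(const ExistsRequest *req, uint8_t *buf)
    {
        uint8_t *p = buf;

        *p++ = req->tag;
        *p++ = req->latest;
        p = put_u64(p, req->lsn);
        p = put_u32(p, req->spcnode);
        p = put_u32(p, req->dbnode);
        p = put_u32(p, req->relnode);
        *p++ = req->forknum;
        return (size_t) (p - buf);
    }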
* @@ -616,12 +612,11 @@ zenith_wallog_page(SMgrRelation reln, ForkNumber forknum, BlockNumber blocknum, SetLastWrittenLSNForBlock(lsn, reln->smgr_rnode.node, forknum, blocknum); } - /* - * zenith_init() -- Initialize private state + * neon_init() -- Initialize private state */ void -zenith_init(void) +neon_init(void) { /* noop */ #ifdef DEBUG_COMPARE_LOCAL @@ -658,7 +653,7 @@ zm_adjust_lsn(XLogRecPtr lsn) * Return LSN for requesting pages and number of blocks from page server */ static XLogRecPtr -zenith_get_request_lsn(bool *latest, RelFileNode rnode, ForkNumber forknum, BlockNumber blkno) +neon_get_request_lsn(bool *latest, RelFileNode rnode, ForkNumber forknum, BlockNumber blkno) { XLogRecPtr lsn; @@ -666,14 +661,14 @@ zenith_get_request_lsn(bool *latest, RelFileNode rnode, ForkNumber forknum, Bloc { *latest = false; lsn = GetXLogReplayRecPtr(NULL); - elog(DEBUG1, "zenith_get_request_lsn GetXLogReplayRecPtr %X/%X request lsn 0 ", + elog(DEBUG1, "neon_get_request_lsn GetXLogReplayRecPtr %X/%X request lsn 0 ", (uint32) ((lsn) >> 32), (uint32) (lsn)); } else if (am_walsender) { *latest = true; lsn = InvalidXLogRecPtr; - elog(DEBUG1, "am walsender zenith_get_request_lsn lsn 0 "); + elog(DEBUG1, "am walsender neon_get_request_lsn lsn 0 "); } else { @@ -687,7 +682,7 @@ zenith_get_request_lsn(bool *latest, RelFileNode rnode, ForkNumber forknum, Bloc *latest = true; lsn = GetLastWrittenLSN(rnode, forknum, blkno); Assert(lsn != InvalidXLogRecPtr); - elog(DEBUG1, "zenith_get_request_lsn GetLastWrittenLSN lsn %X/%X ", + elog(DEBUG1, "neon_get_request_lsn GetLastWrittenLSN lsn %X/%X ", (uint32) ((lsn) >> 32), (uint32) (lsn)); lsn = zm_adjust_lsn(lsn); @@ -717,15 +712,14 @@ zenith_get_request_lsn(bool *latest, RelFileNode rnode, ForkNumber forknum, Bloc return lsn; } - /* - * zenith_exists() -- Does the physical file exist? + * neon_exists() -- Does the physical file exist? 
 */
 bool
-zenith_exists(SMgrRelation reln, ForkNumber forkNum)
+neon_exists(SMgrRelation reln, ForkNumber forkNum)
 {
 	bool		exists;
-	ZenithResponse *resp;
+	NeonResponse *resp;
 	BlockNumber n_blocks;
 	bool		latest;
 	XLogRecPtr	request_lsn;
@@ -777,26 +771,25 @@ zenith_exists(SMgrRelation reln, ForkNumber forkNum)
 			return false;
 	}
-	request_lsn = zenith_get_request_lsn(&latest, reln->smgr_rnode.node, forkNum, REL_METADATA_PSEUDO_BLOCKNO);
+	request_lsn = neon_get_request_lsn(&latest, reln->smgr_rnode.node, forkNum, REL_METADATA_PSEUDO_BLOCKNO);
 	{
-		ZenithExistsRequest request = {
-			.req.tag = T_ZenithExistsRequest,
+		NeonExistsRequest request = {
+			.req.tag = T_NeonExistsRequest,
 			.req.latest = latest,
 			.req.lsn = request_lsn,
 			.rnode = reln->smgr_rnode.node,
-			.forknum = forkNum
-		};
+			.forknum = forkNum};
 		resp = page_server_request(&request);
 	}
 	switch (resp->tag)
 	{
-		case T_ZenithExistsResponse:
-			exists = ((ZenithExistsResponse *) resp)->exists;
+		case T_NeonExistsResponse:
+			exists = ((NeonExistsResponse *) resp)->exists;
 			break;
-		case T_ZenithErrorResponse:
+		case T_NeonErrorResponse:
 			ereport(ERROR,
 					(errcode(ERRCODE_IO_ERROR),
 					 errmsg("could not read relation existence of rel %u/%u/%u.%u from page server at lsn %X/%08X",
@@ -806,7 +799,7 @@ zenith_exists(SMgrRelation reln, ForkNumber forkNum)
 							forkNum,
 							(uint32) (request_lsn >> 32), (uint32) request_lsn),
 					 errdetail("page server returned error: %s",
-							   ((ZenithErrorResponse *) resp)->message)));
+							   ((NeonErrorResponse *) resp)->message)));
 			break;
 		default:
@@ -817,12 +810,12 @@ zenith_exists(SMgrRelation reln, ForkNumber forkNum)
 }
 /*
- * zenith_create() -- Create a new relation on zenithd storage
+ * neon_create() -- Create a new relation on neon storage
  *
  * If isRedo is true, it's okay for the relation to exist already.
  */
 void
-zenith_create(SMgrRelation reln, ForkNumber forkNum, bool isRedo)
+neon_create(SMgrRelation reln, ForkNumber forkNum, bool isRedo)
 {
 	switch (reln->smgr_relpersistence)
 	{
@@ -866,7 +859,7 @@ zenith_create(SMgrRelation reln, ForkNumber forkNum, bool isRedo)
 }
 /*
- * zenith_unlink() -- Unlink a relation.
+ * neon_unlink() -- Unlink a relation.
  *
  * Note that we're passed a RelFileNodeBackend --- by the time this is called,
  * there won't be an SMgrRelation hashtable entry anymore.
@@ -884,7 +877,7 @@ zenith_create(SMgrRelation reln, ForkNumber forkNum, bool isRedo)
  * we are usually not in a transaction anymore when this is called.
 */
 void
-zenith_unlink(RelFileNodeBackend rnode, ForkNumber forkNum, bool isRedo)
+neon_unlink(RelFileNodeBackend rnode, ForkNumber forkNum, bool isRedo)
 {
 	/*
 	 * Might or might not exist locally, depending on whether it's an unlogged
@@ -899,7 +892,7 @@ zenith_unlink(RelFileNodeBackend rnode, ForkNumber forkNum, bool isRedo)
 }
 /*
- * zenith_extend() -- Add a block to the specified relation.
+ * neon_extend() -- Add a block to the specified relation.
  *
  * The semantics are nearly the same as mdwrite(): write at the
  * specified position. However, this is to be used for the case of
@@ -908,8 +901,8 @@ zenith_unlink(RelFileNodeBackend rnode, ForkNumber forkNum, bool isRedo)
 * causes intervening file space to become filled with zeroes.
 */
 void
-zenith_extend(SMgrRelation reln, ForkNumber forkNum, BlockNumber blkno,
-			  char *buffer, bool skipFsync)
+neon_extend(SMgrRelation reln, ForkNumber forkNum, BlockNumber blkno,
+			char *buffer, bool skipFsync)
 {
 	XLogRecPtr	lsn;
@@ -951,7 +944,7 @@ zenith_extend(SMgrRelation reln, ForkNumber forkNum, BlockNumber blkno,
 				 errhint("This limit is defined by neon.max_cluster_size GUC")));
 	}
-	zenith_wallog_page(reln, forkNum, blkno, buffer);
+	neon_wallog_page(reln, forkNum, blkno, buffer);
 	set_cached_relsize(reln->smgr_rnode.node, forkNum, blkno + 1);
 	lsn = PageGetLSN(buffer);
@@ -971,10 +964,10 @@ zenith_extend(SMgrRelation reln, ForkNumber forkNum, BlockNumber blkno,
 }
 /*
- * zenith_open() -- Initialize newly-opened relation.
+ * neon_open() -- Initialize newly-opened relation.
 */
 void
-zenith_open(SMgrRelation reln)
+neon_open(SMgrRelation reln)
 {
 	/*
 	 * We don't have anything special to do here. Call mdopen() to let md.c
@@ -985,14 +978,14 @@ zenith_open(SMgrRelation reln)
 	mdopen(reln);
 	/* no work */
-	elog(SmgrTrace, "[ZENITH_SMGR] open noop");
+	elog(SmgrTrace, "[NEON_SMGR] open noop");
 }
 /*
- * zenith_close() -- Close the specified relation, if it isn't closed already.
+ * neon_close() -- Close the specified relation, if it isn't closed already.
 */
 void
-zenith_close(SMgrRelation reln, ForkNumber forknum)
+neon_close(SMgrRelation reln, ForkNumber forknum)
 {
 	/*
 	 * Let md.c close it, if it had it open. Doesn't hurt to do this even for
@@ -1003,19 +996,19 @@ zenith_close(SMgrRelation reln, ForkNumber forknum)
 /*
- * zenith_reset_prefetch() -- reoe all previously rgistered prefeth requests
+ * neon_reset_prefetch() -- remove all previously registered prefetch requests
 */
 void
-zenith_reset_prefetch(SMgrRelation reln)
+neon_reset_prefetch(SMgrRelation reln)
 {
 	n_prefetch_requests = 0;
 }
 /*
- * zenith_prefetch() -- Initiate asynchronous read of the specified block of a relation
+ * neon_prefetch() -- Initiate asynchronous read of the specified block of a relation
 */
 bool
-zenith_prefetch(SMgrRelation reln, ForkNumber forknum, BlockNumber blocknum)
+neon_prefetch(SMgrRelation reln, ForkNumber forknum, BlockNumber blocknum)
 {
 	switch (reln->smgr_relpersistence)
 	{
@@ -1046,14 +1039,14 @@ zenith_prefetch(SMgrRelation reln, ForkNumber forknum, BlockNumber blocknum)
 }
 /*
- * zenith_writeback() -- Tell the kernel to write pages back to storage.
+ * neon_writeback() -- Tell the kernel to write pages back to storage.
 *
 * This accepts a range of blocks because flushing several pages at once is
 * considerably more efficient than doing so individually.
 */
 void
-zenith_writeback(SMgrRelation reln, ForkNumber forknum,
-				 BlockNumber blocknum, BlockNumber nblocks)
+neon_writeback(SMgrRelation reln, ForkNumber forknum,
+			   BlockNumber blocknum, BlockNumber nblocks)
 {
 	switch (reln->smgr_relpersistence)
 	{
@@ -1075,7 +1068,7 @@ zenith_writeback(SMgrRelation reln, ForkNumber forknum,
 	}
 	/* not implemented */
-	elog(SmgrTrace, "[ZENITH_SMGR] writeback noop");
+	elog(SmgrTrace, "[NEON_SMGR] writeback noop");
 #ifdef DEBUG_COMPARE_LOCAL
 	if (IS_LOCAL_REL(reln))
@@ -1084,14 +1077,14 @@ zenith_writeback(SMgrRelation reln, ForkNumber forknum,
 }
 /*
- * While function is defined in the zenith extension it's used within neon_test_utils directly.
+ * While this function is defined in the neon extension, it's used within neon_test_utils directly.
 * To avoid breaking tests at runtime, please keep the function signature in sync.
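One behavior in the neon_extend hunk above deserves a note: extension is refused once the logical cluster size crosses neon.max_cluster_size, per the errhint. A minimal sketch of that gate, assuming the GUC is a megabyte limit with 0 meaning unlimited and that the current size is reachable through an accessor (both assumptions; the real code reports the failure with ereport(ERROR, ...)):

    #include <stdint.h>

    /* neon.max_cluster_size as registered in libpagestore.c
     * (assumed semantics: MB, 0 = no limit). */
    static int32_t max_cluster_size;

    /* Assumed accessor for the size the safekeeper feedback carries;
     * the extension pairs it with SetZenithCurrentClusterSize(). */
    extern uint64_t GetCurrentClusterSizeBytes(void);

    /* Gate a relation extension on the logical cluster size, as
     * neon_extend does before WAL-logging the new page. */
    static int
    cluster_has_room(void)
    {
        if (max_cluster_size <= 0)
            return 1;
        return GetCurrentClusterSizeBytes()
               < (uint64_t) max_cluster_size * 1024 * 1024;
    }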
*/ void -zenith_read_at_lsn(RelFileNode rnode, ForkNumber forkNum, BlockNumber blkno, - XLogRecPtr request_lsn, bool request_latest, char *buffer) +neon_read_at_lsn(RelFileNode rnode, ForkNumber forkNum, BlockNumber blkno, + XLogRecPtr request_lsn, bool request_latest, char *buffer) { - ZenithResponse *resp; + NeonResponse *resp; int i; /* @@ -1103,12 +1096,12 @@ zenith_read_at_lsn(RelFileNode rnode, ForkNumber forkNum, BlockNumber blkno, for (i = n_prefetched_buffers; i < n_prefetch_responses; i++) { resp = page_server->receive(); - if (resp->tag == T_ZenithGetPageResponse && + if (resp->tag == T_NeonGetPageResponse && RelFileNodeEquals(prefetch_responses[i].rnode, rnode) && prefetch_responses[i].forkNum == forkNum && prefetch_responses[i].blockNum == blkno) { - char *page = ((ZenithGetPageResponse *) resp)->page; + char *page = ((NeonGetPageResponse *) resp)->page; /* * Check if prefetched page is still relevant. If it is updated by @@ -1135,8 +1128,8 @@ zenith_read_at_lsn(RelFileNode rnode, ForkNumber forkNum, BlockNumber blkno, n_prefetch_responses = 0; n_prefetch_misses += 1; { - ZenithGetPageRequest request = { - .req.tag = T_ZenithGetPageRequest, + NeonGetPageRequest request = { + .req.tag = T_NeonGetPageRequest, .req.latest = request_latest, .req.lsn = request_lsn, .rnode = rnode, @@ -1147,14 +1140,14 @@ zenith_read_at_lsn(RelFileNode rnode, ForkNumber forkNum, BlockNumber blkno, if (n_prefetch_requests > 0) { /* Combine all prefetch requests with primary request */ - page_server->send((ZenithRequest *) &request); + page_server->send((NeonRequest *) & request); for (i = 0; i < n_prefetch_requests; i++) { request.rnode = prefetch_requests[i].rnode; request.forknum = prefetch_requests[i].forkNum; request.blkno = prefetch_requests[i].blockNum; prefetch_responses[i] = prefetch_requests[i]; - page_server->send((ZenithRequest *) &request); + page_server->send((NeonRequest *) & request); } page_server->flush(); n_prefetch_responses = n_prefetch_requests; @@ -1164,16 +1157,16 @@ zenith_read_at_lsn(RelFileNode rnode, ForkNumber forkNum, BlockNumber blkno, } else { - resp = page_server->request((ZenithRequest *) &request); + resp = page_server->request((NeonRequest *) & request); } } switch (resp->tag) { - case T_ZenithGetPageResponse: - memcpy(buffer, ((ZenithGetPageResponse *) resp)->page, BLCKSZ); + case T_NeonGetPageResponse: + memcpy(buffer, ((NeonGetPageResponse *) resp)->page, BLCKSZ); break; - case T_ZenithErrorResponse: + case T_NeonErrorResponse: ereport(ERROR, (errcode(ERRCODE_IO_ERROR), errmsg("could not read block %u in rel %u/%u/%u.%u from page server at lsn %X/%08X", @@ -1184,7 +1177,7 @@ zenith_read_at_lsn(RelFileNode rnode, ForkNumber forkNum, BlockNumber blkno, forkNum, (uint32) (request_lsn >> 32), (uint32) request_lsn), errdetail("page server returned error: %s", - ((ZenithErrorResponse *) resp)->message))); + ((NeonErrorResponse *) resp)->message))); break; default: @@ -1195,11 +1188,11 @@ zenith_read_at_lsn(RelFileNode rnode, ForkNumber forkNum, BlockNumber blkno, } /* - * zenith_read() -- Read the specified block from a relation. + * neon_read() -- Read the specified block from a relation. 
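neon_read_at_lsn above consumes queued prefetch responses strictly in arrival order: a response is used only if it answers exactly the block being read and is still current, and everything received before it is discarded as a miss. A standalone sketch of that match-or-discard drain; BlockKey and the receive/discard callbacks are stand-ins for the extension's structures:

    #include <stdbool.h>
    #include <stddef.h>

    #define MAX_PREFETCH 8

    typedef struct { unsigned rel, fork, blkno; } BlockKey;  /* stand-in tag */

    /* For each in-flight response, which block it will answer. */
    static BlockKey pending_for[MAX_PREFETCH];
    static int      n_consumed, n_pending;

    /* Transport stand-ins: receive the next queued response / drop one. */
    extern void *receive_response(void);
    extern void  discard_response(void *resp);

    /* Drain responses in arrival order; hand back the first one that answers
     * `want`, dropping everything before it, as neon_read_at_lsn does.
     * Returns NULL when the queue is exhausted without a hit. */
    static void *
    take_matching_prefetch(BlockKey want)
    {
        for (int i = n_consumed; i < n_pending; i++)
        {
            void    *resp = receive_response();
            BlockKey got = pending_for[i];

            if (got.rel == want.rel && got.fork == want.fork && got.blkno == want.blkno)
            {
                n_consumed = i + 1;   /* later responses stay queued */
                return resp;
            }
            discard_response(resp);   /* mismatch: prefetched page is wasted */
        }
        n_consumed = n_pending = 0;   /* fully drained: count it as a miss */
        return NULL;
    }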
*/ void -zenith_read(SMgrRelation reln, ForkNumber forkNum, BlockNumber blkno, - char *buffer) +neon_read(SMgrRelation reln, ForkNumber forkNum, BlockNumber blkno, + char *buffer) { bool latest; XLogRecPtr request_lsn; @@ -1221,8 +1214,8 @@ zenith_read(SMgrRelation reln, ForkNumber forkNum, BlockNumber blkno, elog(ERROR, "unknown relpersistence '%c'", reln->smgr_relpersistence); } - request_lsn = zenith_get_request_lsn(&latest, reln->smgr_rnode.node, forkNum, blkno); - zenith_read_at_lsn(reln->smgr_rnode.node, forkNum, blkno, request_lsn, latest, buffer); + request_lsn = neon_get_request_lsn(&latest, reln->smgr_rnode.node, forkNum, blkno); + neon_read_at_lsn(reln->smgr_rnode.node, forkNum, blkno, request_lsn, latest, buffer); #ifdef DEBUG_COMPARE_LOCAL if (forkNum == MAIN_FORKNUM && IS_LOCAL_REL(reln)) @@ -1328,15 +1321,15 @@ hexdump_page(char *page) #endif /* - * zenith_write() -- Write the supplied block at the appropriate location. + * neon_write() -- Write the supplied block at the appropriate location. * * This is to be used only for updating already-existing blocks of a * relation (ie, those before the current EOF). To extend a relation, * use mdextend(). */ void -zenith_write(SMgrRelation reln, ForkNumber forknum, BlockNumber blocknum, - char *buffer, bool skipFsync) +neon_write(SMgrRelation reln, ForkNumber forknum, BlockNumber blocknum, + char *buffer, bool skipFsync) { XLogRecPtr lsn; @@ -1372,7 +1365,7 @@ zenith_write(SMgrRelation reln, ForkNumber forknum, BlockNumber blocknum, elog(ERROR, "unknown relpersistence '%c'", reln->smgr_relpersistence); } - zenith_wallog_page(reln, forknum, blocknum, buffer); + neon_wallog_page(reln, forknum, blocknum, buffer); lsn = PageGetLSN(buffer); elog(SmgrTrace, "smgrwrite called for %u/%u/%u.%u blk %u, page LSN: %X/%08X", @@ -1389,12 +1382,12 @@ zenith_write(SMgrRelation reln, ForkNumber forknum, BlockNumber blocknum, } /* - * zenith_nblocks() -- Get the number of blocks stored in a relation. + * neon_nblocks() -- Get the number of blocks stored in a relation. 
*/ BlockNumber -zenith_nblocks(SMgrRelation reln, ForkNumber forknum) +neon_nblocks(SMgrRelation reln, ForkNumber forknum) { - ZenithResponse *resp; + NeonResponse *resp; BlockNumber n_blocks; bool latest; XLogRecPtr request_lsn; @@ -1426,10 +1419,10 @@ zenith_nblocks(SMgrRelation reln, ForkNumber forknum) return n_blocks; } - request_lsn = zenith_get_request_lsn(&latest, reln->smgr_rnode.node, forknum, REL_METADATA_PSEUDO_BLOCKNO); + request_lsn = neon_get_request_lsn(&latest, reln->smgr_rnode.node, forknum, REL_METADATA_PSEUDO_BLOCKNO); { - ZenithNblocksRequest request = { - .req.tag = T_ZenithNblocksRequest, + NeonNblocksRequest request = { + .req.tag = T_NeonNblocksRequest, .req.latest = latest, .req.lsn = request_lsn, .rnode = reln->smgr_rnode.node, @@ -1441,11 +1434,11 @@ zenith_nblocks(SMgrRelation reln, ForkNumber forknum) switch (resp->tag) { - case T_ZenithNblocksResponse: - n_blocks = ((ZenithNblocksResponse *) resp)->n_blocks; + case T_NeonNblocksResponse: + n_blocks = ((NeonNblocksResponse *) resp)->n_blocks; break; - case T_ZenithErrorResponse: + case T_NeonErrorResponse: ereport(ERROR, (errcode(ERRCODE_IO_ERROR), errmsg("could not read relation size of rel %u/%u/%u.%u from page server at lsn %X/%08X", @@ -1455,7 +1448,7 @@ zenith_nblocks(SMgrRelation reln, ForkNumber forknum) forknum, (uint32) (request_lsn >> 32), (uint32) request_lsn), errdetail("page server returned error: %s", - ((ZenithErrorResponse *) resp)->message))); + ((NeonErrorResponse *) resp)->message))); break; default: @@ -1463,7 +1456,7 @@ zenith_nblocks(SMgrRelation reln, ForkNumber forknum) } update_cached_relsize(reln->smgr_rnode.node, forknum, n_blocks); - elog(SmgrTrace, "zenith_nblocks: rel %u/%u/%u fork %u (request LSN %X/%08X): %u blocks", + elog(SmgrTrace, "neon_nblocks: rel %u/%u/%u fork %u (request LSN %X/%08X): %u blocks", reln->smgr_rnode.node.spcNode, reln->smgr_rnode.node.dbNode, reln->smgr_rnode.node.relNode, @@ -1476,21 +1469,21 @@ zenith_nblocks(SMgrRelation reln, ForkNumber forknum) } /* - * zenith_db_size() -- Get the size of the database in bytes. + * neon_db_size() -- Get the size of the database in bytes. 
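As the neon_nblocks hunk above shows, a relation-size query is a full pageserver round trip, so every successful lookup ends with update_cached_relsize() and the cached fast path is tried first. A standalone sketch of that read-through pattern; the cache API mirrors the declarations in pagestore_client.h, and ask_pageserver_nblocks() stands in for the NeonNblocksRequest/Response exchange:

    #include <stdbool.h>
    #include <stdint.h>

    typedef uint32_t BlockNumber;
    typedef struct { uint32_t spc, db, rel; } RelFileNodeKey;  /* stand-in */

    /* Shapes follow pagestore_client.h; the real cache is a shared-memory
     * hash set up in relsize_cache.c. */
    extern bool get_cached_relsize(RelFileNodeKey rnode, int forknum, BlockNumber *size);
    extern void update_cached_relsize(RelFileNodeKey rnode, int forknum, BlockNumber size);

    /* Stand-in for the pageserver round trip. */
    extern BlockNumber ask_pageserver_nblocks(RelFileNodeKey rnode, int forknum);

    /* Read-through lookup: serve from the cache when possible, otherwise
     * pay the network round trip once and remember the answer. */
    static BlockNumber
    nblocks_read_through(RelFileNodeKey rnode, int forknum)
    {
        BlockNumber n;

        if (get_cached_relsize(rnode, forknum, &n))
            return n;                         /* fast path: no pageserver call */
        n = ask_pageserver_nblocks(rnode, forknum);
        update_cached_relsize(rnode, forknum, n);
        return n;
    }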
*/ int64 -zenith_dbsize(Oid dbNode) +neon_dbsize(Oid dbNode) { - ZenithResponse *resp; + NeonResponse *resp; int64 db_size; XLogRecPtr request_lsn; bool latest; RelFileNode dummy_node = {InvalidOid, InvalidOid, InvalidOid}; - request_lsn = zenith_get_request_lsn(&latest, dummy_node, MAIN_FORKNUM, REL_METADATA_PSEUDO_BLOCKNO); + request_lsn = neon_get_request_lsn(&latest, dummy_node, MAIN_FORKNUM, REL_METADATA_PSEUDO_BLOCKNO); { - ZenithDbSizeRequest request = { - .req.tag = T_ZenithDbSizeRequest, + NeonDbSizeRequest request = { + .req.tag = T_NeonDbSizeRequest, .req.latest = latest, .req.lsn = request_lsn, .dbNode = dbNode, @@ -1501,25 +1494,25 @@ zenith_dbsize(Oid dbNode) switch (resp->tag) { - case T_ZenithDbSizeResponse: - db_size = ((ZenithDbSizeResponse *) resp)->db_size; + case T_NeonDbSizeResponse: + db_size = ((NeonDbSizeResponse *) resp)->db_size; break; - case T_ZenithErrorResponse: + case T_NeonErrorResponse: ereport(ERROR, (errcode(ERRCODE_IO_ERROR), errmsg("could not read db size of db %u from page server at lsn %X/%08X", dbNode, (uint32) (request_lsn >> 32), (uint32) request_lsn), errdetail("page server returned error: %s", - ((ZenithErrorResponse *) resp)->message))); + ((NeonErrorResponse *) resp)->message))); break; default: elog(ERROR, "unexpected response from page server with tag 0x%02x", resp->tag); } - elog(SmgrTrace, "zenith_dbsize: db %u (request LSN %X/%08X): %ld bytes", + elog(SmgrTrace, "neon_dbsize: db %u (request LSN %X/%08X): %ld bytes", dbNode, (uint32) (request_lsn >> 32), (uint32) request_lsn, db_size); @@ -1529,10 +1522,10 @@ zenith_dbsize(Oid dbNode) } /* - * zenith_truncate() -- Truncate relation to specified number of blocks. + * neon_truncate() -- Truncate relation to specified number of blocks. */ void -zenith_truncate(SMgrRelation reln, ForkNumber forknum, BlockNumber nblocks) +neon_truncate(SMgrRelation reln, ForkNumber forknum, BlockNumber nblocks) { XLogRecPtr lsn; @@ -1591,7 +1584,7 @@ zenith_truncate(SMgrRelation reln, ForkNumber forknum, BlockNumber nblocks) } /* - * zenith_immedsync() -- Immediately sync a relation to stable storage. + * neon_immedsync() -- Immediately sync a relation to stable storage. * * Note that only writes already issued are synced; this routine knows * nothing of dirty buffers that may exist inside the buffer manager. We @@ -1602,7 +1595,7 @@ zenith_truncate(SMgrRelation reln, ForkNumber forknum, BlockNumber nblocks) * segment may survive recovery, reintroducing unwanted data into the table. */ void -zenith_immedsync(SMgrRelation reln, ForkNumber forknum) +neon_immedsync(SMgrRelation reln, ForkNumber forknum) { switch (reln->smgr_relpersistence) { @@ -1622,7 +1615,7 @@ zenith_immedsync(SMgrRelation reln, ForkNumber forknum) elog(ERROR, "unknown relpersistence '%c'", reln->smgr_relpersistence); } - elog(SmgrTrace, "[ZENITH_SMGR] immedsync noop"); + elog(SmgrTrace, "[NEON_SMGR] immedsync noop"); #ifdef DEBUG_COMPARE_LOCAL if (IS_LOCAL_REL(reln)) @@ -1631,16 +1624,16 @@ zenith_immedsync(SMgrRelation reln, ForkNumber forknum) } /* - * zenith_start_unlogged_build() -- Starting build operation on a rel. + * neon_start_unlogged_build() -- Starting build operation on a rel. * * Some indexes are built in two phases, by first populating the table with * regular inserts, using the shared buffer cache but skipping WAL-logging, - * and WAL-logging the whole relation after it's done. Zenith relies on the + * and WAL-logging the whole relation after it's done. 
Neon relies on the * WAL to reconstruct pages, so we cannot use the page server in the * first phase when the changes are not logged. */ static void -zenith_start_unlogged_build(SMgrRelation reln) +neon_start_unlogged_build(SMgrRelation reln) { /* * Currently, there can be only one unlogged relation build operation in @@ -1692,13 +1685,13 @@ zenith_start_unlogged_build(SMgrRelation reln) } /* - * zenith_finish_unlogged_build_phase_1() + * neon_finish_unlogged_build_phase_1() * * Call this after you have finished populating a relation in unlogged mode, * before you start WAL-logging it. */ static void -zenith_finish_unlogged_build_phase_1(SMgrRelation reln) +neon_finish_unlogged_build_phase_1(SMgrRelation reln) { Assert(unlogged_build_rel == reln); @@ -1718,7 +1711,7 @@ zenith_finish_unlogged_build_phase_1(SMgrRelation reln) } /* - * zenith_end_unlogged_build() -- Finish an unlogged rel build. + * neon_end_unlogged_build() -- Finish an unlogged rel build. * * Call this after you have finished WAL-logging an relation that was * first populated without WAL-logging. @@ -1727,7 +1720,7 @@ zenith_finish_unlogged_build_phase_1(SMgrRelation reln) * WAL-logged and is present in the page server. */ static void -zenith_end_unlogged_build(SMgrRelation reln) +neon_end_unlogged_build(SMgrRelation reln) { Assert(unlogged_build_rel == reln); @@ -1769,7 +1762,7 @@ zenith_end_unlogged_build(SMgrRelation reln) } static void -AtEOXact_zenith(XactEvent event, void *arg) +AtEOXact_neon(XactEvent event, void *arg) { switch (event) { @@ -1802,47 +1795,46 @@ AtEOXact_zenith(XactEvent event, void *arg) } } -static const struct f_smgr zenith_smgr = +static const struct f_smgr neon_smgr = { - .smgr_init = zenith_init, + .smgr_init = neon_init, .smgr_shutdown = NULL, - .smgr_open = zenith_open, - .smgr_close = zenith_close, - .smgr_create = zenith_create, - .smgr_exists = zenith_exists, - .smgr_unlink = zenith_unlink, - .smgr_extend = zenith_extend, - .smgr_prefetch = zenith_prefetch, - .smgr_reset_prefetch = zenith_reset_prefetch, - .smgr_read = zenith_read, - .smgr_write = zenith_write, - .smgr_writeback = zenith_writeback, - .smgr_nblocks = zenith_nblocks, - .smgr_truncate = zenith_truncate, - .smgr_immedsync = zenith_immedsync, + .smgr_open = neon_open, + .smgr_close = neon_close, + .smgr_create = neon_create, + .smgr_exists = neon_exists, + .smgr_unlink = neon_unlink, + .smgr_extend = neon_extend, + .smgr_prefetch = neon_prefetch, + .smgr_reset_prefetch = neon_reset_prefetch, + .smgr_read = neon_read, + .smgr_write = neon_write, + .smgr_writeback = neon_writeback, + .smgr_nblocks = neon_nblocks, + .smgr_truncate = neon_truncate, + .smgr_immedsync = neon_immedsync, - .smgr_start_unlogged_build = zenith_start_unlogged_build, - .smgr_finish_unlogged_build_phase_1 = zenith_finish_unlogged_build_phase_1, - .smgr_end_unlogged_build = zenith_end_unlogged_build, + .smgr_start_unlogged_build = neon_start_unlogged_build, + .smgr_finish_unlogged_build_phase_1 = neon_finish_unlogged_build_phase_1, + .smgr_end_unlogged_build = neon_end_unlogged_build, }; - const f_smgr * -smgr_zenith(BackendId backend, RelFileNode rnode) +smgr_neon(BackendId backend, RelFileNode rnode) { /* Don't use page server for temp relations */ if (backend != InvalidBackendId) return smgr_standard(backend, rnode); else - return &zenith_smgr; + return &neon_smgr; } void -smgr_init_zenith(void) +smgr_init_neon(void) { - RegisterXactCallback(AtEOXact_zenith, NULL); + RegisterXactCallback(AtEOXact_neon, NULL); smgr_init_standard(); - zenith_init(); + 
neon_init();
 }
diff --git a/pgxn/neon/relsize_cache.c b/pgxn/neon/relsize_cache.c
index 31021f3e41..d4262c730a 100644
--- a/pgxn/neon/relsize_cache.c
+++ b/pgxn/neon/relsize_cache.c
@@ -56,7 +56,7 @@ static void relsize_shmem_request(void);
 #define DEFAULT_RELSIZE_HASH_SIZE (64 * 1024)
 static void
-zenith_smgr_shmem_startup(void)
+neon_smgr_shmem_startup(void)
 {
 	static HASHCTL info;
@@ -174,14 +174,14 @@ relsize_hash_init(void)
 #endif
 		prev_shmem_startup_hook = shmem_startup_hook;
-		shmem_startup_hook = zenith_smgr_shmem_startup;
+		shmem_startup_hook = neon_smgr_shmem_startup;
 	}
 }
 #if PG_VERSION_NUM >= 150000
 /*
 * shmem_request hook: request additional shared resources. We'll allocate or
- * attach to the shared resources in zenith_smgr_shmem_startup().
+ * attach to the shared resources in neon_smgr_shmem_startup().
 */
 static void
 relsize_shmem_request(void)
diff --git a/pgxn/neon/walproposer.c b/pgxn/neon/walproposer.c
index 05257ced4c..fc0b660a64 100644
--- a/pgxn/neon/walproposer.c
+++ b/pgxn/neon/walproposer.c
@@ -71,14 +71,13 @@
 #include "walproposer_utils.h"
 #include "replication/walpropshim.h"
-
 char *wal_acceptors_list;
 int wal_acceptor_reconnect_timeout;
 int wal_acceptor_connect_timeout;
 bool am_wal_proposer;
-char *zenith_timeline_walproposer = NULL;
-char *zenith_tenant_walproposer = NULL;
+char *neon_timeline_walproposer = NULL;
+char *neon_tenant_walproposer = NULL;
 /* Declared in walproposer.h, defined here, initialized in libpqwalproposer.c */
 WalProposerFunctionsType *WalProposerFunctions = NULL;
@@ -89,7 +88,7 @@
 static int n_safekeepers = 0;
 static int quorum = 0;
 static Safekeeper safekeeper[MAX_SAFEKEEPERS];
 static XLogRecPtr availableLsn; /* WAL has been generated up to this point */
 static XLogRecPtr lastSentCommitLsn;	/* last commitLsn broadcast to
 										 * safekeepers */
 static ProposerGreeting greetRequest;
 static VoteRequest voteRequest; /* Vote request for safekeeper */
@@ -162,7 +161,6 @@ static bool BlockingWrite(Safekeeper *sk, void *msg, size_t msg_size, Safekeeper
 static bool AsyncWrite(Safekeeper *sk, void *msg, size_t msg_size, SafekeeperState flush_state);
 static bool AsyncFlush(Safekeeper *sk);
-
 static void nwp_shmem_startup_hook(void);
 static void nwp_register_gucs(void);
 static void nwp_prepare_shmem(void);
@@ -176,7 +174,6 @@
 static shmem_request_hook_type prev_shmem_request_hook = NULL;
 static void walproposer_shmem_request(void);
 #endif
-
 void
 pg_init_walproposer(void)
 {
@@ -207,10 +204,9 @@ nwp_register_gucs(void)
 							   &wal_acceptors_list, /* valueAddr */
 							   "",	/* bootValue */
 							   PGC_POSTMASTER,
 							   GUC_LIST_INPUT,	/* extensions can't use
 												 * GUC_LIST_QUOTE */
-							   NULL, NULL, NULL
-		);
+							   NULL, NULL, NULL);
 	DefineCustomIntVariable(
 							"neon.safekeeper_reconnect_timeout",
@@ -220,8 +216,7 @@ nwp_register_gucs(void)
 							1000, 0, INT_MAX,	/* default, min, max */
 							PGC_SIGHUP, /* context */
 							GUC_UNIT_MS,	/* flags */
-							NULL, NULL, NULL
-		);
+							NULL, NULL, NULL);
 	DefineCustomIntVariable(
 							"neon.safekeeper_connect_timeout",
@@ -231,9 +226,7 @@ nwp_register_gucs(void)
 							5000, 0, INT_MAX,
 							PGC_SIGHUP,
 							GUC_UNIT_MS,
-							NULL, NULL, NULL
-		);
-
+							NULL, NULL, NULL);
 }
 /* shmem handling */
@@ -499,19 +492,19 @@ WalProposerInitImpl(XLogRecPtr flushRecPtr, uint64 systemId)
 	greetRequest.pgVersion = PG_VERSION_NUM;
 	pg_strong_random(&greetRequest.proposerId, sizeof(greetRequest.proposerId));
 	greetRequest.systemId = systemId;
-	if (!zenith_timeline_walproposer)
+	if (!neon_timeline_walproposer)
 		elog(FATAL, "neon.timeline_id is not provided");
-	if (*zenith_timeline_walproposer != '\0' &&
-		!HexDecodeString(greetRequest.ztimelineid, zenith_timeline_walproposer, 16))
-		elog(FATAL, "Could not parse neon.timeline_id, %s", zenith_timeline_walproposer);
-	if (!zenith_tenant_walproposer)
+	if (*neon_timeline_walproposer != '\0' &&
+		!HexDecodeString(greetRequest.timeline_id, neon_timeline_walproposer, 16))
+		elog(FATAL, "Could not parse neon.timeline_id, %s", neon_timeline_walproposer);
+	if (!neon_tenant_walproposer)
 		elog(FATAL, "neon.tenant_id is not provided");
-	if (*zenith_tenant_walproposer != '\0' &&
-		!HexDecodeString(greetRequest.ztenantid, zenith_tenant_walproposer, 16))
-		elog(FATAL, "Could not parse neon.tenant_id, %s", zenith_tenant_walproposer);
+	if (*neon_tenant_walproposer != '\0' &&
+		!HexDecodeString(greetRequest.tenant_id, neon_tenant_walproposer, 16))
+		elog(FATAL, "Could not parse neon.tenant_id, %s", neon_tenant_walproposer);
 #if PG_VERSION_NUM >= 150000
-/* FIXME don't use hardcoded timeline id */
+	/* FIXME don't use hardcoded timeline id */
 	greetRequest.timeline = 1;
 #else
 	greetRequest.timeline = ThisTimeLineID;
@@ -657,8 +650,8 @@ ResetConnection(Safekeeper *sk)
 	int written = 0;
 	written = snprintf((char *) &sk->conninfo, MAXCONNINFO,
-					   "host=%s port=%s dbname=replication options='-c ztimelineid=%s ztenantid=%s'",
-					   sk->host, sk->port, zenith_timeline_walproposer, zenith_tenant_walproposer);
+					   "host=%s port=%s dbname=replication options='-c timeline_id=%s tenant_id=%s'",
+					   sk->host, sk->port, neon_timeline_walproposer, neon_tenant_walproposer);
 	/*
 	 * currently connection string is not that long, but once we pass
@@ -1326,8 +1319,7 @@ DetermineEpochStartLsn(void)
 		 propTerm,
 		 LSN_FORMAT_ARGS(propEpochStartLsn),
 		 safekeeper[donor].host, safekeeper[donor].port,
-		 LSN_FORMAT_ARGS(truncateLsn)
-		);
+		 LSN_FORMAT_ARGS(truncateLsn));
 	/*
 	 * Ensure the basebackup we are running (at RedoStartLsn) matches LSN
@@ -1373,8 +1365,8 @@ WalProposerRecovery(int donor, TimeLineID timeline, XLogRecPtr startpos, XLogRec
 	WalReceiverConn *wrconn;
 	WalRcvStreamOptions options;
-	sprintf(conninfo, "host=%s port=%s dbname=replication options='-c ztimelineid=%s ztenantid=%s'",
-			safekeeper[donor].host, safekeeper[donor].port, zenith_timeline_walproposer, zenith_tenant_walproposer);
+	sprintf(conninfo, "host=%s port=%s dbname=replication options='-c timeline_id=%s tenant_id=%s'",
+			safekeeper[donor].host, safekeeper[donor].port, neon_timeline_walproposer, neon_tenant_walproposer);
 	wrconn = walrcv_connect(conninfo, false, "wal_proposer_recovery", &err);
 	if (!wrconn)
 	{
@@ -1544,8 +1536,7 @@ SendProposerElected(Safekeeper *sk)
 	else
 	{
 		XLogRecPtr propEndLsn = propTermHistory.entries[i + 1].lsn;
-		XLogRecPtr skEndLsn = (i + 1 < th->n_entries ? th->entries[i + 1].lsn :
-							   sk->voteResponse.flushLsn);
+		XLogRecPtr skEndLsn = (i + 1 < th->n_entries ? th->entries[i + 1].lsn : sk->voteResponse.flushLsn);
 		sk->startStreamingAt = Min(propEndLsn, skEndLsn);
 	}
@@ -1759,7 +1750,7 @@ SendAppendRequests(Safekeeper *sk)
 						req->beginLsn,
 						req->endLsn - req->beginLsn,
 #if PG_VERSION_NUM >= 150000
-	/* FIXME don't use hardcoded timelineid here */
+	/* FIXME don't use hardcoded timeline_id here */
 						1,
 #else
 						ThisTimeLineID,
@@ -1784,9 +1775,9 @@ SendAppendRequests(Safekeeper *sk)
 		case PG_ASYNC_WRITE_TRY_FLUSH:
 			/*
-			 * We still need to call PQflush some more to finish the job.
-			 * Caller function will handle this by setting right event
-			 * set.
+			 * We still need to call PQflush some more to finish the job.
+			 * Caller function will handle this by setting the right event
+			 * set.
Caller function will handle this by setting right + * event* set. */ sk->flushWrite = true; return true; @@ -1885,40 +1876,40 @@ ParseReplicationFeedbackMessage(StringInfo reply_message, ReplicationFeedback * if (strcmp(key, "current_timeline_size") == 0) { pq_getmsgint(reply_message, sizeof(int32)); - //read value length - rf->currentClusterSize = pq_getmsgint64(reply_message); + /* read value length */ + rf->currentClusterSize = pq_getmsgint64(reply_message); elog(DEBUG2, "ParseReplicationFeedbackMessage: current_timeline_size %lu", rf->currentClusterSize); } else if (strcmp(key, "ps_writelsn") == 0) { pq_getmsgint(reply_message, sizeof(int32)); - //read value length - rf->ps_writelsn = pq_getmsgint64(reply_message); + /* read value length */ + rf->ps_writelsn = pq_getmsgint64(reply_message); elog(DEBUG2, "ParseReplicationFeedbackMessage: ps_writelsn %X/%X", LSN_FORMAT_ARGS(rf->ps_writelsn)); } else if (strcmp(key, "ps_flushlsn") == 0) { pq_getmsgint(reply_message, sizeof(int32)); - //read value length - rf->ps_flushlsn = pq_getmsgint64(reply_message); + /* read value length */ + rf->ps_flushlsn = pq_getmsgint64(reply_message); elog(DEBUG2, "ParseReplicationFeedbackMessage: ps_flushlsn %X/%X", LSN_FORMAT_ARGS(rf->ps_flushlsn)); } else if (strcmp(key, "ps_applylsn") == 0) { pq_getmsgint(reply_message, sizeof(int32)); - //read value length - rf->ps_applylsn = pq_getmsgint64(reply_message); + /* read value length */ + rf->ps_applylsn = pq_getmsgint64(reply_message); elog(DEBUG2, "ParseReplicationFeedbackMessage: ps_applylsn %X/%X", LSN_FORMAT_ARGS(rf->ps_applylsn)); } else if (strcmp(key, "ps_replytime") == 0) { pq_getmsgint(reply_message, sizeof(int32)); - //read value length - rf->ps_replytime = pq_getmsgint64(reply_message); + /* read value length */ + rf->ps_replytime = pq_getmsgint64(reply_message); { char *replyTimeStr; @@ -1933,13 +1924,13 @@ ParseReplicationFeedbackMessage(StringInfo reply_message, ReplicationFeedback * else { len = pq_getmsgint(reply_message, sizeof(int32)); - //read value length + /* read value length */ /* * Skip unknown keys to support backward compatibile protocol * changes */ - elog(LOG, "ParseReplicationFeedbackMessage: unknown key: %s len %d", key, len); + elog(LOG, "ParseReplicationFeedbackMessage: unknown key: %s len %d", key, len); pq_getmsgbytes(reply_message, len); }; } @@ -1973,7 +1964,6 @@ CombineHotStanbyFeedbacks(HotStandbyFeedback * hs) } } - /* * Get minimum of flushed LSNs of all safekeepers, which is the LSN of the * last WAL record that can be safely discarded. @@ -2009,8 +1999,7 @@ GetAcknowledgedByQuorumWALPosition(void) * Like in Raft, we aren't allowed to commit entries from previous * terms, so ignore reported LSN until it gets to epochStartLsn. */ - responses[i] = safekeeper[i].appendResponse.flushLsn >= propEpochStartLsn ? - safekeeper[i].appendResponse.flushLsn : 0; + responses[i] = safekeeper[i].appendResponse.flushLsn >= propEpochStartLsn ? 
safekeeper[i].appendResponse.flushLsn : 0; } qsort(responses, n_safekeepers, sizeof(XLogRecPtr), CompareLsn); @@ -2058,7 +2047,6 @@ replication_feedback_set(ReplicationFeedback * rf) SpinLockRelease(&walprop_shared->mutex); } - void replication_feedback_get_lsns(XLogRecPtr *writeLsn, XLogRecPtr *flushLsn, XLogRecPtr *applyLsn) { @@ -2069,12 +2057,11 @@ replication_feedback_get_lsns(XLogRecPtr *writeLsn, XLogRecPtr *flushLsn, XLogRe SpinLockRelease(&walprop_shared->mutex); } - /* * Get ReplicationFeedback fields from the most advanced safekeeper */ static void -GetLatestZentihFeedback(ReplicationFeedback * rf) +GetLatestNeonFeedback(ReplicationFeedback * rf) { int latest_safekeeper = 0; XLogRecPtr ps_writelsn = InvalidXLogRecPtr; @@ -2094,7 +2081,7 @@ GetLatestZentihFeedback(ReplicationFeedback * rf) rf->ps_applylsn = safekeeper[latest_safekeeper].appendResponse.rf.ps_applylsn; rf->ps_replytime = safekeeper[latest_safekeeper].appendResponse.rf.ps_replytime; - elog(DEBUG2, "GetLatestZentihFeedback: currentClusterSize %lu," + elog(DEBUG2, "GetLatestNeonFeedback: currentClusterSize %lu," " ps_writelsn %X/%X, ps_flushlsn %X/%X, ps_applylsn %X/%X, ps_replytime %lu", rf->currentClusterSize, LSN_FORMAT_ARGS(rf->ps_writelsn), @@ -2113,14 +2100,13 @@ HandleSafekeeperResponse(void) XLogRecPtr diskConsistentLsn; XLogRecPtr minFlushLsn; - minQuorumLsn = GetAcknowledgedByQuorumWALPosition(); diskConsistentLsn = quorumFeedback.rf.ps_flushlsn; if (!syncSafekeepers) { /* Get ReplicationFeedback fields from the most advanced safekeeper */ - GetLatestZentihFeedback(&quorumFeedback.rf); + GetLatestNeonFeedback(&quorumFeedback.rf); SetZenithCurrentClusterSize(quorumFeedback.rf.currentClusterSize); } @@ -2139,7 +2125,7 @@ HandleSafekeeperResponse(void) quorumFeedback.flushLsn, /* - * apply_lsn - This is what processed and durably saved at + * apply_lsn - This is what processed and durably saved at* * pageserver. 
*/ quorumFeedback.rf.ps_flushlsn, @@ -2460,7 +2446,7 @@ backpressure_lag_impl(void) XLogRecPtr myFlushLsn = GetFlushRecPtr(); #endif replication_feedback_get_lsns(&writePtr, &flushPtr, &applyPtr); -#define MB ((XLogRecPtr)1024*1024) +#define MB ((XLogRecPtr)1024 * 1024) elog(DEBUG2, "current flushLsn %X/%X ReplicationFeedback: write %X/%X flush %X/%X apply %X/%X", LSN_FORMAT_ARGS(myFlushLsn), @@ -2468,23 +2454,17 @@ backpressure_lag_impl(void) LSN_FORMAT_ARGS(flushPtr), LSN_FORMAT_ARGS(applyPtr)); - if ((writePtr != InvalidXLogRecPtr - && max_replication_write_lag > 0 - && myFlushLsn > writePtr + max_replication_write_lag * MB)) + if ((writePtr != InvalidXLogRecPtr && max_replication_write_lag > 0 && myFlushLsn > writePtr + max_replication_write_lag * MB)) { return (myFlushLsn - writePtr - max_replication_write_lag * MB); } - if ((flushPtr != InvalidXLogRecPtr - && max_replication_flush_lag > 0 - && myFlushLsn > flushPtr + max_replication_flush_lag * MB)) + if ((flushPtr != InvalidXLogRecPtr && max_replication_flush_lag > 0 && myFlushLsn > flushPtr + max_replication_flush_lag * MB)) { return (myFlushLsn - flushPtr - max_replication_flush_lag * MB); } - if ((applyPtr != InvalidXLogRecPtr - && max_replication_apply_lag > 0 - && myFlushLsn > applyPtr + max_replication_apply_lag * MB)) + if ((applyPtr != InvalidXLogRecPtr && max_replication_apply_lag > 0 && myFlushLsn > applyPtr + max_replication_apply_lag * MB)) { return (myFlushLsn - applyPtr - max_replication_apply_lag * MB); } diff --git a/pgxn/neon/walproposer.h b/pgxn/neon/walproposer.h index 59e70f33bf..051c7c02a6 100644 --- a/pgxn/neon/walproposer.h +++ b/pgxn/neon/walproposer.h @@ -10,16 +10,16 @@ #include "utils/uuid.h" #include "replication/walreceiver.h" -#define SK_MAGIC 0xCafeCeefu -#define SK_PROTOCOL_VERSION 2 +#define SK_MAGIC 0xCafeCeefu +#define SK_PROTOCOL_VERSION 2 -#define MAX_SAFEKEEPERS 32 -#define MAX_SEND_SIZE (XLOG_BLCKSZ * 16) /* max size of a single - * WAL message */ -#define XLOG_HDR_SIZE (1+8*3) /* 'w' + startPos + walEnd + timestamp */ -#define XLOG_HDR_START_POS 1 /* offset of start position in wal sender +#define MAX_SAFEKEEPERS 32 +#define MAX_SEND_SIZE (XLOG_BLCKSZ * 16) /* max size of a single* WAL + * message */ +#define XLOG_HDR_SIZE (1 + 8 * 3) /* 'w' + startPos + walEnd + timestamp */ +#define XLOG_HDR_START_POS 1 /* offset of start position in wal sender* * message header */ -#define XLOG_HDR_END_POS (1+8) /* offset of end position in wal sender +#define XLOG_HDR_END_POS (1 + 8) /* offset of end position in wal sender* * message header */ /* @@ -39,8 +39,8 @@ typedef struct WalProposerConn WalProposerConn; struct WalMessage; typedef struct WalMessage WalMessage; -extern char *zenith_timeline_walproposer; -extern char *zenith_tenant_walproposer; +extern char *neon_timeline_walproposer; +extern char *neon_tenant_walproposer; /* Possible return values from ReadPGAsync */ typedef enum @@ -170,8 +170,8 @@ typedef struct ProposerGreeting uint32 pgVersion; pg_uuid_t proposerId; uint64 systemId; /* Postgres system identifier */ - uint8 ztimelineid[16]; /* Zenith timeline id */ - uint8 ztenantid[16]; + uint8 timeline_id[16]; /* Neon timeline id */ + uint8 tenant_id[16]; TimeLineID timeline; uint32 walSegSize; } ProposerGreeting; @@ -226,7 +226,7 @@ typedef struct VoteResponse * proposer to choose the most advanced one. 
*/ XLogRecPtr flushLsn; - XLogRecPtr truncateLsn; /* minimal LSN which may be needed for + XLogRecPtr truncateLsn; /* minimal LSN which may be needed for* * recovery of some safekeeper */ TermHistory termHistory; XLogRecPtr timelineStartLsn; /* timeline globally starts at this LSN */ @@ -283,7 +283,6 @@ typedef struct HotStandbyFeedback FullTransactionId catalog_xmin; } HotStandbyFeedback; - typedef struct ReplicationFeedback { /* current size of the timeline on pageserver */ @@ -295,7 +294,6 @@ typedef struct ReplicationFeedback TimestampTz ps_replytime; } ReplicationFeedback; - typedef struct WalproposerShmemState { slock_t mutex; @@ -323,7 +321,7 @@ typedef struct AppendResponse XLogRecPtr commitLsn; HotStandbyFeedback hs; /* Feedback recieved from pageserver includes standby_status_update fields */ - /* and custom zenith feedback. */ + /* and custom neon feedback. */ /* This part of the message is extensible. */ ReplicationFeedback rf; } AppendResponse; @@ -332,7 +330,6 @@ typedef struct AppendResponse /* Other fields are fixed part */ #define APPENDRESPONSE_FIXEDPART_SIZE offsetof(AppendResponse, rf) - /* * Descriptor of safekeeper */ @@ -340,7 +337,7 @@ typedef struct Safekeeper { char const *host; char const *port; - char conninfo[MAXCONNINFO]; /* connection info for + char conninfo[MAXCONNINFO]; /* connection info for* * connecting/reconnecting */ /* @@ -366,12 +363,12 @@ typedef struct Safekeeper */ XLogRecPtr startStreamingAt; - bool flushWrite; /* set to true if we need to call AsyncFlush, + bool flushWrite; /* set to true if we need to call AsyncFlush,* * to flush pending messages */ XLogRecPtr streamingAt; /* current streaming position */ AppendRequestHeader appendRequest; /* request for sending to safekeeper */ - int eventPos; /* position in wait event set. Equal to -1 if + int eventPos; /* position in wait event set. Equal to -1 if* * no event */ SafekeeperState state; /* safekeeper state machine state */ TimestampTz startedConnAt; /* when connection attempt started */ @@ -380,7 +377,6 @@ typedef struct Safekeeper AppendResponse appendResponse; /* feedback for master */ } Safekeeper; - extern PGDLLIMPORT void WalProposerMain(Datum main_arg); void WalProposerBroadcast(XLogRecPtr startpos, XLogRecPtr endpos); void WalProposerPoll(void); diff --git a/pgxn/neon_test_utils/neontest.c b/pgxn/neon_test_utils/neontest.c index 07bd7bdd28..e0cea4177b 100644 --- a/pgxn/neon_test_utils/neontest.c +++ b/pgxn/neon_test_utils/neontest.c @@ -36,13 +36,13 @@ PG_FUNCTION_INFO_V1(get_raw_page_at_lsn_ex); PG_FUNCTION_INFO_V1(neon_xlogflush); /* - * Linkage to functions in zenith module. + * Linkage to functions in neon module. * The signature here would need to be updated whenever function parameters change in pagestore_smgr.c */ -typedef void (*zenith_read_at_lsn_type) (RelFileNode rnode, ForkNumber forkNum, BlockNumber blkno, - XLogRecPtr request_lsn, bool request_latest, char *buffer); +typedef void (*neon_read_at_lsn_type) (RelFileNode rnode, ForkNumber forkNum, BlockNumber blkno, + XLogRecPtr request_lsn, bool request_latest, char *buffer); -static zenith_read_at_lsn_type zenith_read_at_lsn_ptr; +static neon_read_at_lsn_type neon_read_at_lsn_ptr; /* * Module initialize function: fetch function pointers for cross-module calls. 
@@ -51,13 +51,13 @@ void _PG_init(void) { /* Asserts verify that typedefs above match original declarations */ - AssertVariableIsOfType(&zenith_read_at_lsn, zenith_read_at_lsn_type); - zenith_read_at_lsn_ptr = (zenith_read_at_lsn_type) - load_external_function("$libdir/neon", "zenith_read_at_lsn", + AssertVariableIsOfType(&neon_read_at_lsn, neon_read_at_lsn_type); + neon_read_at_lsn_ptr = (neon_read_at_lsn_type) + load_external_function("$libdir/neon", "neon_read_at_lsn", true, NULL); } -#define zenith_read_at_lsn zenith_read_at_lsn_ptr +#define neon_read_at_lsn neon_read_at_lsn_ptr /* * test_consume_xids(int4), for rapidly consuming XIDs, to test wraparound. @@ -96,7 +96,7 @@ test_consume_xids(PG_FUNCTION_ARGS) Datum clear_buffer_cache(PG_FUNCTION_ARGS) { - bool save_zenith_test_evict; + bool save_neon_test_evict; /* * Temporarily set the zenith_test_evict GUC, so that when we pin and @@ -104,7 +104,7 @@ clear_buffer_cache(PG_FUNCTION_ARGS) * buffers, as there is no explicit "evict this buffer" function in the * buffer manager. */ - save_zenith_test_evict = zenith_test_evict; + save_neon_test_evict = zenith_test_evict; zenith_test_evict = true; PG_TRY(); { @@ -149,14 +149,13 @@ clear_buffer_cache(PG_FUNCTION_ARGS) PG_FINALLY(); { /* restore the GUC */ - zenith_test_evict = save_zenith_test_evict; + zenith_test_evict = save_neon_test_evict; } PG_END_TRY(); PG_RETURN_VOID(); } - /* * Reads the page from page server without buffer cache * usage mimics get_raw_page() in pageinspect, but offers reading versions at specific LSN @@ -232,7 +231,6 @@ get_raw_page_at_lsn(PG_FUNCTION_ARGS) (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("cannot access temporary tables of other sessions"))); - forknum = forkname_to_number(text_to_cstring(forkname)); /* Initialize buffer to copy to */ @@ -240,7 +238,7 @@ get_raw_page_at_lsn(PG_FUNCTION_ARGS) SET_VARSIZE(raw_page, BLCKSZ + VARHDRSZ); raw_page_data = VARDATA(raw_page); - zenith_read_at_lsn(rel->rd_node, forknum, blkno, read_lsn, request_latest, raw_page_data); + neon_read_at_lsn(rel->rd_node, forknum, blkno, read_lsn, request_latest, raw_page_data); relation_close(rel, AccessShareLock); @@ -272,8 +270,7 @@ get_raw_page_at_lsn_ex(PG_FUNCTION_ARGS) RelFileNode rnode = { .spcNode = PG_GETARG_OID(0), .dbNode = PG_GETARG_OID(1), - .relNode = PG_GETARG_OID(2) - }; + .relNode = PG_GETARG_OID(2)}; ForkNumber forknum = PG_GETARG_UINT32(3); @@ -281,14 +278,13 @@ get_raw_page_at_lsn_ex(PG_FUNCTION_ARGS) bool request_latest = PG_ARGISNULL(5); uint64 read_lsn = request_latest ? 
GetXLogInsertRecPtr() : PG_GETARG_INT64(5); - /* Initialize buffer to copy to */ bytea *raw_page = (bytea *) palloc(BLCKSZ + VARHDRSZ); SET_VARSIZE(raw_page, BLCKSZ + VARHDRSZ); raw_page_data = VARDATA(raw_page); - zenith_read_at_lsn(rnode, forknum, blkno, read_lsn, request_latest, raw_page_data); + neon_read_at_lsn(rnode, forknum, blkno, read_lsn, request_latest, raw_page_data); PG_RETURN_BYTEA_P(raw_page); } } diff --git a/proxy/Cargo.toml b/proxy/Cargo.toml index 5a450793f1..5417f4f2b3 100644 --- a/proxy/Cargo.toml +++ b/proxy/Cargo.toml @@ -32,7 +32,7 @@ sha2 = "0.10.2" socket2 = "0.4.4" thiserror = "1.0.30" tokio = { version = "1.17", features = ["macros"] } -tokio-postgres = { git = "https://github.com/zenithdb/rust-postgres.git", rev="d052ee8b86fff9897c77b0fe89ea9daba0e1fa38" } +tokio-postgres = { git = "https://github.com/neondatabase/rust-postgres.git", rev="d052ee8b86fff9897c77b0fe89ea9daba0e1fa38" } tokio-rustls = "0.23.0" url = "2.2.2" git-version = "0.3.5" diff --git a/pyproject.toml b/pyproject.toml index ec166ea7cd..9c2aa39c7c 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,5 +1,5 @@ [tool.poetry] -name = "zenith" +name = "neon" version = "0.1.0" description = "" authors = [] diff --git a/safekeeper/Cargo.toml b/safekeeper/Cargo.toml index 4ed30413e2..cae095c3c2 100644 --- a/safekeeper/Cargo.toml +++ b/safekeeper/Cargo.toml @@ -14,8 +14,8 @@ tracing = "0.1.27" clap = "3.0" daemonize = "0.4.1" tokio = { version = "1.17", features = ["macros", "fs"] } -postgres-protocol = { git = "https://github.com/zenithdb/rust-postgres.git", rev="d052ee8b86fff9897c77b0fe89ea9daba0e1fa38" } -postgres = { git = "https://github.com/zenithdb/rust-postgres.git", rev="d052ee8b86fff9897c77b0fe89ea9daba0e1fa38" } +postgres-protocol = { git = "https://github.com/neondatabase/rust-postgres.git", rev="d052ee8b86fff9897c77b0fe89ea9daba0e1fa38" } +postgres = { git = "https://github.com/neondatabase/rust-postgres.git", rev="d052ee8b86fff9897c77b0fe89ea9daba0e1fa38" } anyhow = "1.0" crc32c = "0.6.0" humantime = "2.1.0" @@ -25,7 +25,7 @@ serde = { version = "1.0", features = ["derive"] } serde_with = "1.12.0" hex = "0.4.3" const_format = "0.2.21" -tokio-postgres = { git = "https://github.com/zenithdb/rust-postgres.git", rev="d052ee8b86fff9897c77b0fe89ea9daba0e1fa38" } +tokio-postgres = { git = "https://github.com/neondatabase/rust-postgres.git", rev="d052ee8b86fff9897c77b0fe89ea9daba0e1fa38" } git-version = "0.3.5" async-trait = "0.1" once_cell = "1.13.0" diff --git a/safekeeper/src/bin/safekeeper.rs b/safekeeper/src/bin/safekeeper.rs index 244c793250..d518ac01cc 100644 --- a/safekeeper/src/bin/safekeeper.rs +++ b/safekeeper/src/bin/safekeeper.rs @@ -30,8 +30,8 @@ use safekeeper::wal_service; use safekeeper::SafeKeeperConf; use utils::auth::JwtAuth; use utils::{ - http::endpoint, logging, project_git_version, shutdown::exit_now, signals, tcp_listener, - zid::NodeId, + http::endpoint, id::NodeId, logging, project_git_version, shutdown::exit_now, signals, + tcp_listener, }; const LOCK_FILE_NAME: &str = "safekeeper.lock"; @@ -39,7 +39,7 @@ const ID_FILE_NAME: &str = "safekeeper.id"; project_git_version!(GIT_VERSION); fn main() -> anyhow::Result<()> { - let arg_matches = App::new("Zenith safekeeper") + let arg_matches = App::new("Neon safekeeper") .about("Store WAL stream to local file system and push it to WAL receivers") .version(GIT_VERSION) .arg( diff --git a/safekeeper/src/broker.rs b/safekeeper/src/broker.rs index ce66131700..f276fad613 100644 --- a/safekeeper/src/broker.rs +++ 
b/safekeeper/src/broker.rs @@ -22,7 +22,7 @@ use etcd_broker::{ subscription_key::{OperationKind, SkOperationKind, SubscriptionKey}, Client, PutOptions, }; -use utils::zid::{NodeId, ZTenantTimelineId}; +use utils::id::{NodeId, TenantTimelineId}; const RETRY_INTERVAL_MSEC: u64 = 1000; const PUSH_INTERVAL_MSEC: u64 = 1000; @@ -45,7 +45,7 @@ pub fn thread_main(conf: SafeKeeperConf) { /// Key to per timeline per safekeeper data. fn timeline_safekeeper_path( broker_etcd_prefix: String, - zttid: ZTenantTimelineId, + zttid: TenantTimelineId, sk_id: NodeId, ) -> String { format!( @@ -162,12 +162,12 @@ pub fn get_candiate_name(system_id: NodeId) -> String { } async fn push_sk_info( - zttid: ZTenantTimelineId, + zttid: TenantTimelineId, mut client: Client, key: String, sk_info: SkTimelineInfo, mut lease: Lease, -) -> anyhow::Result<(ZTenantTimelineId, Lease)> { +) -> anyhow::Result<(TenantTimelineId, Lease)> { let put_opts = PutOptions::new().with_lease(lease.id); client .put( @@ -202,7 +202,7 @@ struct Lease { /// Push once in a while data about all active timelines to the broker. async fn push_loop(conf: SafeKeeperConf) -> anyhow::Result<()> { let mut client = Client::connect(&conf.broker_endpoints, None).await?; - let mut leases: HashMap = HashMap::new(); + let mut leases: HashMap = HashMap::new(); let push_interval = Duration::from_millis(PUSH_INTERVAL_MSEC); loop { diff --git a/safekeeper/src/control_file.rs b/safekeeper/src/control_file.rs index 7fc75246e1..ff23f0360f 100644 --- a/safekeeper/src/control_file.rs +++ b/safekeeper/src/control_file.rs @@ -14,7 +14,7 @@ use tracing::*; use crate::control_file_upgrade::upgrade_control_file; use crate::safekeeper::{SafeKeeperState, SK_FORMAT_VERSION, SK_MAGIC}; use metrics::{register_histogram_vec, Histogram, HistogramVec, DISK_WRITE_SECONDS_BUCKETS}; -use utils::{bin_ser::LeSer, zid::ZTenantTimelineId}; +use utils::{bin_ser::LeSer, id::TenantTimelineId}; use crate::SafeKeeperConf; @@ -55,7 +55,7 @@ pub struct FileStorage { } impl FileStorage { - pub fn restore_new(zttid: &ZTenantTimelineId, conf: &SafeKeeperConf) -> Result { + pub fn restore_new(zttid: &TenantTimelineId, conf: &SafeKeeperConf) -> Result { let timeline_dir = conf.timeline_dir(zttid); let tenant_id = zttid.tenant_id.to_string(); let timeline_id = zttid.timeline_id.to_string(); @@ -72,7 +72,7 @@ impl FileStorage { } pub fn create_new( - zttid: &ZTenantTimelineId, + zttid: &TenantTimelineId, conf: &SafeKeeperConf, state: SafeKeeperState, ) -> Result { @@ -115,7 +115,7 @@ impl FileStorage { // Load control file for given zttid at path specified by conf. 
pub fn load_control_file_conf( conf: &SafeKeeperConf, - zttid: &ZTenantTimelineId, + zttid: &TenantTimelineId, ) -> Result { let path = conf.timeline_dir(zttid).join(CONTROL_FILE_NAME); Self::load_control_file(path) @@ -252,7 +252,7 @@ mod test { use crate::{safekeeper::SafeKeeperState, SafeKeeperConf}; use anyhow::Result; use std::fs; - use utils::{lsn::Lsn, zid::ZTenantTimelineId}; + use utils::{id::TenantTimelineId, lsn::Lsn}; fn stub_conf() -> SafeKeeperConf { let workdir = tempfile::tempdir().unwrap().into_path(); @@ -264,7 +264,7 @@ mod test { fn load_from_control_file( conf: &SafeKeeperConf, - zttid: &ZTenantTimelineId, + zttid: &TenantTimelineId, ) -> Result<(FileStorage, SafeKeeperState)> { fs::create_dir_all(&conf.timeline_dir(zttid)).expect("failed to create timeline dir"); Ok(( @@ -275,7 +275,7 @@ mod test { fn create( conf: &SafeKeeperConf, - zttid: &ZTenantTimelineId, + zttid: &TenantTimelineId, ) -> Result<(FileStorage, SafeKeeperState)> { fs::create_dir_all(&conf.timeline_dir(zttid)).expect("failed to create timeline dir"); let state = SafeKeeperState::empty(); @@ -286,7 +286,7 @@ mod test { #[test] fn test_read_write_safekeeper_state() { let conf = stub_conf(); - let zttid = ZTenantTimelineId::generate(); + let zttid = TenantTimelineId::generate(); { let (mut storage, mut state) = create(&conf, &zttid).expect("failed to create state"); // change something @@ -301,7 +301,7 @@ mod test { #[test] fn test_safekeeper_state_checksum_mismatch() { let conf = stub_conf(); - let zttid = ZTenantTimelineId::generate(); + let zttid = TenantTimelineId::generate(); { let (mut storage, mut state) = create(&conf, &zttid).expect("failed to read state"); diff --git a/safekeeper/src/control_file_upgrade.rs b/safekeeper/src/control_file_upgrade.rs index 91d2f61c10..87204d6b49 100644 --- a/safekeeper/src/control_file_upgrade.rs +++ b/safekeeper/src/control_file_upgrade.rs @@ -7,9 +7,9 @@ use serde::{Deserialize, Serialize}; use tracing::*; use utils::{ bin_ser::LeSer, + id::{TenantId, TimelineId}, lsn::Lsn, pq_proto::SystemId, - zid::{ZTenantId, ZTimelineId}, }; /// Persistent consensus state of the acceptor. 
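[Editor's aside, not part of the patch: the control-file code above switches its imports from `utils::zid` to `utils::id`, but the on-disk format is untouched; tenant and timeline IDs are still 16 raw bytes that round-trip as 32 hex characters, as the `#[serde(with = "hex")]` fields in the upgrade structs below show. A minimal, std-only sketch of such a 16-byte hex ID newtype; this is illustrative only and is not the actual `utils::id` implementation, which this patch merely renames.]

use std::fmt;
use std::str::FromStr;

// Stand-in for the renamed ID type; the real one lives in libs/utils/src/id.rs.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
struct TimelineId([u8; 16]);

impl fmt::Display for TimelineId {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // Render as 32 lowercase hex characters.
        for b in self.0 {
            write!(f, "{:02x}", b)?;
        }
        Ok(())
    }
}

impl FromStr for TimelineId {
    type Err = String;

    fn from_str(s: &str) -> Result<Self, Self::Err> {
        if s.len() != 32 || !s.is_ascii() {
            return Err(format!("expected 32 hex chars, got {s:?}"));
        }
        let mut bytes = [0u8; 16];
        for (i, chunk) in s.as_bytes().chunks(2).enumerate() {
            // Decode each pair of hex digits into one byte.
            let hi = (chunk[0] as char).to_digit(16).ok_or_else(|| format!("bad hex digit in {s}"))?;
            let lo = (chunk[1] as char).to_digit(16).ok_or_else(|| format!("bad hex digit in {s}"))?;
            bytes[i] = ((hi << 4) | lo) as u8;
        }
        Ok(TimelineId(bytes))
    }
}

fn main() {
    let id: TimelineId = "0123456789abcdef0123456789abcdef".parse().unwrap();
    assert_eq!(id.to_string(), "0123456789abcdef0123456789abcdef");
    println!("round-tripped: {id}");
}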
@@ -45,9 +45,8 @@ pub struct ServerInfoV2 { /// Postgres server version pub pg_version: u32, pub system_id: SystemId, - pub tenant_id: ZTenantId, - /// Zenith timelineid - pub ztli: ZTimelineId, + pub tenant_id: TenantId, + pub timeline_id: TimelineId, pub wal_seg_size: u32, } @@ -76,10 +75,9 @@ pub struct ServerInfoV3 { pub pg_version: u32, pub system_id: SystemId, #[serde(with = "hex")] - pub tenant_id: ZTenantId, - /// Zenith timelineid + pub tenant_id: TenantId, #[serde(with = "hex")] - pub timeline_id: ZTimelineId, + pub timeline_id: TimelineId, pub wal_seg_size: u32, } @@ -106,10 +104,9 @@ pub struct SafeKeeperStateV3 { #[derive(Debug, Clone, Serialize, Deserialize)] pub struct SafeKeeperStateV4 { #[serde(with = "hex")] - pub tenant_id: ZTenantId, - /// Zenith timelineid + pub tenant_id: TenantId, #[serde(with = "hex")] - pub timeline_id: ZTimelineId, + pub timeline_id: TimelineId, /// persistent acceptor state pub acceptor_state: AcceptorState, /// information about server @@ -154,7 +151,7 @@ pub fn upgrade_control_file(buf: &[u8], version: u32) -> Result }; return Ok(SafeKeeperState { tenant_id: oldstate.server.tenant_id, - timeline_id: oldstate.server.ztli, + timeline_id: oldstate.server.timeline_id, acceptor_state: ac, server: ServerInfo { pg_version: oldstate.server.pg_version, @@ -181,7 +178,7 @@ pub fn upgrade_control_file(buf: &[u8], version: u32) -> Result }; return Ok(SafeKeeperState { tenant_id: oldstate.server.tenant_id, - timeline_id: oldstate.server.ztli, + timeline_id: oldstate.server.timeline_id, acceptor_state: oldstate.acceptor_state, server, proposer_uuid: oldstate.proposer_uuid, @@ -193,9 +190,9 @@ pub fn upgrade_control_file(buf: &[u8], version: u32) -> Result remote_consistent_lsn: Lsn(0), peers: Peers(vec![]), }); - // migrate to moving ztenantid/ztli to the top and adding some lsns + // migrate to moving tenant_id/timeline_id to the top and adding some lsns } else if version == 3 { - info!("reading safekeeper control file version {}", version); + info!("reading safekeeper control file version {version}"); let oldstate = SafeKeeperStateV3::des(&buf[..buf.len()])?; let server = ServerInfo { pg_version: oldstate.server.pg_version, diff --git a/safekeeper/src/handler.rs b/safekeeper/src/handler.rs index 3e301259ed..41b9ad66e1 100644 --- a/safekeeper/src/handler.rs +++ b/safekeeper/src/handler.rs @@ -14,10 +14,10 @@ use regex::Regex; use std::sync::Arc; use tracing::info; use utils::{ + id::{TenantId, TenantTimelineId, TimelineId}, lsn::Lsn, postgres_backend::{self, PostgresBackend}, pq_proto::{BeMessage, FeStartupPacket, RowDescriptor, INT4_OID, TEXT_OID}, - zid::{ZTenantId, ZTenantTimelineId, ZTimelineId}, }; /// Safekeeper handler of postgres commands @@ -25,8 +25,8 @@ pub struct SafekeeperPostgresHandler { pub conf: SafeKeeperConf, /// assigned application name pub appname: Option, - pub ztenantid: Option, - pub ztimelineid: Option, + pub tenant_id: Option, + pub timeline_id: Option, pub timeline: Option>, } @@ -63,17 +63,17 @@ fn parse_cmd(cmd: &str) -> Result { } impl postgres_backend::Handler for SafekeeperPostgresHandler { - // ztenant id and ztimeline id are passed in connection string params + // tenant_id and timeline_id are passed in connection string params fn startup(&mut self, _pgb: &mut PostgresBackend, sm: &FeStartupPacket) -> Result<()> { if let FeStartupPacket::StartupMessage { params, .. 
} = sm { if let Some(options) = params.options_raw() { for opt in options { match opt.split_once('=') { - Some(("ztenantid", value)) => { - self.ztenantid = Some(value.parse()?); + Some(("tenant_id", value)) => { + self.tenant_id = Some(value.parse()?); } - Some(("ztimelineid", value)) => { - self.ztimelineid = Some(value.parse()?); + Some(("timeline_id", value)) => { + self.timeline_id = Some(value.parse()?); } _ => continue, } @@ -95,18 +95,18 @@ impl postgres_backend::Handler for SafekeeperPostgresHandler { info!( "got query {:?} in timeline {:?}", - query_string, self.ztimelineid + query_string, self.timeline_id ); let create = !(matches!(cmd, SafekeeperPostgresCommand::StartReplication { .. }) || matches!(cmd, SafekeeperPostgresCommand::IdentifySystem)); - let tenantid = self.ztenantid.context("tenantid is required")?; - let timelineid = self.ztimelineid.context("timelineid is required")?; + let tenant_id = self.tenant_id.context("tenant_id is required")?; + let timeline_id = self.timeline_id.context("timeline_id is required")?; if self.timeline.is_none() { self.timeline.set( &self.conf, - ZTenantTimelineId::new(tenantid, timelineid), + TenantTimelineId::new(tenant_id, timeline_id), create, )?; } @@ -121,7 +121,7 @@ impl postgres_backend::Handler for SafekeeperPostgresHandler { SafekeeperPostgresCommand::IdentifySystem => self.handle_identify_system(pgb), SafekeeperPostgresCommand::JSONCtrl { ref cmd } => handle_json_ctrl(self, pgb, cmd), } - .context(format!("timeline {timelineid}"))?; + .context(format!("timeline {timeline_id}"))?; Ok(()) } @@ -132,8 +132,8 @@ impl SafekeeperPostgresHandler { SafekeeperPostgresHandler { conf, appname: None, - ztenantid: None, - ztimelineid: None, + tenant_id: None, + timeline_id: None, timeline: None, } } diff --git a/safekeeper/src/http/models.rs b/safekeeper/src/http/models.rs index 4b3ae7798e..e13ea50eaf 100644 --- a/safekeeper/src/http/models.rs +++ b/safekeeper/src/http/models.rs @@ -1,8 +1,8 @@ use serde::{Deserialize, Serialize}; -use utils::zid::{NodeId, ZTimelineId}; +use utils::id::{NodeId, TimelineId}; #[derive(Serialize, Deserialize)] pub struct TimelineCreateRequest { - pub timeline_id: ZTimelineId, + pub timeline_id: TimelineId, pub peer_ids: Vec, } diff --git a/safekeeper/src/http/routes.rs b/safekeeper/src/http/routes.rs index 13356c5921..14c9414c09 100644 --- a/safekeeper/src/http/routes.rs +++ b/safekeeper/src/http/routes.rs @@ -21,8 +21,8 @@ use utils::{ request::{ensure_no_body, parse_request_param}, RequestExt, RouterBuilder, }, + id::{NodeId, TenantId, TenantTimelineId, TimelineId}, lsn::Lsn, - zid::{NodeId, ZTenantId, ZTenantTimelineId, ZTimelineId}, }; use super::models::TimelineCreateRequest; @@ -68,9 +68,9 @@ struct AcceptorStateStatus { #[derive(Debug, Serialize)] struct TimelineStatus { #[serde(serialize_with = "display_serialize")] - tenant_id: ZTenantId, + tenant_id: TenantId, #[serde(serialize_with = "display_serialize")] - timeline_id: ZTimelineId, + timeline_id: TimelineId, acceptor_state: AcceptorStateStatus, #[serde(serialize_with = "display_serialize")] flush_lsn: Lsn, @@ -90,7 +90,7 @@ struct TimelineStatus { /// Report info about timeline. 
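[Editor's aside, not part of the patch: both ends of the protocol rename the connection options together. The walproposer now builds `options='-c timeline_id=... tenant_id=...'` and the safekeeper handler's startup parsing above matches on exactly those keys, silently skipping anything else. A self-contained sketch of that matching; simplified in that the real handler parses the values into typed IDs and errors on bad hex.]

// Parse tenant_id/timeline_id out of a startup packet's option list,
// mirroring the split_once-based matching in SafekeeperPostgresHandler.
fn parse_id_options(options: &[&str]) -> (Option<String>, Option<String>) {
    let mut tenant_id = None;
    let mut timeline_id = None;
    for opt in options {
        match opt.split_once('=') {
            Some(("tenant_id", value)) => tenant_id = Some(value.to_string()),
            Some(("timeline_id", value)) => timeline_id = Some(value.to_string()),
            // Unknown options are ignored, as in the handler.
            _ => continue,
        }
    }
    (tenant_id, timeline_id)
}

fn main() {
    let opts = [
        "timeline_id=0123456789abcdef0123456789abcdef",
        "tenant_id=fedcba9876543210fedcba9876543210",
    ];
    let (tenant, timeline) = parse_id_options(&opts);
    assert_eq!(tenant.as_deref(), Some("fedcba9876543210fedcba9876543210"));
    assert!(timeline.is_some());
}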
async fn timeline_status_handler(request: Request) -> Result, ApiError> { - let zttid = ZTenantTimelineId::new( + let zttid = TenantTimelineId::new( parse_request_param(&request, "tenant_id")?, parse_request_param(&request, "timeline_id")?, ); @@ -125,7 +125,7 @@ async fn timeline_status_handler(request: Request) -> Result) -> Result, ApiError> { let request_data: TimelineCreateRequest = json_request(&mut request).await?; - let zttid = ZTenantTimelineId { + let zttid = TenantTimelineId { tenant_id: parse_request_param(&request, "tenant_id")?, timeline_id: request_data.timeline_id, }; @@ -146,7 +146,7 @@ async fn timeline_create_handler(mut request: Request) -> Result, ) -> Result, ApiError> { - let zttid = ZTenantTimelineId::new( + let zttid = TenantTimelineId::new( parse_request_param(&request, "tenant_id")?, parse_request_param(&request, "timeline_id")?, ); @@ -181,7 +181,7 @@ async fn tenant_delete_force_handler( /// Used only in tests to hand craft required data. async fn record_safekeeper_info(mut request: Request) -> Result, ApiError> { - let zttid = ZTenantTimelineId::new( + let zttid = TenantTimelineId::new( parse_request_param(&request, "tenant_id")?, parse_request_param(&request, "timeline_id")?, ); diff --git a/safekeeper/src/json_ctrl.rs b/safekeeper/src/json_ctrl.rs index 16c1d36131..00fc43521b 100644 --- a/safekeeper/src/json_ctrl.rs +++ b/safekeeper/src/json_ctrl.rs @@ -97,8 +97,8 @@ fn prepare_safekeeper(spg: &mut SafekeeperPostgresHandler) -> Result<()> { pg_version: 0, // unknown proposer_id: [0u8; 16], system_id: 0, - ztli: spg.ztimelineid.unwrap(), - tenant_id: spg.ztenantid.unwrap(), + timeline_id: spg.timeline_id.unwrap(), + tenant_id: spg.tenant_id.unwrap(), tli: 0, wal_seg_size: WAL_SEGMENT_SIZE as u32, // 16MB, default for tests }); diff --git a/safekeeper/src/lib.rs b/safekeeper/src/lib.rs index 0335d61d3f..b466d5aab5 100644 --- a/safekeeper/src/lib.rs +++ b/safekeeper/src/lib.rs @@ -5,7 +5,7 @@ use std::path::PathBuf; use std::time::Duration; use url::Url; -use utils::zid::{NodeId, ZTenantId, ZTenantTimelineId}; +use utils::id::{NodeId, TenantId, TenantTimelineId}; pub mod broker; pub mod control_file; @@ -61,11 +61,11 @@ pub struct SafeKeeperConf { } impl SafeKeeperConf { - pub fn tenant_dir(&self, tenant_id: &ZTenantId) -> PathBuf { + pub fn tenant_dir(&self, tenant_id: &TenantId) -> PathBuf { self.workdir.join(tenant_id.to_string()) } - pub fn timeline_dir(&self, zttid: &ZTenantTimelineId) -> PathBuf { + pub fn timeline_dir(&self, zttid: &TenantTimelineId) -> PathBuf { self.tenant_dir(&zttid.tenant_id) .join(zttid.timeline_id.to_string()) } diff --git a/safekeeper/src/metrics.rs b/safekeeper/src/metrics.rs index c693035dd3..3fa3916266 100644 --- a/safekeeper/src/metrics.rs +++ b/safekeeper/src/metrics.rs @@ -8,7 +8,7 @@ use metrics::{ Gauge, IntGaugeVec, }; use postgres_ffi::XLogSegNo; -use utils::{lsn::Lsn, zid::ZTenantTimelineId}; +use utils::{id::TenantTimelineId, lsn::Lsn}; use crate::{ safekeeper::{SafeKeeperState, SafekeeperMemState}, @@ -16,7 +16,7 @@ use crate::{ }; pub struct FullTimelineInfo { - pub zttid: ZTenantTimelineId, + pub zttid: TenantTimelineId, pub replicas: Vec, pub wal_backup_active: bool, pub timeline_is_active: bool, diff --git a/safekeeper/src/receive_wal.rs b/safekeeper/src/receive_wal.rs index af4cfb6ba4..b0b6a73621 100644 --- a/safekeeper/src/receive_wal.rs +++ b/safekeeper/src/receive_wal.rs @@ -53,7 +53,7 @@ impl<'pg> ReceiveWalConn<'pg> { /// Receive WAL from wal_proposer pub fn run(&mut self, spg: &mut 
SafekeeperPostgresHandler) -> Result<()> { - let _enter = info_span!("WAL acceptor", timeline = %spg.ztimelineid.unwrap()).entered(); + let _enter = info_span!("WAL acceptor", timeline = %spg.timeline_id.unwrap()).entered(); // Notify the libpq client that it's allowed to send `CopyData` messages self.pg_backend diff --git a/safekeeper/src/safekeeper.rs b/safekeeper/src/safekeeper.rs index a2bdcb55e7..fa045eed90 100644 --- a/safekeeper/src/safekeeper.rs +++ b/safekeeper/src/safekeeper.rs @@ -19,9 +19,9 @@ use crate::send_wal::HotStandbyFeedback; use crate::wal_storage; use utils::{ bin_ser::LeSer, + id::{NodeId, TenantId, TenantTimelineId, TimelineId}, lsn::Lsn, pq_proto::{ReplicationFeedback, SystemId}, - zid::{NodeId, ZTenantId, ZTenantTimelineId, ZTimelineId}, }; pub const SK_MAGIC: u32 = 0xcafeceefu32; @@ -166,10 +166,9 @@ pub struct Peers(pub Vec<(NodeId, PeerInfo)>); #[derive(Debug, Clone, Serialize, Deserialize)] pub struct SafeKeeperState { #[serde(with = "hex")] - pub tenant_id: ZTenantId, - /// Zenith timelineid + pub tenant_id: TenantId, #[serde(with = "hex")] - pub timeline_id: ZTimelineId, + pub timeline_id: TimelineId, /// persistent acceptor state pub acceptor_state: AcceptorState, /// information about server @@ -219,7 +218,7 @@ pub struct SafekeeperMemState { } impl SafeKeeperState { - pub fn new(zttid: &ZTenantTimelineId, peers: Vec) -> SafeKeeperState { + pub fn new(zttid: &TenantTimelineId, peers: Vec) -> SafeKeeperState { SafeKeeperState { tenant_id: zttid.tenant_id, timeline_id: zttid.timeline_id, @@ -245,7 +244,7 @@ impl SafeKeeperState { #[cfg(test)] pub fn empty() -> Self { - SafeKeeperState::new(&ZTenantTimelineId::empty(), vec![]) + SafeKeeperState::new(&TenantTimelineId::empty(), vec![]) } } @@ -260,9 +259,8 @@ pub struct ProposerGreeting { pub pg_version: u32, pub proposer_id: PgUuid, pub system_id: SystemId, - /// Zenith timelineid - pub ztli: ZTimelineId, - pub tenant_id: ZTenantId, + pub timeline_id: TimelineId, + pub tenant_id: TenantId, pub tli: TimeLineID, pub wal_seg_size: u32, } @@ -507,13 +505,13 @@ where { // constructor pub fn new( - ztli: ZTimelineId, + timeline_id: TimelineId, state: CTRL, mut wal_store: WAL, node_id: NodeId, ) -> Result> { - if state.timeline_id != ZTimelineId::from([0u8; 16]) && ztli != state.timeline_id { - bail!("Calling SafeKeeper::new with inconsistent ztli ({}) and SafeKeeperState.server.timeline_id ({})", ztli, state.timeline_id); + if state.timeline_id != TimelineId::from([0u8; 16]) && timeline_id != state.timeline_id { + bail!("Calling SafeKeeper::new with inconsistent timeline_id ({}) and SafeKeeperState.server.timeline_id ({})", timeline_id, state.timeline_id); } // initialize wal_store, if state is already initialized @@ -600,10 +598,10 @@ where self.state.tenant_id ); } - if msg.ztli != self.state.timeline_id { + if msg.timeline_id != self.state.timeline_id { bail!( "invalid timeline ID, got {}, expected {}", - msg.ztli, + msg.timeline_id, self.state.timeline_id ); } @@ -982,9 +980,9 @@ mod tests { persisted_state: SafeKeeperState::empty(), }; let wal_store = DummyWalStore { lsn: Lsn(0) }; - let ztli = ZTimelineId::from([0u8; 16]); + let timeline_id = TimelineId::from([0u8; 16]); - let mut sk = SafeKeeper::new(ztli, storage, wal_store, NodeId(0)).unwrap(); + let mut sk = SafeKeeper::new(timeline_id, storage, wal_store, NodeId(0)).unwrap(); // check voting for 1 is ok let vote_request = ProposerAcceptorMessage::VoteRequest(VoteRequest { term: 1 }); @@ -1000,7 +998,7 @@ mod tests { persisted_state: state, }; - sk = 
SafeKeeper::new(ztli, storage, sk.wal_store, NodeId(0)).unwrap(); + sk = SafeKeeper::new(timeline_id, storage, sk.wal_store, NodeId(0)).unwrap(); // and ensure voting second time for 1 is not ok vote_resp = sk.process_msg(&vote_request); @@ -1016,9 +1014,9 @@ mod tests { persisted_state: SafeKeeperState::empty(), }; let wal_store = DummyWalStore { lsn: Lsn(0) }; - let ztli = ZTimelineId::from([0u8; 16]); + let timeline_id = TimelineId::from([0u8; 16]); - let mut sk = SafeKeeper::new(ztli, storage, wal_store, NodeId(0)).unwrap(); + let mut sk = SafeKeeper::new(timeline_id, storage, wal_store, NodeId(0)).unwrap(); let mut ar_hdr = AppendRequestHeader { term: 1, diff --git a/safekeeper/src/send_wal.rs b/safekeeper/src/send_wal.rs index 293cf67c57..375b6eea18 100644 --- a/safekeeper/src/send_wal.rs +++ b/safekeeper/src/send_wal.rs @@ -30,7 +30,7 @@ use utils::{ // See: https://www.postgresql.org/docs/13/protocol-replication.html const HOT_STANDBY_FEEDBACK_TAG_BYTE: u8 = b'h'; const STANDBY_STATUS_UPDATE_TAG_BYTE: u8 = b'r'; -// zenith extension of replication protocol +// neon extension of replication protocol const NEON_STATUS_UPDATE_TAG_BYTE: u8 = b'z'; type FullTransactionId = u64; @@ -105,7 +105,7 @@ impl ReplicationConn { match &msg { FeMessage::CopyData(m) => { // There's three possible data messages that the client is supposed to send here: - // `HotStandbyFeedback` and `StandbyStatusUpdate` and `ZenithStandbyFeedback`. + // `HotStandbyFeedback` and `StandbyStatusUpdate` and `NeonStandbyFeedback`. match m.first().cloned() { Some(HOT_STANDBY_FEEDBACK_TAG_BYTE) => { @@ -165,12 +165,12 @@ impl ReplicationConn { pgb: &mut PostgresBackend, mut start_pos: Lsn, ) -> Result<()> { - let _enter = info_span!("WAL sender", timeline = %spg.ztimelineid.unwrap()).entered(); + let _enter = info_span!("WAL sender", timeline = %spg.timeline_id.unwrap()).entered(); // spawn the background thread which receives HotStandbyFeedback messages. let bg_timeline = Arc::clone(spg.timeline.get()); let bg_stream_in = self.stream_in.take().unwrap(); - let bg_timeline_id = spg.ztimelineid.unwrap(); + let bg_timeline_id = spg.timeline_id.unwrap(); let state = ReplicaState::new(); // This replica_id is used below to check if it's time to stop replication. diff --git a/safekeeper/src/timeline.rs b/safekeeper/src/timeline.rs index 8d101e6ff6..cf317c41c3 100644 --- a/safekeeper/src/timeline.rs +++ b/safekeeper/src/timeline.rs @@ -21,9 +21,9 @@ use tokio::sync::mpsc::Sender; use tracing::*; use utils::{ + id::{NodeId, TenantId, TenantTimelineId}, lsn::Lsn, pq_proto::ReplicationFeedback, - zid::{NodeId, ZTenantId, ZTenantTimelineId}, }; use crate::control_file; @@ -98,7 +98,7 @@ impl SharedState { /// Initialize timeline state, creating control file fn create( conf: &SafeKeeperConf, - zttid: &ZTenantTimelineId, + zttid: &TenantTimelineId, peer_ids: Vec, ) -> Result { let state = SafeKeeperState::new(zttid, peer_ids); @@ -119,7 +119,7 @@ impl SharedState { /// Restore SharedState from control file. /// If file doesn't exist, bails out. - fn restore(conf: &SafeKeeperConf, zttid: &ZTenantTimelineId) -> Result { + fn restore(conf: &SafeKeeperConf, zttid: &TenantTimelineId) -> Result { let control_store = control_file::FileStorage::restore_new(zttid, conf)?; let wal_store = wal_storage::PhysicalStorage::new(zttid, conf); @@ -143,7 +143,7 @@ impl SharedState { /// Mark timeline active/inactive and return whether s3 offloading requires /// start/stop action. 
- fn update_status(&mut self, ttid: ZTenantTimelineId) -> bool { + fn update_status(&mut self, ttid: TenantTimelineId) -> bool { let is_active = self.is_active(); if self.active != is_active { info!("timeline {} active={} now", ttid, is_active); @@ -213,7 +213,7 @@ impl SharedState { // // To choose what feedback to use and resend to compute node, // we need to know which pageserver compute node considers to be main. - // See https://github.com/zenithdb/zenith/issues/1171 + // See https://github.com/neondatabase/neon/issues/1171 // if let Some(pageserver_feedback) = state.pageserver_feedback { if let Some(acc_feedback) = acc.pageserver_feedback { @@ -227,7 +227,7 @@ impl SharedState { // last lsn received by pageserver // FIXME if multiple pageservers are streaming WAL, last_received_lsn must be tracked per pageserver. - // See https://github.com/zenithdb/zenith/issues/1171 + // See https://github.com/neondatabase/neon/issues/1171 acc.last_received_lsn = Lsn::from(pageserver_feedback.ps_writelsn); // When at least one pageserver has preserved data up to remote_consistent_lsn, @@ -256,11 +256,11 @@ impl SharedState { /// Database instance (tenant) pub struct Timeline { - pub zttid: ZTenantTimelineId, + pub zttid: TenantTimelineId, /// Sending here asks for wal backup launcher attention (start/stop /// offloading). Sending zttid instead of concrete command allows to do /// sending without timeline lock. - wal_backup_launcher_tx: Sender, + wal_backup_launcher_tx: Sender, commit_lsn_watch_tx: watch::Sender, /// For breeding receivers. commit_lsn_watch_rx: watch::Receiver, @@ -269,8 +269,8 @@ pub struct Timeline { impl Timeline { fn new( - zttid: ZTenantTimelineId, - wal_backup_launcher_tx: Sender, + zttid: TenantTimelineId, + wal_backup_launcher_tx: Sender, shared_state: SharedState, ) -> Timeline { let (commit_lsn_watch_tx, commit_lsn_watch_rx) = @@ -539,13 +539,13 @@ impl Timeline { // Utilities needed by various Connection-like objects pub trait TimelineTools { - fn set(&mut self, conf: &SafeKeeperConf, zttid: ZTenantTimelineId, create: bool) -> Result<()>; + fn set(&mut self, conf: &SafeKeeperConf, zttid: TenantTimelineId, create: bool) -> Result<()>; fn get(&self) -> &Arc; } impl TimelineTools for Option> { - fn set(&mut self, conf: &SafeKeeperConf, zttid: ZTenantTimelineId, create: bool) -> Result<()> { + fn set(&mut self, conf: &SafeKeeperConf, zttid: TenantTimelineId, create: bool) -> Result<()> { *self = Some(GlobalTimelines::get(conf, zttid, create)?); Ok(()) } @@ -556,8 +556,8 @@ impl TimelineTools for Option> { } struct GlobalTimelinesState { - timelines: HashMap>, - wal_backup_launcher_tx: Option>, + timelines: HashMap>, + wal_backup_launcher_tx: Option>, } static TIMELINES_STATE: Lazy> = Lazy::new(|| { @@ -577,7 +577,7 @@ pub struct TimelineDeleteForceResult { pub struct GlobalTimelines; impl GlobalTimelines { - pub fn init(wal_backup_launcher_tx: Sender) { + pub fn init(wal_backup_launcher_tx: Sender) { let mut state = TIMELINES_STATE.lock().unwrap(); assert!(state.wal_backup_launcher_tx.is_none()); state.wal_backup_launcher_tx = Some(wal_backup_launcher_tx); @@ -586,7 +586,7 @@ impl GlobalTimelines { fn create_internal( mut state: MutexGuard, conf: &SafeKeeperConf, - zttid: ZTenantTimelineId, + zttid: TenantTimelineId, peer_ids: Vec, ) -> Result> { match state.timelines.get(&zttid) { @@ -612,7 +612,7 @@ impl GlobalTimelines { pub fn create( conf: &SafeKeeperConf, - zttid: ZTenantTimelineId, + zttid: TenantTimelineId, peer_ids: Vec, ) -> Result> { let state = 
TIMELINES_STATE.lock().unwrap(); @@ -623,7 +623,7 @@ impl GlobalTimelines { /// If control file doesn't exist and create=false, bails out. pub fn get( conf: &SafeKeeperConf, - zttid: ZTenantTimelineId, + zttid: TenantTimelineId, create: bool, ) -> Result> { let _enter = info_span!("", timeline = %zttid.timeline_id).entered(); @@ -664,13 +664,12 @@ impl GlobalTimelines { } /// Get loaded timeline, if it exists. - pub fn get_loaded(zttid: ZTenantTimelineId) -> Option> { + pub fn get_loaded(zttid: TenantTimelineId) -> Option> { let state = TIMELINES_STATE.lock().unwrap(); state.timelines.get(&zttid).map(Arc::clone) } - /// Get ZTenantTimelineIDs of all active timelines. - pub fn get_active_timelines() -> HashSet { + pub fn get_active_timelines() -> HashSet { let state = TIMELINES_STATE.lock().unwrap(); state .timelines @@ -692,7 +691,7 @@ impl GlobalTimelines { fn delete_force_internal( conf: &SafeKeeperConf, - zttid: &ZTenantTimelineId, + zttid: &TenantTimelineId, was_active: bool, ) -> Result { match std::fs::remove_dir_all(conf.timeline_dir(zttid)) { @@ -721,7 +720,7 @@ impl GlobalTimelines { /// TODO: ensure all of the above never happens. pub async fn delete_force( conf: &SafeKeeperConf, - zttid: &ZTenantTimelineId, + zttid: &TenantTimelineId, ) -> Result { info!("deleting timeline {}", zttid); let timeline = TIMELINES_STATE.lock().unwrap().timelines.remove(zttid); @@ -737,8 +736,8 @@ impl GlobalTimelines { /// There may be a race if new timelines are created simultaneously. pub async fn delete_force_all_for_tenant( conf: &SafeKeeperConf, - tenant_id: &ZTenantId, - ) -> Result> { + tenant_id: &TenantId, + ) -> Result> { info!("deleting all timelines for tenant {}", tenant_id); let mut to_delete = HashMap::new(); { diff --git a/safekeeper/src/wal_backup.rs b/safekeeper/src/wal_backup.rs index 5d946e37a4..85e967e218 100644 --- a/safekeeper/src/wal_backup.rs +++ b/safekeeper/src/wal_backup.rs @@ -23,7 +23,7 @@ use tokio::sync::watch; use tokio::time::sleep; use tracing::*; -use utils::{lsn::Lsn, zid::ZTenantTimelineId}; +use utils::{id::TenantTimelineId, lsn::Lsn}; use crate::broker::{Election, ElectionLeader}; use crate::timeline::{GlobalTimelines, Timeline}; @@ -38,7 +38,7 @@ const UPLOAD_FAILURE_RETRY_MAX_MS: u64 = 5000; pub fn wal_backup_launcher_thread_main( conf: SafeKeeperConf, - wal_backup_launcher_rx: Receiver, + wal_backup_launcher_rx: Receiver, ) { let rt = Builder::new_multi_thread() .worker_threads(conf.backup_runtime_threads) @@ -53,7 +53,7 @@ pub fn wal_backup_launcher_thread_main( /// Check whether wal backup is required for timeline. If yes, mark that launcher is /// aware of current status and return the timeline. -fn is_wal_backup_required(zttid: ZTenantTimelineId) -> Option> { +fn is_wal_backup_required(zttid: TenantTimelineId) -> Option> { GlobalTimelines::get_loaded(zttid).filter(|t| t.wal_backup_attend()) } @@ -70,7 +70,7 @@ struct WalBackupTimelineEntry { /// Start per timeline task, if it makes sense for this safekeeper to offload. fn consider_start_task( conf: &SafeKeeperConf, - zttid: ZTenantTimelineId, + zttid: TenantTimelineId, task: &mut WalBackupTimelineEntry, ) { if !task.timeline.can_wal_backup() { @@ -117,7 +117,7 @@ const CHECK_TASKS_INTERVAL_MSEC: u64 = 1000; /// panics and separate elections from offloading itself. 
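[Editor's aside, not part of the patch: the WAL-backup launcher's channel deliberately carries only a `TenantTimelineId` rather than a start/stop command, so a timeline can signal for attention without holding its lock; the launcher then re-derives what to do and re-checks its whole task map on a timer. A compressed sketch of that shape; the names are hypothetical, the ID is a `String` stand-in, and the `tokio` runtime (with the `macros`, `rt`, `sync`, and `time` features) is assumed.]

use std::collections::HashMap;
use std::time::Duration;
use tokio::sync::mpsc;

type TenantTimelineId = String; // stand-in for utils::id::TenantTimelineId

async fn launcher_loop(mut rx: mpsc::Receiver<TenantTimelineId>) {
    // One entry per timeline the launcher knows needs offloading.
    let mut tasks: HashMap<TenantTimelineId, ()> = HashMap::new();
    // Mirrors CHECK_TASKS_INTERVAL_MSEC: re-evaluate all tasks periodically.
    let mut ticker = tokio::time::interval(Duration::from_millis(1000));
    loop {
        tokio::select! {
            Some(ttid) = rx.recv() => {
                // Sending just the ID (not a command) lets timelines signal
                // without taking locks; the launcher re-derives the intent.
                tasks.entry(ttid).or_insert(());
            }
            _ = ticker.tick() => {
                // Start or stop per-timeline offloading tasks here depending
                // on whether backup is still required (elided in this sketch).
                tasks.retain(|_ttid, _entry| true);
            }
        }
    }
}

#[tokio::main]
async fn main() {
    let (tx, rx) = mpsc::channel::<TenantTimelineId>(16);
    tx.send("example-ttid".to_string()).await.unwrap();
    launcher_loop(rx).await; // loops forever, like the real launcher
}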
async fn wal_backup_launcher_main_loop( conf: SafeKeeperConf, - mut wal_backup_launcher_rx: Receiver, + mut wal_backup_launcher_rx: Receiver, ) { info!( "WAL backup launcher started, remote config {:?}", @@ -135,7 +135,7 @@ async fn wal_backup_launcher_main_loop( // Presense in this map means launcher is aware s3 offloading is needed for // the timeline, but task is started only if it makes sense for to offload // from this safekeeper. - let mut tasks: HashMap = HashMap::new(); + let mut tasks: HashMap = HashMap::new(); let mut ticker = tokio::time::interval(Duration::from_millis(CHECK_TASKS_INTERVAL_MSEC)); loop { @@ -193,7 +193,7 @@ struct WalBackupTask { /// Offload single timeline. async fn backup_task_main( - zttid: ZTenantTimelineId, + zttid: TenantTimelineId, timeline_dir: PathBuf, mut shutdown_rx: Receiver<()>, election: Election, diff --git a/safekeeper/src/wal_storage.rs b/safekeeper/src/wal_storage.rs index 644237a00d..58b69f06e7 100644 --- a/safekeeper/src/wal_storage.rs +++ b/safekeeper/src/wal_storage.rs @@ -25,7 +25,7 @@ use std::path::{Path, PathBuf}; use tracing::*; -use utils::{lsn::Lsn, zid::ZTenantTimelineId}; +use utils::{id::TenantTimelineId, lsn::Lsn}; use crate::safekeeper::SafeKeeperState; @@ -86,7 +86,7 @@ struct WalStorageMetrics { } impl WalStorageMetrics { - fn new(zttid: &ZTenantTimelineId) -> Self { + fn new(zttid: &TenantTimelineId) -> Self { let tenant_id = zttid.tenant_id.to_string(); let timeline_id = zttid.timeline_id.to_string(); Self { @@ -130,7 +130,7 @@ pub trait Storage { /// When storage is just created, all LSNs are zeroes and there are no segments on disk. pub struct PhysicalStorage { metrics: WalStorageMetrics, - zttid: ZTenantTimelineId, + zttid: TenantTimelineId, timeline_dir: PathBuf, conf: SafeKeeperConf, @@ -161,7 +161,7 @@ pub struct PhysicalStorage { } impl PhysicalStorage { - pub fn new(zttid: &ZTenantTimelineId, conf: &SafeKeeperConf) -> PhysicalStorage { + pub fn new(zttid: &TenantTimelineId, conf: &SafeKeeperConf) -> PhysicalStorage { let timeline_dir = conf.timeline_dir(zttid); PhysicalStorage { metrics: WalStorageMetrics::new(zttid), diff --git a/scripts/generate_and_push_perf_report.sh b/scripts/generate_and_push_perf_report.sh index df84fa0dd8..9e03302b0f 100755 --- a/scripts/generate_and_push_perf_report.sh +++ b/scripts/generate_and_push_perf_report.sh @@ -5,8 +5,8 @@ set -eux -o pipefail SCRIPT_DIR="$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )" -echo "Uploading perf report to zenith pg" -# ingest per test results data into zenith backed postgres running in staging to build grafana reports on that data +echo "Uploading perf report to neon pg" +# ingest per test results data into neon backed postgres running in staging to build grafana reports on that data DATABASE_URL="$PERF_TEST_RESULT_CONNSTR" poetry run python "$SCRIPT_DIR"/ingest_perf_test_result.py --ingest "$REPORT_FROM" # Activate poetry's venv. 
Needed because git upload does not run in a project dir (it uses tmp to store the repository) @@ -16,8 +16,8 @@ DATABASE_URL="$PERF_TEST_RESULT_CONNSTR" poetry run python "$SCRIPT_DIR"/ingest_ echo "Uploading perf result to zenith-perf-data" scripts/git-upload \ - --repo=https://"$VIP_VAP_ACCESS_TOKEN"@github.com/zenithdb/zenith-perf-data.git \ - --message="add performance test result for $GITHUB_SHA zenith revision" \ + --repo=https://"$VIP_VAP_ACCESS_TOKEN"@github.com/neondatabase/zenith-perf-data.git \ + --message="add performance test result for $GITHUB_SHA neon revision" \ --branch=master \ copy "$REPORT_FROM" "data/$REPORT_TO" `# COPY FROM TO_RELATIVE`\ --merge \ diff --git a/scripts/perf_report_template.html b/scripts/perf_report_template.html index 2847e75a00..c86ab37c2d 100644 --- a/scripts/perf_report_template.html +++ b/scripts/perf_report_template.html @@ -19,7 +19,7 @@ } -
Zenith Performance Tests
+ Neon Performance Tests
{% for suit_name, suit_data in context.items() %}
Runs for {{ suit_name }}
@@ -38,7 +38,7 @@ {% for row in suit_data.rows %} - {{ row.revision[:6] }} + {{ row.revision[:6] }} {% for column_value in row.values %} {{ column_value.value }}{{column_value.ratio}} {% endfor %} diff --git a/test_runner/README.md b/test_runner/README.md index c7ec361d65..44751944b3 100644 --- a/test_runner/README.md +++ b/test_runner/README.md @@ -60,7 +60,7 @@ Useful environment variables: `TEST_OUTPUT`: Set the directory where test state and test output files should go. `TEST_SHARED_FIXTURES`: Try to re-use a single pageserver for all the tests. -`ZENITH_PAGESERVER_OVERRIDES`: add a `;`-separated set of configs that will be passed as +`NEON_PAGESERVER_OVERRIDES`: add a `;`-separated set of configs that will be passed as `--pageserver-config-override=${value}` parameter values when neon_local cli is invoked `RUST_LOG`: logging configuration to pass into Neon CLI diff --git a/test_runner/fixtures/benchmark_fixture.py b/test_runner/fixtures/benchmark_fixture.py index b9cdfdebc4..b5565dab0f 100644 --- a/test_runner/fixtures/benchmark_fixture.py +++ b/test_runner/fixtures/benchmark_fixture.py @@ -16,7 +16,7 @@ from typing import Iterator, Optional import pytest from _pytest.config import Config from _pytest.terminal import TerminalReporter -from fixtures.types import ZTenantId, ZTimelineId +from fixtures.types import TenantId, TimelineId """ This file contains fixtures for micro-benchmarks. @@ -365,11 +365,11 @@ class NeonBenchmarker: assert matches, f"metric {metric_name} not found" return int(round(float(matches.group(1)))) - def get_timeline_size(self, repo_dir: Path, tenantid: ZTenantId, timelineid: ZTimelineId): + def get_timeline_size(self, repo_dir: Path, tenant_id: TenantId, timeline_id: TimelineId): """ Calculate the on-disk size of a timeline """ - path = "{}/tenants/{}/timelines/{}".format(repo_dir, tenantid, timelineid) + path = f"{repo_dir}/tenants/{tenant_id}/timelines/{timeline_id}" totalbytes = 0 for root, dirs, files in os.walk(path): diff --git a/test_runner/fixtures/neon_fixtures.py b/test_runner/fixtures/neon_fixtures.py index 69c6d31315..0c03429f95 100644 --- a/test_runner/fixtures/neon_fixtures.py +++ b/test_runner/fixtures/neon_fixtures.py @@ -29,7 +29,7 @@ import pytest import requests from cached_property import cached_property from fixtures.log_helper import log -from fixtures.types import Lsn, ZTenantId, ZTimelineId +from fixtures.types import Lsn, TenantId, TimelineId # Type-related stuff from psycopg2.extensions import connection as PgConnection @@ -754,7 +754,7 @@ class NeonEnv: # generate initial tenant ID here instead of letting 'neon init' generate it, # so that we don't need to dig it out of the config file afterwards. 
- self.initial_tenant = ZTenantId.generate() + self.initial_tenant = TenantId.generate() # Create a config file corresponding to the options toml = textwrap.dedent( @@ -776,7 +776,7 @@ class NeonEnv: pg=self.port_distributor.get_port(), http=self.port_distributor.get_port(), ) - pageserver_auth_type = "ZenithJWT" if config.auth_enabled else "Trust" + pageserver_auth_type = "NeonJWT" if config.auth_enabled else "Trust" toml += textwrap.dedent( f""" @@ -841,7 +841,7 @@ class NeonEnv: """Get list of safekeeper endpoints suitable for safekeepers GUC""" return ",".join([f"localhost:{wa.port.pg}" for wa in self.safekeepers]) - def timeline_dir(self, tenant_id: ZTenantId, timeline_id: ZTimelineId) -> Path: + def timeline_dir(self, tenant_id: TenantId, timeline_id: TimelineId) -> Path: """Get a timeline directory's path based on the repo directory of the test environment""" return self.repo_dir / "tenants" / str(tenant_id) / "timelines" / str(timeline_id) @@ -971,7 +971,7 @@ class NeonPageserverHttpClient(requests.Session): assert isinstance(res_json, list) return res_json - def tenant_create(self, new_tenant_id: Optional[ZTenantId] = None) -> ZTenantId: + def tenant_create(self, new_tenant_id: Optional[TenantId] = None) -> TenantId: res = self.post( f"http://localhost:{self.port}/v1/tenant", json={ @@ -983,24 +983,24 @@ class NeonPageserverHttpClient(requests.Session): raise Exception(f"could not create tenant: already exists for id {new_tenant_id}") new_tenant_id = res.json() assert isinstance(new_tenant_id, str) - return ZTenantId(new_tenant_id) + return TenantId(new_tenant_id) - def tenant_attach(self, tenant_id: ZTenantId): + def tenant_attach(self, tenant_id: TenantId): res = self.post(f"http://localhost:{self.port}/v1/tenant/{tenant_id}/attach") self.verbose_error(res) - def tenant_detach(self, tenant_id: ZTenantId): + def tenant_detach(self, tenant_id: TenantId): res = self.post(f"http://localhost:{self.port}/v1/tenant/{tenant_id}/detach") self.verbose_error(res) - def tenant_status(self, tenant_id: ZTenantId) -> Dict[Any, Any]: + def tenant_status(self, tenant_id: TenantId) -> Dict[Any, Any]: res = self.get(f"http://localhost:{self.port}/v1/tenant/{tenant_id}") self.verbose_error(res) res_json = res.json() assert isinstance(res_json, dict) return res_json - def timeline_list(self, tenant_id: ZTenantId) -> List[Dict[str, Any]]: + def timeline_list(self, tenant_id: TenantId) -> List[Dict[str, Any]]: res = self.get(f"http://localhost:{self.port}/v1/tenant/{tenant_id}/timeline") self.verbose_error(res) res_json = res.json() @@ -1009,9 +1009,9 @@ class NeonPageserverHttpClient(requests.Session): def timeline_create( self, - tenant_id: ZTenantId, - new_timeline_id: Optional[ZTimelineId] = None, - ancestor_timeline_id: Optional[ZTimelineId] = None, + tenant_id: TenantId, + new_timeline_id: Optional[TimelineId] = None, + ancestor_timeline_id: Optional[TimelineId] = None, ancestor_start_lsn: Optional[Lsn] = None, ) -> Dict[Any, Any]: res = self.post( @@ -1032,8 +1032,8 @@ class NeonPageserverHttpClient(requests.Session): def timeline_detail( self, - tenant_id: ZTenantId, - timeline_id: ZTimelineId, + tenant_id: TenantId, + timeline_id: TimelineId, include_non_incremental_logical_size: bool = False, include_non_incremental_physical_size: bool = False, ) -> Dict[Any, Any]: @@ -1052,7 +1052,7 @@ class NeonPageserverHttpClient(requests.Session): assert isinstance(res_json, dict) return res_json - def timeline_delete(self, tenant_id: ZTenantId, timeline_id: ZTimelineId): + def timeline_delete(self, 
tenant_id: TenantId, timeline_id: TimelineId): res = self.delete( f"http://localhost:{self.port}/v1/tenant/{tenant_id}/timeline/{timeline_id}" ) @@ -1174,17 +1174,17 @@ class NeonCli(AbstractNeonCli): def create_tenant( self, - tenant_id: Optional[ZTenantId] = None, - timeline_id: Optional[ZTimelineId] = None, + tenant_id: Optional[TenantId] = None, + timeline_id: Optional[TimelineId] = None, conf: Optional[Dict[str, str]] = None, - ) -> Tuple[ZTenantId, ZTimelineId]: + ) -> Tuple[TenantId, TimelineId]: """ Creates a new tenant, returns its id and its initial timeline's id. """ if tenant_id is None: - tenant_id = ZTenantId.generate() + tenant_id = TenantId.generate() if timeline_id is None: - timeline_id = ZTimelineId.generate() + timeline_id = TimelineId.generate() if conf is None: res = self.raw_cli( [ @@ -1211,7 +1211,7 @@ class NeonCli(AbstractNeonCli): res.check_returncode() return tenant_id, timeline_id - def config_tenant(self, tenant_id: ZTenantId, conf: Dict[str, str]): + def config_tenant(self, tenant_id: TenantId, conf: Dict[str, str]): """ Update tenant config. """ @@ -1230,8 +1230,8 @@ class NeonCli(AbstractNeonCli): return res def create_timeline( - self, new_branch_name: str, tenant_id: Optional[ZTenantId] = None - ) -> ZTimelineId: + self, new_branch_name: str, tenant_id: Optional[TenantId] = None + ) -> TimelineId: cmd = [ "timeline", "create", @@ -1250,9 +1250,9 @@ class NeonCli(AbstractNeonCli): if matches is not None: created_timeline_id = matches.group("timeline_id") - return ZTimelineId(str(created_timeline_id)) + return TimelineId(str(created_timeline_id)) - def create_root_branch(self, branch_name: str, tenant_id: Optional[ZTenantId] = None): + def create_root_branch(self, branch_name: str, tenant_id: Optional[TenantId] = None): cmd = [ "timeline", "create", @@ -1274,15 +1274,15 @@ class NeonCli(AbstractNeonCli): if created_timeline_id is None: raise Exception("could not find timeline id after `neon timeline create` invocation") else: - return ZTimelineId(created_timeline_id) + return TimelineId(created_timeline_id) def create_branch( self, new_branch_name: str = DEFAULT_BRANCH_NAME, ancestor_branch_name: Optional[str] = None, - tenant_id: Optional[ZTenantId] = None, + tenant_id: Optional[TenantId] = None, ancestor_start_lsn: Optional[Lsn] = None, - ) -> ZTimelineId: + ) -> TimelineId: cmd = [ "timeline", "branch", @@ -1308,11 +1308,9 @@ class NeonCli(AbstractNeonCli): if created_timeline_id is None: raise Exception("could not find timeline id after `neon timeline create` invocation") else: - return ZTimelineId(str(created_timeline_id)) + return TimelineId(str(created_timeline_id)) - def list_timelines( - self, tenant_id: Optional[ZTenantId] = None - ) -> List[Tuple[str, ZTimelineId]]: + def list_timelines(self, tenant_id: Optional[TenantId] = None) -> List[Tuple[str, TimelineId]]: """ Returns a list of (branch_name, timeline_id) tuples out of parsed `neon timeline list` CLI output. 
""" @@ -1324,14 +1322,14 @@ class NeonCli(AbstractNeonCli): ) timelines_cli = sorted( map( - lambda branch_and_id: (branch_and_id[0], ZTimelineId(branch_and_id[1])), + lambda branch_and_id: (branch_and_id[0], TimelineId(branch_and_id[1])), TIMELINE_DATA_EXTRACTOR.findall(res.stdout), ) ) return timelines_cli def init( - self, config_toml: str, initial_timeline_id: Optional[ZTimelineId] = None + self, config_toml: str, initial_timeline_id: Optional[TimelineId] = None ) -> "subprocess.CompletedProcess[str]": with tempfile.NamedTemporaryFile(mode="w+") as tmp: tmp.write(config_toml) @@ -1410,7 +1408,7 @@ class NeonCli(AbstractNeonCli): self, branch_name: str, node_name: Optional[str] = None, - tenant_id: Optional[ZTenantId] = None, + tenant_id: Optional[TenantId] = None, lsn: Optional[Lsn] = None, port: Optional[int] = None, ) -> "subprocess.CompletedProcess[str]": @@ -1436,7 +1434,7 @@ class NeonCli(AbstractNeonCli): def pg_start( self, node_name: str, - tenant_id: Optional[ZTenantId] = None, + tenant_id: Optional[TenantId] = None, lsn: Optional[Lsn] = None, port: Optional[int] = None, ) -> "subprocess.CompletedProcess[str]": @@ -1460,7 +1458,7 @@ class NeonCli(AbstractNeonCli): def pg_stop( self, node_name: str, - tenant_id: Optional[ZTenantId] = None, + tenant_id: Optional[TenantId] = None, destroy=False, check_return_code=True, ) -> "subprocess.CompletedProcess[str]": @@ -1558,7 +1556,7 @@ def append_pageserver_param_overrides( f"--pageserver-config-override=remote_storage={remote_storage_toml_table}" ) - env_overrides = os.getenv("ZENITH_PAGESERVER_OVERRIDES") + env_overrides = os.getenv("NEON_PAGESERVER_OVERRIDES") if env_overrides is not None: params_to_update += [ f"--pageserver-config-override={o.strip()}" for o in env_overrides.split(";") @@ -1867,7 +1865,7 @@ class Postgres(PgProtocol): """An object representing a running postgres daemon.""" def __init__( - self, env: NeonEnv, tenant_id: ZTenantId, port: int, check_stop_result: bool = True + self, env: NeonEnv, tenant_id: TenantId, port: int, check_stop_result: bool = True ): super().__init__(host="localhost", port=port, user="cloud_admin", dbname="postgres") self.env = env @@ -2057,7 +2055,7 @@ class PostgresFactory: self, branch_name: str, node_name: Optional[str] = None, - tenant_id: Optional[ZTenantId] = None, + tenant_id: Optional[TenantId] = None, lsn: Optional[Lsn] = None, config_lines: Optional[List[str]] = None, ) -> Postgres: @@ -2081,7 +2079,7 @@ class PostgresFactory: self, branch_name: str, node_name: Optional[str] = None, - tenant_id: Optional[ZTenantId] = None, + tenant_id: Optional[TenantId] = None, lsn: Optional[Lsn] = None, config_lines: Optional[List[str]] = None, ) -> Postgres: @@ -2157,7 +2155,7 @@ class Safekeeper: return self def append_logical_message( - self, tenant_id: ZTenantId, timeline_id: ZTimelineId, request: Dict[str, Any] + self, tenant_id: TenantId, timeline_id: TimelineId, request: Dict[str, Any] ) -> Dict[str, Any]: """ Send JSON_CTRL query to append LogicalMessage to WAL and modify @@ -2167,7 +2165,7 @@ class Safekeeper: # "replication=0" hacks psycopg not to send additional queries # on startup, see https://github.com/psycopg/psycopg2/pull/482 - connstr = f"host=localhost port={self.port.pg} replication=0 options='-c ztimelineid={timeline_id} ztenantid={tenant_id}'" + connstr = f"host=localhost port={self.port.pg} replication=0 options='-c timeline_id={timeline_id} tenant_id={tenant_id}'" with closing(psycopg2.connect(connstr)) as conn: # server doesn't support transactions @@ -2202,8 +2200,8 
@@ class SafekeeperTimelineStatus: class SafekeeperMetrics: # These are metrics from Prometheus which uses float64 internally. # As a consequence, values may differ from real original int64s. - flush_lsn_inexact: Dict[Tuple[ZTenantId, ZTimelineId], int] = field(default_factory=dict) - commit_lsn_inexact: Dict[Tuple[ZTenantId, ZTimelineId], int] = field(default_factory=dict) + flush_lsn_inexact: Dict[Tuple[TenantId, TimelineId], int] = field(default_factory=dict) + commit_lsn_inexact: Dict[Tuple[TenantId, TimelineId], int] = field(default_factory=dict) class SafekeeperHttpClient(requests.Session): @@ -2221,7 +2219,7 @@ class SafekeeperHttpClient(requests.Session): self.get(f"http://localhost:{self.port}/v1/status").raise_for_status() def timeline_status( - self, tenant_id: ZTenantId, timeline_id: ZTimelineId + self, tenant_id: TenantId, timeline_id: TimelineId ) -> SafekeeperTimelineStatus: res = self.get(f"http://localhost:{self.port}/v1/tenant/{tenant_id}/timeline/{timeline_id}") res.raise_for_status() @@ -2234,16 +2232,14 @@ class SafekeeperHttpClient(requests.Session): remote_consistent_lsn=Lsn(resj["remote_consistent_lsn"]), ) - def record_safekeeper_info(self, tenant_id: ZTenantId, timeline_id: ZTimelineId, body): + def record_safekeeper_info(self, tenant_id: TenantId, timeline_id: TimelineId, body): res = self.post( f"http://localhost:{self.port}/v1/record_safekeeper_info/{tenant_id}/{timeline_id}", json=body, ) res.raise_for_status() - def timeline_delete_force( - self, tenant_id: ZTenantId, timeline_id: ZTimelineId - ) -> Dict[Any, Any]: + def timeline_delete_force(self, tenant_id: TenantId, timeline_id: TimelineId) -> Dict[Any, Any]: res = self.delete( f"http://localhost:{self.port}/v1/tenant/{tenant_id}/timeline/{timeline_id}" ) @@ -2252,7 +2248,7 @@ class SafekeeperHttpClient(requests.Session): assert isinstance(res_json, dict) return res_json - def tenant_delete_force(self, tenant_id: ZTenantId) -> Dict[Any, Any]: + def tenant_delete_force(self, tenant_id: TenantId) -> Dict[Any, Any]: res = self.delete(f"http://localhost:{self.port}/v1/tenant/{tenant_id}") res.raise_for_status() res_json = res.json() @@ -2273,16 +2269,16 @@ class SafekeeperHttpClient(requests.Session): all_metrics_text, re.MULTILINE, ): - metrics.flush_lsn_inexact[ - (ZTenantId(match.group(1)), ZTimelineId(match.group(2))) - ] = int(match.group(3)) + metrics.flush_lsn_inexact[(TenantId(match.group(1)), TimelineId(match.group(2)))] = int( + match.group(3) + ) for match in re.finditer( r'^safekeeper_commit_lsn{tenant_id="([0-9a-f]+)",timeline_id="([0-9a-f]+)"} (\S+)$', all_metrics_text, re.MULTILINE, ): metrics.commit_lsn_inexact[ - (ZTenantId(match.group(1)), ZTimelineId(match.group(2))) + (TenantId(match.group(1)), TimelineId(match.group(2))) ] = int(match.group(3)) return metrics @@ -2456,7 +2452,7 @@ def list_files_to_compare(pgdata_dir: Path): # pg is the existing and running compute node, that we want to compare with a basebackup def check_restored_datadir_content(test_output_dir: Path, env: NeonEnv, pg: Postgres): # Get the timeline ID. 
We need it for the 'basebackup' command - timeline = ZTimelineId(pg.safe_psql("SHOW neon.timeline_id")[0][0]) + timeline = TimelineId(pg.safe_psql("SHOW neon.timeline_id")[0][0]) # stop postgres to ensure that files won't change pg.stop() @@ -2540,7 +2536,7 @@ def wait_until(number_of_iterations: int, interval: float, func): def assert_timeline_local( - pageserver_http_client: NeonPageserverHttpClient, tenant: ZTenantId, timeline: ZTimelineId + pageserver_http_client: NeonPageserverHttpClient, tenant: TenantId, timeline: TimelineId ): timeline_detail = pageserver_http_client.timeline_detail( tenant, @@ -2554,14 +2550,14 @@ def assert_timeline_local( def assert_no_in_progress_downloads_for_tenant( pageserver_http_client: NeonPageserverHttpClient, - tenant: ZTenantId, + tenant: TenantId, ): tenant_status = pageserver_http_client.tenant_status(tenant) assert tenant_status["has_in_progress_downloads"] is False, tenant_status def remote_consistent_lsn( - pageserver_http_client: NeonPageserverHttpClient, tenant: ZTenantId, timeline: ZTimelineId + pageserver_http_client: NeonPageserverHttpClient, tenant: TenantId, timeline: TimelineId ) -> Lsn: detail = pageserver_http_client.timeline_detail(tenant, timeline) @@ -2578,8 +2574,8 @@ def remote_consistent_lsn( def wait_for_upload( pageserver_http_client: NeonPageserverHttpClient, - tenant: ZTenantId, - timeline: ZTimelineId, + tenant: TenantId, + timeline: TimelineId, lsn: Lsn, ): """waits for local timeline upload up to specified lsn""" @@ -2601,7 +2597,7 @@ def wait_for_upload( def last_record_lsn( - pageserver_http_client: NeonPageserverHttpClient, tenant: ZTenantId, timeline: ZTimelineId + pageserver_http_client: NeonPageserverHttpClient, tenant: TenantId, timeline: TimelineId ) -> Lsn: detail = pageserver_http_client.timeline_detail(tenant, timeline) @@ -2612,8 +2608,8 @@ def last_record_lsn( def wait_for_last_record_lsn( pageserver_http_client: NeonPageserverHttpClient, - tenant: ZTenantId, - timeline: ZTimelineId, + tenant: TenantId, + timeline: TimelineId, lsn: Lsn, ): """waits for pageserver to catch up to a certain lsn""" @@ -2632,7 +2628,7 @@ def wait_for_last_record_lsn( ) -def wait_for_last_flush_lsn(env: NeonEnv, pg: Postgres, tenant: ZTenantId, timeline: ZTimelineId): +def wait_for_last_flush_lsn(env: NeonEnv, pg: Postgres, tenant: TenantId, timeline: TimelineId): """Wait for pageserver to catch up the latest flush LSN""" last_flush_lsn = Lsn(pg.safe_psql("SELECT pg_current_wal_flush_lsn()")[0][0]) wait_for_last_record_lsn(env.pageserver.http_client(), tenant, timeline, last_flush_lsn) @@ -2643,8 +2639,8 @@ def fork_at_current_lsn( pg: Postgres, new_branch_name: str, ancestor_branch_name: str, - tenant_id: Optional[ZTenantId] = None, -) -> ZTimelineId: + tenant_id: Optional[TenantId] = None, +) -> TimelineId: """ Create new branch at the last LSN of an existing branch. The "last LSN" is taken from the given Postgres instance. The pageserver will wait for all the diff --git a/test_runner/fixtures/types.py b/test_runner/fixtures/types.py index bdf675a785..de2e131b79 100644 --- a/test_runner/fixtures/types.py +++ b/test_runner/fixtures/types.py @@ -46,11 +46,11 @@ class Lsn: @total_ordering -class ZId: +class Id: """ Datatype for a Neon tenant and timeline IDs. Internally it's a 16-byte array, and - the string representation is in hex. This corresponds to the ZId / ZTenantId / - ZTimelineIds in the Rust code. + the string representation is in hex. This corresponds to the Id / TenantId / + TimelineIds in the Rust code.
""" def __init__(self, x: str): @@ -79,11 +79,11 @@ class ZId: return cls(random.randbytes(16).hex()) -class ZTenantId(ZId): +class TenantId(Id): def __repr__(self): - return f'ZTenantId("{self.id.hex()}")' + return f'TenantId("{self.id.hex()}")' -class ZTimelineId(ZId): +class TimelineId(Id): def __repr__(self): - return f'ZTimelineId("{self.id.hex()}")' + return f'TimelineId("{self.id.hex()}")' diff --git a/test_runner/performance/README.md b/test_runner/performance/README.md index 8bac8080db..21e48cf899 100644 --- a/test_runner/performance/README.md +++ b/test_runner/performance/README.md @@ -20,4 +20,4 @@ All tests run only once. Usually to obtain more consistent performance numbers, Local test results for main branch, and results of daily performance tests, are stored in a neon project deployed in production environment. There is a Grafana dashboard that visualizes the results. Here is the [dashboard](https://observer.zenith.tech/d/DGKBm9Jnz/perf-test-results?orgId=1). The main problem with it is the unavailability to point at particular commit, though the data for that is available in the database. Needs some tweaking from someone who knows Grafana tricks. -There is also an inconsistency in test naming. Test name should be the same across platforms, and results can be differentiated by the platform field. But currently, platform is sometimes included in test name because of the way how parametrization works in pytest. I.e. there is a platform switch in the dashboard with zenith-local-ci and zenith-staging variants. I.e. some tests under zenith-local-ci value for a platform switch are displayed as `Test test_runner/performance/test_bulk_insert.py::test_bulk_insert[vanilla]` and `Test test_runner/performance/test_bulk_insert.py::test_bulk_insert[zenith]` which is highly confusing. +There is also an inconsistency in test naming. The test name should be the same across platforms, with results differentiated by the platform field. But currently, the platform is sometimes included in the test name because of the way parametrization works in pytest. E.g. there is a platform switch in the dashboard with neon-local-ci and neon-staging variants, and some tests under the neon-local-ci value of that switch are displayed as `Test test_runner/performance/test_bulk_insert.py::test_bulk_insert[vanilla]` and `Test test_runner/performance/test_bulk_insert.py::test_bulk_insert[neon]`, which is highly confusing. diff --git a/test_runner/regress/test_ancestor_branch.py b/test_runner/regress/test_ancestor_branch.py index b8e81824b0..cb2621ff02 100644 --- a/test_runner/regress/test_ancestor_branch.py +++ b/test_runner/regress/test_ancestor_branch.py @@ -1,6 +1,6 @@ from fixtures.log_helper import log from fixtures.neon_fixtures import NeonEnvBuilder -from fixtures.types import ZTimelineId +from fixtures.types import TimelineId from fixtures.utils import query_scalar @@ -27,7 +27,7 @@ def test_ancestor_branch(neon_env_builder: NeonEnvBuilder): pg_branch0 = env.postgres.create_start("main", tenant_id=tenant) branch0_cur = pg_branch0.connect().cursor() - branch0_timeline = ZTimelineId(query_scalar(branch0_cur, "SHOW neon.timeline_id")) + branch0_timeline = TimelineId(query_scalar(branch0_cur, "SHOW neon.timeline_id")) log.info(f"b0 timeline {branch0_timeline}") # Create table, and insert 100k rows.
@@ -51,7 +51,7 @@ def test_ancestor_branch(neon_env_builder: NeonEnvBuilder): log.info("postgres is running on 'branch1' branch") branch1_cur = pg_branch1.connect().cursor() - branch1_timeline = ZTimelineId(query_scalar(branch1_cur, "SHOW neon.timeline_id")) + branch1_timeline = TimelineId(query_scalar(branch1_cur, "SHOW neon.timeline_id")) log.info(f"b1 timeline {branch1_timeline}") branch1_lsn = query_scalar(branch1_cur, "SELECT pg_current_wal_insert_lsn()") @@ -74,7 +74,7 @@ def test_ancestor_branch(neon_env_builder: NeonEnvBuilder): log.info("postgres is running on 'branch2' branch") branch2_cur = pg_branch2.connect().cursor() - branch2_timeline = ZTimelineId(query_scalar(branch2_cur, "SHOW neon.timeline_id")) + branch2_timeline = TimelineId(query_scalar(branch2_cur, "SHOW neon.timeline_id")) log.info(f"b2 timeline {branch2_timeline}") branch2_lsn = query_scalar(branch2_cur, "SELECT pg_current_wal_insert_lsn()") diff --git a/test_runner/regress/test_auth.py b/test_runner/regress/test_auth.py index 08e38e1461..d9082efada 100644 --- a/test_runner/regress/test_auth.py +++ b/test_runner/regress/test_auth.py @@ -2,7 +2,7 @@ from contextlib import closing import pytest from fixtures.neon_fixtures import NeonEnvBuilder, NeonPageserverApiException -from fixtures.types import ZTenantId +from fixtures.types import TenantId def test_pageserver_auth(neon_env_builder: NeonEnvBuilder): @@ -13,7 +13,7 @@ def test_pageserver_auth(neon_env_builder: NeonEnvBuilder): tenant_token = env.auth_keys.generate_tenant_token(env.initial_tenant) tenant_http_client = env.pageserver.http_client(tenant_token) - invalid_tenant_token = env.auth_keys.generate_tenant_token(ZTenantId.generate()) + invalid_tenant_token = env.auth_keys.generate_tenant_token(TenantId.generate()) invalid_tenant_http_client = env.pageserver.http_client(invalid_tenant_token) management_token = env.auth_keys.generate_management_token() diff --git a/test_runner/regress/test_branch_behind.py b/test_runner/regress/test_branch_behind.py index 5bd6368bfc..cfb9649867 100644 --- a/test_runner/regress/test_branch_behind.py +++ b/test_runner/regress/test_branch_behind.py @@ -2,7 +2,7 @@ import psycopg2.extras import pytest from fixtures.log_helper import log from fixtures.neon_fixtures import NeonEnvBuilder -from fixtures.types import Lsn, ZTimelineId +from fixtures.types import Lsn, TimelineId from fixtures.utils import print_gc_result, query_scalar @@ -28,7 +28,7 @@ def test_branch_behind(neon_env_builder: NeonEnvBuilder): main_cur = pgmain.connect().cursor() - timeline = ZTimelineId(query_scalar(main_cur, "SHOW neon.timeline_id")) + timeline = TimelineId(query_scalar(main_cur, "SHOW neon.timeline_id")) # Create table, and insert the first 100 rows main_cur.execute("CREATE TABLE foo (t text)") diff --git a/test_runner/regress/test_broken_timeline.py b/test_runner/regress/test_broken_timeline.py index ce3a74930e..fd81981b2b 100644 --- a/test_runner/regress/test_broken_timeline.py +++ b/test_runner/regress/test_broken_timeline.py @@ -5,7 +5,7 @@ from typing import List, Tuple import pytest from fixtures.log_helper import log from fixtures.neon_fixtures import NeonEnv, NeonEnvBuilder, Postgres -from fixtures.types import ZTenantId, ZTimelineId +from fixtures.types import TenantId, TimelineId # Test restarting page server, while safekeeper and compute node keep @@ -15,7 +15,7 @@ def test_broken_timeline(neon_env_builder: NeonEnvBuilder): neon_env_builder.num_safekeepers = 3 env = neon_env_builder.init_start() - tenant_timelines: List[Tuple[ZTenantId, 
ZTimelineId, Postgres]] = [] + tenant_timelines: List[Tuple[TenantId, TimelineId, Postgres]] = [] for n in range(4): tenant_id, timeline_id = env.neon_cli.create_tenant() diff --git a/test_runner/regress/test_fullbackup.py b/test_runner/regress/test_fullbackup.py index af94865549..8de2687c9b 100644 --- a/test_runner/regress/test_fullbackup.py +++ b/test_runner/regress/test_fullbackup.py @@ -8,7 +8,7 @@ from fixtures.neon_fixtures import ( VanillaPostgres, pg_distrib_dir, ) -from fixtures.types import Lsn, ZTimelineId +from fixtures.types import Lsn, TimelineId from fixtures.utils import query_scalar, subprocess_capture num_rows = 1000 @@ -27,7 +27,7 @@ def test_fullbackup( log.info("postgres is running on 'test_fullbackup' branch") with pgmain.cursor() as cur: - timeline = ZTimelineId(query_scalar(cur, "SHOW neon.timeline_id")) + timeline = TimelineId(query_scalar(cur, "SHOW neon.timeline_id")) # data loading may take a while, so increase statement timeout cur.execute("SET statement_timeout='300s'") diff --git a/test_runner/regress/test_gc_aggressive.py b/test_runner/regress/test_gc_aggressive.py index 67ce8871cd..88d4ad8a6e 100644 --- a/test_runner/regress/test_gc_aggressive.py +++ b/test_runner/regress/test_gc_aggressive.py @@ -3,7 +3,7 @@ import random from fixtures.log_helper import log from fixtures.neon_fixtures import NeonEnv, NeonEnvBuilder, Postgres -from fixtures.types import ZTimelineId +from fixtures.types import TimelineId from fixtures.utils import query_scalar # Test configuration @@ -29,7 +29,7 @@ async def update_table(pg: Postgres): # Perform aggressive GC with 0 horizon -async def gc(env: NeonEnv, timeline: ZTimelineId): +async def gc(env: NeonEnv, timeline: TimelineId): psconn = await env.pageserver.connect_async() while updates_performed < updates_to_perform: @@ -37,7 +37,7 @@ async def gc(env: NeonEnv, timeline: ZTimelineId): # At the same time, run UPDATEs and GC -async def update_and_gc(env: NeonEnv, pg: Postgres, timeline: ZTimelineId): +async def update_and_gc(env: NeonEnv, pg: Postgres, timeline: TimelineId): workers = [] for worker_id in range(num_connections): workers.append(asyncio.create_task(update_table(pg))) @@ -62,7 +62,7 @@ def test_gc_aggressive(neon_env_builder: NeonEnvBuilder): log.info("postgres is running on test_gc_aggressive branch") with pg.cursor() as cur: - timeline = ZTimelineId(query_scalar(cur, "SHOW neon.timeline_id")) + timeline = TimelineId(query_scalar(cur, "SHOW neon.timeline_id")) # Create table, and insert the first 100 rows cur.execute("CREATE TABLE foo (id int, counter int, t text)") diff --git a/test_runner/regress/test_import.py b/test_runner/regress/test_import.py index fc9f41bda0..60cc0551ab 100644 --- a/test_runner/regress/test_import.py +++ b/test_runner/regress/test_import.py @@ -17,7 +17,7 @@ from fixtures.neon_fixtures import ( wait_for_last_record_lsn, wait_for_upload, ) -from fixtures.types import Lsn, ZTenantId, ZTimelineId +from fixtures.types import Lsn, TenantId, TimelineId from fixtures.utils import subprocess_capture @@ -69,8 +69,8 @@ def test_import_from_vanilla(test_output_dir, pg_bin, vanilla_pg, neon_env_build end_lsn = manifest["WAL-Ranges"][0]["End-LSN"] node_name = "import_from_vanilla" - tenant = ZTenantId.generate() - timeline = ZTimelineId.generate() + tenant = TenantId.generate() + timeline = TimelineId.generate() # Set up pageserver for import neon_env_builder.enable_local_fs_remote_storage() @@ -195,7 +195,7 @@ def _generate_data(num_rows: int, pg: Postgres) -> Lsn: def _import( - expected_num_rows: 
int, lsn: Lsn, env: NeonEnv, pg_bin: PgBin, timeline: ZTimelineId + expected_num_rows: int, lsn: Lsn, env: NeonEnv, pg_bin: PgBin, timeline: TimelineId ) -> str: """Test importing backup data to the pageserver. @@ -228,9 +228,9 @@ def _import( # start the pageserver again env.pageserver.start() - # Import using another tenantid, because we use the same pageserver. + # Import using another tenant_id, because we use the same pageserver. # TODO Create another pageserver to make test more realistic. - tenant = ZTenantId.generate() + tenant = TenantId.generate() # Import to pageserver node_name = "import_from_pageserver" diff --git a/test_runner/regress/test_neon_cli.py b/test_runner/regress/test_neon_cli.py index b2342e5ee8..a9dc63dd50 100644 --- a/test_runner/regress/test_neon_cli.py +++ b/test_runner/regress/test_neon_cli.py @@ -7,11 +7,11 @@ from fixtures.neon_fixtures import ( NeonEnvBuilder, NeonPageserverHttpClient, ) -from fixtures.types import ZTenantId, ZTimelineId +from fixtures.types import TenantId, TimelineId def helper_compare_timeline_list( - pageserver_http_client: NeonPageserverHttpClient, env: NeonEnv, initial_tenant: ZTenantId + pageserver_http_client: NeonPageserverHttpClient, env: NeonEnv, initial_tenant: TenantId ): """ Compare timelines list returned by CLI and directly via API. @@ -20,7 +20,7 @@ def helper_compare_timeline_list( timelines_api = sorted( map( - lambda t: ZTimelineId(t["timeline_id"]), + lambda t: TimelineId(t["timeline_id"]), pageserver_http_client.timeline_list(initial_tenant), ) ) @@ -85,7 +85,7 @@ def test_cli_tenant_list(neon_simple_env: NeonEnv): helper_compare_tenant_list(pageserver_http_client, env) res = env.neon_cli.list_tenants() - tenants = sorted(map(lambda t: ZTenantId(t.split()[0]), res.stdout.splitlines())) + tenants = sorted(map(lambda t: TenantId(t.split()[0]), res.stdout.splitlines())) assert env.initial_tenant in tenants assert tenant1 in tenants diff --git a/test_runner/regress/test_old_request_lsn.py b/test_runner/regress/test_old_request_lsn.py index 2b5e2edb5f..c99e13f45f 100644 --- a/test_runner/regress/test_old_request_lsn.py +++ b/test_runner/regress/test_old_request_lsn.py @@ -1,7 +1,7 @@ import psycopg2.extras from fixtures.log_helper import log from fixtures.neon_fixtures import NeonEnvBuilder -from fixtures.types import ZTimelineId +from fixtures.types import TimelineId from fixtures.utils import print_gc_result, query_scalar @@ -27,7 +27,7 @@ def test_old_request_lsn(neon_env_builder: NeonEnvBuilder): cur = pg_conn.cursor() # Get the timeline ID of our branch. 
We need it for the 'do_gc' command - timeline = ZTimelineId(query_scalar(cur, "SHOW neon.timeline_id")) + timeline = TimelineId(query_scalar(cur, "SHOW neon.timeline_id")) psconn = env.pageserver.connect() pscur = psconn.cursor(cursor_factory=psycopg2.extras.DictCursor) diff --git a/test_runner/regress/test_pageserver_api.py b/test_runner/regress/test_pageserver_api.py index a7b7189824..def6bd5b33 100644 --- a/test_runner/regress/test_pageserver_api.py +++ b/test_runner/regress/test_pageserver_api.py @@ -11,7 +11,7 @@ from fixtures.neon_fixtures import ( pg_distrib_dir, wait_until, ) -from fixtures.types import Lsn, ZTenantId, ZTimelineId +from fixtures.types import Lsn, TenantId, TimelineId # test that we cannot override node id after init @@ -60,39 +60,39 @@ def test_pageserver_init_node_id(neon_simple_env: NeonEnv): assert "has node id already, it cannot be overridden" in bad_update.stderr -def check_client(client: NeonPageserverHttpClient, initial_tenant: ZTenantId): +def check_client(client: NeonPageserverHttpClient, initial_tenant: TenantId): client.check_status() # check initial tenant is there - assert initial_tenant in {ZTenantId(t["id"]) for t in client.tenant_list()} + assert initial_tenant in {TenantId(t["id"]) for t in client.tenant_list()} # create new tenant and check it is also there - tenant_id = ZTenantId.generate() + tenant_id = TenantId.generate() client.tenant_create(tenant_id) - assert tenant_id in {ZTenantId(t["id"]) for t in client.tenant_list()} + assert tenant_id in {TenantId(t["id"]) for t in client.tenant_list()} timelines = client.timeline_list(tenant_id) assert len(timelines) == 0, "initial tenant should not have any timelines" # create timeline - timeline_id = ZTimelineId.generate() + timeline_id = TimelineId.generate() client.timeline_create(tenant_id=tenant_id, new_timeline_id=timeline_id) timelines = client.timeline_list(tenant_id) assert len(timelines) > 0 # check it is there - assert timeline_id in {ZTimelineId(b["timeline_id"]) for b in client.timeline_list(tenant_id)} + assert timeline_id in {TimelineId(b["timeline_id"]) for b in client.timeline_list(tenant_id)} for timeline in timelines: - timeline_id = ZTimelineId(timeline["timeline_id"]) + timeline_id = TimelineId(timeline["timeline_id"]) timeline_details = client.timeline_detail( tenant_id=tenant_id, timeline_id=timeline_id, include_non_incremental_logical_size=True, ) - assert ZTenantId(timeline_details["tenant_id"]) == tenant_id - assert ZTimelineId(timeline_details["timeline_id"]) == timeline_id + assert TenantId(timeline_details["tenant_id"]) == tenant_id + assert TimelineId(timeline_details["timeline_id"]) == timeline_id assert timeline_details.get("local") is not None @@ -118,8 +118,8 @@ def test_pageserver_http_get_wal_receiver_not_found(neon_simple_env: NeonEnv): def expect_updated_msg_lsn( client: NeonPageserverHttpClient, - tenant_id: ZTenantId, - timeline_id: ZTimelineId, + tenant_id: TenantId, + timeline_id: TimelineId, prev_msg_lsn: Optional[Lsn], ) -> Lsn: timeline_details = client.timeline_detail(tenant_id, timeline_id=timeline_id) diff --git a/test_runner/regress/test_pitr_gc.py b/test_runner/regress/test_pitr_gc.py index 329f4b7d24..786266b70e 100644 --- a/test_runner/regress/test_pitr_gc.py +++ b/test_runner/regress/test_pitr_gc.py @@ -3,7 +3,7 @@ from contextlib import closing import psycopg2.extras from fixtures.log_helper import log from fixtures.neon_fixtures import NeonEnvBuilder -from fixtures.types import ZTimelineId +from fixtures.types import TimelineId from 
fixtures.utils import print_gc_result, query_scalar @@ -25,7 +25,7 @@ def test_pitr_gc(neon_env_builder: NeonEnvBuilder): main_pg_conn = pgmain.connect() main_cur = main_pg_conn.cursor() - timeline = ZTimelineId(query_scalar(main_cur, "SHOW neon.timeline_id")) + timeline = TimelineId(query_scalar(main_cur, "SHOW neon.timeline_id")) # Create table main_cur.execute("CREATE TABLE foo (t text)") diff --git a/test_runner/regress/test_remote_storage.py b/test_runner/regress/test_remote_storage.py index 04baef6ba0..cbe74cad5c 100644 --- a/test_runner/regress/test_remote_storage.py +++ b/test_runner/regress/test_remote_storage.py @@ -1,5 +1,5 @@ # It's possible to run any regular test with the local fs remote storage via -# env ZENITH_PAGESERVER_OVERRIDES="remote_storage={local_path='/tmp/neon_zzz/'}" poetry ...... +# env NEON_PAGESERVER_OVERRIDES="remote_storage={local_path='/tmp/neon_zzz/'}" poetry ...... import os import shutil @@ -17,7 +17,7 @@ from fixtures.neon_fixtures import ( wait_for_upload, wait_until, ) -from fixtures.types import Lsn, ZTenantId, ZTimelineId +from fixtures.types import Lsn, TenantId, TimelineId from fixtures.utils import query_scalar @@ -61,8 +61,8 @@ def test_remote_storage_backup_and_restore( client = env.pageserver.http_client() - tenant_id = ZTenantId(pg.safe_psql("show neon.tenant_id")[0][0]) - timeline_id = ZTimelineId(pg.safe_psql("show neon.timeline_id")[0][0]) + tenant_id = TenantId(pg.safe_psql("show neon.tenant_id")[0][0]) + timeline_id = TimelineId(pg.safe_psql("show neon.timeline_id")[0][0]) checkpoint_numbers = range(1, 3) diff --git a/test_runner/regress/test_tenant_detach.py b/test_runner/regress/test_tenant_detach.py index 147e22b38f..e3c9a091f9 100644 --- a/test_runner/regress/test_tenant_detach.py +++ b/test_runner/regress/test_tenant_detach.py @@ -4,10 +4,10 @@ import psycopg2 import pytest from fixtures.log_helper import log from fixtures.neon_fixtures import NeonEnv, NeonEnvBuilder, NeonPageserverApiException -from fixtures.types import ZTenantId, ZTimelineId +from fixtures.types import TenantId, TimelineId -def do_gc_target(env: NeonEnv, tenant_id: ZTenantId, timeline_id: ZTimelineId): +def do_gc_target(env: NeonEnv, tenant_id: TenantId, timeline_id: TimelineId): """Hack to unblock main, see https://github.com/neondatabase/neon/issues/2211""" try: env.pageserver.safe_psql(f"do_gc {tenant_id} {timeline_id} 0") @@ -20,7 +20,7 @@ def test_tenant_detach_smoke(neon_env_builder: NeonEnvBuilder): pageserver_http = env.pageserver.http_client() # first check for non existing tenant - tenant_id = ZTenantId.generate() + tenant_id = TenantId.generate() with pytest.raises( expected_exception=NeonPageserverApiException, match=f"Tenant not found for id {tenant_id}", @@ -46,7 +46,7 @@ def test_tenant_detach_smoke(neon_env_builder: NeonEnvBuilder): with pytest.raises( expected_exception=psycopg2.DatabaseError, match="gc target timeline does not exist" ): - bogus_timeline_id = ZTimelineId.generate() + bogus_timeline_id = TimelineId.generate() env.pageserver.safe_psql(f"do_gc {tenant_id} {bogus_timeline_id} 0") # try to concurrently run gc and detach diff --git a/test_runner/regress/test_tenant_relocation.py b/test_runner/regress/test_tenant_relocation.py index 56563ebe87..aa7d92f1fd 100644 --- a/test_runner/regress/test_tenant_relocation.py +++ b/test_runner/regress/test_tenant_relocation.py @@ -24,7 +24,7 @@ from fixtures.neon_fixtures import ( wait_for_upload, wait_until, ) -from fixtures.types import Lsn, ZTenantId, ZTimelineId +from fixtures.types import Lsn, 
TenantId, TimelineId from fixtures.utils import query_scalar, subprocess_capture @@ -113,15 +113,15 @@ def load(pg: Postgres, stop_event: threading.Event, load_ok_event: threading.Eve def populate_branch( pg: Postgres, - tenant_id: ZTenantId, + tenant_id: TenantId, ps_http: NeonPageserverHttpClient, create_table: bool, expected_sum: Optional[int], -) -> Tuple[ZTimelineId, Lsn]: +) -> Tuple[TimelineId, Lsn]: # insert some data with pg_cur(pg) as cur: cur.execute("SHOW neon.timeline_id") - timeline_id = ZTimelineId(cur.fetchone()[0]) + timeline_id = TimelineId(cur.fetchone()[0]) log.info("timeline to relocate %s", timeline_id) log.info( @@ -149,8 +149,8 @@ def populate_branch( def ensure_checkpoint( pageserver_cur, pageserver_http: NeonPageserverHttpClient, - tenant_id: ZTenantId, - timeline_id: ZTimelineId, + tenant_id: TenantId, + timeline_id: TimelineId, current_lsn: Lsn, ): # run checkpoint manually to be sure that data landed in remote storage @@ -162,8 +162,8 @@ def ensure_checkpoint( def check_timeline_attached( new_pageserver_http_client: NeonPageserverHttpClient, - tenant_id: ZTenantId, - timeline_id: ZTimelineId, + tenant_id: TenantId, + timeline_id: TimelineId, old_timeline_detail: Dict[str, Any], old_current_lsn: Lsn, ): @@ -187,8 +187,8 @@ def switch_pg_to_new_pageserver( env: NeonEnv, pg: Postgres, new_pageserver_port: int, - tenant_id: ZTenantId, - timeline_id: ZTimelineId, + tenant_id: TenantId, + timeline_id: TimelineId, ) -> pathlib.Path: pg.stop() @@ -265,7 +265,7 @@ def test_tenant_relocation( pageserver_http = env.pageserver.http_client() tenant_id, initial_timeline_id = env.neon_cli.create_tenant( - ZTenantId("74ee8b079a0e437eb0afea7d26a07209") + TenantId("74ee8b079a0e437eb0afea7d26a07209") ) log.info("tenant to relocate %s initial_timeline_id %s", tenant_id, initial_timeline_id) diff --git a/test_runner/regress/test_tenant_tasks.py b/test_runner/regress/test_tenant_tasks.py index 1214d703d0..97a13bbcb0 100644 --- a/test_runner/regress/test_tenant_tasks.py +++ b/test_runner/regress/test_tenant_tasks.py @@ -1,6 +1,6 @@ from fixtures.log_helper import log from fixtures.neon_fixtures import NeonEnvBuilder, wait_until -from fixtures.types import ZTenantId, ZTimelineId +from fixtures.types import TenantId, TimelineId def get_only_element(l): # noqa: E741 @@ -23,7 +23,7 @@ def test_tenant_tasks(neon_env_builder: NeonEnvBuilder): def get_state(tenant): all_states = client.tenant_list() - matching = [t for t in all_states if ZTenantId(t["id"]) == tenant] + matching = [t for t in all_states if TenantId(t["id"]) == tenant] return get_only_element(matching)["state"] def get_metric_value(name): @@ -35,8 +35,8 @@ def test_tenant_tasks(neon_env_builder: NeonEnvBuilder): value = line.lstrip(name).strip() return int(value) - def delete_all_timelines(tenant: ZTenantId): - timelines = [ZTimelineId(t["timeline_id"]) for t in client.timeline_list(tenant)] + def delete_all_timelines(tenant: TenantId): + timelines = [TimelineId(t["timeline_id"]) for t in client.timeline_list(tenant)] for t in timelines: client.timeline_delete(tenant, t) @@ -56,7 +56,7 @@ def test_tenant_tasks(neon_env_builder: NeonEnvBuilder): # Delete all timelines on all tenants for tenant_info in client.tenant_list(): - tenant_id = ZTenantId(tenant_info["id"]) + tenant_id = TenantId(tenant_info["id"]) delete_all_timelines(tenant_id) wait_until(10, 0.2, lambda: assert_active_without_jobs(tenant_id)) diff --git a/test_runner/regress/test_tenants.py b/test_runner/regress/test_tenants.py index bd53aae25c..4e7610a96f 100644 
--- a/test_runner/regress/test_tenants.py +++ b/test_runner/regress/test_tenants.py @@ -8,7 +8,7 @@ import pytest from fixtures.log_helper import log from fixtures.metrics import PAGESERVER_PER_TENANT_METRICS, parse_metrics from fixtures.neon_fixtures import NeonEnv, NeonEnvBuilder -from fixtures.types import Lsn, ZTenantId +from fixtures.types import Lsn, TenantId from prometheus_client.samples import Sample @@ -188,7 +188,7 @@ def test_pageserver_metrics_removed_after_detach(neon_env_builder: NeonEnvBuilde cur.execute("SELECT sum(key) FROM t") assert cur.fetchone() == (5000050000,) - def get_ps_metric_samples_for_tenant(tenant_id: ZTenantId) -> List[Sample]: + def get_ps_metric_samples_for_tenant(tenant_id: TenantId) -> List[Sample]: ps_metrics = parse_metrics(env.pageserver.http_client().get_metrics(), "pageserver") samples = [] for metric_name in ps_metrics.metrics: diff --git a/test_runner/regress/test_tenants_with_remote_storage.py b/test_runner/regress/test_tenants_with_remote_storage.py index 70b474c9a9..85f371c845 100644 --- a/test_runner/regress/test_tenants_with_remote_storage.py +++ b/test_runner/regress/test_tenants_with_remote_storage.py @@ -19,7 +19,7 @@ from fixtures.neon_fixtures import ( wait_for_last_record_lsn, wait_for_upload, ) -from fixtures.types import Lsn, ZTenantId, ZTimelineId +from fixtures.types import Lsn, TenantId, TimelineId async def tenant_workload(env: NeonEnv, pg: Postgres): @@ -58,7 +58,7 @@ def test_tenants_many(neon_env_builder: NeonEnvBuilder, remote_storage_kind: Rem env = neon_env_builder.init_start() - tenants_pgs: List[Tuple[ZTenantId, Postgres]] = [] + tenants_pgs: List[Tuple[TenantId, Postgres]] = [] for _ in range(1, 5): # Use a tiny checkpoint distance, to create a lot of layers quickly @@ -83,8 +83,8 @@ def test_tenants_many(neon_env_builder: NeonEnvBuilder, remote_storage_kind: Rem res = pg.safe_psql_many( ["SHOW neon.tenant_id", "SHOW neon.timeline_id", "SELECT pg_current_wal_flush_lsn()"] ) - tenant_id = ZTenantId(res[0][0][0]) - timeline_id = ZTimelineId(res[1][0][0]) + tenant_id = TenantId(res[0][0][0]) + timeline_id = TimelineId(res[1][0][0]) current_lsn = Lsn(res[2][0][0]) # wait until pageserver receives all the data diff --git a/test_runner/regress/test_timeline_delete.py b/test_runner/regress/test_timeline_delete.py index 5a20dbd232..2eea8dd3cc 100644 --- a/test_runner/regress/test_timeline_delete.py +++ b/test_runner/regress/test_timeline_delete.py @@ -1,6 +1,6 @@ import pytest from fixtures.neon_fixtures import NeonEnv, NeonPageserverApiException, wait_until -from fixtures.types import ZTenantId, ZTimelineId +from fixtures.types import TenantId, TimelineId def test_timeline_delete(neon_simple_env: NeonEnv): @@ -10,12 +10,12 @@ def test_timeline_delete(neon_simple_env: NeonEnv): # first try to delete non existing timeline # for existing tenant: - invalid_timeline_id = ZTimelineId.generate() + invalid_timeline_id = TimelineId.generate() with pytest.raises(NeonPageserverApiException, match="timeline not found"): ps_http.timeline_delete(tenant_id=env.initial_tenant, timeline_id=invalid_timeline_id) # for non existing tenant: - invalid_tenant_id = ZTenantId.generate() + invalid_tenant_id = TenantId.generate() with pytest.raises( NeonPageserverApiException, match=f"Tenant {invalid_tenant_id} not found in the local state", diff --git a/test_runner/regress/test_timeline_size.py b/test_runner/regress/test_timeline_size.py index 6fbc430e80..83018f46f5 100644 --- a/test_runner/regress/test_timeline_size.py +++ 
b/test_runner/regress/test_timeline_size.py @@ -15,7 +15,7 @@ from fixtures.neon_fixtures import ( assert_timeline_local, wait_for_last_flush_lsn, ) -from fixtures.types import ZTenantId, ZTimelineId +from fixtures.types import TenantId, TimelineId from fixtures.utils import get_timeline_dir_size @@ -386,7 +386,7 @@ def test_tenant_physical_size(neon_simple_env: NeonEnv): tenant, timeline = env.neon_cli.create_tenant() - def get_timeline_physical_size(timeline: ZTimelineId): + def get_timeline_physical_size(timeline: TimelineId): res = client.timeline_detail(tenant, timeline, include_non_incremental_physical_size=True) return res["local"]["current_physical_size_non_incremental"] @@ -415,7 +415,7 @@ def test_tenant_physical_size(neon_simple_env: NeonEnv): assert tenant_physical_size == timeline_total_size -def assert_physical_size(env: NeonEnv, tenant_id: ZTenantId, timeline_id: ZTimelineId): +def assert_physical_size(env: NeonEnv, tenant_id: TenantId, timeline_id: TimelineId): """Check the current physical size returned from timeline API matches the total physical size of the timeline on disk""" client = env.pageserver.http_client() @@ -431,7 +431,7 @@ def assert_physical_size(env: NeonEnv, tenant_id: ZTenantId, timeline_id: ZTimel # Timeline logical size initialization is an asynchronous background task that runs once, # try a few times to ensure it's activated properly def wait_for_timeline_size_init( - client: NeonPageserverHttpClient, tenant: ZTenantId, timeline: ZTimelineId + client: NeonPageserverHttpClient, tenant: TenantId, timeline: TimelineId ): for i in range(10): timeline_details = assert_timeline_local(client, tenant, timeline) diff --git a/test_runner/regress/test_wal_acceptor.py b/test_runner/regress/test_wal_acceptor.py index cd370e60c0..8c5b4c8c30 100644 --- a/test_runner/regress/test_wal_acceptor.py +++ b/test_runner/regress/test_wal_acceptor.py @@ -32,13 +32,13 @@ from fixtures.neon_fixtures import ( wait_for_last_record_lsn, wait_for_upload, ) -from fixtures.types import Lsn, ZTenantId, ZTimelineId +from fixtures.types import Lsn, TenantId, TimelineId from fixtures.utils import get_dir_size, query_scalar def wait_lsn_force_checkpoint( - tenant_id: ZTenantId, - timeline_id: ZTimelineId, + tenant_id: TenantId, + timeline_id: TimelineId, pg: Postgres, ps: NeonPageserver, pageserver_conn_options={}, @@ -74,7 +74,7 @@ def wait_lsn_force_checkpoint( @dataclass class TimelineMetrics: - timeline_id: ZTimelineId + timeline_id: TimelineId last_record_lsn: Lsn # One entry per each Safekeeper, order is the same flush_lsns: List[Lsn] = field(default_factory=list) @@ -126,7 +126,7 @@ def test_many_timelines(neon_env_builder: NeonEnvBuilder): timeline_metrics = [] for timeline_detail in timeline_details: - timeline_id = ZTimelineId(timeline_detail["timeline_id"]) + timeline_id = TimelineId(timeline_detail["timeline_id"]) local_timeline_detail = timeline_detail.get("local") if local_timeline_detail is None: @@ -273,8 +273,8 @@ def test_broker(neon_env_builder: NeonEnvBuilder): pg.safe_psql("CREATE TABLE t(key int primary key, value text)") # learn neon timeline from compute - tenant_id = ZTenantId(pg.safe_psql("show neon.tenant_id")[0][0]) - timeline_id = ZTimelineId(pg.safe_psql("show neon.timeline_id")[0][0]) + tenant_id = TenantId(pg.safe_psql("show neon.tenant_id")[0][0]) + timeline_id = TimelineId(pg.safe_psql("show neon.timeline_id")[0][0]) # wait until remote_consistent_lsn gets advanced on all safekeepers clients = [sk.http_client() for sk in env.safekeepers] @@ -325,8 +325,8 
@@ def test_wal_removal(neon_env_builder: NeonEnvBuilder, auth_enabled: bool): ] ) - tenant_id = ZTenantId(pg.safe_psql("show neon.tenant_id")[0][0]) - timeline_id = ZTimelineId(pg.safe_psql("show neon.timeline_id")[0][0]) + tenant_id = TenantId(pg.safe_psql("show neon.tenant_id")[0][0]) + timeline_id = TimelineId(pg.safe_psql("show neon.timeline_id")[0][0]) # force checkpoint to advance remote_consistent_lsn pageserver_conn_options = {} @@ -348,7 +348,7 @@ def test_wal_removal(neon_env_builder: NeonEnvBuilder, auth_enabled: bool): auth_token=env.auth_keys.generate_tenant_token(tenant_id) ) http_cli_other = env.safekeepers[0].http_client( - auth_token=env.auth_keys.generate_tenant_token(ZTenantId.generate()) + auth_token=env.auth_keys.generate_tenant_token(TenantId.generate()) ) http_cli_noauth = env.safekeepers[0].http_client() @@ -438,8 +438,8 @@ def test_wal_backup(neon_env_builder: NeonEnvBuilder, remote_storage_kind: Remot pg = env.postgres.create_start("test_safekeepers_wal_backup") # learn neon timeline from compute - tenant_id = ZTenantId(pg.safe_psql("show neon.tenant_id")[0][0]) - timeline_id = ZTimelineId(pg.safe_psql("show neon.timeline_id")[0][0]) + tenant_id = TenantId(pg.safe_psql("show neon.tenant_id")[0][0]) + timeline_id = TimelineId(pg.safe_psql("show neon.timeline_id")[0][0]) pg_conn = pg.connect() cur = pg_conn.cursor() @@ -493,8 +493,8 @@ def test_s3_wal_replay(neon_env_builder: NeonEnvBuilder, remote_storage_kind: Re pg = env.postgres.create_start("test_s3_wal_replay") # learn neon timeline from compute - tenant_id = ZTenantId(pg.safe_psql("show neon.tenant_id")[0][0]) - timeline_id = ZTimelineId(pg.safe_psql("show neon.timeline_id")[0][0]) + tenant_id = TenantId(pg.safe_psql("show neon.tenant_id")[0][0]) + timeline_id = TimelineId(pg.safe_psql("show neon.timeline_id")[0][0]) expected_sum = 0 @@ -584,8 +584,8 @@ class ProposerPostgres(PgProtocol): self, pgdata_dir: str, pg_bin, - tenant_id: ZTenantId, - timeline_id: ZTimelineId, + tenant_id: TenantId, + timeline_id: TimelineId, listen_addr: str, port: int, ): @@ -593,8 +593,8 @@ class ProposerPostgres(PgProtocol): self.pgdata_dir: str = pgdata_dir self.pg_bin: PgBin = pg_bin - self.tenant_id: ZTenantId = tenant_id - self.timeline_id: ZTimelineId = timeline_id + self.tenant_id: TenantId = tenant_id + self.timeline_id: TimelineId = timeline_id self.listen_addr: str = listen_addr self.port: int = port @@ -672,8 +672,8 @@ def test_sync_safekeepers( neon_env_builder.num_safekeepers = 3 env = neon_env_builder.init_start() - tenant_id = ZTenantId.generate() - timeline_id = ZTimelineId.generate() + tenant_id = TenantId.generate() + timeline_id = TimelineId.generate() # write config for proposer pgdata_dir = os.path.join(env.repo_dir, "proposer_pgdata") @@ -725,8 +725,8 @@ def test_timeline_status(neon_env_builder: NeonEnvBuilder, auth_enabled: bool): wa = env.safekeepers[0] # learn neon timeline from compute - tenant_id = ZTenantId(pg.safe_psql("show neon.tenant_id")[0][0]) - timeline_id = ZTimelineId(pg.safe_psql("show neon.timeline_id")[0][0]) + tenant_id = TenantId(pg.safe_psql("show neon.tenant_id")[0][0]) + timeline_id = TimelineId(pg.safe_psql("show neon.timeline_id")[0][0]) if not auth_enabled: wa_http_cli = wa.http_client() @@ -735,7 +735,7 @@ def test_timeline_status(neon_env_builder: NeonEnvBuilder, auth_enabled: bool): wa_http_cli = wa.http_client(auth_token=env.auth_keys.generate_tenant_token(tenant_id)) wa_http_cli.check_status() wa_http_cli_bad = wa.http_client( - 
auth_token=env.auth_keys.generate_tenant_token(ZTenantId.generate()) + auth_token=env.auth_keys.generate_tenant_token(TenantId.generate()) ) wa_http_cli_bad.check_status() wa_http_cli_noauth = wa.http_client() @@ -785,15 +785,15 @@ class SafekeeperEnv: self.bin_safekeeper = os.path.join(str(neon_binpath), "safekeeper") self.safekeepers: Optional[List[subprocess.CompletedProcess[Any]]] = None self.postgres: Optional[ProposerPostgres] = None - self.tenant_id: Optional[ZTenantId] = None - self.timeline_id: Optional[ZTimelineId] = None + self.tenant_id: Optional[TenantId] = None + self.timeline_id: Optional[TimelineId] = None def init(self) -> "SafekeeperEnv": assert self.postgres is None, "postgres is already initialized" assert self.safekeepers is None, "safekeepers are already initialized" - self.tenant_id = ZTenantId.generate() - self.timeline_id = ZTimelineId.generate() + self.tenant_id = TenantId.generate() + self.timeline_id = TimelineId.generate() self.repo_dir.mkdir(exist_ok=True) # Create config and a Safekeeper object for each safekeeper @@ -912,9 +912,7 @@ def test_replace_safekeeper(neon_env_builder: NeonEnvBuilder): sum_after = query_scalar(cur, "SELECT SUM(key) FROM t") assert sum_after == sum_before + 5000050000 - def show_statuses( - safekeepers: List[Safekeeper], tenant_id: ZTenantId, timeline_id: ZTimelineId - ): + def show_statuses(safekeepers: List[Safekeeper], tenant_id: TenantId, timeline_id: TimelineId): for sk in safekeepers: http_cli = sk.http_client() try: @@ -935,8 +933,8 @@ def test_replace_safekeeper(neon_env_builder: NeonEnvBuilder): pg.start() # learn neon timeline from compute - tenant_id = ZTenantId(pg.safe_psql("show neon.tenant_id")[0][0]) - timeline_id = ZTimelineId(pg.safe_psql("show neon.timeline_id")[0][0]) + tenant_id = TenantId(pg.safe_psql("show neon.tenant_id")[0][0]) + timeline_id = TimelineId(pg.safe_psql("show neon.timeline_id")[0][0]) execute_payload(pg) show_statuses(env.safekeepers, tenant_id, timeline_id) @@ -1134,7 +1132,7 @@ def test_delete_force(neon_env_builder: NeonEnvBuilder, auth_enabled: bool): assert (sk_data_dir / str(tenant_id_other) / str(timeline_id_other)).is_dir() # Remove non-existing branch, should succeed - assert sk_http.timeline_delete_force(tenant_id, ZTimelineId("00" * 16)) == { + assert sk_http.timeline_delete_force(tenant_id, TimelineId("00" * 16)) == { "dir_existed": False, "was_active": False, } diff --git a/test_runner/regress/test_wal_acceptor_async.py b/test_runner/regress/test_wal_acceptor_async.py index e36d3cf94b..9d2008296a 100644 --- a/test_runner/regress/test_wal_acceptor_async.py +++ b/test_runner/regress/test_wal_acceptor_async.py @@ -7,7 +7,7 @@ from typing import List, Optional import asyncpg from fixtures.log_helper import getLogger from fixtures.neon_fixtures import NeonEnv, NeonEnvBuilder, Postgres, Safekeeper -from fixtures.types import Lsn, ZTenantId, ZTimelineId +from fixtures.types import Lsn, TenantId, TimelineId log = getLogger("root.safekeeper_async") @@ -103,8 +103,8 @@ async def run_random_worker(stats: WorkerStats, pg: Postgres, worker_id, n_accou async def wait_for_lsn( safekeeper: Safekeeper, - tenant_id: ZTenantId, - timeline_id: ZTimelineId, + tenant_id: TenantId, + timeline_id: TimelineId, wait_lsn: Lsn, polling_interval=1, timeout=60, @@ -155,8 +155,8 @@ async def run_restarts_under_load( test_timeout_at = time.monotonic() + 5 * 60 pg_conn = await pg.connect_async() - tenant_id = ZTenantId(await pg_conn.fetchval("show neon.tenant_id")) - timeline_id = ZTimelineId(await 
pg_conn.fetchval("show neon.timeline_id")) + tenant_id = TenantId(await pg_conn.fetchval("show neon.tenant_id")) + timeline_id = TimelineId(await pg_conn.fetchval("show neon.timeline_id")) bank = BankClient(pg_conn, n_accounts=n_accounts, init_amount=init_amount) # create tables and initial balances diff --git a/test_runner/regress/test_wal_restore.py b/test_runner/regress/test_wal_restore.py index 6fd509c4d1..21921a3bc2 100644 --- a/test_runner/regress/test_wal_restore.py +++ b/test_runner/regress/test_wal_restore.py @@ -9,7 +9,7 @@ from fixtures.neon_fixtures import ( base_dir, pg_distrib_dir, ) -from fixtures.types import ZTenantId +from fixtures.types import TenantId def test_wal_restore( @@ -22,7 +22,7 @@ def test_wal_restore( env.neon_cli.create_branch("test_wal_restore") pg = env.postgres.create_start("test_wal_restore") pg.safe_psql("create table t as select generate_series(1,300000)") - tenant_id = ZTenantId(pg.safe_psql("show neon.tenant_id")[0][0]) + tenant_id = TenantId(pg.safe_psql("show neon.tenant_id")[0][0]) env.neon_cli.pageserver_stop() port = port_distributor.get_port() data_dir = test_output_dir / "pgsql.restored"
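# The tenant/timeline discovery pattern above recurs throughout these regression
# tests. As a hedged sketch, it could be factored into a helper; neon_ids is a
# hypothetical name, not something this patch introduces.

    from typing import Tuple

    from fixtures.neon_fixtures import Postgres
    from fixtures.types import TenantId, TimelineId

    def neon_ids(pg: Postgres) -> Tuple[TenantId, TimelineId]:
        # safe_psql returns rows as a list of tuples, so [0][0] picks out the
        # single scalar value, exactly as the call sites in the tests above do.
        tenant_id = TenantId(pg.safe_psql("show neon.tenant_id")[0][0])
        timeline_id = TimelineId(pg.safe_psql("show neon.timeline_id")[0][0])
        return tenant_id, timeline_id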