diff --git a/compute_tools/src/compute.rs b/compute_tools/src/compute.rs
index 8f42cf699b..6608eb5154 100644
--- a/compute_tools/src/compute.rs
+++ b/compute_tools/src/compute.rs
@@ -1286,9 +1286,7 @@ impl ComputeNode {
 
         // In case of error, log and fail the check, but don't crash.
         // We're playing it safe because these errors could be transient
-        // and we don't yet retry. Also being careful here allows us to
-        // be backwards compatible with safekeepers that don't have the
-        // TIMELINE_STATUS API yet.
+        // and we don't yet retry.
         if responses.len() < quorum {
             error!(
                 "failed sync safekeepers check {:?} {:?} {:?}",
diff --git a/control_plane/src/endpoint.rs b/control_plane/src/endpoint.rs
index 91a62b0ca4..792da14a32 100644
--- a/control_plane/src/endpoint.rs
+++ b/control_plane/src/endpoint.rs
@@ -464,7 +464,7 @@ impl Endpoint {
         conf.append("max_connections", "100");
         conf.append("wal_level", "logical");
         // wal_sender_timeout is the maximum time to wait for WAL replication.
-        // It also defines how often the walreciever will send a feedback message to the wal sender.
+        // It also defines how often the walreceiver will send a feedback message to the wal sender.
         conf.append("wal_sender_timeout", "5s");
         conf.append("listen_addresses", &self.pg_address.ip().to_string());
         conf.append("port", &self.pg_address.port().to_string());
diff --git a/docs/pageserver-services.md b/docs/pageserver-services.md
index 11d984eb08..3c430c6236 100644
--- a/docs/pageserver-services.md
+++ b/docs/pageserver-services.md
@@ -75,7 +75,7 @@ CLI examples:
 * AWS S3 : `env AWS_ACCESS_KEY_ID='SOMEKEYAAAAASADSAH*#' AWS_SECRET_ACCESS_KEY='SOMEsEcReTsd292v' ${PAGESERVER_BIN} -c "remote_storage={bucket_name='some-sample-bucket',bucket_region='eu-north-1', prefix_in_bucket='/test_prefix/'}"`
 
 For Amazon AWS S3, a key id and secret access key could be located in `~/.aws/credentials` if awscli was ever configured to work with the desired bucket, on the AWS Settings page for a certain user. Also note, that the bucket names does not contain any protocols when used on AWS.
-For local S3 installations, refer to the their documentation for name format and credentials.
+For local S3 installations, refer to their documentation for name format and credentials.
 
 Similar to other pageserver settings, toml config file can be used to configure either of the storages as backup targets.
 Required sections are:
diff --git a/pageserver/src/deletion_queue/validator.rs b/pageserver/src/deletion_queue/validator.rs
index 363b1427f5..c9bfbd8adc 100644
--- a/pageserver/src/deletion_queue/validator.rs
+++ b/pageserver/src/deletion_queue/validator.rs
@@ -1,5 +1,5 @@
 //! The validator is responsible for validating DeletionLists for execution,
-//! based on whethe the generation in the DeletionList is still the latest
+//! based on whether the generation in the DeletionList is still the latest
 //! generation for a tenant.
 //!
 //! The purpose of validation is to ensure split-brain safety in the cluster
diff --git a/pageserver/src/utilization.rs b/pageserver/src/utilization.rs
index ccfad7a391..0dafa5c4bb 100644
--- a/pageserver/src/utilization.rs
+++ b/pageserver/src/utilization.rs
@@ -1,6 +1,6 @@
 //! An utilization metric which is used to decide on which pageserver to put next tenant.
 //!
-//! The metric is exposed via `GET /v1/utilization`. Refer and maintain it's openapi spec as the
+//! The metric is exposed via `GET /v1/utilization`. Refer and maintain its openapi spec as the
 //! truth.
 
 use std::path::Path;
diff --git a/pageserver/src/walingest.rs b/pageserver/src/walingest.rs
index f852051178..dfd0071ce3 100644
--- a/pageserver/src/walingest.rs
+++ b/pageserver/src/walingest.rs
@@ -1069,7 +1069,7 @@ impl WalIngest {
         // NB: In PostgreSQL, the next-multi-xid stored in the control file is allowed to
         // go to 0, and it's fixed up by skipping to FirstMultiXactId in functions that
         // read it, like GetNewMultiXactId(). This is different from how nextXid is
-        // incremented! nextXid skips over < FirstNormalTransactionId when the the value
+        // incremented! nextXid skips over < FirstNormalTransactionId when the value
         // is stored, so it's never 0 in a checkpoint.
         //
         // I don't know why it's done that way, it seems less error-prone to skip over 0
diff --git a/proxy/src/proxy/connect_compute.rs b/proxy/src/proxy/connect_compute.rs
index 9f642f52ab..ce9774e3eb 100644
--- a/proxy/src/proxy/connect_compute.rs
+++ b/proxy/src/proxy/connect_compute.rs
@@ -110,7 +110,7 @@ where
         debug!(error = ?err, COULD_NOT_CONNECT);
 
         let node_info = if !node_info.cached() || !err.should_retry_wake_compute() {
-            // If we just recieved this from cplane and didn't get it from cache, we shouldn't retry.
+            // If we just received this from cplane and not from the cache, we shouldn't retry.
             // Do not need to retrieve a new node_info, just return the old one.
             if !should_retry(&err, num_retries, compute.retry) {
                 Metrics::get().proxy.retries_metric.observe(
diff --git a/test_runner/fixtures/neon_fixtures.py b/test_runner/fixtures/neon_fixtures.py
index b9fff05c6c..ea1b045b78 100644
--- a/test_runner/fixtures/neon_fixtures.py
+++ b/test_runner/fixtures/neon_fixtures.py
@@ -728,7 +728,7 @@ class NeonEnvBuilder:
         # NB: neon_local rewrites postgresql.conf on each start based on neon_local config. No need to patch it.
         # However, in this new NeonEnv, the pageservers and safekeepers listen on different ports, and the storage
         # controller will currently reject re-attach requests from them because the NodeMetadata isn't identical.
-        # So, from_repo_dir patches up the the storcon database.
+        # So, from_repo_dir patches up the storcon database.
         patch_script_path = self.repo_dir / "storage_controller_db.startup.sql"
         assert not patch_script_path.exists()
         patch_script = ""
diff --git a/test_runner/regress/test_broken_timeline.py b/test_runner/regress/test_broken_timeline.py
index 1209b3a818..0d92bf8406 100644
--- a/test_runner/regress/test_broken_timeline.py
+++ b/test_runner/regress/test_broken_timeline.py
@@ -24,10 +24,7 @@ def test_local_corruption(neon_env_builder: NeonEnvBuilder):
         [
             ".*get_values_reconstruct_data for layer .*",
             ".*could not find data for key.*",
-            ".*is not active. Current state: Broken.*",
             ".*will not become active. Current state: Broken.*",
-            ".*failed to load metadata.*",
-            ".*load failed.*load local timeline.*",
             ".*: layer load failed, assuming permanent failure:.*",
             ".*failed to get checkpoint bytes.*",
             ".*failed to get control bytes.*",
diff --git a/test_runner/regress/test_compaction.py b/test_runner/regress/test_compaction.py
index 963a19d640..76485c8321 100644
--- a/test_runner/regress/test_compaction.py
+++ b/test_runner/regress/test_compaction.py
@@ -687,7 +687,7 @@ def test_sharding_compaction(
     for _i in range(0, 10):
         # Each of these does some writes then a checkpoint: because we set image_creation_threshold to 1,
         # these should result in image layers each time we write some data into a shard, and also shards
-        # recieving less data hitting their "empty image layer" path (wherre they should skip writing the layer,
+        # receiving less data hitting their "empty image layer" path (where they should skip writing the layer,
         # rather than asserting)
         workload.churn_rows(64)
 
diff --git a/test_runner/regress/test_tenants.py b/test_runner/regress/test_tenants.py
index c54dd8b38d..7f32f34d36 100644
--- a/test_runner/regress/test_tenants.py
+++ b/test_runner/regress/test_tenants.py
@@ -76,7 +76,6 @@ def test_tenants_normal_work(neon_env_builder: NeonEnvBuilder):
     neon_env_builder.num_safekeepers = 3
     env = neon_env_builder.init_start()
 
-    """Tests tenants with and without wal acceptors"""
     tenant_1, _ = env.create_tenant()
     tenant_2, _ = env.create_tenant()
 