chore: remove repetitive words (#7206)

Signed-off-by: availhang <mayangang@outlook.com>
Author: George Ma
Date: 2024-03-25 23:43:02 +08:00
Committed by: GitHub
Parent: 2713142308
Commit: d837ce0686
8 changed files with 8 additions and 8 deletions

@@ -294,7 +294,7 @@ where
 // is in state 'taken' but the thread that would unlock it is
 // not there.
 // 2. A rust object that represented some external resource in the
-// parent now got implicitly copied by the the fork, even though
+// parent now got implicitly copied by the fork, even though
 // the object's type is not `Copy`. The parent program may use
 // non-copyability as way to enforce unique ownership of an
 // external resource in the typesystem. The fork breaks that

@@ -12,7 +12,7 @@
 //!
 //! The endpoint is managed by the `compute_ctl` binary. When an endpoint is
 //! started, we launch `compute_ctl` It synchronizes the safekeepers, downloads
-//! the basebackup from the pageserver to initialize the the data directory, and
+//! the basebackup from the pageserver to initialize the data directory, and
 //! finally launches the PostgreSQL process. It watches the PostgreSQL process
 //! until it exits.
 //!

@@ -247,7 +247,7 @@ fn scenario_4() {
     //
     // This is in total 5000 + 1000 + 5000 + 1000 = 12000
     //
-    // (If we used the the method from the previous scenario, and
+    // (If we used the method from the previous scenario, and
     // kept only snapshot at the branch point, we'd need to keep
     // all the WAL between 10000-18000 on the main branch, so
     // the total size would be 5000 + 1000 + 8000 = 14000. The

@@ -69,7 +69,7 @@ pub struct Config {
     /// should be removed once we have a better solution there.
     sys_buffer_bytes: u64,
-    /// Minimum fraction of total system memory reserved *before* the the cgroup threshold; in
+    /// Minimum fraction of total system memory reserved *before* the cgroup threshold; in
     /// other words, providing a ceiling for the highest value of the threshold by enforcing that
     /// there's at least `cgroup_min_overhead_fraction` of the total memory remaining beyond the
     /// threshold.

@@ -435,7 +435,7 @@ pub(crate) static RESIDENT_PHYSICAL_SIZE_GLOBAL: Lazy<UIntGauge> = Lazy::new(||
 static REMOTE_PHYSICAL_SIZE: Lazy<UIntGaugeVec> = Lazy::new(|| {
     register_uint_gauge_vec!(
         "pageserver_remote_physical_size",
-        "The size of the layer files present in the remote storage that are listed in the the remote index_part.json.",
+        "The size of the layer files present in the remote storage that are listed in the remote index_part.json.",
         // Corollary: If any files are missing from the index part, they won't be included here.
         &["tenant_id", "shard_id", "timeline_id"]
     )

@@ -782,7 +782,7 @@ where
 }
 }
 // NB: don't use `buf.is_empty()` here; it is from the
-// `impl Deref for Slice { Target = [u8] }`; the the &[u8]
+// `impl Deref for Slice { Target = [u8] }`; the &[u8]
 // returned by it only covers the initialized portion of `buf`.
 // Whereas we're interested in ensuring that we filled the entire
 // buffer that the user passed in.

@@ -105,7 +105,7 @@ def test_pageserver_multiple_keys(neon_env_builder: NeonEnvBuilder):
     # The neon_local tool generates one key pair at a hardcoded path by default.
     # As a preparation for our test, move the public key of the key pair into a
     # directory at the same location as the hardcoded path by:
-    # 1. moving the the file at `configured_pub_key_path` to a temporary location
+    # 1. moving the file at `configured_pub_key_path` to a temporary location
     # 2. creating a new directory at `configured_pub_key_path`
     # 3. moving the file from the temporary location into the newly created directory
     configured_pub_key_path = Path(env.repo_dir) / "auth_public_key.pem"

@@ -838,7 +838,7 @@ def test_compaction_waits_for_upload(
     # upload_stuck_layers and the original initdb L0
     client.timeline_checkpoint(tenant_id, timeline_id)
-    # as uploads are paused, the the upload_stuck_layers should still be with us
+    # as uploads are paused, the upload_stuck_layers should still be with us
     for name in upload_stuck_layers:
         path = env.pageserver.timeline_dir(tenant_id, timeline_id) / name
         assert path.exists(), "uploads are stuck still over compaction"