Compare commits


4 Commits

Author               SHA1        Message                                                    Date
Konstantin Knizhnik  6b35f3307f  Update pageserver/src/tenant/layer_map.rs                  2023-04-29 22:26:04 +03:00
                                 Co-authored-by: Joonas Koivunen <joonas@neon.tech>
Konstantin Knizhnik  5f39328a06  Replace duplicate layer in layer map                       2023-04-28 14:49:20 +03:00
Konstantin Knizhnik  fa7e975787  Make clippy happy                                          2023-04-27 16:27:24 +03:00
Konstantin Knizhnik  9b095034bc  Do not return Error in case of duplicate layer detection  2023-04-27 15:40:05 +03:00
4 changed files with 63 additions and 18 deletions


@@ -55,6 +55,7 @@ use anyhow::{bail, Result};
 use std::collections::VecDeque;
 use std::ops::Range;
 use std::sync::Arc;
+use tracing::*;
 use utils::lsn::Lsn;
 
 use historic_layer_coverage::BufferedHistoricLayerCoverage;
@@ -275,18 +276,24 @@ where
     ///
     pub(self) fn insert_historic_noflush(&mut self, layer: Arc<L>) -> anyhow::Result<()> {
         let key = historic_layer_coverage::LayerKey::from(&*layer);
-        if self.historic.contains(&key) {
-            bail!(
-                "Attempt to insert duplicate layer {} in layer map",
-                layer.short_id()
-            );
-        }
-        self.historic.insert(key, Arc::clone(&layer));
+        match self.historic.replace(&key, Arc::clone(&layer), |existing| {
+            !Self::compare_arced_layers(existing, &layer)
+        }) {
+            Replacement::Replaced { .. } => {
+                if Self::is_l0(&layer) {
+                    bail!("Duplicate L0 layer {}", layer.short_id());
+                }
+                warn!("Replace duplicate layer {} in layer map", layer.short_id());
+            }
+            Replacement::Unexpected(_) => bail!("Replace layer with itself is prohibited"),
+            Replacement::NotFound | Replacement::RemovalBuffered => {
+                self.historic.insert(key, Arc::clone(&layer));
+                if Self::is_l0(&layer) {
+                    self.l0_delta_layers.push(layer);
+                }
+            }
+        }
         Ok(())
     }
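The match above depends on Replacement and on BufferedHistoricLayerCoverage::replace, neither of which is shown in this compare view. Below is a minimal, self-contained sketch of the contract implied by the call site and by the buffered-lookup code removed in the next hunk; BufferedCoverage, the string LayerKey, and the replaced field name are hypothetical stand-ins, not the pageserver's actual definitions.

use std::collections::BTreeMap;

// Hypothetical stand-in: the real LayerKey encodes the key range, the LSN
// range, and an is_delta flag; a plain string suffices to show the mechanics.
type LayerKey = String;

enum Replacement<Value> {
    Replaced { replaced: Value }, // an existing entry was swapped for the new one
    Unexpected(Value),            // check_expected rejected the existing entry
    NotFound,                     // nothing stored under this key
    RemovalBuffered,              // a buffered remove shadows this key
}

struct BufferedCoverage<Value> {
    layers: BTreeMap<LayerKey, Value>,         // committed coverage
    buffer: BTreeMap<LayerKey, Option<Value>>, // Some = buffered insert, None = buffered remove
}

impl<Value: Clone> BufferedCoverage<Value> {
    fn replace(
        &mut self,
        layer_key: &LayerKey,
        new: Value,
        check_expected: impl FnOnce(&Value) -> bool,
    ) -> Replacement<Value> {
        // Consult the write buffer first, then the committed map, mirroring
        // the lookup order of the contains() removed in the next hunk.
        let existing = match self.buffer.get(layer_key) {
            Some(None) => return Replacement::RemovalBuffered,
            Some(Some(buffered)) => buffered,
            None => match self.layers.get(layer_key) {
                Some(committed) => committed,
                None => return Replacement::NotFound,
            },
        };
        if !check_expected(existing) {
            return Replacement::Unexpected(existing.clone());
        }
        let replaced = existing.clone();
        self.buffer.insert(layer_key.clone(), Some(new));
        Replacement::Replaced { replaced }
    }
}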


@@ -417,14 +417,6 @@ impl<Value: Clone> BufferedHistoricLayerCoverage<Value> {
         }
     }
 
-    pub fn contains(&self, layer_key: &LayerKey) -> bool {
-        match self.buffer.get(layer_key) {
-            Some(None) => false, // layer remove was buffered
-            Some(_) => true,     // layer insert was buffered
-            None => self.layers.contains_key(layer_key), // no buffered ops for this layer
-        }
-    }
-
     pub fn insert(&mut self, layer_key: LayerKey, value: Value) {
         self.buffer.insert(layer_key, Some(value));
     }
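Continuing with the hypothetical BufferedCoverage from the sketch above, this usage example walks the two paths insert_historic_noflush now takes: a first insert of a key gets NotFound and falls through to a plain insert, while re-inserting the same key is reported as Replaced and merely logged.

fn main() {
    let mut cov = BufferedCoverage::<u32> {
        layers: std::collections::BTreeMap::new(),
        buffer: std::collections::BTreeMap::new(),
    };
    let key = "delta_0040-0080".to_string(); // hypothetical layer key

    // First attempt: nothing committed or buffered, so replace() refuses and
    // the caller falls through to a plain insert (the NotFound arm above).
    assert!(matches!(cov.replace(&key, 1, |_| true), Replacement::NotFound));
    cov.buffer.insert(key.clone(), Some(1)); // what insert() buffers internally

    // Re-inserting the same key, e.g. after compaction re-ran post-restart:
    // replace() succeeds and hands back the displaced value (the Replaced arm).
    assert!(matches!(
        cov.replace(&key, 2, |existing| *existing == 1),
        Replacement::Replaced { replaced: 1 }
    ));
}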


@@ -3300,6 +3300,10 @@ impl Timeline {
         drop(all_keys_iter); // So that deltas_to_compact is no longer borrowed
 
+        fail_point!("compact-level0-phase1-finish", |_| {
+            Err(anyhow::anyhow!("failpoint compact-level0-phase1-finish").into())
+        });
+
         Ok(CompactLevel0Phase1Result {
             new_layers,
             deltas_to_compact,
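For context on this hunk: fail_point! appears to be the macro from the fail crate, which the test below arms remotely through the pageserver's failpoint HTTP API using the "exit" action, killing the process at exactly this spot — new L1 layers flushed, old L0 layers not yet deleted. A runnable sketch of the mechanics, assuming fail = { version = "0.5", features = ["failpoints"] } and anyhow as dependencies (the real code converts the error with .into(); this sketch returns anyhow::Result directly):

fn compact_phase1() -> anyhow::Result<()> {
    // New L1 layers would be flushed before this point; L0 deletion after it.
    // With the failpoint unarmed this is (almost) a no-op; when armed with a
    // "return" action, the enclosing function returns the closure's value.
    fail::fail_point!("compact-level0-phase1-finish", |_| {
        Err(anyhow::anyhow!("failpoint compact-level0-phase1-finish"))
    });
    Ok(())
}

fn main() -> anyhow::Result<()> {
    // The regress test arms the failpoint with "exit", which kills the
    // pageserver; "return" keeps this sketch runnable in-process.
    fail::cfg("compact-level0-phase1-finish", "return").unwrap();
    assert!(compact_phase1().is_err());
    Ok(())
}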


@@ -0,0 +1,42 @@
import time

import pytest
from fixtures.neon_fixtures import NeonEnvBuilder, PgBin


# Test duplicate layer detection
#
# This test sets a failpoint at the end of the first compaction phase:
# after the new L1 layers are flushed but before the L0 layers are deleted.
# After restart, compaction should then generate a duplicate L1 layer.
@pytest.mark.timeout(600)
def test_duplicate_layers(neon_env_builder: NeonEnvBuilder, pg_bin: PgBin):
    env = neon_env_builder.init_start()

    # These warnings are expected when the pageserver is restarted abruptly
    env.pageserver.allowed_errors.append(".*found future image layer.*")
    env.pageserver.allowed_errors.append(".*found future delta layer.*")
    env.pageserver.allowed_errors.append(".*duplicate layer.*")

    pageserver_http = env.pageserver.http_client()

    # Use aggressive compaction and checkpoint settings
    tenant_id, _ = env.neon_cli.create_tenant(
        conf={
            "checkpoint_distance": f"{1024 ** 2}",
            "compaction_target_size": f"{1024 ** 2}",
            "compaction_period": "1 s",
            "compaction_threshold": "3",
        }
    )
    endpoint = env.endpoints.create_start("main", tenant_id=tenant_id)
    connstr = endpoint.connstr(options="-csynchronous_commit=off")
    pg_bin.run_capture(["pgbench", "-i", "-s10", connstr])

    pageserver_http.configure_failpoints(("compact-level0-phase1-finish", "exit"))

    # The "exit" action kills the pageserver mid-compaction, so pgbench fails
    with pytest.raises(Exception):
        pg_bin.run_capture(["pgbench", "-P1", "-N", "-c5", "-T500", "-Mprepared", connstr])

    env.pageserver.stop()
    env.pageserver.start()
    time.sleep(10)  # give compaction time to run after restart