From 290f007b8ea9ceb243ff536dfabfdcb847980743 Mon Sep 17 00:00:00 2001
From: "Alex Chi Z." <4198311+skyzh@users.noreply.github.com>
Date: Tue, 18 Feb 2025 10:43:33 -0500
Subject: [PATCH] Revert "feat(pageserver): repartition on L0-L1 boundary (#10548)" (#10870)

This reverts commit 443c8d0b4bfead651ebbbade5dcb49c6cba00ee6.

## Problem

We observe a massive number of compaction errors.

## Summary of changes

If a tenant never writes any L1 layers (i.e., it accumulates L0 layers whose count stays below the L0 compaction threshold), image creation will always fail: the L0-L1 boundary LSN stays pinned at the start of the oldest L0 layer and never advances. It is therefore not correct to simply use the disk_consistent_lsn or the L0/L1 boundary LSN for image creation; this revert goes back to repartitioning at last_record_lsn.
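To make the failure mode concrete, here is a minimal, runnable sketch. The `Lsn` and `L0Layer` types below are simplified stand-ins invented for illustration; the real computation is the `l0_l1_boundary_lsn` block removed from `compaction.rs` in the diff below.

```rust
// A sketch of the reverted boundary computation. `Lsn` and `L0Layer` are
// hypothetical simplified types, not the pageserver's real ones.

#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
struct Lsn(u64);

/// Stand-in for an L0 delta layer; only the start of its LSN range matters here.
struct L0Layer {
    lsn_start: Lsn,
}

/// The reverted logic: repartition at the start of the oldest L0 layer,
/// falling back to disk_consistent_lsn when there are no L0 layers at all.
fn l0_l1_boundary_lsn(
    level0_deltas: &[L0Layer],
    disk_consistent_lsn: Lsn,
    ancestor_lsn: Lsn,
) -> Lsn {
    level0_deltas
        .iter()
        .map(|l| l.lsn_start)
        .min()
        .unwrap_or(disk_consistent_lsn)
        .max(ancestor_lsn)
}

fn main() {
    // A tenant that wrote a few L0 layers but never enough to cross the L0
    // compaction threshold, so no L1 layers are ever produced.
    let l0s = [
        L0Layer { lsn_start: Lsn(0x10) },
        L0Layer { lsn_start: Lsn(0x20) },
    ];

    // The boundary stays pinned at the oldest L0 start LSN (0x10) no matter
    // how far disk_consistent_lsn (0x30 here) advances, so repartitioning and
    // image creation keep targeting the same stale LSN.
    assert_eq!(l0_l1_boundary_lsn(&l0s, Lsn(0x30), Lsn(0)), Lsn(0x10));
}
```

Since the L0 layers are never compacted away, every compaction pass recomputes the same boundary, so the error repeats on every iteration.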
---
 pageserver/src/tenant.rs                     |  18 +-
 pageserver/src/tenant/timeline/compaction.rs | 156 ++++++++----------
 .../regress/test_layers_from_future.py       |   3 -
 3 files changed, 69 insertions(+), 108 deletions(-)

diff --git a/pageserver/src/tenant.rs b/pageserver/src/tenant.rs
index 5a2c5c0c46..bab1a02527 100644
--- a/pageserver/src/tenant.rs
+++ b/pageserver/src/tenant.rs
@@ -7846,18 +7846,6 @@ mod tests {
         }
         tline.freeze_and_flush().await?;

-        // Force layers to L1
-        tline
-            .compact(
-                &cancel,
-                {
-                    let mut flags = EnumSet::new();
-                    flags.insert(CompactFlags::ForceL0Compaction);
-                    flags
-                },
-                &ctx,
-            )
-            .await?;

         if iter % 5 == 0 {
             let (_, before_delta_file_accessed) =
@@ -7870,7 +7858,6 @@
                     let mut flags = EnumSet::new();
                     flags.insert(CompactFlags::ForceImageLayerCreation);
                     flags.insert(CompactFlags::ForceRepartition);
-                    flags.insert(CompactFlags::ForceL0Compaction);
                     flags
                 },
                 &ctx,
@@ -8317,8 +8304,6 @@

         let cancel = CancellationToken::new();

-        // Image layer creation happens on the disk_consistent_lsn so we need to force set it now.
-        tline.force_set_disk_consistent_lsn(Lsn(0x40));
         tline
             .compact(
                 &cancel,
@@ -8332,7 +8317,8 @@
             )
             .await
             .unwrap();
-        // Image layers are created at repartition LSN
+
+        // Image layers are created at last_record_lsn
         let images = tline
             .inspect_image_layers(Lsn(0x40), &ctx, io_concurrency.clone())
             .await
diff --git a/pageserver/src/tenant/timeline/compaction.rs b/pageserver/src/tenant/timeline/compaction.rs
index 6931f360a4..e1e3eabb90 100644
--- a/pageserver/src/tenant/timeline/compaction.rs
+++ b/pageserver/src/tenant/timeline/compaction.rs
@@ -692,21 +692,6 @@ impl Timeline {

         // Define partitioning schema if needed

-        let l0_l1_boundary_lsn = {
-            // We do the repartition on the L0-L1 boundary. All data below the boundary
-            // are compacted by L0 with low read amplification, thus making the `repartition`
-            // function run fast.
-            let guard = self.layers.read().await;
-            let l0_min_lsn = guard
-                .layer_map()?
-                .level0_deltas()
-                .iter()
-                .map(|l| l.get_lsn_range().start)
-                .min()
-                .unwrap_or(self.get_disk_consistent_lsn());
-            l0_min_lsn.max(self.get_ancestor_lsn())
-        };
-
         // 1. L0 Compact
         let l0_outcome = {
             let timer = self.metrics.compact_time_histo.start_timer();
@@ -733,86 +718,79 @@
             return Ok(CompactionOutcome::YieldForL0);
         }

-        if l0_l1_boundary_lsn < self.partitioning.read().1 {
-            // We never go backwards when repartition and create image layers.
-            info!("skipping image layer generation because repartition LSN is greater than L0-L1 boundary LSN.");
-        } else {
-            // 2. Repartition and create image layers if necessary
-            match self
-                .repartition(
-                    l0_l1_boundary_lsn,
-                    self.get_compaction_target_size(),
-                    options.flags,
-                    ctx,
-                )
-                .await
-            {
-                Ok(((dense_partitioning, sparse_partitioning), lsn)) => {
-                    // Disables access_stats updates, so that the files we read remain candidates for eviction after we're done with them
-                    let image_ctx = RequestContextBuilder::extend(ctx)
-                        .access_stats_behavior(AccessStatsBehavior::Skip)
-                        .build();
+        // 2. Repartition and create image layers if necessary
+        match self
+            .repartition(
+                self.get_last_record_lsn(),
+                self.get_compaction_target_size(),
+                options.flags,
+                ctx,
+            )
+            .await
+        {
+            Ok(((dense_partitioning, sparse_partitioning), lsn)) => {
+                // Disables access_stats updates, so that the files we read remain candidates for eviction after we're done with them
+                let image_ctx = RequestContextBuilder::extend(ctx)
+                    .access_stats_behavior(AccessStatsBehavior::Skip)
+                    .build();

-                    let mut partitioning = dense_partitioning;
-                    partitioning
-                        .parts
-                        .extend(sparse_partitioning.into_dense().parts);
+                let mut partitioning = dense_partitioning;
+                partitioning
+                    .parts
+                    .extend(sparse_partitioning.into_dense().parts);

-                    // 3. Create new image layers for partitions that have been modified "enough".
-                    let (image_layers, outcome) = self
-                        .create_image_layers(
-                            &partitioning,
-                            lsn,
-                            if options
-                                .flags
-                                .contains(CompactFlags::ForceImageLayerCreation)
-                            {
-                                ImageLayerCreationMode::Force
-                            } else {
-                                ImageLayerCreationMode::Try
-                            },
-                            &image_ctx,
-                            self.last_image_layer_creation_status
-                                .load()
-                                .as_ref()
-                                .clone(),
-                            !options.flags.contains(CompactFlags::NoYield),
-                        )
-                        .await
-                        .inspect_err(|err| {
-                            if let CreateImageLayersError::GetVectoredError(
-                                GetVectoredError::MissingKey(_),
-                            ) = err
-                            {
-                                critical!("missing key during compaction: {err:?}");
-                            }
-                        })?;
+                // 3. Create new image layers for partitions that have been modified "enough".
+                let (image_layers, outcome) = self
+                    .create_image_layers(
+                        &partitioning,
+                        lsn,
+                        if options
+                            .flags
+                            .contains(CompactFlags::ForceImageLayerCreation)
+                        {
+                            ImageLayerCreationMode::Force
+                        } else {
+                            ImageLayerCreationMode::Try
+                        },
+                        &image_ctx,
+                        self.last_image_layer_creation_status
+                            .load()
+                            .as_ref()
+                            .clone(),
+                        !options.flags.contains(CompactFlags::NoYield),
+                    )
+                    .await
+                    .inspect_err(|err| {
+                        if let CreateImageLayersError::GetVectoredError(
+                            GetVectoredError::MissingKey(_),
+                        ) = err
+                        {
+                            critical!("missing key during compaction: {err:?}");
+                        }
+                    })?;

-                    self.last_image_layer_creation_status
-                        .store(Arc::new(outcome.clone()));
+                self.last_image_layer_creation_status
+                    .store(Arc::new(outcome.clone()));

-                    self.upload_new_image_layers(image_layers)?;
-                    if let LastImageLayerCreationStatus::Incomplete { .. } = outcome {
-                        // Yield and do not do any other kind of compaction.
-                        info!("skipping shard ancestor compaction due to pending image layer generation tasks (preempted by L0 compaction).");
-                        return Ok(CompactionOutcome::YieldForL0);
-                    }
-                }
-                Err(err) => {
-                    // no partitioning? This is normal, if the timeline was just created
-                    // as an empty timeline. Also in unit tests, when we use the timeline
-                    // as a simple key-value store, ignoring the datadir layout. Log the
-                    // error but continue.
-                    //
-                    // Suppress error when it's due to cancellation
-                    if !self.cancel.is_cancelled() && !err.is_cancelled() {
-                        tracing::error!(
-                            "could not compact, repartitioning keyspace failed: {err:?}"
-                        );
-                    }
-                }
-            };
-        }
+                self.upload_new_image_layers(image_layers)?;
+                if let LastImageLayerCreationStatus::Incomplete { .. } = outcome {
+                    // Yield and do not do any other kind of compaction.
+                    info!("skipping shard ancestor compaction due to pending image layer generation tasks (preempted by L0 compaction).");
+                    return Ok(CompactionOutcome::YieldForL0);
+                }
+            }
+            Err(err) => {
+                // no partitioning? This is normal, if the timeline was just created
+                // as an empty timeline. Also in unit tests, when we use the timeline
+                // as a simple key-value store, ignoring the datadir layout. Log the
+                // error but continue.
+                //
+                // Suppress error when it's due to cancellation
+                if !self.cancel.is_cancelled() && !err.is_cancelled() {
+                    tracing::error!("could not compact, repartitioning keyspace failed: {err:?}");
+                }
+            }
+        };

         let partition_count = self.partitioning.read().0 .0.parts.len();
diff --git a/test_runner/regress/test_layers_from_future.py b/test_runner/regress/test_layers_from_future.py
index 3ac4ed1a3e..872d3dc4cf 100644
--- a/test_runner/regress/test_layers_from_future.py
+++ b/test_runner/regress/test_layers_from_future.py
@@ -20,9 +20,6 @@
 from fixtures.remote_storage import LocalFsStorage, RemoteStorageKind
 from fixtures.utils import query_scalar, wait_until

-@pytest.mark.skip(
-    reason="We won't create future layers any more after https://github.com/neondatabase/neon/pull/10548"
-)
 @pytest.mark.parametrize(
     "attach_mode",
     ["default_generation", "same_generation"],