diff --git a/src/core/searcher.rs b/src/core/searcher.rs
index dc54da871..3bc359a78 100644
--- a/src/core/searcher.rs
+++ b/src/core/searcher.rs
@@ -188,10 +188,11 @@ impl Searcher {
         let futures = groups
             .into_iter()
-            .map(|((segment_ord, _cache_key), doc_ids)| {
+            .map(|((segment_ord, cache_key), doc_ids)| {
                 // Each group fetches documents from exactly one block and
                 // therefore gets an independent block cache of size one.
-                let store_reader = self.inner.store_readers[segment_ord as usize].fork_cache(1);
+                let store_reader =
+                    self.inner.store_readers[segment_ord as usize].fork_cache(1, &[cache_key]);
                 async move {
                     let mut docs = Vec::new();
diff --git a/src/store/reader.rs b/src/store/reader.rs
index 0c0de9c12..cf3e13663 100644
--- a/src/store/reader.rs
+++ b/src/store/reader.rs
@@ -148,15 +148,26 @@ impl StoreReader {
     }
 
     /// Clones the given store reader with an independent block cache of the given size.
+    ///
+    /// `cache_keys` is used to seed the forked cache from the current cache
+    /// if some blocks are already available.
     #[cfg(feature = "quickwit")]
-    pub(crate) fn fork_cache(&self, cache_num_blocks: usize) -> Self {
-        Self {
+    pub(crate) fn fork_cache(&self, cache_num_blocks: usize, cache_keys: &[CacheKey]) -> Self {
+        let forked = Self {
             decompressor: self.decompressor,
             data: self.data.clone(),
             cache: BlockCache::new(cache_num_blocks),
             skip_index: Arc::clone(&self.skip_index),
             space_usage: self.space_usage.clone(),
-        }
+        };
+
+        for &CacheKey(pos) in cache_keys {
+            if let Some(block) = self.cache.get_from_cache(pos) {
+                forked.cache.put_into_cache(pos, block);
+            }
+        }
+
+        forked
     }
 
     pub(crate) fn block_checkpoints(&self) -> impl Iterator<Item = Checkpoint> + '_ {
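
For reviewers who want to see the seeding behavior in isolation, here is a minimal, self-contained sketch of the pattern the new `fork_cache` body implements. `ToyCache`, `fork_seeded`, and the plain `usize` keys are hypothetical stand-ins, not tantivy's actual `BlockCache`/`CacheKey` types; only the fork-then-seed logic mirrors the patch. The `&self` methods with a `Mutex` inside follow the diff's own cue that `put_into_cache` is called on a non-`mut` binding.

```rust
use std::collections::HashMap;
use std::sync::{Arc, Mutex};

/// Hypothetical stand-in for a block cache: a bounded map from a
/// block's start offset to a shared, already-decompressed block.
struct ToyCache {
    capacity: usize,
    blocks: Mutex<HashMap<usize, Arc<Vec<u8>>>>,
}

impl ToyCache {
    fn new(capacity: usize) -> Self {
        Self { capacity, blocks: Mutex::new(HashMap::new()) }
    }

    fn get(&self, pos: usize) -> Option<Arc<Vec<u8>>> {
        self.blocks.lock().unwrap().get(&pos).cloned()
    }

    fn put(&self, pos: usize, block: Arc<Vec<u8>>) {
        let mut blocks = self.blocks.lock().unwrap();
        if blocks.len() < self.capacity {
            blocks.insert(pos, block);
        }
    }
}

/// The pattern from the patch: create a fresh, smaller cache and copy
/// over only the blocks named by `cache_keys`, when the parent already
/// holds them. Blocks are `Arc`-shared, so no block data is copied or
/// re-decompressed; only refcounts change.
fn fork_seeded(parent: &ToyCache, cache_num_blocks: usize, cache_keys: &[usize]) -> ToyCache {
    let forked = ToyCache::new(cache_num_blocks);
    for &pos in cache_keys {
        if let Some(block) = parent.get(pos) {
            forked.put(pos, block);
        }
    }
    forked
}

fn main() {
    let parent = ToyCache::new(16);
    parent.put(42, Arc::new(vec![1, 2, 3]));

    // Fork a single-block cache seeded with block 42: the fork can serve
    // its one block without re-reading or re-decompressing it.
    let forked = fork_seeded(&parent, 1, &[42]);
    assert!(forked.get(42).is_some());
    // Block 7 was never in the parent, so the fork stays cold for it.
    assert!(forked.get(7).is_none());
}
```

Under these assumptions, seeding costs one lookup and a refcount bump per key, so a warm block in the parent reader spares each per-group forked reader both the read and the decompression for the single block it will touch.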