Rename SegmentSize -> WalSegmentSize

John Spray
2025-06-25 14:45:52 +01:00
parent 68491147f5
commit 890003f97f
15 changed files with 63 additions and 63 deletions

View File

@@ -6,7 +6,7 @@ use postgres_ffi::v17::waldecoder_handler::WalStreamDecoderHandler;
use postgres_ffi::waldecoder::WalStreamDecoder;
use postgres_versioninfo::PgMajorVersion;
use pprof::criterion::{Output, PProfProfiler};
use utils::lsn::{Lsn, SegmentSize};
use utils::lsn::{Lsn, WalSegmentSize};
const KB: usize = 1024;
@@ -22,7 +22,7 @@ criterion_main!(benches);
fn bench_complete_record(c: &mut Criterion) {
let mut g = c.benchmark_group("complete_record");
for size in [64, KB, 8 * KB, 128 * KB] {
let value_size = size as SegmentSize;
let value_size = size as WalSegmentSize;
// Kind of weird to change the group throughput per benchmark, but it's the only way
// to vary it per benchmark. It works.
g.throughput(criterion::Throughput::Bytes(value_size as u64));
@@ -31,7 +31,7 @@ fn bench_complete_record(c: &mut Criterion) {
});
}
fn run_bench(b: &mut Bencher, size: SegmentSize) -> anyhow::Result<()> {
fn run_bench(b: &mut Bencher, size: WalSegmentSize) -> anyhow::Result<()> {
const PREFIX: &CStr = c"";
let value_size = LogicalMessageGenerator::make_value_size(size, PREFIX);
let value = vec![1; value_size as usize];
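
The benchmark above sets the group throughput per input size so criterion can report bytes per second for each record size. A minimal sketch of that pattern, with a placeholder workload standing in for the repository's WAL record generator (bench_sizes and the vec! body are hypothetical):

use criterion::{BenchmarkId, Criterion, Throughput, criterion_group, criterion_main};

// The renamed alias from utils::lsn, repeated locally so the sketch stands alone.
type WalSegmentSize = u32;

fn bench_sizes(c: &mut Criterion) {
    let mut g = c.benchmark_group("complete_record");
    for size in [64usize, 1024, 8 * 1024, 128 * 1024] {
        let value_size = size as WalSegmentSize;
        // Throughput is reset on every loop iteration, i.e. per benchmark input.
        g.throughput(Throughput::Bytes(value_size as u64));
        g.bench_with_input(BenchmarkId::from_parameter(size), &value_size, |b, &sz| {
            b.iter(|| vec![1u8; sz as usize]); // placeholder workload
        });
    }
    g.finish();
}

criterion_group!(benches, bench_sizes);
criterion_main!(benches);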

View File

@@ -12,7 +12,7 @@
use bytes::Bytes;
use utils::bin_ser::SerializeError;
use utils::lsn::{Lsn, SegmentSize};
use utils::lsn::{Lsn, WalSegmentSize};
pub use postgres_versioninfo::PgMajorVersion;
@@ -241,7 +241,7 @@ pub use v14::xlog_utils::{
pub const BLCKSZ: u16 = 8192;
pub const RELSEG_SIZE: u32 = 1024 * 1024 * 1024 / (BLCKSZ as u32);
pub const XLOG_BLCKSZ: usize = 8192;
pub const WAL_SEGMENT_SIZE: SegmentSize = 16 * 1024 * 1024;
pub const WAL_SEGMENT_SIZE: WalSegmentSize = 16 * 1024 * 1024;
pub const MAX_SEND_SIZE: usize = XLOG_BLCKSZ * 16;
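
These constants fix the WAL geometry the renamed type describes: 8 KiB pages, 16 MiB segments, and a 16-page send buffer. A quick standalone check of that arithmetic, with the constants copied from the hunk above:

type WalSegmentSize = u32;

const XLOG_BLCKSZ: usize = 8192;
const WAL_SEGMENT_SIZE: WalSegmentSize = 16 * 1024 * 1024;
const MAX_SEND_SIZE: usize = XLOG_BLCKSZ * 16;

fn main() {
    // 16 MiB / 8 KiB = 2048 WAL pages per segment.
    assert_eq!(WAL_SEGMENT_SIZE as usize / XLOG_BLCKSZ, 2048);
    // The send buffer covers 16 pages, i.e. 128 KiB.
    assert_eq!(MAX_SEND_SIZE, 128 * 1024);
}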

View File

@@ -2,7 +2,7 @@ use std::ffi::{CStr, CString};
use bytes::{Bytes, BytesMut};
use crc32c::crc32c_append;
use utils::lsn::{Lsn, SegmentSize};
use utils::lsn::{Lsn, WalSegmentSize};
use super::bindings::{RmgrId, XLogLongPageHeaderData, XLogPageHeaderData, XLOG_PAGE_MAGIC};
use super::xlog_utils::{
@@ -39,7 +39,7 @@ impl Record {
// Construct the WAL record header.
let mut header = XLogRecord {
xl_tot_len: XLOG_SIZE_OF_XLOG_RECORD + data_header.len() as SegmentSize + self.data.len() as SegmentSize,
xl_tot_len: XLOG_SIZE_OF_XLOG_RECORD + data_header.len() as WalSegmentSize + self.data.len() as WalSegmentSize,
xl_xid: 0,
xl_prev: prev_lsn.into(),
xl_info: self.info,
@@ -234,10 +234,10 @@ impl LogicalMessageGenerator {
/// Computes how large a value must be to get a record of the given size. Convenience method to
/// construct records of pre-determined size. Panics if the record size is too small.
pub fn make_value_size(record_size: SegmentSize, prefix: &CStr) -> SegmentSize {
pub fn make_value_size(record_size: WalSegmentSize, prefix: &CStr) -> WalSegmentSize {
let xlog_header_size = XLOG_SIZE_OF_XLOG_RECORD;
let lm_header_size = size_of::<XlLogicalMessage>() as SegmentSize;
let prefix_size = prefix.to_bytes_with_nul().len() as SegmentSize;
let lm_header_size = size_of::<XlLogicalMessage>() as WalSegmentSize;
let prefix_size = prefix.to_bytes_with_nul().len() as WalSegmentSize;
let data_header_size = match record_size - xlog_header_size - 2 {
0..=255 => 2,
256..=258 => panic!("impossible record_size {record_size}"),
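
make_value_size inverts the header bookkeeping used for xl_tot_len above: the payload size is the requested record size minus the fixed headers. A standalone illustration of that relationship; the header size here is an assumed placeholder, since the real crate takes it from size_of::<XLogRecord>():

type WalSegmentSize = u32;

// Assumed size, for illustration only.
const XLOG_SIZE_OF_XLOG_RECORD: WalSegmentSize = 24;

fn main() {
    // Forward direction, as in xl_tot_len above: record header + data header + payload.
    let data_header_len: WalSegmentSize = 2; // short data header
    let payload_len: WalSegmentSize = 100;
    let xl_tot_len = XLOG_SIZE_OF_XLOG_RECORD + data_header_len + payload_len;

    // make_value_size goes the other way: choose the payload size that produces a
    // record of a requested total size. (The real function also subtracts the
    // logical-message header and the NUL-terminated prefix, and panics on sizes
    // that are impossible to hit.)
    let record_size = xl_tot_len;
    let recovered_payload = record_size - XLOG_SIZE_OF_XLOG_RECORD - data_header_len;
    assert_eq!(recovered_payload, payload_len);
}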

View File

@@ -35,7 +35,7 @@ use std::time::SystemTime;
use utils::bin_ser::DeserializeError;
use utils::bin_ser::SerializeError;
use utils::lsn::{Lsn, SegmentSize};
use utils::lsn::{Lsn, WalSegmentSize};
pub const XLOG_FNAME_LEN: usize = 24;
pub const XLP_BKP_REMOVABLE: u16 = 0x0004;
@@ -43,9 +43,9 @@ pub const XLP_FIRST_IS_CONTRECORD: u16 = 0x0001;
pub const XLP_REM_LEN_OFFS: usize = 2 + 2 + 4 + 8;
pub const XLOG_RECORD_CRC_OFFS: usize = 4 + 4 + 8 + 1 + 1 + 2;
pub const XLOG_SIZE_OF_XLOG_SHORT_PHD: SegmentSize = size_of::<XLogPageHeaderData>() as SegmentSize;
pub const XLOG_SIZE_OF_XLOG_LONG_PHD: SegmentSize = size_of::<XLogLongPageHeaderData>() as SegmentSize;
pub const XLOG_SIZE_OF_XLOG_RECORD: SegmentSize = size_of::<XLogRecord>() as SegmentSize;
pub const XLOG_SIZE_OF_XLOG_SHORT_PHD: WalSegmentSize = size_of::<XLogPageHeaderData>() as WalSegmentSize;
pub const XLOG_SIZE_OF_XLOG_LONG_PHD: WalSegmentSize = size_of::<XLogLongPageHeaderData>() as WalSegmentSize;
pub const XLOG_SIZE_OF_XLOG_RECORD: WalSegmentSize = size_of::<XLogRecord>() as WalSegmentSize;
#[allow(clippy::identity_op)]
pub const SIZE_OF_XLOG_RECORD_DATA_HEADER_SHORT: usize = 1 * 2;
@@ -58,19 +58,19 @@ pub const SIZE_OF_XLOG_RECORD_DATA_HEADER_SHORT: usize = 1 * 2;
/// in order to let CLOG_TRUNCATE mechanism correctly extend CLOG.
const XID_CHECKPOINT_INTERVAL: u32 = 1024;
pub fn XLogSegmentsPerXLogId(wal_segsz_bytes: SegmentSize) -> XLogSegNo {
pub fn XLogSegmentsPerXLogId(wal_segsz_bytes: WalSegmentSize) -> XLogSegNo {
(0x100000000u64 / wal_segsz_bytes as u64) as XLogSegNo
}
pub fn XLogSegNoOffsetToRecPtr(
segno: XLogSegNo,
offset: u32,
wal_segsz_bytes: SegmentSize,
wal_segsz_bytes: WalSegmentSize,
) -> XLogRecPtr {
segno * (wal_segsz_bytes as u64) + (offset as u64)
}
pub fn XLogFileName(tli: TimeLineID, logSegNo: XLogSegNo, wal_segsz_bytes: SegmentSize) -> String {
pub fn XLogFileName(tli: TimeLineID, logSegNo: XLogSegNo, wal_segsz_bytes: WalSegmentSize) -> String {
format!(
"{:>08X}{:>08X}{:>08X}",
tli,
@@ -81,7 +81,7 @@ pub fn XLogFileName(tli: TimeLineID, logSegNo: XLogSegNo, wal_segsz_bytes: Segme
pub fn XLogFromFileName(
fname: &OsStr,
wal_seg_size: SegmentSize,
wal_seg_size: WalSegmentSize,
) -> anyhow::Result<(XLogSegNo, TimeLineID)> {
if let Some(fname_str) = fname.to_str() {
let tli = u32::from_str_radix(&fname_str[0..8], 16)?;
@@ -111,7 +111,7 @@ pub fn IsPartialXLogFileName(fname: &OsStr) -> bool {
/// If LSN points to the beginning of the page, then shift it to first record,
/// otherwise align on 8-bytes boundary (required for WAL records)
pub fn normalize_lsn(lsn: Lsn, seg_sz: SegmentSize) -> Lsn {
pub fn normalize_lsn(lsn: Lsn, seg_sz: WalSegmentSize) -> Lsn {
if lsn.0 % XLOG_BLCKSZ as u64 == 0 {
let hdr_size = if lsn.0 % seg_sz as u64 == 0 {
XLOG_SIZE_OF_XLOG_LONG_PHD
@@ -227,7 +227,7 @@ pub use timestamp_conversions::{to_pg_timestamp, try_from_pg_timestamp};
// back.
pub fn find_end_of_wal(
data_dir: &Path,
wal_seg_size: SegmentSize,
wal_seg_size: WalSegmentSize,
start_lsn: Lsn, // start reading WAL at this point; must point at record start_lsn.
) -> anyhow::Result<Lsn> {
let mut result = start_lsn;
@@ -438,7 +438,7 @@ pub fn generate_wal_segment(segno: u64, system_id: u64, lsn: Lsn) -> Result<Byte
let page_off = lsn.block_offset();
let seg_off = lsn.segment_offset(WAL_SEGMENT_SIZE);
let first_page_only = seg_off < XLOG_BLCKSZ as SegmentSize;
let first_page_only = seg_off < XLOG_BLCKSZ as WalSegmentSize;
// If first records starts in the middle of the page, pretend in page header
// there is a fake record which ends where first real record starts. This
// makes pg_waldump etc happy.
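
All of these helpers now take WalSegmentSize. With the default 16 MiB segments, XLogSegmentsPerXLogId yields 0x100000000 / 0x1000000 = 256, and XLogFileName packs timeline, log id, and in-log segment into three 8-digit hex fields, as the format! in the hunk shows. A standalone re-implementation for illustration (not the crate's code):

type WalSegmentSize = u32;
type XLogSegNo = u64;
type TimeLineID = u32;

fn xlog_segments_per_xlog_id(wal_segsz_bytes: WalSegmentSize) -> XLogSegNo {
    0x1_0000_0000u64 / wal_segsz_bytes as u64
}

// Mirrors the format! shown above: TLI, log id, segment within the log.
fn xlog_file_name(tli: TimeLineID, seg_no: XLogSegNo, wal_segsz_bytes: WalSegmentSize) -> String {
    let per_id = xlog_segments_per_xlog_id(wal_segsz_bytes);
    format!("{:>08X}{:>08X}{:>08X}", tli, seg_no / per_id, seg_no % per_id)
}

fn main() {
    let seg_sz: WalSegmentSize = 16 * 1024 * 1024;
    assert_eq!(xlog_segments_per_xlog_id(seg_sz), 256);
    // Segment 3 on timeline 1:
    assert_eq!(xlog_file_name(1, 3, seg_sz), "000000010000000000000003");
    // Segment 256 rolls over into the next log id.
    assert_eq!(xlog_file_name(1, 256, seg_sz), "000000010000000100000000");
}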

View File

@@ -18,7 +18,7 @@ pub const XLOG_BLCKSZ: u32 = 8192;
pub struct Lsn(pub u64);
/// Size of a Postgres WAL segment. These are always small enough to fit in a u32.
pub type SegmentSize = u32;
pub type WalSegmentSize = u32;
impl Serialize for Lsn {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
@@ -166,19 +166,19 @@ impl Lsn {
/// Compute the offset into a segment
#[inline]
pub fn segment_offset(self, seg_sz: SegmentSize) -> SegmentSize {
(self.0 % seg_sz as u64) as SegmentSize
pub fn segment_offset(self, seg_sz: WalSegmentSize) -> WalSegmentSize {
(self.0 % seg_sz as u64) as WalSegmentSize
}
/// Compute LSN of the segment start.
#[inline]
pub fn segment_lsn(self, seg_sz: SegmentSize) -> Lsn {
pub fn segment_lsn(self, seg_sz: WalSegmentSize) -> Lsn {
Lsn(self.0 - (self.0 % seg_sz as u64))
}
/// Compute the segment number
#[inline]
pub fn segment_number(self, seg_sz: SegmentSize) -> u64 {
pub fn segment_number(self, seg_sz: WalSegmentSize) -> u64 {
self.0 / seg_sz as u64
}
@@ -199,7 +199,7 @@ impl Lsn {
/// Compute the block offset of the first byte of this Lsn within this
/// segment
#[inline]
pub fn page_offset_in_segment(self, seg_sz: SegmentSize) -> u64 {
pub fn page_offset_in_segment(self, seg_sz: WalSegmentSize) -> u64 {
(self.0 - self.block_offset()) - self.segment_lsn(seg_sz).0
}
@@ -466,7 +466,7 @@ mod tests {
assert_eq!(Lsn(u64::MAX).widening_sub(0u64), i128::from(u64::MAX));
assert_eq!(Lsn(0).widening_sub(u64::MAX), -i128::from(u64::MAX));
let seg_sz: SegmentSize = 16 * 1024 * 1024;
let seg_sz: WalSegmentSize = 16 * 1024 * 1024;
assert_eq!(Lsn(0x1000007).segment_offset(seg_sz), 7);
assert_eq!(Lsn(0x1000007).segment_number(seg_sz), 1u64);
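
The Lsn segment helpers are plain integer arithmetic over the u32-backed segment size. A self-contained sketch reproducing the methods and the test values from the hunk above:

type WalSegmentSize = u32;

#[derive(Clone, Copy, Debug, PartialEq, Eq)]
struct Lsn(u64);

impl Lsn {
    fn segment_offset(self, seg_sz: WalSegmentSize) -> WalSegmentSize {
        (self.0 % seg_sz as u64) as WalSegmentSize
    }
    fn segment_number(self, seg_sz: WalSegmentSize) -> u64 {
        self.0 / seg_sz as u64
    }
    fn segment_lsn(self, seg_sz: WalSegmentSize) -> Lsn {
        Lsn(self.0 - (self.0 % seg_sz as u64))
    }
}

fn main() {
    let seg_sz: WalSegmentSize = 16 * 1024 * 1024;
    // Matches the test assertions in the hunk above.
    assert_eq!(Lsn(0x0100_0007).segment_offset(seg_sz), 7);
    assert_eq!(Lsn(0x0100_0007).segment_number(seg_sz), 1);
    assert_eq!(Lsn(0x0100_0007).segment_lsn(seg_sz), Lsn(0x0100_0000));
}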

View File

@@ -29,7 +29,7 @@ use tokio::io;
use tokio::io::AsyncWrite;
use tokio_tar::{Builder, EntryType, Header};
use tracing::*;
use utils::lsn::{Lsn, SegmentSize};
use utils::lsn::{Lsn, WalSegmentSize};
use crate::context::RequestContext;
use crate::pgdatadir_mapping::Version;
@@ -773,7 +773,7 @@ where
self.lsn,
)
.map_err(|e| anyhow!(e).context("Failed generating wal segment"))?;
if SegmentSize::try_from(wal_seg.len()) != Ok(WAL_SEGMENT_SIZE) {
if WalSegmentSize::try_from(wal_seg.len()) != Ok(WAL_SEGMENT_SIZE) {
return Err(BasebackupError::Server(anyhow!(
"wal_seg.len() != WAL_SEGMENT_SIZE, wal_seg.len()={}",
wal_seg.len()
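
The try_from guard does two things at once: wal_seg.len() is a usize, so the conversion fails if the generated segment cannot even be represented as the u32-backed WalSegmentSize, and the comparison fails if it can but is not exactly WAL_SEGMENT_SIZE. A minimal sketch of the pattern (check_segment_len is a hypothetical wrapper):

type WalSegmentSize = u32;
const WAL_SEGMENT_SIZE: WalSegmentSize = 16 * 1024 * 1024;

fn check_segment_len(len: usize) -> Result<(), String> {
    if WalSegmentSize::try_from(len) != Ok(WAL_SEGMENT_SIZE) {
        return Err(format!("wal_seg.len() != WAL_SEGMENT_SIZE, wal_seg.len()={len}"));
    }
    Ok(())
}

fn main() {
    assert!(check_segment_len(16 * 1024 * 1024).is_ok());
    assert!(check_segment_len(8192).is_err());       // wrong size
    assert!(check_segment_len(usize::MAX).is_err()); // far larger than any u32 segment
}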

View File

@@ -9,7 +9,7 @@ use tokio::fs::OpenOptions;
use tokio::io::{AsyncSeekExt, AsyncWriteExt};
use tracing::{info, warn};
use utils::id::TenantTimelineId;
use utils::lsn::{Lsn, SegmentSize};
use utils::lsn::{Lsn, WalSegmentSize};
use crate::GlobalTimelines;
use crate::control_file::FileStorage;
@@ -171,7 +171,7 @@ pub async fn handle_request(
async fn copy_disk_segments(
tli: &WalResidentTimeline,
wal_seg_size: SegmentSize,
wal_seg_size: WalSegmentSize,
start_lsn: Lsn,
end_lsn: Lsn,
tli_dir_path: &Utf8PathBuf,

View File

@@ -27,7 +27,7 @@ use tracing::{error, info, instrument};
use utils::crashsafe::fsync_async_opt;
use utils::id::{NodeId, TenantTimelineId};
use utils::logging::SecretString;
use utils::lsn::{Lsn, SegmentSize};
use utils::lsn::{Lsn, WalSegmentSize};
use utils::pausable_failpoint;
use crate::control_file::CONTROL_FILE_NAME;
@@ -100,7 +100,7 @@ pub struct SnapshotContext {
pub term: Term,
pub last_log_term: Term,
pub flush_lsn: Lsn,
pub wal_seg_size: SegmentSize,
pub wal_seg_size: WalSegmentSize,
// used to remove WAL hold off in Drop.
pub tli: WalResidentTimeline,
}

View File

@@ -23,7 +23,7 @@ use tokio::time::Instant;
use tokio_util::sync::CancellationToken;
use tracing::*;
use utils::id::{NodeId, TenantId, TenantTimelineId};
use utils::lsn::{Lsn, SegmentSize};
use utils::lsn::{Lsn, WalSegmentSize};
use utils::sync::gate::Gate;
use crate::metrics::{FullTimelineInfo, MISC_OPERATION_SECONDS, WalStorageMetrics};
@@ -338,7 +338,7 @@ impl SharedState {
Ok(Self::new(sk))
}
pub(crate) fn get_wal_seg_size(&self) -> SegmentSize {
pub(crate) fn get_wal_seg_size(&self) -> WalSegmentSize {
self.sk.state().server.wal_seg_size
}
@@ -747,7 +747,7 @@ impl Timeline {
}
/// Returns wal_seg_size.
pub async fn get_wal_seg_size(&self) -> SegmentSize {
pub async fn get_wal_seg_size(&self) -> WalSegmentSize {
self.read_shared_state().await.get_wal_seg_size()
}

View File

@@ -11,7 +11,7 @@ use tokio::fs::File;
use tokio::io::{AsyncRead, AsyncWriteExt};
use tracing::{debug, info, instrument, warn};
use utils::crashsafe::durable_rename;
use utils::lsn::SegmentSize;
use utils::lsn::WalSegmentSize;
use crate::metrics::{
EVICTION_EVENTS_COMPLETED, EVICTION_EVENTS_STARTED, EvictionEvent, NUM_EVICTED_TIMELINES,
@@ -277,12 +277,12 @@ async fn compare_local_segment_with_remote(
async fn do_validation(
mgr: &Manager,
file: &mut File,
wal_seg_size: SegmentSize,
wal_seg_size: WalSegmentSize,
partial: &PartialRemoteSegment,
storage: &GenericRemoteStorage,
) -> anyhow::Result<()> {
let local_size = file.metadata().await?.len();
if SegmentSize::try_from(local_size) != Ok(wal_seg_size) {
if WalSegmentSize::try_from(local_size) != Ok(wal_seg_size) {
anyhow::bail!(
"local segment size is invalid: found {}, expected {}",
local_size,

View File

@@ -20,7 +20,7 @@ use tokio::task::{JoinError, JoinHandle};
use tokio::time::Instant;
use tokio_util::sync::CancellationToken;
use tracing::{Instrument, debug, info, info_span, instrument, warn};
use utils::lsn::{Lsn, SegmentSize};
use utils::lsn::{Lsn, WalSegmentSize};
use crate::SafeKeeperConf;
use crate::control_file::{FileStorage, Storage};
@@ -198,7 +198,7 @@ pub(crate) struct Manager {
// configuration & dependencies
pub(crate) tli: ManagerTimeline,
pub(crate) conf: SafeKeeperConf,
pub(crate) wal_seg_size: SegmentSize,
pub(crate) wal_seg_size: WalSegmentSize,
pub(crate) walsenders: Arc<WalSenders>,
pub(crate) wal_backup: Arc<WalBackup>,

View File

@@ -23,7 +23,7 @@ use tokio::task::JoinHandle;
use tokio_util::sync::CancellationToken;
use tracing::*;
use utils::id::{NodeId, TenantTimelineId};
use utils::lsn::{Lsn, SegmentSize};
use utils::lsn::{Lsn, WalSegmentSize};
use utils::{backoff, pausable_failpoint};
use crate::metrics::{BACKED_UP_SEGMENTS, BACKUP_ERRORS, WAL_BACKUP_TASKS};
@@ -52,7 +52,7 @@ impl WalBackupTaskHandle {
/// Do we have anything to upload to S3, i.e. should safekeepers run backup activity?
pub(crate) fn is_wal_backup_required(
wal_seg_size: SegmentSize,
wal_seg_size: WalSegmentSize,
num_computes: usize,
state: &StateSnapshot,
) -> bool {
@@ -210,7 +210,7 @@ impl WalBackup {
struct WalBackupTask {
timeline: WalResidentTimeline,
timeline_dir: Utf8PathBuf,
wal_seg_size: SegmentSize,
wal_seg_size: WalSegmentSize,
parallel_jobs: usize,
commit_lsn_watch_rx: watch::Receiver<Lsn>,
storage: Arc<GenericRemoteStorage>,
@@ -338,7 +338,7 @@ async fn backup_lsn_range(
storage: Arc<GenericRemoteStorage>,
backup_lsn: &mut Lsn,
end_lsn: Lsn,
wal_seg_size: SegmentSize,
wal_seg_size: WalSegmentSize,
timeline_dir: &Utf8Path,
parallel_jobs: usize,
) -> Result<()> {
@@ -461,12 +461,12 @@ impl Segment {
remote_timeline_path.join(self.object_name())
}
pub fn size(self) -> SegmentSize {
(u64::from(self.end_lsn) - u64::from(self.start_lsn)) as SegmentSize
pub fn size(self) -> WalSegmentSize {
(u64::from(self.end_lsn) - u64::from(self.start_lsn)) as WalSegmentSize
}
}
fn get_segments(start: Lsn, end: Lsn, seg_size: SegmentSize) -> Vec<Segment> {
fn get_segments(start: Lsn, end: Lsn, seg_size: WalSegmentSize) -> Vec<Segment> {
let first_seg = start.segment_number(seg_size);
let last_seg = end.segment_number(seg_size);
@@ -484,7 +484,7 @@ async fn backup_object(
storage: &GenericRemoteStorage,
source_file: &Utf8Path,
target_file: &RemotePath,
size: SegmentSize,
size: WalSegmentSize,
) -> Result<()> {
let file = File::open(&source_file)
.await
@@ -503,7 +503,7 @@ pub(crate) async fn backup_partial_segment(
storage: &GenericRemoteStorage,
source_file: &Utf8Path,
target_file: &RemotePath,
size: SegmentSize,
size: WalSegmentSize,
) -> Result<()> {
let file = File::open(&source_file)
.await
@@ -647,7 +647,7 @@ pub async fn delete_objects(storage: &GenericRemoteStorage, paths: &[RemotePath]
/// Copy segments from one timeline to another. Used in copy_timeline.
pub async fn copy_s3_segments(
storage: &GenericRemoteStorage,
wal_seg_size: SegmentSize,
wal_seg_size: WalSegmentSize,
src_ttid: &TenantTimelineId,
dst_ttid: &TenantTimelineId,
from_segment: XLogSegNo,
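
Segment::size is simply end_lsn - start_lsn narrowed to WalSegmentSize, and get_segments enumerates the segment numbers covered by an LSN range. The hunk does not show the full body, so the sketch below is only an approximation of that enumeration, assuming whole-segment boundaries:

type WalSegmentSize = u32;

#[derive(Debug)]
struct Segment {
    seg_no: u64,
    start_lsn: u64,
    end_lsn: u64,
}

impl Segment {
    fn size(&self) -> WalSegmentSize {
        (self.end_lsn - self.start_lsn) as WalSegmentSize
    }
}

// Approximation of get_segments: enumerate whole segments touched by [start, end).
fn get_segments(start: u64, end: u64, seg_size: WalSegmentSize) -> Vec<Segment> {
    let seg = seg_size as u64;
    let first_seg = start / seg;
    let last_seg = end / seg;
    (first_seg..last_seg)
        .map(|n| Segment { seg_no: n, start_lsn: n * seg, end_lsn: (n + 1) * seg })
        .collect()
}

fn main() {
    let seg_sz: WalSegmentSize = 16 * 1024 * 1024;
    // 48 MiB of WAL starting at zero covers segments 0, 1 and 2, each 16 MiB.
    let segs = get_segments(0, 3 * seg_sz as u64, seg_sz);
    assert_eq!(segs.len(), 3);
    assert!(segs.iter().all(|s| s.size() == seg_sz));
    assert_eq!(segs[2].seg_no, 2);
}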

View File

@@ -29,7 +29,7 @@ use serde::{Deserialize, Serialize};
use tokio_util::sync::CancellationToken;
use tracing::{debug, error, info, instrument, warn};
use utils::id::NodeId;
use utils::lsn::{Lsn, SegmentSize};
use utils::lsn::{Lsn, WalSegmentSize};
use crate::SafeKeeperConf;
use crate::metrics::{
@@ -151,7 +151,7 @@ impl State {
}
pub struct PartialBackup {
wal_seg_size: SegmentSize,
wal_seg_size: WalSegmentSize,
tli: WalResidentTimeline,
conf: SafeKeeperConf,
local_prefix: Utf8PathBuf,

View File

@@ -28,7 +28,7 @@ use tokio::io::{AsyncRead, AsyncReadExt, AsyncSeekExt, AsyncWriteExt};
use tracing::*;
use utils::crashsafe::durable_rename;
use utils::id::TenantTimelineId;
use utils::lsn::{Lsn, SegmentSize};
use utils::lsn::{Lsn, WalSegmentSize};
use crate::metrics::{
REMOVED_WAL_SEGMENTS, WAL_STORAGE_OPERATION_SECONDS, WalStorageMetrics, time_io_closure,
@@ -92,7 +92,7 @@ pub struct PhysicalStorage {
no_sync: bool,
/// Size of WAL segment in bytes.
wal_seg_size: SegmentSize,
wal_seg_size: WalSegmentSize,
pg_version: PgVersionId,
system_id: u64,
@@ -318,7 +318,7 @@ impl PhysicalStorage {
async fn write_in_segment(
&mut self,
segno: u64,
xlogoff: SegmentSize,
xlogoff: WalSegmentSize,
buf: &[u8],
) -> Result<bool> {
let mut file = if let Some(file) = self.file.take() {
@@ -609,7 +609,7 @@ impl Storage for PhysicalStorage {
/// Remove all WAL segments in timeline_dir that match the given predicate.
async fn remove_segments_from_disk(
timeline_dir: &Utf8Path,
wal_seg_size: SegmentSize,
wal_seg_size: WalSegmentSize,
remove_predicate: impl Fn(XLogSegNo) -> bool,
) -> Result<()> {
let _timer = WAL_STORAGE_OPERATION_SECONDS
@@ -650,7 +650,7 @@ async fn remove_segments_from_disk(
pub struct WalReader {
remote_path: RemotePath,
timeline_dir: Utf8PathBuf,
wal_seg_size: SegmentSize,
wal_seg_size: WalSegmentSize,
pos: Lsn,
wal_segment: Option<Pin<Box<dyn AsyncRead + Send + Sync>>>,
@@ -838,7 +838,7 @@ impl WalReader {
pub(crate) async fn open_wal_file(
timeline_dir: &Utf8Path,
segno: XLogSegNo,
wal_seg_size: SegmentSize,
wal_seg_size: WalSegmentSize,
) -> Result<(tokio::fs::File, bool)> {
let (wal_file_path, wal_file_partial_path) = wal_file_paths(timeline_dir, segno, wal_seg_size);
@@ -865,7 +865,7 @@ pub(crate) async fn open_wal_file(
pub fn wal_file_paths(
timeline_dir: &Utf8Path,
segno: XLogSegNo,
wal_seg_size: SegmentSize,
wal_seg_size: WalSegmentSize,
) -> (Utf8PathBuf, Utf8PathBuf) {
let wal_file_name = XLogFileName(PG_TLI, segno, wal_seg_size);
let wal_file_path = timeline_dir.join(wal_file_name.clone());
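
open_wal_file and wal_file_paths map a segment number to on-disk paths via XLogFileName, returning both the complete-segment path and a second path for a partially written segment. The suffix for the partial path is not visible in this hunk; the ".partial" convention below is an assumption. A rough sketch:

use std::path::{Path, PathBuf};

type WalSegmentSize = u32;
type XLogSegNo = u64;

// See the xlog_file_name sketch earlier; repeated here so the example stands alone.
fn xlog_file_name(tli: u32, seg_no: XLogSegNo, seg_sz: WalSegmentSize) -> String {
    let per_id = 0x1_0000_0000u64 / seg_sz as u64;
    format!("{:>08X}{:>08X}{:>08X}", tli, seg_no / per_id, seg_no % per_id)
}

// Assumed layout: "<name>" for complete segments, "<name>.partial" for in-progress ones.
fn wal_file_paths(timeline_dir: &Path, segno: XLogSegNo, seg_sz: WalSegmentSize) -> (PathBuf, PathBuf) {
    let name = xlog_file_name(1, segno, seg_sz);
    let full = timeline_dir.join(&name);
    let partial = timeline_dir.join(format!("{name}.partial"));
    (full, partial)
}

fn main() {
    let (full, partial) = wal_file_paths(Path::new("/tmp/tl"), 3, 16 * 1024 * 1024);
    println!("{} / {}", full.display(), partial.display());
}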

View File

@@ -13,7 +13,7 @@ use serde::Serialize;
use tokio_postgres::types::PgLsn;
use tracing::{debug, error, info};
use utils::id::{TenantId, TenantTimelineId, TimelineId};
use utils::lsn::{Lsn, SegmentSize};
use utils::lsn::{Lsn, WalSegmentSize};
use crate::cloud_admin_api::CloudAdminApiClient;
use crate::metadata_stream::stream_listing;
@@ -22,7 +22,7 @@ use crate::{
};
/// Generally we should ask safekeepers, but so far we use everywhere default 16MB.
const WAL_SEGSIZE: SegmentSize = 16 * 1024 * 1024;
const WAL_SEGSIZE: WalSegmentSize = 16 * 1024 * 1024;
#[derive(Serialize)]
pub struct MetadataSummary {