diff --git a/pageserver/src/page_service.rs b/pageserver/src/page_service.rs
index 079f477f75..77c320a181 100644
--- a/pageserver/src/page_service.rs
+++ b/pageserver/src/page_service.rs
@@ -733,17 +733,10 @@ impl PageServerHandler {
         let latest_gc_cutoff_lsn = timeline.tline.get_latest_gc_cutoff_lsn();
         let lsn =
             Self::wait_or_get_last_lsn(timeline, req.lsn, req.latest, &latest_gc_cutoff_lsn)?;
-        let all_rels = timeline.list_rels(pg_constants::DEFAULTTABLESPACE_OID, req.dbnode, lsn)?;
-        let mut total_blocks: i64 = 0;
+        let total_blocks =
+            timeline.get_db_size(pg_constants::DEFAULTTABLESPACE_OID, req.dbnode, lsn)?;
 
-        for rel in all_rels {
-            if rel.forknum == 0 {
-                let n_blocks = timeline.get_rel_size(rel, lsn).unwrap_or(0);
-                total_blocks += n_blocks as i64;
-            }
-        }
-
-        let db_size = total_blocks * pg_constants::BLCKSZ as i64;
+        let db_size = total_blocks as i64 * pg_constants::BLCKSZ as i64;
 
         Ok(PagestreamBeMessage::DbSize(PagestreamDbSizeResponse {
             db_size,
diff --git a/pageserver/src/pgdatadir_mapping.rs b/pageserver/src/pgdatadir_mapping.rs
index 59a53d68a1..ce305a55f4 100644
--- a/pageserver/src/pgdatadir_mapping.rs
+++ b/pageserver/src/pgdatadir_mapping.rs
@@ -123,6 +123,19 @@ impl<R: Repository> DatadirTimeline<R> {
         self.tline.get(key, lsn)
     }
 
+    // Get size of a database in blocks
+    pub fn get_db_size(&self, spcnode: Oid, dbnode: Oid, lsn: Lsn) -> Result<usize> {
+        let mut total_blocks = 0;
+
+        let rels = self.list_rels(spcnode, dbnode, lsn)?;
+
+        for rel in rels {
+            let n_blocks = self.get_rel_size(rel, lsn)?;
+            total_blocks += n_blocks as usize;
+        }
+        Ok(total_blocks)
+    }
+
     /// Get size of a relation file
     pub fn get_rel_size(&self, tag: RelTag, lsn: Lsn) -> Result<BlockNumber> {
         ensure!(tag.relnode != 0, "invalid relnode");
@@ -667,6 +680,10 @@ impl<'a, R: Repository> DatadirModification<'a, R> {
     }
 
     pub fn drop_dbdir(&mut self, spcnode: Oid, dbnode: Oid) -> Result<()> {
+        let req_lsn = self.tline.get_last_record_lsn();
+
+        let total_blocks = self.tline.get_db_size(spcnode, dbnode, req_lsn)?;
+
         // Remove entry from dbdir
         let buf = self.get(DBDIR_KEY)?;
         let mut dir = DbDirectory::des(&buf)?;
@@ -680,7 +697,8 @@ impl<'a, R: Repository> DatadirModification<'a, R> {
             );
         }
 
-        // FIXME: update pending_nblocks
+        // Update logical database size.
+        self.pending_nblocks -= total_blocks as isize;
 
         // Delete all relations and metadata files for the spcnode/dnode
         self.delete(dbdir_key_range(spcnode, dbnode));
diff --git a/test_runner/batch_others/test_createdropdb.py b/test_runner/batch_others/test_createdropdb.py
index 392e5a6fd4..151ce997ee 100644
--- a/test_runner/batch_others/test_createdropdb.py
+++ b/test_runner/batch_others/test_createdropdb.py
@@ -35,9 +35,14 @@ def test_createdb(neon_simple_env: NeonEnv):
     with closing(db.connect(dbname='foodb')) as conn:
         with conn.cursor() as cur:
             # Check database size in both branches
-            cur.execute(
-                'select pg_size_pretty(pg_database_size(%s)), pg_size_pretty(sum(pg_relation_size(oid))) from pg_class where relisshared is false;',
-                ('foodb', ))
+            cur.execute("""
+                select pg_size_pretty(pg_database_size('foodb')),
+                       pg_size_pretty(
+                            sum(pg_relation_size(oid, 'main'))
+                            +sum(pg_relation_size(oid, 'vm'))
+                            +sum(pg_relation_size(oid, 'fsm'))
+                       ) FROM pg_class where relisshared is false
+            """)
             res = cur.fetchone()
             # check that dbsize equals sum of all relation sizes, excluding shared ones
             # This is how we define dbsize in neon for now
diff --git a/test_runner/batch_others/test_timeline_size.py b/test_runner/batch_others/test_timeline_size.py
index 70dbe8ac4a..5734091757 100644
--- a/test_runner/batch_others/test_timeline_size.py
+++ b/test_runner/batch_others/test_timeline_size.py
@@ -8,7 +8,6 @@ import time
 
 def test_timeline_size(neon_simple_env: NeonEnv):
     env = neon_simple_env
-    # Branch at the point where only 100 rows were inserted
     new_timeline_id = env.neon_cli.create_branch('test_timeline_size', 'empty')
 
     client = env.pageserver.http_client()
@@ -23,7 +22,6 @@ def test_timeline_size(neon_simple_env: NeonEnv):
         with conn.cursor() as cur:
             cur.execute("SHOW neon.timeline_id")
 
-            # Create table, and insert the first 100 rows
            cur.execute("CREATE TABLE foo (t text)")
             cur.execute("""
                 INSERT INTO foo
@@ -43,6 +41,51 @@ def test_timeline_size(neon_simple_env: NeonEnv):
         "current_logical_size_non_incremental"]
 
 
+def test_timeline_size_createdropdb(neon_simple_env: NeonEnv):
+    env = neon_simple_env
+    new_timeline_id = env.neon_cli.create_branch('test_timeline_size', 'empty')
+
+    client = env.pageserver.http_client()
+    timeline_details = assert_local(client, env.initial_tenant, new_timeline_id)
+    assert timeline_details['local']['current_logical_size'] == timeline_details['local'][
+        'current_logical_size_non_incremental']
+
+    pgmain = env.postgres.create_start("test_timeline_size")
+    log.info("postgres is running on 'test_timeline_size' branch")
+
+    with closing(pgmain.connect()) as conn:
+        with conn.cursor() as cur:
+            cur.execute("SHOW neon.timeline_id")
+
+            res = assert_local(client, env.initial_tenant, new_timeline_id)
+            local_details = res['local']
+            assert local_details["current_logical_size"] == local_details[
+                "current_logical_size_non_incremental"]
+
+            cur.execute('CREATE DATABASE foodb')
+            with closing(pgmain.connect(dbname='foodb')) as conn:
+                with conn.cursor() as cur2:
+
+                    cur2.execute("CREATE TABLE foo (t text)")
+                    cur2.execute("""
+                        INSERT INTO foo
+                            SELECT 'long string to consume some space' || g
+                            FROM generate_series(1, 10) g
+                    """)
+
+                    res = assert_local(client, env.initial_tenant, new_timeline_id)
+                    local_details = res['local']
+                    assert local_details["current_logical_size"] == local_details[
+                        "current_logical_size_non_incremental"]
+
+            cur.execute('DROP DATABASE foodb')
+
+            res = assert_local(client, env.initial_tenant, new_timeline_id)
+            local_details = res['local']
+            assert local_details["current_logical_size"] == local_details[
+                "current_logical_size_non_incremental"]
+
+
 # wait until received_lsn_lag is 0
 def wait_for_pageserver_catchup(pgmain: Postgres, polling_interval=1, timeout=60):
     started_at = time.time()