Rename relkind to relpersistence

This commit is contained in:
Konstantin Knizhnik
2025-07-19 08:42:16 +03:00
committed by Konstantin Knizhnik
parent ae7b92abeb
commit 28ce584d01
6 changed files with 128 additions and 128 deletions

View File

@@ -19,7 +19,7 @@ OBJS = \
neon_walreader.o \
pagestore_smgr.o \
relsize_cache.o \
relkind_cache.o \
relperst_cache.o \
unstable_extensions.o \
walproposer.o \
walproposer_pg.o \

View File

@@ -489,7 +489,7 @@ _PG_init(void)
/* Stage 1: Define GUCs, and other early initialization */
pg_init_libpagestore();
relsize_hash_init();
relkind_hash_init();
relperst_hash_init();
lfc_init();
pg_init_walproposer();
init_lwlsncache();
@@ -723,7 +723,7 @@ neon_shmem_request_hook(void)
NeonPerfCountersShmemRequest();
PagestoreShmemRequest();
RelsizeCacheShmemRequest();
RelkindCacheShmemRequest();
RelperstCacheShmemRequest();
WalproposerShmemRequest();
LwLsnCacheShmemRequest();
}
@@ -746,7 +746,7 @@ neon_shmem_startup_hook(void)
NeonPerfCountersShmemInit();
PagestoreShmemInit();
RelsizeCacheShmemInit();
RelkindCacheShmemInit();
RelperstCacheShmemInit();
WalproposerShmemInit();
LwLsnCacheShmemInit();

View File

@@ -74,7 +74,7 @@ extern PGDLLEXPORT void LogicalSlotsMonitorMain(Datum main_arg);
extern void LfcShmemRequest(void);
extern void PagestoreShmemRequest(void);
extern void RelsizeCacheShmemRequest(void);
extern void RelkindCacheShmemRequest(void);
extern void RelperstCacheShmemRequest(void);
extern void WalproposerShmemRequest(void);
extern void LwLsnCacheShmemRequest(void);
extern void NeonPerfCountersShmemRequest(void);
@@ -82,7 +82,7 @@ extern void NeonPerfCountersShmemRequest(void);
extern void LfcShmemInit(void);
extern void PagestoreShmemInit(void);
extern void RelsizeCacheShmemInit(void);
extern void RelkindCacheShmemInit(void);
extern void RelperstCacheShmemInit(void);
extern void WalproposerShmemInit(void);
extern void LwLsnCacheShmemInit(void);
extern void NeonPerfCountersShmemInit(void);

View File

@@ -299,18 +299,18 @@ extern void update_cached_relsize(NRelFileInfo rinfo, ForkNumber forknum, BlockN
extern void forget_cached_relsize(NRelFileInfo rinfo, ForkNumber forknum);
/*
* Relation kind enum.
* Relation persistence enum.
*/
typedef enum
{
/* The persistence is not known */
RELKIND_UNKNOWN,
NEON_RELPERSISTENCE_UNKNOWN,
/* The relation is a permanent relation that is WAL-logged normally */
RELKIND_PERMANENT,
NEON_RELPERSISTENCE_PERMANENT,
/* The relation is an unlogged table/index, stored only on local disk */
RELKIND_UNLOGGED,
NEON_RELPERSISTENCE_UNLOGGED,
/*
* The relation is a permanent (index) relation, but it is being built by an in-progress
@@ -319,29 +319,29 @@ typedef enum
* This is currently used for GiST, SP-GiST and GIN indexes, as well as the pgvector
* extension.
*/
RELKIND_UNLOGGED_BUILD
} RelKind;
NEON_RELPERSISTENCE_UNLOGGED_BUILD
} NeonRelPersistence;
/*
* Entry type stored in relkind_hash. We have just one entry for the whole relation, i.e. we don't have separate entries for the individual forks.
* Entry type stored in relperst_hash. We have just one entry for the whole relation, i.e. we don't have separate entries for the individual forks.
* It gets a little complicated with unlogged relations. The main fork of an unlogged relation is considered UNLOGGED, but its init-fork is
* treated as PERMANENT. It is specially checked in neon_write.
*/
typedef struct
{
NRelFileInfo rel;
uint8 relkind; /* See RelKind */
uint8 relperst; /* See NeonRelPersistence */
uint16 access_count;
dlist_node lru_node; /* LRU list node */
} RelKindEntry;
} NeonRelPersistenceEntry;
extern LWLockId finish_unlogged_build_lock;
extern void relkind_hash_init(void);
extern void set_cached_relkind(NRelFileInfo rinfo, RelKind relkind);
extern RelKind get_cached_relkind(NRelFileInfo rinfo);
extern RelKindEntry* pin_cached_relkind(NRelFileInfo rinfo, RelKind relkind);
extern void unpin_cached_relkind(RelKindEntry* entry);
extern void forget_cached_relkind(NRelFileInfo rinfo);
extern void relperst_hash_init(void);
extern void set_cached_relperst(NRelFileInfo rinfo, NeonRelPersistence relperst);
extern NeonRelPersistence get_cached_relperst(NRelFileInfo rinfo);
extern NeonRelPersistenceEntry* pin_cached_relperst(NRelFileInfo rinfo, NeonRelPersistence relperst);
extern void unpin_cached_relperst(NeonRelPersistenceEntry* entry);
extern void forget_cached_relperst(NRelFileInfo rinfo);
#endif /* PAGESTORE_CLIENT_H */

View File

@@ -97,7 +97,7 @@ typedef enum
int debug_compare_local;
static NRelFileInfo unlogged_build_rel_info;
static RelKindEntry* unlogged_build_rel_entry;
static NeonRelPersistenceEntry* unlogged_build_rel_entry;
static UnloggedBuildPhase unlogged_build_phase = UNLOGGED_BUILD_NOT_IN_PROGRESS;
static bool neon_redo_read_buffer_filter(XLogReaderState *record, uint8 block_id);
@@ -878,7 +878,7 @@ neon_unlink(NRelFileInfoBackend rinfo, ForkNumber forkNum, bool isRedo)
if (!NRelFileInfoBackendIsTemp(rinfo))
{
forget_cached_relsize(InfoFromNInfoB(rinfo), forkNum);
forget_cached_relkind(InfoFromNInfoB(rinfo));
forget_cached_relperst(InfoFromNInfoB(rinfo));
}
}
@@ -1603,26 +1603,26 @@ neon_write(SMgrRelation reln, ForkNumber forknum, BlockNumber blocknum, const vo
#endif
{
XLogRecPtr lsn;
RelKind relkind;
NeonRelPersistence relperst;
bool is_locked = false;
NRelFileInfo rinfo = InfoFromSMgrRel(reln);
switch (reln->smgr_relpersistence)
{
case 0:
relkind = get_cached_relkind(rinfo);
if (relkind == RELKIND_UNKNOWN)
relperst = get_cached_relperst(rinfo);
if (relperst == NEON_RELPERSISTENCE_UNKNOWN)
{
/* We do not know relation persistence: let's determine it */
relkind = mdexists(reln, debug_compare_local ? INIT_FORKNUM : forknum) ? RELKIND_UNLOGGED : RELKIND_PERMANENT;
relperst = mdexists(reln, debug_compare_local ? INIT_FORKNUM : forknum) ? NEON_RELPERSISTENCE_UNLOGGED : NEON_RELPERSISTENCE_PERMANENT;
/*
* There is no lock hold between get_cached_relkind and set_cached_relkind.
* We assume that multiple backends can repeat this check and get the same result (there is assert in set_cached_relkind).
* There is no lock hold between get_cached_relperst and set_cached_relperst.
* We assume that multiple backends can repeat this check and get the same result (there is assert in set_cached_relperst).
* And concurrent setting UNLOGGED_BUILD is not possible because only one backend can perform unlogged build.
*/
set_cached_relkind(rinfo, relkind);
set_cached_relperst(rinfo, relperst);
}
if (relkind == RELKIND_UNLOGGED_BUILD)
if (relperst == NEON_RELPERSISTENCE_UNLOGGED_BUILD)
{
/* In case of unlogged build we need to avoid race condition at unlogged build end.
* Obtain shared lock here to prevent backend completing unlogged build from performing cleanup and removing files.
@@ -1630,11 +1630,11 @@ neon_write(SMgrRelation reln, ForkNumber forknum, BlockNumber blocknum, const vo
LWLockAcquire(finish_unlogged_build_lock, LW_SHARED);
is_locked = true;
/*
* Recheck relkind under lock - may be unlogged build is already finished
* Recheck relperst under lock - may be unlogged build is already finished
*/
relkind = get_cached_relkind(rinfo);
relperst = get_cached_relperst(rinfo);
}
if (relkind == RELKIND_UNLOGGED || relkind == RELKIND_UNLOGGED_BUILD)
if (relperst == NEON_RELPERSISTENCE_UNLOGGED || relperst == NEON_RELPERSISTENCE_UNLOGGED_BUILD)
{
#if PG_MAJORVERSION_NUM >= 17
mdwritev(reln, forknum, blocknum, &buffer, 1, skipFsync);
@@ -1646,7 +1646,7 @@ neon_write(SMgrRelation reln, ForkNumber forknum, BlockNumber blocknum, const vo
{
LWLockRelease(finish_unlogged_build_lock);
}
if (relkind == RELKIND_UNLOGGED || relkind == RELKIND_UNLOGGED_BUILD)
if (relperst == NEON_RELPERSISTENCE_UNLOGGED || relperst == NEON_RELPERSISTENCE_UNLOGGED_BUILD)
{
return;
}
@@ -1709,7 +1709,7 @@ static void
neon_writev(SMgrRelation reln, ForkNumber forknum, BlockNumber blkno,
const void **buffers, BlockNumber nblocks, bool skipFsync)
{
RelKind relkind;
NeonRelPersistence relperst;
NRelFileInfo rinfo = InfoFromSMgrRel(reln);
bool is_locked = false;
switch (reln->smgr_relpersistence)
@@ -1719,14 +1719,14 @@ neon_writev(SMgrRelation reln, ForkNumber forknum, BlockNumber blkno,
{
break; /* init fork is always permanent */
}
relkind = get_cached_relkind(rinfo);
if (relkind == RELKIND_UNKNOWN)
relperst = get_cached_relperst(rinfo);
if (relperst == NEON_RELPERSISTENCE_UNKNOWN)
{
/* We do not know relation persistence: let's determine it */
relkind = mdexists(reln, debug_compare_local ? INIT_FORKNUM : forknum) ? RELKIND_UNLOGGED : RELKIND_PERMANENT;
set_cached_relkind(rinfo, relkind);
relperst = mdexists(reln, debug_compare_local ? INIT_FORKNUM : forknum) ? NEON_RELPERSISTENCE_UNLOGGED : NEON_RELPERSISTENCE_PERMANENT;
set_cached_relperst(rinfo, relperst);
}
if (relkind == RELKIND_UNLOGGED_BUILD)
if (relperst == NEON_RELPERSISTENCE_UNLOGGED_BUILD)
{
/* In case of unlogged build we need to avoid race condition at unlogged build end.
* Obtain shared lock here to prevent backend completing unlogged build from performing cleanup and removing files.
@@ -1734,11 +1734,11 @@ neon_writev(SMgrRelation reln, ForkNumber forknum, BlockNumber blkno,
LWLockAcquire(finish_unlogged_build_lock, LW_SHARED);
is_locked = true;
/*
* Recheck relkind under lock - may be unlogged build is already finished
* Recheck relperst under lock - may be unlogged build is already finished
*/
relkind = get_cached_relkind(rinfo);
relperst = get_cached_relperst(rinfo);
}
if (relkind == RELKIND_UNLOGGED || relkind == RELKIND_UNLOGGED_BUILD)
if (relperst == NEON_RELPERSISTENCE_UNLOGGED || relperst == NEON_RELPERSISTENCE_UNLOGGED_BUILD)
{
/* It exists locally. Guess it's unlogged then. */
mdwritev(reln, forknum, blkno, buffers, nblocks, skipFsync);
@@ -1747,7 +1747,7 @@ neon_writev(SMgrRelation reln, ForkNumber forknum, BlockNumber blkno,
{
LWLockRelease(finish_unlogged_build_lock);
}
if (relkind == RELKIND_UNLOGGED || relkind == RELKIND_UNLOGGED_BUILD)
if (relperst == NEON_RELPERSISTENCE_UNLOGGED || relperst == NEON_RELPERSISTENCE_UNLOGGED_BUILD)
{
return;
}
@@ -2036,7 +2036,7 @@ neon_start_unlogged_build(SMgrRelation reln)
case RELPERSISTENCE_TEMP:
case RELPERSISTENCE_UNLOGGED:
unlogged_build_rel_info = InfoFromSMgrRel(reln);
unlogged_build_rel_entry = pin_cached_relkind(unlogged_build_rel_info, RELKIND_UNLOGGED);
unlogged_build_rel_entry = pin_cached_relperst(unlogged_build_rel_info, NEON_RELPERSISTENCE_UNLOGGED);
unlogged_build_phase = UNLOGGED_BUILD_NOT_PERMANENT;
if (debug_compare_local)
{
@@ -2059,7 +2059,7 @@ neon_start_unlogged_build(SMgrRelation reln)
#endif
unlogged_build_rel_info = InfoFromSMgrRel(reln);
unlogged_build_rel_entry = pin_cached_relkind(unlogged_build_rel_info, RELKIND_UNLOGGED_BUILD);
unlogged_build_rel_entry = pin_cached_relperst(unlogged_build_rel_info, NEON_RELPERSISTENCE_UNLOGGED_BUILD);
unlogged_build_phase = UNLOGGED_BUILD_PHASE_1;
/*
@@ -2080,7 +2080,7 @@ unlogged_build_cleanup(void)
{
NRelFileInfoInvalidate(unlogged_build_rel_info);
unlogged_build_phase = UNLOGGED_BUILD_NOT_IN_PROGRESS;
unpin_cached_relkind(unlogged_build_rel_entry);
unpin_cached_relperst(unlogged_build_rel_entry);
unlogged_build_rel_entry = NULL;
}
@@ -2165,7 +2165,7 @@ neon_end_unlogged_build(SMgrRelation reln)
/* Obtain exclusive lock to prevent concurrent writes to the file while we are performing cleanup */
LWLockAcquire(finish_unlogged_build_lock, LW_EXCLUSIVE);
unlogged_build_rel_entry->relkind = RELKIND_PERMANENT;
unlogged_build_rel_entry->relperst = NEON_RELPERSISTENCE_PERMANENT;
LWLockRelease(finish_unlogged_build_lock);
/* Remove local copy */

View File

@@ -1,7 +1,7 @@
/*-------------------------------------------------------------------------
*
* relkind_cache.c
* Cache to track the relkind of relations
* relperst_cache.c
* Cache to track the relperst of relations
*
* Portions Copyright (c) 1996-2021, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
@@ -51,42 +51,42 @@ typedef struct
uint64 pinned;
dlist_head lru; /* double linked list for LRU replacement
* algorithm */
} RelKindHashControl;
} NeonRelPersistenceHashControl;
/*
* Size of a cache entry is 32 bytes. So this default will take about 2 MB,
* which seems reasonable.
*/
#define DEFAULT_RELKIND_HASH_SIZE (64 * 1024)
#define DEFAULT_RELPERST_HASH_SIZE (64 * 1024)
static HTAB *relkind_hash;
static int relkind_hash_size = DEFAULT_RELKIND_HASH_SIZE;
static RelKindHashControl* relkind_ctl;
static HTAB *relperst_hash;
static int relperst_hash_size = DEFAULT_RELPERST_HASH_SIZE;
static NeonRelPersistenceHashControl* relperst_ctl;
LWLockId finish_unlogged_build_lock;
LWLockId relkind_hash_lock;
LWLockId relperst_hash_lock;
/*
* Shared memory registration
*/
void
RelkindCacheShmemRequest(void)
RelperstCacheShmemRequest(void)
{
RequestAddinShmemSpace(sizeof(RelKindHashControl) + hash_estimate_size(relkind_hash_size, sizeof(RelKindEntry)));
RequestNamedLWLockTranche("neon_relkind", 2);
RequestAddinShmemSpace(sizeof(NeonRelPersistenceHashControl) + hash_estimate_size(relperst_hash_size, sizeof(NeonRelPersistenceEntry)));
RequestNamedLWLockTranche("neon_relperst", 2);
}
/*
* Initialize shared memory
*/
void
RelkindCacheShmemInit(void)
RelperstCacheShmemInit(void)
{
static HASHCTL info;
bool found;
relkind_ctl = (RelKindHashControl *) ShmemInitStruct("relkind_hash", sizeof(RelKindHashControl), &found);
relperst_ctl = (NeonRelPersistenceHashControl *) ShmemInitStruct("relperst_hash", sizeof(NeonRelPersistenceHashControl), &found);
if (!found)
{
/*
@@ -94,64 +94,64 @@ RelkindCacheShmemInit(void)
* Or actually twice that, because while performing an unlogged index build, each backend can also be trying to write out a page for another
* relation and hence hold one more entry in the cache pinned. Use MaxConnections instead of MaxBackends because only normal backends can perform unlogged build.
*/
size_t hash_size = Max(2 * MaxConnections, relkind_hash_size);
relkind_hash_lock = (LWLockId) GetNamedLWLockTranche("neon_relkind");
finish_unlogged_build_lock = (LWLockId)(GetNamedLWLockTranche("neon_relkind") + 1);
size_t hash_size = Max(2 * MaxConnections, relperst_hash_size);
relperst_hash_lock = (LWLockId) GetNamedLWLockTranche("neon_relperst");
finish_unlogged_build_lock = (LWLockId)(GetNamedLWLockTranche("neon_relperst") + 1);
info.keysize = sizeof(NRelFileInfo);
info.entrysize = sizeof(RelKindEntry);
relkind_hash = ShmemInitHash("neon_relkind",
info.entrysize = sizeof(NeonRelPersistenceEntry);
relperst_hash = ShmemInitHash("neon_relperst",
hash_size, hash_size,
&info,
HASH_ELEM | HASH_BLOBS);
relkind_ctl->size = 0;
relkind_ctl->hits = 0;
relkind_ctl->misses = 0;
relkind_ctl->pinned = 0;
dlist_init(&relkind_ctl->lru);
relperst_ctl->size = 0;
relperst_ctl->hits = 0;
relperst_ctl->misses = 0;
relperst_ctl->pinned = 0;
dlist_init(&relperst_ctl->lru);
}
}
/*
* Lookup existed entry or create new one
*/
static RelKindEntry*
static NeonRelPersistenceEntry*
get_pinned_entry(NRelFileInfo rinfo)
{
bool found;
RelKindEntry* entry = hash_search(relkind_hash, &rinfo, HASH_ENTER_NULL, &found);
NeonRelPersistenceEntry* entry = hash_search(relperst_hash, &rinfo, HASH_ENTER_NULL, &found);
if (entry == NULL)
{
if (dlist_is_empty(&relkind_ctl->lru))
if (dlist_is_empty(&relperst_ctl->lru))
{
neon_log(PANIC, "Not unpinned relkind entries");
neon_log(PANIC, "Not unpinned relperst entries");
}
else
{
/*
* Remove least recently used element from the hash.
*/
RelKindEntry *victim = dlist_container(RelKindEntry, lru_node, dlist_pop_head_node(&relkind_ctl->lru));
NeonRelPersistenceEntry *victim = dlist_container(NeonRelPersistenceEntry, lru_node, dlist_pop_head_node(&relperst_ctl->lru));
Assert(victim->access_count == 0);
hash_search(relkind_hash, &victim->rel, HASH_REMOVE, &found);
hash_search(relperst_hash, &victim->rel, HASH_REMOVE, &found);
Assert(found);
Assert(relkind_ctl->size > 0);
relkind_ctl->size -= 1;
Assert(relperst_ctl->size > 0);
relperst_ctl->size -= 1;
}
entry = hash_search(relkind_hash, &rinfo, HASH_ENTER_NULL, &found);
entry = hash_search(relperst_hash, &rinfo, HASH_ENTER_NULL, &found);
Assert(!found);
}
if (!found)
{
entry->relkind = RELKIND_UNKNOWN; /* information about relation kind is not yet available */
relkind_ctl->pinned += 1;
entry->relperst = NEON_RELPERSISTENCE_UNKNOWN; /* information about relation kind is not yet available */
relperst_ctl->pinned += 1;
entry->access_count = 1;
relkind_ctl->size += 1;
relperst_ctl->size += 1;
}
else if (entry->access_count++ == 0)
{
dlist_delete(&entry->lru_node);
relkind_ctl->pinned += 1;
relperst_ctl->pinned += 1;
}
return entry;
}
@@ -160,65 +160,65 @@ get_pinned_entry(NRelFileInfo rinfo)
* Unpin entry and place it at the end of LRU list
*/
static void
unpin_entry(RelKindEntry *entry)
unpin_entry(NeonRelPersistenceEntry *entry)
{
Assert(entry->access_count != 0);
if (--entry->access_count == 0)
{
Assert(relkind_ctl->pinned != 0);
relkind_ctl->pinned -= 1;
dlist_push_tail(&relkind_ctl->lru, &entry->lru_node);
Assert(relperst_ctl->pinned != 0);
relperst_ctl->pinned -= 1;
dlist_push_tail(&relperst_ctl->lru, &entry->lru_node);
}
}
/*
* Initialize new entry. This function is used by neon_start_unlogged_build to mark relation involved in unlogged build.
* In case of overflow removes least recently used entry.
* Return pinned entry. It will be released by unpin_cached_relkind at the end of unlogged build.
* Return pinned entry. It will be released by unpin_cached_relperst at the end of unlogged build.
*/
RelKindEntry*
pin_cached_relkind(NRelFileInfo rinfo, RelKind relkind)
NeonRelPersistenceEntry*
pin_cached_relperst(NRelFileInfo rinfo, NeonRelPersistence relperst)
{
RelKindEntry *entry;
NeonRelPersistenceEntry *entry;
LWLockAcquire(relkind_hash_lock, LW_EXCLUSIVE);
LWLockAcquire(relperst_hash_lock, LW_EXCLUSIVE);
entry = get_pinned_entry(rinfo);
entry->relkind = relkind;
entry->relperst = relperst;
LWLockRelease(relkind_hash_lock);
LWLockRelease(relperst_hash_lock);
return entry;
}
/*
* Lookup entry and create new one if not exists. This function is called by neon_write to determine if changes should be written to the local disk.
* In case of overflow removes least recently used entry.
* If entry is found and its relkind is known, then it is stored in provided location and NULL is returned.
* If entry is not found then new one is created, pinned and returned. Entry should be updated using store_cached_relkind.
* If entry is found and its relperst is known, then it is stored in provided location and NULL is returned.
* If entry is not found then new one is created, pinned and returned. Entry should be updated using store_cached_relperst.
* Shared lock is obtained if relation is involved in unlogged build.
*/
RelKind
get_cached_relkind(NRelFileInfo rinfo)
NeonRelPersistence
get_cached_relperst(NRelFileInfo rinfo)
{
RelKindEntry *entry;
RelKind relkind = RELKIND_UNKNOWN;
NeonRelPersistenceEntry *entry;
NeonRelPersistence relperst = NEON_RELPERSISTENCE_UNKNOWN;
LWLockAcquire(relkind_hash_lock, LW_EXCLUSIVE);
LWLockAcquire(relperst_hash_lock, LW_EXCLUSIVE);
entry = hash_search(relkind_hash, &rinfo, HASH_FIND, NULL);
entry = hash_search(relperst_hash, &rinfo, HASH_FIND, NULL);
if (entry != NULL)
{
/* Do pin+unpin entry to move it to the end of LRU list */
if (entry->access_count++ == 0)
{
dlist_delete(&entry->lru_node);
relkind_ctl->pinned += 1;
relperst_ctl->pinned += 1;
}
relkind = entry->relkind;
relperst = entry->relperst;
unpin_entry(entry);
}
LWLockRelease(relkind_hash_lock);
return relkind;
LWLockRelease(relperst_hash_lock);
return relperst;
}
@@ -226,61 +226,61 @@ get_cached_relkind(NRelFileInfo rinfo)
* Store relation kind as a result of mdexists check. Unpin entry.
*/
void
set_cached_relkind(NRelFileInfo rinfo, RelKind relkind)
set_cached_relperst(NRelFileInfo rinfo, NeonRelPersistence relperst)
{
RelKindEntry *entry;
NeonRelPersistenceEntry *entry;
LWLockAcquire(relkind_hash_lock, LW_EXCLUSIVE);
LWLockAcquire(relperst_hash_lock, LW_EXCLUSIVE);
/* Do pin+unpin entry to move it to the end of LRU list */
entry = get_pinned_entry(rinfo);
Assert(entry->relkind == RELKIND_UNKNOWN || entry->relkind == relkind);
entry->relkind = relkind;
Assert(entry->relperst == NEON_RELPERSISTENCE_UNKNOWN || entry->relperst == relperst);
entry->relperst = relperst;
unpin_entry(entry);
LWLockRelease(relkind_hash_lock);
LWLockRelease(relperst_hash_lock);
}
void
unpin_cached_relkind(RelKindEntry* entry)
unpin_cached_relperst(NeonRelPersistenceEntry* entry)
{
if (entry)
{
LWLockAcquire(relkind_hash_lock, LW_EXCLUSIVE);
LWLockAcquire(relperst_hash_lock, LW_EXCLUSIVE);
unpin_entry(entry);
LWLockRelease(relkind_hash_lock);
LWLockRelease(relperst_hash_lock);
}
}
void
forget_cached_relkind(NRelFileInfo rinfo)
forget_cached_relperst(NRelFileInfo rinfo)
{
RelKindEntry *entry;
NeonRelPersistenceEntry *entry;
LWLockAcquire(relkind_hash_lock, LW_EXCLUSIVE);
LWLockAcquire(relperst_hash_lock, LW_EXCLUSIVE);
entry = hash_search(relkind_hash, &rinfo, HASH_REMOVE, NULL);
entry = hash_search(relperst_hash, &rinfo, HASH_REMOVE, NULL);
if (entry)
{
Assert(entry->access_count == 0);
dlist_delete(&entry->lru_node);
relkind_ctl->size -= 1;
relperst_ctl->size -= 1;
}
LWLockRelease(relkind_hash_lock);
LWLockRelease(relperst_hash_lock);
}
void
relkind_hash_init(void)
relperst_hash_init(void)
{
DefineCustomIntVariable("neon.relkind_hash_size",
"Sets the maximum number of cached relation kinds for neon",
DefineCustomIntVariable("neon.relperst_hash_size",
"Sets the maximum number of cached relation persistence for neon",
NULL,
&relkind_hash_size,
DEFAULT_RELKIND_HASH_SIZE,
&relperst_hash_size,
DEFAULT_RELPERST_HASH_SIZE,
1,
INT_MAX,
PGC_POSTMASTER,