/*
 * ginbuildempty() -- build an empty gin index in the initialization fork
 */
Datum
ginbuildempty(PG_FUNCTION_ARGS)
{
    Relation    index = (Relation) PG_GETARG_POINTER(0);
    Buffer      RootBuffer,
                MetaBuffer;

    /* An empty GIN index has two pages. */
    MetaBuffer =
        ReadBufferExtended(index, INIT_FORKNUM, P_NEW, RBM_NORMAL, NULL);
    LockBuffer(MetaBuffer, BUFFER_LOCK_EXCLUSIVE);
    RootBuffer =
        ReadBufferExtended(index, INIT_FORKNUM, P_NEW, RBM_NORMAL, NULL);
    LockBuffer(RootBuffer, BUFFER_LOCK_EXCLUSIVE);

    /* Initialize and xlog metabuffer and root buffer. */
    START_CRIT_SECTION();
    GinInitMetabuffer(MetaBuffer);
    MarkBufferDirty(MetaBuffer);
    log_newpage_buffer(MetaBuffer);
    GinInitBuffer(RootBuffer, GIN_LEAF);
    MarkBufferDirty(RootBuffer);
    log_newpage_buffer(RootBuffer);
    END_CRIT_SECTION();

    /* Unlock and release the buffers. */
    UnlockReleaseBuffer(MetaBuffer);
    UnlockReleaseBuffer(RootBuffer);

    PG_RETURN_VOID();
}

/*
 * ginbuildempty() -- build an empty gin index in the initialization fork
 */
void
ginbuildempty(Relation index)
{
    Buffer      RootBuffer,
                MetaBuffer;

    /* An empty GIN index has two pages. */
    MetaBuffer =
        ReadBufferExtended(index, INIT_FORKNUM, P_NEW, RBM_NORMAL, NULL);
    LockBuffer(MetaBuffer, BUFFER_LOCK_EXCLUSIVE);
    RootBuffer =
        ReadBufferExtended(index, INIT_FORKNUM, P_NEW, RBM_NORMAL, NULL);
    LockBuffer(RootBuffer, BUFFER_LOCK_EXCLUSIVE);

    /* Initialize and xlog metabuffer and root buffer. */
    START_CRIT_SECTION();
    GinInitMetabuffer(MetaBuffer);
    MarkBufferDirty(MetaBuffer);
    log_newpage_buffer(MetaBuffer, true);
    GinInitBuffer(RootBuffer, GIN_LEAF);
    MarkBufferDirty(RootBuffer);
    log_newpage_buffer(RootBuffer, false);
    END_CRIT_SECTION();

    /* Unlock and release the buffers. */
    UnlockReleaseBuffer(MetaBuffer);
    UnlockReleaseBuffer(RootBuffer);
}

/*
 * gistbuildempty() -- build an empty gist index in the initialization fork
 */
void
gistbuildempty(Relation index)
{
    Buffer      buffer;

    /* Initialize the root page */
    buffer = ReadBufferExtended(index, INIT_FORKNUM, P_NEW, RBM_NORMAL, NULL);
    LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);

    /* Initialize and xlog buffer */
    START_CRIT_SECTION();
    GISTInitBuffer(buffer, F_LEAF);
    MarkBufferDirty(buffer);
    log_newpage_buffer(buffer, true);
    END_CRIT_SECTION();

    /* Unlock and release the buffer */
    UnlockReleaseBuffer(buffer);
}

/*
 * brinbuildempty() -- build an empty brin index in the initialization fork
 */
void
brinbuildempty(Relation index)
{
    Buffer      metabuf;

    /* An empty BRIN index has a metapage only. */
    metabuf = ReadBufferExtended(index, INIT_FORKNUM, P_NEW, RBM_NORMAL, NULL);
    LockBuffer(metabuf, BUFFER_LOCK_EXCLUSIVE);

    /* Initialize and xlog metabuffer. */
    START_CRIT_SECTION();
    brin_metapage_init(BufferGetPage(metabuf), BrinGetPagesPerRange(index),
                       BRIN_CURRENT_VERSION);
    MarkBufferDirty(metabuf);
    log_newpage_buffer(metabuf, false);
    END_CRIT_SECTION();

    UnlockReleaseBuffer(metabuf);
}

/*
 * gistbuildempty() -- build an empty gist index in the initialization fork
 */
Datum
gistbuildempty(PG_FUNCTION_ARGS)
{
    Relation    index = (Relation) PG_GETARG_POINTER(0);
    Buffer      buffer;

    /* Initialize the root page */
    buffer = ReadBufferExtended(index, INIT_FORKNUM, P_NEW, RBM_NORMAL, NULL);
    LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);

    /* Initialize and xlog buffer */
    START_CRIT_SECTION();
    GISTInitBuffer(buffer, F_LEAF);
    MarkBufferDirty(buffer);
    log_newpage_buffer(buffer, true);
    END_CRIT_SECTION();

    /* Unlock and release the buffer */
    UnlockReleaseBuffer(buffer);

    PG_RETURN_VOID();
}

/*
 * Initialize a page as an empty regular BRIN page, WAL-log this, and record
 * the page in FSM.
 *
 * There are several corner situations in which we extend the relation to
 * obtain a new page and later find that we cannot use it immediately.  When
 * that happens, we don't want to let the page go unrecorded in FSM, because
 * there is no mechanism to get the space back and the index would bloat.
 * Also, because we would not WAL-log the action that would initialize the
 * page, the page would go uninitialized in a standby (or after recovery).
 */
static void
brin_initialize_empty_new_buffer(Relation idxrel, Buffer buffer)
{
    Page        page;

    BRIN_elog((DEBUG2,
               "brin_initialize_empty_new_buffer: initializing blank page %u",
               BufferGetBlockNumber(buffer)));

    START_CRIT_SECTION();
    page = BufferGetPage(buffer);
    brin_page_init(page, BRIN_PAGETYPE_REGULAR);
    MarkBufferDirty(buffer);
    log_newpage_buffer(buffer, true);
    END_CRIT_SECTION();

    /*
     * We update the FSM for this page, but this is not WAL-logged.  This is
     * acceptable because VACUUM will scan the index and update the FSM with
     * pages whose FSM records were forgotten in a crash.
     */
    RecordPageWithFreeSpace(idxrel, BufferGetBlockNumber(buffer),
                            br_page_get_freespace(page));
}

/*
 * FreeSpaceMapTruncateRel - adjust for truncation of a relation.
 *
 * The caller must hold AccessExclusiveLock on the relation, to ensure that
 * other backends receive the smgr invalidation event that this function sends
 * before they access the FSM again.
 *
 * nblocks is the new size of the heap.
 */
void
FreeSpaceMapTruncateRel(Relation rel, BlockNumber nblocks)
{
    BlockNumber new_nfsmblocks;
    FSMAddress  first_removed_address;
    uint16      first_removed_slot;
    Buffer      buf;

    RelationOpenSmgr(rel);

    /*
     * If no FSM has been created yet for this relation, there's nothing to
     * truncate.
     */
    if (!smgrexists(rel->rd_smgr, FSM_FORKNUM))
        return;

    /* Get the location in the FSM of the first removed heap block */
    first_removed_address = fsm_get_location(nblocks, &first_removed_slot);

    /*
     * Zero out the tail of the last remaining FSM page.  If the slot
     * representing the first removed heap block is at a page boundary, as the
     * first slot on the FSM page that first_removed_address points to, we can
     * just truncate that page altogether.
     */
    if (first_removed_slot > 0)
    {
        buf = fsm_readbuf(rel, first_removed_address, false);
        if (!BufferIsValid(buf))
            return;             /* nothing to do; the FSM was already smaller */
        LockBuffer(buf, BUFFER_LOCK_EXCLUSIVE);

        /* NO EREPORT(ERROR) from here till changes are logged */
        START_CRIT_SECTION();

        fsm_truncate_avail(BufferGetPage(buf), first_removed_slot);

        /*
         * Truncation of a relation is WAL-logged at a higher level, and we
         * will be called at WAL replay.  But if checksums are enabled, we
         * still need to write a WAL record to protect against a torn page,
         * in case the page is flushed to disk before the truncation WAL
         * record.  We cannot use MarkBufferDirtyHint here, because that will
         * not dirty the page during recovery.
         */
        MarkBufferDirty(buf);
        if (!InRecovery && RelationNeedsWAL(rel) && XLogHintBitIsNeeded())
            log_newpage_buffer(buf, false);

        END_CRIT_SECTION();

        UnlockReleaseBuffer(buf);

        new_nfsmblocks = fsm_logical_to_physical(first_removed_address) + 1;
    }
    else
    {
        new_nfsmblocks = fsm_logical_to_physical(first_removed_address);
        if (smgrnblocks(rel->rd_smgr, FSM_FORKNUM) <= new_nfsmblocks)
            return;             /* nothing to do; the FSM was already smaller */
    }

    /* Truncate the unused FSM pages, and send smgr inval message */
    smgrtruncate(rel->rd_smgr, FSM_FORKNUM, new_nfsmblocks);

    /*
     * We might as well update the local smgr_fsm_nblocks setting.
     * smgrtruncate sent an smgr cache inval message, which will cause other
     * backends to invalidate their copy of smgr_fsm_nblocks, and this one too
     * at the next command boundary.  But this ensures it isn't outright wrong
     * until then.
     */
    if (rel->rd_smgr)
        rel->rd_smgr->smgr_fsm_nblocks = new_nfsmblocks;

    /*
     * Update upper-level FSM pages to account for the truncation.  This is
     * important because the just-truncated pages were likely marked as
     * all-free, and would be preferentially selected.
     */
    FreeSpaceMapVacuumRange(rel, nblocks, InvalidBlockNumber);
}