/*
 * Post-VACUUM cleanup.
 *
 * Result: a palloc'd struct containing statistical info for VACUUM displays.
 */
IndexBulkDeleteResult *
blvacuumcleanup(IndexVacuumInfo *info, IndexBulkDeleteResult *stats)
{
	Relation	index = info->index;
	BlockNumber npages,
				blkno;
	BlockNumber totFreePages;

	if (info->analyze_only)
		return stats;

	if (stats == NULL)
		stats = (IndexBulkDeleteResult *) palloc0(sizeof(IndexBulkDeleteResult));

	/*
	 * Iterate over the pages: insert deleted pages into FSM and collect
	 * statistics.
	 */
	npages = RelationGetNumberOfBlocks(index);
	totFreePages = 0;
	for (blkno = BLOOM_HEAD_BLKNO; blkno < npages; blkno++)
	{
		Buffer		buffer;
		Page		page;

		vacuum_delay_point();

		buffer = ReadBufferExtended(index, MAIN_FORKNUM, blkno,
									RBM_NORMAL, info->strategy);
		LockBuffer(buffer, BUFFER_LOCK_SHARE);
		page = BufferGetPage(buffer);

		if (BloomPageIsDeleted(page))
		{
			RecordFreeIndexPage(index, blkno);
			totFreePages++;
		}
		else
		{
			/* Page is live: count its tuples (num_index_tuples is exact) */
			stats->num_index_tuples += BloomPageGetMaxOffset(page);
		}

		UnlockReleaseBuffer(buffer);
	}

	IndexFreeSpaceMapVacuum(info->index);
	stats->pages_free = totFreePages;
	stats->num_pages = RelationGetNumberOfBlocks(index);

	return stats;
}
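/*
 * Illustrative sketch (not part of this module): the pages recorded free
 * above are reclaimed on the insert path.  This is a simplified version
 * modeled on BloomNewBuffer() in blinsert.c -- an inserter first asks the
 * FSM for a recycled block and only extends the relation when none is
 * available.  The relation-extension lock handling of the real code is
 * omitted for brevity; it assumes storage/indexfsm.h (GetFreeIndexPage)
 * and storage/bufmgr.h are included, as they are in the real file.
 */
static Buffer
bloom_new_buffer_sketch(Relation index)
{
	Buffer		buffer;
	BlockNumber blkno;

	/* First, try pages that blvacuumcleanup() reported to the FSM */
	while ((blkno = GetFreeIndexPage(index)) != InvalidBlockNumber)
	{
		buffer = ReadBuffer(index, blkno);

		/*
		 * Guard against someone else having already recycled this page:
		 * take the lock conditionally and re-check the page state.
		 */
		if (ConditionalLockBuffer(buffer))
		{
			Page		page = BufferGetPage(buffer);

			if (PageIsNew(page) || BloomPageIsDeleted(page))
				return buffer;	/* OK to reuse; caller re-initializes it */

			LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
		}

		/* Can't use it, so release buffer and try again */
		ReleaseBuffer(buffer);
	}

	/* FSM is empty: must extend the file (extension lock omitted here) */
	buffer = ReadBuffer(index, P_NEW);
	LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
	return buffer;
}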
/*
 * Insert all matching tuples into a bitmap.
 */
int64
blgetbitmap(IndexScanDesc scan, TIDBitmap *tbm)
{
	int64		ntids = 0;
	BlockNumber blkno = BLOOM_HEAD_BLKNO,
				npages;
	int			i;
	BufferAccessStrategy bas;
	BloomScanOpaque so = (BloomScanOpaque) scan->opaque;

	if (so->sign == NULL)
	{
		/* New search: have to calculate search signature */
		ScanKey		skey = scan->keyData;

		so->sign = palloc0(sizeof(SignType) * so->state.opts.bloomLength);

		for (i = 0; i < scan->numberOfKeys; i++)
		{
			/*
			 * Assume bloom-indexable operators to be strict, so nothing
			 * could be found for a NULL key.
			 */
			if (skey->sk_flags & SK_ISNULL)
			{
				pfree(so->sign);
				so->sign = NULL;
				return 0;
			}

			/* Add next value to the signature */
			signValue(&so->state, so->sign, skey->sk_argument,
					  skey->sk_attno - 1);

			skey++;
		}
	}

	/*
	 * We're going to read the whole index.  This is why we use an
	 * appropriate buffer access strategy.
	 */
	bas = GetAccessStrategy(BAS_BULKREAD);
	npages = RelationGetNumberOfBlocks(scan->indexRelation);

	for (blkno = BLOOM_HEAD_BLKNO; blkno < npages; blkno++)
	{
		Buffer		buffer;
		Page		page;

		buffer = ReadBufferExtended(scan->indexRelation, MAIN_FORKNUM,
									blkno, RBM_NORMAL, bas);

		LockBuffer(buffer, BUFFER_LOCK_SHARE);
		page = BufferGetPage(buffer);
		TestForOldSnapshot(scan->xs_snapshot, scan->indexRelation, page);

		if (!BloomPageIsDeleted(page))
		{
			OffsetNumber offset,
						maxOffset = BloomPageGetMaxOffset(page);

			for (offset = 1; offset <= maxOffset; offset++)
			{
				BloomTuple *itup = BloomPageGetTuple(&so->state, page, offset);
				bool		res = true;

				/* Check index signature with scan signature */
				for (i = 0; i < so->state.opts.bloomLength; i++)
				{
					if ((itup->sign[i] & so->sign[i]) != so->sign[i])
					{
						res = false;
						break;
					}
				}

				/* Add matching tuples to bitmap */
				if (res)
				{
					tbm_add_tuples(tbm, &itup->heapPtr, 1, true);
					ntids++;
				}
			}
		}

		UnlockReleaseBuffer(buffer);
		CHECK_FOR_INTERRUPTS();
	}

	FreeAccessStrategy(bas);

	return ntids;
}
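/*
 * Standalone sketch (not PostgreSQL code) of the signature test in the
 * inner loop above: a stored tuple matches when every bit of the scan
 * signature is also set in the tuple's signature, i.e. the scan signature
 * is a bitwise subset of the stored one.  This is what gives a Bloom index
 * possible false positives but no false negatives.  SignType and
 * BLOOM_LENGTH here are illustrative stand-ins for the real definitions in
 * bloom.h.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef uint16_t SignType;	/* stand-in for bloom.h's SignType */
#define BLOOM_LENGTH 4		/* stand-in: signature words per tuple */

/* Returns true when 'scan' is a bitwise subset of 'stored'. */
static bool
signature_matches(const SignType *stored, const SignType *scan)
{
	for (int i = 0; i < BLOOM_LENGTH; i++)
	{
		if ((stored[i] & scan[i]) != scan[i])
			return false;	/* some required bit is missing: definite miss */
	}
	return true;			/* possible match (may be a false positive) */
}

int
main(void)
{
	SignType	stored[BLOOM_LENGTH] = {0x00F3, 0x0810, 0x0001, 0x4000};
	SignType	scan[BLOOM_LENGTH] = {0x0011, 0x0800, 0x0000, 0x4000};

	/* every bit of 'scan' is present in 'stored', so this prints "yes" */
	printf("match: %s\n", signature_matches(stored, scan) ? "yes" : "no");
	return 0;
}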
/*
 * Bulk deletion of all index entries pointing to a set of heap tuples.
 * The set of target tuples is specified via a callback routine that tells
 * whether any given heap tuple (identified by ItemPointer) is being deleted.
 *
 * Result: a palloc'd struct containing statistical info for VACUUM displays.
 */
IndexBulkDeleteResult *
blbulkdelete(IndexVacuumInfo *info, IndexBulkDeleteResult *stats,
			 IndexBulkDeleteCallback callback, void *callback_state)
{
	Relation	index = info->index;
	BlockNumber blkno,
				npages;
	FreeBlockNumberArray notFullPage;
	int			countPage = 0;
	BloomState	state;
	Buffer		buffer;
	Page		page;
	BloomMetaPageData *metaData;
	GenericXLogState *gxlogState;

	if (stats == NULL)
		stats = (IndexBulkDeleteResult *) palloc0(sizeof(IndexBulkDeleteResult));

	initBloomState(&state, index);

	/*
	 * Iterate over the pages.  We don't care about concurrently added pages,
	 * they can't contain tuples to delete.
	 */
	npages = RelationGetNumberOfBlocks(index);
	for (blkno = BLOOM_HEAD_BLKNO; blkno < npages; blkno++)
	{
		BloomTuple *itup,
				   *itupPtr,
				   *itupEnd;

		vacuum_delay_point();

		buffer = ReadBufferExtended(index, MAIN_FORKNUM, blkno,
									RBM_NORMAL, info->strategy);

		LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
		gxlogState = GenericXLogStart(index);
		page = GenericXLogRegisterBuffer(gxlogState, buffer, 0);

		/* Ignore empty/deleted pages until blvacuumcleanup() */
		if (PageIsNew(page) || BloomPageIsDeleted(page))
		{
			UnlockReleaseBuffer(buffer);
			GenericXLogAbort(gxlogState);
			continue;
		}

		/*
		 * Iterate over the tuples.  itup points to the current tuple being
		 * scanned, itupPtr points to where to save the next non-deleted
		 * tuple.
		 */
		itup = itupPtr = BloomPageGetTuple(&state, page, FirstOffsetNumber);
		itupEnd = BloomPageGetTuple(&state, page,
									OffsetNumberNext(BloomPageGetMaxOffset(page)));
		while (itup < itupEnd)
		{
			/* Do we have to delete this tuple? */
			if (callback(&itup->heapPtr, callback_state))
			{
				/* Yes; adjust count of tuples that will be left on page */
				BloomPageGetOpaque(page)->maxoff--;
				stats->tuples_removed += 1;
			}
			else
			{
				/* No; copy it to itupPtr++, but skip copy if not needed */
				if (itupPtr != itup)
					memmove((Pointer) itupPtr, (Pointer) itup,
							state.sizeOfBloomTuple);
				itupPtr = BloomPageGetNextTuple(&state, itupPtr);
			}

			itup = BloomPageGetNextTuple(&state, itup);
		}

		/* Assert that we counted correctly */
		Assert(itupPtr == BloomPageGetTuple(&state, page,
											OffsetNumberNext(BloomPageGetMaxOffset(page))));

		/*
		 * Add page to new notFullPage list if we will not mark page as
		 * deleted and there is free space on it.
		 */
		if (BloomPageGetMaxOffset(page) != 0 &&
			BloomPageGetFreeSpace(&state, page) >= state.sizeOfBloomTuple &&
			countPage < BloomMetaBlockN)
			notFullPage[countPage++] = blkno;

		/* Did we delete something? */
		if (itupPtr != itup)
		{
			/* Is it an empty page now? */
			if (BloomPageGetMaxOffset(page) == 0)
				BloomPageSetDeleted(page);
			/* Adjust pd_lower */
			((PageHeader) page)->pd_lower = (Pointer) itupPtr - page;
			/* Finish WAL-logging */
			GenericXLogFinish(gxlogState);
		}
		else
		{
			/* Didn't change anything: abort WAL-logging */
			GenericXLogAbort(gxlogState);
		}

		UnlockReleaseBuffer(buffer);
	}

	/*
	 * Update the metapage's notFullPage list with whatever we found.  Our
	 * info could already be out of date at this point, but blinsert() will
	 * cope if so.
	 */
	buffer = ReadBuffer(index, BLOOM_METAPAGE_BLKNO);
	LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);

	gxlogState = GenericXLogStart(index);
	page = GenericXLogRegisterBuffer(gxlogState, buffer, 0);

	metaData = BloomPageGetMeta(page);
	memcpy(metaData->notFullPage, notFullPage,
		   sizeof(BlockNumber) * countPage);
	metaData->nStart = 0;
	metaData->nEnd = countPage;

	GenericXLogFinish(gxlogState);
	UnlockReleaseBuffer(buffer);

	return stats;
}
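/*
 * Standalone sketch (not PostgreSQL code) of the read/write two-pointer
 * compaction that blbulkdelete performs on fixed-size BloomTuples, shown on
 * a plain int array.  'deletable' plays the role of the VACUUM callback;
 * survivors are slid left so the array stays dense, and the returned count
 * plays the role of the page's maxoff / pd_lower adjustment.
 */
#include <stdbool.h>
#include <stdio.h>

static int
compact(int *items, int n, bool (*deletable)(int))
{
	int		   *rd = items,
			   *wr = items,
			   *end = items + n;

	while (rd < end)
	{
		if (!deletable(*rd))
		{
			if (wr != rd)
				*wr = *rd;		/* memmove() in the real code */
			wr++;
		}
		rd++;
	}
	return (int) (wr - items);	/* surviving entry count */
}

static bool
is_odd(int x)
{
	return (x % 2) != 0;
}

int
main(void)
{
	int			items[] = {1, 2, 3, 4, 5, 6};
	int			n = compact(items, 6, is_odd);

	for (int i = 0; i < n; i++)
		printf("%d ", items[i]);	/* prints: 2 4 6 */
	printf("\n");
	return 0;
}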