/*
 *	_hash_ovflblkno_to_bitno
 *
 * Convert overflow page block number to bit number for free-page bitmap.
 */
uint32
_hash_ovflblkno_to_bitno(HashMetaPage metap, BlockNumber ovflblkno)
{
	uint32		splitnum = metap->hashm_ovflpoint;
	uint32		i;
	uint32		bitnum;

	/* Determine the split number containing this page */
	for (i = 1; i <= splitnum; i++)
	{
		if (ovflblkno <= (BlockNumber) _hash_get_totalbuckets(i))
			break;				/* oops */
		bitnum = ovflblkno - _hash_get_totalbuckets(i);

		/*
		 * bitnum has to be greater than the number of overflow pages added
		 * in previous split points.  The overflow pages at this split point
		 * (i), if any, start from (_hash_get_totalbuckets(i) +
		 * metap->hashm_spares[i - 1] + 1).
		 */
		if (bitnum > metap->hashm_spares[i - 1] &&
			bitnum <= metap->hashm_spares[i])
			return bitnum - 1;	/* -1 to convert 1-based to 0-based */
	}

	ereport(ERROR,
			(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
			 errmsg("invalid overflow block number %u", ovflblkno)));
	return 0;					/* keep compiler quiet */
}
/*
 * Convert overflow page bit number (its index in the free-page bitmaps)
 * to block number within the index.
 */
static BlockNumber
bitno_to_blkno(HashMetaPage metap, uint32 ovflbitnum)
{
	uint32		splitnum = metap->hashm_ovflpoint;
	uint32		i;

	/* Convert zero-based bitnumber to 1-based page number */
	ovflbitnum += 1;

	/* Determine the split number for this page (must be >= 1) */
	for (i = 1;
		 i < splitnum && ovflbitnum > metap->hashm_spares[i];
		 i++)
		 /* loop */ ;

	/*
	 * Convert to absolute page number by adding the number of bucket pages
	 * that exist before this split point.
	 */
	return (BlockNumber) (_hash_get_totalbuckets(i) + ovflbitnum);
}
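/*
 * A minimal consistency sketch: for any overflow bit number that has
 * actually been allocated (i.e. its 1-based page number does not exceed
 * hashm_spares[hashm_ovflpoint]), the two conversions above should be exact
 * inverses of each other.  The helper below is a hypothetical self-check
 * that only illustrates this relationship; it is guarded out of the build.
 */
#ifdef NOT_USED
static void
_hash_check_ovfl_bitno_roundtrip(HashMetaPage metap, uint32 ovflbitnum)
{
	/* map bit number -> block number -> bit number; expect the original */
	BlockNumber blkno = bitno_to_blkno(metap, ovflbitnum);

	Assert(_hash_ovflblkno_to_bitno(metap, blkno) == ovflbitnum);
}
#endif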
/*
 * Attempt to expand the hash table by creating one new bucket.
 *
 * This will silently do nothing if we don't get cleanup lock on the old or
 * new bucket.
 *
 * Complete any pending split and remove the tuples from the old bucket,
 * if there are any left over from the previous split.
 *
 * The caller must hold a pin, but no lock, on the metapage buffer.
 * The buffer is returned in the same state.
 */
void
_hash_expandtable(Relation rel, Buffer metabuf)
{
	HashMetaPage metap;
	Bucket		old_bucket;
	Bucket		new_bucket;
	uint32		spare_ndx;
	BlockNumber start_oblkno;
	BlockNumber start_nblkno;
	Buffer		buf_nblkno;
	Buffer		buf_oblkno;
	Page		opage;
	Page		npage;
	HashPageOpaque oopaque;
	HashPageOpaque nopaque;
	uint32		maxbucket;
	uint32		highmask;
	uint32		lowmask;
	bool		metap_update_masks = false;
	bool		metap_update_splitpoint = false;

restart_expand:

	/*
	 * Write-lock the meta page.  It used to be necessary to acquire a
	 * heavyweight lock to begin a split, but that is no longer required.
	 */
	LockBuffer(metabuf, BUFFER_LOCK_EXCLUSIVE);

	_hash_checkpage(rel, metabuf, LH_META_PAGE);
	metap = HashPageGetMeta(BufferGetPage(metabuf));

	/*
	 * Check to see if split is still needed; someone else might have already
	 * done one while we waited for the lock.
	 *
	 * Make sure this stays in sync with _hash_doinsert()
	 */
	if (metap->hashm_ntuples <=
		(double) metap->hashm_ffactor * (metap->hashm_maxbucket + 1))
		goto fail;

	/*
	 * Can't split anymore if maxbucket has reached its maximum possible
	 * value.
	 *
	 * Ideally we'd allow bucket numbers up to UINT_MAX-1 (no higher because
	 * the calculation maxbucket+1 mustn't overflow).  Currently we restrict
	 * to half that because of overflow looping in _hash_log2() and
	 * insufficient space in hashm_spares[].  It's moot anyway because an
	 * index with 2^32 buckets would certainly overflow BlockNumber and hence
	 * _hash_alloc_buckets() would fail, but if we supported buckets smaller
	 * than a disk block then this would be an independent constraint.
	 *
	 * If you change this, see also the maximum initial number of buckets in
	 * _hash_init().
	 */
	if (metap->hashm_maxbucket >= (uint32) 0x7FFFFFFE)
		goto fail;

	/*
	 * Determine which bucket is to be split, and attempt to take cleanup
	 * lock on the old bucket.  If we can't get the lock, give up.
	 *
	 * The cleanup lock protects us not only against other backends, but
	 * against our own backend as well.
	 *
	 * The cleanup lock is mainly to protect the split from concurrent
	 * inserts.  See src/backend/access/hash/README, Lock Definitions for
	 * further details.  Due to this locking restriction, if there is any
	 * pending scan, the split will give up, which is not good, but harmless.
	 */
	new_bucket = metap->hashm_maxbucket + 1;

	old_bucket = (new_bucket & metap->hashm_lowmask);

	start_oblkno = BUCKET_TO_BLKNO(metap, old_bucket);

	buf_oblkno = _hash_getbuf_with_condlock_cleanup(rel, start_oblkno,
													LH_BUCKET_PAGE);
	if (!buf_oblkno)
		goto fail;

	opage = BufferGetPage(buf_oblkno);
	oopaque = (HashPageOpaque) PageGetSpecialPointer(opage);

	/*
	 * If there is a pending split from the old bucket, finish it now: there
	 * is no apparent benefit in deferring it, and handling pending splits
	 * that involve multiple buckets would complicate the code, particularly
	 * in the case where the new split also fails.  We don't need to consider
	 * the new bucket when completing a split here, because a re-split of the
	 * new bucket cannot start while there is still a pending split from the
	 * old bucket.
	 */
	if (H_BUCKET_BEING_SPLIT(oopaque))
	{
		/*
		 * Copy bucket mapping info now; refer to the comment below where we
		 * copy this information before calling _hash_splitbucket to see why
		 * this is okay.
		 */
		maxbucket = metap->hashm_maxbucket;
		highmask = metap->hashm_highmask;
		lowmask = metap->hashm_lowmask;

		/*
		 * Release the locks on the metapage and old_bucket before completing
		 * the split.
		 */
		LockBuffer(metabuf, BUFFER_LOCK_UNLOCK);
		LockBuffer(buf_oblkno, BUFFER_LOCK_UNLOCK);

		_hash_finish_split(rel, metabuf, buf_oblkno, old_bucket, maxbucket,
						   highmask, lowmask);

		/* release the pin on the old buffer and retry the expansion */
		_hash_dropbuf(rel, buf_oblkno);

		goto restart_expand;
	}

	/*
	 * Clean up any tuples remaining from the previous split.  This operation
	 * requires a cleanup lock and we already have one on the old bucket, so
	 * let's do it.  We also don't want to allow further splits from the
	 * bucket until the garbage of the previous split is cleaned up.  This
	 * has two advantages: first, it helps avoid bloat due to garbage;
	 * second, during cleanup of a bucket we are always sure that the garbage
	 * tuples belong to the most recently split bucket.  By contrast, if we
	 * allowed cleanup of a bucket after the meta page is updated to indicate
	 * the new split but before the actual split, the cleanup operation would
	 * not be able to decide whether a tuple has been moved to the newly
	 * created bucket, and could end up deleting such tuples.
	 */
	if (H_NEEDS_SPLIT_CLEANUP(oopaque))
	{
		/*
		 * Copy bucket mapping info now; refer to the comment below where we
		 * copy this information before calling _hash_splitbucket to see why
		 * this is okay.
		 */
		maxbucket = metap->hashm_maxbucket;
		highmask = metap->hashm_highmask;
		lowmask = metap->hashm_lowmask;

		/* Release the metapage lock. */
		LockBuffer(metabuf, BUFFER_LOCK_UNLOCK);

		hashbucketcleanup(rel, old_bucket, buf_oblkno, start_oblkno, NULL,
						  maxbucket, highmask, lowmask, NULL, NULL, true,
						  NULL, NULL);

		_hash_dropbuf(rel, buf_oblkno);

		goto restart_expand;
	}

	/*
	 * There shouldn't be any active scan on the new bucket.
	 *
	 * Note: it is safe to compute the new bucket's blkno here, even though
	 * we may still need to update the BUCKET_TO_BLKNO mapping.  This is
	 * because the current value of hashm_spares[hashm_ovflpoint] correctly
	 * shows where we are going to put a new splitpoint's worth of buckets.
	 */
	start_nblkno = BUCKET_TO_BLKNO(metap, new_bucket);

	/*
	 * If the split point is increasing we need to allocate a new batch of
	 * bucket pages.
	 */
	spare_ndx = _hash_spareindex(new_bucket + 1);
	if (spare_ndx > metap->hashm_ovflpoint)
	{
		uint32		buckets_to_add;

		Assert(spare_ndx == metap->hashm_ovflpoint + 1);

		/*
		 * We treat allocation of buckets as a separate WAL-logged action.
		 * Even if we fail after this operation, we won't leak bucket pages;
		 * rather, the next split will consume this space.  In any case, even
		 * without a failure we don't use all the space in one split
		 * operation.
		 */
		buckets_to_add = _hash_get_totalbuckets(spare_ndx) - new_bucket;
		if (!_hash_alloc_buckets(rel, start_nblkno, buckets_to_add))
		{
			/* can't split due to BlockNumber overflow */
			_hash_relbuf(rel, buf_oblkno);
			goto fail;
		}
	}

	/*
	 * Physically allocate the new bucket's primary page.  We want to do this
	 * before changing the metapage's mapping info, in case we can't get the
	 * disk space.  Ideally, we don't need to check for a cleanup lock on the
	 * new bucket, as no other backend could find this bucket unless the meta
	 * page is updated.  However, it is good to be consistent with the old
	 * bucket locking.
	 */
	buf_nblkno = _hash_getnewbuf(rel, start_nblkno, MAIN_FORKNUM);
	if (!IsBufferCleanupOK(buf_nblkno))
	{
		_hash_relbuf(rel, buf_oblkno);
		_hash_relbuf(rel, buf_nblkno);
		goto fail;
	}

	/*
	 * Since we are scribbling on the pages in the shared buffers, establish
	 * a critical section.  Any failure in this next code leaves us with a
	 * big problem: the metapage is effectively corrupt but could get written
	 * back to disk.
	 */
	START_CRIT_SECTION();

	/*
	 * Okay to proceed with split.  Update the metapage bucket mapping info.
	 */
	metap->hashm_maxbucket = new_bucket;

	if (new_bucket > metap->hashm_highmask)
	{
		/* Starting a new doubling */
		metap->hashm_lowmask = metap->hashm_highmask;
		metap->hashm_highmask = new_bucket | metap->hashm_lowmask;
		metap_update_masks = true;
	}

	/*
	 * If the split point is increasing we need to adjust the hashm_spares[]
	 * array and hashm_ovflpoint so that future overflow pages will be
	 * created beyond this new batch of bucket pages.
	 */
	if (spare_ndx > metap->hashm_ovflpoint)
	{
		metap->hashm_spares[spare_ndx] = metap->hashm_spares[metap->hashm_ovflpoint];
		metap->hashm_ovflpoint = spare_ndx;
		metap_update_splitpoint = true;
	}

	MarkBufferDirty(metabuf);

	/*
	 * Copy bucket mapping info now; this saves re-accessing the meta page
	 * inside _hash_splitbucket's inner loop.  Note that once we drop the
	 * split lock, other splits could begin, so these values might be out of
	 * date before _hash_splitbucket finishes.  That's okay, since all it
	 * needs is to tell which of these two buckets to map hashkeys into.
	 */
	maxbucket = metap->hashm_maxbucket;
	highmask = metap->hashm_highmask;
	lowmask = metap->hashm_lowmask;

	opage = BufferGetPage(buf_oblkno);
	oopaque = (HashPageOpaque) PageGetSpecialPointer(opage);

	/*
	 * Mark the old bucket to indicate that split is in progress.  (At
	 * operation end, we will clear the split-in-progress flag.)  Also, for a
	 * primary bucket page, hasho_prevblkno stores the number of buckets that
	 * existed as of the last split, so we must update that value here.
	 */
	oopaque->hasho_flag |= LH_BUCKET_BEING_SPLIT;
	oopaque->hasho_prevblkno = maxbucket;

	MarkBufferDirty(buf_oblkno);

	npage = BufferGetPage(buf_nblkno);

	/*
	 * initialize the new bucket's primary page and mark it to indicate that
	 * split is in progress.
	 */
	nopaque = (HashPageOpaque) PageGetSpecialPointer(npage);
	nopaque->hasho_prevblkno = maxbucket;
	nopaque->hasho_nextblkno = InvalidBlockNumber;
	nopaque->hasho_bucket = new_bucket;
	nopaque->hasho_flag = LH_BUCKET_PAGE | LH_BUCKET_BEING_POPULATED;
	nopaque->hasho_page_id = HASHO_PAGE_ID;

	MarkBufferDirty(buf_nblkno);

	/* XLOG stuff */
	if (RelationNeedsWAL(rel))
	{
		xl_hash_split_allocate_page xlrec;
		XLogRecPtr	recptr;

		xlrec.new_bucket = maxbucket;
		xlrec.old_bucket_flag = oopaque->hasho_flag;
		xlrec.new_bucket_flag = nopaque->hasho_flag;
		xlrec.flags = 0;

		XLogBeginInsert();

		XLogRegisterBuffer(0, buf_oblkno, REGBUF_STANDARD);
		XLogRegisterBuffer(1, buf_nblkno, REGBUF_WILL_INIT);
		XLogRegisterBuffer(2, metabuf, REGBUF_STANDARD);

		if (metap_update_masks)
		{
			xlrec.flags |= XLH_SPLIT_META_UPDATE_MASKS;
			XLogRegisterBufData(2, (char *) &metap->hashm_lowmask,
								sizeof(uint32));
			XLogRegisterBufData(2, (char *) &metap->hashm_highmask,
								sizeof(uint32));
		}

		if (metap_update_splitpoint)
		{
			xlrec.flags |= XLH_SPLIT_META_UPDATE_SPLITPOINT;
			XLogRegisterBufData(2, (char *) &metap->hashm_ovflpoint,
								sizeof(uint32));
			XLogRegisterBufData(2, (char *) &metap->hashm_spares[metap->hashm_ovflpoint],
								sizeof(uint32));
		}

		XLogRegisterData((char *) &xlrec, SizeOfHashSplitAllocPage);

		recptr = XLogInsert(RM_HASH_ID, XLOG_HASH_SPLIT_ALLOCATE_PAGE);

		PageSetLSN(BufferGetPage(buf_oblkno), recptr);
		PageSetLSN(BufferGetPage(buf_nblkno), recptr);
		PageSetLSN(BufferGetPage(metabuf), recptr);
	}

	END_CRIT_SECTION();

	/* drop lock, but keep pin */
	LockBuffer(metabuf, BUFFER_LOCK_UNLOCK);

	/* Relocate records to the new bucket */
	_hash_splitbucket(rel, metabuf,
					  old_bucket, new_bucket,
					  buf_oblkno, buf_nblkno, NULL,
					  maxbucket, highmask, lowmask);

	/* all done, now release the pins on the primary buckets. */
	_hash_dropbuf(rel, buf_oblkno);
	_hash_dropbuf(rel, buf_nblkno);

	return;

	/* Here if we decide not to split or fail to acquire the old bucket lock */
fail:

	/* We didn't write the metapage, so just drop lock */
	LockBuffer(metabuf, BUFFER_LOCK_UNLOCK);
}
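/*
 * The bucket-selection arithmetic used above (old_bucket = new_bucket &
 * lowmask, plus the mask doubling when new_bucket exceeds highmask) can be
 * traced in isolation.  The sketch below is hypothetical: it assumes the
 * masks a freshly built 2-bucket index would have (per _hash_init_metabuffer)
 * and only shows how old_bucket, lowmask, and highmask evolve across
 * successive splits.  It is guarded out of the build.
 */
#ifdef NOT_USED
static void
_hash_trace_mask_evolution(void)
{
	uint32		maxbucket = 1;	/* assumed: initial 2-bucket index */
	uint32		highmask = 3;
	uint32		lowmask = 1;
	int			split;

	for (split = 1; split <= 8; split++)
	{
		uint32		new_bucket = maxbucket + 1;
		uint32		old_bucket = new_bucket & lowmask;

		elog(DEBUG1, "split %d: old_bucket %u -> new_bucket %u (lowmask %u, highmask %u)",
			 split, old_bucket, new_bucket, lowmask, highmask);

		maxbucket = new_bucket;
		if (new_bucket > highmask)
		{
			/* starting a new doubling, as in _hash_expandtable above */
			lowmask = highmask;
			highmask = new_bucket | lowmask;
		}
	}
}
#endif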
/*
 *	_hash_init_metabuffer() -- Initialize the metadata page of a hash index.
 */
void
_hash_init_metabuffer(Buffer buf, double num_tuples, RegProcedure procid,
					  uint16 ffactor, bool initpage)
{
	HashMetaPage metap;
	HashPageOpaque pageopaque;
	Page		page;
	double		dnumbuckets;
	uint32		num_buckets;
	uint32		spare_index;
	uint32		i;

	/*
	 * Choose the number of initial bucket pages to match the fill factor
	 * given the estimated number of tuples.  We round up the result to the
	 * total number of buckets which has to be allocated before using its
	 * _hashm_spare element.  However, always force at least 2 bucket pages.
	 * The upper limit is determined by considerations explained in
	 * _hash_expandtable().
	 */
	dnumbuckets = num_tuples / ffactor;
	if (dnumbuckets <= 2.0)
		num_buckets = 2;
	else if (dnumbuckets >= (double) 0x40000000)
		num_buckets = 0x40000000;
	else
		num_buckets = _hash_get_totalbuckets(_hash_spareindex(dnumbuckets));

	spare_index = _hash_spareindex(num_buckets);
	Assert(spare_index < HASH_MAX_SPLITPOINTS);

	page = BufferGetPage(buf);
	if (initpage)
		_hash_pageinit(page, BufferGetPageSize(buf));

	pageopaque = (HashPageOpaque) PageGetSpecialPointer(page);
	pageopaque->hasho_prevblkno = InvalidBlockNumber;
	pageopaque->hasho_nextblkno = InvalidBlockNumber;
	pageopaque->hasho_bucket = -1;
	pageopaque->hasho_flag = LH_META_PAGE;
	pageopaque->hasho_page_id = HASHO_PAGE_ID;

	metap = HashPageGetMeta(page);

	metap->hashm_magic = HASH_MAGIC;
	metap->hashm_version = HASH_VERSION;
	metap->hashm_ntuples = 0;
	metap->hashm_nmaps = 0;
	metap->hashm_ffactor = ffactor;
	metap->hashm_bsize = HashGetMaxBitmapSize(page);

	/* find largest bitmap array size that will fit in page size */
	for (i = _hash_log2(metap->hashm_bsize); i > 0; --i)
	{
		if ((1 << i) <= metap->hashm_bsize)
			break;
	}
	Assert(i > 0);
	metap->hashm_bmsize = 1 << i;
	metap->hashm_bmshift = i + BYTE_TO_BIT;
	Assert((1 << BMPG_SHIFT(metap)) == (BMPG_MASK(metap) + 1));

	/*
	 * Label the index with its primary hash support function's OID.  This is
	 * pretty useless for normal operation (in fact, hashm_procid is not used
	 * anywhere), but it might be handy for forensic purposes so we keep it.
	 */
	metap->hashm_procid = procid;

	/*
	 * We initialize the index with N buckets, 0 .. N-1, occupying physical
	 * blocks 1 to N.  The first freespace bitmap page is in block N+1.
	 */
	metap->hashm_maxbucket = num_buckets - 1;

	/*
	 * Set highmask to the next immediate ((2 ^ x) - 1) value, which is
	 * sufficient to cover num_buckets.
	 */
	metap->hashm_highmask = (1 << (_hash_log2(num_buckets + 1))) - 1;
	metap->hashm_lowmask = (metap->hashm_highmask >> 1);

	MemSet(metap->hashm_spares, 0, sizeof(metap->hashm_spares));
	MemSet(metap->hashm_mapp, 0, sizeof(metap->hashm_mapp));

	/* Set up mapping for one spare page after the initial splitpoints */
	metap->hashm_spares[spare_index] = 1;
	metap->hashm_ovflpoint = spare_index;
	metap->hashm_firstfree = 0;

	/*
	 * Set pd_lower just past the end of the metadata.  This is to log full
	 * page image of metapage in xloginsert.c.
	 */
	((PageHeader) page)->pd_lower =
		((char *) metap + sizeof(HashMetaPageData)) - (char *) page;
}
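/*
 * The initial mask computation above can be spot-checked on its own: the
 * computed highmask is the next ((2 ^ x) - 1) value at least as large as
 * num_buckets, so it covers all of the initial bucket numbers 0 ..
 * num_buckets - 1, and lowmask is simply half of it.  The helper below is a
 * hypothetical sketch of that arithmetic, assuming _hash_log2() returns the
 * ceiling of log2 as described elsewhere in this module; it is guarded out
 * of the build.
 */
#ifdef NOT_USED
static void
_hash_initial_masks_sketch(uint32 num_buckets)
{
	/* same expressions as in _hash_init_metabuffer above */
	uint32		highmask = (1 << (_hash_log2(num_buckets + 1))) - 1;
	uint32		lowmask = highmask >> 1;

	/* the mask must cover every initial bucket number, 0 .. num_buckets-1 */
	Assert(highmask >= num_buckets - 1);

	elog(DEBUG1, "num_buckets %u: highmask %u, lowmask %u",
		 num_buckets, highmask, lowmask);
}
#endif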