// HashMap::merge -- one contraction step of the hash directory: once the
// directory has grown past INITIAL_HASH_ARRAY_SIZE, the last (rear_) bucket
// is taken out of service and its entries are folded into the bucket that
// its keys now hash to.  What the visible code does:
//   * When split_ reaches 0, front_ is halved, split_ is reset to the new
//     front_ (wrapping back to the previous doubling round), and toMerge_
//     is cleared.
//   * split_ and rear_ are then decremented; the decremented rear_ names the
//     source bucket to be emptied.
//   * If the source bucket yields no first element (next() is false), the
//     step returns immediately -- presumably an empty bucket needs no data
//     movement; NOTE(review): split_/rear_ stay decremented on this path,
//     confirm that is intended.
//   * The target bucket address is recomputed from the first entry's OId via
//     hashBucketAddrFromObject<T>, and Bucket::merge() is attempted with
//     ThresholdMergeLimit_.  A 0 return (merge refused -- semantics assumed,
//     TODO confirm against Bucket::merge) undoes the step: toMerge_ cleared,
//     split_ and rear_ restored, early return.
// NOTE(review): the braces on this line do not balance back to file scope;
// the function body continues beyond this chunk.  This is only the visible
// prefix of merge() -- do not treat it as the complete function.
void HashMap::merge(TransactionContext& txn) { if (hashMapImage_->front_ > INITIAL_HASH_ARRAY_SIZE) { if (hashMapImage_->split_ == 0) { hashMapImage_->front_ = hashMapImage_->front_ >> 1; hashMapImage_->split_ = hashMapImage_->front_; hashMapImage_->toMerge_ = false; } hashMapImage_->split_--; hashMapImage_->rear_--; uint64_t sourceBucketId = hashMapImage_->rear_; Bucket sourceBucket( txn, *getObjectManager(), maxArraySize_, maxCollisionArraySize_); hashArray_.get(txn, sourceBucketId, sourceBucket); Bucket::Cursor cursor(txn, *getObjectManager()); sourceBucket.set(txn, cursor); bool isNext = sourceBucket.next(txn, cursor); if (!isNext) { return; } uint32_t addr = hashBucketAddrFromObject<T>( txn, sourceBucket.getCurrentOId(cursor)); cursor.array_.reset(); Bucket targetBucket( txn, *getObjectManager(), maxArraySize_, maxCollisionArraySize_); hashArray_.get(txn, addr, targetBucket); if (targetBucket.merge(txn, sourceBucket, ThresholdMergeLimit_) == 0) { hashMapImage_->toMerge_ = false; hashMapImage_->split_++; hashMapImage_->rear_++; return; } }
void SingleBucketJoinTest::testPersistenceCanHandleSingleBucketJoin() { TestFileStorComponents c(*this, "testPersistenceCanHandleSingleBucketJoin"); document::BucketId targetBucket(16, 1); document::BucketId sourceBucket(17, 1); createBucket(sourceBucket); // Make sure it's not empty c.sendPut(sourceBucket, DocumentIndex(0), PutTimestamp(1000)); expectOkReply<api::PutReply>(c.top); c.top.getRepliesOnce(); auto cmd = std::make_shared<api::JoinBucketsCommand>(makeDocumentBucket(targetBucket)); cmd->getSourceBuckets().push_back(sourceBucket); cmd->getSourceBuckets().push_back(sourceBucket); c.top.sendDown(cmd); // If single bucket join locking is not working properly, this // will hang forever. expectOkReply<api::JoinBucketsReply>(c.top); }