/*******************************************************************************
 * @author  : Rohan Jyoti
 * @name    : destroyMembershipList
 * @param   : incomingML -- membership list to destroy (NULL is a no-op)
 * @return  : void
 * @purpose : Deallocate memory associated with a membership list. Frees every
 *            payload held by the list first (queue_destroy does not free
 *            payloads), then tears down the queue itself.
 ******************************************************************************/
void destroyMembershipList(queue_t *incomingML)
{
	unsigned int i;
	unsigned int mlSize;

	/* Be NULL-safe, like free(): destroying an absent list is a no-op. */
	if(incomingML == NULL)
		return;

	/* Hoist the size: it is loop-invariant, so avoid re-querying the
	   queue on every iteration. */
	mlSize = queue_size(incomingML);
	for(i = 0; i < mlSize; i++)
	{
		mPayload_t *tempPayload = (mPayload_t *)queue_at(incomingML, i);
		destroyPayload(tempPayload);
	}
	queue_destroy(incomingML);
}
// Reset the hash table to an empty state.
//
// Destroys any existing payload values, zeroes the slot array and every
// bucket that was in use, and resets all allocation counters so subsequent
// inserts start from a fresh table.
//
// NOTE(review): all atomics here use std::memory_order_relaxed and there is
// no locking, so this presumably runs while the caller holds exclusive
// access to the table -- confirm before invoking concurrently with readers.
void PackedPayloadHashTable::clear() {
  // Snapshot how many buckets were actually used so we only zero those,
  // not the whole bucket region.
  const std::size_t used_buckets =
      header_->buckets_allocated.load(std::memory_order_relaxed);

  // Destroy existing values, if necessary. Done before the memsets below,
  // since the payloads presumably live inside the bucket storage being
  // wiped -- TODO confirm.
  destroyPayload();

  // Zero-out slot array.
  std::memset(
      slots_, 0x0, sizeof(std::atomic<std::size_t>) * header_->num_slots);

  // Zero-out used buckets.
  std::memset(buckets_, 0x0, used_buckets * bucket_size_);

  // Reset allocation bookkeeping for buckets and variable-length key bytes.
  header_->buckets_allocated.store(0, std::memory_order_relaxed);
  header_->variable_length_bytes_allocated.store(0, std::memory_order_relaxed);
  key_manager_.zeroNextVariableLengthKeyOffset();
}
// Swap the contents of this Nullable with `other`.
//
// Three cases:
//  * both engaged: plain member-wise swap of payload and null flag;
//  * only *this engaged: move our payload into `other`, destroy ours, and
//    hand over the null flag;
//  * otherwise: move `other`'s payload into *this and take its null flag.
//
// NOTE(review): the final branch never checks `other.engaged`; if swap can
// be called with BOTH sides disengaged, it move-constructs from an
// unconstructed payload -- confirm callers guarantee at least one side is
// engaged before relying on this.
void swap(Nullable& other) noexcept(
    std::is_nothrow_move_constructible<StoredType>() &&
    noexcept(swap(std::declval<StoredType&>(), std::declval<StoredType&>()))) {
  if (engaged && other.engaged) {
    // Both hold values: swap in place.
    ::std::swap(payload, other.payload);
    ::std::swap(null, other.null);
  } else if (engaged) {
    // Only we hold a value: transfer payload and null flag to `other`,
    // leaving *this disengaged with null cleared.
    other.constructPayload(std::move(payload));
    destroyPayload();
    other.null = null;
    null = false;
  } else {
    // Only `other` holds a value: transfer payload and null flag to *this,
    // leaving `other` disengaged with null cleared.
    constructPayload(std::move(other.payload));
    other.destroyPayload();
    null = other.null;
    other.null = false;
  }
}
/// Move-assignment. Mirrors the source's state into *this: move its payload
/// across when it has one (assigning over ours or constructing in place as
/// appropriate), or release our payload and become null when it does not.
Nullable& operator=(Nullable&& other) noexcept(
    std::is_nothrow_move_constructible<StoredType>() &&
    std::is_nothrow_move_assignable<StoredType>()) {
  if (!other.engaged) {
    // Source holds nothing: drop whatever we hold and mark ourselves null.
    destroyPayload();
    null = true;
  } else if (engaged) {
    // Both sides hold a value: move-assign onto our existing payload.
    payload = std::move(other.payload);
    null = other.null;
  } else {
    // Only the source holds a value: move-construct ours in place.
    constructPayload(std::move(other.payload));
    null = other.null;
  }
  return *this;
}
/// Copy-assignment. Mirrors the source's state into *this: copy its payload
/// across when it has one (assigning over ours or constructing in place as
/// appropriate), or release our payload and become null when it does not.
Nullable& operator=(const Nullable& other) {
  if (!other.engaged) {
    // Source holds nothing: drop whatever we hold and mark ourselves null.
    destroyPayload();
    null = true;
  } else if (engaged) {
    // Both sides hold a value: copy-assign onto our existing payload.
    payload = other.payload;
    null = other.null;
  } else {
    // Only the source holds a value: copy-construct ours in place.
    constructPayload(other.payload);
    null = other.null;
  }
  return *this;
}
// Grow this hash table into a freshly allocated StorageBlob (approximately
// doubling buckets and slots), bulk-copy all existing entries across, swap
// the new blob in, drop the old one, and rebuild the per-slot bucket chains.
//
// @param extra_buckets           Additional bucket headroom requested beyond
//                                the doubling heuristic.
// @param extra_variable_storage  Extra variable-length key storage needed
//                                (nonzero when an oversized variable-length
//                                key triggered the resize).
// @param retry_num               Must be 0 with this implementation (see
//                                assertion below).
void PackedPayloadHashTable::resize(const std::size_t extra_buckets,
                                    const std::size_t extra_variable_storage,
                                    const std::size_t retry_num) {
  // A retry should never be necessary with this implementation of HashTable.
  // Separate chaining ensures that any resized hash table with more buckets
  // than the original table will be able to hold more entries than the
  // original.
  DEBUG_ASSERT(retry_num == 0);

  SpinSharedMutexExclusiveLock<true> write_lock(this->resize_shared_mutex_);

  // Recheck whether the hash table is still full. Note that multiple threads
  // might wait to rebuild this hash table simultaneously. Only the first one
  // should do the rebuild.
  if (!isFull(extra_variable_storage)) {
    return;
  }

  // Approximately double the number of buckets and slots.
  //
  // TODO(chasseur): It may be worth it to more than double the number of
  // buckets here so that we can maintain a good, sparse fill factor for a
  // longer time as more values are inserted. Such behavior should take into
  // account kHashTableLoadFactor.
  std::size_t resized_num_slots = get_next_prime_number(
      (header_->num_buckets + extra_buckets / 2) * kHashTableLoadFactor * 2);
  std::size_t variable_storage_required =
      (resized_num_slots / kHashTableLoadFactor) *
      key_manager_.getEstimatedVariableKeySize();
  const std::size_t original_variable_storage_used =
      header_->variable_length_bytes_allocated.load(std::memory_order_relaxed);
  // If this resize was triggered by a too-large variable-length key, bump up
  // the variable-length storage requirement.
  if ((extra_variable_storage > 0) &&
      (extra_variable_storage + original_variable_storage_used >
       key_manager_.getVariableLengthKeyStorageSize())) {
    variable_storage_required += extra_variable_storage;
  }

  // Total bytes needed: header + slot array + bucket array + variable-length
  // key storage.
  const std::size_t resized_memory_required =
      sizeof(Header) +
      resized_num_slots * sizeof(std::atomic<std::size_t>) +
      (resized_num_slots / kHashTableLoadFactor) * bucket_size_ +
      variable_storage_required;
  const std::size_t resized_storage_slots =
      this->storage_manager_->SlotsNeededForBytes(resized_memory_required);
  if (resized_storage_slots == 0) {
    FATAL_ERROR(
        "Storage requirement for resized SeparateChainingHashTable "
        "exceeds maximum allocation size.");
  }

  // Get a new StorageBlob to hold the resized hash table.
  const block_id resized_blob_id =
      this->storage_manager_->createBlob(resized_storage_slots);
  MutableBlobReference resized_blob =
      this->storage_manager_->getBlobMutable(resized_blob_id);

  // Locate data structures inside the new StorageBlob.
  void *aligned_memory_start = resized_blob->getMemoryMutable();
  std::size_t available_memory = resized_storage_slots * kSlotSizeBytes;
  if (align(alignof(Header),
            sizeof(Header),
            aligned_memory_start,
            available_memory) == nullptr) {
    // Should be impossible, as noted in constructor.
    FATAL_ERROR(
        "StorageBlob used to hold resized SeparateChainingHashTable "
        "is too small to meet alignment requirements of "
        "LinearOpenAddressingHashTable::Header.");
  } else if (aligned_memory_start != resized_blob->getMemoryMutable()) {
    // Again, should be impossible.
    // NOTE(review): available_memory was initialized from
    // resized_storage_slots, so the byte count below arguably should use
    // resized_storage_slots rather than resized_num_slots -- affects only
    // the warning text, but worth confirming.
    DEV_WARNING("In SeparateChainingHashTable::resize(), StorageBlob "
                << "memory adjusted by "
                << (resized_num_slots * kSlotSizeBytes - available_memory)
                << " bytes to meet alignment requirement for "
                << "LinearOpenAddressingHashTable::Header.");
  }

  // Carve out the header.
  Header *resized_header = static_cast<Header *>(aligned_memory_start);
  aligned_memory_start =
      static_cast<char *>(aligned_memory_start) + sizeof(Header);
  available_memory -= sizeof(Header);

  // As in constructor, recompute the number of slots and buckets using the
  // actual available memory.
  std::size_t resized_num_buckets =
      (available_memory - extra_variable_storage) /
      (kHashTableLoadFactor * sizeof(std::atomic<std::size_t>) +
       bucket_size_ +
       key_manager_.getEstimatedVariableKeySize());
  resized_num_slots =
      get_previous_prime_number(resized_num_buckets * kHashTableLoadFactor);
  resized_num_buckets = resized_num_slots / kHashTableLoadFactor;

  // Locate slot array.
  std::atomic<std::size_t> *resized_slots =
      static_cast<std::atomic<std::size_t> *>(aligned_memory_start);
  aligned_memory_start =
      static_cast<char *>(aligned_memory_start) +
      sizeof(std::atomic<std::size_t>) * resized_num_slots;
  available_memory -= sizeof(std::atomic<std::size_t>) * resized_num_slots;

  // As in constructor, we will be extra paranoid and use align() to locate
  // the start of the array of buckets, as well.
  void *resized_buckets = aligned_memory_start;
  if (align(
          kBucketAlignment, bucket_size_, resized_buckets, available_memory) ==
      nullptr) {
    FATAL_ERROR(
        "StorageBlob used to hold resized SeparateChainingHashTable "
        "is too small to meet alignment requirements of buckets.");
  } else if (resized_buckets != aligned_memory_start) {
    DEV_WARNING(
        "Bucket array start position adjusted to meet alignment "
        "requirement for SeparateChainingHashTable's value type.");
    // Alignment ate into our budget; give back a bucket if we no longer fit.
    if (resized_num_buckets * bucket_size_ + variable_storage_required >
        available_memory) {
      --resized_num_buckets;
    }
  }
  aligned_memory_start =
      static_cast<char *>(aligned_memory_start) +
      resized_num_buckets * bucket_size_;
  available_memory -= resized_num_buckets * bucket_size_;

  // Whatever remains is variable-length key storage.
  void *resized_variable_length_key_storage = aligned_memory_start;
  const std::size_t resized_variable_length_key_storage_size =
      available_memory;

  const std::size_t original_buckets_used =
      header_->buckets_allocated.load(std::memory_order_relaxed);

  // Initialize the header.
  resized_header->num_slots = resized_num_slots;
  resized_header->num_buckets = resized_num_buckets;
  resized_header->buckets_allocated.store(original_buckets_used,
                                          std::memory_order_relaxed);
  resized_header->variable_length_bytes_allocated.store(
      original_variable_storage_used,
      std::memory_order_relaxed);

  // Bulk-copy buckets. This is safe because:
  //   1. The "next" pointers will be adjusted when rebuilding chains below.
  //   2. The hash codes will stay the same.
  //   3. For key components:
  //     a. Inline keys will stay exactly the same.
  //     b. Offsets into variable-length storage will remain valid, because
  //        we also do a byte-for-byte copy of variable-length storage below.
  //     c. Absolute external pointers will still point to the same address.
  //     d. Relative pointers are not used with resizable hash tables.
  //   4. If values are not trivially copyable, then we invoke ValueT's copy
  //      or move constructor with placement new.
  // NOTE(harshad) - Regarding point 4 above, as this is a specialized
  // hash table implemented for aggregation, the values are trivially
  // copyable, therefore we don't need to invoke payload values' copy/move
  // constructors.
  std::memcpy(resized_buckets, buckets_, original_buckets_used * bucket_size_);

  // Copy over variable-length key components, if any.
  if (original_variable_storage_used > 0) {
    DEBUG_ASSERT(original_variable_storage_used ==
                 key_manager_.getNextVariableLengthKeyOffset());
    DEBUG_ASSERT(original_variable_storage_used <=
                 resized_variable_length_key_storage_size);
    std::memcpy(resized_variable_length_key_storage,
                key_manager_.getVariableLengthKeyStorage(),
                original_variable_storage_used);
  }

  // Tear down payloads in the OLD storage before it is swapped out below.
  destroyPayload();

  // Make resized structures active.
  std::swap(this->blob_, resized_blob);
  header_ = resized_header;
  slots_ = resized_slots;
  buckets_ = resized_buckets;
  key_manager_.setVariableLengthStorageInfo(
      resized_variable_length_key_storage,
      resized_variable_length_key_storage_size,
      &(resized_header->variable_length_bytes_allocated));

  // Drop the old blob (after the swap above, `resized_blob` refers to it).
  const block_id old_blob_id = resized_blob->getID();
  resized_blob.release();
  this->storage_manager_->deleteBlockOrBlobFile(old_blob_id);

  // Rebuild chains: re-link every copied bucket into the (larger) slot
  // array, since slot indices change with the new num_slots modulus.
  void *current_bucket = buckets_;
  for (std::size_t bucket_num = 0;
       bucket_num < original_buckets_used;
       ++bucket_num) {
    // Each bucket starts with its atomic "next" pointer, followed by the
    // stored hash code.
    std::atomic<std::size_t> *next_ptr =
        static_cast<std::atomic<std::size_t> *>(current_bucket);
    const std::size_t hash_code = *reinterpret_cast<const std::size_t *>(
        static_cast<const char *>(current_bucket) +
        sizeof(std::atomic<std::size_t>));

    const std::size_t slot_number = hash_code % header_->num_slots;
    std::size_t slot_ptr_value = 0;
    if (slots_[slot_number].compare_exchange_strong(
            slot_ptr_value, bucket_num + 1, std::memory_order_relaxed)) {
      // This bucket is the first in the chain for this block, so reset its
      // next pointer to 0.
      next_ptr->store(0, std::memory_order_relaxed);
    } else {
      // A chain already exists starting from this slot, so put this bucket
      // at the head.
      next_ptr->store(slot_ptr_value, std::memory_order_relaxed);
      slots_[slot_number].store(bucket_num + 1, std::memory_order_relaxed);
    }
    current_bucket = static_cast<char *>(current_bucket) + bucket_size_;
  }
}