/** * Time and report the execution of one hash across the entire Dictionary * * \param [in] hindex index of the hash Collider to use */ void TimeOne (const int hindex) { // Hashing speed uint32_t reps = 100; Hasher h = m_hashes[hindex].m_hash; int start = clock (); for (std::vector<std::string>::const_iterator w = m_words.begin (); w != m_words.end(); ++w) { for (uint32_t i = 0; i < reps; ++i) { h.clear ().GetHash32 (*w); } } int stop = clock (); double delta = stop - start; double per = 1e9 * delta / (m_nphrases * reps * CLOCKS_PER_SEC); std::cout << std::left << std::setw (32) << m_hashes[hindex].GetName () << std::right << std::setw (10) << m_nphrases << std::setw (10) << reps << std::setw (10) << stop - start << std::setw (12) << per << std::endl; } // TimeOne ()
/**
 * Re-hashes an existing hash value together with the given salt.
 */
Common::Hash Hasher::hashWithSalt(const Common::Hash& hash, quint64 salt)
{
   Hasher h;
   h.addData(hash.getData(), Hash::HASH_SIZE);
   h.addSalt(salt);
   return h.getResult();
}
std::size_t FunctionTypeData::hash() const { Hasher hasher; hasher.add(attributes()); hasher.add(returnType()); hasher.add(parameterTypes()); return hasher.get(); }
/**
 * Hashes the UTF-8 encoding of the given string.
 */
Common::Hash Hasher::hash(const QString& str)
{
   const QByteArray utf8 = str.toUtf8();
   Hasher h;
   h.addData(utf8.constData(), utf8.size());
   return h.getResult();
}
/**
 * Returns hash(str) + salt.
 */
Common::Hash Hasher::hashWithSalt(const QString& str, quint64 salt)
{
   // Hash the UTF-8 bytes of the string, then mix in the salt.
   const QByteArray utf8 = str.toUtf8();
   Hasher h;
   h.addData(utf8.constData(), utf8.size());
   h.addSalt(salt);
   return h.getResult();
}
std::size_t FunctionAttributes::hash() const { Hasher hasher; hasher.add(isVarArg()); hasher.add(isMethod()); hasher.add(isTemplated()); hasher.add(noExceptPredicate()); return hasher.get(); }
void Hasher::_callbackExt(int gpio, int level, uint32_t tick, void *user) { /* Need a static callback to link with C. */ Hasher *mySelf = (Hasher *) user; mySelf->_callback(gpio, level, tick); /* Call the instance callback. */ }
size_t hash() const { Hasher hasher; hasher.add(kind()); switch (kind()) { case NULLVAL: break; case BOOLEAN: hasher.add(boolValue()); break; case INTEGER: hasher.add(integerValue()); break; case FLOATINGPOINT: hasher.add(floatValue()); break; case CHARACTER: hasher.add(characterValue()); break; case STRING: hasher.add(stringValue()); break; } return hasher.get(); }
/**
 * Get the appropriate hash value
 *
 * \param [in] phrase the string to hash
 * \return the hash value, using the number of bits set in the constructor
 */
uint64_t GetHash (const std::string & phrase)
{
  // Taking the string by const reference avoids a copy on every call
  // (the original passed by value); behavior is unchanged for callers.
  m_hash.clear ();
  uint64_t h = 0;
  if (m_bits == Bits32)
    {
      h = m_hash.GetHash32 (phrase);
    }
  else
    {
      h = m_hash.GetHash64 (phrase);
    }
  return h;
}
ImageView &TransientAllocator::request_attachment(unsigned width, unsigned height, VkFormat format,
                                                  unsigned index)
{
	// Cache key: every parameter that distinguishes a transient image.
	Hasher hasher;
	hasher.u32(width);
	hasher.u32(height);
	hasher.u32(format);
	hasher.u32(index);
	const auto key = hasher.get();

	// Fast path: reuse a previously created transient render target.
	if (auto *cached = transients.request(key))
		return cached->handle->get_view();

	// Miss: create the image and insert it into the cache.
	auto image_info = ImageCreateInfo::transient_render_target(width, height, format);
	auto *fresh = transients.emplace(key, device->create_image(image_info, nullptr));
	return fresh->handle->get_view();
}
void generateHash( const char* filename ) { // Create hasher Hasher hasher; MD5Strategy* md5Strategy = new MD5Strategy(); hasher.addStrategy( md5Strategy ); SHA256Strategy* sha256Strategy = new SHA256Strategy(); hasher.addStrategy( sha256Strategy ); // Read the file and hash it ifstream input( filename, ios_base::in | ios_base::binary ); if ( input.is_open() ) { char buffer[1024]; hasher.init(); while ( !input.eof() ) { input.read( buffer, 1023 ); int numRead = input.gcount(); hasher.update( buffer, numRead ); } input.close(); string messageDigest; hasher.digest( "MD5", messageDigest ); cout << messageDigest << " "; hasher.digest( "SHA256", messageDigest ); cout << messageDigest << endl; } else { cerr << "ERROR: Unable to open file: " << filename << endl; } }
Framebuffer &FramebufferAllocator::request_framebuffer(const RenderPassInfo &info)
{
	auto &rp = device->request_render_pass(info);

	// A framebuffer is identified by its render pass plus every attachment.
	Hasher hasher;
	hasher.u64(rp.get_cookie());
	for (unsigned i = 0; i < info.num_color_attachments; i++)
	{
		if (info.color_attachments[i])
			hasher.u64(info.color_attachments[i]->get_cookie());
	}
	if (info.depth_stencil)
		hasher.u64(info.depth_stencil->get_cookie());

	const auto key = hasher.get();

	// Return the cached framebuffer if one exists, otherwise build it.
	if (auto *cached = framebuffers.request(key))
		return *cached;
	return *framebuffers.emplace(key, device, rp, info);
}
error getHasher( const std::string& _name, Hasher& _hasher ) { boost::unordered_map<const std::string, const HashStrategy*>::const_iterator it = _strategies.find( _name ); if ( _strategies.end() == it ) { std::stringstream msg; msg << "Unknown hashing scheme [" << _name << "]"; return ERROR( SYS_INVALID_INPUT_PARAM, msg.str() ); } _hasher.init( it->second ); return SUCCESS(); }
// Micro-benchmark: hash `n` random fixed-size keys and log elapsed time.
//
// \param what  label printed alongside the timing
// \param n     number of keys to hash
void test (std::string const& what, std::size_t n)
{
    using namespace std;
    using namespace std::chrono;
    // Fixed seed: every run hashes the identical key sequence, so timings
    // are comparable across hashers.
    xor_shift_engine g(1);
    array<std::uint8_t, KeySize> key;
    auto const start = clock_type::now();
    while(n--)
    {
        rngfill (key, g);
        Hasher h;
        h.append(key.data(), KeySize);
        // volatile sink keeps the optimizer from eliding the hash work.
        volatile size_t temp = static_cast<std::size_t>(h);
        (void)temp;
    }
    auto const elapsed = clock_type::now() - start;
    log << setw(12) << what << " " << duration<double>(elapsed) << "s";
}
size_t Predicate::hash() const { Hasher hasher; hasher.add(kind()); switch (kind()) { case TRUE: case FALSE: case SELFCONST: { break; } case AND: { hasher.add(andLeft()); hasher.add(andRight()); break; } case OR: { hasher.add(orLeft()); hasher.add(orRight()); break; } case SATISFIES: { hasher.add(satisfiesType()); hasher.add(satisfiesRequirement()); break; } case VARIABLE: { hasher.add(variableTemplateVar()); break; } } return hasher.get(); }
// Resizes the bucket array and re-inserts all elements with a fresh hash
// function when the bucket count leaves the band [2*n, 4*n].
void HashTable::rebuild() {
	// Still within bounds: 2*n <= buckets <= 4*n, nothing to do.
	if (table.size() <= n * 4 && n * 2 <= table.size())
		return;
	// Snapshot every stored string before the buckets are wiped.
	vector < string > data;
	for (hash_iterator_type it = begin(); it != end(); ++it)
		data.push_back(*it);
	// Pick the new bucket count: shrink when there are too many buckets,
	// grow when there are too few.
	// NOTE(review): shrinking to n / 2 buckets for n elements looks
	// asymmetric with the [2*n, 4*n] band above -- confirm this is not
	// meant to be n * 2 in both branches' neighborhood.
	int nn = table.size() > n * 4 ? n / 2 : n * 2;
	// Presumably switches to the next hash function in the family -- verify.
	hasher.next_hash();
	table.assign(nn, list < string > ());
	int cur_hash;
	// Re-insert everything under the new bucket count / hash function.
	for (vector < string >::iterator it = data.begin(); it != data.end(); ++it) {
		cur_hash = hasher(*it, nn);
		table[cur_hash].push_back(*it);
	}
}
// Computes the cache key for the current graphics pipeline state and binds
// (building on a cache miss) the matching pipeline. The hash must fold in
// every input that affects pipeline creation, in a fixed order.
void CommandBuffer::flush_graphics_pipeline()
{
	Hasher h;
	active_vbos = 0;
	auto &layout = current_layout->get_resource_layout();
	// Vertex attributes: hash each active attribute's binding/format/offset
	// and record which vertex-buffer bindings are referenced.
	for_each_bit(layout.attribute_mask, [&](uint32_t bit) {
		h.u32(bit);
		active_vbos |= 1u << attribs[bit].binding;
		h.u32(attribs[bit].binding);
		h.u32(attribs[bit].format);
		h.u32(attribs[bit].offset);
	});

	// Per-binding input rate and stride for every referenced vertex buffer.
	for_each_bit(active_vbos, [&](uint32_t bit) {
		h.u32(vbo_input_rates[bit]);
		h.u32(vbo_strides[bit]);
	});

	// Render pass, shader program, and the packed static state words.
	h.u64(render_pass->get_cookie());
	h.u64(current_program->get_cookie());
	h.data(static_state.words, sizeof(static_state.words));

	if (static_state.state.blend_enable)
	{
		// Blend constants only affect the pipeline when some blend factor
		// actually reads them, so only hash them in that case.
		const auto needs_blend_constant = [](VkBlendFactor factor) {
			return factor == VK_BLEND_FACTOR_CONSTANT_COLOR || factor == VK_BLEND_FACTOR_CONSTANT_ALPHA;
		};
		bool b0 = needs_blend_constant(static_cast<VkBlendFactor>(static_state.state.src_color_blend));
		bool b1 = needs_blend_constant(static_cast<VkBlendFactor>(static_state.state.src_alpha_blend));
		bool b2 = needs_blend_constant(static_cast<VkBlendFactor>(static_state.state.dst_color_blend));
		bool b3 = needs_blend_constant(static_cast<VkBlendFactor>(static_state.state.dst_alpha_blend));
		if (b0 || b1 || b2 || b3)
			h.data(reinterpret_cast<uint32_t *>(potential_static_state.blend_constants),
			       sizeof(potential_static_state.blend_constants));
	}

	// Look up the pipeline by hash; build it on a miss.
	auto hash = h.get();
	current_pipeline = current_program->get_graphics_pipeline(hash);
	if (current_pipeline == VK_NULL_HANDLE)
		current_pipeline = build_graphics_pipeline(hash);
}
std::size_t FunctionType::hash() const { Hasher hasher; hasher.add(data_); return hasher.get(); }
// **Sample** main function/driver-- THIS IS NOT A COMPLETE TEST SUITE // YOU MUST WRITE YOUR OWN TESTS // See assignment description. int main( int argc, char* argv[]) { // Generate empty hash tables: Hasher* goodHashRP1 = new Hasher('g', 'd'); Hasher* goodHashQP1 = new Hasher('g', 'q'); Hasher* badHashRP1 = new Hasher('b', 'd'); Hasher* badHashQP1 = new Hasher('b', 'q'); // Generate hash tables that are systematically loaded from file. // Note that if you cannot fit an element you should stop inserting elements // and set a flag to full. Hasher* goodHashRPa = new Hasher('g', 'd', 0.25, "4000record.txt"); Hasher* goodHashRPb = new Hasher('g', 'd', 0.50, "4000record.txt"); Hasher* goodHashRPc = new Hasher('g', 'd', 0.75, "4000record.txt"); Hasher* goodHashQPa = new Hasher('g', 'q', 0.25, "4000record.txt"); Hasher* goodHashQPb = new Hasher('g', 'q', 0.50, "4000record.txt"); Hasher* goodHashQPc = new Hasher('g', 'q', 0.75, "4000record.txt"); Hasher* poorHashRPa = new Hasher('b', 'd', 0.25, "4000record.txt"); Hasher* poorHashRPb = new Hasher('b', 'd', 0.50, "4000record.txt"); Hasher* poorHashRPc = new Hasher('b', 'd', 0.75, "4000record.txt"); Hasher* poorHashQPa = new Hasher('b', 'q', 0.25, "4000record.txt"); Hasher* poorHashQPb = new Hasher('b', 'q', 0.50, "4000record.txt"); Hasher* poorHashQPc = new Hasher('b', 'q', 0.75, "4000record.txt"); goodHashRPa->printStat(); goodHashRPb->printStat(); goodHashRPc->printStat(); goodHashQPa->printStat(); goodHashQPb->printStat(); goodHashQPc->printStat(); poorHashRPa->printStat(); poorHashRPb->printStat(); poorHashRPc->printStat(); poorHashQPa->printStat(); poorHashQPb->printStat(); poorHashQPc->printStat(); // Sample use case: std::cout << "Insert MUZEJKGA 10" << std::endl; std::string key = "MUZEJKGA"; int value = 10; if(goodHashRP1->insert(key, value)) std::cout << "Inserted" << std::endl; else std::cout << "Failed to insert" << std::endl; goodHashRP1->printTable(); int subscript = -1; std::cout << "search for inserted" << 
std::endl; if(goodHashRP1->search(key, subscript)) std::cout << "Found at " << subscript << std::endl; else std::cout << "Failed to find" << std::endl; goodHashRP1->printTable(); std::cout << "remove that one" << std::endl; if(goodHashRP1->remove(key)) std::cout << "Removed" << std::endl; else std::cout << "Not deleted/not found" << std::endl; goodHashRP1->printTable(); std::cout << "remove once more" << std::endl; if(goodHashRP1->remove(key)) std::cout << "Removed" << std::endl; else std::cout << "Not deleted/not found" << std::endl; goodHashRP1->printTable(); std::cout << "insert again" << std::endl; if(goodHashRP1->insert(key, value)) std::cout << "Inserted" << std::endl; else std::cout << "Failed to insert" << std::endl; goodHashRP1->printTable(); std::cout << "search for it" << std::endl; if(goodHashRP1->search(key, subscript)) std::cout << "Found at " << subscript << std::endl; else std::cout << "Failed to find" << std::endl; goodHashRP1->printTable(); value=3; std::cout << "insert with same key diff val" << std::endl; if(goodHashRP1->insert(key, value)) std::cout << "Inserted" << std::endl; else std::cout << "Failed to insert" << std::endl; goodHashRP1->printTable(); std::cout << "find it" << std::endl; if(goodHashRP1->search(key, subscript)) std::cout << "Found at " << subscript << std::endl; else std::cout << "Failed to find" << std::endl; goodHashRP1->printTable(); std::cout << "remove " << std::endl; if(goodHashRP1->remove(key)) std::cout << "Removed" << std::endl; else std::cout << "Not deleted/not found" << std::endl; goodHashRP1->printTable(); std::cout << "insert again" << std::endl; if(goodHashRP1->insert(key, value)) std::cout << "Inserted" << std::endl; else std::cout << "Failed to insert" << std::endl; goodHashRP1->printTable(); return 0; }
/**
 * Re-hashes the raw bytes of an existing hash value.
 */
Common::Hash Hasher::hash(const Common::Hash& hash)
{
   Hasher h;
   h.addData(hash.getData(), Hash::HASH_SIZE);
   return h.getResult();
}
// Hashes a Value: the kind and static type first, then the fields relevant
// to the active kind. Note: hasher.add(&(...)) hashes the ADDRESS of the
// referenced declaration (identity semantics), not its contents.
size_t Value::hash() const {
	Hasher hasher;
	hasher.add(kind());
	hasher.add(type());

	switch (kind()) {
		case Value::SELF:
			break;
		case Value::THIS:
			break;
		case Value::CONSTANT:
			hasher.add(constant());
			break;
		case Value::ALIAS:
			// Alias identity plus its template arguments; the count is
			// mixed in so differing-length argument lists hash apart.
			hasher.add(&(alias()));
			hasher.add(aliasTemplateArguments().size());
			for (const auto& argument: aliasTemplateArguments()) {
				hasher.add(argument);
			}
			break;
		case Value::PREDICATE:
			hasher.add(predicate());
			break;
		case Value::LOCALVAR:
			// Local variables compare by identity (address).
			hasher.add(&(localVar()));
			break;
		case Value::REINTERPRET:
			hasher.add(reinterpretOperand());
			break;
		case Value::DEREF_REFERENCE:
			hasher.add(derefOperand());
			break;
		case Value::TERNARY:
			hasher.add(ternaryCondition());
			hasher.add(ternaryIfTrue());
			hasher.add(ternaryIfFalse());
			break;
		case Value::CAST:
			hasher.add(castTargetType());
			hasher.add(castOperand());
			break;
		case Value::POLYCAST:
			hasher.add(polyCastTargetType());
			hasher.add(polyCastOperand());
			break;
		case Value::INTERNALCONSTRUCT:
			hasher.add(internalConstructParameters().size());
			for (const auto& param: internalConstructParameters()) {
				hasher.add(param);
			}
			break;
		case Value::MEMBERACCESS:
			hasher.add(memberAccessObject());
			// The accessed variable hashes by identity (address).
			hasher.add(&(memberAccessVar()));
			break;
		case Value::BIND_REFERENCE:
			hasher.add(bindReferenceOperand());
			break;
		case Value::TYPEREF:
			hasher.add(typeRefType());
			break;
		case Value::TEMPLATEVARREF:
			hasher.add(templateVar());
			break;
		case Value::CALL:
			hasher.add(callValue());
			hasher.add(callParameters().size());
			for (const auto& param: callParameters()) {
				hasher.add(param);
			}
			break;
		case Value::FUNCTIONREF:
			hasher.add(functionRefParentType());
			// The referenced function hashes by identity (address).
			hasher.add(&(functionRefFunction()));
			hasher.add(functionRefTemplateArguments().size());
			for (const auto& arg: functionRefTemplateArguments()) {
				hasher.add(arg);
			}
			break;
		case Value::TEMPLATEFUNCTIONREF:
			hasher.add(templateFunctionRefParentType());
			hasher.add(templateFunctionRefName());
			hasher.add(templateFunctionRefFunctionType());
			break;
		case Value::METHODOBJECT:
			hasher.add(methodObject());
			hasher.add(methodOwner());
			break;
		case Value::INTERFACEMETHODOBJECT:
			hasher.add(interfaceMethodObject());
			hasher.add(interfaceMethodOwner());
			break;
		case Value::STATICINTERFACEMETHODOBJECT:
			hasher.add(staticInterfaceMethodObject());
			hasher.add(staticInterfaceMethodOwner());
			break;
		case Value::CAPABILITYTEST:
			hasher.add(capabilityTestCheckType());
			hasher.add(capabilityTestCapabilityType());
			break;
		case Value::ARRAYLITERAL:
			hasher.add(arrayLiteralValues().size());
			for (const auto& value: arrayLiteralValues()) {
				hasher.add(value);
			}
			break;
		case Value::NEW:
			hasher.add(newPlacementArg());
			hasher.add(newOperand());
			break;
		case Value::CASTDUMMYOBJECT:
			break;
	}

	return hasher.get();
}
// Flushes one descriptor set: hashes the currently bound resources to form
// a cache key, allocates/fills a VkDescriptorSet on a cache miss, and binds
// it with the accumulated dynamic offsets. The hash below and the write
// loops further down must walk the same masks in the same order.
void CommandBuffer::flush_descriptor_set(uint32_t set)
{
	auto &layout = current_layout->get_resource_layout();
	auto &set_layout = layout.sets[set];
	uint32_t num_dynamic_offsets = 0;
	uint32_t dynamic_offsets[VULKAN_NUM_BINDINGS];
	Hasher h;

	// UBOs
	for_each_bit(set_layout.uniform_buffer_mask, [&](uint32_t binding) {
		h.u64(cookies[set][binding]);
		h.u32(bindings[set][binding].buffer.range);
		VK_ASSERT(bindings[set][binding].buffer.buffer != VK_NULL_HANDLE);

		// UBO offsets are deliberately NOT hashed: they are applied as
		// dynamic offsets at bind time, so the same set can be reused.
		dynamic_offsets[num_dynamic_offsets++] = bindings[set][binding].buffer.offset;
	});

	// SSBOs
	for_each_bit(set_layout.storage_buffer_mask, [&](uint32_t binding) {
		h.u64(cookies[set][binding]);
		h.u32(bindings[set][binding].buffer.offset);
		h.u32(bindings[set][binding].buffer.range);
		VK_ASSERT(bindings[set][binding].buffer.buffer != VK_NULL_HANDLE);
	});

	// Sampled buffers
	for_each_bit(set_layout.sampled_buffer_mask, [&](uint32_t binding) {
		h.u64(cookies[set][binding]);
		VK_ASSERT(bindings[set][binding].buffer_view != VK_NULL_HANDLE);
	});

	// Sampled images
	for_each_bit(set_layout.sampled_image_mask, [&](uint32_t binding) {
		h.u64(cookies[set][binding]);
		// Secondary cookie identifies the sampler paired with the view.
		h.u64(secondary_cookies[set][binding]);
		h.u32(bindings[set][binding].image.imageLayout);
		VK_ASSERT(bindings[set][binding].image.imageView != VK_NULL_HANDLE);
		VK_ASSERT(bindings[set][binding].image.sampler != VK_NULL_HANDLE);
	});

	// Storage images
	for_each_bit(set_layout.storage_image_mask, [&](uint32_t binding) {
		h.u64(cookies[set][binding]);
		h.u32(bindings[set][binding].image.imageLayout);
		VK_ASSERT(bindings[set][binding].image.imageView != VK_NULL_HANDLE);
	});

	// Input attachments
	for_each_bit(set_layout.input_attachment_mask, [&](uint32_t binding) {
		h.u64(cookies[set][binding]);
		h.u32(bindings[set][binding].image.imageLayout);
		VK_ASSERT(bindings[set][binding].image.imageView != VK_NULL_HANDLE);
	});

	Hash hash = h.get();
	auto allocated = current_layout->get_allocator(set)->find(hash);

	// The descriptor set was not successfully cached, rebuild.
	if (!allocated.second)
	{
		uint32_t write_count = 0;
		uint32_t buffer_info_count = 0;
		VkWriteDescriptorSet writes[VULKAN_NUM_BINDINGS];
		VkDescriptorBufferInfo buffer_info[VULKAN_NUM_BINDINGS];

		for_each_bit(set_layout.uniform_buffer_mask, [&](uint32_t binding) {
			auto &write = writes[write_count++];
			write.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
			write.pNext = nullptr;
			write.descriptorCount = 1;
			write.descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC;
			write.dstArrayElement = 0;
			write.dstBinding = binding;
			write.dstSet = allocated.first;

			// Offsets are applied dynamically.
			auto &buffer = buffer_info[buffer_info_count++];
			buffer = bindings[set][binding].buffer;
			buffer.offset = 0;
			write.pBufferInfo = &buffer;
		});

		for_each_bit(set_layout.storage_buffer_mask, [&](uint32_t binding) {
			auto &write = writes[write_count++];
			write.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
			write.pNext = nullptr;
			write.descriptorCount = 1;
			write.descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER;
			write.dstArrayElement = 0;
			write.dstBinding = binding;
			write.dstSet = allocated.first;
			write.pBufferInfo = &bindings[set][binding].buffer;
		});

		for_each_bit(set_layout.sampled_buffer_mask, [&](uint32_t binding) {
			auto &write = writes[write_count++];
			write.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
			write.pNext = nullptr;
			write.descriptorCount = 1;
			write.descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER;
			write.dstArrayElement = 0;
			write.dstBinding = binding;
			write.dstSet = allocated.first;
			write.pTexelBufferView = &bindings[set][binding].buffer_view;
		});

		for_each_bit(set_layout.sampled_image_mask, [&](uint32_t binding) {
			auto &write = writes[write_count++];
			write.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
			write.pNext = nullptr;
			write.descriptorCount = 1;
			write.descriptorType = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER;
			write.dstArrayElement = 0;
			write.dstBinding = binding;
			write.dstSet = allocated.first;
			write.pImageInfo = &bindings[set][binding].image;
		});

		for_each_bit(set_layout.storage_image_mask, [&](uint32_t binding) {
			auto &write = writes[write_count++];
			write.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
			write.pNext = nullptr;
			write.descriptorCount = 1;
			write.descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_IMAGE;
			write.dstArrayElement = 0;
			write.dstBinding = binding;
			write.dstSet = allocated.first;
			write.pImageInfo = &bindings[set][binding].image;
		});

		for_each_bit(set_layout.input_attachment_mask, [&](uint32_t binding) {
			auto &write = writes[write_count++];
			write.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
			write.pNext = nullptr;
			write.descriptorCount = 1;
			write.descriptorType = VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT;
			write.dstArrayElement = 0;
			write.dstBinding = binding;
			write.dstSet = allocated.first;
			write.pImageInfo = &bindings[set][binding].image;
		});

		vkUpdateDescriptorSets(device->get_device(), write_count, writes, 0, nullptr);
	}

	// Bind point follows whether we are inside a render pass.
	vkCmdBindDescriptorSets(cmd, render_pass ? VK_PIPELINE_BIND_POINT_GRAPHICS : VK_PIPELINE_BIND_POINT_COMPUTE,
	                        current_pipeline_layout, set, 1, &allocated.first, num_dynamic_offsets, dynamic_offsets);
}
static void calculate_rendezvous_hash(std::vector<std::string> cluster, std::vector<uint32_t> cluster_rendezvous_hashes, TimerID id, uint32_t replication_factor, std::vector<std::string>& replicas, Hasher* hasher) { if (replication_factor == 0u) { return; } std::map<uint32_t, size_t> hash_to_idx; std::vector<std::string> ordered_cluster; // Do a rendezvous hash, by hashing this timer repeatedly, seeded by a // different per-server value each time. Rank the servers for this timer // based on this hash output. for (unsigned int ii = 0; ii < cluster.size(); ++ii) { uint32_t server_hash = cluster_rendezvous_hashes[ii]; uint32_t hash = hasher->do_hash(id, server_hash); // Deal with hash collisions by incrementing the hash. For // example, if I have server hashes A, B, C, D which cause // this timer to hash to 10, 40, 10, 30: // hash_to_idx[10] = 0 (A's index) // hash_to_idx[40] = 1 (B's index) // hash_to_idx[10] exists, increment C's hash // hash_to_idx[11] = 2 (C's index) // hash_to_idx[30] = 3 (D's index) // // Iterating over hash_to_idx then gives (10, 0), (11, 2), (40, 1) // and (30, 3), so the ordered list is A, C, B, D. Effectively, the // first entry in the original list consistently wins. // // This doesn't work perfectly in the edge case // If I have servers A, B, C, D which cause this // timer to hash to 10, 11, 10, 11: // hash_to_idx[10] = 0 (A's index) // hash_to_idx[11] = 1 (B's index) // hash_to_idx[10] exists, increment C's hash // hash_to_idx[11] exists, increment C's hash // hash_to_idx[12] = 2 (C's index) // hash_to_idx[11] exists, increment D's hash // hash_to_idx[12] exists, increment D's hash // hash_to_idx[13] = 3 (D's index) // // Iterating over hash_to_idx then gives (10, 0), (11, 1), (12, 2) // and (13, 3), so the ordered list is A, B, C, D. This is wrong, // but deterministic - the only problem in this very rare case is that // more timers will be moved around when scaling. 
while (hash_to_idx.find(hash) != hash_to_idx.end()) { // LCOV_EXCL_START hash++; // LCOV_EXCL_STOP } hash_to_idx[hash] = ii; } // Pick the lowest hash value as the primary replica. for (std::map<uint32_t, size_t>::iterator ii = hash_to_idx.begin(); ii != hash_to_idx.end(); ii++) { ordered_cluster.push_back(cluster[ii->second]); } replicas.push_back(ordered_cluster.front()); // Pick the (N-1) highest hash values as the backup replicas. replication_factor = replication_factor > ordered_cluster.size() ? ordered_cluster.size() : replication_factor; for (size_t jj = 1; jj < replication_factor; jj++) { replicas.push_back(ordered_cluster.back()); ordered_cluster.pop_back(); } }