/** Reserve some part of the pipe for reading

    \param[in] s is the number of elements to reserve

    \param[out] rid is an iterator to a description of the reservation
    that has been done, if successful

    \param[in] blocking specifies whether the call waits for the
    operation to succeed

    \return true if the reservation was successful
*/
bool reserve_read(std::size_t s, rid_iterator &rid, bool blocking = false) {
  // Lock the pipe to avoid being disturbed
  std::unique_lock<std::mutex> ul { cb_mutex };
  TRISYCL_DUMP_T("Before read reservation cb.size() = " << cb.size()
                 << " size() = " << size());
  if (s == 0)
    // Empty reservation requested, so nothing to do
    return false;

  if (blocking)
    /* If in blocking mode, wait for enough elements to read in the
       pipe for the reservation. This condition can change when a
       write is done */
    write_done.wait(ul, [&] { return s <= size(); });
  else if (s > size())
    // Not enough elements to read in the pipe for the reservation
    return false;

  // Compute the location of the first element of the reservation
  auto first = cb.begin() + read_reserved_frozen;
  // Increment the number of frozen elements
  read_reserved_frozen += s;
  /* Add a description of the reservation at the end of the
     reservation queue */
  r_rid_q.emplace_back(first, s);
  // Return the iterator to the last reservation descriptor
  rid = r_rid_q.end() - 1;
  TRISYCL_DUMP_T("After reservation cb.size() = " << cb.size()
                 << " size() = " << size());
  return true;
}
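// The blocking path above relies on std::condition_variable::wait with a
// predicate under a std::unique_lock. The following is a minimal,
// self-contained sketch of that pattern only; the BoundedQueue type and its
// members are assumptions made for illustration and are not part of the
// triSYCL pipe class shown above.
#include <condition_variable>
#include <deque>
#include <mutex>

class BoundedQueue {
  std::mutex m;
  std::condition_variable not_empty;
  std::deque<int> data;

public:
  void push(int v) {
    {
      std::lock_guard<std::mutex> lock { m };
      data.emplace_back(v);
    }
    // Wake a reader that may be blocked in pop_at_least()
    not_empty.notify_one();
  }

  // Block until at least n elements are available, like the blocking
  // reservation above waits until s <= size()
  void pop_at_least(std::size_t n, std::deque<int> &out) {
    std::unique_lock<std::mutex> lock { m };
    not_empty.wait(lock, [&] { return data.size() >= n; });
    out.insert(out.end(), data.begin(), data.begin() + n);
    data.erase(data.begin(), data.begin() + n);
  }
};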
void Enter(Activity *activity) {
  if (current) {
    stack.emplace_back(current);
  }
  current = activity;
  current->dispatchStart();
}
S4() {
  for (int i = 0; i < 6; ++i) {
    m_s1.emplace_back();
    m_s2.emplace_back();
    m_s3.emplace_back();
  }
}
static inline void propagate_age_update(Cfg& conf, std::deque<Cfg>& tmpres, std::size_t dst) {
  // Here we propagate an age field update to all pointers that are equal
  // and thus experience the update too => only for age updates of next fields.
  // Split to find truly equal pointers.
  auto shape_split = disambiguate(*conf.shape, dst);
  for (Shape* s : shape_split) {
    tmpres.emplace_back(Cfg(conf, s));
    Cfg& config = tmpres.back();
    // Pointer equal to dst => experiences the age update too
    for (std::size_t i = 0; i < s->size(); i++) {
      if (i == dst) continue;
      if (s->test(i, dst, EQ)) {
        #if CAS_OVERAPPROXIMATE_AGE_PROPAGATION
          // Overapproximation: drop the age relation of pointers that observe the age assignment
          for (std::size_t j = 0; j < s->size(); j++)
            for (bool b : {false, true})
              config.ages->set(j, b, i, true, AgeRel::BOT);
        #else
          mk_next_age_equal(config, i, dst, true);
        #endif
      }
    }
  }
  // The shape/ages from conf may no longer be valid => overwrite the shape/ages
  conf.shape = std::move(tmpres.back().shape);
  conf.ages = std::move(tmpres.back().ages);
  tmpres.pop_back();
}
int Cursor::Next(const size_t count, std::deque<nx::String>& buf) {
  int rs = 0;
  if (isShared()) {
    if (extra_state_ && extra_state_->IsReady()) {
      // +extra with mixing
      // Use a distinct name for the extra payload so it does not shadow
      // the integer result code declared above
      std::string extra = extra_state_->Get(extra_delim_);
      buf.emplace_back(extra.begin(), extra.end());
      ++(*extra_state_);
      rs = 1;
    } else {
      std::deque<nx::String> tmpbuf;
      rs = do_next(count * shared_total_, tmpbuf);
      if (rs >= 0)
        for (size_t i = 0, j = shared_curr_ - 1;
             i < count && j < tmpbuf.size();
             ++i, j = shared_curr_ - 1 + shared_total_ * i) {
          buf.push_back(tmpbuf[j]);
        }
    }
  } else {
    ++(*extra_state_);
    rs = do_next(count, buf);
  }
  return rs;
}
void createPlugins(MyPluginFactory& factory,
                   const std::vector<std::string>& availablePlugins,
                   std::deque<MyPlugin>& plugins) {
  for (const auto& pluginName : availablePlugins) {
    plugins.emplace_back(factory.instance(pluginName));
  }
}
void run_on_main_thread(std::function<void()> &func) {
  {
    std::unique_lock<std::mutex> lock(main_thread_mutex);
    main_func.emplace_back(func);
  }
  main_thread_cv.notify_all();
  std::unique_lock<std::mutex> lock(main_thread_mutex);
  main_thread_cv.wait(lock, []{ return main_done || main_func.empty(); });
}
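// The function above only shows the posting side of a main-thread dispatch
// queue. Below is a minimal, self-contained sketch of both halves of that
// pattern; the names (g_queue, drain_main_queue, ...) are assumptions for
// illustration and do not come from the original code base.
#include <condition_variable>
#include <deque>
#include <functional>
#include <mutex>

namespace {
std::mutex g_queue_mutex;
std::condition_variable g_queue_cv;
std::deque<std::function<void()>> g_queue;
}

// Called from worker threads: hand a task to the main thread and return
void post_to_main_thread(std::function<void()> task) {
  {
    std::lock_guard<std::mutex> lock(g_queue_mutex);
    g_queue.emplace_back(std::move(task));
  }
  g_queue_cv.notify_one();
}

// Called from the main thread: wait for work, then run everything queued
void drain_main_queue() {
  std::deque<std::function<void()>> batch;
  {
    std::unique_lock<std::mutex> lock(g_queue_mutex);
    g_queue_cv.wait(lock, [] { return !g_queue.empty(); });
    batch.swap(g_queue);  // take the whole batch while holding the lock
  }
  for (auto &task : batch)
    task();  // run outside the lock so posters are never blocked on us
}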
/** Enqueue a work item */
bool Enqueue(WorkItem* item) {
  boost::unique_lock<boost::mutex> lock(cs);
  if (queue.size() >= maxDepth) {
    return false;
  }
  queue.emplace_back(std::unique_ptr<WorkItem>(item));
  cond.notify_one();
  return true;
}
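// Only the producer side of this bounded work queue is shown above. A
// minimal, self-contained sketch of a matching consumer loop follows; it is
// an assumption about how such a queue is typically drained, written against
// std:: primitives rather than the boost types used above, and the names
// (SketchWorkQueue, RunOne, ...) are illustrative only.
#include <condition_variable>
#include <deque>
#include <memory>
#include <mutex>

struct WorkItem { virtual ~WorkItem() = default; virtual void run() = 0; };

class SketchWorkQueue {
  std::mutex cs;
  std::condition_variable cond;
  std::deque<std::unique_ptr<WorkItem>> queue;
  bool running = true;

public:
  // Block until an item is available (or the queue is shut down) and run it
  void RunOne() {
    std::unique_ptr<WorkItem> item;
    {
      std::unique_lock<std::mutex> lock(cs);
      cond.wait(lock, [this] { return !running || !queue.empty(); });
      if (!running && queue.empty())
        return;
      item = std::move(queue.front());
      queue.pop_front();
    }
    item->run();  // execute outside the lock
  }

  // Unblock all consumers and let them drain remaining work
  void Stop() {
    {
      std::lock_guard<std::mutex> lock(cs);
      running = false;
    }
    cond.notify_all();
  }
};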
void element_is_ready() {
  std::unique_lock<std::mutex> guard(m_lock);
  m_elements.emplace_back(std::move(m_current));
  m_current.reset(new element);
  if (m_elements.size() > m_pool.size() * 2) {
    m_parser_wait.wait(guard, [&] { return m_elements.size() < m_pool.size(); });
  }
  guard.unlock();
  m_pool_wait.notify_one();
}
// ------------------------------------------------------------------------
void addAndSetTime(uint32_t ping, uint64_t server_time) {
  if (m_synchronised.load() == true)
    return;

  if (m_force_set_timer.load() == true) {
    m_force_set_timer.store(false);
    m_synchronised.store(true);
    STKHost::get()->setNetworkTimer(server_time + (uint64_t)(ping / 2));
    return;
  }

  const uint64_t cur_time = StkTime::getMonoTimeMs();
  // Discard samples that arrive too soon after the previous ping
  // (due to resends on packet loss)
  // 10 packets per second as seen in STKHost
  const uint64_t frequency = (uint64_t)((1.0f / 10.0f) * 1000.0f) / 2;
  if (!m_times.empty() &&
      cur_time - std::get<2>(m_times.back()) < frequency)
    return;

  // Take at most 20 averaged samples from m_times; the next addAndGetTime
  // is used to determine whether that server_time is correct, and if not,
  // half of m_times is cleared until it is
  if (m_times.size() >= 20) {
    uint64_t sum = std::accumulate(m_times.begin(), m_times.end(), (uint64_t)0,
      [cur_time](const uint64_t previous,
                 const std::tuple<uint32_t, uint64_t, uint64_t>& b)->uint64_t {
        return previous + (uint64_t)(std::get<0>(b) / 2) + std::get<1>(b) +
               cur_time - std::get<2>(b);
      });
    const int64_t averaged_time = sum / 20;
    const int64_t server_time_now = server_time + (uint64_t)(ping / 2);
    int difference = (int)std::abs(averaged_time - server_time_now);
    if (std::abs(averaged_time - server_time_now) <
        UserConfigParams::m_timer_sync_difference_tolerance) {
      STKHost::get()->setNetworkTimer(averaged_time);
      m_times.clear();
      m_force_set_timer.store(false);
      m_synchronised.store(true);
      Log::info("NetworkTimerSynchronizer", "Network "
                "timer synchronized, difference: %dms", difference);
      return;
    }
    m_times.erase(m_times.begin(), m_times.begin() + 10);
  }
  m_times.emplace_back(ping, server_time, cur_time);
}
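// Each stored sample is a (ping, server_time, local_receive_time) tuple, and
// the accumulate above estimates the current server time from sample i as
//   server_time_i + ping_i / 2 + (cur_time - local_receive_time_i),
// i.e. the server time reported then, plus half the round trip, advanced by
// the local time elapsed since the sample was taken. A minimal standalone
// illustration of that per-sample estimate; names are illustrative only.
#include <cstdint>
#include <tuple>

using TimeSample = std::tuple<uint32_t, uint64_t, uint64_t>;  // ping, server time, local receive time

uint64_t estimate_server_time_now(const TimeSample& s, uint64_t cur_time) {
  return std::get<1>(s)                    // server time when the sample was taken
       + (uint64_t)(std::get<0>(s) / 2)    // one-way latency, approximated as ping / 2
       + (cur_time - std::get<2>(s));      // local time elapsed since the sample
}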
void append(Args&&... args) {
  static_assert(
    std::is_same<typename Event::tag, Tag>::value,
    "message protocol is not compatible with this message queue"
  );
  if (!upstream_) {
    return operations.emplace_back(aux::make_frozen<Event>(std::forward<Args>(args)...));
  }
  upstream_->send<Event>(std::forward<Args>(args)...);
}
void ProfileDrawer::DbgTimingInfo(DbgTimingInfoType type, const spring_time start, const spring_time end) {
  if (!IsEnabled())
    return;

  switch (type) {
    case TIMING_VIDEO:    { vidFrames.emplace_back(start, end); } break;
    case TIMING_SIM:      { simFrames.emplace_back(start, end); } break;
    case TIMING_GC:       { lgcFrames.emplace_back(start, end); } break;
    case TIMING_SWAP:     { swpFrames.emplace_back(start, end); } break;
    case TIMING_UNSYNCED: { uusFrames.emplace_back(start, end); } break;
  }
}
result_type traverse(std::conditional_t< reassmble, ast::expression &&, ast::expression const & > _input) {
  // if reassmble is true, then input is taken apart, then reassembled
  assert(output_.empty());
  if (_input.rest_.empty()) {
    return dispatcher_(std::move(_input.first_));
  } else if (_input.rest_.size() == 1) {
    auto & operation_ = _input.rest_.back();
    return dispatcher_(std::move(_input.first_), operation_.operator_, std::move(operation_.operand_));
  } else {
    //output_.reserve(_input.rest_.size() * 2 + 1); // total number of operators and operands
    std::stack< aggregate_wrapper< lhs_op > > lhs_op_;
    for (auto & operation_ : _input.rest_) {
      size_type const precedence_ = ast::precedence(operation_.operator_);
      while (!lhs_op_.empty()) {
        lhs_op const & top_ = lhs_op_.top();
        if (ast::precedence(top_.operator_) < precedence_) {
          break;
        }
        output_.emplace_back(top_.lhs_, top_.operator_, output_.size());
        lhs_op_.pop();
      }
      lhs_op_.emplace(output_.size(), operation_.operator_);
      output_.emplace_back(&operation_.operand_);
    }
    while (!lhs_op_.empty()) {
      lhs_op const & top_ = lhs_op_.top();
      output_.emplace_back(top_.lhs_, top_.operator_, output_.size());
      lhs_op_.pop();
    }
    output_.emplace_front(&_input.first_);
    return operator () (output_.back());
  }
}
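// The function above resolves a flat "first operand, then (operator, operand)
// pairs" representation by operator precedence, using a stack of pending
// left-hand sides (a shunting-yard-style pass). Below is a minimal,
// self-contained sketch of the same idea that evaluates integers directly
// instead of building a postfix structure; everything here is illustrative
// and unrelated to the original ast:: types.
#include <stack>
#include <utility>
#include <vector>

namespace sketch {

int precedence(char op) { return (op == '*' || op == '/') ? 2 : 1; }

int apply(int lhs, char op, int rhs) {
  switch (op) {
    case '+': return lhs + rhs;
    case '-': return lhs - rhs;
    case '*': return lhs * rhs;
    default:  return lhs / rhs;
  }
}

// rest is the (operator, operand) tail, e.g. 1 followed by {+,2},{*,3} is 1+2*3
int evaluate(int first, const std::vector<std::pair<char, int>>& rest) {
  std::stack<std::pair<int, char>> lhs_op;  // pending left-hand sides
  int operand = first;
  for (auto [op, next] : rest) {
    // Reduce everything on the stack whose precedence is >= the incoming operator
    while (!lhs_op.empty() && precedence(lhs_op.top().second) >= precedence(op)) {
      operand = apply(lhs_op.top().first, lhs_op.top().second, operand);
      lhs_op.pop();
    }
    lhs_op.emplace(operand, op);
    operand = next;
  }
  while (!lhs_op.empty()) {  // reduce what is left, right to left
    operand = apply(lhs_op.top().first, lhs_op.top().second, operand);
    lhs_op.pop();
  }
  return operand;
}

} // namespace sketch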
std::unique_ptr<ui8[]> getCachedFile(ResourceID rid) {
  for (auto & file : cache) {
    if (file.name == rid)
      return file.getCopy();
  }
  // Still here? Cache miss
  if (cache.size() > cacheSize)
    cache.pop_front();

  auto data = CResourceHandler::get()->load(rid)->readAll();
  cache.emplace_back(std::move(rid), data.second, std::move(data.first));
  return cache.back().getCopy();
}
ComponentType& addComponent(Args&&... args) {
  static_assert(std::is_base_of<NodeComponent, ComponentType>::value,
                "ComponentType must inherit from NodeComponent.");
  assert(!hasComponent<ComponentType>() &&
         "SceneNode::addComponent component of that type already exists.");
  ComponentType* component{ new ComponentType{std::forward<Args>(args)...} };
  component->parent = this;
  components.emplace_back(std::unique_ptr<NodeComponent>{component});
  componentArray[getComponentTypeId<ComponentType>()] = component;
  componentBitset[getComponentTypeId<ComponentType>()] = true;
  return *component;
}
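// A hedged usage sketch of the entity-component call above. SceneNode and
// NodeComponent are the types referenced by the snippet itself; the
// VelocityComponent type and its fields are assumptions made up purely for
// illustration.
struct VelocityComponent : NodeComponent {
  float dx, dy;
  VelocityComponent(float x, float y) : dx{x}, dy{y} {}
};

void addComponentUsageSketch(SceneNode& node) {
  // Constructor arguments are perfectly forwarded to VelocityComponent
  auto& velocity = node.addComponent<VelocityComponent>(1.0f, 0.0f);
  velocity.dy = -9.8f;  // the node owns the component through a unique_ptr
}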
void Arbitre::checkOnEat(Vector<int> const &pos, Vector<int> const &nb, int color,
                         char const * const *map, std::deque<Vector<Vector<int>>> &coords) {
  Vector<int> tmpx, tmpy, tmpz;
  int ocolor;

  ocolor = color == WHITE ? BLACK : WHITE;
  tmpx = pos + nb;
  tmpy = tmpx + nb;
  tmpz = tmpy + nb;
  if (tmpx.x >= 0 && tmpy.x >= 0 && tmpz.x >= 0 &&
      tmpx.x < 19 && tmpy.x < 19 && tmpz.x < 19 &&
      tmpx.y >= 0 && tmpy.y >= 0 && tmpz.y >= 0 &&
      tmpx.y < 19 && tmpy.y < 19 && tmpz.y < 19 &&
      map[tmpx.y][tmpx.x] == ocolor && map[tmpy.y][tmpy.x] == ocolor &&
      map[tmpz.y][tmpz.x] == color)
    coords.emplace_back(tmpx, tmpy);
}
void do_resolve(std::vector<std::string> const& names,
                HandlerType const& handler, CompletionCounter) {
  assert(!names.empty());

  if (m_stop_called == false) {
    m_work.emplace_back(names, handler);
    JLOG(m_journal.debug()) <<
      "Queued new job with " << names.size() <<
      " tasks. " << m_work.size() << " jobs outstanding.";

    if (m_work.size() > 0) {
      m_io_service.post(m_strand.wrap(std::bind(
        &ResolverAsioImpl::do_work, this, CompletionCounter(this))));
    }
  }
}
void TTreeBase::ukkonenPush(std::deque<TUkkonenBuildCursor>& cursors, size_t position) {
  cursors.emplace_back(TUkkonenBuildCursor(&rootEdge));
  TNode* nodeForLink = nullptr;
  while (cursors.front().deleted) {
    cursors.pop_front();
  }
  //*dbg*/ size_t counter = 0;
  for (auto& cursor : cursors) {
    if (cursor.deleted) {
      //*dbg*/ std::cerr << counter++ << ": skip deleted cursor\n";
      /*dbg*/ throw TSimpleException("impossible situation");
      continue;
    }
    //*dbg*/ else {
    //*dbg*/   std::cerr << counter++ << ": cursor: \n";
    //*dbg*/ }
    /*dbg*/ std::cerr << "step " << cursor.edge->subString.str << std::endl;
    /*dbg*/ std::cerr << "step edge ptr " << cursor.edge << std::endl;
    int stepType = cursor.step(position);
    /*dbg*/ std::cerr << "step: " << stepType << "\n\n";
    if (stepType > 1) {
      TNode* currentNode = cursor.edge->parentNode;
      if (nodeForLink != nullptr && nodeForLink->suffixLink == nullptr) {
        //*dbg*/ std::cerr << "link\n";
        nodeForLink->suffixLink = currentNode;
      }
      nodeForLink = currentNode;
    }
    if (stepType == 2) {
      cursor.deleted = true;
    }
  }
}
void add_msg_string(std::string &&msg, game_message_type const type) {
  if (msg.length() == 0) {
    return;
  }
  // hide messages if dead
  if (g->u.is_dead_state()) {
    return;
  }

  if (type == m_debug && !debug_mode) {
    return;
  }

  if (coalesce_messages(msg, type)) {
    return;
  }

  while (messages.size() > 255) {
    messages.pop_front();
  }

  messages.emplace_back(remove_color_tags(std::move(msg)), type);
}
/** Reserve some part of the pipe for writing

    \param[in] s is the number of elements to reserve

    \param[out] rid is an iterator to a description of the reservation
    that has been done, if successful

    \param[in] blocking specifies whether the call waits for the
    operation to succeed

    \return true if the reservation was successful
*/
bool reserve_write(std::size_t s, rid_iterator &rid, bool blocking = false) {
  // Lock the pipe to avoid being disturbed
  std::unique_lock<std::mutex> ul { cb_mutex };
  TRISYCL_DUMP_T("Before write reservation cb.size() = " << cb.size()
                 << " size() = " << size());
  if (s == 0)
    // Empty reservation requested, so nothing to do
    return false;

  if (blocking)
    /* If in blocking mode, wait for enough room in the pipe, which may
       change when a read is done. Do not use a difference here because
       it is only about unsigned values */
    read_done.wait(ul, [&] { return cb.size() + s <= capacity(); });
  else if (cb.size() + s > capacity())
    // Not enough room in the pipe for the reservation
    return false;

  /* If there is enough room in the pipe, just create default values in
     it to do the reservation */
  for (std::size_t i = 0; i != s; ++i)
    cb.push_back();
  /* Compute the location of the first element a posteriori since it
     may not exist a priori if cb was empty before */
  auto first = cb.end() - s;
  /* Add a description of the reservation at the end of the reservation
     queue */
  w_rid_q.emplace_back(first, s);
  // Return the iterator to the last reservation descriptor
  rid = w_rid_q.end() - 1;
  TRISYCL_DUMP_T("After reservation cb.size() = " << cb.size()
                 << " size() = " << size());
  return true;
}
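// A hypothetical caller-side sketch of how such a write reservation might be
// used: reserve space, fill the reserved slots through the returned
// descriptor, then commit. The Pipe and Reservation template parameters, the
// rid->first member, and commit_write are assumptions for illustration only,
// not the triSYCL API.
template <typename Pipe, typename Reservation>
bool write_three_values(Pipe& p, int a, int b, int c) {
  Reservation rid;
  // Block until 3 slots can be reserved at the end of the pipe
  if (!p.reserve_write(3, rid, /* blocking = */ true))
    return false;
  // Fill the reserved slots in place, then mark the reservation as done so
  // readers can see the data
  auto it = rid->first;  // hypothetical accessor for the first reserved slot
  *it++ = a;
  *it++ = b;
  *it++ = c;
  p.commit_write(rid);   // hypothetical commit step
  return true;
}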
void finished_with_buffer(std::shared_ptr<std::vector<unsigned char>> buffer) {
  if (buffer.unique()) {
    free_buffers.emplace_back(std::move(buffer));
  }
}
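// std::shared_ptr::unique() is deprecated in C++17 and removed in C++20, so a
// post-C++17 version of the recycling check above could test use_count()
// instead. This is a sketch assuming the same free_buffers deque; note that
// use_count() is only a hint when other threads may copy the pointer
// concurrently.
#include <deque>
#include <memory>
#include <vector>

std::deque<std::shared_ptr<std::vector<unsigned char>>> free_buffers;

void finished_with_buffer_cxx20(std::shared_ptr<std::vector<unsigned char>> buffer) {
  if (buffer.use_count() == 1) {  // we hold the last reference: recycle it
    free_buffers.emplace_back(std::move(buffer));
  }
}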
void emplace(ARGS&&... args) {
  // Add to back
  Items.emplace_back(std::forward<ARGS>(args)...);
}
event_context::event_context() { event_contexts.emplace_back(); }
void push(ARG&& val) {
  // Add to back
  Items.emplace_back(std::forward<ARG>(val));
}
/// Process data from bitcoind daemon (esp. RPC getrawmempool for live transactions)
void daemonThreadFunc(BlockChain* blockChain) {
  leveldb::WriteBatch batch;

  while (true) {
    std::this_thread::sleep_for(std::chrono::seconds(1));
    if (requestedQuit)
      break;
    if (!txCheck)
      continue;

    try {
      auto transactions = rpcClient->CallMethod("getrawmempool", Json::Value());

      BlockInfo blockInfo;
      Hash256 latestBlock;
      {
        boost::lock_guard<boost::mutex> guard(chainMutex);
        latestBlock = currentChain.back();
        auto blockInfoIt = blocks.find(latestBlock);
        if (blockInfoIt == blocks.end()) {
          continue;
        }
        blockInfo = blockInfoIt->second;
      }

      if (pendingPreviousBlock != latestBlock) {
        pendingPreviousBlock = latestBlock;

        // Broadcast new block for live update
        Json::Value blockResult;
        blockResult["item"] = "block";
        convertBlock(blockResult, pendingPreviousBlock, blockInfo);
        websocket_server.broadcast_livetx(blockResult.toStyledString().c_str());

        {
          boost::lock_guard<boost::mutex> guard(pendingTransactionsMutex);
          pendingTransactionIndices.clear();
          pendingTransactions.clear();
          pendingTransactionsByAddress.clear();
        }
      }

      for (auto tx = transactions.begin(); tx != transactions.end(); ++tx) {
        Hash256 txHash;
        encodeHexString((uint8_t*) &txHash, 32, (*tx).asString(), true);

        batch.Clear();

        {
          // Already exists?
          boost::unique_lock<boost::mutex> guard(pendingTransactionsMutex);
          if (pendingTransactionIndices.find(txHash) != pendingTransactionIndices.end())
            continue;
        }

        {
          DbTransaction dbTx;

          Json::Value parameters(Json::arrayValue);
          parameters.append(*tx);
          auto rawTx = rpcClient->CallMethod("getrawtransaction", parameters);
          auto data = rawTx.asCString();
          auto bufferLength = strlen(data) / 2;

          llvm::SmallVector<uint8_t, 65536> buffer;
          buffer.resize(bufferLength);
          encodeHexString(buffer.begin(), bufferLength, data);

          BlockChain::BlockTransaction tx2;
          if (!blockChain->processSingleTransaction(buffer.begin(), bufferLength, tx2))
            assert("could not read tx" && false);
          assert(memcmp(tx2.transactionHash, &txHash, 32) == 0);

          uint64_t totalOutput = 0;
          bool needBroadcast = false;

          auto txIndex = dbHelper.txLoad(txHash, dbTx, NULL, NULL);
          if (txIndex == 0) {
            {
              boost::unique_lock<boost::mutex> guard(pendingTransactionsMutex);
              try {
                txIndex = processTransaction(batch, dbTx, tx2, time(NULL), false,
                                             &pendingTransactionIndices, &pendingTransactions);
              } catch (const std::exception&) {
                // Catch by const reference to avoid slicing the exception object
                batch.Clear();
                continue;
              }
            }
            needBroadcast = true;

            dbHelper.txSave(batch, txHash, dbTx);
            db->Write(leveldb::WriteOptions(), &batch);
          }

          {
            boost::unique_lock<boost::mutex> guard(pendingTransactionsMutex);
            pendingTransactionIndices[txHash] = pendingTransactions.size();
            pendingTransactions.emplace_back(txHash, dbTx);

            // lastPendingTransactions only holds the last N items
            if (lastPendingTransactions.size() >= lastPendingTransactionsSize)
              lastPendingTransactions.pop_front();
            lastPendingTransactions.emplace_back(txHash, dbTx);

            for (auto output = dbTx.outputs.begin(); output != dbTx.outputs.end(); ++output) {
              if (output->address.IsNull())
                continue;
              totalOutput += output->value;
              pendingTransactionsByAddress.emplace(output->address, pendingTransactions.size() - 1);
            }
            for (auto input = dbTx.inputs.begin(); input != dbTx.inputs.end(); ++input) {
              if (input->address.IsNull())
                continue;
              pendingTransactionsByAddress.emplace(input->address, pendingTransactions.size() - 1);
            }
          }

          if (needBroadcast) {
            Json::Value txResult;

            // Broadcast tx for live update
            txResult["item"] = "tx";
            txResult["output"] = (double) totalOutput;

            char timeBuffer[65];
            convertTime(dbTx.getBestTimeStamp(), timeBuffer, sizeof(timeBuffer));
            txResult["time"] = timeBuffer;
            txResult["coinage_destroyed"] = dbTx.coinAgeDestroyed / 86400.0;
            txResult["hash"] = *tx;

            websocket_server.broadcast_livetx(txResult.toStyledString().c_str());
          }
        }
      }
    } catch (const std::exception& e) {
      // Catch by const reference; reset pending state on any RPC/processing error
      boost::lock_guard<boost::mutex> guard(pendingTransactionsMutex);
      pendingTransactionIndices.clear();
      pendingTransactions.clear();
      pendingTransactionsByAddress.clear();
      printf("Error processing live transactions: %s!\n", e.what());
    }
  }
}
void bad_emplace_back_deque1(std::deque<int> &D, int n) {
  auto i0 = D.cbegin(), i1 = D.cend();
  D.emplace_back(n);
  *i0; // expected-warning{{Invalidated iterator accessed}}
  --i1; // expected-warning{{Invalidated iterator accessed}}
}
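// The analyzer test above relies on a std::deque rule worth spelling out:
// inserting at either end invalidates all iterators into the deque, but
// references and pointers to existing elements stay valid. A small
// self-contained sketch of that distinction:
#include <cassert>
#include <deque>

void deque_invalidation_sketch() {
  std::deque<int> d{1, 2, 3};
  int& first_ref = d.front();   // reference: survives push/emplace at the ends
  auto first_it = d.begin();    // iterator: invalidated by the emplace_back below

  d.emplace_back(4);

  assert(first_ref == 1);       // still safe to use
  (void) first_it;              // dereferencing first_it here would be undefined behavior
}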
void emplace(Arguments&& ... args) {
  std::unique_lock<std::mutex> lock(mutex_);
  queue_.emplace_back(std::forward<Arguments>(args) ...);
}
future<bool> add_listener() { return lock(), listeners.emplace_back(), listeners.back().get_future(); }
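// add_listener above stores a default-constructed promise and hands back its
// future; the comma expression keeps the lock() temporary alive for the whole
// statement. Below is a hedged sketch of the matching notification side,
// under the assumption that listeners is a std::deque<std::promise<bool>>
// guarded by a mutex; the names here are illustrative, not the original API.
#include <deque>
#include <future>
#include <mutex>

std::mutex listeners_mutex;
std::deque<std::promise<bool>> listeners;

std::future<bool> add_listener_sketch() {
  std::lock_guard<std::mutex> guard(listeners_mutex);
  listeners.emplace_back();
  return listeners.back().get_future();
}

// Wake every waiter with the given result and drop the fulfilled promises
void notify_listeners(bool value) {
  std::deque<std::promise<bool>> ready;
  {
    std::lock_guard<std::mutex> guard(listeners_mutex);
    ready.swap(listeners);  // take ownership so set_value runs outside the lock
  }
  for (auto &p : ready)
    p.set_value(value);
}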
void AddTask(std::function<void()> workFn, std::function<void()> completionFn) {
  unique_lock lock(_mutex);
  _pending.emplace_back(workFn, completionFn);
  _condPending.notify_one();
}
void emplace(Arguments&& ... args) {
  std::unique_lock<std::mutex> lock(mutex_);
  // Perfect-forward the arguments so rvalues are moved rather than copied
  queue_.emplace_back(std::forward<Arguments>(args) ...);
}