/** Return pointer to new tracebuffer entry. */
PUBLIC static
Tb_entry*
Jdb_tbuf::new_entry()
{
  Tb_entry *tb;

    {
      auto guard = lock_guard(_lock);
      tb = _tbuf_act;
      status()->current = (Address)tb;
      if (++_tbuf_act >= _tbuf_max)
        _tbuf_act = buffer();
      if (_entries < _max_entries)
        _entries++;
      tb->number(++_number);
    }

  tb->rdtsc();
  tb->rdpmc1();
  tb->rdpmc2();

  return tb;
}
std::string Client::invoke(const schema::Request& request) {
  std::string data;
  {
    std::lock_guard<std::mutex> lock_guard(*lock);
    rpc_connection->send(encoder::encode_delimited(request));
    size_t size = 0;
    // Read one byte at a time until the length prefix decodes cleanly,
    // then fetch the message body in one receive.
    while (true) {
      try {
        data += rpc_connection->receive(1);
        size = decoder::decode_size_and_position(data).first;
        break;
      } catch (decoder::DecodeFailed&) {
      }
    }
    data = rpc_connection->receive(size);
  }
  schema::Response response;
  decoder::decode(response, data, this);
  if (response.has_error())
    throw RPCError(response.error());
  return response.return_value();
}
PUBLIC
unsigned long
Slab_cache::reap()    // request that the cache returns memory to the system
{
  Slab *s = 0;
  unsigned long sz = 0;

  for (;;)
    {
        {
          auto guard = lock_guard(lock);

          s = _empty.front();
          // nothing (more) to free; report how much we returned so far
          if (!s)
            return sz;

          cxx::H_list<Slab>::remove(s);
        }

      // explicitly call the destructor; the memory itself goes back to the
      // system via block_free()
      s->~Slab();
      block_free(reinterpret_cast<char *>(s + 1) - _slab_size, _slab_size);
      sz += _slab_size;
    }
}
PUBLIC
void NO_INSTRUMENT
Switch_lock::wait_free()
{
  auto guard = lock_guard(cpu_lock);

  assert (!valid());

  // do we already hold the lock?
  if ((_lock_owner & ~1UL) == (Address)current())
    {
      clear_lock_owner();
      Context *c = current();
      c->dec_lock_cnt();
      return;
    }

  for (;;)
    {
      assert(cpu_lock.test());

      Address _owner = access_once(&_lock_owner);
      Context *owner = (Context *)(_owner & ~1UL);
      if (!owner)
        break;

      // help the lock owner until the lock becomes free
      //      while (test())
      current()->switch_exec_helping(owner, Context::Helping, &_lock_owner, _owner);

      Proc::preemption_point();
    }
}
void ResourceManager::assureExists(const std::string& name, bool unsafe) const {
  // Take the lock conditionally, but keep it alive until the end of the
  // function so that exists() runs under its protection.
  std::unique_lock<std::mutex> lock(_resource_mutex, std::defer_lock);
  if (!unsafe)
    lock.lock();

  if (!exists(name, unsafe)) {
    throw ResourceNotExistsException("ResourceManager: Resource '" + name +
                                     "' does not exist");
  }
}
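// Illustrative sketch (not part of the original sources) of the scope pitfall
// the function above has to avoid: a guard declared inside an `if` body is
// destroyed at the end of that statement, so the code after it runs unlocked.
// std::unique_lock with std::defer_lock makes the conditional acquire explicit
// while keeping the lock held to the end of the enclosing scope.
#include <mutex>

std::mutex m;
int shared_value = 0;

int read_checked(bool caller_holds_lock) {
  std::unique_lock<std::mutex> lock(m, std::defer_lock);
  if (!caller_holds_lock)
    lock.lock();          // held until the end of the function
  return shared_value;    // protected either way
}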
// TODO: think about this locking strategy! Fix it so there are no deadlocks or
// invalid states (e.g., the handlers_ list is non-empty iff the group has a
// registered callback).
// Maybe just always register the handler, and just turn it on or off...
void Group::addFeedbackHandler(GroupFeedbackHandler handler)
{
  std::lock_guard<std::mutex> lock_guard(handler_lock_);
  handlers_.push_back(handler); // a copy is fine here: std::function is copyable
  if (handlers_.size() == 1) // (i.e., this was the first one)
    hebiGroupRegisterFeedbackHandler(group_, callbackWrapper, (void*)this);
}
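// Hedged sketch of the TODO's "always register the handler, just turn it on
// or off" idea. The callback signature, the GroupFeedback type, and the
// atomic `enabled_` flag are illustrative assumptions, not the actual HEBI
// C API or class layout.
#include <atomic>

void Group::callbackWrapper(GroupFeedback* feedback, void* user_data)
{
  Group* group = static_cast<Group*>(user_data);
  if (!group->enabled_.load(std::memory_order_acquire))
    return; // stays registered with the C layer, but dispatch is switched off
  std::lock_guard<std::mutex> lock(group->handler_lock_);
  for (auto& handler : group->handlers_)
    handler(*feedback);
}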
/** Release a device interrupt.
    @param t the receiver that owns the IRQ
    @return true if t really was the owner of the IRQ and the operation was
            successful */
PRIVATE
bool
Irq_sender::free(Thread *t, Kobject ***rl)
{
  bool ret = mp_cas(&_irq_thread, t, reinterpret_cast<Thread*>(0));
  if (ret)
    {
      auto guard = lock_guard(cpu_lock);
      mask();

      if (EXPECT_TRUE(t != 0))
        {
          t->Receiver::abort_send(this);

          // release the cpu lock early, actually before the delete
          guard.reset();

          if (t->dec_ref() == 0)
            t->initiate_deletion(rl);
        }
    }

  return ret;
}
bool
Rcu_data::do_batch()
{
  int count = 0;
  bool need_resched = false;
  for (Rcu_list::Const_iterator l = _d.begin(); l != _d.end();)
    {
      Rcu_item *i = *l;
      ++l;

      need_resched |= i->_call_back(i);
      ++count;
    }

  // XXX: I do not know why this and the former stuff is w/o cpu lock
  //      but the counting needs it?
  _d.clear(); // XXX: we use clear, we seemingly worked through the whole list
  //_d.head(l);

    {
      auto guard = lock_guard(cpu_lock);
      _len -= count;
    }

#if 0
  if (_d.full())
    {
      Timeout *t = &_rcu_timeout.cpu(_cpu);
      t->set(t->get_timeout(0) + Rcu::Period, _cpu);
    }
#endif

  return need_resched;
}
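// Hedged sketch of the producer side of do_batch() above: an object embeds an
// Rcu_item and retires itself through a callback that runs only after a grace
// period. The registration entry point (Rcu::call) and the My_obj type are
// assumptions for illustration; the real interface may differ.
struct My_obj
{
  Rcu_item _rcu; // must be the first member for the cast below
  // ... payload ...
};

static bool
free_my_obj(Rcu_item *i)
{
  delete reinterpret_cast<My_obj *>(i); // safe: a grace period has elapsed
  return false;                         // no reschedule required
}

// at the point where the object is retired:
//   Rcu::call(&obj->_rcu, &free_my_obj);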
void mastermind_t::data::set_user_settings_factory(namespace_state_t::user_settings_factory_t user_settings_factory_) {
    // TODO: forbid changing the settings while the background thread is running
    std::lock_guard<std::mutex> lock_guard(m_mutex);
    user_settings_factory = std::move(user_settings_factory_);
}
void Module::addFeedbackHandler(ModuleFeedbackHandler handler)
{
  std::lock_guard<std::mutex> lock_guard(handler_lock_);
  handlers_.push_back(handler);
  if (handlers_.size() == 1)
    hebiModuleRegisterFeedbackHandler(internal_, callbackWrapper, (void*)this);
}
/** Acquire the lock with priority inheritance.
 *  If the lock is occupied, enqueue in the list of helpers and lend the CPU
 *  to the current lock owner until we are the lock owner.
 *  @return #Locked if the lock was already locked by the current context.
 *          #Not_locked if the current context got the lock (the usual case).
 *          #Invalid if the lock does not exist (see valid()).
 */
PUBLIC
Switch_lock::Status NO_INSTRUMENT
Switch_lock::lock()
{
  auto guard = lock_guard(cpu_lock);
  return lock_dirty();
}
PUBLIC inline
void NO_INSTRUMENT
Switch_lock::invalidate()
{
  auto guard = lock_guard(cpu_lock);
  atomic_mp_or(&_lock_owner, 1); // set the invalid bit in the owner word
}
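// Hedged usage sketch of the Switch_lock interface shown in these snippets
// (illustrative, not taken from the kernel sources): acquire, run the
// critical section, and release only if this context took the lock here.
static void
with_switch_lock(Switch_lock &l)
{
  Switch_lock::Status s = l.lock();
  if (s == Switch_lock::Invalid)
    return; // the lock object no longer exists

  // ... critical section ...

  if (s == Switch_lock::Not_locked) // we acquired it above, so we release it
    l.clear();
}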
///------------------------------------------------------------------------------------
float WingsailControlNode::calculateTailAngle()
{
    std::lock_guard<std::mutex> lock_guard(m_lock);

    if (m_ApparentWindDir != DATA_OUT_OF_RANGE)
    {
        // forces on X and Y in the boat coordinate system
        std::vector<double> xBoat_Forces;
        std::vector<double> yBoat_Forces;

        // rotate the tabulated lifts and drags into the boat coordinate
        // system; the apparent wind direction is negated to turn the
        // clockwise angle into a counterclockwise one
        double apparentWindDir_counterClock = -m_ApparentWindDir;
        apparentWindDir_counterClock = Utility::degreeToRadian(apparentWindDir_counterClock);
        apparentWindDir_counterClock = Utility::limitRadianAngleRange(apparentWindDir_counterClock);

        for (int i = 0; i < 53; i++)
        {
            xBoat_Forces.push_back(DRAGS[i]*cos(apparentWindDir_counterClock) - LIFTS[i]*sin(apparentWindDir_counterClock));
            yBoat_Forces.push_back(LIFTS[i]*cos(apparentWindDir_counterClock) + DRAGS[i]*sin(apparentWindDir_counterClock));
        }

        // pick the tail angle that maximizes the driving force along the
        // boat's X axis; the tables appear to cover 53 tail angles with
        // index 26 as the zero-degree entry
        std::vector<double> maxAndIndex_xBoat_Forces = Utility::maxAndIndex(xBoat_Forces);

        double orderTail_counterClock = maxAndIndex_xBoat_Forces[1] - 26;
        double orderTail = -orderTail_counterClock;
        orderTail = restrictWingsail(orderTail);

        return orderTail;
    }
    else
    {
        return NO_COMMAND;
    }
}
///----------------------------------------------------------------------------------
void WingsailControlNode::processLocalNavigationMessage(const LocalNavigationMsg* msg)
{
    std::lock_guard<std::mutex> lock_guard(m_lock);
    m_TargetCourse = msg->targetCourse();
    m_TargetTackStarboard = msg->targetTackStarboard();
}
bool ServerSummaryRow::AppendTableToBuffs(int32_t client_id,
                                          petuum::RecordBuff* buffs, bool resume) {
  std::lock_guard<std::mutex> lock_guard(mutex_);
  if (resume) {
    bool append_row_suc = AppendRowToBuffs(client_id, buffs, tmp_row_buff_,
                                           curr_row_size_, 0);
    if (!append_row_suc)
      return false;
    //++index_iter_;
  }
  //for (; index_iter_ != num_words_; ++index_iter_) {
  curr_row_size_ = summary_row_.SparseSerializedSize(); //SerializedSize();
  if (curr_row_size_ > tmp_row_buff_size_) {
    delete[] tmp_row_buff_;
    tmp_row_buff_size_ = curr_row_size_;
    tmp_row_buff_ = new uint8_t[curr_row_size_];
  }
  curr_row_size_ = summary_row_.SparseSerialize(tmp_row_buff_); //->Serialize(tmp_row_buff_);
  bool append_row_suc = AppendRowToBuffs(client_id, buffs, tmp_row_buff_,
                                         curr_row_size_, 0);
  if (!append_row_suc) {
    VLOG(0) << "Failed at Summary Row";
    return false;
  }
  //}
  delete[] tmp_row_buff_;
  return true;
}
PRIVATE
void
Rcu_data::move_batch(Rcu_list &l)
{
  auto guard = lock_guard(cpu_lock);
  _n.append(l);
}
PRIVATE
void
Rcu_data::check_quiescent_state(Rcu_glbl *rgp)
{
  if (_q_batch != rgp->_current)
    {
      // start a new grace period
      _pending = 1;
      _q_passed = 0;
      _q_batch = rgp->_current;
      return;
    }

  // Is the grace period already completed for this cpu?
  // use _pending, not the bitmap, to avoid cache thrashing
  if (!_pending)
    return;

  // Was there a quiescent state since the beginning of the grace period?
  if (!_q_passed)
    return;

  _pending = 0;

  auto guard = lock_guard(rgp->_lock);

  if (EXPECT_TRUE(_q_batch == rgp->_current))
    rgp->cpu_quiet(_cpu);
}
void ChatWindow::processUp(packet &&data)
{
    std::lock_guard<std::mutex> lock_guard(_mutex);
    queue.append(toVariant(data).toString());
    QMetaObject::invokeMethod(this, "read_queue", Qt::QueuedConnection);
}
/*!
    Wakes all threads waiting on the wait condition. The order in which the
    threads are woken up depends on the operating system's scheduling
    policies and cannot be controlled or predicted.

    \sa wakeOne()
*/
void QWaitCondition::wakeAll()
{
    Genode::Lock::Guard lock_guard(d->mutex);
    while (d->sem.cnt() < 0) {
        d->sem.up();
    }
}
/*!
    Wakes one thread waiting on the wait condition. The thread that is woken
    up depends on the operating system's scheduling policies, and cannot be
    controlled or predicted.

    If you want to wake up a specific thread, the solution is typically to
    use different wait conditions and have different threads wait on
    different conditions.

    \sa wakeAll()
*/
void QWaitCondition::wakeOne()
{
    Genode::Lock::Guard lock_guard(d->mutex);
    if (d->sem.cnt() < 0) {
        d->sem.up();
    }
}
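// Typical QWaitCondition usage (standard Qt API, independent of the Genode
// backend above): the waiting thread sleeps under a QMutex, the producer
// flips a flag and wakes it.
#include <QMutex>
#include <QWaitCondition>

QMutex mutex;
QWaitCondition cond;
bool ready = false;

void consumer()
{
    mutex.lock();
    while (!ready)         // guards against spurious wakeups
        cond.wait(&mutex); // atomically unlocks, sleeps, and relocks
    mutex.unlock();
}

void producer()
{
    mutex.lock();
    ready = true;
    cond.wakeOne();        // or wakeAll() to release every waiter
    mutex.unlock();
}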
Connection::Connection(std::shared_ptr<boost::asio::ip::tcp::socket> client_socket,
                       WebApplication *application_pointer)
    : application_pointer_(application_pointer),
      client_socket_(client_socket),
      timeout_timer_(client_socket->get_io_service()),
      current_request_(new HTTPRequest) {
  resetTimeoutTimer();
  startAsyncRead();
  std::lock_guard<std::mutex> lock_guard(connections_count_mutex_);
  connections_count_++;
}
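// Hypothetical matching decrement (the destructor is not shown in the
// original; this sketch assumes the counter is guarded by the same mutex):
Connection::~Connection() {
  std::lock_guard<std::mutex> lock_guard(connections_count_mutex_);
  connections_count_--;
}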
/** Free the lock.
    Return the CPU to a helper if there is one, since it had to have a higher
    priority to be able to help (the priority may be its own, it may run on a
    donated timeslice, or round-robin scheduling may have selected a thread on
    the same priority level as the current one).
    @pre The lock must be valid (see valid()).
 */
PUBLIC
void NO_INSTRUMENT
Switch_lock::clear()
{
  auto guard = lock_guard(cpu_lock);
  switch_dirty(clear_no_switch_dirty());
}
PUBLIC
void
Kmem_alloc::unaligned_free(unsigned long size, void *page)
{
  assert(size >= 8 /* NEW INTERFACE PARANOIA */);
  auto guard = lock_guard(lock);
  a->free(page, size);
}
void Worker::setWork(WorkType work)
{
    status = Status::ACTIVE;
    std::lock_guard<std::mutex> lock_guard(activatorMutex);
    this->work = std::move(work);
    isWorkReallyPosted = true;
    activator.notify_one();
}
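// Hypothetical consumer side of setWork (a sketch inferred from the flags
// above; Worker::run and its body are not part of the original): the worker
// blocks on `activator` until work is really posted, then executes it.
void Worker::run()
{
    std::unique_lock<std::mutex> lock(activatorMutex);
    activator.wait(lock, [this] { return isWorkReallyPosted; });
    isWorkReallyPosted = false;
    WorkType current = std::move(work);
    lock.unlock();
    current(); // assumes WorkType is callable, e.g. std::function<void()>
}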
void work_on_data(void)
{
    std::lock_guard<std::mutex> lock_guard(m_mutex);
    std::cout << "Running work_on_data method" << std::endl;
    std::cout << "work_on_data starts on local data = " << m_data << std::endl;
    std::this_thread::sleep_for(std::chrono::seconds(6));
    std::cout << "work_on_data terminates on local data = " << m_data << std::endl;
    std::cout << "Running work_on_data method done!" << std::endl;
}
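// Minimal sketch of the serialization effect of m_mutex (assumes that
// work_on_data and its m_mutex/m_data members belong to some class `Data`,
// which is not shown in the original): the second call blocks until the
// first call's six-second hold of the mutex ends.
#include <thread>

int main()
{
    Data d;
    std::thread t1([&d] { d.work_on_data(); });
    std::thread t2([&d] { d.work_on_data(); });
    t1.join();
    t2.join();
    return 0;
}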
void CDispatcher::Process()
{
    std::lock_guard<std::mutex> lock_guard(m_QueueMtx);
    while (!m_Queue.empty())
    {
        m_Queue.front()();
        m_Queue.pop();
    }
}
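// Hypothetical enqueue counterpart (a sketch; `Post` is not part of the
// original snippet): tasks are queued under the same mutex that Process()
// holds, so producers never race with the dispatch loop on m_Queue.
void CDispatcher::Post(std::function<void()> task)
{
    std::lock_guard<std::mutex> lock_guard(m_QueueMtx);
    m_Queue.push(std::move(task));
}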
/** Is the lock set?
    @return #Locked if the lock is set, #Not_locked if not locked, and
            #Invalid if the lock does not exist (see valid()).
 */
PUBLIC inline
Switch_lock::Status NO_INSTRUMENT
Switch_lock::test() const
{
  auto guard = lock_guard(cpu_lock);
  if (EXPECT_FALSE(!valid()))
    return Invalid;
  return (_lock_owner & ~1UL) ? Locked : Not_locked;
}
PRIVATE inline NOEXPORT
L4_error
Ipc_gate::block(Thread *ct, L4_timeout const &to, Utcb *u)
{
  Unsigned64 t = 0;
  if (!to.is_never())
    {
      t = to.microsecs(Timer::system_clock(), u);
      if (!t)
        return L4_error::Timeout;
    }

    {
      auto g = lock_guard(_wait_q.lock());
      ct->set_wait_queue(&_wait_q);
      ct->sender_enqueue(&_wait_q, ct->sched_context()->prio());
    }
  ct->state_change_dirty(~Thread_ready, Thread_send_wait);

  IPC_timeout timeout;

  if (t)
    {
      timeout.set(t, current_cpu());
      ct->set_timeout(&timeout);
    }

  ct->schedule();

  ct->state_change(~Thread_ipc_mask, Thread_ready);
  ct->reset_timeout();

  if (EXPECT_FALSE(ct->in_sender_list() && timeout.has_hit()))
    {
      auto g = lock_guard(_wait_q.lock());
      // recheck under the wait-queue lock: we may have been dequeued meanwhile
      if (!ct->in_sender_list())
        return L4_error::None;

      ct->sender_dequeue(&_wait_q);
      return L4_error::Timeout;
    }
  return L4_error::None;
}
PUBLIC
void
Irq_sender::destroy(Kobject ***rl)
{
  auto g = lock_guard(cpu_lock);
  auto t = access_once(&_irq_thread);
  if (t)
    free(t, rl);
  Irq::destroy(rl);
}