Example 1
	void alert_manager::maybe_notify(alert* a, std::unique_lock<std::mutex>& lock)
	{
		if (m_alerts[m_generation].size() == 1)
		{
			lock.unlock();

			// we just posted to an empty queue. If anyone is waiting for
			// alerts, we need to notify them. Also (potentially) call the
			// user supplied m_notify callback to let the client wake up its
			// message loop to poll for alerts.
			if (m_notify) m_notify();

			// TODO: 2 keep a count of the number of threads waiting. Only if it's
			// > 0 notify them
			m_condition.notify_all();
		}
		else
		{
			lock.unlock();
		}

#ifndef TORRENT_DISABLE_EXTENSIONS
		for (auto& e : m_ses_extensions)
			e->on_alert(a);
#else
		TORRENT_UNUSED(a);
#endif
	}
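The notify here only pays off if some thread is blocked on m_condition. A minimal self-contained sketch of the producer/consumer handoff used above, with the unlock-before-notify ordering made explicit (the alert_queue_sketch type and its members are illustrative, not libtorrent's actual API):

#include <condition_variable>
#include <mutex>
#include <vector>

// Minimal sketch of the handoff in maybe_notify(): notify only when the
// queue transitions from empty to non-empty, and only after dropping the
// lock so a woken waiter doesn't immediately block on the mutex we hold.
struct alert_queue_sketch
{
	std::mutex mutex_;
	std::condition_variable cond_;
	std::vector<int> queue_;

	void push(int v)
	{
		std::unique_lock<std::mutex> lock(mutex_);
		queue_.push_back(v);
		bool const was_empty_before = (queue_.size() == 1);
		lock.unlock();
		if (was_empty_before) cond_.notify_all();
	}

	std::vector<int> wait_pop()
	{
		std::unique_lock<std::mutex> lock(mutex_);
		cond_.wait(lock, [this] { return !queue_.empty(); });
		std::vector<int> out;
		out.swap(queue_);
		return out;
	}
};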
Example 2
CS_StatusValue UsbCameraImpl::DeviceCmdSetMode(
    std::unique_lock<wpi::mutex>& lock, const Message& msg) {
  VideoMode newMode;
  if (msg.kind == Message::kCmdSetMode) {
    newMode.pixelFormat = msg.data[0];
    newMode.width = msg.data[1];
    newMode.height = msg.data[2];
    newMode.fps = msg.data[3];
    m_modeSetPixelFormat = true;
    m_modeSetResolution = true;
    m_modeSetFPS = true;
  } else if (msg.kind == Message::kCmdSetPixelFormat) {
    newMode = m_mode;
    newMode.pixelFormat = msg.data[0];
    m_modeSetPixelFormat = true;
  } else if (msg.kind == Message::kCmdSetResolution) {
    newMode = m_mode;
    newMode.width = msg.data[0];
    newMode.height = msg.data[1];
    m_modeSetResolution = true;
  } else if (msg.kind == Message::kCmdSetFPS) {
    newMode = m_mode;
    newMode.fps = msg.data[0];
    m_modeSetFPS = true;
  }

  // If the pixel format or resolution changed, we need to disconnect and
  // reconnect
  if (newMode.pixelFormat != m_mode.pixelFormat ||
      newMode.width != m_mode.width || newMode.height != m_mode.height) {
    m_mode = newMode;
    lock.unlock();
    bool wasStreaming = m_streaming;
    if (wasStreaming) DeviceStreamOff();
    if (m_fd >= 0) {
      DeviceDisconnect();
      DeviceConnect();
    }
    if (wasStreaming) DeviceStreamOn();
    Notifier::GetInstance().NotifySourceVideoMode(*this, newMode);
    lock.lock();
  } else if (newMode.fps != m_mode.fps) {
    m_mode = newMode;
    lock.unlock();
    // Need to stop streaming to set FPS
    bool wasStreaming = m_streaming;
    if (wasStreaming) DeviceStreamOff();
    DeviceSetFPS();
    if (wasStreaming) DeviceStreamOn();
    Notifier::GetInstance().NotifySourceVideoMode(*this, newMode);
    lock.lock();
  }

  return CS_OK;
}
Example 3
void F::stop_client (std::unique_lock <std::mutex> &lock)
{
  // Remember whether the caller passed the lock already held, so we can
  // restore the same state before returning.
  bool wasLocked = lock.owns_lock();

  if (!wasLocked) {
    lock.lock();
  }

  if (client) {
    client->stop();
  }

  terminate = true;

  lock.unlock();

  GST_DEBUG ("Waiting for client thread to finish");
  clientThread.join();

  if (wasLocked) {
    lock.lock();
  }
}
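The owns_lock() bookkeeping above generalizes to a small reusable shape: do the work with the mutex held, but hand the lock back in exactly the state the caller provided. A self-contained sketch of that idiom, with illustrative names:

#include <mutex>

// Sketch: run a critical section, then restore whatever lock state the
// caller passed in (held or released). Names are illustrative.
void do_work_restoring_lock_state(std::unique_lock<std::mutex>& lock)
{
  bool const wasLocked = lock.owns_lock();

  if (!wasLocked) {
    lock.lock();
  }

  // ... critical-section work goes here ...

  if (!wasLocked) {
    lock.unlock();   // restore the caller's state
  }
}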
Example 4
 void handle_reconnect(std::unique_lock<std::mutex>& lock, SyncSession& session) const override
 {
     // Ask the binding to retry getting the token for this session.
     std::shared_ptr<SyncSession> session_ptr = session.shared_from_this();
     lock.unlock();
     session.m_config.bind_session_handler(session_ptr->m_realm_path, session_ptr->m_config, session_ptr);
 }
Example 5
 channel_op_status push_and_notify_( ptr_t new_node,
                                     std::unique_lock< mutex > & lk) noexcept {
     push_tail_( new_node);
     lk.unlock();
     not_empty_cond_.notify_one();
     return channel_op_status::success;
 }
Example 6
void Heap::deallocateXLarge(std::unique_lock<StaticMutex>& lock, void* object)
{
    Range toDeallocate = m_xLargeObjects.pop(&findXLarge(lock, object));

    lock.unlock();
    vmDeallocate(toDeallocate.begin(), toDeallocate.size());
    lock.lock();
}
Example 7
 bool access_token_expired(std::unique_lock<std::mutex>& lock, SyncSession& session) const override
 {
     session.advance_state(lock, waiting_for_access_token);
     std::shared_ptr<SyncSession> session_ptr = session.shared_from_this();
     lock.unlock();
     session.m_config.bind_session_handler(session_ptr->m_realm_path, session_ptr->m_config, session_ptr);
     return false;
 }
Example 8
void SyncSession::unregister(std::unique_lock<std::mutex>& lock)
{
    REALM_ASSERT(lock.owns_lock());
    REALM_ASSERT(m_state == &State::inactive); // Must stop an active session before unregistering.

    lock.unlock();
    SyncManager::shared().unregister_session(m_realm_path);
}
Example 9
void PageInfo::finishRelease(std::unique_lock<std::mutex> lock) {
	delete[] p_buffer;
	
	lock.unlock();
	p_cache->p_cacheHost->afterRelease(this);
	lock.lock();
	
	if(p_waitQueue.empty()) {
		auto iterator = p_cache->p_presentPages.find(p_number);
		assert(iterator != p_cache->p_presentPages.end());
		p_cache->p_presentPages.erase(iterator);
		delete this;
	} else {
		lock.unlock();
		p_cache->p_cacheHost->requestAcquire(this);
	}
}
Example 10
 // helper for releasing connection and placing it in pool
 inline void pushConnection( std::unique_lock<std::mutex> &locker, ConPool &pool, redisConnection* con )
 {
     pool.second.push(con);
     locker.unlock();
     // notify other threads so they can wake up in case they are waiting
     // on an empty connection queue
     pool.first.notify_one();
 }
Example 11
static inline void sleep(std::unique_lock<StaticMutex>& lock, std::chrono::milliseconds duration)
{
    if (duration == std::chrono::milliseconds(0))
        return;
    
    lock.unlock();
    std::this_thread::sleep_for(duration);
    lock.lock();
}
Example 12
void RepeatedTimerTask::schedule(std::unique_lock<raft_mutex_t>& lck) {
    _next_duetime =
            butil::milliseconds_from_now(adjust_timeout_ms(_timeout_ms));
    if (bthread_timer_add(&_timer, _next_duetime, on_timedout, this) != 0) {
        lck.unlock();
        LOG(ERROR) << "Fail to add timer";
        return on_timedout(this);
    }
}
Example 13
void SingleTaskScheduler::resume(std::unique_lock<Spinlock> lock) {
    assert(task_->status == Suspended || task_->status == Listening);
    assert(!task_->scheduled);
    task_->status = Running;
    task_->scheduled = false;
    task_->resumes += 1;
    resumed.store(true, std::memory_order_release);
    lock.unlock();
}
Example 14
bool Listener::FindNextEventInternal(
    std::unique_lock<std::mutex> &lock,
    Broadcaster *broadcaster,             // nullptr for any broadcaster
    const ConstString *broadcaster_names, // nullptr for any event
    uint32_t num_broadcaster_names, uint32_t event_type_mask, EventSP &event_sp,
    bool remove) {
  // NOTE: callers of this function must lock m_events_mutex using a
  // std::unique_lock and pass the lock as the first argument.
  // m_events_mutex is no longer recursive.
  Log *log(lldb_private::GetLogIfAllCategoriesSet(LIBLLDB_LOG_EVENTS));

  if (m_events.empty())
    return false;

  Listener::event_collection::iterator pos = m_events.end();

  if (broadcaster == nullptr && broadcaster_names == nullptr &&
      event_type_mask == 0) {
    pos = m_events.begin();
  } else {
    pos = std::find_if(m_events.begin(), m_events.end(),
                       EventMatcher(broadcaster, broadcaster_names,
                                    num_broadcaster_names, event_type_mask));
  }

  if (pos != m_events.end()) {
    event_sp = *pos;

    if (log != nullptr)
      log->Printf("%p '%s' Listener::FindNextEventInternal(broadcaster=%p, "
                  "broadcaster_names=%p[%u], event_type_mask=0x%8.8x, "
                  "remove=%i) event %p",
                  static_cast<void *>(this), GetName(),
                  static_cast<void *>(broadcaster),
                  static_cast<const void *>(broadcaster_names),
                  num_broadcaster_names, event_type_mask, remove,
                  static_cast<void *>(event_sp.get()));

    if (remove) {
      m_events.erase(pos);
      // Unlock the event queue here. We've removed this event and are about
      // to return it, so it should be okay to get the next event off the
      // queue here - and it might be useful to do that in the "DoOnRemoval".
      lock.unlock();
      event_sp->DoOnRemoval();
    }
    return true;
  }

  event_sp.reset();
  return false;
}
Example 15
int HttpMessage::UnlockAndFlushToBodyReader(std::unique_lock<butil::Mutex>& mu) {
    if (_body.empty()) {
        mu.unlock();
        return 0;
    }
    butil::IOBuf body_seen = _body.movable();
    ProgressiveReader* r = _body_reader;
    mu.unlock();
    for (size_t i = 0; i < body_seen.backing_block_num(); ++i) {
        butil::StringPiece blk = body_seen.backing_block(i);
        butil::Status st = r->OnReadOnePart(blk.data(), blk.size());
        if (!st.ok()) {
            mu.lock();
            _body_reader = NULL;
            mu.unlock();
            r->OnEndOfMessage(st);
            return -1;
        }
    }
    return 0;
}
Example 16
void WorkQueue::runItemWithoutLock(std::unique_lock<std::mutex> &lock) {
    Item item = std::move(item_heap.front());
    std::pop_heap(std::begin(item_heap), std::end(item_heap));
    item_heap.pop_back();

    idle_flag = false;
    lock.unlock();
    item.func();
    lock.lock();
    idle_flag = true;
    cond.notify_all();
}
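runItemWithoutLock() assumes the caller already holds the lock and has checked that item_heap is non-empty. A sketch of the surrounding worker loop such a function implies (the stop_flag member and workerLoopSketch itself are assumptions, not WorkQueue's actual code):

// Hypothetical worker loop; mutex and stop_flag are assumed members.
// The lock is held at every iteration boundary, and runItemWithoutLock()
// releases it only while item.func() executes.
void WorkQueue::workerLoopSketch() {
    std::unique_lock<std::mutex> lock(mutex);
    while (!stop_flag) {
        if (item_heap.empty()) {
            cond.wait(lock);          // releases the lock while waiting
            continue;
        }
        runItemWithoutLock(lock);     // drops the lock around item.func()
    }
}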
Example 17
	// checks to see if we're no longer exceeding the high watermark,
	// and if we're in fact below the low watermark. If so, we need to
	// post the notification messages to the peers that are waiting for
	// more buffers to receive data into
	void disk_buffer_pool::check_buffer_level(std::unique_lock<std::mutex>& l)
	{
		TORRENT_ASSERT(l.owns_lock());
		if (!m_exceeded_max_size || m_in_use > m_low_watermark) return;

		m_exceeded_max_size = false;

		std::vector<std::weak_ptr<disk_observer>> cbs;
		m_observers.swap(cbs);
		l.unlock();
		m_ios.post(std::bind(&watermark_callback, std::move(cbs)));
	}
Example 18
  TetrisGame_impl(IRenderFunc *cb_):cb(cb_), timeCount(0),
				    mField(),current(I,lockdelay,&mField),
				    ghost(I,lockdelay,&mField),inputBuffer(),
				    re(),pieces(shift_right,hard_drop),
				    isPaused(true),isContinuing(false),
				    pauseMutex(),cbMutex(),
				    pauseLock(pauseMutex),
				    pauseCondition(),
				    runner()
  {
    pauseLock.unlock();
    inputBuffer.reserve(minBuffer);
  }
Example 19
bool UsbCameraProperty::DeviceSet(std::unique_lock<wpi::mutex>& lock,
                                  IAMVideoProcAmp* pProcAmp,
                                  int newValue) const {
  if (!pProcAmp) return true;

  lock.unlock();
  if (SUCCEEDED(
          pProcAmp->Set(tagVideoProc, newValue, VideoProcAmp_Flags_Manual))) {
    lock.lock();
    return true;
  }

  return false;
}
Example 20
bool UsbCameraProperty::DeviceGet(std::unique_lock<wpi::mutex>& lock,
                                  IAMVideoProcAmp* pProcAmp) {
  if (!pProcAmp) return true;

  lock.unlock();
  long newValue = 0, paramFlag = 0;  // NOLINT(runtime/int)
  if (SUCCEEDED(pProcAmp->Get(tagVideoProc, &newValue, &paramFlag))) {
    lock.lock();
    value = newValue;
    return true;
  }

  return false;
}
Example 21
	void file_pool::remove_oldest(std::unique_lock<std::mutex>& l)
	{
		file_set::iterator i = std::min_element(m_files.begin(), m_files.end()
			, boost::bind(&lru_file_entry::last_use, boost::bind(&file_set::value_type::second, _1))
				< boost::bind(&lru_file_entry::last_use, boost::bind(&file_set::value_type::second, _2)));
		if (i == m_files.end()) return;

		file_handle file_ptr = i->second.file_ptr;
		m_files.erase(i);

		// closing a file may be long running operation (mac os x)
		l.unlock();
		file_ptr.reset();
		l.lock();
	}
Example 22
	void file_pool::remove_oldest(std::unique_lock<std::mutex>& l)
	{
		using value_type = decltype(m_files)::value_type;
		auto const i = std::min_element(m_files.begin(), m_files.end()
			, [] (value_type const& lhs, value_type const& rhs)
				{ return lhs.second.last_use < rhs.second.last_use; });
		if (i == m_files.end()) return;

		file_handle file_ptr = i->second.file_ptr;
		m_files.erase(i);

		// closing a file may be long running operation (mac os x)
		l.unlock();
		file_ptr.reset();
		l.lock();
	}
Example 23
bool StatefulWriter::try_remove_change(std::chrono::microseconds& microseconds,
        std::unique_lock<std::recursive_mutex>& lock)
{
    logInfo(RTPS_WRITER, "Starting process try remove change for writer " << getGuid());

    SequenceNumber_t min_low_mark;

    for(auto it = matched_readers.begin(); it != matched_readers.end(); ++it)
    {
        std::lock_guard<std::recursive_mutex> rguard(*(*it)->mp_mutex);

        if(min_low_mark == SequenceNumber_t() || (*it)->get_low_mark() < min_low_mark)
        {
            min_low_mark = (*it)->get_low_mark();
        }
    }

    SequenceNumber_t calc = min_low_mark < get_seq_num_min() ? SequenceNumber_t() :
        (min_low_mark - get_seq_num_min()) + 1;
    unsigned int may_remove_change = 1;

    if(calc <= SequenceNumber_t())
    {
        lock.unlock();
        std::unique_lock<std::mutex> may_lock(may_remove_change_mutex_);
        may_remove_change_ = 0;
        may_remove_change_cond_.wait_for(may_lock, microseconds,
                [&]() { return may_remove_change_ > 0; });
        may_remove_change = may_remove_change_;
        may_lock.unlock();
        lock.lock();
    }

    // Some changes acked
    if(may_remove_change == 1)
    {
        return mp_history->remove_min_change();
    }
    // Waiting for a change to be removed.
    else if(may_remove_change == 2)
    {
        return true;
    }

    return false;
}
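The notable shape here is releasing the writer's recursive mutex before doing a timed, predicate-guarded wait on a second, narrowly scoped mutex. A condensed, self-contained sketch of that two-mutex pattern (all names are illustrative):

#include <chrono>
#include <condition_variable>
#include <mutex>

std::recursive_mutex big_mutex;        // guards the writer's state
std::mutex signal_mutex;               // guards only the counter below
std::condition_variable signal_cond;
unsigned signal_count = 0;

bool wait_for_signal_sketch(std::unique_lock<std::recursive_mutex>& lock,
                            std::chrono::microseconds timeout)
{
    lock.unlock();                     // don't hold the big lock while blocked
    bool signalled;
    {
        std::unique_lock<std::mutex> small(signal_mutex);
        signalled = signal_cond.wait_for(small, timeout,
                [] { return signal_count > 0; });
    }
    lock.lock();                       // reacquire before touching writer state
    return signalled;
}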
Example 24
void natpmp::disable(error_code const& ec, std::unique_lock<std::mutex>& l)
{
	m_disabled = true;

	for (std::vector<mapping_t>::iterator i = m_mappings.begin()
		, end(m_mappings.end()); i != end; ++i)
	{
		if (i->protocol == none) continue;
		int const proto = i->protocol;
		i->protocol = none;
		int index = i - m_mappings.begin();
		l.unlock();
		m_callback(index, address(), 0, proto, ec);
		l.lock();
	}
	close_impl(l);
}
Example 25
void CacheHost::releaseItem(Cacheable *item,
		std::unique_lock<std::mutex> &lock) {
	assert(lock.owns_lock());
	
	assert(item != &p_sentinel);
	item->p_alive = false;
	p_activeFootprint -= item->getFootprint();

	// remove the item from the list
	Cacheable *less_recently = item->p_lessRecentlyUsed;
	Cacheable *more_recently = item->p_moreRecentlyUsed;
	less_recently->p_moreRecentlyUsed = more_recently;
	more_recently->p_lessRecentlyUsed = less_recently;

	lock.unlock();
	item->release();
	lock.lock();
}
Example 26
void NetworkInterfaceImplBoost::run(std::unique_lock<std::mutex>& lock)
{
    try
    {
        if (m_acceptor.is_open())
        {
            lock.unlock();
            m_ioService.run();
        }
    }
    catch (const std::exception& e)
    {
        logger.error("NetworkInterfaceImplBoost::run() caught: ", e.what());
    }
    catch (...)
    {
        logger.error("NetworkInterfaceImplBoost::run() caught error");
    }
}
Example 27
void MultiTaskScheduler::resume(Task* task, std::unique_lock<Spinlock> lock) {
    assert(lock.owns_lock());
    assert(task->status == Starting || task->status == Listening || task->status == Suspended);
    assert(!task->scheduled);
    TaskStatus status = task->status;
    task->resumes += 1;
    task->scheduled = true;
    lock.unlock();

    if (status == Starting || status == Listening) {
        std::unique_lock<Spinlock> lock(softMutex);
        softTasks.push_front(task);
    } else if (status == Suspended) {
        std::unique_lock<Spinlock> lock(hardMutex);
        hardTasks.push_front(task);
    } else {
        // Impossible.
        __builtin_unreachable();
    }
}
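Note that resume() takes its std::unique_lock by value, as do Examples 9 and 13 above. Passing by value moves lock ownership into the callee, so the mutex is guaranteed to be released by the time the call returns, whereas the by-reference variants can hand the lock back still held. A minimal sketch of the difference (names are illustrative):

#include <mutex>
#include <utility>

std::mutex m;

void takes_by_ref(std::unique_lock<std::mutex>& lock)
{
    lock.unlock();
    lock.lock();       // caller gets the lock back still held
}

void takes_by_value(std::unique_lock<std::mutex> lock)
{
    lock.unlock();     // lock dies at scope end either way; mutex stays released
}

void caller_sketch()
{
    std::unique_lock<std::mutex> lock(m);
    takes_by_ref(lock);               // lock.owns_lock() == true afterwards
    takes_by_value(std::move(lock));  // ownership moved; lock is now empty
    // lock.owns_lock() == false: the mutex was released inside the callee
}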
Example 28
void natpmp::log(char const* msg, std::unique_lock<std::mutex>& l)
{
	l.unlock();
	m_log_callback(msg);
	l.lock();
}
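The unlock/callback/relock idiom in log() appears throughout these examples. It can be wrapped in a small inverse-RAII helper so the relock also happens if the callback throws. A sketch (unlock_guard is illustrative, not a standard or libtorrent type):

#include <mutex>

// Inverse RAII: unlocks on construction, relocks on destruction. Note that
// relocking in a destructor can block, and lock() can throw during stack
// unwinding; this sketch ignores both caveats for brevity.
template <typename Lock>
struct unlock_guard
{
	explicit unlock_guard(Lock& l) : lock_(l) { lock_.unlock(); }
	~unlock_guard() { lock_.lock(); }
	unlock_guard(unlock_guard const&) = delete;
	unlock_guard& operator=(unlock_guard const&) = delete;
private:
	Lock& lock_;
};

// log() above, rewritten with the helper (sketch):
// void natpmp::log(char const* msg, std::unique_lock<std::mutex>& l)
// {
// 	unlock_guard<std::unique_lock<std::mutex>> ug(l);
// 	m_log_callback(msg); // runs with the mutex released
// }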
Example 29
void FunctionScheduler::runOneFunction(std::unique_lock<std::mutex>& lock,
                                       steady_clock::time_point now) {
  DCHECK(lock.mutex() == &mutex_);
  DCHECK(lock.owns_lock());

  // The function to run will be at the end of functions_ already.
  //
  // Fully remove it from functions_ now.
  // We need to release mutex_ while we invoke this function, and we need to
  // maintain the heap property on functions_ while mutex_ is unlocked.
  RepeatFunc func(std::move(functions_.back()));
  functions_.pop_back();
  if (!func.cb) {
    VLOG(5) << func.name << " function has been canceled while waiting";
    return;
  }
  currentFunction_ = &func;

  // Update the function's next run time.
  if (steady_) {
    // This allows scheduler to catch up
    func.setNextRunTimeSteady();
  } else {
    // Note that we set nextRunTime based on the current time where we started
    // the function call, rather than the time when the function finishes.
    // This ensures that we call the function once every time interval, as
    // opposed to waiting time interval seconds between calls.  (These can be
    // different if the function takes a significant amount of time to run.)
    func.setNextRunTimeStrict(now);
  }

  // Release the lock while we invoke the user's function
  lock.unlock();

  // Invoke the function
  try {
    VLOG(5) << "Now running " << func.name;
    func.cb();
  } catch (const std::exception& ex) {
    LOG(ERROR) << "Error running the scheduled function <"
      << func.name << ">: " << exceptionStr(ex);
  }

  // Re-acquire the lock
  lock.lock();

  if (!currentFunction_) {
    // The function was cancelled while we were running it.
    // We shouldn't reschedule it.
    return;
  }
  // Clear currentFunction_
  CHECK_EQ(currentFunction_, &func);
  currentFunction_ = nullptr;

  // Re-insert the function into our functions_ heap.
  // We only maintain the heap property while running_ is set.  (running_ may
  // have been cleared while we were invoking the user's function.)
  functions_.push_back(std::move(func));
  if (running_) {
    std::push_heap(functions_.begin(), functions_.end(), fnCmp_);
  }
}
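The steady/strict distinction in the comments above comes down to which time point the interval is added to. A tiny self-contained illustration of the assumed semantics (these are not folly's actual implementations of setNextRunTimeSteady/setNextRunTimeStrict):

#include <chrono>

using time_point = std::chrono::steady_clock::time_point;
using interval_t = std::chrono::milliseconds;

// "Strict": anchor the next run to when the current run started, so runs
// stay one interval apart in real time.
time_point nextRunStrict(time_point runStart, interval_t interval) {
  return runStart + interval;
}

// "Steady": advance from the previously scheduled time, so a delayed
// function catches up to its nominal cadence.
time_point nextRunSteady(time_point prevScheduled, interval_t interval) {
  return prevScheduled + interval;
}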
Example 30
 void close_( std::unique_lock< mutex > & lk) noexcept {
     state_ = queue_status::closed;
     lk.unlock();
     not_empty_cond_.notify_all();
 }