Example #1
OsmAnd::GPUAPI::AtlasTextureInGPU::~AtlasTextureInGPU()
{
    const int tilesRemaining = 
#if OSMAND_DEBUG
        _tiles.size();
#else
        _tilesCounter.load();
#endif
    if (tilesRemaining > 0)
        LogPrintf(LogSeverityLevel::Error, "By the time of atlas texture destruction, it still contained %d allocated slots", tilesRemaining);
    assert(tilesRemaining == 0);

    // Clear all references to this atlas
    {
        QMutexLocker scopedLock(&pool->_freedSlotsMutex);

        pool->_freedSlots.remove(this);
    }
    {
        QMutexLocker scopedLock(&pool->_unusedSlotsMutex);

        if (pool->_lastNonFullAtlasTexture == this)
        {
            pool->_lastNonFullAtlasTexture = nullptr;
            pool->_lastNonFullAtlasTextureWeak.reset();
        }
    }
}
OsmAnd::RasterizerEnvironment_P::~RasterizerEnvironment_P()
{
    {
        QMutexLocker scopedLock(&_shadersBitmapsMutex);

        _shadersBitmaps.clear();
    }

    {
        QMutexLocker scopedLock(&_pathEffectsMutex);

        for(auto& pathEffect : _pathEffects)
            pathEffect->unref();
    }
}
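
Both destructors above rely on the same RAII idiom: the QMutexLocker acquires the mutex in its constructor and releases it when the enclosing scope closes, so no early return or exception can leak the lock. A minimal self-contained sketch of the idiom; the Registry class and its members are illustrative, not from the OsmAnd sources:

#include <QList>
#include <QMutex>
#include <QMutexLocker>

class Registry
{
public:
    void remove(int id)
    {
        QMutexLocker scopedLock(&_mutex); // locked from here...

        _ids.removeAll(id);
    } // ...until scopedLock is destroyed, even if removeAll() throws

private:
    QMutex _mutex;
    QList<int> _ids;
};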
Example #3
ptr_lib::shared_ptr<DataBlock>
FrameBuffer::acquireData(const ndn::Interest& interest, ndn::Name& nalType)
{
    lock_guard<recursive_mutex> scopedLock(syncMutex_);

    ptr_lib::shared_ptr<DataBlock> data;
    std::map<ndn::Name, ptr_lib::shared_ptr<DataBlock> >::reverse_iterator re_iter;

    // Search newest-first for a slot whose name starts with the interest's name
    ndn::Name name;
    for (re_iter = activeSlots_.rbegin(); re_iter != activeSlots_.rend(); ++re_iter)
    {
        name = re_iter->first;
        if (interest.getName().equals(name.getPrefix(interest.getName().size())))
        {
            nalType = name.getSubName(-2);
            break;
        }
    }
    if (re_iter != activeSlots_.rend()) // found
    {
        data = re_iter->second;
    }
    return data;
}
std::unique_ptr<AutoConditionLock> AutoConditionLock::waitAndAcquire(
    const std::shared_ptr<WaitableMutexWrapper>& manager, nsecs_t waitTime) {

    if (manager == nullptr || manager->mMutex == nullptr) {
        // Bad input, return null
        return std::unique_ptr<AutoConditionLock> {nullptr};
    }

    // Acquire scoped lock
    std::unique_ptr<AutoConditionLock> scopedLock(new AutoConditionLock(manager));

    // Figure out what time in the future we should hit the timeout
    nsecs_t failTime = systemTime(SYSTEM_TIME_MONOTONIC) + waitTime;

    // Wait until we timeout, or success
    while(manager->mState) {
        status_t ret = manager->mCondition.waitRelative(*(manager->mMutex), waitTime);
        if (ret != NO_ERROR) {
            // Timed out or whatever, return null
            return std::unique_ptr<AutoConditionLock> {nullptr};
        }
        waitTime = failTime - systemTime(SYSTEM_TIME_MONOTONIC);
    }

    // Set the condition and return
    manager->mState = true;
    return scopedLock;
}
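
A caller checks the returned pointer before relying on the condition; a minimal sketch, where mManager is the shared WaitableMutexWrapper and kWaitTimeoutNs is an illustrative timeout constant (both are assumptions, not part of this excerpt):

std::unique_ptr<AutoConditionLock> lock =
        AutoConditionLock::waitAndAcquire(mManager, kWaitTimeoutNs);
if (lock == nullptr) {
    // Bad input, or timed out waiting for mState to clear
    return TIMED_OUT;
}
// mState is held here; presumably released again when `lock` goes out of scope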
Example #5
bool SSDBClient::Connect(const std::string& ip, int port, const std::string& auth)
{
    Base::Mutex::ScopedLock scopedLock(m_oMutex);
    m_bConnected = false;
    m_Client = ssdb::Client::connect(ip.c_str(), port);
    if (!m_Client) return false;

    if (!auth.empty())
    {
        const std::vector<std::string>* rsp = m_Client->request("AUTH", auth);
        ssdb::Status status(rsp);
        if (status.ok())
        {
            m_bConnected = true;
        }
        else
        {
            delete m_Client;
            m_Client = NULL;
            return false; // authentication failed; skip the warm-up below, which would dereference the null client
        }
    }
    else
    {
        m_bConnected = true;
    }
    TSeqArrayResults results;
    keys("*", results);
    return m_bConnected;
}
Example #6
bool SSDBClient::DelKey( const std::string& key )
{
    Base::Mutex::ScopedLock scopedLock(m_oMutex);
    std::string realKey = RealKey(key);
    std::string type;
    if (!Type(key, type)) return false;
    if (type == "string") 
    {
        m_Client->del(realKey);
    } 
    else if (type == "hash")
    {
        int64_t size;
        m_Client->hclear(realKey, &size);
    }
    }
    else if (type == "zset")
    {
        int64_t size;
        m_Client->zclear(realKey, &size);
    }
    else if (type == "queue")
    {
        m_Client->request("qclear", realKey);
    } 
    return true;
}
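
Taken together, Connect and DelKey would be used roughly like the following sketch; host, port, and key are illustrative:

SSDBClient client;
if (!client.Connect("127.0.0.1", 8888, /*auth=*/""))
    return false;
client.DelKey("session:42"); // dispatches on the key's SSDB type (string/hash/zset/queue)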
Example #7
bool
SubscriberDelegate::is_group_coherent() const
{
    org::opensplice::core::ScopedObjectLock scopedLock(*this);
    return this->qos_.delegate().policy<dds::core::policy::Presentation>().delegate().coherent_access() &&
            this->qos_.delegate().policy<dds::core::policy::Presentation>().delegate().access_scope() == dds::core::policy::PresentationAccessScopeKind::GROUP;
}
Example #8
std::vector<org::opensplice::sub::AnyDataReaderDelegate::ref_type>
SubscriberDelegate::get_datareaders(
    const dds::sub::status::DataState& mask)
{
    org::opensplice::core::ScopedObjectLock scopedLock(*this);

    std::vector<org::opensplice::sub::AnyDataReaderDelegate::ref_type> readers;
    u_dataReader uReader;
    u_sampleMask uMask;
    u_result uResult;
    c_iter uList;

    /* Get list from user layer. */
    uMask = org::opensplice::sub::AnyDataReaderDelegate::getUserMask(mask);
    uResult = u_subscriberGetDataReaders(u_subscriber(this->userHandle), uMask, &uList);
    ISOCPP_U_RESULT_CHECK_AND_THROW(uResult, "Could not get datareaders.");

    /* Translate user layer list. */
    readers.reserve(c_iterLength(uList));
    while ((uReader = u_dataReader(c_iterTakeFirst(uList))) != NULL) {
        org::opensplice::core::ObjectDelegate::ref_type reader =
                org::opensplice::core::EntityDelegate::extract_strong_ref(u_entity(uReader));
        if (reader) {
            readers.push_back(OSPL_CXX11_STD_MODULE::dynamic_pointer_cast<AnyDataReaderDelegate>(reader));
        }
    }
    c_iterFree(uList);

    return readers;
}
Example #9
void
SubscriberDelegate::default_datareader_qos(const dds::sub::qos::DataReaderQos& drqos)
{
    org::opensplice::core::ScopedObjectLock scopedLock(*this);
    drqos.delegate().check();
    this->default_dr_qos_ = drqos;
}
Example #10
dds::sub::qos::DataReaderQos
SubscriberDelegate::default_datareader_qos() const
{
    org::opensplice::core::ScopedObjectLock scopedLock(*this);
    dds::sub::qos::DataReaderQos qos = this->default_dr_qos_;
    return qos;
}
void
PublisherDelegate::default_datawriter_qos(const dds::pub::qos::DataWriterQos& dwqos)
{
    org::opensplice::core::ScopedObjectLock scopedLock(*this);
    dwqos.delegate().check();
    this->default_dwqos_ = dwqos;
}
dds::pub::qos::DataWriterQos
PublisherDelegate::default_datawriter_qos() const
{
    org::opensplice::core::ScopedObjectLock scopedLock(*this);
    dds::pub::qos::DataWriterQos qos = this->default_dwqos_;
    return qos;
}
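
All four accessors follow the same pattern: take the object lock, then either validate the new QoS via check() or hand back a copy of the stored default. Against the ISO C++ DDS API this would typically be driven like the sketch below (error handling omitted):

dds::sub::qos::DataReaderQos qos = subscriber.default_datareader_qos();
qos << dds::core::policy::Reliability::Reliable();
subscriber.default_datareader_qos(qos); // ends up in the setter above, which calls check()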
Example #13
void ThreadPool::InsertTask(ITask* pTask)
{
    ScopedLock scopedLock(mMutex);
    mTaskList.push_back(pTask);
    mTaskLeftCount++;
    mGetTaskCV.WakeSingle();
}
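
InsertTask is the producer half of a condition-variable task queue; the matching worker loop is not part of this excerpt. A sketch of the consumer side using standard primitives in place of the original's Mutex/CV wrappers:

#include <condition_variable>
#include <deque>
#include <mutex>

struct ITask { virtual void Run() = 0; virtual ~ITask() = default; };

class TaskQueue
{
public:
    void Insert(ITask* pTask)
    {
        std::lock_guard<std::mutex> scopedLock(mMutex);
        mTasks.push_back(pTask);
        mCondition.notify_one(); // wake a single waiting worker, like WakeSingle()
    }

    ITask* Take()
    {
        std::unique_lock<std::mutex> scopedLock(mMutex);
        mCondition.wait(scopedLock, [this] { return !mTasks.empty(); });
        ITask* pTask = mTasks.front();
        mTasks.pop_front();
        return pTask;
    }

private:
    std::mutex mMutex;
    std::condition_variable mCondition;
    std::deque<ITask*> mTasks;
};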
    void LockManager::cleanupUnusedLocks() {
        for (unsigned i = 0; i < _numLockBuckets; i++) {
            LockBucket* bucket = &_lockBuckets[i];
            scoped_spinlock scopedLock(bucket->mutex);

            LockHeadMap::iterator it = bucket->data.begin();
            while (it != bucket->data.end()) {
                LockHead* lock = it->second;
                if (lock->grantedModes == 0) {
                    invariant(lock->grantedModes == 0);
                    invariant(lock->grantedQueue == NULL);
                    invariant(lock->conflictModes == 0);
                    invariant(lock->conflictQueueBegin == NULL);
                    invariant(lock->conflictQueueEnd == NULL);
                    invariant(lock->conversionsCount == 0);

                    bucket->data.erase(it++);
                    delete lock;
                }
                else {
                    it++;
                }
            }
        }
    }
Example #15
    void LockManager::downgrade(LockRequest* request, LockMode newMode) {
        invariant(request->lock);
        invariant(request->status == LockRequest::STATUS_GRANTED);
        invariant(request->recursiveCount > 0);

        // The conflict set of the newMode should be a subset of the conflict set of the old mode.
        // Can't downgrade from S -> IX for example.
        invariant((LockConflictsTable[request->mode] | LockConflictsTable[newMode]) 
                                == LockConflictsTable[request->mode]);

        LockHead* lock = request->lock;

        LockBucket* bucket = _getBucket(lock->resourceId);
        SimpleMutex::scoped_lock scopedLock(bucket->mutex);

        invariant(lock->grantedQueueBegin != NULL);
        invariant(lock->grantedQueueEnd != NULL);
        invariant(lock->grantedModes != 0);

        lock->changeGrantedModeCount(newMode, LockHead::Increment);
        lock->changeGrantedModeCount(request->mode, LockHead::Decrement);
        request->mode = newMode;

        _onLockModeChanged(lock, true);
    }
Example #16
void MetadataManager::beginReceive(const ChunkRange& range) {
    stdx::lock_guard<stdx::mutex> scopedLock(_managerLock);

    // Collection is not known to be sharded if the active metadata tracker is null
    invariant(_activeMetadataTracker);

    // If range is contained within pending chunks, this means a previous migration must have failed
    // and we need to clean all overlaps
    RangeVector overlappedChunks;
    getRangeMapOverlap(_receivingChunks, range.getMin(), range.getMax(), &overlappedChunks);

    for (const auto& overlapChunkMin : overlappedChunks) {
        auto itRecv = _receivingChunks.find(overlapChunkMin.first);
        invariant(itRecv != _receivingChunks.end());

        const ChunkRange receivingRange(itRecv->first, itRecv->second);

        _receivingChunks.erase(itRecv);

        // Make sure any potentially partially copied chunks are scheduled to be cleaned up
        _addRangeToClean_inlock(receivingRange);
    }

    // Need to ensure that the background range deleter task won't delete the range we are about to
    // receive
    _removeRangeToClean_inlock(range);
    _receivingChunks.insert(std::make_pair(range.getMin().getOwned(), range.getMax().getOwned()));

    // For compatibility with the current range deleter, update the pending chunks on the collection
    // metadata to include the chunk being received
    ChunkType chunk;
    chunk.setMin(range.getMin());
    chunk.setMax(range.getMax());
    _setActiveMetadata_inlock(_activeMetadataTracker->metadata->clonePlusPending(chunk));
}
    bool LockManager::unlock(LockRequest* request) {
        invariant(request->lock);

        // Fast path for decrementing multiple references of the same lock. It is safe to do this
        // without locking, because 1) all calls for the same lock request must be done on the
        // same thread and 2) if there are lock requests hanging off a given LockHead, then this
        // lock will never disappear.
        request->recursiveCount--;
        if ((request->status == LockRequest::STATUS_GRANTED) && (request->recursiveCount > 0)) {
            return false;
        }

        LockHead* lock = request->lock;

        LockBucket* bucket = _getBucket(lock->resourceId);
        scoped_spinlock scopedLock(bucket->mutex);

        invariant(lock->grantedQueue != NULL);
        invariant(lock->grantedModes != 0);

        if (request->status == LockRequest::STATUS_WAITING) {
            // This cancels a pending lock request
            invariant(request->recursiveCount == 0);

            lock->removeFromConflictQueue(request);
            lock->changeConflictModeCount(request->mode, LockHead::Decrement);
        }
        else if (request->status == LockRequest::STATUS_CONVERTING) {
            // This cancels a pending convert request
            invariant(request->recursiveCount > 0);

            // Lock only goes from GRANTED to CONVERTING, so cancelling the conversion request
            // brings it back to the previous granted mode.
            request->status = LockRequest::STATUS_GRANTED;

            lock->conversionsCount--;
            lock->changeGrantedModeCount(request->convertMode, LockHead::Decrement);

            request->convertMode = MODE_NONE;

            _onLockModeChanged(lock, lock->grantedCounts[request->convertMode] == 0);
        }
        else if (request->status == LockRequest::STATUS_GRANTED) {
            // This releases a currently held lock and is the most common path, so it should be
            // as efficient as possible.
            invariant(request->recursiveCount == 0);

            // Remove from the granted list
            lock->removeFromGrantedQueue(request);
            lock->changeGrantedModeCount(request->mode, LockHead::Decrement);

            _onLockModeChanged(lock, lock->grantedCounts[request->mode] == 0);
        }
        else {
            // Invalid request status
            invariant(false);
        }

        return (request->recursiveCount == 0);
    }
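
The recursiveCount handling above means lock and unlock calls must pair up: every unlock() except the last merely decrements the count, and only the final call (the one that returns true) actually releases the resource. A toy model of that contract, not MongoDB's API:

#include <cassert>

struct ToyRequest {
    int recursiveCount = 0;
};

// Mirrors the fast path: only the last unlock reports a real release
bool toyUnlock(ToyRequest& request) {
    request.recursiveCount--;
    return request.recursiveCount == 0;
}

int main() {
    ToyRequest request;
    request.recursiveCount = 2;      // the same thread locked twice
    assert(!toyUnlock(request));     // first unlock: still held
    assert(toyUnlock(request));      // second unlock: actually released
}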
Example #18
void MetadataManager::append(BSONObjBuilder* builder) {
    stdx::lock_guard<stdx::mutex> scopedLock(_managerLock);

    BSONArrayBuilder rtcArr(builder->subarrayStart("rangesToClean"));
    for (const auto& entry : _rangesToClean) {
        BSONObjBuilder obj;
        ChunkRange r = ChunkRange(entry.first, entry.second);
        r.append(&obj);
        rtcArr.append(obj.done());
    }
    rtcArr.done();

    BSONArrayBuilder pcArr(builder->subarrayStart("pendingChunks"));
    for (const auto& entry : _receivingChunks) {
        BSONObjBuilder obj;
        ChunkRange r = ChunkRange(entry.first, entry.second);
        r.append(&obj);
        pcArr.append(obj.done());
    }
    pcArr.done();

    BSONArrayBuilder amrArr(builder->subarrayStart("activeMetadataRanges"));
    for (const auto& entry : _activeMetadataTracker->metadata->getChunks()) {
        BSONObjBuilder obj;
        ChunkRange r = ChunkRange(entry.first, entry.second);
        r.append(&obj);
        amrArr.append(obj.done());
    }
    amrArr.done();
}
bool ElevationMap::update(const grid_map::Matrix& varianceUpdate, const grid_map::Matrix& horizontalVarianceUpdateX,
                          const grid_map::Matrix& horizontalVarianceUpdateY,
                          const grid_map::Matrix& horizontalVarianceUpdateXY, const ros::Time& time)
{
  boost::recursive_mutex::scoped_lock scopedLock(rawMapMutex_);

  const auto& size = rawMap_.getSize();

  if (!((Index(varianceUpdate.rows(), varianceUpdate.cols()) == size).all()
      && (Index(horizontalVarianceUpdateX.rows(), horizontalVarianceUpdateX.cols()) == size).all()
      && (Index(horizontalVarianceUpdateY.rows(), horizontalVarianceUpdateY.cols()) == size).all()
      && (Index(horizontalVarianceUpdateXY.rows(), horizontalVarianceUpdateXY.cols()) == size).all())) {
    ROS_ERROR("The size of the update matrices does not match.");
    return false;
  }

  rawMap_.get("variance") += varianceUpdate;
  rawMap_.get("horizontal_variance_x") += horizontalVarianceUpdateX;
  rawMap_.get("horizontal_variance_y") += horizontalVarianceUpdateY;
  rawMap_.get("horizontal_variance_xy") += horizontalVarianceUpdateXY;
  clean();
  rawMap_.setTimestamp(time.toNSec());

  return true;
}
void ElevationMapping::mapUpdateTimerCallback(const ros::TimerEvent&)
{
  ROS_WARN("Elevation map is updated without data from the sensor.");

  boost::recursive_mutex::scoped_lock scopedLock(map_.getRawDataMutex());

  stopMapUpdateTimer();
  Time time = Time::now();

  // Update map from motion prediction.
  if (!updatePrediction(time)) {
    ROS_ERROR("Updating process noise failed.");
    resetMapUpdateTimer();
    return;
  }

  // Publish elevation map.
  map_.publishRawElevationMap();
  if (isContinouslyFusing_ && map_.hasFusedMapSubscribers()) {
    map_.fuseAll(true);
    map_.publishFusedElevationMap();
  }

  resetMapUpdateTimer();
}
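
A callback like this is driven by a ros::Timer. A sketch of how it might be wired up in the node's constructor, assuming a ros::NodeHandle named nodeHandle_ and an illustrative one-second period (the actual names and period in elevation_mapping may differ):

mapUpdateTimer_ = nodeHandle_.createTimer(
    ros::Duration(1.0),                               // illustrative update period
    &ElevationMapping::mapUpdateTimerCallback, this,
    false, false); // not one-shot, not auto-started; see stopMapUpdateTimer()/resetMapUpdateTimer()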
Example #21
bool
org::opensplice::core::EntitySet::contains(const dds::core::InstanceHandle& handle)
{
    org::opensplice::core::ScopedMutexLock scopedLock(this->mutex);
    bool contains = false;

    WeakReferenceSet<ObjectDelegate::weak_ref_type>::iterator it;

    for (it = this->entities.begin(); !contains && (it != this->entities.end()); it++) {
        org::opensplice::core::ObjectDelegate::ref_type ref = it->lock();
        if (ref) {
            org::opensplice::core::EntityDelegate::ref_type entity =
                    OSPL_CXX11_STD_MODULE::dynamic_pointer_cast<EntityDelegate>(ref);

            /* Check if current entity is the one that is searched for. */
            contains = (entity->instance_handle() == handle);
            if (!contains) {
                /* Check if current entity contains the one searched for. */
                contains = entity->contains_entity(handle);
            }
        }
    }

    return contains;
}
bool ElevationMapping::fuseEntireMap(std_srvs::Empty::Request&, std_srvs::Empty::Response&)
{
  boost::recursive_mutex::scoped_lock scopedLock(map_.getFusedDataMutex());
  map_.fuseAll(true);
  map_.publishFusedElevationMap();
  return true;
}
Example #23
void OsmAnd::Concurrent::TaskHost::onOwnerIsBeingDestructed()
{
    // Mark that owner is being destructed
    _ownerIsBeingDestructed = true;

    // Ask all tasks to cancel
    {
        QReadLocker scopedLock(&_hostedTasksLock);

        for(auto itTask = _hostedTasks.cbegin(); itTask != _hostedTasks.cend(); itTask++)
        {
            const auto& task = *itTask;
            task->requestCancellation();
        }
    }

    // Hold until all tasks are released
    for(;;)
    {
        _hostedTasksLock.lockForRead();
        if(_hostedTasks.size() == 0)
        {
            _hostedTasksLock.unlock();
            break;
        }
        _unlockedCondition.wait(&_hostedTasksLock);
        _hostedTasksLock.unlock();
    }
}
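
The wait loop above only makes progress if whoever removes a task from _hostedTasks also wakes _unlockedCondition. That counterpart is not part of this excerpt; a hypothetical sketch of its shape (method name and container API are assumed):

void OsmAnd::Concurrent::TaskHost::onTaskFinished(const std::shared_ptr<Task>& task)
{
    QWriteLocker scopedLock(&_hostedTasksLock);

    _hostedTasks.removeOne(task);
    _unlockedCondition.wakeAll(); // lets the destructor's wait loop re-check the size
}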
Example #24
OsmAnd::Concurrent::TaskHost::Bridge::~Bridge()
{
    {
        QReadLocker scopedLock(&_host->_hostedTasksLock);
        assert(_host->_hostedTasks.size() == 0);
    }
}
void IOSessionCommon::headerHandler(const boost::system::error_code& error)
{
    boost::mutex::scoped_lock scopedLock(_sessionMutex);
    if (!error)
    {
        Buffer::iterator readBufferIter = m_readBuffer.begin();
        MessageTypeIdentifier messageTypeIdentifier;
        size_t messageContentSize;

        Serializer<MessageTypeIdentifier>::deserializeItem(&messageTypeIdentifier, &readBufferIter);
        Serializer<uint32_t>::deserializeItem(&messageContentSize, &readBufferIter);

        MessageUID messageuid;
        size_t msguuid_size = 0;
        Serializer<size_t>::deserializeItem(&msguuid_size, &readBufferIter);

        for (uint8_t index = 0; index < MessageUID::static_size(); ++index)
            Serializer<uint8_t>::deserializeItem(&messageuid.data[index], &readBufferIter);

        // Grow the buffer and read the message body that follows the fixed-size header
        m_readBuffer.resize(m_readBuffer.size() + messageContentSize);

        async_read(m_socket,
            buffer(&m_readBuffer[IO_HEADER_SIZE], messageContentSize),
            transfer_at_least(messageContentSize),
            bind(&IOSessionCommon::messageHandler, shared_from_this(), messageuid, messageTypeIdentifier, placeholders::error));
    }
}
Example #26
ScopedCollectionMetadata MetadataManager::getActiveMetadata() {
    stdx::lock_guard<stdx::mutex> scopedLock(_managerLock);
    if (!_activeMetadataTracker) {
        return ScopedCollectionMetadata();
    }

    return ScopedCollectionMetadata(this, _activeMetadataTracker.get());
}
Example #27
    LockMode LockerImpl::getLockMode(const ResourceId& resId) const {
        scoped_spinlock scopedLock(_lock);

        const LockRequest* request = _find(resId);
        if (request == NULL) return MODE_NONE;

        return request->mode;
    }
Example #28
org::opensplice::core::EntitySet::vector
org::opensplice::core::EntitySet::copy()
{
    org::opensplice::core::ScopedMutexLock scopedLock(this->mutex);
    vector vctr(this->entities.size());
    std::copy(this->entities.begin(), this->entities.end(), vctr.begin());
    return vctr;
}
void ElevationMapping::publishFusedMapCallback(const ros::TimerEvent&)
{
  if (!map_.hasFusedMapSubscribers()) return;
  ROS_DEBUG("Elevation map is fused and published from timer.");
  boost::recursive_mutex::scoped_lock scopedLock(map_.getFusedDataMutex());
  map_.fuseAll(false);
  map_.publishFusedElevationMap();
}
Example #30
std::shared_ptr<OsmAnd::GPUAPI::SlotOnAtlasTextureInGPU> OsmAnd::GPUAPI::AtlasTexturesPool::allocateTile(AtlasTextureAllocatorSignature atlasTextureAllocator)
{
    // First look for freed slots
    {
        QMutexLocker scopedLock(&_freedSlotsMutex);

        while(!_freedSlots.isEmpty())
        {
            const auto& itFreedSlotEntry = _freedSlots.begin();

            // Mark slot as occupied
            const auto freedSlotEntry = itFreedSlotEntry.value();
            _freedSlots.erase(itFreedSlotEntry);

            // Return allocated slot
            const auto& atlasTexture = std::get<0>(freedSlotEntry);
            const auto& slotIndex = std::get<1>(freedSlotEntry);
            return std::shared_ptr<SlotOnAtlasTextureInGPU>(new SlotOnAtlasTextureInGPU(atlasTexture.lock(), slotIndex));
        }
    }
    
    {
        QMutexLocker scopedLock(&_unusedSlotsMutex);

        std::shared_ptr<AtlasTextureInGPU> atlasTexture;
        
        // If we've never allocated any atlases yet or the next unused slot is beyond allocated space - allocate new atlas texture then
        if (!_lastNonFullAtlasTexture || _firstUnusedSlotIndex == _lastNonFullAtlasTexture->slotsPerSide*_lastNonFullAtlasTexture->slotsPerSide)
        {
            atlasTexture.reset(atlasTextureAllocator());

            _lastNonFullAtlasTexture = atlasTexture.get();
            _lastNonFullAtlasTextureWeak = atlasTexture;
            _firstUnusedSlotIndex = 0;
        }
        else
        {
            atlasTexture = _lastNonFullAtlasTextureWeak.lock();
        }

        // Or let's just continue using current atlas texture
        return std::shared_ptr<SlotOnAtlasTextureInGPU>(new SlotOnAtlasTextureInGPU(atlasTexture, _firstUnusedSlotIndex++));
    }

    return nullptr;
}
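
The counterpart to this allocator is the slot's destructor, which would return the slot to _freedSlots so the first loop can recycle it. A hypothetical sketch; the member names and the container's value type (a weak pointer plus slot index, matching the std::get<0>/std::get<1> unpacking above) are inferred, not confirmed:

OsmAnd::GPUAPI::SlotOnAtlasTextureInGPU::~SlotOnAtlasTextureInGPU()
{
    const auto pool = atlasTexture->pool;

    QMutexLocker scopedLock(&pool->_freedSlotsMutex);

    // Keyed by the owning atlas so that ~AtlasTextureInGPU() can drop all of
    // its entries at once via _freedSlots.remove(this)
    pool->_freedSlots.insert(
        atlasTexture.get(),
        std::make_tuple(std::weak_ptr<AtlasTextureInGPU>(atlasTexture), slotIndex));
}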