Example #1
static void unmake_curthread_key()
{
	ScopedLock lock;
	// Guard the global key state, unless the lock itself has already
	// been torn down during static destruction.
	if( !tkl_destructed )
		lock.AssignAndLock( total_key_lock );

	// Only the last user destroys the key.
	if( --total_key_count > 0 ) return;

	if( curthread_key )
		pthread_key_delete( curthread_key );

	curthread_key = 0;
}
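This is the teardown half of a reference-counted pthread key. For context, a sketch of what the matching creation half might look like; the body below is an assumption built from the names above, not the project's actual code:

static void make_curthread_key()
{
	ScopedLock lock;
	if( !tkl_destructed )
		lock.AssignAndLock( total_key_lock );

	// The first user creates the key; later users just bump the count.
	if( total_key_count++ == 0 )
		pthread_key_create( &curthread_key, NULL );
}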
Example #2
    /** Load the prim at @a path if it is not already loaded.
        Returns true if anything was loaded; the lock is upgraded to a
        write lock when @a haveLock is false.
        The @a lock should already be read-locked.
        @{ */
    bool    LoadIfNeeded(ScopedLock& lock, const SdfPath& path,
                         const UsdStageRefPtr& stage, bool haveLock)
            {
                UT_ASSERT(stage);

                if(IsLoaded(path))
                    return false;

                if(!haveLock)
                    lock.UpgradeToWriter();

                Load(path, stage);
                return true;
            }
Example #3
    bool    LoadIfNeeded(ScopedLock& lock, const UnorderedPathSet& paths,
                         const UsdStageRefPtr& stage, bool haveLock)
            {
                UT_ASSERT(stage);

                SdfPathSet unloaded;
                GetUnloaded(paths, unloaded);

                if(unloaded.empty())
                    return false;

                if(!haveLock)
                    lock.UpgradeToWriter();
                
                Load(unloaded, stage);
                return true;
            }
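How a caller might drive these helpers, mirroring their use in Example #11 below; the lock-acquisition call here is an assumption, not the real API:

    // Hypothetical call site: hold a read lock, load on demand, and
    // drop back to a reader if LoadIfNeeded() upgraded the lock.
    ScopedLock lock(mutex, /*write*/ false);   // assumed constructor
    if(loader.LoadIfNeeded(lock, paths, stage, /*haveLock*/ false))
        lock.DowngradeToReader();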
Example #4
void ConditionalWait::Wait( const ScopedLock& scope )
{
  // Scope must be locked:
  DALI_ASSERT_DEBUG( &scope.GetLockedWait() == this );

  ++(mImpl->count);

  // pthread_cond_wait can wake spuriously, without anyone calling Notify,
  // so loop until the count has been reset by a notify:
  do
  {
    // Atomically releases the mutex while waiting, re-acquiring it on wake.
    pthread_cond_wait( &mImpl->condition, &mImpl->mutex );
  }
  while( 0 != mImpl->count );

  // We return with our mutex locked safe in the knowledge that the ScopedLock
  // passed in will unlock it in the caller.
}
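The loop above only exits once a notifier resets mImpl->count while holding the mutex. A sketch of what the matching Notify might look like, assuming the same mImpl layout; the actual implementation may differ:

void ConditionalWait::Notify( const ScopedLock& scope )
{
  // Scope must be locked, exactly as in Wait():
  DALI_ASSERT_DEBUG( &scope.GetLockedWait() == this );

  // Only wake waiters if somebody is actually waiting.
  if( mImpl->count > 0 )
  {
    mImpl->count = 0; // allows the waiters' do/while loop to exit
    pthread_cond_broadcast( &mImpl->condition );
  }
}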
Example #5
	void Release() {
		m_mtgs.m_RingBufferIsBusy = false;
		// Release in the reverse order of acquisition.
		m_lock2.Release();
		m_lock1.Release();
	}
Example #6
	void Acquire() {
		// Always acquire in the same order to avoid deadlock.
		m_lock1.Acquire();
		m_lock2.Acquire();
		m_mtgs.m_RingBufferIsBusy = true;
	}
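Examples #5 and #6 form one guard: the locks are taken in a fixed order (m_lock1, then m_lock2) and released in the reverse order, the standard lock-ordering discipline for avoiding deadlock. A self-contained illustration of the same discipline with standard primitives; all names here are invented for the example:

#include <mutex>

std::mutex g_lock1, g_lock2;
bool g_busy = false;

void DoBusyWork()
{
	// std::scoped_lock acquires both mutexes deadlock-free and
	// releases them in reverse order when the scope ends.
	std::scoped_lock guard( g_lock1, g_lock2 );
	g_busy = true;
	// ... work that requires both locks ...
	g_busy = false;
}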
Example #7
	void PartialRelease() {
		// Give up only the inner lock; the outer lock stays held.
		m_lock2.Release();
	}
Example #8
	void PartialAcquire() {
		// Re-take the inner lock after a PartialRelease().
		m_lock2.Acquire();
	}
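Examples #7 and #8 give up and re-take only the inner lock, opening a window in which other threads can briefly take m_lock2 while the outer m_lock1 stays held. A standalone sketch of that two-level handoff, with invented names:

#include <mutex>

std::mutex g_outer, g_inner;

void LongTaskWithWindow()
{
	std::unique_lock outerGuard( g_outer );
	std::unique_lock innerGuard( g_inner );
	// ... work that needs both locks ...

	innerGuard.unlock();  // partial release: others may now take g_inner
	// ... long work that needs only g_outer ...
	innerGuard.lock();    // partial re-acquire

	// ... more work that needs both locks again ...
}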
Example #9
	void Release() {
		// Same as Example #5, with the busy flag now a std::atomic<bool>.
		m_mtgs.m_RingBufferIsBusy.store(false, std::memory_order_relaxed);
		m_lock2.Release();
		m_lock1.Release();
	}
Example #10
	void Acquire() {
		// Same as Example #6, with the busy flag now a std::atomic<bool>.
		m_lock1.Acquire();
		m_lock2.Acquire();
		m_mtgs.m_RingBufferIsBusy.store(true, std::memory_order_relaxed);
	}
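Examples #9 and #10 are the same pair after m_RingBufferIsBusy became a std::atomic<bool>. The relaxed stores only make access to the flag itself race-free; any ordering with the protected data still comes from the surrounding locks. A tiny standalone sketch of that pattern, with invented names:

#include <atomic>
#include <mutex>

std::mutex g_lock;
std::atomic<bool> g_busy{ false };

void Enter()
{
	g_lock.lock();
	// Relaxed suffices: the mutex already orders the critical section;
	// the atomic only lets other threads poll the flag without a data race.
	g_busy.store( true, std::memory_order_relaxed );
}

void Leave()
{
	g_busy.store( false, std::memory_order_relaxed );
	g_lock.unlock();
}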
Example #11
bool
GusdUSD_StageProxy::_Load(ScopedLock& lock,
                          UsdStage::InitialLoadSet loadSet,
                          const UnorderedPathSet* pathsToLoad,
                          GusdUT_ErrorContext* err)
{
    if(!_microNode.requiresUpdate(0)) {
        /* XXX: Errors copied will currently only include errors, not
           warnings, because TfErrorMark, which is being used to capture
           USD errors, is currently unable to capture warnings.

           This means that when the stage is valid, very little work is
           done here. That is expected to change in Tf eventually, at
           which point every stage lookup may involve copying an excess
           of warnings, possibly impacting performance. This approach of
           copying all errors may need revisiting once Tf starts
           allowing warnings to be captured.*/
        if(err)
            _CopyErrors(*err);

        if(pathsToLoad && _primLoader) {
            if(_primLoader->LoadIfNeeded(lock, *pathsToLoad, _stage,
                                         /*have lock*/ false))
                lock.DowngradeToReader();
        }
        return _stage;
    }
    /* If the upgrade was not atomic, another thread may have run in
       between; recheck that an update is still required. */
    if(lock.UpgradeToWriter() || _microNode.requiresUpdate(0)) {
        /* Mark the proxy clean, so that we don't attempt to load
           again even if loading has failed. To attempt to reload,
           the node should be dirtied with MarkDirty() prior to
           the next load attempt.*/
        _microNode.update(0);

        _errors.clearAndDestroyErrors();

        GusdUT_ErrorManager errMgr(_errors);
        GusdUT_TfErrorScope scope(errMgr);
        
        if(_stage) {
            /* Asking to load when we already have a stage means
               we should reload the stage.*/
            _Reload(_stage);
            
            lock.DowngradeToReader();

            // XXX: Can reloading fail?
            return true;
        }

        if(SdfLayerRefPtr rootLyr =
           SdfLayer::FindOrOpen(_key->path.GetString())) {
            
            // Load the stage from the cache.
            UsdStageCacheContext ctx(_cache.GetCache());
            if(UsdStageRefPtr stage = UsdStage::Open(
                   rootLyr, _key->sessionLyr, _key->resolverCtx, loadSet)) {
                
                _realPath = TfToken(TfRealPath( _key->path));
                struct stat attrib;
                if( stat(_realPath.GetText(), &attrib) == 0 ) {
                    _mtime = attrib.st_mtime;
                }

                UT_ASSERT(_cache.GetCache().Contains(stage));
                _stage = stage;

                _InitLoadSet(loadSet);
                _stageData.Update(stage);

                if(pathsToLoad && _primLoader) {
                    _primLoader->Load(SdfPathSet(pathsToLoad->begin(),
                                                 pathsToLoad->end()), stage);
                }
            }
        } else {
            /* Sdf doesn't throw errors here, so we need
               to report the failure ourselves.*/
            UT_WorkBuffer buf;
            buf.sprintf("Failed to open layer: %s",
                        _key->path.GetString().c_str());
            GusdUT_LogGenericError(_errors, buf.buffer());
        }
    }
    if(err) 
        _CopyErrors(*err);
    lock.DowngradeToReader();
    return _stage;
}
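The lock.UpgradeToWriter() || _microNode.requiresUpdate(0) test is a classic reader-to-writer idiom: upgrading can require dropping the lock momentarily, and when that happens the guarding condition must be rechecked, because another thread may have done the work in between. A minimal sketch of the same idiom using TBB's spin_rw_mutex, whose upgrade_to_writer() has these semantics; the cache names are invented:

#include <tbb/spin_rw_mutex.h>

tbb::spin_rw_mutex g_rwMutex;
bool g_cacheValid = false;

void EnsureCache()
{
    // Start as a reader.
    tbb::spin_rw_mutex::scoped_lock lock( g_rwMutex, /*write*/ false );
    if( g_cacheValid )
        return;

    // upgrade_to_writer() returns false when the lock had to be released
    // and reacquired; in that case another thread may have filled the
    // cache already, so recheck before rebuilding.
    if( lock.upgrade_to_writer() || !g_cacheValid )
    {
        // ... rebuild the cache ...
        g_cacheValid = true;
    }
    lock.downgrade_to_reader();
}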