Example #1
void ReadWriteLock_ReleaseWrite(
    _Inout_ ReadWriteLock* self)
{
    volatile LockFields* lock = (LockFields*)self;
    size_t state, key;

    state = Atomic_Add(LockState(lock), -OWN_EXCLUSIVE);
    if (state != 0)
    {
        /* There is a queue.
         * Threads may be blocked waiting for us to leave. */
        key = (size_t)lock ^ LockExit(state);
        CondLock_Broadcast(key);

        if (LockEntry(state) - LockExit(state) >= 2 &&
            ((CurrentTick() - LockUnfair(state)) & 14) == 0)
        {
            /* Under certain conditions, encourage the last group of threads in
             * line to stop spinning and acquire unfairly. */
            if (LockEntry(state) == LockWriter(state))
                key = (size_t)lock ^ (LockEntry(state) - 1);
            else
                key = (size_t)lock ^ LockWriter(state);
            CondLock_BroadcastSpinners(key);
        }
    }
}
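
The field accessors used above and in Examples #8 and #9 (LockState, LockOwners, LockEntry, LockExit, LockWriter, LockUnfair, LockSpin) are not shown on this page. Below is a minimal sketch of how such lvalue macros could pack the ticket-queue fields into a single atomically updated word; the names come from the examples, but the widths, ordering, and constant values are assumptions, not the real project's layout.

/* Sketch only: field widths, ordering, and constants are assumed.
 * Assumes little-endian, so 'owners' is the low byte and
 * state <= OWN_EXCLUSIVE means all queue fields are zero. */
#include <stddef.h>
#include <stdint.h>

typedef struct LockFields
{
    uint8_t owners;  /* LockOwners: shared-reader count, or OWN_EXCLUSIVE */
    uint8_t entry;   /* LockEntry:  ticket handed to the newest waiter */
    uint8_t exit;    /* LockExit:   ticket currently allowed to acquire */
    uint8_t writer;  /* LockWriter: ticket of the most recently queued writer */
    uint8_t unfair;  /* LockUnfair: tick when unfair acquisition was last allowed */
    uint8_t spin;    /* LockSpin:   adaptive spin-success heuristic */
} LockFields;

enum
{
    OWN_MAXSHARED = 0xFE,  /* assumed cap on concurrent readers */
    OWN_EXCLUSIVE = 0xFF,  /* assumed writer-owned marker */
    FIELD_SIZE    = 0x100, /* a ticket field wraps modulo this */
    FIELD_SIGN    = 0x80   /* sign bit of a ticket field */
};

/* The whole struct aliases one machine word so it can be read, added to,
 * and compare-and-swapped as a unit. */
#define LockState(lock)     ((ptrdiff_t*)(lock))
#define ReadLockState(lock) (*(size_t*)(lock))

/* Each accessor reinterprets a size_t snapshot as LockFields and yields
 * an lvalue, so expressions like ++LockEntry(state) edit a local copy. */
#define LockOwners(state) (((LockFields*)&(state))->owners)
#define LockEntry(state)  (((LockFields*)&(state))->entry)
#define LockExit(state)   (((LockFields*)&(state))->exit)
#define LockWriter(state) (((LockFields*)&(state))->writer)
#define LockUnfair(state) (((LockFields*)&(state))->unfair)
#define LockSpin(state)   (((LockFields*)&(state))->spin)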
Example #2
void cParamServer::UpdateNet(const tInputInfo& in_info, tOutputInfo& out_info)
{
	int id = in_info.mID;
	cNeuralNet* grad_net = in_info.mGradNet;

	auto& entry = mPool[id];
	auto& net = entry.mNet;

	LockEntry(id);

	net->CopyGrad(*grad_net);
	net->StepSolver(1);

	if (in_info.mIncIter)
	{
		++entry.mIter;
	}
	out_info.mIter = entry.mIter;

	if (out_info.mSyncNet != nullptr)
	{
		out_info.mSyncNet->CopyModel(*net);
	}

	UnlockEntry(id);
}
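
LockEntry(id) and UnlockEntry(id), used throughout the cParamServer examples (#2, #5, #6, #7), are also not shown. A plausible minimal sketch, assuming each pool entry carries its own std::mutex and that the pool is sized once before any workers run:

// Sketch only: names mirror the examples above; the real class may differ.
#include <mutex>
#include <vector>

class cParamServer
{
public:
    void LockEntry(int id)   { mPool[id].mLock.lock(); }
    void UnlockEntry(int id) { mPool[id].mLock.unlock(); }

private:
    struct tPoolEntry
    {
        std::mutex mLock;           // hypothetical per-entry mutex
        int mIter = 0;              // solver iteration counter
        int mScaleUpdateCount = 0;  // samples folded into the input scale
        // plus the entry's network, e.g. a pointer/handle named mNet
    };
    std::vector<tPoolEntry> mPool;  // fixed size: std::mutex is not movable
};

A per-entry lock keeps updates to different networks from serializing against each other; only callers touching the same id contend.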
Example #3
/*!
    \fn svn::InfoEntry::init()
 */
void svn::InfoEntry::init()
{
  m_name = "";
  m_last_changed_date=0;
  m_text_time = 0;
  m_prop_time = 0;
  m_hasWc = false;
  m_Lock = LockEntry();
  m_checksum = "";
  m_conflict_new = "";
  m_conflict_old = "";
  m_conflict_wrk = "";
  m_copyfrom_url = "";
  m_last_author = "";
  m_prejfile = "";
  m_repos_root = "";
  m_url = "";
  m_pUrl = "";
  m_UUID = "";
  m_kind = svn_node_none;
  m_copy_from_rev = SVN_INVALID_REVNUM;
  m_last_changed_rev = SVN_INVALID_REVNUM;
  m_revision = SVN_INVALID_REVNUM;
  m_schedule = svn_wc_schedule_normal;

  m_size = m_working_size = SVNQT_SIZE_UNKNOWN;
  m_changeList=QByteArray();
  m_depth = DepthUnknown;
}
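
m_Lock = LockEntry(); resets the lock information to a default "not locked" value. The svn::LockEntry class itself does not appear on this page; here is a minimal sketch of the surface these initializers rely on (default construction plus init() from the Subversion C API struct; the member shown is illustrative):

// Sketch only: just the interface used by the init() functions here.
struct svn_lock_t;  // from Subversion's svn_types.h

class LockEntry
{
public:
    LockEntry() = default;              // represents "no lock"
    void init(const svn_lock_t *lock);  // fill owner, token, comment, dates
    bool Locked() const { return m_locked; }

private:
    bool m_locked = false;
    // owner, token, comment, creation/expiration dates, ...
};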
Example #4
/*!
    \fn svn::InfoEntry::init()
 */
void svn::InfoEntry::init()
{
    m_name.clear();
    m_last_changed_date = DateTime();
    m_text_time = DateTime();
    m_prop_time = DateTime();
    m_hasWc = false;
    m_Lock = LockEntry();
    m_checksum.clear();
#if SVN_API_VERSION >= SVN_VERSION_CHECK(1,7,0)
#else
    m_conflict_new.clear();
    m_conflict_old.clear();
    m_conflict_wrk.clear();
#endif
    m_copyfrom_url.clear();
    m_last_author.clear();
    m_prejfile.clear();
    m_repos_root.clear();
    m_url.clear();
    m_UUID.clear();
    m_kind = svn_node_none;
    m_copy_from_rev = SVN_INVALID_REVNUM;
    m_last_changed_rev = SVN_INVALID_REVNUM;
    m_revision = SVN_INVALID_REVNUM;
    m_schedule = svn_wc_schedule_normal;

    m_size = m_working_size = SVNQT_SIZE_UNKNOWN;
    m_changeList.clear();
    m_depth = DepthUnknown;
}
Example #5
int cParamServer::GetIter(int id)
{
	auto& entry = mPool[id];
	LockEntry(id);
	int iter = entry.mIter;
	UnlockEntry(id);

	return iter;
}
Example #6
void cParamServer::SyncNet(int id, cNeuralNet& out_net)
{
	auto& net = mPool[id].mNet;

	LockEntry(id);
	out_net.CopyModel(*net);
	UnlockEntry(id);
}
Example #7
void cParamServer::UpdateInputOffsetScale(int id, const Eigen::VectorXd& offset, const Eigen::VectorXd& scale)
{
	auto& entry = mPool[id];
	auto& net = entry.mNet;
	int& count = entry.mScaleUpdateCount;

	LockEntry(id);
	
	Eigen::VectorXd curr_offset = net->GetInputOffset();
	Eigen::VectorXd curr_scale = net->GetInputScale();
	
	// Fold the new sample into the running average: after this update,
	// curr_* holds the mean of all (count + 1) samples seen so far.
	curr_offset = (count * curr_offset + offset) / (count + 1);
	curr_scale = (count * curr_scale + scale) / (count + 1);
	net->SetInputOffsetScale(curr_offset, curr_scale);
	++count;

	UnlockEntry(id);
}
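
The update above is an incremental mean: after n updates, curr = (n * curr + sample) / (n + 1) leaves curr equal to the arithmetic mean of all n + 1 samples seen so far. A scalar sanity check of that identity:

// Scalar version of the running average used in UpdateInputOffsetScale.
#include <cassert>
#include <cmath>

int main()
{
    double samples[] = { 2.0, 4.0, 9.0 };
    double mean = 0.0;
    int count = 0;
    for (double x : samples)
    {
        mean = (count * mean + x) / (count + 1); // same form as above
        ++count;
    }
    assert(std::fabs(mean - 5.0) < 1e-12); // (2 + 4 + 9) / 3 == 5
    return 0;
}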
Example #8
static void QueueAcquireWrite(
    _Inout_ ReadWriteLock* self
)
{
    volatile LockFields* lock = (LockFields*)self;
    size_t oldState, state, swapState, preQueuedState;
    size_t waitFor, key, spinState, spinCount;

    for (;;)
    {
        oldState = ReadLockState(lock);
        state = oldState;

        /* If there is no queue, we are the first one to wait;
         * allow unfairness for the current timer tick. */
        if (state <= OWN_EXCLUSIVE)
            LockUnfair(state) = CurrentTick();

        /* Wait for the most recent thread to enter the queue. */
        waitFor = LockEntry(state);

        if (++LockEntry(state) == LockExit(state))
        {
            /* The queue arithmetic will wrap if we continue. */
            Thread_Yield();
            continue;
        }

        /* Make reader threads coming in wait for us. */
        LockWriter(state) = LockEntry(state);

        swapState = Atomic_CompareAndSwap(LockState(lock), oldState, state);
        if (swapState == oldState)
            break;
    }

    /* This thread now has a place in the queue.
     * Threads behind us may be depending on us to wake them up. */

    preQueuedState = oldState;
    key = (size_t)lock ^ waitFor;
    spinState = LockSpin(oldState);

    for (;;)
    {
        /* Avoid write prefetching since we expect to wait. */
        oldState = *(ptrdiff_t*)lock;

        if (LockExit(oldState) != waitFor || LockOwners(oldState) != 0)
        {
            /* The thread ahead of us still hasn't acquired,
             * or some reader or writer owns the lock right now. */
            if (((CurrentTick() - LockUnfair(oldState)) & 14) == 0 &&
                LockEntry(oldState) - LockExit(oldState) >= 2 &&
                LockEntry(oldState) == LockEntry(preQueuedState) + 1 &&
                LockOwners(oldState) == 0)
            {
                /* Under certain conditions, we can acquire immediately if we
                 * are the last thread in line and undo joining the queue. */
                if (preQueuedState <= OWN_EXCLUSIVE)
                    state = OWN_EXCLUSIVE;
                else
                {
                    state = oldState + OWN_EXCLUSIVE;
                    LockEntry(state) = LockEntry(preQueuedState);
                    LockWriter(state) = LockWriter(preQueuedState);
                }

                /* Atomically de-queue and acquire unfairly. */
                swapState = Atomic_CompareAndSwap(LockState(lock), oldState, state);
                if (swapState == oldState)
                    return;
                continue;
            }

            /* spinState being low means spinning usually works.
             * Use a high count if it has been working recently. */
            spinCount = (spinState & SPIN_SIGN) ?
                CONDLOCK_LOW_SPINCOUNT :
                CONDLOCK_HIGH_SPINCOUNT;

            /* Spin and/or block until something changes.
             * Adjust the spin field based on whether spinning worked. */
            if (CondLock_Wait(key, (ptrdiff_t*)lock, oldState, spinCount))
                spinState = (spinState > 2) ? (spinState - 2) : 0;
            else
                spinState = (spinState < SPIN_MAX) ? (spinState + 1) : spinState;
            continue;
        }

        state = oldState + OWN_EXCLUSIVE;

        /* Bump the exit ticket number. We're leaving the queue. */
        LockExit(state)++;

        /* Zero the top 4 fields if the queue is now empty. */
        if (LockExit(state) == LockEntry(state))
            state = LockOwners(state);
        else
        {
            /* Not empty, but we just acquired fairly.
             * Allow unfairness for a while. */
            LockUnfair(state) = CurrentTick();
            LockSpin(state) = spinState;
        }

        /* Ready to take exclusive ownership. */
        swapState = Atomic_CompareAndSwap(LockState(lock), oldState, state);
        if (swapState == oldState)
            return;
    }
}
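
Both queued-acquire paths, and the release path in Example #1, gate unfair acquisition on ((CurrentTick() - LockUnfair(state)) & 14) == 0. Masking with 14 (binary 1110) tests only bits 1-3 of the tick delta, so the check passes for deltas of 0-1, 16-17, 32-33, and so on: roughly a two-tick unfair window out of every sixteen ticks since fairness was last stamped. A small standalone check of that reading:

#include <assert.h>
#include <stddef.h>

/* The unfair-window predicate shared by the lock examples above. */
static int InUnfairWindow(size_t now, size_t stamped)
{
    return ((now - stamped) & 14) == 0; /* 14 = 0b1110: ignore bit 0 */
}

int main(void)
{
    assert(InUnfairWindow(100, 100));   /* delta 0  */
    assert(InUnfairWindow(101, 100));   /* delta 1  */
    assert(!InUnfairWindow(102, 100));  /* delta 2  */
    assert(!InUnfairWindow(115, 100));  /* delta 15 */
    assert(InUnfairWindow(116, 100));   /* delta 16 */
    assert(InUnfairWindow(117, 100));   /* delta 17 */
    assert(!InUnfairWindow(118, 100));  /* delta 18 */
    return 0;
}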
Example #9
static void QueueAcquireRead(
    _Inout_ ReadWriteLock* self
)
{
    volatile LockFields* lock = (LockFields*)self;
    size_t oldState, state, swapState, preQueuedState;
    size_t waitFor, diff, key, spinState, spinCount;

    for (;;)
    {
        oldState = ReadLockState(lock);
        state = oldState;

        /* If there is no queue, we are the first one to wait;
         * allow unfairness for the current timer tick. */
        if (state <= OWN_EXCLUSIVE)
            LockUnfair(state) = CurrentTick();

        /* Insert a barrier every half revolution.
         * This stops writer arithmetic from wrapping. */
        if ((LockEntry(state) & ~FIELD_SIGN) == 0)
            LockWriter(state) = LockEntry(state);

        if (++LockEntry(state) == LockExit(state))
        {
            /* The queue arithmetic will wrap if we continue. */
            Thread_Yield();
            continue;
        }

        swapState = Atomic_CompareAndSwap(LockState(lock), oldState, state);
        if (swapState == oldState)
            break;
    }

    /* This thread now has a place in the queue.
     * Threads behind us may be depending on us to wake them up. */

    /* Wait for the most recent writer to enter the queue. */
    waitFor = LockWriter(state);
    key = (size_t)lock ^ waitFor;
    preQueuedState = oldState;
    spinState = LockSpin(oldState);

    for (;;)
    {
        /* Avoid write prefetching since we expect to wait. */
        oldState = *(ptrdiff_t*)lock;

        diff = LockExit(oldState) - waitFor;
        if ((diff & FIELD_SIGN) == 0)
        {
            /* The writer ahead of us in line already acquired.
             * Someone could have beat us unfairly.
             * Just wait for the current owner. */
            waitFor = LockExit(oldState);
            key = (size_t)lock ^ waitFor;
        }

        if ((diff & FIELD_SIGN) != 0 || (LockOwners(oldState) == OWN_EXCLUSIVE))
        {
            /* The writer ahead of us still hasn't acquired,
             * or someone owns the lock exclusively right now. */
            if (((CurrentTick() - LockUnfair(oldState)) & 14) == 0 &&
                LockEntry(oldState) - LockExit(oldState) >= 2 &&
                LockEntry(oldState) == LockEntry(preQueuedState) + 1 &&
                (LockOwners(oldState) < OWN_MAXSHARED))
            {
                /* Under certain conditions, we can acquire immediately if we
                 * are the last thread in line and undo joining the queue. */
                if (preQueuedState <= OWN_EXCLUSIVE)
                    state = LockOwners(oldState) + 1;
                else
                {
                    state = oldState + 1;
                    LockEntry(state) = LockEntry(preQueuedState);
                    LockWriter(state) = LockWriter(preQueuedState);
                }

                /* Atomically de-queue and acquire unfairly. */
                swapState = Atomic_CompareAndSwap(LockState(lock), oldState, state);
                if (swapState == oldState)
                    return;
                continue;
            }

            /* spinState being low means spinning usually works.
             * Use a high count if it has been working recently. */
            spinCount = (spinState & SPIN_SIGN) ?
                CONDLOCK_LOW_SPINCOUNT :
                CONDLOCK_HIGH_SPINCOUNT;

            /* Spin and/or block until something changes.
             * Adjust the spin field based on whether spinning worked. */
            if (CondLock_Wait(key, (ptrdiff_t*)lock, oldState, spinCount))
                spinState = (spinState > 2) ? (spinState - 2) : 0;
            else
                spinState = (spinState < SPIN_MAX) ? (spinState + 1) : spinState;
            continue;
        }
        
        if (LockOwners(oldState) == OWN_MAXSHARED)
        {
            /* The owner arithmetic will overflow if we continue. */
            Thread_Yield();
            continue;
        }

        state = oldState + 1;

        /* Bump the exit ticket number. We're leaving the queue. */
        LockExit(state)++;

        /* Zero the top 4 fields if the queue is now empty. */
        if (LockExit(state) == LockEntry(state))
            state = LockOwners(state);
        else
        {
            /* Not empty, but we just acquired fairly.
             * Allow unfairness for a while. */
            LockUnfair(state) = CurrentTick();
            LockSpin(state) = spinState;
        }

        /* Ready to take shared ownership. */
        swapState = Atomic_CompareAndSwap(LockState(lock), oldState, state);
        if (swapState == oldState)
            break;
    }

    if ((LockExit(state) & ~FIELD_SIGN) == 0)
    {
        /* Wakes those waiting on the artificial barrier inserted each half
         * revolution (see above). */
        key = (size_t)lock ^ LockExit(state);
        CondLock_Broadcast(key);
    }
}
Example #10
/*!
    \fn svn::InfoEntry::init(const svn_info_t*, const QString&)
 */
void svn::InfoEntry::init(const svn_info_t *item, const QString &path)
{
  if (!item) {
    init();
    return;
  }
  m_name = path;
  m_last_changed_date=item->last_changed_date;
  m_text_time = item->text_time;
  m_prop_time = item->prop_time;
  if (item->lock) {
    m_Lock.init(item->lock);
  } else {
    m_Lock = LockEntry();
  }
  m_checksum = QString::fromUtf8(item->checksum);
  m_conflict_new = QString::fromUtf8(item->conflict_new);
  m_conflict_old = QString::fromUtf8(item->conflict_old);
  m_conflict_wrk = QString::fromUtf8(item->conflict_wrk);
  m_copyfrom_url = QString::fromUtf8(item->copyfrom_url);
  m_last_author = QString::fromUtf8(item->last_changed_author);
  m_prejfile = QString::fromUtf8(item->prejfile);
  m_repos_root = QString::fromUtf8(item->repos_root_URL);
  m_url = QString::fromUtf8(item->URL);
  m_pUrl = prettyUrl(item->URL);
  m_UUID = QString::fromUtf8(item->repos_UUID);
  m_kind = item->kind;
  m_copy_from_rev = item->copyfrom_rev;
  m_last_changed_rev = item->last_changed_rev;
  m_revision = item->rev;
  m_hasWc = item->has_wc_info;
  m_schedule = item->schedule;

#if ((SVN_VER_MAJOR == 1) && (SVN_VER_MINOR >= 6)) || (SVN_VER_MAJOR > 1)
  m_size = item->size64!=SVN_INVALID_FILESIZE?qlonglong(item->size64):SVNQT_SIZE_UNKNOWN;
  m_working_size = item->working_size64!=SVN_INVALID_FILESIZE?qlonglong(item->working_size64):SVNQT_SIZE_UNKNOWN;
  if (m_working_size == SVNQT_SIZE_UNKNOWN) {
      m_working_size = item->working_size!=SVN_INFO_SIZE_UNKNOWN?qlonglong(item->working_size):SVNQT_SIZE_UNKNOWN;
  }
#elif (SVN_VER_MINOR == 5)
  m_size = item->size!=SVN_INFO_SIZE_UNKNOWN?qlonglong(item->size):SVNQT_SIZE_UNKNOWN;
  m_working_size = item->working_size!=SVN_INFO_SIZE_UNKNOWN?qlonglong(item->working_size):SVNQT_SIZE_UNKNOWN;
#endif

#if ((SVN_VER_MAJOR == 1) && (SVN_VER_MINOR >= 5)) || (SVN_VER_MAJOR > 1)
  if (item->changelist) {
      m_changeList = QByteArray(item->changelist,strlen(item->changelist));
  } else {
      m_changeList=QByteArray();
  }

  switch (item->depth) {
      case svn_depth_exclude:
          m_depth=DepthExclude;
          break;
      case svn_depth_empty:
          m_depth=DepthEmpty;
          break;
      case svn_depth_files:
          m_depth=DepthFiles;
          break;
      case svn_depth_immediates:
          m_depth=DepthImmediates;
          break;
      case svn_depth_infinity:
          m_depth=DepthInfinity;
          break;
      case svn_depth_unknown:
      default:
          m_depth=DepthUnknown;
          break;
  }
#else
  m_size = SVNQT_SIZE_UNKNOWN;
  m_working_size = SVNQT_SIZE_UNKNOWN;
  m_changeList=QByteArray();
  m_depth = DepthUnknown;
#endif
}
Example #11
void svn::InfoEntry::init(const svn_client_info2_t *item, const QString &path)
{
    if (!item) {
        init();
        return;
    }
    m_name = path;
    m_last_changed_date = DateTime(item->last_changed_date);
    if (item->lock) {
        m_Lock.init(item->lock);
    } else {
        m_Lock = LockEntry();
    }
    m_size = item->size != SVN_INVALID_FILESIZE ? qlonglong(item->size) : SVNQT_SIZE_UNKNOWN;
    m_repos_root = QUrl::fromEncoded(item->repos_root_URL);
    m_url = QUrl::fromEncoded(item->URL);
    m_UUID = QString::fromUtf8(item->repos_UUID);
    m_kind = item->kind;
    m_revision = item->rev;
    m_last_changed_rev = item->last_changed_rev;
    m_last_author = QString::fromUtf8(item->last_changed_author);
    if (item->wc_info != 0) {
        m_hasWc = true;
        m_schedule = item->wc_info->schedule;
        if (item->wc_info->copyfrom_url)
            m_copyfrom_url = QUrl::fromEncoded(item->wc_info->copyfrom_url);
        else
            m_copyfrom_url.clear();
        m_copy_from_rev = item->wc_info->copyfrom_rev;
        if (item->wc_info->changelist) {
            m_changeList = QByteArray(item->wc_info->changelist, strlen(item->wc_info->changelist));
        } else {
            m_changeList = QByteArray();
        }
        if (item->wc_info->conflicts != 0) {
            for (int j = 0; j < item->wc_info->conflicts->nelts; ++j) {
                svn_wc_conflict_description2_t *_desc = ((svn_wc_conflict_description2_t **)item->wc_info->conflicts->elts)[j];
                m_conflicts.push_back(ConflictDescriptionP(new ConflictDescription(_desc)));
            }
        }

        switch (item->wc_info->depth) {
        case svn_depth_exclude:
            m_depth = DepthExclude;
            break;
        case svn_depth_empty:
            m_depth = DepthEmpty;
            break;
        case svn_depth_files:
            m_depth = DepthFiles;
            break;
        case svn_depth_immediates:
            m_depth = DepthImmediates;
            break;
        case svn_depth_infinity:
            m_depth = DepthInfinity;
            break;
        case svn_depth_unknown:
        default:
            m_depth = DepthUnknown;
            break;
        }
    } else {
        m_hasWc = false;
    }
}