Пример #1
0
/* Modifier thread #2: each pass moves 3 units from b to a under the
 * aMutex -> bRWLock acquisition order, asserting the a + b == 100
 * invariant while both locks are held, then records completion by
 * bumping 'done' under the done RW lock.  The parm argument is unused.
 */
unsigned long main_Mod2(void *parm)
{
    long pass;

    for (pass = 0; pass < OSI_MOD2LOOPS; pass++) {
        osi_Log0(main_logp, "mod2");

        /* lock hierarchy: mutex first, then the RW lock */
        lock_ObtainMutex(&main_aMutex);
        lock_ObtainWrite(&main_bRWLock);

        a += 3;
        Sleep(0);               /* yield to stress interleavings */
        b -= 3;
        osi_assert(a + b == 100);
        Sleep(0);

        lock_ReleaseWrite(&main_bRWLock);
        Sleep(0);
        lock_ReleaseMutex(&main_aMutex);
        Sleep(0);

        m2Loops = pass;         /* progress counter for the scanners */
        osi_Log4(main_logp, "mod2 done, %d %d %d %d", m2Loops, 2, 3, 4);
    }

    /* tell the scanner threads we are finished */
    lock_ObtainWrite(&main_doneRWLock);
    done++;
    Sleep(0);
    lock_ReleaseWrite(&main_doneRWLock);
    return 0;
}
Пример #2
0
/* Modifier thread #1: each pass moves 52 units from a to b under the
 * aMutex -> bRWLock acquisition order, asserting the a + b == 100
 * invariant while both locks are held, then records completion by
 * bumping 'done' under the done RW lock.  The parm argument is unused.
 */
unsigned long main_Mod1(void *parm)
{
    long pass;

    for (pass = 0; pass < OSI_MOD1LOOPS; pass++) {
        /* lock hierarchy: mutex first, then the RW lock */
        lock_ObtainMutex(&main_aMutex);
        osi_Log0(main_logp, "mod1");
        lock_ObtainWrite(&main_bRWLock);

        a -= 52;
        Sleep(0);               /* yield to stress interleavings */
        b += 52;
        osi_assert(a + b == 100);
        Sleep(0);

        lock_ReleaseWrite(&main_bRWLock);
        Sleep(0);
        lock_ReleaseMutex(&main_aMutex);
        Sleep(0);

        m1Loops = pass;         /* progress counter for the scanners */
        osi_Log1(main_logp, "mod1 done, %d", m1Loops);
    }

    /* tell the scanner threads we are finished */
    lock_ObtainWrite(&main_doneRWLock);
    done++;
    Sleep(0);
    lock_ReleaseWrite(&main_doneRWLock);
    return 0;
}
Пример #3
0
/* lock_ObtainWrite -- acquire lockp in exclusive (write) mode.
 *
 * Non-base lock types (lockp->type != 0) are dispatched through the
 * osi_lockOps vector; the rest of this routine implements only the
 * fast base type.  Blocks on the lock's turnstile while any reader
 * or writer holds the lock, or while other writers are queued.
 */
void lock_ObtainWrite(osi_rwlock_t *lockp)
{
    long i;
    CRITICAL_SECTION *csp;
    osi_queue_t * lockRefH, *lockRefT;
    osi_lock_ref_t *lockRefp;
    DWORD tid = thrd_Current();

    if ((i=lockp->type) != 0) {
        /* not the base type: delegate to the installed lock package */
        if (i >= 0 && i < OSI_NLOCKTYPES)
            (osi_lockOps[i]->ObtainWriteProc)(lockp);
        return;
    }

    if (lockOrderValidation) {
        /* per-thread list of currently-held lock references, kept in TLS */
        lockRefH = (osi_queue_t *)TlsGetValue(tls_LockRefH);
        lockRefT = (osi_queue_t *)TlsGetValue(tls_LockRefT);

        /* level-0 locks are exempt from ordering checks */
        if (lockp->level != 0)
            lock_VerifyOrderRW(lockRefH, lockRefT, lockp);
    }

    /* otherwise we're the fast base type */
    csp = &osi_baseAtomicCS[lockp->atomicIndex];
    EnterCriticalSection(csp);

    /* recursion sanity checks: assert this thread does not already hold
     * the lock in either mode */
    if (lockp->flags & OSI_LOCKFLAG_EXCL) {
        osi_assertx(lockp->tid[0] != tid, "OSI_RWLOCK_WRITEHELD");
    } else {
        /* only the first OSI_RWLOCK_THREADS reader tids are recorded */
        for ( i=0; i < lockp->readers && i < OSI_RWLOCK_THREADS; i++ ) {
            osi_assertx(lockp->tid[i] != tid, "OSI_RWLOCK_READHELD");
        }
    }

    /* here we have the fast lock, so see if we can obtain the real lock */
    if (lockp->waiters > 0 || (lockp->flags & OSI_LOCKFLAG_EXCL) ||
        (lockp->readers > 0)) {
        /* contended: queue on the turnstile.  The post-wait asserts show
         * the waker hands us the lock (EXCL set, readers drained) before
         * signalling; osi_TWait presumably drops csp while sleeping and
         * reacquires it, like osi_TWaitExt -- confirm in its definition.
         */
        lockp->waiters++;
        osi_TWait(&lockp->d.turn, OSI_SLEEPINFO_W4WRITE, &lockp->flags, lockp->tid, csp);
        lockp->waiters--;
        osi_assertx(lockp->waiters >= 0, "waiters underflow");
        osi_assert(lockp->readers == 0 && (lockp->flags & OSI_LOCKFLAG_EXCL));
    } else {
        /* if we're here, all clear to set the lock */
        lockp->flags |= OSI_LOCKFLAG_EXCL;
        lockp->tid[0] = tid;
    }
    osi_assertx(lockp->readers == 0, "write lock readers present");

    LeaveCriticalSection(csp);

    if (lockOrderValidation) {
        /* push a reference for this lock onto the head of the thread's
         * held-lock list and store the updated list back in TLS */
        lockRefp = lock_GetLockRef(lockp, OSI_LOCK_RW);
        osi_QAddH(&lockRefH, &lockRefT, &lockRefp->q);
        TlsSetValue(tls_LockRefH, lockRefH);
        TlsSetValue(tls_LockRefT, lockRefT);
    }
}
Пример #4
0
/* utility function to atomically (with respect to WakeSched)
 * release an atomic counter spin lock and sleep on an
 * address (value).
 * Called with no locks held (other than releasep, which is dropped here).
 *
 * sleepValue: the address/value to sleep on; hashed to pick a bucket.
 * releasep:   the caller's critical section, released only after this
 *             thread is queued in the sleep hash, so a waker that runs
 *             in between still finds us (the semaphore retains the
 *             signal even if it fires before we block -- standard Win32
 *             semaphore counting semantics).
 */
void osi_SleepSpin(LONG_PTR sleepValue, CRITICAL_SECTION *releasep)
{
    int code;
    osi_sleepInfo_t *sp;
    CRITICAL_SECTION *csp;

    /* per-thread sleep-info record, allocated lazily and cached in TLS */
    sp = TlsGetValue(osi_SleepSlot);
    if (sp == NULL) {
	sp = osi_AllocSleepInfo();
	TlsSetValue(osi_SleepSlot, sp);
    }
    else {
	/* reuse: clear all state bits from the previous sleep */
	_InterlockedAnd(&sp->states, 0);
    }
    sp->waitFor = 0;
    sp->value = sleepValue;
    sp->tidp = NULL;
    sp->idx = osi_SLEEPHASH(sleepValue);
    /* queue ourselves in the sleep hash bucket under the bucket lock */
    csp = &osi_critSec[sp->idx];
    EnterCriticalSection(csp);
    osi_QAddT((osi_queue_t **) &osi_sleepers[sp->idx], (osi_queue_t **) &osi_sleepersEnd[sp->idx], &sp->q);
    _InterlockedOr(&sp->states, OSI_SLEEPINFO_INHASH);
    LeaveCriticalSection(csp);
    /* only now is it safe to release the caller's lock */
    LeaveCriticalSection(releasep);
    InterlockedIncrement(&osi_totalSleeps);	/* stats */
    while(1) {
	/* wait */
	code = WaitForSingleObject(sp->sema,
				    /* timeout */ INFINITE);

	/* if the reason for the wakeup was that we were signalled,
	* break out, otherwise try again, since the semaphore count is
	* decreased only when we get WAIT_OBJECT_0 back.
	*/
	if (code == WAIT_OBJECT_0) break;
    }

    /* now clean up */
    EnterCriticalSection(csp);

    /* must be signalled */
    osi_assert(sp->states & OSI_SLEEPINFO_SIGNALLED);

    /* free the sleep structure, must be done under bucket lock
     * so that we can check reference count and serialize with
     * those who change it.
     */
    osi_FreeSleepInfo(sp);

    LeaveCriticalSection(csp);
}
Пример #5
0
/* osi_TWaitExt -- queue this thread on turnstile turnp and sleep until
 * signalled.
 *
 * Called with the critical section *releasep held; it is released while
 * the thread sleeps and reacquired before returning, since the caller
 * commonly still needs it.
 *
 * waitFor: OSI_SLEEPINFO_W4* code stored in the sleep record so the
 *          waker knows what this thread is waiting for.
 * patchp:  address (e.g. of the lock's flags word) saved in sp->value;
 *          presumably updated by the waker on our behalf -- confirm
 *          against the wake-side code, which is not visible here.
 * tidp:    location where the waker can record an owning thread id,
 *          or NULL.
 * prepend: nonzero queues at the head of the turnstile, zero at the
 *          tail (FIFO).
 */
void osi_TWaitExt(osi_turnstile_t *turnp, int waitFor, void *patchp, DWORD *tidp, CRITICAL_SECTION *releasep, int prepend)
{
    osi_sleepInfo_t *sp;
    unsigned int code;

    /* per-thread sleep-info record, allocated lazily and cached in TLS */
    sp = TlsGetValue(osi_SleepSlot);
    if (sp == NULL) {
	sp = osi_AllocSleepInfo();
	TlsSetValue(osi_SleepSlot, sp);
    }
    else {
	/* reuse: clear all state bits from the previous sleep */
	_InterlockedAnd(&sp->states, 0);
    }
    sp->waitFor = waitFor;
    sp->value = (LONG_PTR) patchp;
    sp->tidp = tidp;
    sp->idx = -1;               /* not in the sleep hash, unlike osi_SleepSpin */
    if (prepend)
        osi_QAddH((osi_queue_t **) &turnp->firstp, (osi_queue_t **) &turnp->lastp, &sp->q);
    else
        osi_QAddT((osi_queue_t **) &turnp->firstp, (osi_queue_t **) &turnp->lastp, &sp->q);
    LeaveCriticalSection(releasep);

    /* now wait for the signal */
    while(1) {
        /* wait */
        code = WaitForSingleObject(sp->sema,
                                    /* timeout */ INFINITE);

        /* if the reason for the wakeup was that we were signalled,
         * break out, otherwise try again, since the semaphore count is
         * decreased only when we get WAIT_OBJECT_0 back.
         */
        if (code == WAIT_OBJECT_0) break;
    }	/* while we're waiting */

    /* we're the only one who should be looking at or changing this
     * structure after it gets signalled.  Sema sp->sema isn't signalled
     * any longer after we're back from WaitForSingleObject, so we can
     * free this element directly.
     */
    osi_assert(sp->states & OSI_SLEEPINFO_SIGNALLED);

    osi_FreeSleepInfo(sp);

    /* reobtain, since caller commonly needs it */
    EnterCriticalSection(releasep);
}
Пример #6
0
/* lock_ObtainMutex -- acquire mutex lockp exclusively.
 *
 * Non-base lock types (lockp->type != 0) are dispatched through the
 * osi_lockOps vector; the rest of this routine implements only the
 * fast base type.  Blocks on the lock's turnstile while another
 * thread holds the mutex or other waiters are queued.
 */
void lock_ObtainMutex(struct osi_mutex *lockp)
{
    long i;
    CRITICAL_SECTION *csp;
    osi_queue_t * lockRefH, *lockRefT;
    osi_lock_ref_t *lockRefp;

    if ((i=lockp->type) != 0) {
        /* not the base type: delegate to the installed lock package */
        if (i >= 0 && i < OSI_NLOCKTYPES)
            (osi_lockOps[i]->ObtainMutexProc)(lockp);
        return;
    }

    /* otherwise we're the fast base type */
    csp = &osi_baseAtomicCS[lockp->atomicIndex];
    EnterCriticalSection(csp);

    if (lockOrderValidation) {
        /* per-thread list of currently-held lock references, kept in TLS.
         * NOTE(review): read here after entering the base critical
         * section, whereas lock_ObtainWrite reads them before entering
         * it -- presumably both are safe (TLS is per-thread), but worth
         * confirming the intended convention.
         */
        lockRefH = (osi_queue_t *)TlsGetValue(tls_LockRefH);
        lockRefT = (osi_queue_t *)TlsGetValue(tls_LockRefT);

        /* level-0 locks are exempt from ordering checks */
        if (lockp->level != 0)
            lock_VerifyOrderMX(lockRefH, lockRefT, lockp);
    }

    /* here we have the fast lock, so see if we can obtain the real lock */
    if (lockp->waiters > 0 || (lockp->flags & OSI_LOCKFLAG_EXCL)) {
        /* contended: queue on the turnstile.  The post-wait assert shows
         * the waker sets OSI_LOCKFLAG_EXCL before signalling; the mutex
         * tid is a scalar here, hence &lockp->tid (lock_ObtainWrite
         * passes its tid array directly). */
        lockp->waiters++;
        osi_TWait(&lockp->d.turn, OSI_SLEEPINFO_W4WRITE, &lockp->flags, &lockp->tid, csp);
        lockp->waiters--;
        osi_assertx(lockp->waiters >= 0, "waiters underflow");
        osi_assert(lockp->flags & OSI_LOCKFLAG_EXCL);
    } else {
        /* if we're here, all clear to set the lock */
        lockp->flags |= OSI_LOCKFLAG_EXCL;
        lockp->tid = thrd_Current();
    }

    LeaveCriticalSection(csp);

    if (lockOrderValidation) {
        /* push a reference for this lock onto the head of the thread's
         * held-lock list and store the updated list back in TLS */
        lockRefp = lock_GetLockRef(lockp, OSI_LOCK_MUTEX);
        osi_QAddH(&lockRefH, &lockRefT, &lockRefp->q);
        TlsSetValue(tls_LockRefH, lockRefH);
        TlsSetValue(tls_LockRefT, lockRefT);
    }
}
Пример #7
0
/* lock_ConvertRToW -- upgrade a held read lock on lockp to a write lock.
 *
 * The caller must hold a read lock (asserted below).  On return the
 * caller holds the write lock.  If other readers remain, this thread
 * queues at the tail of the turnstile and sleeps until the last reader
 * hands it the lock; the upgrade is therefore not atomic with respect
 * to other threads queued on the lock.
 */
void lock_ConvertRToW(osi_rwlock_t *lockp)
{
    long i;
    CRITICAL_SECTION *csp;
    DWORD tid = thrd_Current();

    if ((i = lockp->type) != 0) {
        /* not the base type: delegate to the installed lock package */
        if (i >= 0 && i < OSI_NLOCKTYPES)
            (osi_lockOps[i]->ConvertRToWProc)(lockp);
        return;
    }

    /* otherwise we're the fast base type */
    csp = &osi_baseAtomicCS[lockp->atomicIndex];
    EnterCriticalSection(csp);

    osi_assertx(!(lockp->flags & OSI_LOCKFLAG_EXCL), "write lock held");
    osi_assertx(lockp->readers > 0, "read lock not held");

    /* Remove our tid from the recorded-reader list, shifting the rest
     * down.  Every index must be bounded by OSI_RWLOCK_THREADS: only
     * the first OSI_RWLOCK_THREADS reader tids are recorded (see the
     * matching guard in lock_ObtainWrite), so indexing by
     * lockp->readers alone could run past the end of tid[].
     */
    for ( i=0; i < lockp->readers && i < OSI_RWLOCK_THREADS; i++) {
        if ( lockp->tid[i] == tid ) {
            for ( ; i < lockp->readers - 1 && i < OSI_RWLOCK_THREADS - 1; i++)
                lockp->tid[i] = lockp->tid[i+1];
            lockp->tid[i] = 0;
            break;
        }
    }

    if (--(lockp->readers) == 0) {
        /* we were the last reader: convert read lock to write lock now */
        lockp->flags |= OSI_LOCKFLAG_EXCL;
        lockp->tid[0] = tid;
    } else {
        osi_assertx(lockp->readers > 0, "read lock underflow");

        /* other readers remain: wait at the tail of the turnstile until
         * the last reader grants us the write lock */
        lockp->waiters++;
        osi_TWaitExt(&lockp->d.turn, OSI_SLEEPINFO_W4WRITE, &lockp->flags, lockp->tid, csp, FALSE);
        lockp->waiters--;
        osi_assertx(lockp->waiters >= 0, "waiters underflow");
        osi_assert(lockp->readers == 0 && (lockp->flags & OSI_LOCKFLAG_EXCL));
    }

    LeaveCriticalSection(csp);
}
Пример #8
0
/* Scanner thread #1: polls until both modifier threads have finished
 * (done >= 2), validating the a + b == 100 invariant on every pass
 * under the proper lock hierarchy, then bumps 'done' itself.
 * The parm argument is unused.
 */
unsigned long main_Scan1(unsigned long parm)
{
    for (;;) {
        osi_Log0(main_logp, "scan1");

        /* check to see if we're done; on break we still hold the
         * read lock, released after the loop */
        lock_ObtainRead(&main_doneRWLock);
        lock_AssertRead(&main_doneRWLock);
        if (done >= 2)
            break;
        lock_ReleaseRead(&main_doneRWLock);

        /* check state for consistency under the aMutex -> bRWLock order */
        lock_ObtainMutex(&main_aMutex);
        lock_AssertMutex(&main_aMutex);
        Sleep(0);
        lock_ObtainRead(&main_bRWLock);
        Sleep(0);
        osi_assert(a + b == 100);
        lock_ReleaseRead(&main_bRWLock);
        Sleep(0);
        lock_ReleaseMutex(&main_aMutex);

        /* get a read lock here to test people getting stuck on RW lock alone */
        lock_ObtainRead(&main_bRWLock);
        Sleep(0);
        lock_ReleaseRead(&main_bRWLock);

        s1Loops++;

        osi_Log2(main_logp, "scan1 done %d %d", s1Loops, 2);
    }

    /* drop the read lock held across the break, then record our exit */
    lock_ReleaseRead(&main_doneRWLock);
    lock_ObtainWrite(&main_doneRWLock);
    lock_AssertWrite(&main_doneRWLock);
    done++;
    lock_ReleaseWrite(&main_doneRWLock);
    return 0;
}