Example #1
void
mutex_destroy(kmutex_t *mp)
{
	mutex_impl_t *lp = (mutex_impl_t *)mp;

	if (lp->m_owner == 0 && !MUTEX_HAS_WAITERS(lp)) {
		MUTEX_DESTROY(lp);
	} else if (MUTEX_TYPE_SPIN(lp)) {
		LOCKSTAT_RECORD0(LS_MUTEX_DESTROY_RELEASE, lp);
		MUTEX_DESTROY(lp);
	} else if (MUTEX_TYPE_ADAPTIVE(lp)) {
		LOCKSTAT_RECORD0(LS_MUTEX_DESTROY_RELEASE, lp);
		if (MUTEX_OWNER(lp) != curthread)
			mutex_panic("mutex_destroy: not owner", lp);
		if (MUTEX_HAS_WAITERS(lp)) {
			turnstile_t *ts = turnstile_lookup(lp);
			turnstile_exit(lp);
			if (ts != NULL)
				mutex_panic("mutex_destroy: has waiters", lp);
		}
		MUTEX_DESTROY(lp);
	} else {
		mutex_panic("mutex_destroy: bad mutex", lp);
	}
}
Example #2
/** \brief Release the acquired mutex
 *
 * \b os_mutexUnlock calls \b pthread_mutex_unlock to release
 * the posix \b mutex.
 */
void os_mutexUnlock (os_mutex *mutex)
{
    assert (mutex != NULL);
#ifdef OSPL_STRICT_MEM
    assert (mutex->signature == OS_MUTEX_MAGIC_SIG);
#endif

    if (mutex->name[0] != '\0') {
        STATUS status;
        SEM_ID id;
#if 1
        /* NB: id lookup in cache is for debugging reasons */
        id = get_mutex (mutex);
#else
        id = pa_ldvoidp (&mutex->info.id);
#endif
        cas_mutex_info_id (mutex, id, 0);
        status = semGive (id);
        if (status != OK) {
            mutex_panic("unlock: semGive failed", status);
        }
    } else {
        int res = pthread_mutex_unlock (&mutex->info.posix_mutex);
        if (res != 0) {
            mutex_panic ("unlock: semGive failed", res);
        }
    }
}
Example #3
static SEM_ID get_mutex (os_mutex *mutex)
{
    SEM_ID result;
    int options;
    semTake (cache_lock, WAIT_FOREVER);
    result = (SEM_ID) ut_get (cache, mutex);

    if (!result)
    {
        /* In single process mode only "private" variables are required */
        options = (SEM_Q_PRIORITY | SEM_DELETE_SAFE);
        /* Set option for priority inheritance if feature is enabled */
        if (ospl_mtx_prio_inherit) {
            options = options | SEM_INVERSION_SAFE;
        }
        result = semOpen(mutex->name, SEM_TYPE_MUTEX, SEM_EMPTY, options, 0, NULL);
        if (result) {
            (void) ut_tableInsert ((ut_table) cache, mutex, result);
        } else {
            mutex_panic ("get_mutex", 0);
        }
    }

    semGive (cache_lock);
    return result;
}
Example #4
/** \brief Destroy the mutex
 *
 * \b os_mutexDestroy calls \b pthread_mutex_destroy to destroy the
 * posix \b mutex.
 */
void os_mutexDestroy (os_mutex *mutex)
{
    assert (mutex != NULL);
#ifdef OSPL_STRICT_MEM
    assert (mutex->signature == OS_MUTEX_MAGIC_SIG);
#endif

    if (mutex->name[0] != '\0') {
        SEM_ID id;

        /* shared mutex */
        id = ut_get (cache, mutex);
        cas_mutex_info_id (mutex, 0, invalid_sem_id);

        if (id) {
            semClose (id);
            semTake(cache_lock, WAIT_FOREVER);
            ut_remove (cache, mutex);
            semGive(cache_lock);
        }
    } else {
        int res = pthread_mutex_destroy (&mutex->info.posix_mutex);
        if (res != 0) {
            mutex_panic ("pthread_mutex_destroy failed", res);
        }
    }

#ifdef OSPL_STRICT_MEM
    mutex->signature = 0;
#endif
}
Example #5
os_result os_mutexInit (os_mutex *mutex, const os_mutexAttr *mutexAttr)
{
    os_mutexAttr defAttr;
    os_result rv;
    int options;
    SEM_ID id;

    assert (mutex != NULL);
#ifdef OSPL_STRICT_MEM
    assert (mutex->signature != OS_MUTEX_MAGIC_SIG);
#endif

    if(!mutexAttr) {
        os_mutexAttrInit(&defAttr);
        mutexAttr = &defAttr;
    }

    options = (SEM_Q_PRIORITY | SEM_DELETE_SAFE);
    /* Set option for priority inheritance if feature is enabled */
    if (ospl_mtx_prio_inherit) {
        options = options | SEM_INVERSION_SAFE;
    }
    if (mutexAttr->scopeAttr == OS_SCOPE_SHARED) {
        /* create named mutex using shared address as unique name */
        snprintf (mutex->name, sizeof (mutex->name), "/%lu", (os_uint32) mutex);
        id = semOpen (mutex->name, SEM_TYPE_MUTEX, SEM_EMPTY, options, OM_CREATE|OM_EXCL, NULL);
        /* The semaphore may be left over from a previous run; in that case
           semOpen with OM_CREATE|OM_EXCL fails and the leftover object is re-used below */
        if (id != NULL) {
            /* a fresh one, initialise it and cache the SEM_ID */
            pa_st32 ((void *) &mutex->info.id, 0);
            semTake (cache_lock, WAIT_FOREVER);
            (void) ut_tableInsert ((ut_table) cache, mutex, id);
            semGive (cache_lock);
        } else {
            /* re-using an old one: it should have been destroyed and hence should have a marker in info.id */
            id = get_mutex (mutex);
            if (id != NULL) {
                semTake (id, WAIT_FOREVER);
                semGive (id);
                /*cas_mutex_info_id ((void*)&mutex->info.id, invalid_sem_id, 0);*/
            } else {
                mutex_panic ("init: attempt to reuse semaphore currently in use", 0);
            }
        }

        rv = (id != NULL) ? os_resultSuccess : os_resultFail;
    } else {
        int result;
        mutex->name[0] = '\0';
        /* TODO attrs */
        result = pthread_mutex_init(&mutex->info.posix_mutex, NULL);
        rv = (result == 0) ? os_resultSuccess : os_resultFail;
    }

#ifdef OSPL_STRICT_MEM
    mutex->signature = OS_MUTEX_MAGIC_SIG;
#endif
    return rv;
}
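The shared-scope branch above is selected through the attribute structure. A minimal caller-side sketch, assuming only the names that appear in this listing (os_mutexAttr, os_mutexAttrInit, scopeAttr, OS_SCOPE_SHARED, os_mutexInit); the header name and the helper function are illustrative:

#include "os_mutex.h"   /* assumed header name for the os_* mutex API shown here */

/* Illustrative helper: request the named-semaphore (shared) variant of the
 * mutex, so os_mutexInit above takes the semOpen path instead of pthreads. */
os_result init_shared_lock (os_mutex *m)   /* m is expected to live in shared memory */
{
    os_mutexAttr attr;

    os_mutexAttrInit (&attr);             /* start from the default attributes */
    attr.scopeAttr = OS_SCOPE_SHARED;     /* field tested by os_mutexInit above */
    return os_mutexInit (m, &attr);       /* os_resultSuccess or os_resultFail */
}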
Example #6
static void cas_mutex_info_id (os_mutex *mutex, SEM_ID expected, SEM_ID id)
{
    SEM_ID cached_id;
    do {
        cached_id = pa_ldvoidp ((void*) &mutex->info.id);
        if (cached_id != expected) {
            mutex_panic ("sem id mismatch in cas_mutex_info_id", 0);
        }
    } while (!pa_casvoidp ((void*) &mutex->info.id, expected, id));
}
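cas_mutex_info_id spins on a load/compare-and-swap pair: any value other than the expected SEM_ID means the cached lock state is corrupt and is fatal, while a CAS failure with the expected value still in place simply retries. A rough standalone analogue using C11 <stdatomic.h> instead of the pa_* primitives (all names here are illustrative, not from the original sources); with a single strong compare-exchange the separate load disappears, so no retry loop is needed:

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

/* Illustrative analogue of cas_mutex_info_id: swap a pointer-sized marker from
 * 'expected' to 'desired', treating any other observed value as fatal. */
static void swap_marker (_Atomic(void *) *slot, void *expected, void *desired)
{
    void *seen = expected;

    /* On failure, atomic_compare_exchange_strong stores the value it actually
     * observed into 'seen'; that observation replaces the separate pa_ldvoidp
     * load in the original, so the mismatch check and the swap are one step. */
    if (!atomic_compare_exchange_strong (slot, &seen, desired)) {
        fprintf (stderr, "marker mismatch: slot did not hold the expected value\n");
        abort ();   /* the original would call mutex_panic here */
    }
}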
Example #7
/** \brief Try to acquire the mutex, immediately return if the mutex
 *         is already acquired by another thread
 *
 * \b os_mutexTryLock calls \b pthread_mutex_trylock to acquire
 * the posix \b mutex.
 */
os_result os_mutexTryLock (os_mutex *mutex)
{
    os_result rv = os_resultFail;

    assert (mutex != NULL);
#ifdef OSPL_STRICT_MEM
    assert (mutex->signature == OS_MUTEX_MAGIC_SIG);
#endif

    if (mutex->name[0] != '\0') {
        STATUS result;
        SEM_ID id;

        id = get_mutex (mutex);
        result = semTake(id, NO_WAIT);

        if (result == OK) {
            cas_mutex_info_id (mutex, 0, id);
            rv = os_resultSuccess;
        } else {
            if (os_getErrno() == S_objLib_OBJ_UNAVAILABLE) {
                rv = os_resultBusy;
            } else {
                mutex_panic ("trylock: semTake failed", result);
            }
        }
    } else {
        int res = pthread_mutex_trylock (&mutex->info.posix_mutex);

        if (res == 0) {
            rv = os_resultSuccess;
        } else if (res == EBUSY) {
            rv = os_resultBusy;
        } else {
            mutex_panic ("pthread_mutex_trylock failed", res);
        }
    }

    return rv;
}
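A minimal caller-side sketch of the trylock contract above, assuming the os_* API from this listing; the lock and function names are illustrative. In the implementation shown, any outcome other than success or contention panics before returning, so callers mainly distinguish os_resultSuccess from os_resultBusy:

#include "os_mutex.h"   /* assumed header name for the os_* mutex API shown here */

extern os_mutex stats_lock;   /* illustrative lock protecting some shared state */

void stats_flush_if_idle (void)
{
    os_result r = os_mutexTryLock (&stats_lock);

    if (r == os_resultSuccess) {
        /* lock acquired without blocking: do the work, then release */
        os_mutexUnlock (&stats_lock);
    } else if (r == os_resultBusy) {
        /* another thread holds the lock: skip this round rather than block */
    }
}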
Example #8
/*
 * mutex_vector_tryenter() is called from the assembly mutex_tryenter()
 * routine if the lock is held or is not of type MUTEX_ADAPTIVE.
 */
int
mutex_vector_tryenter(mutex_impl_t *lp)
{
	int s;

	if (MUTEX_TYPE_ADAPTIVE(lp))
		return (0);		/* we already tried in assembly */

	if (!MUTEX_TYPE_SPIN(lp)) {
		mutex_panic("mutex_tryenter: bad mutex", lp);
		return (0);
	}

	s = splr(lp->m_spin.m_minspl);
	if (lock_try(&lp->m_spin.m_spinlock)) {
		lp->m_spin.m_oldspl = (ushort_t)s;
		return (1);
	}
	splx(s);
	return (0);
}
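The spin path above raises the interrupt priority level with splr(), attempts lock_try(), and restores the level with splx() when the lock is busy. A rough userspace analogue of the same non-blocking acquire using a C11 atomic_flag; this is only a sketch of the try-without-blocking idea and does not model the spl/priority handling:

#include <stdatomic.h>
#include <stdbool.h>

/* Illustrative spinlock type, not from the original sources. Initialise the
 * flag with ATOMIC_FLAG_INIT before first use. */
typedef struct {
    atomic_flag locked;   /* clear == free, set == held */
} tiny_spinlock_t;

/* Returns true if the lock was acquired, false if it was already held;
 * never blocks, mirroring the tryenter contract above. */
static bool tiny_spin_tryenter (tiny_spinlock_t *lp)
{
    /* test_and_set returns the previous state: false means we took the lock */
    return !atomic_flag_test_and_set_explicit (&lp->locked, memory_order_acquire);
}

static void tiny_spin_exit (tiny_spinlock_t *lp)
{
    atomic_flag_clear_explicit (&lp->locked, memory_order_release);
}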
Example #9
/*
 * mutex_vector_exit() is called from mutex_exit() if the lock is not
 * adaptive, has waiters, or is not owned by the current thread (panic).
 */
void
mutex_vector_exit(mutex_impl_t *lp)
{
	turnstile_t *ts;

	if (MUTEX_TYPE_SPIN(lp)) {
		lock_clear_splx(&lp->m_spin.m_spinlock, lp->m_spin.m_oldspl);
		return;
	}

	if (MUTEX_OWNER(lp) != curthread) {
		mutex_panic("mutex_exit: not owner", lp);
		return;
	}

	ts = turnstile_lookup(lp);
	MUTEX_CLEAR_LOCK_AND_WAITERS(lp);
	if (ts == NULL)
		turnstile_exit(lp);
	else
		turnstile_wakeup(ts, TS_WRITER_Q, ts->ts_waiters, NULL);
	LOCKSTAT_RECORD0(LS_MUTEX_EXIT_RELEASE, lp);
}
Example #10
void os_mutexLock (os_mutex *mutex)
{
    if (os_mutexLock_s (mutex) != os_resultSuccess) {
        mutex_panic ("mutexLock failed", 0);
    }
}
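Since os_mutexLock panics instead of returning an error, callers can treat acquisition as infallible; a caller that prefers to handle failure itself can use os_mutexLock_s directly, as the wrapper above does. A small sketch of both styles (the lock and the shared variable are illustrative names):

#include "os_mutex.h"   /* assumed header name for the os_* mutex API shown here */

extern os_mutex state_lock;   /* illustrative */
extern int shared_state;      /* illustrative */

void update_state (int v)
{
    os_mutexLock (&state_lock);      /* panics internally if acquisition fails */
    shared_state = v;
    os_mutexUnlock (&state_lock);
}

os_result update_state_checked (int v)
{
    os_result r = os_mutexLock_s (&state_lock);   /* returns a status instead */

    if (r == os_resultSuccess) {
        shared_state = v;
        os_mutexUnlock (&state_lock);
    }
    return r;
}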
Example #11
/*
 * mutex_vector_enter() is called from the assembly mutex_enter() routine
 * if the lock is held or is not of type MUTEX_ADAPTIVE.
 */
void
mutex_vector_enter(mutex_impl_t *lp)
{
	kthread_id_t	owner;
	hrtime_t	sleep_time = 0;	/* how long we slept */
	uint_t		spin_count = 0;	/* how many times we spun */
	cpu_t 		*cpup, *last_cpu;
	extern cpu_t	*cpu_list;
	turnstile_t	*ts;
	volatile mutex_impl_t *vlp = (volatile mutex_impl_t *)lp;
	int		backoff;	/* current backoff */
	int		backctr;	/* ctr for backoff */
	int		sleep_count = 0;

	ASSERT_STACK_ALIGNED();

	if (MUTEX_TYPE_SPIN(lp)) {
		lock_set_spl(&lp->m_spin.m_spinlock, lp->m_spin.m_minspl,
		    &lp->m_spin.m_oldspl);
		return;
	}

	if (!MUTEX_TYPE_ADAPTIVE(lp)) {
		mutex_panic("mutex_enter: bad mutex", lp);
		return;
	}

	/*
	 * Adaptive mutexes must not be acquired from above LOCK_LEVEL.
	 * We can migrate after loading CPU but before checking CPU_ON_INTR,
	 * so we must verify by disabling preemption and loading CPU again.
	 */
	cpup = CPU;
	if (CPU_ON_INTR(cpup) && !panicstr) {
		kpreempt_disable();
		if (CPU_ON_INTR(CPU))
			mutex_panic("mutex_enter: adaptive at high PIL", lp);
		kpreempt_enable();
	}

	CPU_STATS_ADDQ(cpup, sys, mutex_adenters, 1);

	if (&plat_lock_delay) {
		backoff = 0;
	} else {
		backoff = BACKOFF_BASE;
	}

	for (;;) {
spin:
		spin_count++;
		/*
		 * Add an exponential backoff delay before trying again
		 * to touch the mutex data structure.
		 * The spin_count test and the call to nulldev prevent the
		 * compiler optimizer from eliminating the delay loop.
		 */
		if (&plat_lock_delay) {
			plat_lock_delay(&backoff);
		} else {
			for (backctr = backoff; backctr; backctr--) {
				if (!spin_count) (void) nulldev();
			};    /* delay */
			backoff = backoff << 1;			/* double it */
			if (backoff > BACKOFF_CAP) {
				backoff = BACKOFF_CAP;
			}

			SMT_PAUSE();
		}

		if (panicstr)
			return;

		if ((owner = MUTEX_OWNER(vlp)) == NULL) {
			if (mutex_adaptive_tryenter(lp))
				break;
			continue;
		}

		if (owner == curthread)
			mutex_panic("recursive mutex_enter", lp);

		/*
		 * If lock is held but owner is not yet set, spin.
		 * (Only relevant for platforms that don't have cas.)
		 */
		if (owner == MUTEX_NO_OWNER)
			continue;

		/*
		 * When searching the other CPUs, start with the one where
		 * we last saw the owner thread.  If owner is running, spin.
		 *
		 * We must disable preemption at this point to guarantee
		 * that the list doesn't change while we traverse it
		 * without the cpu_lock mutex.  While preemption is
		 * disabled, we must revalidate our cached cpu pointer.
		 */
		kpreempt_disable();
		if (cpup->cpu_next == NULL)
			cpup = cpu_list;
		last_cpu = cpup;	/* mark end of search */
		do {
			if (cpup->cpu_thread == owner) {
				kpreempt_enable();
				goto spin;
			}
		} while ((cpup = cpup->cpu_next) != last_cpu);
		kpreempt_enable();

		/*
		 * The owner appears not to be running, so block.
		 * See the Big Theory Statement for memory ordering issues.
		 */
		ts = turnstile_lookup(lp);
		MUTEX_SET_WAITERS(lp);
		membar_enter();

		/*
		 * Recheck whether owner is running after waiters bit hits
		 * global visibility (above).  If owner is running, spin.
		 *
		 * Since we are at ipl DISP_LEVEL, kernel preemption is
		 * disabled, however we still need to revalidate our cached
		 * cpu pointer to make sure the cpu hasn't been deleted.
		 */
		if (cpup->cpu_next == NULL)
			last_cpu = cpup = cpu_list;
		do {
			if (cpup->cpu_thread == owner) {
				turnstile_exit(lp);
				goto spin;
			}
		} while ((cpup = cpup->cpu_next) != last_cpu);
		membar_consumer();

		/*
		 * If owner and waiters bit are unchanged, block.
		 */
		if (MUTEX_OWNER(vlp) == owner && MUTEX_HAS_WAITERS(vlp)) {
			sleep_time -= gethrtime();
			(void) turnstile_block(ts, TS_WRITER_Q, lp,
			    &mutex_sobj_ops, NULL, NULL);
			sleep_time += gethrtime();
			sleep_count++;
		} else {
			turnstile_exit(lp);
		}
	}

	ASSERT(MUTEX_OWNER(lp) == curthread);

	if (sleep_time != 0) {
		/*
		 * Note: sleep_time is the sum of all the sleeping we did.
		 */
		LOCKSTAT_RECORD(LS_MUTEX_ENTER_BLOCK, lp, sleep_time);
	}

	/*
	 * We do not count a sleep as a spin.
	 */
	if (spin_count > sleep_count)
		LOCKSTAT_RECORD(LS_MUTEX_ENTER_SPIN, lp,
		    spin_count - sleep_count);

	LOCKSTAT_RECORD0(LS_MUTEX_ENTER_ACQUIRE, lp);
}
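The loop above separates two concerns: exponential backoff between probes of the lock word, and the decision to block on a turnstile once the owner is seen to be off-CPU. The backoff part in isolation, as a small sketch with made-up constants (the real code uses BACKOFF_BASE/BACKOFF_CAP and defers to plat_lock_delay when the platform provides it):

/* Illustrative constants; the kernel's BACKOFF_BASE/BACKOFF_CAP differ. */
#define	SKETCH_BACKOFF_BASE	50
#define	SKETCH_BACKOFF_CAP	1600

static void
sketch_wait_with_backoff(volatile int *lockword)
{
	int backoff = SKETCH_BACKOFF_BASE;
	volatile int i;

	while (*lockword != 0) {		/* lock appears held */
		for (i = 0; i < backoff; i++)
			;			/* volatile counter keeps the delay loop alive */
		backoff <<= 1;			/* double the delay ... */
		if (backoff > SKETCH_BACKOFF_CAP)
			backoff = SKETCH_BACKOFF_CAP;	/* ... up to the cap */
	}
	/* caller would now attempt the actual atomic acquire, as in the loop above */
}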