Example 1
/*
 *	Acquire a usimple_lock.
 *
 *	MACH_RT:  Returns with preemption disabled.  Note
 *	that the hw_lock routines are responsible for
 *	maintaining preemption state.
 */
void
usimple_lock(
	usimple_lock_t	l)
{
	int i;
	unsigned int	timeouttb;				/* Used to convert time to timebase ticks */
	pc_t		pc;
#if	ETAP_LOCK_TRACE
	etap_time_t	start_wait_time;
	int		no_miss_info = 0;
#endif	/* ETAP_LOCK_TRACE */
#if	USLOCK_DEBUG
	int		count = 0;
#endif 	/* USLOCK_DEBUG */

	OBTAIN_PC(pc, l);
	USLDBG(usld_lock_pre(l, pc));
#if	ETAP_LOCK_TRACE
	ETAP_TIME_CLEAR(start_wait_time);
#endif	/* ETAP_LOCK_TRACE */

	while (!hw_lock_try(&l->interlock)) {
		ETAPCALL(if (no_miss_info++ == 0)
			start_wait_time = etap_simplelock_miss(l));
		while (hw_lock_held(&l->interlock)) {
			/*
			 *	Spin watching the lock value in cache,
			 *	without consuming external bus cycles.
			 *	On most SMP architectures, the atomic
			 *	instruction(s) used by hw_lock_try
			 *	cost much, much more than an ordinary
			 *	memory read.
			 */
#if	USLOCK_DEBUG
			if (count++ > max_lock_loops
#if	MACH_KDB && NCPUS > 1
			    && l != &kdb_lock
#endif	/* MACH_KDB && NCPUS > 1 */
			    ) {
				if (l == &printf_lock) {
					return;
				}
				mp_disable_preemption();
#if MACH_KDB
				db_printf("cpu %d looping on simple_lock(%x)"
					  "(=%x)",
					  cpu_number(), l,
					  *hw_lock_addr(l->interlock));
				db_printf(" called by %x\n", pc);
#endif
				Debugger("simple lock deadlock detection");
				count = 0;
				mp_enable_preemption();
			}
#endif 	/* USLOCK_DEBUG */
		}
	}
	ETAPCALL(etap_simplelock_hold(l, pc, start_wait_time));
	USLDBG(usld_lock_post(l, pc));
}
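
The inner while loop above is the classic test-and-test-and-set pattern described in the comment: after a failed atomic attempt, spin on an ordinary read of the lock word until it appears free, then retry the atomic. A minimal standalone sketch of the same idea with C11 atomics (the ttas_* names are illustrative, not part of the kernel API):

#include <stdatomic.h>
#include <stdbool.h>

typedef struct {
	atomic_bool locked;		/* false = free, true = held */
} ttas_lock_t;

static void
ttas_lock(ttas_lock_t *l)
{
	for (;;) {
		/* Attempt the atomic test-and-set (analogous to hw_lock_try). */
		if (!atomic_exchange_explicit(&l->locked, true,
		    memory_order_acquire))
			return;
		/*
		 * Miss: spin on a plain load (analogous to hw_lock_held)
		 * so the cache line stays shared on this CPU and no
		 * coherence traffic is generated until the holder
		 * releases the lock.
		 */
		while (atomic_load_explicit(&l->locked, memory_order_relaxed))
			;
	}
}

static void
ttas_unlock(ttas_lock_t *l)
{
	atomic_store_explicit(&l->locked, false, memory_order_release);
}

The relaxed load in the inner loop is the cheap "ordinary memory read" the comment refers to; only the exchange that actually takes the lock pays the cost of an atomic read-modify-write.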
Example 2
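/*
 *	Release a simple_lock without ETAP tracing.  Note
 *	that the hw_lock routines are responsible for
 *	maintaining preemption state.
 */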
void
simple_unlock_no_trace(
	simple_lock_t	l)
{
	pc_t	pc;

	OBTAIN_PC(pc, l);
	USLDBG(usld_unlock(l, pc));
	hw_lock_unlock(&l->interlock);
}
Example 3
/*
 *	Release a usimple_lock.
 *
 *	MACH_RT:  Returns with preemption enabled.  Note
 *	that the hw_lock routines are responsible for
 *	maintaining preemption state.
 */
void
usimple_unlock(
	usimple_lock_t	l)
{
	pc_t	pc;

	OBTAIN_PC(pc, l);
	USLDBG(usld_unlock(l, pc));
	ETAPCALL(etap_simplelock_unlock(l));
	hw_lock_unlock(&l->interlock);
}
Example 4
/*
 *	Release a usimple_lock.
 *
 *	Returns with preemption enabled.  Note
 *	that the hw_lock routines are responsible for
 *	maintaining preemption state.
 */
void
usimple_unlock(
	usimple_lock_t	l)
{
#ifndef	MACHINE_SIMPLE_LOCK
	DECL_PC(pc);

	OBTAIN_PC(pc, l);
	USLDBG(usld_unlock(l, pc));
	hw_lock_unlock(&l->interlock);
#else
	simple_unlock_rwmb((simple_lock_t)l);
#endif
}
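
The simple_unlock_rwmb name suggests an unlock that also acts as a read/write memory barrier, so that everything done inside the critical section becomes visible to other CPUs before the lock is observed as free (this reading of the suffix is an assumption; only the call site appears here). A hedged C11 sketch of that ordering requirement, with hypothetical names:

#include <stdatomic.h>

static int		shared_data;	/* protected by lock_word */
static atomic_int	lock_word;	/* 1 = held, 0 = free */

static void
update_and_unlock(int value)
{
	shared_data = value;		/* write inside the critical section */
	/*
	 * The release store acts as the barrier: the write to
	 * shared_data cannot be reordered after the store that
	 * marks the lock free, so the next acquirer sees it.
	 */
	atomic_store_explicit(&lock_word, 0, memory_order_release);
}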
Example 5
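/*
 *	Conditionally acquire a simple_lock without ETAP tracing.
 *	Returns nonzero on success, zero if the lock was held.
 */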
int
simple_lock_try_no_trace(
	simple_lock_t	l)
{
	pc_t		pc;
	unsigned int	success;

	OBTAIN_PC(pc, l);
	USLDBG(usld_lock_try_pre(l, pc));
	if ((success = hw_lock_try(&l->interlock))) {
		USLDBG(usld_lock_try_post(l, pc));
	}
	return success;
}
Example 6
/*
 *	Conditionally acquire a usimple_lock.
 *
 *	MACH_RT:  On success, returns with preemption disabled.
 *	On failure, returns with preemption in the same state
 *	as when first invoked.  Note that the hw_lock routines
 *	are responsible for maintaining preemption state.
 *
 *	XXX No stats are gathered on a miss; I preserved this
 *	behavior from the original assembly-language code, but
 *	doesn't it make sense to log misses?  XXX
 */
unsigned int
usimple_lock_try(
	usimple_lock_t	l)
{
	pc_t		pc;
	unsigned int	success;
	etap_time_t	zero_time;

	OBTAIN_PC(pc, l);
	USLDBG(usld_lock_try_pre(l, pc));
	if ((success = hw_lock_try(&l->interlock))) {
		USLDBG(usld_lock_try_post(l, pc));
		ETAP_TIME_CLEAR(zero_time);
		ETAPCALL(etap_simplelock_hold(l, pc, zero_time));
	}
	return success;
}
Example 7
/*
 *	Conditionally acquire a usimple_lock.
 *
 *	On success, returns with preemption disabled.
 *	On failure, returns with preemption in the same state
 *	as when first invoked.  Note that the hw_lock routines
 *	are responsible for maintaining preemption state.
 *
 *	XXX No stats are gathered on a miss; I preserved this
 *	behavior from the original assembly-language code, but
 *	doesn't it make sense to log misses?  XXX
 */
unsigned int
usimple_lock_try(
	usimple_lock_t	l)
{
#ifndef	MACHINE_SIMPLE_LOCK
	unsigned int	success;
	DECL_PC(pc);

	OBTAIN_PC(pc, l);
	USLDBG(usld_lock_try_pre(l, pc));
	if ((success = hw_lock_try(&l->interlock))) {
		USLDBG(usld_lock_try_post(l, pc));
	}
	return success;
#else
	return(simple_lock_try((simple_lock_t)l));
#endif
}
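
A conditional acquire is typically used where the caller can back off on a miss, for instance to take a second lock without risking deadlock against another CPU acquiring the two locks in the opposite order. A standalone sketch of that pattern with C11 atomics (try_lock_t and its helpers are illustrative, not the kernel's types):

#include <stdatomic.h>
#include <stdbool.h>

typedef struct {
	atomic_bool locked;
} try_lock_t;

/* Returns true on success, leaving the lock held; false on a miss. */
static bool
try_lock(try_lock_t *l)
{
	return !atomic_exchange_explicit(&l->locked, true,
	    memory_order_acquire);
}

static void
try_unlock(try_lock_t *l)
{
	atomic_store_explicit(&l->locked, false, memory_order_release);
}

/*
 * Take two locks without deadlocking: hold the first, try the
 * second, and back off completely on a miss before retrying.
 */
static void
lock_both(try_lock_t *a, try_lock_t *b)
{
	for (;;) {
		while (!try_lock(a))
			;
		if (try_lock(b))
			return;		/* both held */
		try_unlock(a);		/* miss: release and retry */
	}
}

Because the first lock is released on every miss of the second, neither CPU can sit holding one lock while spinning forever on the other.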
Example 8
/*
 *	Acquire a usimple_lock.
 *
 *	Returns with preemption disabled.  Note
 *	that the hw_lock routines are responsible for
 *	maintaining preemption state.
 */
void
usimple_lock(
	usimple_lock_t	l)
{
#ifndef	MACHINE_SIMPLE_LOCK
	DECL_PC(pc);

	OBTAIN_PC(pc, l);
	USLDBG(usld_lock_pre(l, pc));

	if (!hw_lock_to(&l->interlock, LockTimeOutTSC))	/* Try to get the lock with a timeout */
		panic("simple lock deadlock detection: lock=%p, cpu=%d, owning thread=0x%x", l, cpu_number(), l->interlock.lock_data);

	USLDBG(usld_lock_post(l, pc));
#else
	simple_lock((simple_lock_t)l);
#endif
}
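
Here the unbounded spin of the earlier examples is replaced by hw_lock_to, which gives up after the LockTimeOutTSC bound so that a lost interlock turns into a panic carrying the owner's lock data rather than a silent hang. A standalone sketch of a bounded spin against a monotonic-clock deadline (the names and the nanosecond timeout are illustrative; the kernel counts the bound in timestamp ticks):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>
#include <time.h>

typedef struct {
	atomic_bool locked;
} to_lock_t;

static uint64_t
now_ns(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (uint64_t)ts.tv_sec * 1000000000ull + (uint64_t)ts.tv_nsec;
}

/* Spin until the lock is taken or timeout_ns elapses; true on success. */
static bool
lock_with_timeout(to_lock_t *l, uint64_t timeout_ns)
{
	uint64_t deadline = now_ns() + timeout_ns;

	do {
		/* Attempt the atomic acquire. */
		if (!atomic_exchange_explicit(&l->locked, true,
		    memory_order_acquire))
			return true;
		/* Miss: spin on plain loads until free or timed out. */
		while (atomic_load_explicit(&l->locked, memory_order_relaxed) &&
		    now_ns() < deadline)
			;
	} while (now_ns() < deadline);

	return false;		/* caller decides whether to panic, as above */
}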
Example 9
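/*
 *	Acquire a simple_lock without ETAP tracing, spinning
 *	until the lock is available.
 */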
void
simple_lock_no_trace(
	simple_lock_t	l)
{
	pc_t		pc;

	OBTAIN_PC(pc, l);
	USLDBG(usld_lock_pre(l, pc));
	while (!hw_lock_try(&l->interlock)) {
		while (hw_lock_held(&l->interlock)) {
			/*
			 *	Spin watching the lock value in cache,
			 *	without consuming external bus cycles.
			 *	On most SMP architectures, the atomic
			 *	instruction(s) used by hw_lock_try
			 *	cost much, much more than an ordinary
			 *	memory read.
			 */
		}
	}
	USLDBG(usld_lock_post(l, pc));
}
Example 10
void
etap_mutex_unlock(
	mutex_t		*l)
{
        unsigned short	dynamic = 0;
        unsigned short	trace   = 0;
        etap_time_t	total_time;
        etap_time_t	stop_hold_time;
        pc_t		pc;

	OBTAIN_PC(pc, l);
        ETAP_STAMP(lock_event_table(l), trace, dynamic);

        /*
         *  Calculate & collect hold time data only if
         *  the hold tracing was enabled throughout the
         *  whole operation.  This prevents collection of
         *  bogus data caused by mid-operation trace changes.
         *
         */

        if (ETAP_DURATION_ENABLED(trace) && ETAP_WHOLE_OP(l)) {
		ETAP_TIMESTAMP(stop_hold_time);
		ETAP_TOTAL_TIME(total_time, stop_hold_time,
				l->u.s.start_hold_time);
		CUM_HOLD_ACCUMULATE(l->cbuff_entry, total_time, dynamic, trace);
		MON_ASSIGN_PC(l->end_pc, pc, trace);
		MON_DATA_COLLECT(l,
				 l,
				 total_time,
				 MUTEX_LOCK,
				 MON_DURATION,
				 trace);
        }
        ETAP_CLEAR_TRACE_DATA(l);
}
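
The hold time is computed as the difference between a stop timestamp taken at unlock and the start timestamp recorded when the lock was granted, and it is accumulated only if tracing was enabled for the whole operation. A standalone sketch of the same accounting against a monotonic clock (the lock_stats_t layout is hypothetical, not ETAP's):

#include <stdint.h>
#include <time.h>

/* Hypothetical per-lock instrumentation state. */
typedef struct {
	uint64_t	start_hold_ns;	/* stamped when the lock is taken */
	uint64_t	total_hold_ns;	/* cumulative hold time */
	uint64_t	hold_count;	/* number of hold intervals recorded */
} lock_stats_t;

static uint64_t
stats_now_ns(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (uint64_t)ts.tv_sec * 1000000000ull + (uint64_t)ts.tv_nsec;
}

/* Called just after the lock is acquired. */
static void
stats_lock_acquired(lock_stats_t *s)
{
	s->start_hold_ns = stats_now_ns();
}

/* Called just before the lock is released. */
static void
stats_lock_released(lock_stats_t *s)
{
	uint64_t stop = stats_now_ns();

	/* Accumulate only if a start stamp was actually recorded. */
	if (s->start_hold_ns != 0) {
		s->total_hold_ns += stop - s->start_hold_ns;
		s->hold_count++;
		s->start_hold_ns = 0;
	}
}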