Example #1
/*
 * Get an exclusive spinlock the hard way.
 */
void
_mtx_spinlock(mtx_t *mtx)
{
	u_int	lock;
	u_int	nlock;
	int	bb = 1;
	int	bo;

	for (;;) {
		lock = mtx->mtx_lock;
		if (lock == 0) {
			nlock = MTX_EXCLUSIVE | 1;
			if (atomic_cmpset_int(&mtx->mtx_lock, 0, nlock)) {
				mtx->mtx_owner = curthread;
				break;
			}
		} else if ((lock & MTX_EXCLUSIVE) &&
			   mtx->mtx_owner == curthread) {
			KKASSERT((lock & MTX_MASK) != MTX_MASK);
			nlock = lock + 1;
			if (atomic_cmpset_int(&mtx->mtx_lock, lock, nlock))
				break;
		} else {
			/* MWAIT here */
			if (bb < 1000)
				++bb;
			cpu_pause();
			for (bo = 0; bo < bb; ++bo)
				;
		}
		cpu_pause();
	}
}
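A note on the pattern: the loop above retries the cmpset and, between attempts, burns an increasing but capped number of iterations (bb grows toward 1000) before touching the lock word again. Below is a minimal user-space sketch of that capped-backoff acquire using C11 atomics; cpu_relax() and spin_acquire_backoff() are hypothetical names standing in for cpu_pause() and the kernel routine, not anything from the DragonFly sources.

#include <stdatomic.h>

/* Hypothetical stand-in for cpu_pause(); on x86 this could wrap _mm_pause(). */
static inline void cpu_relax(void) { atomic_signal_fence(memory_order_seq_cst); }

/*
 * Capped-backoff exclusive acquire: retry the CAS, spinning a little longer
 * (up to a fixed cap) after each failed attempt.  The matching release is a
 * plain atomic_store_explicit(lock, 0, memory_order_release).
 */
static void
spin_acquire_backoff(atomic_uint *lock)
{
	unsigned expected;
	int bb = 1;		/* current backoff, in relax iterations */
	int bo;

	for (;;) {
		expected = 0;
		if (atomic_compare_exchange_weak_explicit(lock, &expected, 1,
		    memory_order_acquire, memory_order_relaxed))
			break;			/* got the lock */
		if (bb < 1000)			/* grow the backoff, capped */
			++bb;
		for (bo = 0; bo < bb; ++bo)
			cpu_relax();		/* back off before retrying */
	}
}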
Example #2
/*
 * Delete a link structure after tsleep has failed.  This code is not
 * in the critical path as most exclusive waits are chained.
 */
static
void
mtx_delete_link(mtx_t *mtx, mtx_link_t *link)
{
	thread_t td = curthread;
	u_int	lock;
	u_int	nlock;

	/*
	 * Acquire MTX_LINKSPIN.
	 *
	 * Do not use cmpxchg to wait for LINKSPIN to clear as this might
	 * result in too much cpu cache traffic.
	 */
	crit_enter_raw(td);
	for (;;) {
		lock = mtx->mtx_lock;
		if (lock & MTX_LINKSPIN) {
			cpu_pause();
			continue;
		}
		nlock = lock | MTX_LINKSPIN;
		if (atomic_cmpset_int(&mtx->mtx_lock, lock, nlock))
			break;
		cpu_pause();
	}

	/*
	 * Delete the link and release LINKSPIN.
	 */
	nlock = MTX_LINKSPIN;	/* to clear */

	switch(link->state) {
	case MTX_LINK_LINKED_EX:
		if (link->next == link) {
			mtx->mtx_exlink = NULL;
			nlock |= MTX_EXWANTED;	/* to clear */
		} else {
			mtx->mtx_exlink = link->next;
			link->next->prev = link->prev;
			link->prev->next = link->next;
		}
		break;
	case MTX_LINK_LINKED_SH:
		if (link->next == link) {
			mtx->mtx_shlink = NULL;
			nlock |= MTX_SHWANTED;	/* to clear */
		} else {
			mtx->mtx_shlink = link->next;
			link->next->prev = link->prev;
			link->prev->next = link->next;
		}
		break;
	default:
		/* no change */
		break;
	}
	atomic_clear_int(&mtx->mtx_lock, nlock);
	crit_exit_raw(td);
}
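The acquisition loop above is a test-and-test-and-set: it reads mtx_lock with a plain load and only issues the cmpset once MTX_LINKSPIN looks clear, precisely because spinning on cmpxchg itself would keep pulling the cache line into exclusive state. A generic C11 sketch of that idea follows; BUSY_BIT, acquire_busy_bit() and cpu_relax() are illustrative names, not kernel APIs.

#include <stdatomic.h>

#define BUSY_BIT 0x40000000u	/* illustrative flag, analogous to MTX_LINKSPIN */

static inline void cpu_relax(void) { atomic_signal_fence(memory_order_seq_cst); }

/*
 * Set BUSY_BIT, but spin with plain loads while it is already set so the
 * cache line stays shared instead of bouncing on failed CAS attempts.
 */
static unsigned
acquire_busy_bit(atomic_uint *word)
{
	unsigned old;

	for (;;) {
		old = atomic_load_explicit(word, memory_order_relaxed);
		if (old & BUSY_BIT) {		/* read-only wait, no line ping-pong */
			cpu_relax();
			continue;
		}
		if (atomic_compare_exchange_weak_explicit(word, &old,
		    old | BUSY_BIT, memory_order_acquire, memory_order_relaxed))
			return old;		/* caller sees the pre-acquire value */
		cpu_relax();
	}
}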
Example #3
/*
 * Get a shared spinlock the hard way.  Add a shared count if there is no
 * exclusive holder, otherwise back off and retry.
 */
void
_mtx_spinlock_sh(mtx_t mtx)
{
	u_int	lock;
	u_int	nlock;
	int	bb = 1;
	int	bo;

	for (;;) {
		lock = mtx->mtx_lock;
		if ((lock & MTX_EXCLUSIVE) == 0) {
			KKASSERT((lock & MTX_MASK) != MTX_MASK);
			nlock = lock + 1;
			if (atomic_cmpset_int(&mtx->mtx_lock, lock, nlock))
				break;
		} else {
			/* MWAIT here */
			if (bb < 1000)
				++bb;
			cpu_pause();
			for (bo = 0; bo < bb; ++bo)
				;
			++mtx_contention_count;
		}
		cpu_pause();
		++mtx_collision_count;
	}
}
Example #4
void
cpu_exit_wait(
	int cpu)
{
    	cpu_data_t	*cdp = cpu_datap(cpu);
	boolean_t	intrs_enabled;
	uint64_t	tsc_timeout;

	/*
	 * Wait until the CPU indicates that it has stopped.
	 * Disable interrupts while the topo lock is held -- arguably
	 * this should always be done, but in this instance it can lead to
	 * a timeout if a long-running interrupt were to occur here.
	 */
	intrs_enabled = ml_set_interrupts_enabled(FALSE);
	simple_lock(&x86_topo_lock);
	/* Set a generous timeout of several seconds (in TSC ticks) */
	tsc_timeout = rdtsc64() + (10ULL * 1000 * 1000 * 1000);
	while ((cdp->lcpu.state != LCPU_HALT)
	       && (cdp->lcpu.state != LCPU_OFF)
	       && !cdp->lcpu.stopped) {
	    simple_unlock(&x86_topo_lock);
	    ml_set_interrupts_enabled(intrs_enabled);
	    cpu_pause();
	    if (rdtsc64() > tsc_timeout)
		panic("cpu_exit_wait(%d) timeout", cpu);
	    ml_set_interrupts_enabled(FALSE);
	    simple_lock(&x86_topo_lock);
	}
	simple_unlock(&x86_topo_lock);
	ml_set_interrupts_enabled(intrs_enabled);
}
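cpu_exit_wait() bounds its polling with a TSC deadline and panics if the target CPU never reports halted or stopped; it also drops the topo lock and briefly re-enables interrupts around each cpu_pause(). The deadline-bounded part alone can be sketched portably as below, with clock_gettime() standing in for rdtsc64() and abort() for panic(); wait_flag_or_die() and now_ns() are made-up names, and the lock/interrupt handling is deliberately left out.

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>

static inline void cpu_relax(void) { atomic_signal_fence(memory_order_seq_cst); }

static uint64_t
now_ns(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (uint64_t)ts.tv_sec * 1000000000ull + (uint64_t)ts.tv_nsec;
}

/*
 * Spin until *flag becomes nonzero or the deadline passes; abort on timeout,
 * mirroring the panic() in cpu_exit_wait().
 */
static void
wait_flag_or_die(atomic_int *flag, uint64_t timeout_ns)
{
	uint64_t deadline = now_ns() + timeout_ns;

	while (atomic_load_explicit(flag, memory_order_acquire) == 0) {
		cpu_relax();
		if (now_ns() > deadline) {
			fprintf(stderr, "wait_flag_or_die: timeout\n");
			abort();
		}
	}
}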
Example #5
/*
 * If the lock is held exclusively it must be owned by the caller.  If the
 * lock is already a shared lock this operation is a NOP.  A panic will
 * occur if the lock is not held either shared or exclusive.
 *
 * The exclusive count is converted to a shared count.
 */
void
_mtx_downgrade(mtx_t *mtx)
{
	u_int	lock;
	u_int	nlock;

	for (;;) {
		lock = mtx->mtx_lock;
		cpu_ccfence();

		/*
		 * NOP if already shared.
		 */
		if ((lock & MTX_EXCLUSIVE) == 0) {
			KKASSERT((lock & MTX_MASK) > 0);
			break;
		}

		/*
		 * Transfer count to shared.  Any additional pending shared
		 * waiters must be woken up.
		 */
		if (lock & MTX_SHWANTED) {
			if (mtx_chain_link_sh(mtx, lock))
				break;
			/* retry */
		} else {
			nlock = lock & ~MTX_EXCLUSIVE;
			if (atomic_cmpset_int(&mtx->mtx_lock, lock, nlock))
				break;
			/* retry */
		}
		cpu_pause();
	}
}
Example #6
static __inline
int
_lwkt_trytokref_spin(lwkt_tokref_t ref, thread_t td, long mode)
{
	int spin;

	if (_lwkt_trytokref(ref, td, mode)) {
#ifdef DEBUG_LOCKS_LATENCY
		long j;
		for (j = tokens_add_latency; j > 0; --j)
			cpu_ccfence();
#endif
		return TRUE;
	}
	for (spin = lwkt_token_spin; spin > 0; --spin) {
		if (lwkt_token_delay)
			tsc_delay(lwkt_token_delay);
		else
			cpu_pause();
		if (_lwkt_trytokref(ref, td, mode)) {
#ifdef DEBUG_LOCKS_LATENCY
			long j;
			for (j = tokens_add_latency; j > 0; --j)
				cpu_ccfence();
#endif
			return TRUE;
		}
	}
	return FALSE;
}
Example #7
/*
 * Put a CPU into "safe" mode with respect to power.
 *
 * Some systems cannot operate at a continuous "normal" speed without
 * exceeding the thermal design.  This is called per-CPU to place the
 * CPUs into a "safe" operating mode.
 */
void
pmSafeMode(x86_lcpu_t *lcpu, uint32_t flags)
{
    if (pmDispatch != NULL
	&& pmDispatch->pmCPUSafeMode != NULL)
	pmDispatch->pmCPUSafeMode(lcpu, flags);
    else {
	/*
	 * Do something reasonable if the KEXT isn't present.
	 *
	 * We only look at the PAUSE and RESUME flags.  The other flag(s)
	 * will not make any sense without the KEXT, so just ignore them.
	 *
	 * We set the CPU's state to indicate that it's halted.  If this
	 * is the CPU we're currently running on, then spin until the
	 * state becomes non-halted.
	 */
	if (flags & PM_SAFE_FL_PAUSE) {
	    lcpu->state = LCPU_PAUSE;
	    if (lcpu == x86_lcpu()) {
		while (lcpu->state == LCPU_PAUSE)
		    cpu_pause();
	    }
	}
	
	/*
	 * Clear the halted flag for the specified CPU; that will
	 * get it out of its spin loop.
	 */
	if (flags & PM_SAFE_FL_RESUME) {
	    lcpu->state = LCPU_RUN;
	}
    }
}
Example #8
/*
 * This inline is used when busy-waiting for an rw lock.
 * If interrupts were disabled when the lock primitive was called,
 * we poll the IPI handler for pending tlb flushes.
 * XXX This is a hack to avoid deadlocking on the pmap_system_lock.
 */
static inline void
lck_rw_lock_pause(boolean_t interrupts_enabled)
{
	if (!interrupts_enabled)
		handle_pending_TLB_flushes();
	cpu_pause();
}
Example #9
/*
 * Attempt to acquire a spinlock; if we fail we must undo the
 * gd->gd_spinlocks_wr/gd->gd_curthread->td_critcount predisposition.
 *
 * Returns 0 on success, EAGAIN on failure.
 */
int
_mtx_spinlock_try(mtx_t mtx)
{
	globaldata_t gd = mycpu;
	u_int	lock;
	u_int	nlock;
	int	res = 0;

	for (;;) {
		lock = mtx->mtx_lock;
		if (lock == 0) {
			nlock = MTX_EXCLUSIVE | 1;
			if (atomic_cmpset_int(&mtx->mtx_lock, 0, nlock)) {
				mtx->mtx_owner = gd->gd_curthread;
				break;
			}
		} else if ((lock & MTX_EXCLUSIVE) &&
			   mtx->mtx_owner == gd->gd_curthread) {
			KKASSERT((lock & MTX_MASK) != MTX_MASK);
			nlock = lock + 1;
			if (atomic_cmpset_int(&mtx->mtx_lock, lock, nlock))
				break;
		} else {
			--gd->gd_spinlocks_wr;
			cpu_ccfence();
			--gd->gd_curthread->td_critcount;
			res = EAGAIN;
			break;
		}
		cpu_pause();
		++mtx_collision_count;
	}
	return res;
}
Example #10
/*
 * Add a (pmap, va) pair to the invalidation list and protect access
 * as appropriate.
 *
 * CPUMASK_LOCK is used to interlock thread switch-ins; otherwise another
 * cpu can switch in a pmap that we are unaware of and interfere with our
 * pte operation.
 */
void
pmap_inval_interlock(pmap_inval_info_t info, pmap_t pmap, vm_offset_t va)
{
    cpumask_t oactive;
#ifdef SMP
    cpumask_t nactive;

    DEBUG_PUSH_INFO("pmap_inval_interlock");
    for (;;) {
	oactive = pmap->pm_active;
	cpu_ccfence();
	nactive = oactive | CPUMASK_LOCK;
	if ((oactive & CPUMASK_LOCK) == 0 &&
	    atomic_cmpset_cpumask(&pmap->pm_active, oactive, nactive)) {
		break;
	}
	lwkt_process_ipiq();
	cpu_pause();
    }
    DEBUG_POP_INFO();
#else
    oactive = pmap->pm_active & ~CPUMASK_LOCK;
#endif
    KKASSERT((info->pir_flags & PIRF_CPUSYNC) == 0);
    info->pir_va = va;
    info->pir_flags = PIRF_CPUSYNC;
    lwkt_cpusync_init(&info->pir_cpusync, oactive, pmap_inval_callback, info);
    lwkt_cpusync_interlock(&info->pir_cpusync);
}
Example #11
void Java_org_deadc0de_apple2ix_Apple2Preferences_nativeSetCPUSpeed(JNIEnv *env, jclass cls, jint percentSpeed) {
    LOG("percentSpeed : %d%%", percentSpeed);
#if TESTING
    cpu_scale_factor = CPU_SCALE_FASTEST;
    cpu_altscale_factor = CPU_SCALE_FASTEST;
    timing_initialize();
#else
    bool wasPaused = cpu_isPaused();

    if (!wasPaused) {
        cpu_pause();
    }

    cpu_scale_factor = percentSpeed/100.0;
    if (cpu_scale_factor > CPU_SCALE_FASTEST) {
        cpu_scale_factor = CPU_SCALE_FASTEST;
    }
    if (cpu_scale_factor < CPU_SCALE_SLOWEST) {
        cpu_scale_factor = CPU_SCALE_SLOWEST;
    }

    if (video_backend->animation_showCPUSpeed) {
        video_backend->animation_showCPUSpeed();
    }

    timing_initialize();

    if (!wasPaused) {
        cpu_resume();
    }
#endif
}
Example #12
int
_mtx_lock_ex_try(mtx_t mtx)
{
	u_int	lock;
	u_int	nlock;
	int	error = 0;

	for (;;) {
		lock = mtx->mtx_lock;
		if (lock == 0) {
			nlock = MTX_EXCLUSIVE | 1;
			if (atomic_cmpset_int(&mtx->mtx_lock, 0, nlock)) {
				mtx->mtx_owner = curthread;
				break;
			}
		} else if ((lock & MTX_EXCLUSIVE) &&
			   mtx->mtx_owner == curthread) {
			KKASSERT((lock & MTX_MASK) != MTX_MASK);
			nlock = lock + 1;
			if (atomic_cmpset_int(&mtx->mtx_lock, lock, nlock))
				break;
		} else {
			error = EAGAIN;
			break;
		}
		cpu_pause();
		++mtx_collision_count;
	}
	return (error);
}
Example #13
void spin_lock(atomic_t *lock, atomic_int value)
{

    uint32_t  i, n;

    for ( ;; ) {

        if (*lock == 0 && atomic_cas(lock, 0, value)) {
            return;
        }

        if (cpu_num > 1) {

            for (n = 1; n < pid; n <<= 1) {

                for (i = 0; i < n; i++) {
                    cpu_pause();
                }

                if (*lock == 0 && atomic_cas(lock, 0, value)) {
                    return;
                }
            }
        }

        /*
         * sched_yield() causes the calling thread to relinquish the CPU.
         * The thread is moved to the end of the queue for its static
         * priority and a new thread gets to run.
         */
        sched_yield();
    }
}
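spin_lock() above escalates from short cpu_pause() bursts, growing geometrically up to a bound, to sched_yield() when the lock stays contended, and skips the busy-wait entirely on a single CPU. The release side is not shown in the snippet; the sketch below renders the same policy in self-contained C11, with ncpu and max_spin as illustrative parameters rather than names from the original code, and a matching unlock that is just a release store.

#include <sched.h>		/* sched_yield() */
#include <stdatomic.h>
#include <stdint.h>

static inline void cpu_relax(void) { atomic_signal_fence(memory_order_seq_cst); }

/*
 * Same escalation policy as spin_lock() above: try, back off in growing
 * bursts of pause hints (multiprocessor only), then yield the CPU and
 * start over.
 */
static void
spin_lock_yield(atomic_uintptr_t *lock, uintptr_t value, unsigned ncpu,
    unsigned max_spin)
{
	unsigned i, n;
	uintptr_t expected;

	for (;;) {
		expected = 0;
		if (atomic_load_explicit(lock, memory_order_relaxed) == 0 &&
		    atomic_compare_exchange_weak_explicit(lock, &expected, value,
		        memory_order_acquire, memory_order_relaxed))
			return;

		if (ncpu > 1) {
			for (n = 1; n < max_spin; n <<= 1) {
				for (i = 0; i < n; i++)
					cpu_relax();
				expected = 0;
				if (atomic_load_explicit(lock, memory_order_relaxed) == 0 &&
				    atomic_compare_exchange_weak_explicit(lock, &expected,
				        value, memory_order_acquire, memory_order_relaxed))
					return;
			}
		}
		sched_yield();	/* uniprocessor or badly contended: give up the CPU */
	}
}

static void
spin_unlock_yield(atomic_uintptr_t *lock)
{
	atomic_store_explicit(lock, 0, memory_order_release);
}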
Example #14
void
ipi_process(struct cpu_info *ci, uint64_t ipi_mask)
{
	KASSERT(cpu_intr_p());

	if (ipi_mask & __BIT(IPI_NOP)) {
		ci->ci_evcnt_per_ipi[IPI_NOP].ev_count++;
		ipi_nop(ci);
	}
	if (ipi_mask & __BIT(IPI_AST)) {
		ci->ci_evcnt_per_ipi[IPI_AST].ev_count++;
		ipi_nop(ci);
	}
	if (ipi_mask & __BIT(IPI_SHOOTDOWN)) {
		ci->ci_evcnt_per_ipi[IPI_SHOOTDOWN].ev_count++;
		ipi_shootdown(ci);
	}
	if (ipi_mask & __BIT(IPI_SYNCICACHE)) {
		ci->ci_evcnt_per_ipi[IPI_SYNCICACHE].ev_count++;
		ipi_syncicache(ci);
	}
	if (ipi_mask & __BIT(IPI_SUSPEND)) {
		ci->ci_evcnt_per_ipi[IPI_SUSPEND].ev_count++;
		cpu_pause(NULL);
	}
	if (ipi_mask & __BIT(IPI_HALT)) {
		ci->ci_evcnt_per_ipi[IPI_HALT].ev_count++;
		ipi_halt();
	}
	if (ipi_mask & __BIT(IPI_XCALL)) {
		ci->ci_evcnt_per_ipi[IPI_XCALL].ev_count++;
		xc_ipi_handler();
	}
}
Example #15
/*
 * If the lock is held exclusively it must be owned by the caller.  If the
 * lock is already a shared lock this operation is a NOP.  A panic will
 * occur if the lock is not held either shared or exclusive.
 *
 * The exclusive count is converted to a shared count.
 */
void
_mtx_downgrade(mtx_t mtx)
{
	u_int	lock;
	u_int	nlock;

	for (;;) {
		lock = mtx->mtx_lock;
		if ((lock & MTX_EXCLUSIVE) == 0) {
			KKASSERT((lock & MTX_MASK) > 0);
			break;
		}
		KKASSERT(mtx->mtx_owner == curthread);
		nlock = lock & ~(MTX_EXCLUSIVE | MTX_SHWANTED);
		if (atomic_cmpset_int(&mtx->mtx_lock, lock, nlock)) {
			if (lock & MTX_SHWANTED) {
				wakeup(mtx);
				++mtx_wakeup_count;
			}
			break;
		}
		cpu_pause();
		++mtx_collision_count;
	}
}
Example #16
/*
 * Upgrade a shared lock to an exclusive lock.  The upgrade will fail if
 * the shared lock has a count other than 1.  Optimize the most likely case
 * but note that a single cmpset can fail due to WANTED races.
 *
 * If the lock is held exclusively it must be owned by the caller and
 * this function will simply return without doing anything.   A panic will
 * occur if the lock is held exclusively by someone other than the caller.
 *
 * Returns 0 on success, EDEADLK on failure.
 */
int
_mtx_upgrade_try(mtx_t mtx)
{
	u_int	lock;
	u_int	nlock;
	int	error = 0;

	for (;;) {
		lock = mtx->mtx_lock;

		if ((lock & ~MTX_EXWANTED) == 1) {
			nlock = lock | MTX_EXCLUSIVE;
			if (atomic_cmpset_int(&mtx->mtx_lock, lock, nlock)) {
				mtx->mtx_owner = curthread;
				break;
			}
		} else if (lock & MTX_EXCLUSIVE) {
			KKASSERT(mtx->mtx_owner == curthread);
			break;
		} else {
			error = EDEADLK;
			break;
		}
		cpu_pause();
		++mtx_collision_count;
	}
	return (error);
}
Example #17
/*
 * Delete a link structure after tsleep has failed.  This code is not
 * in the critical path as most exclusive waits are chained.
 */
static
void
mtx_delete_link(mtx_t mtx, mtx_link_t link)
{
	thread_t td = curthread;
	u_int	lock;
	u_int	nlock;

	/*
	 * Acquire MTX_EXLINK.
	 *
	 * Do not use cmpxchg to wait for EXLINK to clear as this might
	 * result in too much cpu cache traffic.
	 */
	++td->td_critcount;
	for (;;) {
		lock = mtx->mtx_lock;
		if (lock & MTX_EXLINK) {
			cpu_pause();
			++mtx_collision_count;
			continue;
		}
		/* lock &= ~MTX_EXLINK; */
		nlock = lock | MTX_EXLINK;
		if (atomic_cmpset_int(&mtx->mtx_lock, lock, nlock))
			break;
		cpu_pause();
		++mtx_collision_count;
	}

	/*
	 * Delete the link and release EXLINK.
	 */
	if (link->state == MTX_LINK_LINKED) {
		if (link->next == link) {
			mtx->mtx_link = NULL;
		} else {
			mtx->mtx_link = link->next;
			link->next->prev = link->prev;
			link->prev->next = link->next;
		}
		link->state = MTX_LINK_IDLE;
	}
	atomic_clear_int(&mtx->mtx_lock, MTX_EXLINK);
	--td->td_critcount;
}
Example #18
__private_extern__ kern_return_t
chudxnu_cpusig_send(int otherCPU, uint32_t request_code)
{
	int				thisCPU;
	kern_return_t			retval = KERN_FAILURE;
	chudcpu_signal_request_t	request;
	uint64_t			deadline;
	chudcpu_data_t			*target_chudp;
	boolean_t old_level;

	disable_preemption();
	// force interrupts on for a cross CPU signal.
	old_level = chudxnu_set_interrupts_enabled(TRUE);
	thisCPU = cpu_number();

	if ((unsigned) otherCPU < real_ncpus &&
	    thisCPU != otherCPU &&
	    cpu_data_ptr[otherCPU]->cpu_running) {

		target_chudp = (chudcpu_data_t *)
					cpu_data_ptr[otherCPU]->cpu_chud;

		/* Fill out request */
		request.req_sync = 0xFFFFFFFF;		/* set sync flag */
		//request.req_type = CPRQchud;		/* set request type */
		request.req_code = request_code;	/* set request */

		KERNEL_DEBUG_CONSTANT(
			MACHDBG_CODE(DBG_MACH_CHUD,
				     CHUD_CPUSIG_SEND) | DBG_FUNC_NONE,
			otherCPU, request_code, 0, 0, 0);

		/*
		 * Insert the new request in the target cpu's request queue
		 * and signal target cpu.
		 */
		mpenqueue_tail(&target_chudp->cpu_request_queue,
			       &request.req_entry);
		i386_signal_cpu(otherCPU, MP_CHUD, ASYNC);

		/* Wait for response or timeout */
		deadline = mach_absolute_time() + LockTimeOut;
		while (request.req_sync != 0) {
			if (mach_absolute_time() > deadline) {
				panic("chudxnu_cpusig_send(%d,%d) timed out\n",
					otherCPU, request_code);
			}
			cpu_pause();
		}
		retval = KERN_SUCCESS;
	} else {
		retval = KERN_INVALID_ARGUMENT;
	}

	chudxnu_set_interrupts_enabled(old_level);
	enable_preemption();
	return retval;
}
Example #19
/*
 * Routine: 	lck_mtx_lock_spinwait
 *
 * Invoked trying to acquire a mutex when there is contention but
 * the holder is running on another processor. We spin for up to a maximum
 * time waiting for the lock to be released.
 *
 * Called with the interlock unlocked.
 */
void
lck_mtx_lock_spinwait(
	lck_mtx_t		*lck)
{
	thread_t		holder;
	volatile lck_mtx_t	*mutex;
	uint64_t		deadline;

	if (lck->lck_mtx_tag != LCK_MTX_TAG_INDIRECT)
		mutex = lck;
	else
		mutex = &lck->lck_mtx_ptr->lck_mtx;

	KERNEL_DEBUG(
		MACHDBG_CODE(DBG_MACH_LOCKS, LCK_MTX_LCK_SPIN) | DBG_FUNC_NONE,
		(int)lck, (int)mutex->lck_mtx_locked, 0, 0, 0);

	deadline = mach_absolute_time() + MutexSpin;
	/*
	 * Spin while:
	 *   - mutex is locked, and
	 *   - it's locked as a spin lock, or
	 *   - owner is running on another processor, and
	 *   - owner (processor) is not idling, and
	 *   - we haven't spun for long enough.
	 */
	while ((holder = (thread_t) mutex->lck_mtx_locked) != NULL) {
	        if ((holder == (thread_t)MUTEX_LOCKED_AS_SPIN) ||
		    ((holder->machine.specFlags & OnProc) != 0 &&
		     (holder->state & TH_IDLE) == 0 &&
		     mach_absolute_time() < deadline)) {
		        cpu_pause();
			continue;
		}
		break;
	}
#if	CONFIG_DTRACE
	/*
	 * We've already kept a count via deadline of how long we spun.
	 * If dtrace is active, then we compute backwards to decide how
	 * long we spun.
	 *
	 * Note that we record a different probe id depending on whether
	 * this is a direct or indirect mutex.  This allows us to 
	 * penalize only lock groups that have debug/stats enabled
	 * with dtrace processing if desired.
	 */
	if (lck->lck_mtx_tag != LCK_MTX_TAG_INDIRECT) {
		LOCKSTAT_RECORD(LS_LCK_MTX_LOCK_SPIN, lck,
		    mach_absolute_time() - (deadline - MutexSpin));
	} else {
		LOCKSTAT_RECORD(LS_LCK_MTX_EXT_LOCK_SPIN, lck,
		    mach_absolute_time() - (deadline - MutexSpin));
	}
	/* The lockstat acquire event is recorded by the assembly code beneath us. */
#endif
}
Example #20
void
machine_delay_until(
	uint64_t interval,
	uint64_t		deadline)
{
	(void)interval;
	while (mach_absolute_time() < deadline) {
		cpu_pause();
	} 
}
Example #21
static void afterExec(const sEvArgs *args) {
	UNUSED(args);
	tetra raw = cpu_getCurInstrRaw();
	if(OPCODE(raw) == PUSHGO || OPCODE(raw) == PUSHGOI ||
		OPCODE(raw) == PUSHJ || OPCODE(raw) == PUSHJB)
		level++;
	else if(OPCODE(raw) == POP)
		level--;
	if(level <= 0)
		cpu_pause();
}
Example #22
// prepares to play the disc
void ldp::pre_play()
{
//	Uint32 cpu_hz;	// used to calculate elapsed cycles

	// safety check, if they try to play without checking the search result ...
	// THIS SAFETY CHECK CAN BE REMOVED ONCE ALL LDP DRIVERS HAVE BEEN CONVERTED OVER TO NON-BLOCKING SEEKING
	if (m_status == LDP_SEARCHING)
	{
		printline("LDP : tried to play without checking to see if we were still seeking! that's bad!");
		
		// if this ever happens, it is a bug in Daphne, so log it
		m_bug_log.push_back("LDP.CPP, pre_play() : tried to play without checking to see if we're still seeking!");
		
		return;
	}

	// we only want to refresh the frame calculation stuff if the disc is
	// not already playing
	if (m_status != LDP_PLAYING)
	{
		m_uElapsedMsSincePlay = 0;	// by definition, this must be reset since we are playing
		m_uBlockedMsSincePlay = 0;	// " " "
		m_iSkipOffsetSincePlay = 0;	// by definition, this must be reset since we are playing
		m_uCurrentOffsetFrame = 0;	// " " "
		m_uMsFrameBoundary = 1000000 / g_game->get_disc_fpks();	// how many ms must elapse before the first frame ends, 2nd frame begins

		// VLDP needs its timer reset with the rest of these timers before its play command is called.
		// Otherwise, it will think it's way behind and will try to catch up a bunch of frames.
		think();

		// if the disc may need to take a long time to spin up, then pause the cpu timers while the disc spins up
		if (m_status == LDP_STOPPED)
		{
			cpu_pause();
			m_play_time = play();
			cpu_unpause();
		}
		else
		{
			m_play_time = play();
		}

		m_bWaitingForVblankToPlay = true;	// don't start counting frames until the next vsync
		m_status = LDP_PLAYING;
	}
	else
	{
		printline("LDP : disc is already playing, play command ignored");
	}

	printline("Play");	// moved to the end of the function so as to not cause lag before play command could be issued
}
Example #23
int ti_threadgroup_fork(ti_threadgroup_t *tg, int16_t ext_tid,
                        void **bcast_val)
{
    if (tg->tid_map[ext_tid] == 0) {
        tg->envelope = bcast_val ? *bcast_val : NULL;
        cpu_sfence();
        tg->forked = 1;
        tg->group_sense = tg->thread_sense[0]->sense;

        // if it's possible that threads are sleeping, signal them
        if (tg->sleep_threshold) {
            uv_mutex_lock(&tg->alarm_lock);
            uv_cond_broadcast(&tg->alarm);
            uv_mutex_unlock(&tg->alarm_lock);
        }
    }
    else {
        // spin up to threshold ns (count sheep), then sleep
        uint64_t spin_ns;
        uint64_t spin_start = 0;
        while (tg->group_sense !=
                tg->thread_sense[tg->tid_map[ext_tid]]->sense) {
            if (tg->sleep_threshold) {
                if (!spin_start) {
                    // Lazily initialize spin_start since uv_hrtime is expensive
                    spin_start = uv_hrtime();
                    continue;
                }
                spin_ns = uv_hrtime() - spin_start;
                // In case uv_hrtime is not monotonic, we'll sleep earlier
                if (spin_ns >= tg->sleep_threshold) {
                    uv_mutex_lock(&tg->alarm_lock);
                    if (tg->group_sense !=
                            tg->thread_sense[tg->tid_map[ext_tid]]->sense) {
                        uv_cond_wait(&tg->alarm, &tg->alarm_lock);
                    }
                    uv_mutex_unlock(&tg->alarm_lock);
                    spin_start = 0;
                    continue;
                }
            }
            cpu_pause();
        }
        cpu_lfence();
        if (bcast_val)
            *bcast_val = tg->envelope;
    }

    return 0;
}
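The worker side above spins on its sense flag for up to sleep_threshold nanoseconds and then parks on a condition variable, re-checking the predicate under the mutex so a wakeup cannot be lost. Below is a stripped-down sketch of that spin-then-block wait using plain pthreads; the flag/mutex/cond trio and wait_spin_then_sleep() are illustrative and not part of the Julia runtime.

#include <pthread.h>
#include <stdatomic.h>
#include <stdint.h>
#include <time.h>

static inline void cpu_relax(void) { atomic_signal_fence(memory_order_seq_cst); }

static uint64_t
now_ns(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (uint64_t)ts.tv_sec * 1000000000ull + (uint64_t)ts.tv_nsec;
}

/*
 * Spin on *flag for up to spin_ns, then fall back to sleeping on the
 * condition variable.  The waker must set *flag and broadcast cond under mtx.
 */
static void
wait_spin_then_sleep(atomic_int *flag, pthread_mutex_t *mtx,
    pthread_cond_t *cond, uint64_t spin_ns)
{
	uint64_t start = now_ns();

	while (!atomic_load_explicit(flag, memory_order_acquire)) {
		if (now_ns() - start >= spin_ns) {
			pthread_mutex_lock(mtx);
			/* Re-check under the lock to avoid a lost wakeup. */
			while (!atomic_load_explicit(flag, memory_order_acquire))
				pthread_cond_wait(cond, mtx);
			pthread_mutex_unlock(mtx);
			break;
		}
		cpu_relax();
	}
}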
Example #24
static unsigned int NOINLINE
hw_lock_lock_contended(hw_lock_t lock, uintptr_t data, uint64_t timeout, boolean_t do_panic)
{
	uint64_t	end = 0;
	uintptr_t	holder = lock->lock_data;
	int		i;

	if (timeout == 0)
		timeout = LOCK_PANIC_TIMEOUT;
#if CONFIG_DTRACE
	uint64_t begin;
	boolean_t dtrace_enabled = lockstat_probemap[LS_LCK_SPIN_LOCK_SPIN] != 0;
	if (__improbable(dtrace_enabled))
		begin = mach_absolute_time();
#endif
	for ( ; ; ) {	
		for (i = 0; i < LOCK_SNOOP_SPINS; i++) {
			cpu_pause();
#if (!__ARM_ENABLE_WFE_) || (LOCK_PRETEST)
			holder = ordered_load_hw(lock);
			if (holder != 0)
				continue;
#endif
			if (atomic_compare_exchange(&lock->lock_data, 0, data,
			    memory_order_acquire_smp, TRUE)) {
#if CONFIG_DTRACE
				if (__improbable(dtrace_enabled)) {
					uint64_t spintime = mach_absolute_time() - begin;
					if (spintime > dtrace_spin_threshold)
						LOCKSTAT_RECORD2(LS_LCK_SPIN_LOCK_SPIN, lock, spintime, dtrace_spin_threshold);
				}
#endif
				return 1;
			}
		}
		if (end == 0) {
			end = ml_get_timebase() + timeout;
		}
		else if (ml_get_timebase() >= end)
			break;
	}
	if (do_panic) {
		// Capture the actual time spent blocked, which may be higher than the timeout
		// if a misbehaving interrupt stole this thread's CPU time.
		panic("Spinlock timeout after %llu ticks, %p = %lx",
			(ml_get_timebase() - end + timeout), lock, holder);
	}
	return 0;
}
Example #25
// I called this MAME_Debug so that I could test mame cpu cores with daphne (obviously I can't ship mame cpu cores with
// daphne due to licensing issues)
void MAME_Debug(void)
{	
	// if we are in trace mode OR if we've got our desired breakpoint
	if (g_cpu_trace || (g_break && (get_cpu_struct(g_which_cpu)->getpc_callback() == g_breakpoint)))
	{
		// if the active cpu is the one to be debugged
		if (cpu_getactivecpu() == g_which_cpu)
		{
			// since we may be at the debug prompt for a long time, we pause the cpu timer here
			cpu_pause();
			debug_prompt();	// give them a prompt
			cpu_unpause();
		}
	}
}
Example #26
void Java_org_deadc0de_apple2ix_Apple2Activity_nativeEmulationPause(JNIEnv *env, jobject obj) {
    if (appState != APP_RUNNING) {
        return;
    }
    if (cpu_isPaused()) {
        return;
    }
    LOG("...");

#if TESTING
    // test driver thread is managing CPU
#else
    cpu_pause();
#endif
}
Example #27
/*
 * This function is used to acquire a contested lock.
 *
 * The lock word holds the owning thread id (masked to 30 bits).  Bit
 * 0x40000000 marks the lock as contested, i.e. waiters may be sleeping.
 */
int
__thr_umtx_lock(volatile umtx_t *mtx, int id, int timo)
{
	int v;
	int errval;
	int ret = 0;
	int retry = 4;

	v = *mtx;
	cpu_ccfence();
	id &= 0x3FFFFFFF;

	for (;;) {
		cpu_pause();
		if (v == 0) {
			if (atomic_fcmpset_int(mtx, &v, id))
				break;
			continue;
		}
		if (--retry) {
			sched_yield();
			v = *mtx;
			continue;
		}

		/*
		 * Set the waiting bit.  If the fcmpset fails v is loaded
		 * with the current content of the mutex, and if the waiting
		 * bit is already set, we can also sleep.
		 */
		if (atomic_fcmpset_int(mtx, &v, v|0x40000000) ||
		    (v & 0x40000000)) {
			if (timo == 0) {
				_umtx_sleep_err(mtx, v|0x40000000, timo);
			} else if ((errval = _umtx_sleep_err(mtx, v|0x40000000, timo)) > 0) {
				if (errval == EAGAIN) {
					if (atomic_cmpset_acq_int(mtx, 0, id))
						ret = 0;
					else
						ret = ETIMEDOUT;
					break;
				}
			}
		}
		retry = 4;
	}
	return (ret);
}
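__thr_umtx_lock() keeps the owner id in the lock word and sets bit 0x40000000 before sleeping, so the release path can tell whether anyone needs waking. The sketch below reduces that mark-contested-then-sleep protocol to its core in C11; wait_on_word()/wake_word() are degenerate stand-ins for the kernel-assisted sleep/wakeup pair used by the real code (here they merely spin), and contested_lock()/contested_unlock() are illustrative names.

#include <stdatomic.h>

#define WAITERS_BIT 0x40000000u		/* same bit the code above uses */

static inline void cpu_relax(void) { atomic_signal_fence(memory_order_seq_cst); }

/*
 * Degenerate stand-ins for the kernel-assisted sleep/wakeup pair: here the
 * "sleep" is just a polite spin, so wake_word() has nothing to do.
 */
static void
wait_on_word(atomic_uint *word, unsigned expected)
{
	while (atomic_load_explicit(word, memory_order_relaxed) == expected)
		cpu_relax();
}

static void
wake_word(atomic_uint *word)
{
	(void)word;
}

static void
contested_lock(atomic_uint *word, unsigned id)
{
	unsigned v = atomic_load_explicit(word, memory_order_relaxed);

	id &= 0x3FFFFFFFu;
	for (;;) {
		if (v == 0) {
			if (atomic_compare_exchange_weak_explicit(word, &v, id,
			    memory_order_acquire, memory_order_relaxed))
				return;
			continue;	/* failed CAS reloaded v for us */
		}
		/*
		 * Advertise that we are about to sleep, then sleep only while
		 * the word still holds the contested value we advertised.
		 */
		if (atomic_compare_exchange_weak_explicit(word, &v,
		    v | WAITERS_BIT, memory_order_relaxed, memory_order_relaxed) ||
		    (v & WAITERS_BIT))
			wait_on_word(word, v | WAITERS_BIT);
		v = atomic_load_explicit(word, memory_order_relaxed);
	}
}

static void
contested_unlock(atomic_uint *word)
{
	unsigned v = atomic_exchange_explicit(word, 0, memory_order_release);

	if (v & WAITERS_BIT)		/* someone advertised before sleeping */
		wake_word(word);
}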
Example #28
int get_SW3_v2()
{
	int i;
	/* Read the current state of the switch */
	if(!(MCF_GPIO_SETTA & MCF_GPIO_SETTA_SETTA1))
	{
		cpu_pause(5000);
		if(!(MCF_GPIO_SETTA & MCF_GPIO_SETTA_SETTA1))
		{
			// Wait until button is released
			while(!(MCF_GPIO_SETTA & MCF_GPIO_SETTA_SETTA1));
			return 1;			
		}
	}
	return 0;
}
Example #29
int ti_threadgroup_join(ti_threadgroup_t *tg, int16_t ext_tid)
{
    int i;

    tg->thread_sense[tg->tid_map[ext_tid]]->sense
        = !tg->thread_sense[tg->tid_map[ext_tid]]->sense;
    if (tg->tid_map[ext_tid] == 0) {
        for (i = 1;  i < tg->num_threads;  ++i) {
            while (tg->thread_sense[i]->sense == tg->group_sense)
                cpu_pause();
        }
        tg->forked = 0;
    }

    return 0;
}
Example #30
/*
 * Contended path: spin with cpu_pause() while the lock word reads locked,
 * then attempt the exchange; start over if another thread won the race.
 */
static
void
lock_slow(struct thr_spin_lock* sl)
{
  unsigned int lockval = LOCK_VAL;
  volatile unsigned* val = &sl->m_lock;
test:
  do {
    cpu_pause();
  } while (* val == lockval);
  
  if (likely(xcng(val, lockval) == UNLOCK_VAL))
    return;
  
  goto test;
}