Example #1
/*
 * Further secondary CPU initialization.
 *
 * We are now running on our startup stack, with proper page tables.
 * There is nothing to do but display some details about the CPU and its CMMUs.
 */
void
secondary_main()
{
	struct cpu_info *ci = curcpu();
	int s;

	cpu_configuration_print(0);
	ncpus++;

	sched_init_cpu(ci);
	nanouptime(&ci->ci_schedstate.spc_runtime);
	ci->ci_curproc = NULL;
	ci->ci_randseed = (arc4random() & 0x7fffffff) + 1;

	/*
	 * Release cpu_hatch_mutex to let other secondary processors
	 * have a chance to run.
	 */
	hatch_pending_count--;
	__cpu_simple_unlock(&cpu_hatch_mutex);

	/* wait for cpu_boot_secondary_processors() */
	__cpu_simple_lock(&cpu_boot_mutex);
	__cpu_simple_unlock(&cpu_boot_mutex);

	spl0();
	SCHED_LOCK(s);
	/* clearing PSR_IND enables interrupts on this processor */
	set_psr(get_psr() & ~PSR_IND);

	SET(ci->ci_flags, CIF_ALIVE);

	cpu_switchto(NULL, sched_chooseproc());
}
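For context, here is a minimal sketch (an assumption, not the verbatim source) of the primary CPU's side of the handshake above: cpu_boot_mutex is held while the secondaries come up, and releasing it lets each secondary fall through its lock/unlock pair and enter the scheduler.

void
cpu_boot_secondary_processors(void)
{
	/* hypothetical wait condition; the real check may differ */
	while (hatch_pending_count != 0)
		;

	/* let every hatched secondary proceed into cpu_switchto() */
	__cpu_simple_unlock(&cpu_boot_mutex);
}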
Example #2
void
m197_mp_atomic_end(uint32_t psr, __cpu_simple_lock_t *lock, uint csr)
{
	__cpu_simple_unlock(lock);

	/* restore the saved cross-processor interrupt enable bit */
	*(volatile uint8_t *)(BS_BASE + BS_CPINT) = csr & BS_CPI_IEN;

	/* restore the previous processor interrupt mask */
	set_psr(psr);
}
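The matching _begin routine is not part of this listing. A hedged sketch of what it presumably does, inferred purely from the _end path above (register and bit names are the ones used there):

uint32_t
m197_mp_atomic_begin(__cpu_simple_lock_t *lock, uint *csr)
{
	uint32_t psr;

	/* block interrupts on this processor */
	psr = get_psr();
	set_psr(psr | PSR_IND);

	/* save, then mask, the cross-processor interrupt enable */
	*csr = *(volatile uint8_t *)(BS_BASE + BS_CPINT);
	*(volatile uint8_t *)(BS_BASE + BS_CPINT) = 0;

	__cpu_simple_lock(lock);

	return psr;
}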
Example #3
static void
db_resume_others(void)
{

	mp_resume_cpus_ddb();

	__cpu_simple_lock(&db_lock);
	ddb_cpu = NOCPU;
	__cpu_simple_unlock(&db_lock);
}
Example #4
static void
kgdb_resume_others(void)
{

	mp_resume_cpus();

	__cpu_simple_lock(&kgdb_lock);
	kgdb_cpu = NOCPU;
	__cpu_simple_unlock(&kgdb_lock);
}
Example #5
static void
linux_work_unlock(struct work_struct *work)
{
	struct cpu_info *ci;
	int s;

	__cpu_simple_unlock(&work->w_lock);

	/* XXX Copypasta of MUTEX_SPIN_SPLRESTORE.  */
	ci = curcpu();
	s = ci->ci_mtx_oldspl;
	__insn_barrier();
	if (++ci->ci_mtx_count == 0)
		splx(s);
}
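The lock side is not in this listing; a sketch of the presumable counterpart, mirroring the unlock path above (the splvm() level is an assumption):

static void
linux_work_lock(struct work_struct *work)
{
	struct cpu_info *ci;
	int s;

	/* XXX Copypasta of MUTEX_SPIN_SPLRAISE.  */
	s = splvm();	/* assumed IPL; the real code may raise differently */
	ci = curcpu();
	if (ci->ci_mtx_count-- == 0)
		ci->ci_mtx_oldspl = s;
	__insn_barrier();

	__cpu_simple_lock(&work->w_lock);
}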
Example #6
static int
db_suspend_others(void)
{
	int cpu_me = cpu_number();
	int win;

	if (cpus == NULL)
		return 1;

	/* elect the first CPU to get here as the ddb owner */
	__cpu_simple_lock(&db_lock);
	if (ddb_cpu == NOCPU)
		ddb_cpu = cpu_me;
	win = (ddb_cpu == cpu_me);
	__cpu_simple_unlock(&db_lock);

	/* only the winning CPU pauses the others */
	if (win)
		mp_pause_cpus_ddb();

	return win;
}
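A hypothetical caller sketch (names invented for illustration) showing how the win flag gates entry to the debugger:

static void
db_enter_ddb_example(void)
{
	/* losers return immediately; the winner runs the debugger */
	if (db_suspend_others()) {
		db_run_debugger();	/* hypothetical debugger body */
		db_resume_others();
	}
}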
Example #7
/*
 * Secondary CPU early initialization routine.
 * Determine CPU number and set it, then return the startup stack.
 *
 * Running on a minimal stack here, with interrupts disabled; do nothing fancy.
 */
void *
secondary_pre_main()
{
	struct cpu_info *ci;

	/*
	 * Invoke the CMMU initialization routine as early as possible,
	 * so that we do not risk losing any memory writes during
	 * cache setup.
	 */
	cmmu_initialize_cpu(cmmu_cpu_number());

	/*
	 * Now initialize our cpu_info structure.
	 */
	set_cpu_number(cmmu_cpu_number());
	ci = curcpu();
	ci->ci_curproc = &proc0;
	platform->smp_setup(ci);

	splhigh();

	/*
	 * Enable MMU on this processor.
	 */
	pmap_bootstrap_cpu(ci->ci_cpuid);

	if (ci->ci_curpcb == NULL) {
		printf("cpu%d: unable to get startup stack\n", ci->ci_cpuid);
		/*
		 * Release cpu_hatch_mutex to let other secondary processors
		 * have a chance to run.
		 */
		__cpu_simple_unlock(&cpu_hatch_mutex);
		for (;;) ;
	}

	return ci->ci_curpcb;
}
Example #8
void
s3c2xx0_do_pending(int enable_int)
{
	static __cpu_simple_lock_t processing = __SIMPLELOCK_UNLOCKED;
	int oldirqstate, irqstate, spl_save;

	if (__cpu_simple_lock_try(&processing) == 0)
		return;

	spl_save = current_spl_level;

	oldirqstate = irqstate = disable_interrupts(I32_bit);

	if (enable_int)
		irqstate &= ~I32_bit;

#define	DO_SOFTINT(si,ipl)						\
	if (get_pending_softint() & SI_TO_IRQBIT(si)) {			\
		softint_pending &= ~SI_TO_IRQBIT(si);			\
		__raise(ipl);						\
		restore_interrupts(irqstate);				\
		softintr_dispatch(si);					\
		disable_interrupts(I32_bit);				\
		s3c2xx0_setipl(spl_save);				\
	}

	do {
		DO_SOFTINT(SI_SOFTSERIAL, IPL_SOFTSERIAL);
		DO_SOFTINT(SI_SOFTNET, IPL_SOFTNET);
		DO_SOFTINT(SI_SOFTCLOCK, IPL_SOFTCLOCK);
		DO_SOFTINT(SI_SOFT, IPL_SOFT);
	} while (get_pending_softint());

	__cpu_simple_unlock(&processing);

	restore_interrupts(oldirqstate);
}
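For illustration, a hedged sketch of the producer side (the setter function is an assumption; only softint_pending and SI_TO_IRQBIT appear in the code above): a handler marks a soft interrupt pending, and the next s3c2xx0_do_pending() call drains it at the proper IPL.

static void
example_schedule_softnet(void)
{
	/* assumed setter; mirrors the clear in DO_SOFTINT above --
	 * real code would update this with interrupts masked */
	softint_pending |= SI_TO_IRQBIT(SI_SOFTNET);
	s3c2xx0_do_pending(1);
}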
Example #9
static void
pthread__atomic_simple_unlock(__cpu_simple_lock_t *alp)
{

	__cpu_simple_unlock(alp);
}
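The matching lock-side wrappers are not in this listing; given the unlock wrapper above, they are presumably equally thin (a sketch, assuming __cpu_simple_lock_try returns nonzero on success as usual):

static void
pthread__atomic_simple_lock(__cpu_simple_lock_t *alp)
{

	__cpu_simple_lock(alp);
}

static int
pthread__atomic_simple_lock_try(__cpu_simple_lock_t *alp)
{

	return (__cpu_simple_lock_try(alp));
}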
Example #10
File: shmqueue.c  Project: ryo/shmqueue
static inline void
shmqueue_unlock(struct shmqueue *header)
{
	__cpu_simple_unlock(&header->header->shmqueue_cpulock);
}
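The matching lock routine is not shown; a sketch of the assumed counterpart in the same project:

static inline void
shmqueue_lock(struct shmqueue *header)
{
	__cpu_simple_lock(&header->header->shmqueue_cpulock);
}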
Example #11
/* _mcount; may be static, inline, etc */
_MCOUNT_DECL(u_long frompc, u_long selfpc)
{
	u_short *frompcindex;
	struct tostruct *top, *prevtop;
	struct gmonparam *p;
	long toindex;
#if defined(_KERNEL) && !defined(_RUMPKERNEL)
	int s;
#endif

#if defined(_REENTRANT) && !defined(_KERNEL)
	if (__isthreaded) {
		/* prevent re-entry via thr_getspecific */
		if (_gmonparam.state != GMON_PROF_ON)
			return;
		_gmonparam.state = GMON_PROF_BUSY;
		p = thr_getspecific(_gmonkey);
		if (p == NULL) {
			/* Prevent recursive calls while allocating */
			thr_setspecific(_gmonkey, &_gmondummy);
			p = _m_gmon_alloc();
		}
		_gmonparam.state = GMON_PROF_ON;
	} else
#endif
		p = &_gmonparam;
	/*
	 * check that we are profiling
	 * and that we aren't recursively invoked.
	 */
	if (p->state != GMON_PROF_ON)
		return;
#if defined(_KERNEL) && !defined(_RUMPKERNEL)
	MCOUNT_ENTER;
#ifdef MULTIPROCESSOR
	__cpu_simple_lock(&__mcount_lock);
	__insn_barrier();
#endif
#endif
	p->state = GMON_PROF_BUSY;
	/*
	 * check that frompcindex is a reasonable pc value.
	 * for example:	signal catchers get called from the stack,
	 *		not from text space.  too bad.
	 */
	frompc -= p->lowpc;
	if (frompc > p->textsize)
		goto done;

#if (HASHFRACTION & (HASHFRACTION - 1)) == 0
	if (p->hashfraction == HASHFRACTION)
		frompcindex =
		    &p->froms[
		    (size_t)(frompc / (HASHFRACTION * sizeof(*p->froms)))];
	else
#endif
		frompcindex =
		    &p->froms[
		    (size_t)(frompc / (p->hashfraction * sizeof(*p->froms)))];
	toindex = *frompcindex;
	if (toindex == 0) {
		/*
		 *	first time traversing this arc
		 */
		toindex = ++p->tos[0].link;
		if (toindex >= p->tolimit)
			/* halt further profiling */
			goto overflow;

		*frompcindex = (u_short)toindex;
		top = &p->tos[(size_t)toindex];
		top->selfpc = selfpc;
		top->count = 1;
		top->link = 0;
		goto done;
	}
	top = &p->tos[(size_t)toindex];
	if (top->selfpc == selfpc) {
		/*
		 * arc at front of chain; usual case.
		 */
		top->count++;
		goto done;
	}
	/*
	 * have to go looking down chain for it.
	 * top points to what we are looking at,
	 * prevtop points to previous top.
	 * we know it is not at the head of the chain.
	 */
	for (; /* goto done */; ) {
		if (top->link == 0) {
			/*
			 * top is end of the chain and none of the chain
			 * had top->selfpc == selfpc.
			 * so we allocate a new tostruct
			 * and link it to the head of the chain.
			 */
			toindex = ++p->tos[0].link;
			if (toindex >= p->tolimit)
				goto overflow;

			top = &p->tos[(size_t)toindex];
			top->selfpc = selfpc;
			top->count = 1;
			top->link = *frompcindex;
			*frompcindex = (u_short)toindex;
			goto done;
		}
		/*
		 * otherwise, check the next arc on the chain.
		 */
		prevtop = top;
		top = &p->tos[top->link];
		if (top->selfpc == selfpc) {
			/*
			 * there it is.
			 * increment its count
			 * move it to the head of the chain.
			 */
			top->count++;
			toindex = prevtop->link;
			prevtop->link = top->link;
			top->link = *frompcindex;
			*frompcindex = (u_short)toindex;
			goto done;
		}
	}
done:
	p->state = GMON_PROF_ON;
#if defined(_KERNEL) && !defined(_RUMPKERNEL)
#ifdef MULTIPROCESSOR
	__insn_barrier();
	__cpu_simple_unlock(&__mcount_lock);
#endif
	MCOUNT_EXIT;
#endif
	return;

overflow:
	p->state = GMON_PROF_ERROR;
#if defined(_KERNEL) && !defined(_RUMPKERNEL)
#ifdef MULTIPROCESSOR
	__insn_barrier();
	__cpu_simple_unlock(&__mcount_lock);
#endif
	MCOUNT_EXIT;
#endif
	return;
}
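The lock taken under MULTIPROCESSOR above is declared elsewhere; presumably along these lines (a sketch, not the verbatim declaration):

#ifdef MULTIPROCESSOR
/* serializes updates to the shared kernel gmon state */
__cpu_simple_lock_t __mcount_lock = __SIMPLELOCK_UNLOCKED;
#endif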