Example #1
/*
 * Further secondary CPU initialization.
 *
 * We are now running on our startup stack, with proper page tables.
 * There is nothing to do but display some details about the CPU and its CMMUs.
 */
void
secondary_main()
{
	struct cpu_info *ci = curcpu();
	int s;

	cpu_configuration_print(0);
	ncpus++;

	sched_init_cpu(ci);
	nanouptime(&ci->ci_schedstate.spc_runtime);
	ci->ci_curproc = NULL;
	ci->ci_randseed = (arc4random() & 0x7fffffff) + 1;

	/*
	 * Release cpu_hatch_mutex to let other secondary processors
	 * have a chance to run.
	 */
	hatch_pending_count--;
	__cpu_simple_unlock(&cpu_hatch_mutex);

	/* wait for cpu_boot_secondary_processors() */
	__cpu_simple_lock(&cpu_boot_mutex);
	__cpu_simple_unlock(&cpu_boot_mutex);

	spl0();
	SCHED_LOCK(s);
	set_psr(get_psr() & ~PSR_IND);

	SET(ci->ci_flags, CIF_ALIVE);

	cpu_switchto(NULL, sched_chooseproc());
}
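The lock/unlock pair on cpu_boot_mutex above acts as a barrier: the mutex is held by the primary CPU until it is ready for the secondaries to start scheduling. A minimal sketch of that boot-side counterpart, assuming a cpu_boot_secondary_processors() per the comment in the code (a sketch, not the verbatim OpenBSD routine):

extern __cpu_simple_lock_t cpu_boot_mutex; /* assumed locked before any secondary starts */

void
cpu_boot_secondary_processors(void)
{
	/* ... wait for every secondary CPU to hatch ... */

	/*
	 * Release the barrier; each secondary falls through its
	 * lock/unlock of cpu_boot_mutex and enters the scheduler.
	 */
	__cpu_simple_unlock(&cpu_boot_mutex);
}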
Example #2
static void
db_resume_others(void)
{

	mp_resume_cpus_ddb();

	__cpu_simple_lock(&db_lock);
	ddb_cpu = NOCPU;
	__cpu_simple_unlock(&db_lock);
}
Example #3
static void
kgdb_resume_others(void)
{

	mp_resume_cpus();

	__cpu_simple_lock(&kgdb_lock);
	kgdb_cpu = NOCPU;
	__cpu_simple_unlock(&kgdb_lock);
}
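Example #3's suspend half is not shown on this page; it presumably mirrors db_suspend_others() in Example #6 below. A sketch under that assumption (the real kgdb glue may differ):

static int
kgdb_suspend_others(void)
{
	int cpu_me = cpu_number();
	int win;

	/* elect the first CPU into kgdb as the owner */
	__cpu_simple_lock(&kgdb_lock);
	if (kgdb_cpu == NOCPU)
		kgdb_cpu = cpu_me;
	win = (kgdb_cpu == cpu_me);
	__cpu_simple_unlock(&kgdb_lock);

	if (win)
		mp_pause_cpus();

	return win;
}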
Example #4
uint32_t
m197_mp_atomic_begin(__cpu_simple_lock_t *lock, uint *csr)
{
	uint32_t psr;

	/* block interrupts on this CPU while the lock is held */
	psr = get_psr();
	set_psr(psr | PSR_IND);

	/* save, then mask, the cross-processor interrupt enable */
	*csr = *(volatile uint8_t *)(BS_BASE + BS_CPINT);
	*(volatile uint8_t *)(BS_BASE + BS_CPINT) = 0;

	__cpu_simple_lock(lock);

	return psr;
}
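A begin routine implies a matching end. A hypothetical m197_mp_atomic_end(), sketched by undoing the steps above in reverse order (the real mvme88k code may differ):

void
m197_mp_atomic_end(uint32_t psr, __cpu_simple_lock_t *lock, uint csr)
{
	/* drop the lock first, then re-enable interrupt delivery */
	__cpu_simple_unlock(lock);

	/* restore the saved cross-processor interrupt enable and PSR */
	*(volatile uint8_t *)(BS_BASE + BS_CPINT) = csr;
	set_psr(psr);
}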
Example #5
static void
linux_work_lock(struct work_struct *work)
{
	struct cpu_info *ci;
	int cnt, s;

	/* XXX Copypasta of MUTEX_SPIN_SPLRAISE.  */
	s = splvm();
	ci = curcpu();
	cnt = ci->ci_mtx_count--;
	__insn_barrier();
	if (cnt == 0)
		ci->ci_mtx_oldspl = s;

	__cpu_simple_lock(&work->w_lock);
}
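The unlock side must undo this in reverse: drop the lock, then lower the SPL. A sketch modeled on MUTEX_SPIN_SPLRESTORE, which the comment above names as the template (the actual driver code may differ):

static void
linux_work_unlock(struct work_struct *work)
{
	struct cpu_info *ci;
	int s;

	__cpu_simple_unlock(&work->w_lock);

	/* XXX Copypasta of MUTEX_SPIN_SPLRESTORE.  */
	ci = curcpu();
	s = ci->ci_mtx_oldspl;
	__insn_barrier();
	if (++ci->ci_mtx_count == 0)
		splx(s);
}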
Example #6
static int
db_suspend_others(void)
{
	int cpu_me = cpu_number();
	int win;

	if (cpus == NULL)
		return 1;

	__cpu_simple_lock(&db_lock);
	if (ddb_cpu == NOCPU)
		ddb_cpu = cpu_me;
	win = (ddb_cpu == cpu_me);
	__cpu_simple_unlock(&db_lock);

	if (win)
		mp_pause_cpus_ddb();

	return win;
}
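Examples #2 and #6 are the two halves of one election: the first CPU into the debugger claims ddb_cpu, pauses the others, and later resumes them. A hypothetical caller, to show how the pair fits together (the real ddb trap glue is machine-dependent):

void
db_enter_mp(void)
{
	if (db_suspend_others()) {
		/* this CPU won the election and owns the debugger */
		/* ... run the ddb command loop ... */
		db_resume_others();
	}
	/* losing CPUs are held paused until db_resume_others() runs */
}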
Example #7
File: shmqueue.c Project: ryo/shmqueue
static inline void
shmqueue_lock(struct shmqueue *header)
{
	__cpu_simple_lock(&header->header->shmqueue_cpulock);
}
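For symmetry, the matching unlock (and a presumed one-time initialization with __cpu_simple_lock_init()) would look like this; sketched from the lock side above rather than copied from the shmqueue sources:

static inline void
shmqueue_unlock(struct shmqueue *header)
{
	__cpu_simple_unlock(&header->header->shmqueue_cpulock);
}

/* assumed one-time setup before the lock is first used */
static inline void
shmqueue_lock_init(struct shmqueue *header)
{
	__cpu_simple_lock_init(&header->header->shmqueue_cpulock);
}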
Example #8
/* _mcount; may be static, inline, etc */
_MCOUNT_DECL(u_long frompc, u_long selfpc)
{
	u_short *frompcindex;
	struct tostruct *top, *prevtop;
	struct gmonparam *p;
	long toindex;
#if defined(_KERNEL) && !defined(_RUMPKERNEL)
	int s;
#endif

#if defined(_REENTRANT) && !defined(_KERNEL)
	if (__isthreaded) {
		/* prevent re-entry via thr_getspecific */
		if (_gmonparam.state != GMON_PROF_ON)
			return;
		_gmonparam.state = GMON_PROF_BUSY;
		p = thr_getspecific(_gmonkey);
		if (p == NULL) {
			/* Prevent recursive calls while allocating */
			thr_setspecific(_gmonkey, &_gmondummy);
			p = _m_gmon_alloc();
		}
		_gmonparam.state = GMON_PROF_ON;
	} else
#endif
		p = &_gmonparam;
	/*
	 * check that we are profiling
	 * and that we aren't recursively invoked.
	 */
	if (p->state != GMON_PROF_ON)
		return;
#if defined(_KERNEL) && !defined(_RUMPKERNEL)
	MCOUNT_ENTER;
#ifdef MULTIPROCESSOR
	__cpu_simple_lock(&__mcount_lock);
	__insn_barrier();
#endif
#endif
	p->state = GMON_PROF_BUSY;
	/*
	 * check that frompcindex is a reasonable pc value.
	 * for example:	signal catchers get called from the stack,
	 *		not from text space.  too bad.
	 */
	frompc -= p->lowpc;
	if (frompc > p->textsize)
		goto done;

#if (HASHFRACTION & (HASHFRACTION - 1)) == 0
	if (p->hashfraction == HASHFRACTION)
		frompcindex =
		    &p->froms[
		    (size_t)(frompc / (HASHFRACTION * sizeof(*p->froms)))];
	else
#endif
		frompcindex =
		    &p->froms[
		    (size_t)(frompc / (p->hashfraction * sizeof(*p->froms)))];
	toindex = *frompcindex;
	if (toindex == 0) {
		/*
		 *	first time traversing this arc
		 */
		toindex = ++p->tos[0].link;
		if (toindex >= p->tolimit)
			/* halt further profiling */
			goto overflow;

		*frompcindex = (u_short)toindex;
		top = &p->tos[(size_t)toindex];
		top->selfpc = selfpc;
		top->count = 1;
		top->link = 0;
		goto done;
	}
	top = &p->tos[(size_t)toindex];
	if (top->selfpc == selfpc) {
		/*
		 * arc at front of chain; usual case.
		 */
		top->count++;
		goto done;
	}
	/*
	 * have to go looking down chain for it.
	 * top points to what we are looking at,
	 * prevtop points to previous top.
	 * we know it is not at the head of the chain.
	 */
	for (; /* goto done */; ) {
		if (top->link == 0) {
			/*
			 * top is end of the chain and none of the chain
			 * had top->selfpc == selfpc.
			 * so we allocate a new tostruct
			 * and link it to the head of the chain.
			 */
			toindex = ++p->tos[0].link;
			if (toindex >= p->tolimit)
				goto overflow;

			top = &p->tos[(size_t)toindex];
			top->selfpc = selfpc;
			top->count = 1;
			top->link = *frompcindex;
			*frompcindex = (u_short)toindex;
			goto done;
		}
		/*
		 * otherwise, check the next arc on the chain.
		 */
		prevtop = top;
		top = &p->tos[top->link];
		if (top->selfpc == selfpc) {
			/*
			 * there it is.
			 * increment its count
			 * move it to the head of the chain.
			 */
			top->count++;
			toindex = prevtop->link;
			prevtop->link = top->link;
			top->link = *frompcindex;
			*frompcindex = (u_short)toindex;
			goto done;
		}
	}
done:
	p->state = GMON_PROF_ON;
#if defined(_KERNEL) && !defined(_RUMPKERNEL)
#ifdef MULTIPROCESSOR
	__insn_barrier();
	__cpu_simple_unlock(&__mcount_lock);
#endif
	MCOUNT_EXIT;
#endif
	return;

overflow:
	p->state = GMON_PROF_ERROR;
#if defined(_KERNEL) && !defined(_RUMPKERNEL)
#ifdef MULTIPROCESSOR
	__insn_barrier();
	__cpu_simple_unlock(&__mcount_lock);
#endif
	MCOUNT_EXIT;
#endif
	return;
}
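All eight examples use the same minimal MI spinlock interface from the BSD <machine/lock.h>. A self-contained skeleton of that interface for reference (the lock must be statically initialized to __SIMPLELOCK_UNLOCKED, or set up with __cpu_simple_lock_init(), before first use):

#include <machine/lock.h>

static __cpu_simple_lock_t example_lock = __SIMPLELOCK_UNLOCKED;

void
example_critical_section(void)
{
	__cpu_simple_lock(&example_lock);
	/* ... short critical section; never sleep while held ... */
	__cpu_simple_unlock(&example_lock);

	/* non-blocking variant: nonzero return means the lock was taken */
	if (__cpu_simple_lock_try(&example_lock)) {
		/* ... */
		__cpu_simple_unlock(&example_lock);
	}
}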