Example #1
0
void
cpu_vmspace_exec(lwp_t *l, vaddr_t start, vaddr_t end)
{
	/*
	 * We need to turn UX on or off so that copyout/copyin will work
	 * correctly before setregs gets called.
	 */
	uint32_t sr = mips_cp0_status_read();
	if (end != (uint32_t) end) {
		mips_cp0_status_write(sr | MIPS3_SR_UX);
	} else {
		mips_cp0_status_write(sr & ~MIPS3_SR_UX);
	}
}
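The conditional above toggles MIPS3_SR_UX depending on whether the new image's address space still fits in 32 bits: casting end to uint32_t truncates it, so the comparison only differs for a 64-bit address space. A minimal standalone sketch of that width test, using hypothetical names that are not part of the kernel source above, is:

/*
 * Sketch only: va_end_needs_ux() is illustrative and not part of
 * Example #1; it just restates the truncation test in isolation.
 */
#include <stdbool.h>
#include <stdint.h>

static bool
va_end_needs_ux(uint64_t end)
{
	/*
	 * True when end does not survive a round trip through 32 bits,
	 * i.e. the address space extends beyond 4 GiB and UX must be set.
	 */
	return end != (uint64_t)(uint32_t)end;
}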
Example #2
0
void
interrupt_init(void)
{
	static const char *softintr_names[] = IPL_SOFTNAMES;
	struct playstation2_soft_intr *asi;
	int i;

	evcnt_attach_static(&_playstation2_evcnt.clock);
	evcnt_attach_static(&_playstation2_evcnt.sbus);
	evcnt_attach_static(&_playstation2_evcnt.dmac);

	for (i = 0; i < _IPL_NSOFT; i++) {
		asi = &playstation2_soft_intrs[i];
		TAILQ_INIT(&asi->softintr_q);

		asi->softintr_ipl = IPL_SOFT + i;
		simple_lock_init(&asi->softintr_slock);
		evcnt_attach_dynamic(&asi->softintr_evcnt, EVCNT_TYPE_INTR,
		    NULL, "soft", softintr_names[i]);
	}

	/* XXX Establish legacy soft interrupt handlers. */
	softnet_intrhand = softintr_establish(IPL_SOFTNET,
	    (void (*)(void *))netintr, NULL);
	KDASSERT(softnet_intrhand != NULL);

	/* install software interrupt handler */
	intc_intr_establish(I_CH10_TIMER1, IPL_SOFT, timer1_intr, 0);
	intc_intr_establish(I_CH11_TIMER2, IPL_SOFTCLOCK, timer2_intr, 0);

	/* IPL_SOFTNET and IPL_SOFTSERIAL share an interrupt. */
	intc_intr_establish(I_CH12_TIMER3, IPL_SOFTNET, timer3_intr, 0);

	/* enable SIF BIOS access */
	md_imask = ~D_STAT_CIM_BIT(D_CH5_SIF0);
	mips_cp0_status_write(0x00010801);
}
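interrupt_init() only sets up the per-level soft interrupt queues and the legacy softnet hook; drivers then register their own handlers against those queues. A hedged sketch of such a consumer, assuming the same NetBSD-style softintr_establish()/softintr_schedule() pair that the function above already uses (the my_*() names are hypothetical), might look like:

/*
 * Sketch only: registering a soft interrupt handler and scheduling it
 * from a hard interrupt.  Names prefixed with my_ are hypothetical;
 * the softintr_*() calls follow the API used above.
 */
static void *my_soft_cookie;

static void
my_soft_handler(void *arg)
{
	/* deferred work runs here, at soft interrupt priority */
}

static void
my_soft_attach(void)
{
	my_soft_cookie = softintr_establish(IPL_SOFTNET,
	    my_soft_handler, NULL);
	KDASSERT(my_soft_cookie != NULL);
}

static int
my_hard_intr(void *arg)
{
	/* do the minimum work here, then defer the rest */
	softintr_schedule(my_soft_cookie);
	return 1;
}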
void
smp_init_secondary(u_int32_t cpuid)
{

	if (cpuid >= MAXCPU)
		panic("cpu id exceeds MAXCPU");

	/* tlb init */
	R4K_SetWIRED(0);
	R4K_TLBFlush(num_tlbentries);
	R4K_SetWIRED(VMWIRED_ENTRIES);
	MachSetPID(0);

	Mips_SyncCache();

	mips_cp0_status_write(0);
	while (!aps_ready)
		;

	mips_sync();
	mips_sync();
	/* Initialize curthread. */
	KASSERT(PCPU_GET(idlethread) != NULL, ("no idle thread"));
	PCPU_SET(curthread, PCPU_GET(idlethread));

	mtx_lock_spin(&ap_boot_mtx);

	smp_cpus++;

	CTR1(KTR_SMP, "SMP: AP CPU #%d Launched", PCPU_GET(cpuid));

	/* Build our map of 'other' CPUs. */
	PCPU_SET(other_cpus, all_cpus & ~PCPU_GET(cpumask));

	printf("SMP: AP CPU #%d Launched!\n", PCPU_GET(cpuid));

	if (smp_cpus == mp_ncpus) {
		smp_started = 1;
		smp_active = 1;
	}

	mtx_unlock_spin(&ap_boot_mtx);

	while (smp_started == 0)
		; /* nothing */
	/* Enable interrupts. */
	mips_cp0_status_write(SR_INT_ENAB);
	/* ok, now grab sched_lock and enter the scheduler */
	mtx_lock_spin(&sched_lock);

	/*
	 * Correct spinlock nesting.  The idle thread context that we are
	 * borrowing was created so that it would start out with a single
	 * spin lock (sched_lock) held in fork_trampoline().  Since we've
	 * explicitly acquired locks in this function, the nesting count
	 * is now 2 rather than 1.  Since we are nested, calling
	 * spinlock_exit() will simply adjust the counts without allowing
	 * spin-lock-using code to interrupt us.
	 */
	spinlock_exit();
	KASSERT(curthread->td_md.md_spinlock_count == 1, ("invalid count"));

	binuptime(PCPU_PTR(switchtime));
	PCPU_SET(switchticks, ticks);

	/* kick off the clock on this cpu */
	mips_start_timer();
	cpu_throw(NULL, choosethread());	/* doesn't return */

	panic("scheduler returned us to %s", __func__);
}
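smp_init_secondary() spins first on aps_ready and later on smp_started, which only works if the boot processor performs the matching half of the handshake. A hedged sketch of that boot-processor side, reusing the variable names above (the function body itself is illustrative, not the actual FreeBSD code), could be:

/*
 * Sketch only: boot-processor side of the AP startup handshake.
 * aps_ready, smp_started and mp_ncpus are the variables referenced
 * in smp_init_secondary(); the body is illustrative.
 */
static void
release_aps(void *dummy)
{
	if (mp_ncpus == 1)
		return;			/* uniprocessor: nothing to release */

	/* let every AP fall out of its "while (!aps_ready)" loop */
	atomic_store_rel_int(&aps_ready, 1);

	/* the last AP to check in sets smp_started; wait for it */
	while (smp_started == 0)
		;			/* spin */
}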