Example #1
static void
ap_start(phandle_t node, u_int mid, u_int cpu_impl)
{
	volatile struct cpu_start_args *csa;
	struct pcpu *pc;
	register_t s;
	vm_offset_t va;
	u_int cpuid;
	uint32_t clock;

	if (cpuids > mp_maxid)
		return;

	if (OF_getprop(node, "clock-frequency", &clock, sizeof(clock)) <= 0)
		panic("%s: couldn't determine CPU frequency", __func__);
	if (clock != PCPU_GET(clock))
		tick_et_use_stick = 1;

	csa = &cpu_start_args;
	csa->csa_state = 0;
	sun4u_startcpu(node, (void *)mp_tramp, 0);
	s = intr_disable();
	while (csa->csa_state != CPU_TICKSYNC)
		;
	membar(StoreLoad);
	csa->csa_tick = rd(tick);
	if (cpu_impl == CPU_IMPL_SPARC64V ||
	    cpu_impl >= CPU_IMPL_ULTRASPARCIII) {
		while (csa->csa_state != CPU_STICKSYNC)
			;
		membar(StoreLoad);
		csa->csa_stick = rdstick();
	}
	while (csa->csa_state != CPU_INIT)
		;
	csa->csa_tick = csa->csa_stick = 0;
	intr_restore(s);

	cpuid = cpuids++;
	cpuid_to_mid[cpuid] = mid;
	cpu_identify(csa->csa_ver, clock, cpuid);

	va = kmem_malloc(PCPU_PAGES * PAGE_SIZE, M_WAITOK | M_ZERO);
	pc = (struct pcpu *)(va + (PCPU_PAGES * PAGE_SIZE)) - 1;
	pcpu_init(pc, cpuid, sizeof(*pc));
	dpcpu_init((void *)kmem_malloc(DPCPU_SIZE, M_WAITOK | M_ZERO), cpuid);
	pc->pc_addr = va;
	pc->pc_clock = clock;
	pc->pc_impl = cpu_impl;
	pc->pc_mid = mid;
	pc->pc_node = node;

	cache_init(pc);

	CPU_SET(cpuid, &all_cpus);
	intr_add_cpu(cpuid);
}
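The spin-then-membar pairing above is the heart of the BSP/AP handshake: the AP publishes its progress through csa_state, and the BSP must order its read of that flag before sampling the tick counter. A minimal, self-contained sketch of the same handshake in portable C11 atomics follows; every name in it is hypothetical, not from the FreeBSD source.

#include <stdatomic.h>
#include <stdint.h>

enum { AP_IDLE, AP_TICKSYNC };

static _Atomic int ap_state = AP_IDLE;
static uint64_t shared_tick;

/* AP side: announce arrival at the tick-sync rendezvous. */
void
ap_announce(void)
{
	atomic_store_explicit(&ap_state, AP_TICKSYNC, memory_order_release);
}

/* BSP side: spin until the AP checks in, then publish the counter. */
void
bsp_sync(uint64_t now)
{
	while (atomic_load_explicit(&ap_state, memory_order_acquire) !=
	    AP_TICKSYNC)
		;	/* spin, as ap_start() spins on csa_state */
	shared_tick = now;	/* ordered after the acquire load */
}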
Example #2
physAddress_t
XX_VirtToPhys(void *addr)
{
	vm_paddr_t paddr;
	int cpu;

	cpu = PCPU_GET(cpuid);

	/* Handle NULL address */
	if (addr == NULL)
		return (-1);

	/* Check CCSR */
	if ((vm_offset_t)addr >= ccsrbar_va &&
	    (vm_offset_t)addr < ccsrbar_va + ccsrbar_size)
		return (((vm_offset_t)addr - ccsrbar_va) + ccsrbar_pa);

	/* Handle BMAN mappings */
	if (((vm_offset_t)addr >= XX_PInfo.portal_ce_va[BM_PORTAL]) &&
	    ((vm_offset_t)addr < XX_PInfo.portal_ce_va[BM_PORTAL] +
	    XX_PInfo.portal_ce_size[BM_PORTAL][cpu]))
		return (XX_PInfo.portal_ce_pa[BM_PORTAL][cpu] +
		    (vm_offset_t)addr - XX_PInfo.portal_ce_va[BM_PORTAL]);

	if (((vm_offset_t)addr >= XX_PInfo.portal_ci_va[BM_PORTAL]) &&
	    ((vm_offset_t)addr < XX_PInfo.portal_ci_va[BM_PORTAL] +
	    XX_PInfo.portal_ci_size[BM_PORTAL][cpu]))
		return (XX_PInfo.portal_ci_pa[BM_PORTAL][cpu] +
		    (vm_offset_t)addr - XX_PInfo.portal_ci_va[BM_PORTAL]);

	/* Handle QMAN mappings */
	if (((vm_offset_t)addr >= XX_PInfo.portal_ce_va[QM_PORTAL]) &&
	    ((vm_offset_t)addr < XX_PInfo.portal_ce_va[QM_PORTAL] +
	    XX_PInfo.portal_ce_size[QM_PORTAL][cpu]))
		return (XX_PInfo.portal_ce_pa[QM_PORTAL][cpu] +
		    (vm_offset_t)addr - XX_PInfo.portal_ce_va[QM_PORTAL]);

	if (((vm_offset_t)addr >= XX_PInfo.portal_ci_va[QM_PORTAL]) &&
	    ((vm_offset_t)addr < XX_PInfo.portal_ci_va[QM_PORTAL] +
	    XX_PInfo.portal_ci_size[QM_PORTAL][cpu]))
		return (XX_PInfo.portal_ci_pa[QM_PORTAL][cpu] +
		    (vm_offset_t)addr - XX_PInfo.portal_ci_va[QM_PORTAL]);

	if (PMAP_HAS_DMAP && (vm_offset_t)addr >= DMAP_BASE_ADDRESS &&
	    (vm_offset_t)addr <= DMAP_MAX_ADDRESS)
		return (DMAP_TO_PHYS((vm_offset_t)addr));
	else
		paddr = pmap_kextract((vm_offset_t)addr);

	if (paddr == 0)
		printf("NetCommSW: "
		    "Unable to translate virtual address %p!\n", addr);
	else
		pmap_track_page(kernel_pmap, (vm_offset_t)addr);

	return (paddr);
}
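The DMAP fast path above is pure arithmetic: inside the direct-map window, a virtual address differs from its physical address by a constant, and only addresses outside the window need the page-table walk in pmap_kextract(). A sketch of that fast path, with a hypothetical window base:

#include <stdint.h>

#define DMAP_BASE	0xc000000000000000ULL	/* hypothetical window start */

/* Direct-map translation: VA and PA differ by a constant offset. */
static inline uint64_t
dmap_to_phys(uint64_t va)
{
	return (va - DMAP_BASE);
}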
Example #3
void
lapic_setup(int boot)
{
	struct lapic *la;
	u_int32_t maxlvt;
	register_t saveintr;
	char buf[MAXCOMLEN + 1];

	la = &lapics[lapic_id()];
	KASSERT(la->la_present, ("missing APIC structure"));
	saveintr = intr_disable();
	maxlvt = (lapic->version & APIC_VER_MAXLVT) >> MAXLVTSHIFT;

	/* Initialize the TPR to allow all interrupts. */
	lapic_set_tpr(0);

	/* Setup spurious vector and enable the local APIC. */
	lapic_enable();

	/* Program LINT[01] LVT entries. */
	lapic->lvt_lint0 = lvt_mode(la, LVT_LINT0, lapic->lvt_lint0);
	lapic->lvt_lint1 = lvt_mode(la, LVT_LINT1, lapic->lvt_lint1);

	/* Program the PMC LVT entry if present. */
	if (maxlvt >= LVT_PMC)
		lapic->lvt_pcint = lvt_mode(la, LVT_PMC, lapic->lvt_pcint);

	/* Program timer LVT and setup handler. */
	lapic->lvt_timer = lvt_mode(la, LVT_TIMER, lapic->lvt_timer);
	if (boot) {
		snprintf(buf, sizeof(buf), "cpu%d:timer", PCPU_GET(cpuid));
		intrcnt_add(buf, &la->la_timer_count);
	}

	/* Setup the timer if configured. */
	if (la->la_timer_mode != 0) {
		KASSERT(la->la_timer_period != 0, ("lapic%u: zero divisor",
		    lapic_id()));
		lapic_timer_set_divisor(lapic_timer_divisor);
		if (la->la_timer_mode == 1)
			lapic_timer_periodic(la->la_timer_period, 1);
		else
			lapic_timer_oneshot(la->la_timer_period, 1);
	}

	/* Program error LVT and clear any existing errors. */
	lapic->lvt_error = lvt_mode(la, LVT_ERROR, lapic->lvt_error);
	lapic->esr = 0;

	/* XXX: Thermal LVT */

	/* Program the CMCI LVT entry if present. */
	if (maxlvt >= LVT_CMCI)
		lapic->lvt_cmci = lvt_mode(la, LVT_CMCI, lapic->lvt_cmci);

	intr_restore(saveintr);
}
Example #4
/*
 * Shut down the system cleanly to prepare for reboot, halt, or power off.
 */
void
kern_reboot(int howto)
{
	static int once = 0;

#if defined(SMP)
	/*
	 * Bind us to CPU 0 so that all shutdown code runs there.  Some
	 * systems don't shut down properly (e.g., ACPI power off) if we
	 * run on another processor.
	 */
	if (!SCHEDULER_STOPPED()) {
		thread_lock(curthread);
		sched_bind(curthread, 0);
		thread_unlock(curthread);
		KASSERT(PCPU_GET(cpuid) == 0, ("boot: not running on cpu 0"));
	}
#endif
	/* We're in the process of rebooting. */
	rebooting = 1;

	/* We are out of the debugger now. */
	kdb_active = 0;

	/*
	 * Do any callouts that should be done BEFORE syncing the filesystems.
	 */
	EVENTHANDLER_INVOKE(shutdown_pre_sync, howto);

	/*
	 * Now sync the filesystems.
	 */
	if (!cold && (howto & RB_NOSYNC) == 0 && once == 0) {
		once = 1;
		bufshutdown(show_busybufs);
	}

	print_uptime();

	cngrab();

	/*
	 * Ok, now do things that assume all filesystem activity has
	 * been completed.
	 */
	EVENTHANDLER_INVOKE(shutdown_post_sync, howto);

	if ((howto & (RB_HALT|RB_DUMP)) == RB_DUMP && !cold && !dumping) 
		doadump(TRUE);

	/* Now that we're going to really halt the system... */
	EVENTHANDLER_INVOKE(shutdown_final, howto);

	for(;;) ;	/* safety against shutdown_reset not working */
	/* NOTREACHED */
}
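The three EVENTHANDLER_INVOKE() stages are how subsystems participate in this shutdown sequence. A sketch of hooking the shutdown_final stage with the stock FreeBSD eventhandler API; the handler body and device name are hypothetical:

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/reboot.h>
#include <sys/eventhandler.h>

static void
mydev_shutdown_final(void *arg, int howto)
{
	/* Runs after shutdown_pre_sync and shutdown_post_sync. */
	if ((howto & RB_POWEROFF) != 0)
		printf("mydev: preparing for power-off\n");
}

EVENTHANDLER_DEFINE(shutdown_final, mydev_shutdown_final, NULL,
    SHUTDOWN_PRI_DEFAULT);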
Example #5
int
cpu_mp_probe(void)
{
	int i, cpus;

	/* XXX: Need to check for valid platforms here. */

	boot_cpu_id = PCPU_GET(cpuid);
	KASSERT(boot_cpu_id == 0, ("cpu_mp_probe() called on non-primary CPU"));
	all_cpus = PCPU_GET(cpumask);
	mp_ncpus = 1;

	/* Make sure we have at least one secondary CPU. */
	cpus = 0;
	for (i = 0; i < MAXCPU; i++) {
		if (i == boot_cpu_id)
			continue;
		/* A full port would also check that slot i is populated. */
		cpus++;
	}
	return (cpus);
}
Example #6
void
pcpu_initclock(void)
{

	PCPU_SET(clockadj, 0);
	PCPU_SET(clock, ia64_get_itc());
	ia64_set_itm(PCPU_GET(clock) + ia64_clock_reload);
	ia64_set_itv(CLOCK_VECTOR);	/* highest priority class */
	ia64_srlz_d();
}
Example #7
int
mips_pic_intr(void *arg)
{
	struct mips_pic_softc *sc = arg;
	register_t cause, status;
	struct intr_irqsrc *isrc;
	int i, intr;

	cause = mips_rd_cause();
	status = mips_rd_status();
	intr = (cause & MIPS_INT_MASK) >> 8;
	/*
	 * Do not handle masked interrupts.  They were masked by the
	 * pre_ithread function (mips_mask_XXX_intr) and will be
	 * unmasked once the ithread is through with the handler.
	 */
	intr &= (status & MIPS_INT_MASK) >> 8;
	while ((i = fls(intr)) != 0) {
		i--; /* Get a 0-offset interrupt. */
		intr &= ~(1 << i);

		isrc = sc->pic_irqs[i];
		if (isrc == NULL) {
			device_printf(sc->pic_dev,
			    "Stray interrupt %u detected\n", i);
			pic_irq_mask(sc, i);
			continue;
		}

		intr_irq_dispatch(isrc, curthread->td_intr_frame);
	}

	KASSERT(i == 0, ("all interrupts handled"));

#ifdef HWPMC_HOOKS
	if (pmc_hook && (PCPU_GET(curthread)->td_pflags & TDP_CALLCHAIN)) {
		struct trapframe *tf = PCPU_GET(curthread)->td_intr_frame;

		pmc_hook(PCPU_GET(curthread), PMC_FN_USER_CALLCHAIN, tf);
	}
#endif
	return (FILTER_HANDLED);
}
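The fls() idiom above retires pending causes from the highest set bit down. The same loop in isolation; fls() here is the BSD one-based find-last-set from <strings.h>, so substitute a portable fallback on other platforms:

#include <strings.h>

static void
dispatch_pending(unsigned int intr, void (*handle)(int))
{
	int i;

	while ((i = fls(intr)) != 0) {
		i--;			/* fls() is 1-based */
		intr &= ~(1u << i);	/* retire this source */
		handle(i);
	}
}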
Example #8
void
platform_ipi_clear(void)
{
	int cpuid = PCPU_GET(cpuid);
	uint32_t action;

	action = (cpuid == 0) ? mips_rd_xburst_mbox0() : mips_rd_xburst_mbox1();
	KASSERT(action == 1, ("CPU %d: unexpected IPIs: %#x", cpuid, action));
	mips_wr_xburst_core_sts(~(JZ_CORESTS_MIRQ0P << cpuid));
}
Example #9
void
gdb_cpu_setreg(int regnum, void *val)
{
	switch (regnum) {
	case GDB_REG_PC:
		kdb_thrctx->pcb_context[10] = *(register_t *)val;
		if (kdb_thread == PCPU_GET(curthread))
			kdb_frame->pc = *(register_t *)val;
	}
}
Example #10
int
hardclockintr(struct trapframe *frame)
{

	if (PCPU_GET(cpuid) == 0)
		hardclock(TRAPF_USERMODE(frame), TRAPF_PC(frame));
	else
		hardclock_cpu(TRAPF_USERMODE(frame));
	return (FILTER_HANDLED);
}
Example #11
void
platform_mp_setmaxid(void)
{

	mp_maxid = PCPU_GET(cpuid);
	mp_ncpus = ofw_cpu_early_foreach(virt_maxid, true);
	if (mp_ncpus < 1)
		mp_ncpus = 1;
	mp_ncpus = MIN(mp_ncpus, MAXCPU);
}
Example #12
int
fill_fpregs(struct thread *td, struct fpreg *fpregs)
{
#if defined(CPU_HAVEFPU)
	if (td == PCPU_GET(fpcurthread))
		MipsSaveCurFPState(td);
	memcpy(fpregs, &td->td_frame->f0, sizeof(struct fpreg)); 
#endif
	return (0);
}
Example #13
int
ia64_highfp_enable(struct thread *td, struct trapframe *tf)
{
	struct pcb *pcb;
	struct pcpu *cpu;
	struct thread *td1;

	pcb = td->td_pcb;

	mtx_lock_spin(&ia64_highfp_mtx);
	cpu = pcb->pcb_fpcpu;
#ifdef SMP
	if (cpu != NULL && cpu != pcpup) {
		KASSERT(cpu->pc_fpcurthread == td,
		    ("cpu->pc_fpcurthread != td"));
		ia64_highfp_ipi(cpu);
	}
#endif
	td1 = PCPU_GET(fpcurthread);
	if (td1 != NULL && td1 != td) {
		KASSERT(td1->td_pcb->pcb_fpcpu == pcpup,
		    ("td1->td_pcb->pcb_fpcpu != pcpup"));
		save_high_fp(&td1->td_pcb->pcb_high_fp);
		td1->td_frame->tf_special.psr |= IA64_PSR_DFH;
		td1->td_pcb->pcb_fpcpu = NULL;
		PCPU_SET(fpcurthread, NULL);
		td1 = NULL;
	}
	if (td1 == NULL) {
		KASSERT(pcb->pcb_fpcpu == NULL, ("pcb->pcb_fpcpu != NULL"));
		KASSERT(PCPU_GET(fpcurthread) == NULL,
		    ("PCPU_GET(fpcurthread) != NULL"));
		restore_high_fp(&pcb->pcb_high_fp);
		PCPU_SET(fpcurthread, td);
		pcb->pcb_fpcpu = pcpup;
		tf->tf_special.psr &= ~IA64_PSR_MFH;
	}
	tf->tf_special.psr &= ~IA64_PSR_DFH;
	mtx_unlock_spin(&ia64_highfp_mtx);

	return ((td1 != NULL) ? 1 : 0);
}
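This function and ia64_highfp_save_ipi() in Example #16 below implement the usual lazy-FPU ownership dance: at most one thread owns the high-FP state per CPU, and a new user must first evict the previous owner. A stripped-down sketch of that invariant, with hypothetical names:

struct thread;

static struct thread *fp_owner;		/* per-CPU pointer in a real kernel */

extern void fp_state_save(struct thread *);	/* assumed save hook */

void
fp_take_ownership(struct thread *td)
{
	if (fp_owner != NULL && fp_owner != td) {
		fp_state_save(fp_owner);	/* evict the previous owner */
		fp_owner = NULL;
	}
	if (fp_owner == NULL)
		fp_owner = td;			/* td now owns the unit */
}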
Example #14
void *
XX_PhysToVirt(physAddress_t addr)
{
	struct pv_entry *pv;
	vm_page_t page;
	int cpu;

	/* Check CCSR */
	if (addr >= ccsrbar_pa && addr < ccsrbar_pa + ccsrbar_size)
		return ((void *)((vm_offset_t)(addr - ccsrbar_pa) +
		    ccsrbar_va));

	cpu = PCPU_GET(cpuid);

	/* Handle BMAN mappings */
	if ((addr >= XX_PInfo.portal_ce_pa[BM_PORTAL][cpu]) &&
	    (addr < XX_PInfo.portal_ce_pa[BM_PORTAL][cpu] +
	    XX_PInfo.portal_ce_size[BM_PORTAL][cpu]))
		return ((void *)(XX_PInfo.portal_ce_va[BM_PORTAL] +
		    (vm_offset_t)(addr - XX_PInfo.portal_ce_pa[BM_PORTAL][cpu])));

	if ((addr >= XX_PInfo.portal_ci_pa[BM_PORTAL][cpu]) &&
	    (addr < XX_PInfo.portal_ci_pa[BM_PORTAL][cpu] +
	    XX_PInfo.portal_ci_size[BM_PORTAL][cpu]))
		return ((void *)(XX_PInfo.portal_ci_va[BM_PORTAL] +
		    (vm_offset_t)(addr - XX_PInfo.portal_ci_pa[BM_PORTAL][cpu])));

	/* Handle QMAN mappings */
	if ((addr >= XX_PInfo.portal_ce_pa[QM_PORTAL][cpu]) &&
	    (addr < XX_PInfo.portal_ce_pa[QM_PORTAL][cpu] +
	    XX_PInfo.portal_ce_size[QM_PORTAL][cpu]))
		return ((void *)(XX_PInfo.portal_ce_va[QM_PORTAL] +
		    (vm_offset_t)(addr - XX_PInfo.portal_ce_pa[QM_PORTAL][cpu])));

	if ((addr >= XX_PInfo.portal_ci_pa[QM_PORTAL][cpu]) &&
	    (addr < XX_PInfo.portal_ci_pa[QM_PORTAL][cpu] +
	    XX_PInfo.portal_ci_size[QM_PORTAL][cpu]))
		return ((void *)(XX_PInfo.portal_ci_va[QM_PORTAL] +
		    (vm_offset_t)(addr - XX_PInfo.portal_ci_pa[QM_PORTAL][cpu])));

	page = PHYS_TO_VM_PAGE(addr);
	pv = TAILQ_FIRST(&page->md.pv_list);

	if (pv != NULL)
		return ((void *)(pv->pv_va + ((vm_offset_t)addr & PAGE_MASK)));

	if (PMAP_HAS_DMAP)
		return ((void *)(uintptr_t)PHYS_TO_DMAP(addr));

	printf("NetCommSW: "
	    "Unable to translate physical address 0x%09jx!\n", (uintmax_t)addr);

	return (NULL);
}
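Every portal case in this routine and in XX_VirtToPhys() is the same window test plus constant-offset arithmetic. A generic sketch of the physical-to-virtual direction; window_p2v() is a hypothetical helper, not part of the NetCommSW API:

#include <stdint.h>
#include <stddef.h>

static void *
window_p2v(uint64_t pa, uint64_t win_pa, uint64_t win_size, uintptr_t win_va)
{
	if (pa < win_pa || pa >= win_pa + win_size)
		return (NULL);			/* outside this window */
	return ((void *)(win_va + (uintptr_t)(pa - win_pa)));
}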
Example #15
void
openpic_eoi(device_t dev, u_int irq __unused)
{
	struct openpic_softc *sc;
	u_int cpuid;

	cpuid = (dev == root_pic) ? PCPU_GET(cpuid) : 0;

	sc = device_get_softc(dev);
	openpic_write(sc, OPENPIC_PCPU_EOI(cpuid), 0);
}
Example #16
int
ia64_highfp_save_ipi(void)
{
	struct thread *td;

	mtx_lock_spin(&ia64_highfp_mtx);
	td = PCPU_GET(fpcurthread);
	if (td != NULL) {
		KASSERT(td->td_pcb->pcb_fpcpu == pcpup,
		    ("td->td_pcb->pcb_fpcpu != pcpup"));
		save_high_fp(&td->td_pcb->pcb_high_fp);
		td->td_frame->tf_special.psr |= IA64_PSR_DFH;
		td->td_pcb->pcb_fpcpu = NULL;
		PCPU_SET(fpcurthread, NULL);
	}
	mtx_unlock_spin(&ia64_highfp_mtx);
	wakeup(&PCPU_GET(fpcurthread));

	return ((td != NULL) ? 1 : 0);
}
Example #17
static void
chrp_smp_ap_init(platform_t platform)
{
	if (!(mfmsr() & PSL_HV)) {
		/* Set interrupt priority */
		phyp_hcall(H_CPPR, 0xff);

		/* Register VPA */
		phyp_hcall(H_REGISTER_VPA, 1UL, PCPU_GET(cpuid), splpar_vpa);
	}
}
Example #18
void
intr_irq_handler(struct trapframe *frame)
{
	struct intr_event *event;
	int i;

	VM_CNT_INC(v_intr);
	i = -1;
	while ((i = arm_get_next_irq(i)) != -1) {
		intrcnt[i]++;
		event = intr_events[i];
		if (intr_event_handle(event, frame) != 0) {
			/* XXX: Log stray IRQs */
			arm_mask_irq(i);
		}
	}
#ifdef HWPMC_HOOKS
	if (pmc_hook && (PCPU_GET(curthread)->td_pflags & TDP_CALLCHAIN))
		pmc_hook(PCPU_GET(curthread), PMC_FN_USER_CALLCHAIN, frame);
#endif
}
Example #19
int
uiomoveco(void *cp, int n, struct uio *uio, int disposable)
{
	struct iovec *iov;
	u_int cnt;
	int error;

	KASSERT(uio->uio_rw == UIO_READ || uio->uio_rw == UIO_WRITE,
	    ("uiomoveco: mode"));
	KASSERT(uio->uio_segflg != UIO_USERSPACE || uio->uio_td == curthread,
	    ("uiomoveco proc"));

	while (n > 0 && uio->uio_resid) {
		iov = uio->uio_iov;
		cnt = iov->iov_len;
		if (cnt == 0) {
			uio->uio_iov++;
			uio->uio_iovcnt--;
			continue;
		}
		if (cnt > n)
			cnt = n;

		switch (uio->uio_segflg) {

		case UIO_USERSPACE:
			if (ticks - PCPU_GET(switchticks) >= hogticks)
				uio_yield();

			error = userspaceco(cp, cnt, uio, disposable);

			if (error)
				return (error);
			break;

		case UIO_SYSSPACE:
			if (uio->uio_rw == UIO_READ)
				bcopy(cp, iov->iov_base, cnt);
			else
				bcopy(iov->iov_base, cp, cnt);
			break;
		case UIO_NOCOPY:
			break;
		}
		iov->iov_base = (char *)iov->iov_base + cnt;
		iov->iov_len -= cnt;
		uio->uio_resid -= cnt;
		uio->uio_offset += cnt;
		cp = (char *)cp + cnt;
		n -= cnt;
	}
	return (0);
}
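The bookkeeping at the bottom of the loop advances the iovec cursor in lockstep with the residual and offset counters. The same arithmetic isolated as a sketch, using only userland types:

#include <sys/types.h>
#include <sys/uio.h>
#include <stddef.h>

static void
iov_advance(struct iovec *iov, size_t *resid, off_t *off, size_t cnt)
{
	iov->iov_base = (char *)iov->iov_base + cnt;
	iov->iov_len -= cnt;
	*resid -= cnt;
	*off += cnt;
}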
Example #20
/*
 * Save altivec state without dropping ownership.  This will only save state if
 * the current vector-thread is `td'.
 */
void
save_vec_nodrop(struct thread *td)
{
	struct thread *vtd;

	vtd = PCPU_GET(vecthread);
	if (td != vtd) {
		return;
	}

	save_vec_int(td);
}
Example #21
/*
 * Manually setup an iterator with the given details.
 */
int
vm_domain_iterator_set(struct vm_domain_iterator *vi,
    vm_domain_policy_type_t vt, int domain)
{

#ifdef VM_NUMA_ALLOC
	switch (vt) {
	case VM_POLICY_FIXED_DOMAIN:
		vi->policy = VM_POLICY_FIXED_DOMAIN;
		vi->domain = domain;
		vi->n = 1;
		break;
	case VM_POLICY_FIXED_DOMAIN_ROUND_ROBIN:
		vi->policy = VM_POLICY_FIXED_DOMAIN_ROUND_ROBIN;
		vi->domain = domain;
		vi->n = vm_ndomains;
		break;
	case VM_POLICY_FIRST_TOUCH:
		vi->policy = VM_POLICY_FIRST_TOUCH;
		vi->domain = PCPU_GET(domain);
		vi->n = 1;
		break;
	case VM_POLICY_FIRST_TOUCH_ROUND_ROBIN:
		vi->policy = VM_POLICY_FIRST_TOUCH_ROUND_ROBIN;
		vi->domain = PCPU_GET(domain);
		vi->n = vm_ndomains;
		break;
	case VM_POLICY_ROUND_ROBIN:
	default:
		vi->policy = VM_POLICY_ROUND_ROBIN;
		vi->domain = -1;
		vi->n = vm_ndomains;
		break;
	}
#else
	vi->domain = 0;
	vi->n = 1;
#endif
	return (0);
}
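A hypothetical caller, assuming the declarations used above are in scope: with VM_POLICY_FIRST_TOUCH the domain argument is ignored and the iterator latches the current CPU's domain.

static int
domain_for_caller(void)
{
	struct vm_domain_iterator vi;

	/* First-touch: allocate on the domain of the current CPU. */
	vm_domain_iterator_set(&vi, VM_POLICY_FIRST_TOUCH, 0);
	return (vi.domain);
}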
Example #22
static pt_entry_t *
rom_lev1map()
{
	struct alpha_pcb *apcb;
	struct pcb *cpcb;

	/*
	 * We may be called before the first context switch
	 * after alpha_init(), in which case we just need
	 * to use the kernel Lev1map.
	 */
	if (PCPU_GET(curpcb) == NULL)
		return (Lev1map);

	/*
	 * Find the level 1 map that we're currently running on.
	 */
	cpcb = PCPU_GET(curpcb);
	apcb = (struct alpha_pcb *)ALPHA_PHYS_TO_K0SEG((vm_offset_t)cpcb);

	return ((pt_entry_t *)ALPHA_PHYS_TO_K0SEG(alpha_ptob(apcb->apcb_ptbr)));
}
Example #23
void
efi_arch_leave(void)
{
	pmap_t curpmap;

	curpmap = &curproc->p_vmspace->vm_pmap;
	if (pmap_pcid_enabled && !invpcid_works)
		PCPU_SET(curpmap, curpmap);
	load_cr3(curpmap->pm_cr3 | (pmap_pcid_enabled ?
	    curpmap->pm_pcids[PCPU_GET(cpuid)].pm_pcid : 0));
	if (!pmap_pcid_enabled)
		invltlb();
}
Example #24
void
openpic_ipi(device_t dev, u_int cpu)
{
	struct openpic_softc *sc;

	KASSERT(dev == root_pic, ("Cannot send IPIs from non-root OpenPIC"));

	sc = device_get_softc(dev);
	sched_pin();
	openpic_write(sc, OPENPIC_PCPU_IPI_DISPATCH(PCPU_GET(cpuid), 0),
	    1u << cpu);
	sched_unpin();
}
Example #25
void
kdb_panic(const char *msg)
{
#ifdef SMP
	cpuset_t other_cpus;

	other_cpus = all_cpus;
	CPU_CLR(PCPU_GET(cpuid), &other_cpus);
	stop_cpus_hard(other_cpus);
#endif
	printf("KDB: panic\n");
	panic("%s", msg);
}
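The all_cpus-minus-self computation generalizes to any CPU set. A userland analog using the glibc cpu_set_t macros, which are spelled the same way as the kernel's cpuset_t macros; only a sketch:

#define _GNU_SOURCE
#include <sched.h>

static void
other_cpus_of(cpu_set_t *out, int self, int ncpu)
{
	int i;

	CPU_ZERO(out);
	for (i = 0; i < ncpu; i++)
		CPU_SET(i, out);
	CPU_CLR(self, out);	/* everyone but the caller */
}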
Example #26
/*---------------------- XEN diverged cpu operations -------------------------*/
static void
xen_hvm_cpu_resume(void)
{
	u_int cpuid = PCPU_GET(cpuid);

	/*
	 * Reset pending bitmap IPIs, because Xen doesn't preserve pending
	 * event channels on migration.
	 */
	cpu_ipi_pending[cpuid] = 0;

	/* register vcpu_info area */
	xen_hvm_cpu_init();
}
Example #27
/*
 * Flush a physical page from the data cache.
 */
void
cheetah_dcache_page_inval(vm_paddr_t spa)
{
	vm_paddr_t pa;
	void *cookie;

	KASSERT((spa & PAGE_MASK) == 0,
	    ("%s: pa not page aligned", __func__));
	cookie = ipi_dcache_page_inval(tl_ipi_cheetah_dcache_page_inval, spa);
	for (pa = spa; pa < spa + PAGE_SIZE;
	    pa += PCPU_GET(cache.dc_linesize))
		stxa_sync(pa, ASI_DCACHE_INVALIDATE, 0);
	ipi_wait(cookie);
}
Example #28
void
update_gdt_fsbase(struct thread *td, uint32_t base)
{
	struct user_segment_descriptor *sd;

	if (td != curthread)
		return;
	td->td_pcb->pcb_full_iret = 1;
	critical_enter();
	sd = PCPU_GET(fs32p);
	sd->sd_lobase = base & 0xffffff;
	sd->sd_hibase = (base >> 24) & 0xff;
	critical_exit();
}
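The descriptor update scatters a 32-bit base across a 24-bit low field and an 8-bit high field. The encoding in isolation; the struct is a hypothetical mirror of just the two fields touched above:

#include <stdint.h>

struct split_base {
	uint32_t lobase : 24;	/* bits 0..23 of the base */
	uint32_t hibase : 8;	/* bits 24..31 */
};

static struct split_base
encode_base(uint32_t base)
{
	struct split_base sb;

	sb.lobase = base & 0xffffff;
	sb.hibase = (base >> 24) & 0xff;
	return (sb);
}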
Example #29
/*
 * Handle the return of a child process from fork1().  This function
 * is called from the MD fork_trampoline() entry point.
 */
void
fork_exit(void (*callout)(void *, struct trapframe *), void *arg,
    struct trapframe *frame)
{
	struct proc *p;
	struct thread *td;
	struct thread *dtd;

	td = curthread;
	p = td->td_proc;
	KASSERT(p->p_state == PRS_NORMAL, ("executing process is still new"));

	CTR4(KTR_PROC, "fork_exit: new thread %p (td_sched %p, pid %d, %s)",
	    td, td_get_sched(td), p->p_pid, td->td_name);

	sched_fork_exit(td);
	/*
	 * Processes normally resume in mi_switch() after being
	 * cpu_switch()'ed to, but when children start up they arrive here
	 * instead, so we must do much the same things as mi_switch() would.
	 */
	if ((dtd = PCPU_GET(deadthread))) {
		PCPU_SET(deadthread, NULL);
		thread_stash(dtd);
	}
	thread_unlock(td);

	/*
	 * cpu_fork_kthread_handler intercepts this function call to
	 * have this call a non-return function to stay in kernel mode.
	 * initproc has its own fork handler, but it does return.
	 */
	KASSERT(callout != NULL, ("NULL callout in fork_exit"));
	callout(arg, frame);

	/*
	 * Check if a kernel thread misbehaved and returned from its main
	 * function.
	 */
	if (p->p_flag & P_KPROC) {
		printf("Kernel thread \"%s\" (pid %d) exited prematurely.\n",
		    td->td_name, p->p_pid);
		kthread_exit();
	}
	mtx_assert(&Giant, MA_NOTOWNED);

	if (p->p_sysent->sv_schedtail != NULL)
		(p->p_sysent->sv_schedtail)(td);
	td->td_pflags &= ~TDP_FORKING;
}
Example #30
static __inline void
openpic_set_priority(struct openpic_softc *sc, int pri)
{
	u_int tpr;
	uint32_t x;

	sched_pin();
	tpr = OPENPIC_PCPU_TPR((sc->sc_dev == root_pic) ? PCPU_GET(cpuid) : 0);
	x = openpic_read(sc, tpr);
	x &= ~OPENPIC_TPR_MASK;
	x |= pri;
	openpic_write(sc, tpr, x);
	sched_unpin();
}