Example #1
/*
 * pserialize_switchpoint:
 *
 *	Monitor system context-switch activity.  Called from
 *	machine-independent code after mi_switch() returns.
 */
void
pserialize_switchpoint(void)
{
	pserialize_t psz, next;
	cpuid_t cid;

	/*
	 * If no updates pending, bail out.  No need to lock in order to
	 * test psz_work_todo; the only ill effect of missing an update
	 * would be to delay LWPs waiting in pserialize_perform().  That
	 * will not happen because updates are on the queue before an
	 * xcall is generated (serialization) to tickle every CPU.
	 */
	if (__predict_true(psz_work_todo == 0)) {
		return;
	}
	mutex_spin_enter(&psz_lock);
	cid = cpu_index(curcpu());

	/*
	 * First, scan through the second queue and update each request;
	 * once a request has been seen on all processors, transfer it to
	 * the third queue.
	 */
	for (psz = TAILQ_FIRST(&psz_queue1); psz != NULL; psz = next) {
		next = TAILQ_NEXT(psz, psz_chain);
		kcpuset_set(psz->psz_pass, cid);
		if (!kcpuset_match(psz->psz_pass, psz->psz_target)) {
			continue;
		}
		kcpuset_zero(psz->psz_pass);
		TAILQ_REMOVE(&psz_queue1, psz, psz_chain);
		TAILQ_INSERT_TAIL(&psz_queue2, psz, psz_chain);
	}
	/*
	 * Scan through the first queue and update each request; once a
	 * request has been seen on all processors, move it to the second
	 * queue.
	 */
	for (psz = TAILQ_FIRST(&psz_queue0); psz != NULL; psz = next) {
		next = TAILQ_NEXT(psz, psz_chain);
		kcpuset_set(psz->psz_pass, cid);
		if (!kcpuset_match(psz->psz_pass, psz->psz_target)) {
			continue;
		}
		kcpuset_zero(psz->psz_pass);
		TAILQ_REMOVE(&psz_queue0, psz, psz_chain);
		TAILQ_INSERT_TAIL(&psz_queue1, psz, psz_chain);
	}
	/*
	 * Process the third queue: entries have been seen twice on every
	 * processor, remove from the queue and notify the updating thread.
	 */
	while ((psz = TAILQ_FIRST(&psz_queue2)) != NULL) {
		TAILQ_REMOVE(&psz_queue2, psz, psz_chain);
		kcpuset_zero(psz->psz_target);
		psz_work_todo--;
	}
	mutex_spin_exit(&psz_lock);
}
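For context, this switchpoint is the per-CPU half of the pserialize(9) protocol: once every CPU has crossed it twice, no reader started before the update can still be running. A minimal sketch of the updater/reader pairing it serves, assuming a hypothetical "frob" object (struct frob, frob_psz, frob_replace, frob_read and f_value are illustrative names; the pserialize_*, membar_* and kmem_* calls are the real NetBSD interfaces; updaters are assumed to be serialized elsewhere, e.g. by a mutex):

/*
 * Hypothetical pserialize(9) user.  The updater publishes a new object,
 * then pserialize_perform() blocks until every CPU has crossed a
 * switchpoint (above) twice, after which no reader can still hold a
 * reference to the old object.
 */
struct frob {
	int	f_value;
};

static pserialize_t	frob_psz;	/* from pserialize_create() */
static struct frob	*frob_current;	/* published, read without locks */

static void
frob_replace(struct frob *new_frob)
{
	struct frob *old = frob_current;

	membar_producer();		/* order init before publication */
	frob_current = new_frob;	/* publish the new version */
	pserialize_perform(frob_psz);	/* wait out all readers */
	kmem_free(old, sizeof(*old));	/* now safe to reclaim */
}

static int
frob_read(void)
{
	int s, val;

	s = pserialize_read_enter();	/* disables preemption locally */
	val = frob_current->f_value;
	pserialize_read_exit(s);
	return val;
}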
Example #2
void
cpu_startup_common(void)
{
	vaddr_t minaddr, maxaddr;
	char pbuf[9];	/* "99999 MB" */

	pmap_tlb_info_evcnt_attach(&pmap_tlb0_info);

#ifdef MULTIPROCESSOR
	kcpuset_create(&cpus_halted, true);
	KASSERT(cpus_halted != NULL);
	kcpuset_create(&cpus_hatched, true);
	KASSERT(cpus_hatched != NULL);
	kcpuset_create(&cpus_paused, true);
	KASSERT(cpus_paused != NULL);
	kcpuset_create(&cpus_resumed, true);
	KASSERT(cpus_resumed != NULL);
	kcpuset_create(&cpus_running, true);
	KASSERT(cpus_running != NULL);
	kcpuset_set(cpus_hatched, cpu_number());
	kcpuset_set(cpus_running, cpu_number());
#endif

	cpu_hwrena_setup();

	/*
	 * Good {morning,afternoon,evening,night}.
	 */
	printf("%s%s", copyright, version);
	printf("%s\n", cpu_getmodel());
	format_bytes(pbuf, sizeof(pbuf), ctob(physmem));
	printf("total memory = %s\n", pbuf);

	minaddr = 0;
	/*
	 * Allocate a submap for physio.
	 */
	phys_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr,
				    VM_PHYS_SIZE, 0, false, NULL);

	/*
	 * (No need to allocate an mbuf cluster submap.  Mbuf clusters
	 * are allocated via the pool allocator, and we use KSEG/XKPHYS to
	 * map those pages.)
	 */

	format_bytes(pbuf, sizeof(pbuf), ptoa(uvmexp.free));
	printf("avail memory = %s\n", pbuf);

#if defined(__mips_n32)
	module_machine = "mips-n32";
#endif
}
Example #3
void
rump_cpu_attach(struct cpu_info *ci)
{

	if (cpu_info_list == NULL)
		ci->ci_flags |= CPUF_PRIMARY;

	/* XXX: wrong order, but ... */
	ci->ci_next = cpu_info_list;
	cpu_info_list = ci;

	kcpuset_set(kcpuset_attached, cpu_index(ci));
	kcpuset_set(kcpuset_running, cpu_index(ci));
}
Example #4
void
interrupt_get_assigned(const char *intrid, kcpuset_t *cpuset)
{
	struct intr_source *is;

	kcpuset_zero(cpuset);

	is = intr_get_source(intrid);
	if (is != NULL)
		kcpuset_set(cpuset, 0);	/* XXX */
}
Example #5
void
interrupt_get_available(kcpuset_t *cpuset)
{
	CPU_INFO_ITERATOR cii;
	struct cpu_info *ci;

	kcpuset_zero(cpuset);

	mutex_enter(&cpu_lock);
	for (CPU_INFO_FOREACH(cii, ci)) {
		if ((ci->ci_schedstate.spc_flags & SPCF_NOINTR) == 0)
			kcpuset_set(cpuset, cpu_index(ci));
	}
	mutex_exit(&cpu_lock);
}
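A caller owns the cpuset it passes into this query. A minimal usage sketch (report_intr_cpus is a hypothetical name; the kcpuset_* routines and the ncpu global are the real kernel interfaces):

/*
 * Illustrative caller: count the CPUs still accepting interrupts.
 * (report_intr_cpus is hypothetical; kcpuset_* calls are real.)
 */
void
report_intr_cpus(void)
{
	kcpuset_t *kcp;
	int i, n = 0;

	kcpuset_create(&kcp, true);	/* allocate a zeroed set */
	interrupt_get_available(kcp);
	for (i = 0; i < ncpu; i++) {
		if (kcpuset_isset(kcp, i))
			n++;
	}
	printf("%d of %d CPUs accept interrupts\n", n, ncpu);
	kcpuset_destroy(kcp);
}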
Example #6
/*
 * Resume a single CPU.
 */
void
cpu_resume(cpuid_t cii)
{
	if (__predict_false(cold))
		return;

	struct cpu_info * const ci = curcpu();
	kcpuset_t *kcp = ci->ci_ddbcpus;

	kcpuset_set(kcp, cii);
	/* Atomically empty the resumed set before waiting on it. */
	kcpuset_atomicly_remove(cpus_resumed, cpus_resumed);
	kcpuset_atomic_clear(cpus_paused, cii);

	cpu_ipi_wait("resume", cpus_resumed, kcp);
}
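cpu_ipi_wait() itself is not shown in this example; presumably it is a bounded spin until the watched set matches the wanted set. A sketch under that assumption (the timeout and the diagnostic message are guesses; only kcpuset_match() and delay() are known interfaces):

/*
 * Sketch of the rendezvous cpu_resume() relies on: spin until "watched"
 * matches "wanted", giving up after a crude timeout.  The body is an
 * assumption, not the real implementation.
 */
static void
cpu_ipi_wait(const char *s, const kcpuset_t *watched, const kcpuset_t *wanted)
{
	for (u_int i = 0; i < 10000; i++) {
		if (kcpuset_match(watched, wanted))
			return;
		delay(100);
	}
	printf("cpu_ipi_wait: '%s' IPI timed out\n", s);
}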
Example #7
int
ipi_intr(void *v)
{
	struct cpu_info * const ci = curcpu();
	int cpu_id = cpu_index(ci);
	int msr;
	uint32_t ipi;

	ci->ci_ev_ipi.ev_count++;
	ipi = atomic_swap_32(&ci->ci_pending_ipis, 0);

	if (ipi == IPI_NOMESG)
		return 1;

	if (ipi & IPI_XCALL)
		xc_ipi_handler();

	if (ipi & IPI_GENERIC)
		ipi_cpu_handler();

	if (ipi & IPI_SUSPEND)
		cpu_pause(NULL);

	if (ipi & IPI_HALT) {
		struct cpuset_info * const csi = &cpuset_info;
		aprint_normal("halting CPU %d\n", cpu_id);
		kcpuset_set(csi->cpus_halted, cpu_id);
		msr = (mfmsr() & ~PSL_EE) | PSL_POW;
		for (;;) {
			__asm volatile ("sync; isync");
			mtmsr(msr);
		}
	}

	return 1;
}
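The receiver above drains ci_pending_ipis with atomic_swap_32(); the matching sender side, not shown in this example, would OR the message bit in and then kick the target CPU. A sketch under that assumption (send_ipi_msg and md_ipi_trigger are hypothetical names; atomic_or_32() is the real primitive):

/*
 * Hypothetical sender matching ipi_intr() above: post the message bit
 * atomically, then trigger the interrupt.  md_ipi_trigger() stands in
 * for the machine-dependent doorbell.
 */
static void
send_ipi_msg(struct cpu_info *dst_ci, uint32_t msg)
{
	atomic_or_32(&dst_ci->ci_pending_ipis, msg);
	md_ipi_trigger(dst_ci);
}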
Example #8
void
cpu_hatch(struct cpu_info *ci)
{
	struct pmap_tlb_info * const ti = ci->ci_tlb_info;

	/*
	 * Invalidate all the TLB entries (even wired ones) and then reserve
	 * space for the wired TLB entries.
	 */
	mips3_cp0_wired_write(0);
	tlb_invalidate_all();
	mips3_cp0_wired_write(ti->ti_wired);

	/*
	 * Set up the HWRENA and USERLOCAL COP0 registers (MIPSxxR2).
	 */
	cpu_hwrena_setup();

	/*
	 * If we are using register zero relative addressing to access cpu_info
	 * in the exception vectors, enter that mapping into TLB now.
	 */
	if (ci->ci_tlb_slot >= 0) {
		const uint32_t tlb_lo = MIPS3_PG_G|MIPS3_PG_V
		    | mips3_paddr_to_tlbpfn((vaddr_t)ci);
		const struct tlbmask tlbmask = {
			.tlb_hi = -PAGE_SIZE | KERNEL_PID,
#if (PGSHIFT & 1)
			.tlb_lo0 = tlb_lo,
			.tlb_lo1 = tlb_lo + MIPS3_PG_NEXT,
#else
			.tlb_lo0 = 0,
			.tlb_lo1 = tlb_lo,
#endif
			.tlb_mask = -1,
		};

		tlb_invalidate_addr(tlbmask.tlb_hi, KERNEL_PID);
		tlb_write_entry(ci->ci_tlb_slot, &tlbmask);
	}

	/*
	 * Flush the icache just to be sure.
	 */
	mips_icache_sync_all();

	/*
	 * Let this CPU do its own initialization (for things that have to be
	 * done on the local CPU).
	 */
	(*mips_locoresw.lsw_cpu_init)(ci);

	// Show this CPU as present.
	atomic_or_ulong(&ci->ci_flags, CPUF_PRESENT);

	/*
	 * Announce we are hatched
	 */
	kcpuset_atomic_set(cpus_hatched, cpu_index(ci));

	/*
	 * Now wait to be set free!
	 */
	while (! kcpuset_isset(cpus_running, cpu_index(ci))) {
		/* spin, spin, spin */
	}

	/*
	 * Initialize the MIPS count/compare clock.
	 */
	mips3_cp0_count_write(ci->ci_data.cpu_cc_skew);
	KASSERT(ci->ci_cycles_per_hz != 0);
	ci->ci_next_cp0_clk_intr = ci->ci_data.cpu_cc_skew + ci->ci_cycles_per_hz;
	mips3_cp0_compare_write(ci->ci_next_cp0_clk_intr);
	ci->ci_data.cpu_cc_skew = 0;

	/*
	 * Let this CPU do its own post-running initialization
	 * (for things that have to be done on the local CPU).
	 */
	(*mips_locoresw.lsw_cpu_run)(ci);

	/*
	 * Now turn on interrupts (and verify they are on).
	 */
	spl0();
	KASSERTMSG(ci->ci_cpl == IPL_NONE, "cpl %d", ci->ci_cpl);
	KASSERT(mips_cp0_status_read() & MIPS_SR_INT_IE);

	kcpuset_atomic_set(pmap_kernel()->pm_onproc, cpu_index(ci));
	kcpuset_atomic_set(pmap_kernel()->pm_active, cpu_index(ci));

	/*
	 * And do a tail call to idle_loop
	 */
	idle_loop(NULL);
}

void
cpu_boot_secondary_processors(void)
{
	CPU_INFO_ITERATOR cii;
	struct cpu_info *ci;
	for (CPU_INFO_FOREACH(cii, ci)) {
		if (CPU_IS_PRIMARY(ci))
			continue;
		KASSERT(ci->ci_data.cpu_idlelwp);

		/*
		 * Skip this CPU if it didn't successfully hatch.
		 */
		if (!kcpuset_isset(cpus_hatched, cpu_index(ci)))
			continue;

		ci->ci_data.cpu_cc_skew = mips3_cp0_count_read();
		atomic_or_ulong(&ci->ci_flags, CPUF_RUNNING);
		kcpuset_set(cpus_running, cpu_index(ci));
		// Spin until the CPU reaches idle_loop; cpu_hatch() marks
		// the kernel pmap onproc just before calling it.  (Testing
		// cpus_running here would be a no-op: we set it above.)
		for (u_int i = 0; i < 100; i++) {
			if (kcpuset_isset(pmap_kernel()->pm_onproc,
			    cpu_index(ci)))
				break;
			delay(1000);
		}
	}
}