Example #1
/*
 * BERI startup conforms to the spin-table start method defined in the
 * ePAPR 1.0 spec.  The initial spin waiting for an address is started
 * by the CPU firmware.
 */
int
platform_start_ap(int cpuid)
{
	phandle_t cpu;
	char prop[16];
	struct spin_entry *se;

	KASSERT(cpuid != 0, ("%s: can't start CPU 0!\n", __func__));
	KASSERT((cpuid > 0 && cpuid < MAXCPU),
	    ("%s: invalid CPU id %d", __func__, cpuid));

	cpu = cpu_of_nodes[cpuid];
	if (OF_getprop(cpu, "status", &prop, sizeof(prop)) <= 0) {
		if (bootverbose)
			printf("%s: CPU %d has no status property, "
			    "trying parent\n", __func__, cpuid);
		if (OF_getprop(OF_parent(cpu), "status", &prop,
		    sizeof(prop)) <= 0)
			panic("%s: CPU %d has no status property", __func__,
			    cpuid);
	}
	if (strcmp("disabled", prop) != 0)
		panic("%s: CPU %d status is '%s' not 'disabled'",
		    __func__, cpuid, prop);

	if (OF_getprop(cpu, "enable-method", &prop, sizeof(prop)) <= 0) {
		if (bootverbose)
			printf("%s: CPU %d has no enable-method, "
			    "trying parent\n", __func__, cpuid);
		if (OF_getprop(OF_parent(cpu), "enable-method", &prop,
		    sizeof(prop)) <= 0)
			panic("%s: CPU %d has no enable-method property",
			    __func__, cpuid);
	}
	if (strcmp("spin-table", prop) != 0)
		panic("%s: CPU %d enable-method is '%s' not "
		    "'spin-table'", __func__, cpuid, prop);

	if (OF_getprop(cpu, "cpu-release-addr", &se, sizeof(se)) <= 0)
		panic("%s: CPU %d has missing or invalid cpu-release-addr",
		    __func__, cpuid);
	se->pir = cpuid;
	if (bootverbose)
		printf("%s: writing %p to %p\n", __func__, mpentry,
		    &se->entry_addr);

	mips_sync();	/* Ordering. */
	se->entry_addr = (intptr_t)mpentry;
	mips_sync();	/* Liveness. */

	return (0);
}
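For reference, se points at an ePAPR 1.0 spin-table entry. A minimal sketch of the layout, assuming the field names from the spec (the code above only relies on entry_addr and pir; the exact definition in the platform headers may differ):

/*
 * Sketch of the ePAPR 1.0 spin-table entry assumed by platform_start_ap();
 * field names follow the spec, the real kernel definition may differ.
 */
struct spin_entry {
	uint64_t	entry_addr;	/* initialized to 1; CPU spins until it changes */
	uint64_t	r3;		/* argument for the entry point (ePAPR name) */
	uint32_t	rsvd1;		/* reserved */
	uint32_t	pir;		/* processor id, written as se->pir above */
};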
Example #2
void
__mp_lock(struct __mp_lock *mpl)
{
	register_t sr;
	struct cpu_info *ci = curcpu();

	/*
	 * Please notice that mpl_count gets incremented twice for the
	 * first lock. This is on purpose. The way we release the lock
	 * in mp_unlock is to decrement the mpl_count and then check if
	 * the lock should be released. Since mpl_count is what we're
	 * spinning on, decrementing it in mpl_unlock to 0 means that
	 * we can't clear mpl_cpu, because we're no longer holding the
	 * lock. In theory mpl_cpu doesn't need to be cleared, but it's
	 * safer to clear it and besides, setting mpl_count to 2 on the
	 * first lock makes most of this code much simpler.
	 */
	while (1) {
		sr = disableintr();
		if (__cpu_cas(&mpl->mpl_count, 0, 1) == 0) {
			mips_sync();
			mpl->mpl_cpu = ci;
		}

		if (mpl->mpl_cpu == ci) {
			mpl->mpl_count++;
			setsr(sr);
			break;
		}
		setsr(sr);
		
		__mp_lock_spin(mpl);
	}
}
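__mp_lock_spin() is not shown in this example; a minimal sketch of what it typically does, assuming the usual SPINLOCK_SPIN_HOOK busy-wait (the real routine also has an MP_LOCKDEBUG variant with a spin-out counter):

/*
 * Sketch only: busy-wait until the lock looks free, then let the caller
 * in __mp_lock() retry the compare-and-swap.
 */
static inline void
__mp_lock_spin(struct __mp_lock *mpl)
{
	while (mpl->mpl_count != 0)
		SPINLOCK_SPIN_HOOK;
}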
Example #3
int64_t
cn30xxfau_op_inc_8(struct cn30xxfau_desc *fd, int64_t v)
{
	cn30xxfau_op_iobdma_store_data(fd->fd_scroff, v, 0, OCT_FAU_OP_SIZE_64/* XXX */,
	    fd->fd_regno);
	mips_sync();
	return octeon_cvmseg_read_8(fd->fd_scroff)/* XXX */;
}
Example #4
void
platform_ipi_send(int cpuid)
{

	/* XXX: single core/pic */
	mips_sync();	/* Ordering, liveness. */
	FDT_IC_SEND_IPI(picmap[cpuid], cpuid);
}
Example #5
void
tcc_IOSyncDCache(struct cpu_info *ci, vaddr_t _va, size_t _sz, int how)
{
	vaddr_t va;
	vsize_t sz;
	int partial_start, partial_end;

	/* extend the range to integral cache lines */
	va = _va & ~(TCC_CACHE_LINE - 1);
	sz = ((_va + _sz + TCC_CACHE_LINE - 1) & ~(TCC_CACHE_LINE - 1)) - va;

	mips_sync();

	switch (how) {
	default:
	case CACHE_SYNC_R:
		/* writeback partial cachelines */
		if (((_va | _sz) & (TCC_CACHE_LINE - 1)) != 0) {
			partial_start = va != _va;
			partial_end = va + sz != _va + _sz;
		} else {
			partial_start = partial_end = 0;
		}
		tcc_prefetch_invalidate();
		if (partial_start) {
			tcc_virtual(ci, va, TCC_CACHE_LINE,
			    TCC_CACHEOP_WRITEBACK | TCC_CACHEOP_INVALIDATE);
			va += TCC_CACHE_LINE;
			sz -= TCC_CACHE_LINE;
		}
		if (sz != 0 && partial_end) {
			sz -= TCC_CACHE_LINE;
			tcc_virtual(ci, va + sz, TCC_CACHE_LINE,
			    TCC_CACHEOP_WRITEBACK | TCC_CACHEOP_INVALIDATE);
		}
		if (sz != 0)
			tcc_virtual(ci, va, sz, TCC_CACHEOP_INVALIDATE);
		break;

	case CACHE_SYNC_X:
		tcc_prefetch_invalidate();
		tcc_virtual(ci, va, sz, TCC_CACHEOP_WRITEBACK);
		break;

	case CACHE_SYNC_W:
		tcc_prefetch_invalidate();
		tcc_virtual(ci, va, sz,
		    TCC_CACHEOP_WRITEBACK | TCC_CACHEOP_INVALIDATE);
		break;
	}
	tcc_prefetch_invalidate();

	tfp_IOSyncDCache(ci, _va, _sz, how);
}
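This routine and the next one extend the requested range to whole cache lines with the same mask arithmetic. A small stand-alone illustration of that rounding, assuming a 64-byte line size for the sake of the example:

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

#define TCC_CACHE_LINE	64	/* assumed line size, for illustration only */

int
main(void)
{
	uintptr_t _va = 0x1234567;	/* unaligned start address */
	size_t _sz = 100;		/* unaligned length */
	uintptr_t va;
	size_t sz;

	/* Round the start down to a line boundary... */
	va = _va & ~(uintptr_t)(TCC_CACHE_LINE - 1);
	/* ...and round the end up, so [va, va + sz) covers whole lines. */
	sz = ((_va + _sz + TCC_CACHE_LINE - 1) &
	    ~(uintptr_t)(TCC_CACHE_LINE - 1)) - va;

	assert(va % TCC_CACHE_LINE == 0);
	assert(sz % TCC_CACHE_LINE == 0);
	assert(va <= _va && va + sz >= _va + _sz);
	return 0;
}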
Example #6
void
tcc_HitInvalidateDCache(struct cpu_info *ci, vaddr_t _va, size_t _sz)
{
	vaddr_t va;
	vsize_t sz;

	/* extend the range to integral cache lines */
	va = _va & ~(TCC_CACHE_LINE - 1);
	sz = ((_va + _sz + TCC_CACHE_LINE - 1) & ~(TCC_CACHE_LINE - 1)) - va;

	mips_sync();
	tcc_prefetch_invalidate();
	tcc_virtual(ci, va, sz, TCC_CACHEOP_INVALIDATE);
	tcc_prefetch_invalidate();
}
Example #7
void
tcc_SyncDCachePage(struct cpu_info *ci, vaddr_t va, paddr_t pa)
{
	vaddr_t epa;

	mips_sync();
	tcc_prefetch_invalidate();
	epa = pa + PAGE_SIZE;
	do {
		tcc_cache_hit(pa,
		    TCC_CACHEOP_WRITEBACK | TCC_CACHEOP_INVALIDATE);
		pa += TCC_CACHE_LINE;
	} while (pa != epa);
	tcc_prefetch_invalidate();
}
Example #8
void
macebus_splx(int newipl)
{
	struct cpu_info *ci = curcpu();

	/* Update masks to new ipl. Order highly important! */
	__asm__ (".set noreorder\n");
	ci->ci_ipl = newipl;
	mips_sync();
	__asm__ (".set reorder\n");
	crime_setintrmask(newipl);
	/* If we still have softints pending trigger processing. */
	if (ci->ci_softpending != 0 && newipl < IPL_SOFTINT)
		setsoftintr0();
}
Example #9
uint64_t
__sync_val_compare_and_swap_8(uint64_t *mem, uint64_t expected,
    uint64_t desired)
{
	uint64_t old, temp;

	mips_sync();
	__asm volatile (
		"1:"
		"\tlld	%0, %5\n"	/* Load old value. */
		"\tbne	%0, %3, 2f\n"	/* Compare to expected value. */
		"\tmove	%2, %4\n"	/* Value to store. */
		"\tscd	%2, %1\n"	/* Attempt to store. */
		"\tbeqz	%2, 1b\n"	/* Spin if failed. */
		"2:"
		: "=&r" (old), "=m" (*mem), "=&r" (temp)
		: "r" (expected), "r" (desired), "m" (*mem));
	return (old);
}
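A common way to use such a primitive is to build other atomics on top of it. An illustrative 64-bit fetch-and-add expressed in terms of the routine above (not taken from the source tree):

/*
 * Illustration only: atomic fetch-and-add built on the compare-and-swap
 * shown above. Retries until no other CPU updated *mem between the read
 * and the lld/scd-backed swap.
 */
static uint64_t
fetch_and_add_8(uint64_t *mem, uint64_t delta)
{
	uint64_t old, seen;

	old = *mem;
	for (;;) {
		seen = __sync_val_compare_and_swap_8(mem, old, old + delta);
		if (seen == old)
			return (old);	/* swap succeeded */
		old = seen;		/* lost the race; retry with new value */
	}
}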
Example #10
void
platform_reset(void)
{
	struct bcm_platform	*bp;
	bool			 bcm4785war;

	printf("bcm::platform_reset()\n");
	intr_disable();

#ifdef CFE
	/* Fall back on CFE if reset requested during platform
	 * data initialization */
	if (!bcm_platform_data_avail) {
		cfe_exit(0, 0);
		while (1);
	}
#endif

	bp = bcm_get_platform();
	bcm4785war = false;

	/* Handle BCM4785-specific behavior */
	if (bp->cid.chip_id == BHND_CHIPID_BCM4785) {
		bcm4785war = true;

		/* Switch to async mode */
		bcm_bmips_wr_pllcfg3(BMIPS_BCMCFG_PLLCFG3_SM);
	}

	/* Set watchdog (PMU or ChipCommon) */
	if (bp->pmu_addr != 0x0) {
		BCM_PMU_WRITE_4(bp, BHND_PMU_WATCHDOG, 1);
	} else
		BCM_CHIPC_WRITE_4(bp, CHIPC_WATCHDOG, 1);

	/* BCM4785 */
	if (bcm4785war) {
		mips_sync();
		__asm __volatile("wait");
	}

	while (1);
}
Example #11
int
__mp_release_all(struct __mp_lock *mpl)
{
	int rv = mpl->mpl_count - 1;
	register_t sr;

#ifdef MP_LOCKDEBUG
	if (mpl->mpl_cpu != curcpu()) {
		db_printf("__mp_release_all(%p): not held lock\n", mpl);
		Debugger();
	}
#endif

	sr = disableintr();
	mpl->mpl_cpu = NULL;
	mips_sync();
	mpl->mpl_count = 0;
	setsr(sr);

	return (rv);
}
Example #12
void
__mp_unlock(struct __mp_lock *mpl)
{
	register_t sr;

#ifdef MP_LOCKDEBUG
	if (mpl->mpl_cpu != curcpu()) {
		db_printf("__mp_unlock(%p): not held lock\n", mpl);
		Debugger();
	}
#endif

	sr = disableintr();
	if (--mpl->mpl_count == 1) {
		mpl->mpl_cpu = NULL;
		mips_sync();
		mpl->mpl_count = 0;
	}

	setsr(sr);
}
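Paired with __mp_lock() from Example #2, the counting works out as the comment there describes. A hypothetical trace, assuming an already-initialized lock (not taken from the source tree):

static void
mp_lock_trace(struct __mp_lock *mpl)
{
	__mp_lock(mpl);		/* cas 0 -> 1, then ++: mpl_count == 2 */
	__mp_lock(mpl);		/* recursive acquire: mpl_count == 3 */
	__mp_unlock(mpl);	/* --mpl_count == 2: lock still held */
	__mp_unlock(mpl);	/* --mpl_count == 1: clear mpl_cpu, set count to 0 */
}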
Example #13
void
tcc_SyncCache(struct cpu_info *ci)
{
	uint64_t idx;

	mips_sync();
	tcc_prefetch_invalidate();
	tfp_InvalidateICache(ci, 0, ci->ci_l1inst.size);

	/*
	 * The following relies upon the fact that the (line, set)
	 * fields are contiguous. Therefore by pretending there is
	 * a huge number of sets and only one line, we can span the
	 * whole cache.
	 */
	idx = (uint64_t)ci->ci_l2.size / TCC_CACHE_LINE;
	while (idx != 0) {
		idx--;
		tcc_cache_index(idx, 0,
		    TCC_CACHEOP_WRITEBACK | TCC_CACHEOP_INVALIDATE);
	}
	tcc_prefetch_invalidate();
}
Example #14
void
smp_init_secondary(u_int32_t cpuid)
{

	if (cpuid >= MAXCPU)
		panic("cpu id exceeds MAXCPU\n");

	/* tlb init */
	R4K_SetWIRED(0);
	R4K_TLBFlush(num_tlbentries);
	R4K_SetWIRED(VMWIRED_ENTRIES);
	MachSetPID(0);

	Mips_SyncCache();

	mips_cp0_status_write(0);
	while (!aps_ready)
		;

	mips_sync();
	mips_sync();
	/* Initialize curthread. */
	KASSERT(PCPU_GET(idlethread) != NULL, ("no idle thread"));
	PCPU_SET(curthread, PCPU_GET(idlethread));

	mtx_lock_spin(&ap_boot_mtx);

	smp_cpus++;

	CTR1(KTR_SMP, "SMP: AP CPU #%d Launched", PCPU_GET(cpuid));

	/* Build our map of 'other' CPUs. */
	PCPU_SET(other_cpus, all_cpus & ~PCPU_GET(cpumask));

	printf("SMP: AP CPU #%d Launched!\n", PCPU_GET(cpuid));

	if (smp_cpus == mp_ncpus) {
		smp_started = 1;
		smp_active = 1;
	}

	mtx_unlock_spin(&ap_boot_mtx);

	while (smp_started == 0)
		; /* nothing */
	/* Enable Interrupt */
	mips_cp0_status_write(SR_INT_ENAB);
	/* ok, now grab sched_lock and enter the scheduler */
	mtx_lock_spin(&sched_lock);

	/*
	 * Correct spinlock nesting.  The idle thread context that we are
	 * borrowing was created so that it would start out with a single
	 * spin lock (sched_lock) held in fork_trampoline().  Since we've
	 * explicitly acquired locks in this function, the nesting count
	 * is now 2 rather than 1.  Since we are nested, calling
	 * spinlock_exit() will simply adjust the counts without allowing
	 * spin lock using code to interrupt us.
	 */
	spinlock_exit();
	KASSERT(curthread->td_md.md_spinlock_count == 1, ("invalid count"));

	binuptime(PCPU_PTR(switchtime));
	PCPU_SET(switchticks, ticks);

	/* kick off the clock on this cpu */
	mips_start_timer();
	cpu_throw(NULL, choosethread());	/* doesn't return */

	panic("scheduler returned us to %s", __func__);
}
Example #15
void
imc_space_barrier(bus_space_tag_t t, bus_space_handle_t h, bus_size_t offs,
    bus_size_t len, int flags)
{
	mips_sync();
}
Example #16
uint64_t
cn30xxfau_op_save(struct cn30xxfau_desc *fd)
{
	mips_sync();
	return octeon_cvmseg_read_8(fd->fd_scroff);
}
Example #17
/*
 * Process legacy interrupts.
 *
 * XXX On 2F, ISA interrupts only occur on LOONGSON_INTR_INT0, but since
 * XXX the other LOONGSON_INTR_INT# are unmaskable, bad things will happen
 * XXX if they ever are triggered...
 */
uint32_t
lemote_isa_intr(uint32_t hwpend, struct trap_frame *frame)
{
	uint64_t imr, isr, mask;
	int bit;
	struct intrhand *ih;
	int rc;

	isr = lemote_get_isa_isr();
	imr = lemote_get_isa_imr();

	isr &= imr;
	isr &= ~(1 << 2);	/* cascade */
#ifdef DEBUG
	printf("isa interrupt: imr %04x isr %04x\n", imr, isr);
#endif
	if (isr == 0)
		return 0;	/* not for us */

	/*
	 * Mask all pending interrupts.
	 */

	loongson_set_isa_imr(imr & ~isr);

	/*
	 * If interrupts are spl-masked, mask them and wait for splx()
	 * to reenable them when necessary.
	 */
	if ((mask = isr & (BONITO_ISA_MASK(bonito_imask[frame->ipl]))) != 0) {
		isr &= ~mask;
		imr &= ~mask;
	}

	/*
	 * Now process allowed interrupts.
	 */
	if (isr != 0) {
		int lvl, bitno, ret;
		uint64_t tmpisr;

		/* Service higher level interrupts first */
		bit = BONITO_NISA - 1;
		for (lvl = IPL_HIGH - 1; lvl != IPL_NONE; lvl--) {
			tmpisr = isr & BONITO_ISA_MASK(bonito_imask[lvl] ^
			    bonito_imask[lvl - 1]);
			if (tmpisr == 0)
				continue;
			for (bitno = bit, mask = 1UL << bitno; mask != 0;
			    bitno--, mask >>= 1) {
				if ((tmpisr & mask) == 0)
					continue;

				rc = 0;
				for (ih = bonito_intrhand[BONITO_ISA_IRQ(bitno)];
				    ih != NULL; ih = ih->ih_next) {
					void *arg;

					splraise(ih->ih_level);
					if (ih->ih_arg != NULL)
						arg = ih->ih_arg;
					else
						/* clock interrupt */
						arg = frame;
					ret = (*ih->ih_fun)(arg);
					if (ret) {
						rc = 1;
						ih->ih_count.ec_count++;
					}
					__asm__ (".set noreorder\n");
					curcpu()->ci_ipl = frame->ipl;
					mips_sync();
					__asm__ (".set reorder\n");
					if (ret == 1)
						break;
				}
				if (rc == 0)
					printf("spurious isa interrupt %d\n",
					    bitno);

				loongson_isa_specific_eoi(bitno);

				if ((isr ^= mask) == 0)
					goto done;
				if ((tmpisr ^= mask) == 0)
					break;
			}
		}
done:

		/*
		 * Reenable interrupts which have been serviced.
		 */
		loongson_set_isa_imr(imr);
	}