Example #1
/*
 * Common function for mapping DMA-safe memory.  May be called by
 * bus-specific DMA memory map functions.
 */
int
_bus_dmamem_map(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs,
    size_t size, void **kvap, int flags)
{
	vaddr_t va;
	bus_addr_t addr;
	int curseg;
	const uvm_flag_t kmflags =
	    (flags & BUS_DMA_NOWAIT) != 0 ? UVM_KMF_NOWAIT : 0;

	/*
	 * If we're only mapping 1 segment, use the direct-mapped
	 * segment (KSEG0 or XKPHYS) to avoid TLB thrashing.
	 */
#ifdef _LP64
	if (nsegs == 1) {
		if (((mips_options.mips_cpu_flags & CPU_MIPS_D_CACHE_COHERENT) == 0)
		&&  (flags & BUS_DMA_COHERENT))
			*kvap = (void *)MIPS_PHYS_TO_XKPHYS_UNCACHED(
			    segs[0].ds_addr);
		else
			*kvap = (void *)MIPS_PHYS_TO_XKPHYS_CACHED(
			    segs[0].ds_addr);
		return (0);
	}
#else
	if ((nsegs == 1) && (segs[0].ds_addr < MIPS_PHYS_MASK)) {
		if (((mips_options.mips_cpu_flags & CPU_MIPS_D_CACHE_COHERENT) == 0)
		&&  (flags & BUS_DMA_COHERENT))
			*kvap = (void *)MIPS_PHYS_TO_KSEG1(segs[0].ds_addr);
		else
			*kvap = (void *)MIPS_PHYS_TO_KSEG0(segs[0].ds_addr);
		return (0);
	}
#endif	/* _LP64 */

	size = round_page(size);

	va = uvm_km_alloc(kernel_map, size, 0, UVM_KMF_VAONLY | kmflags);

	if (va == 0)
		return (ENOMEM);

	*kvap = (void *)va;

	for (curseg = 0; curseg < nsegs; curseg++) {
		for (addr = segs[curseg].ds_addr;
		    addr < (segs[curseg].ds_addr + segs[curseg].ds_len);
		    addr += PAGE_SIZE, va += PAGE_SIZE, size -= PAGE_SIZE) {
			if (size == 0)
				panic("_bus_dmamem_map: size botch");
			pmap_enter(pmap_kernel(), va, addr,
			    VM_PROT_READ | VM_PROT_WRITE,
			    PMAP_WIRED | VM_PROT_READ | VM_PROT_WRITE);
		}
	}
	pmap_update(pmap_kernel());

	return (0);
}
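
To see where _bus_dmamem_map() fits, here is a minimal driver-side sketch (illustrative, not from the source above; the helper name and error handling are assumptions) of the usual bus_dmamem_alloc()/bus_dmamem_map() pairing. Allocating a single contiguous segment lets the map call take the direct-mapped fast path shown above.

/*
 * A minimal driver-side sketch (hypothetical helper) of the usual
 * allocate/map pairing that ends up in _bus_dmamem_map().
 */
static void *
example_alloc_dma_buffer(bus_dma_tag_t t, bus_size_t size,
    bus_dma_segment_t *seg)
{
	int rsegs, error;
	void *kva;

	/* Allocate one physically contiguous segment... */
	error = bus_dmamem_alloc(t, size, PAGE_SIZE, 0, seg, 1, &rsegs,
	    BUS_DMA_NOWAIT);
	if (error)
		return NULL;

	/* ...and map it; a single segment takes the direct-mapped path. */
	error = bus_dmamem_map(t, seg, rsegs, size, &kva,
	    BUS_DMA_NOWAIT | BUS_DMA_COHERENT);
	if (error) {
		bus_dmamem_free(t, seg, rsegs);
		return NULL;
	}
	return kva;
}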
Example #2
void *
cpu_uarea_alloc(bool system)
{
	struct pglist pglist;
#ifdef _LP64
	const paddr_t high = mips_avail_end;
#else
	const paddr_t high = MIPS_KSEG1_START - MIPS_KSEG0_START;
	/*
	 * Don't allocate a direct-mapped uarea if we aren't allocating
	 * for a system lwp and we have memory that can't be mapped via
	 * KSEG0.
	 */
	if (!system && high < mips_avail_end)
		return NULL;
#endif
	int error;

	/*
	 * Allocate a new physically contiguous uarea which can be
	 * direct-mapped.
	 */
	error = uvm_pglistalloc(USPACE, mips_avail_start, high,
	    USPACE_ALIGN, 0, &pglist, 1, 1);
	if (error) {
#ifdef _LP64
		if (!system)
			return NULL;
#endif
		panic("%s: uvm_pglistalloc failed: %d", __func__, error);
	}

	/*
	 * Get the physical address from the first page.
	 */
	const struct vm_page * const pg = TAILQ_FIRST(&pglist);
	KASSERT(pg != NULL);
	const paddr_t pa = VM_PAGE_TO_PHYS(pg);
	KASSERTMSG(pa >= mips_avail_start,
	    "pa (%#"PRIxPADDR") < mips_avail_start (%#"PRIxPADDR")",
	     pa, mips_avail_start);
	KASSERTMSG(pa < mips_avail_end,
	    "pa (%#"PRIxPADDR") >= mips_avail_end (%#"PRIxPADDR")",
	     pa, mips_avail_end);

	/*
	 * We need to return a direct-mapped VA for the pa.
	 */
#ifdef _LP64
	const vaddr_t va = MIPS_PHYS_TO_XKPHYS_CACHED(pa);
#else
	const vaddr_t va = MIPS_PHYS_TO_KSEG0(pa);
#endif

	return (void *)va;
}
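
For context, a hedged sketch of the matching free path, assuming the uarea came from the direct-mapped allocator above. The helper name is hypothetical and the real kernel's cpu_uarea_free() may differ; this only illustrates reversing the PA/VA translation and returning the pages to UVM.

/*
 * Hedged sketch of the matching free path (hypothetical helper name);
 * assumes the uarea came from the direct-mapped allocator above.
 */
bool
example_uarea_free(void *va)
{
#ifdef _LP64
	if (!MIPS_XKPHYS_P((vaddr_t)va))
		return false;
	paddr_t pa = MIPS_XKPHYS_TO_PHYS((vaddr_t)va);
#else
	if (!MIPS_KSEG0_P(va))
		return false;
	paddr_t pa = MIPS_KSEG0_TO_PHYS(va);
#endif

	/* Return each physical page of the uarea to UVM. */
	for (const paddr_t epa = pa + USPACE; pa < epa; pa += PAGE_SIZE)
		uvm_pagefree(PHYS_TO_VM_PAGE(pa));
	return true;
}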
Example #3
/*
 * Like _bus_dmamap_load(), but for raw memory.
 */
int
_bus_dmamap_load_raw(bus_dma_tag_t t, bus_dmamap_t map,
    bus_dma_segment_t *segs, int nsegs, bus_size_t size, int flags)
{
	struct vmspace * const vm = vmspace_kernel();
	const bool coherent_p =
	    (mips_options.mips_cpu_flags & CPU_MIPS_D_CACHE_COHERENT) != 0;
	const bool cached_p = coherent_p || (flags & BUS_DMA_COHERENT) == 0;
	bus_size_t mapsize = 0;
	bool first = true;
	int curseg = 0;
	int error = 0;

	for (; error == 0 && nsegs-- > 0; segs++) {
		void *kva;
#ifdef _LP64
		if (cached_p) {
			kva = (void *)MIPS_PHYS_TO_XKPHYS_CACHED(segs->ds_addr);
		} else {
			kva = (void *)MIPS_PHYS_TO_XKPHYS_UNCACHED(segs->ds_addr);
		}
#else
		if (segs->ds_addr >= MIPS_PHYS_MASK)
			return EFBIG;
		if (cached_p) {
			kva = (void *)MIPS_PHYS_TO_KSEG0(segs->ds_addr);
		} else {
			kva = (void *)MIPS_PHYS_TO_KSEG1(segs->ds_addr);
		}
#endif	/* _LP64 */
		mapsize += segs->ds_len;
		error = _bus_dmamap_load_buffer(t, map, kva, segs->ds_len,
		    vm, flags, &curseg, first);
		first = false;
	}
	if (error == 0) {
		map->dm_mapsize = mapsize;
		map->dm_nsegs = curseg + 1;
		map->_dm_vmspace = vm;		/* always kernel */
		/*
		 * If our cache is coherent, then the map must be coherent too.
		 */
		if (coherent_p)
			map->_dm_flags |= _BUS_DMAMAP_COHERENT;
		return 0;
	}
	/*
	 * If bus_dmamem_alloc returned memory that needs bouncing, that's
	 * a bug which we will not work around.
	 */
	return error;
}
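
A small illustrative caller (an assumption, not part of the source above; the helper name is hypothetical): bus_dmamap_load_raw() is meant to take the segments from bus_dmamem_alloc() directly, with no intermediate kernel mapping of the buffer.

/*
 * Illustrative caller (hypothetical helper): raw loading feeds the
 * segments from bus_dmamem_alloc() straight into the DMA map.
 */
static int
example_load_raw(bus_dma_tag_t t, bus_dmamap_t map, bus_size_t size)
{
	bus_dma_segment_t seg;
	int rsegs, error;

	error = bus_dmamem_alloc(t, size, PAGE_SIZE, 0, &seg, 1, &rsegs,
	    BUS_DMA_NOWAIT);
	if (error)
		return error;

	/* No KVA needed: load the physical segments directly. */
	error = bus_dmamap_load_raw(t, map, &seg, rsegs, size, BUS_DMA_NOWAIT);
	if (error)
		bus_dmamem_free(t, &seg, rsegs);
	return error;
}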
Example #4
static void
cpucore_rmixl_attach(device_t parent, device_t self, void *aux)
{
	struct cpucore_softc * const sc = device_private(self);
	struct cpunode_attach_args *na = aux;
	struct cpucore_attach_args ca;
	u_int nthreads;
	struct rmixl_config *rcp = &rmixl_configuration;

	sc->sc_dev = self;
	sc->sc_core = na->na_core;
	KASSERT(sc->sc_hatched == false);

#if 0
#ifdef MULTIPROCESSOR
	/*
	 * Create the TLB structure needed - one per core; core 0 uses
	 * the system default.
	 */
	if (sc->sc_core == 0) {
		sc->sc_tlbinfo = &pmap_tlb0_info;
	} else {
		const vaddr_t va = (vaddr_t)&sc->sc_tlbinfo0;
		paddr_t pa;

		if (! pmap_extract(pmap_kernel(), va, &pa))
			panic("%s: pmap_extract fail, va %#"PRIxVADDR, __func__, va);
#ifdef _LP64
		sc->sc_tlbinfo = (struct pmap_tlb_info *)
			MIPS_PHYS_TO_XKPHYS_CACHED(pa);
#else
		sc->sc_tlbinfo = (struct pmap_tlb_info *)
			MIPS_PHYS_TO_KSEG0(pa);
#endif
		pmap_tlb_info_init(sc->sc_tlbinfo);
	}
#endif
#endif

	aprint_normal("\n");
	aprint_normal_dev(self, "%lu.%02luMHz (hz cycles = %lu, "
	    "delay divisor = %lu)\n",
	    curcpu()->ci_cpu_freq / 1000000,
	    (curcpu()->ci_cpu_freq % 1000000) / 10000,
	    curcpu()->ci_cycles_per_hz, curcpu()->ci_divisor_delay);

	aprint_normal("%s: ", device_xname(self));
	cpu_identify(self);

	nthreads = MIPS_CIDFL_RMI_NTHREADS(mips_options.mips_cpu->cpu_cidflags);
	aprint_normal_dev(self, "%d %s on core\n", nthreads,
		nthreads == 1 ? "thread" : "threads");

	/*
	 * Attach CPU (RMI thread contexts) devices
	 * according to userapp_cpu_map bitmask.
	 */
	u_int thread_mask = (1 << nthreads) - 1;
	u_int core_shft = sc->sc_core * nthreads;
	u_int threads_enb =
		(u_int)(rcp->rc_psb_info.userapp_cpu_map >> core_shft) & thread_mask;
	u_int threads_dis = (~threads_enb) & thread_mask;

	sc->sc_threads_dis = threads_dis;
	if (threads_dis != 0) {
		aprint_normal_dev(self, "threads");
		u_int d = threads_dis;
		while (d != 0) {
			const u_int t = ffs(d) - 1;
			d ^= (1 << t);
			aprint_normal(" %d%s", t, (d==0) ? "" : ",");
		}
		aprint_normal(" offline (disabled by firmware)\n");
	}

	u_int threads_try_attach = threads_enb;
	while (threads_try_attach != 0) {
		const u_int t = ffs(threads_try_attach) - 1;
		const u_int bit = 1 << t;
		threads_try_attach ^= bit;
		ca.ca_name = "cpu";
		ca.ca_thread = t;
		ca.ca_core = sc->sc_core;
		if (config_found(self, &ca, cpucore_rmixl_print) == NULL) {
			/*
			 * The thread did not attach (e.g. not configured);
			 * arrange to have it disabled in the THREADEN PCR.
			 */
			threads_enb ^= bit;
			threads_dis |= bit;
		}
	}

	sc->sc_threads_enb = threads_enb;
	sc->sc_threads_dis = threads_dis;

	/*
	 * When attaching the core of the primary cpu,
	 * do the post-running initialization here.
	 */
	if (sc->sc_core == RMIXL_CPU_CORE((curcpu()->ci_cpuid)))
		cpucore_rmixl_run(self);
}
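
The attach routine above visits enabled threads with an ffs()-based bitmask walk. A standalone sketch of that idiom follows (names are illustrative): each iteration extracts and clears the lowest set bit, so every thread index is visited exactly once.

/*
 * Standalone sketch of the ffs()-based bitmask walk used above
 * (names are illustrative).
 */
static void
example_foreach_thread(u_int threads_enb)
{
	u_int remaining = threads_enb;

	while (remaining != 0) {
		const u_int t = ffs(remaining) - 1;	/* lowest set bit */
		remaining ^= 1 << t;			/* clear it */
		printf("visiting thread %u\n", t);
	}
}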
Example #5
static int
__BS(alloc)(void *v, bus_addr_t rstart, bus_addr_t rend, bus_size_t size,
    bus_size_t align, bus_size_t boundary, int flags, bus_addr_t *addrp,
    bus_space_handle_t *bshp)
{
#ifdef CHIP_EXTENT
	struct mips_bus_space_translation mbst;
	u_long addr;	/* bogus but makes extent happy */
	int error;
#if CHIP_ALIGN_STRIDE != 0
	int linear = flags & BUS_SPACE_MAP_LINEAR;

	/*
	 * Can't map xxx space linearly.
	 */
	if (linear)
		return (EOPNOTSUPP);
#endif

	/*
	 * Do the requested allocation.
	 */
#ifdef EXTENT_DEBUG
	printf("%s: allocating from %#"PRIxBUSADDR" to %#"PRIxBUSADDR"\n",
		__S(__BS(alloc)), rstart, rend);
#endif
	error = extent_alloc_subregion(CHIP_EXTENT(v), rstart, rend, size,
	    align, boundary,
	    EX_FAST | EX_NOWAIT | (CHIP_EX_MALLOC_SAFE(v) ? EX_MALLOCOK : 0),
	    &addr);
	if (error) {
#ifdef EXTENT_DEBUG
		printf("%s: allocation failed (%d)\n", __S(__BS(alloc)), error);
		extent_print(CHIP_EXTENT(v));
#endif
		return (error);
	}

#ifdef EXTENT_DEBUG
	printf("%s: allocated 0x%lx to %#"PRIxBUSSIZE"\n",
		__S(__BS(alloc)), addr, addr + size - 1);
#endif

	error = __BS(translate)(v, addr, size, flags, &mbst);
	if (error) {
		(void) extent_free(CHIP_EXTENT(v), addr, size,
		    EX_NOWAIT | (CHIP_EX_MALLOC_SAFE(v) ? EX_MALLOCOK : 0));
		return (error);
	}

	*addrp = addr;
#if !defined(__mips_o32)
	if (flags & BUS_SPACE_MAP_CACHEABLE) {
		*bshp = MIPS_PHYS_TO_XKPHYS_CACHED(mbst.mbst_sys_start +
		    (addr - mbst.mbst_bus_start));
	} else {
		*bshp = MIPS_PHYS_TO_XKPHYS_UNCACHED(mbst.mbst_sys_start +
		    (addr - mbst.mbst_bus_start));
	}
#else
	if (flags & BUS_SPACE_MAP_CACHEABLE) {
		*bshp = MIPS_PHYS_TO_KSEG0(mbst.mbst_sys_start +
		    (addr - mbst.mbst_bus_start));
	} else {
		*bshp = MIPS_PHYS_TO_KSEG1(mbst.mbst_sys_start +
		    (addr - mbst.mbst_bus_start));
	}
#endif

	return (0);
#else /* ! CHIP_EXTENT */
	return (EOPNOTSUPP);
#endif /* CHIP_EXTENT */
}
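
A hedged driver-side sketch of the alloc path: a call to bus_space_alloc() lands in the __BS(alloc) implementation above. The tag, address window, and sizes here are assumptions for illustration.

/*
 * Hedged driver-side sketch of bus_space_alloc(); the tag, window
 * and sizes are assumptions for illustration.
 */
static int
example_alloc_regs(bus_space_tag_t bst, bus_space_handle_t *bshp)
{
	bus_addr_t addr;

	/* 256 bytes, 4-byte aligned, anywhere in the assumed window. */
	return bus_space_alloc(bst, 0x10000000, 0x10ffffff, 0x100,
	    4, 0, 0, &addr, bshp);
}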
Example #6
static int
__BS(map)(void *v, bus_addr_t addr, bus_size_t size, int flags,
    bus_space_handle_t *hp, int acct)
{
	struct mips_bus_space_translation mbst;
	int error;

	/*
	 * Get the translation for this address.
	 */
	error = __BS(translate)(v, addr, size, flags, &mbst);
	if (error)
		return (error);

#ifdef CHIP_EXTENT
	if (acct == 0)
		goto mapit;

#ifdef EXTENT_DEBUG
	printf("%s: allocating %#"PRIxBUSADDR" to %#"PRIxBUSADDR"\n",
		__S(__BS(map)), addr, addr + size - 1);
#endif
	error = extent_alloc_region(CHIP_EXTENT(v), addr, size,
	    EX_NOWAIT | (CHIP_EX_MALLOC_SAFE(v) ? EX_MALLOCOK : 0));
	if (error) {
#ifdef EXTENT_DEBUG
		printf("%s: allocation failed (%d)\n", __S(__BS(map)), error);
		extent_print(CHIP_EXTENT(v));
#endif
		return (error);
	}

 mapit:
#endif /* CHIP_EXTENT */

	addr = mbst.mbst_sys_start + (addr - mbst.mbst_bus_start);

#if defined(__mips_n32) || defined(_LP64)
	if (flags & BUS_SPACE_MAP_CACHEABLE) {
#ifdef __mips_n32
		if (((addr + size) & ~MIPS_PHYS_MASK) == 0)
			*hp = (intptr_t)MIPS_PHYS_TO_KSEG0(addr);
		else
#endif
			*hp = MIPS_PHYS_TO_XKPHYS_CACHED(addr);
	} else if (flags & BUS_SPACE_MAP_PREFETCHABLE) {
		*hp = MIPS_PHYS_TO_XKPHYS_ACC(addr);
	} else {
#ifdef __mips_n32
		if (((addr + size) & ~MIPS_PHYS_MASK) == 0)
			*hp = (intptr_t)MIPS_PHYS_TO_KSEG1(addr);
		else
#endif
			*hp = MIPS_PHYS_TO_XKPHYS_UNCACHED(addr);
	}
#else
	if (((addr + size) & ~MIPS_PHYS_MASK) != 0) {
		vaddr_t va;
		paddr_t pa;
		int s;

		size = round_page((addr % PAGE_SIZE) + size);
		va = uvm_km_alloc(kernel_map, size, PAGE_SIZE,
			UVM_KMF_VAONLY | UVM_KMF_NOWAIT);
		if (va == 0)
			return ENOMEM;

		/* check use of handle_is_km in BS(unmap) */
		KASSERT(!(MIPS_KSEG0_P(va) || MIPS_KSEG1_P(va)));

		*hp = va + (addr & PAGE_MASK);
		pa = trunc_page(addr);

		s = splhigh();
		while (size != 0) {
			pmap_kenter_pa(va, pa, VM_PROT_READ | VM_PROT_WRITE, 0);
			pa += PAGE_SIZE;
			va += PAGE_SIZE;
			size -= PAGE_SIZE;
		}
		pmap_update(pmap_kernel());
		splx(s);
	} else {
		if (flags & BUS_SPACE_MAP_CACHEABLE)
			*hp = (intptr_t)MIPS_PHYS_TO_KSEG0(addr);
		else
			*hp = (intptr_t)MIPS_PHYS_TO_KSEG1(addr);
	}
#endif

	return (0);
}
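
Finally, a minimal usage sketch of the map path above (the device address, window size, and helper name are assumed): map a 4 KiB register window uncached, read one register, and unmap.

/*
 * Minimal usage sketch of the map path (device address is assumed):
 * map a 4 KiB register window uncached, read a register, unmap.
 */
static uint32_t
example_read_reg(bus_space_tag_t bst, bus_addr_t dev_addr)
{
	bus_space_handle_t bsh;
	uint32_t val;

	if (bus_space_map(bst, dev_addr, 0x1000, 0, &bsh) != 0)
		return 0;
	val = bus_space_read_4(bst, bsh, 0);	/* register at offset 0 */
	bus_space_unmap(bst, bsh, 0x1000);
	return val;
}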