/*
 * Common function for mapping DMA-safe memory.  May be called by
 * bus-specific DMA memory map functions.
 *
 * Maps the physical segments in segs[0..nsegs-1] into kernel virtual
 * address space and returns the KVA through *kvap.  Honors
 * BUS_DMA_NOWAIT (no sleeping in the VA allocator) and BUS_DMA_COHERENT
 * (use an uncached mapping when the D-cache is not DMA-coherent).
 * Returns 0 on success or ENOMEM if KVA could not be reserved.
 */
int
_bus_dmamem_map(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs,
    size_t size, void **kvap, int flags)
{
	vaddr_t va;
	bus_addr_t addr;
	int curseg;
	/* Translate BUS_DMA_NOWAIT into the equivalent UVM allocation flag. */
	const uvm_flag_t kmflags =
	    (flags & BUS_DMA_NOWAIT) != 0 ? UVM_KMF_NOWAIT : 0;

	/*
	 * If we're only mapping 1 segment, use K0SEG, to avoid
	 * TLB thrashing.
	 */
#ifdef _LP64
	/*
	 * 64-bit kernel: every physical address is reachable through an
	 * XKPHYS direct-map window, so a single segment never needs a
	 * TLB-backed mapping.  Pick the uncached window only when the
	 * D-cache is not DMA-coherent and the caller asked for coherence.
	 */
	if (nsegs == 1) {
		if (((mips_options.mips_cpu_flags &
		    CPU_MIPS_D_CACHE_COHERENT) == 0) &&
		    (flags & BUS_DMA_COHERENT))
			*kvap = (void *)MIPS_PHYS_TO_XKPHYS_UNCACHED(
			    segs[0].ds_addr);
		else
			*kvap = (void *)MIPS_PHYS_TO_XKPHYS_CACHED(
			    segs[0].ds_addr);
		return 0;
	}
#else
	/*
	 * 32-bit kernel: only physical addresses inside the KSEG0/KSEG1
	 * direct-map window can be handed back without a TLB mapping.
	 *
	 * NOTE(review): only the segment's start address is checked
	 * against MIPS_PHYS_MASK, not ds_addr + ds_len — presumably
	 * callers guarantee a single segment never straddles the window
	 * limit; confirm against bus_dmamem_alloc.
	 */
	if ((nsegs == 1) && (segs[0].ds_addr < MIPS_PHYS_MASK)) {
		if (((mips_options.mips_cpu_flags &
		    CPU_MIPS_D_CACHE_COHERENT) == 0) &&
		    (flags & BUS_DMA_COHERENT))
			*kvap = (void *)MIPS_PHYS_TO_KSEG1(segs[0].ds_addr);
		else
			*kvap = (void *)MIPS_PHYS_TO_KSEG0(segs[0].ds_addr);
		return (0);
	}
#endif	/* _LP64 */

	size = round_page(size);

	/* Reserve VA space only; the pages are entered one by one below. */
	va = uvm_km_alloc(kernel_map, size, 0, UVM_KMF_VAONLY | kmflags);
	if (va == 0)
		return (ENOMEM);

	*kvap = (void *)va;

	/* Enter a wired kernel mapping for every page of every segment. */
	for (curseg = 0; curseg < nsegs; curseg++) {
		for (addr = segs[curseg].ds_addr;
		    addr < (segs[curseg].ds_addr + segs[curseg].ds_len);
		    addr += PAGE_SIZE, va += PAGE_SIZE, size -= PAGE_SIZE) {
			/*
			 * The segments must not cover more pages than the
			 * rounded-up size we reserved; anything else is a
			 * bookkeeping bug in the caller or allocator.
			 */
			if (size == 0)
				panic("_bus_dmamem_map: size botch");
			pmap_enter(pmap_kernel(), va, addr,
			    VM_PROT_READ | VM_PROT_WRITE,
			    PMAP_WIRED | VM_PROT_READ | VM_PROT_WRITE);
		}
	}
	/* Flush deferred pmap state so the mappings are visible now. */
	pmap_update(pmap_kernel());

	return (0);
}
/*
 * Like _bus_dmamap_load(), but for raw memory.
 *
 * Loads the physical segments segs[0..nsegs-1] (as produced by
 * bus_dmamem_alloc()) into the DMA map.  Each segment is converted to a
 * direct-mapped kernel virtual address (XKPHYS on _LP64, KSEG0/KSEG1
 * otherwise) and fed to _bus_dmamap_load_buffer().  Returns 0 on
 * success, EFBIG if a segment is outside the 32-bit direct-map window,
 * or the error from _bus_dmamap_load_buffer().
 */
int
_bus_dmamap_load_raw(bus_dma_tag_t t, bus_dmamap_t map,
    bus_dma_segment_t *segs, int nsegs, bus_size_t size, int flags)
{
	/* Raw segments have no owning process; account them to the kernel. */
	struct vmspace * const vm = vmspace_kernel();
	const bool coherent_p =
	    (mips_options.mips_cpu_flags & CPU_MIPS_D_CACHE_COHERENT);
	/*
	 * Use a cached direct map unless the caller wants BUS_DMA_COHERENT
	 * and the hardware cache is not already DMA-coherent.
	 */
	const bool cached_p = coherent_p || (flags & BUS_DMA_COHERENT) == 0;
	bus_size_t mapsize = 0;
	bool first = true;
	int curseg = 0;
	int error = 0;

	for (; error == 0 && nsegs-- > 0; segs++) {
		void *kva;
#ifdef _LP64
		if (cached_p) {
			kva = (void *)MIPS_PHYS_TO_XKPHYS_CACHED(segs->ds_addr);
		} else {
			kva = (void *)MIPS_PHYS_TO_XKPHYS_UNCACHED(
			    segs->ds_addr);
		}
#else
		/*
		 * 32-bit kernel: a segment beyond the KSEG direct-map
		 * window cannot be addressed this way at all.
		 */
		if (segs->ds_addr >= MIPS_PHYS_MASK)
			return EFBIG;
		if (cached_p) {
			kva = (void *)MIPS_PHYS_TO_KSEG0(segs->ds_addr);
		} else {
			kva = (void *)MIPS_PHYS_TO_KSEG1(segs->ds_addr);
		}
#endif	/* _LP64 */
		mapsize += segs->ds_len;
		/* curseg is advanced by _bus_dmamap_load_buffer(). */
		error = _bus_dmamap_load_buffer(t, map, kva, segs->ds_len,
		    vm, flags, &curseg, first);
		first = false;
	}
	if (error == 0) {
		map->dm_mapsize = mapsize;
		map->dm_nsegs = curseg + 1;
		map->_dm_vmspace = vm;		/* always kernel */
		/*
		 * If our cache is coherent, then the map must be coherent too.
		 */
		if (coherent_p)
			map->_dm_flags |= _BUS_DMAMAP_COHERENT;
		return 0;
	}
	/*
	 * bus_dmamem_alloc() is supposed to hand back memory that never
	 * needs bouncing; if loading it fails anyway, that is a bug in
	 * the allocator which we deliberately do not work around here.
	 */
	return error;
}
/*
 * Allocate and map a region of this chip's bus space.
 *
 * Searches the chip's extent map for a free subregion of the given
 * size/alignment/boundary within [rstart, rend], translates the chosen
 * bus address to a system physical address, and returns both the bus
 * address (*addrp) and a directly usable bus-space handle (*bshp).
 * Compiled to a stub returning EOPNOTSUPP when the chip has no extent
 * map (CHIP_EXTENT undefined).
 */
static int
__BS(alloc)(void *v, bus_addr_t rstart, bus_addr_t rend, bus_size_t size,
    bus_size_t align, bus_size_t boundary, int flags, bus_addr_t *addrp,
    bus_space_handle_t *bshp)
{
#ifdef CHIP_EXTENT
	struct mips_bus_space_translation mbst;
	u_long addr;		/* bogus but makes extent happy */
	int error;
#if CHIP_ALIGN_STRIDE != 0
	int linear = flags & BUS_SPACE_MAP_LINEAR;

	/*
	 * Can't map xxx space linearly.
	 */
	if (linear)
		return (EOPNOTSUPP);
#endif

	/*
	 * Do the requested allocation.
	 */
#ifdef EXTENT_DEBUG
	printf("%s: allocating from %#"PRIxBUSADDR" to %#"PRIxBUSADDR"\n",
	    __S(__BS(alloc)), rstart, rend);
#endif
	/* EX_MALLOCOK only when it is safe for the extent code to malloc. */
	error = extent_alloc_subregion(CHIP_EXTENT(v), rstart, rend, size,
	    align, boundary,
	    EX_FAST | EX_NOWAIT |
	    (CHIP_EX_MALLOC_SAFE(v) ? EX_MALLOCOK : 0), &addr);
	if (error) {
#ifdef EXTENT_DEBUG
		printf("%s: allocation failed (%d)\n",
		    __S(__BS(alloc)), error);
		extent_print(CHIP_EXTENT(v));
#endif
		return (error);
	}

#ifdef EXTENT_DEBUG
	printf("%s: allocated 0x%lx to %#"PRIxBUSSIZE"\n",
	    __S(__BS(alloc)), addr, addr + size - 1);
#endif

	error = __BS(translate)(v, addr, size, flags, &mbst);
	if (error) {
		/* Undo the extent reservation on translation failure. */
		(void) extent_free(CHIP_EXTENT(v), addr, size,
		    EX_NOWAIT | (CHIP_EX_MALLOC_SAFE(v) ? EX_MALLOCOK : 0));
		return (error);
	}

	*addrp = addr;

	/*
	 * Build the handle from the translated system address: XKPHYS
	 * direct map on 64-bit-capable ABIs, KSEG0/KSEG1 on o32.
	 */
#if !defined(__mips_o32)
	if (flags & BUS_SPACE_MAP_CACHEABLE) {
		*bshp = MIPS_PHYS_TO_XKPHYS_CACHED(mbst.mbst_sys_start +
		    (addr - mbst.mbst_bus_start));
	} else {
		*bshp = MIPS_PHYS_TO_XKPHYS_UNCACHED(mbst.mbst_sys_start +
		    (addr - mbst.mbst_bus_start));
	}
#else
	if (flags & BUS_SPACE_MAP_CACHEABLE) {
		*bshp = MIPS_PHYS_TO_KSEG0(mbst.mbst_sys_start +
		    (addr - mbst.mbst_bus_start));
	} else
		*bshp = MIPS_PHYS_TO_KSEG1(mbst.mbst_sys_start +
		    (addr - mbst.mbst_bus_start));
#endif

	return (0);
#else /* ! CHIP_EXTENT */
	return (EOPNOTSUPP);
#endif /* CHIP_EXTENT */
}
/*
 * Map a region of this chip's bus space at a caller-chosen address.
 *
 * Translates the bus address to a system physical address and returns a
 * bus-space handle through *hp.  When acct is non-zero and the chip has
 * an extent map, the region is first reserved there so overlapping maps
 * are refused.  On 32-bit kernels, addresses outside the KSEG
 * direct-map window fall back to a dynamically allocated, TLB-backed
 * kernel mapping.  Returns 0 on success or an errno value.
 */
static int
__BS(map)(void *v, bus_addr_t addr, bus_size_t size, int flags,
    bus_space_handle_t *hp, int acct)
{
	struct mips_bus_space_translation mbst;
	int error;

	/*
	 * Get the translation for this address.
	 */
	error = __BS(translate)(v, addr, size, flags, &mbst);
	if (error)
		return (error);

#ifdef CHIP_EXTENT
	/* acct == 0 means the caller does its own extent accounting. */
	if (acct == 0)
		goto mapit;

#ifdef EXTENT_DEBUG
	printf("%s: allocating %#"PRIxBUSADDR" to %#"PRIxBUSADDR"\n",
	    __S(__BS(map)), addr, addr + size - 1);
#endif
	error = extent_alloc_region(CHIP_EXTENT(v), addr, size,
	    EX_NOWAIT | (CHIP_EX_MALLOC_SAFE(v) ? EX_MALLOCOK : 0));
	if (error) {
#ifdef EXTENT_DEBUG
		printf("%s: allocation failed (%d)\n",
		    __S(__BS(map)), error);
		extent_print(CHIP_EXTENT(v));
#endif
		return (error);
	}

 mapit:
#endif /* CHIP_EXTENT */

	/* From here on, addr is the system physical address. */
	addr = mbst.mbst_sys_start + (addr - mbst.mbst_bus_start);

#if defined(__mips_n32) || defined(_LP64)
	if (flags & BUS_SPACE_MAP_CACHEABLE) {
#ifdef __mips_n32
		/* n32 pointers are 32-bit: prefer KSEG when it reaches. */
		if (((addr + size) & ~MIPS_PHYS_MASK) == 0)
			*hp = (intptr_t)MIPS_PHYS_TO_KSEG0(addr);
		else
#endif
			*hp = MIPS_PHYS_TO_XKPHYS_CACHED(addr);
	} else if (flags & BUS_SPACE_MAP_PREFETCHABLE) {
		/* Uncached-accelerated XKPHYS window. */
		*hp = MIPS_PHYS_TO_XKPHYS_ACC(addr);
	} else {
#ifdef __mips_n32
		if (((addr + size) & ~MIPS_PHYS_MASK) == 0)
			*hp = (intptr_t)MIPS_PHYS_TO_KSEG1(addr);
		else
#endif
			*hp = MIPS_PHYS_TO_XKPHYS_UNCACHED(addr);
	}
#else
	if (((addr + size) & ~MIPS_PHYS_MASK) != 0) {
		/*
		 * Region extends beyond the KSEG direct-map window:
		 * build a TLB-backed kernel mapping instead.
		 */
		vaddr_t va;
		paddr_t pa;
		int s;

		/* Account for the sub-page offset of addr. */
		size = round_page((addr % PAGE_SIZE) + size);
		va = uvm_km_alloc(kernel_map, size, PAGE_SIZE,
		    UVM_KMF_VAONLY | UVM_KMF_NOWAIT);
		if (va == 0)
			return ENOMEM;

		/* check use of handle_is_km in BS(unmap) */
		KASSERT(!(MIPS_KSEG0_P(va) || MIPS_KSEG1_P(va)));

		*hp = va + (addr & PAGE_MASK);
		pa = trunc_page(addr);

		/* Block interrupts while the mappings are entered. */
		s = splhigh();
		while (size != 0) {
			pmap_kenter_pa(va, pa,
			    VM_PROT_READ | VM_PROT_WRITE, 0);
			pa += PAGE_SIZE;
			va += PAGE_SIZE;
			size -= PAGE_SIZE;
		}
		pmap_update(pmap_kernel());
		splx(s);
	} else {
		if (flags & BUS_SPACE_MAP_CACHEABLE)
			*hp = (intptr_t)MIPS_PHYS_TO_KSEG0(addr);
		else
			*hp = (intptr_t)MIPS_PHYS_TO_KSEG1(addr);
	}
#endif

	return (0);
}