int
obio_bs_map(void *t, bus_addr_t bpa, bus_size_t size, int flags,
    bus_space_handle_t *bshp)
{
	const struct pmap_devmap *pd;
	paddr_t startpa, endpa, pa, offset;
	vaddr_t va;
	pt_entry_t *pte;

	if ((pd = pmap_devmap_find_pa(bpa, size)) != NULL) {
		/* Device was statically mapped. */
		*bshp = pd->pd_va + (bpa - pd->pd_pa);
		return (0);
	}

	endpa = round_page(bpa + size);
	offset = bpa & PAGE_MASK;
	startpa = trunc_page(bpa);

	va = uvm_km_valloc(kernel_map, endpa - startpa);
	if (va == 0)
		return ENOMEM;

	*bshp = va + offset;

	for (pa = startpa; pa < endpa; pa += PAGE_SIZE, va += PAGE_SIZE) {
		pmap_kenter_pa(va, pa, VM_PROT_READ | VM_PROT_WRITE);
		pte = vtopte(va);
		*pte &= ~L2_S_CACHE_MASK;
		PTE_SYNC(pte);
	}
	pmap_update(pmap_kernel());

	return (0);
}
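For comparison, here is a minimal sketch of the matching unmap path for the dynamic case: it is not copied from any of the ports listed here, the function name example_bs_unmap is hypothetical, and it assumes the same uvm_km_valloc()/pmap_kenter_pa()-era interfaces used by obio_bs_map() above.

/*
 * Hypothetical sketch, not taken from a real port: undo what
 * obio_bs_map() does for a dynamically created mapping.  Regions
 * that hit a static devmap entry are permanent and are left alone.
 */
void
example_bs_unmap(void *t, bus_space_handle_t bsh, bus_size_t size)
{
	vaddr_t va, endva;

	if (pmap_devmap_find_va(bsh, size) != NULL)
		return;		/* Device was statically mapped; nothing to undo. */

	va = trunc_page(bsh);
	endva = round_page(bsh + size);

	pmap_kremove(va, endva - va);
	pmap_update(pmap_kernel());
	uvm_km_free(kernel_map, va, endva - va);	/* release the KVA */
}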
int
s3c2xx0_bs_map(void *t, bus_addr_t bpa, bus_size_t size, int flag,
    bus_space_handle_t *bshp)
{
	u_long startpa, endpa, pa;
	vm_offset_t va;
	pt_entry_t *pte;
	const struct pmap_devmap *pd;

	if ((pd = pmap_devmap_find_pa(bpa, size)) != NULL) {
		/* Device was statically mapped. */
		*bshp = pd->pd_va + (bpa - pd->pd_pa);
		return 0;
	}

	startpa = trunc_page(bpa);
	endpa = round_page(bpa + size);

	va = kmem_alloc_nofault(kernel_map, endpa - startpa);
	if (!va)
		return (ENOMEM);

	*bshp = (bus_space_handle_t)(va + (bpa - startpa));

	for (pa = startpa; pa < endpa; pa += PAGE_SIZE, va += PAGE_SIZE) {
		pmap_kenter(va, pa);
		pte = vtopte(va);
		if ((flag & BUS_SPACE_MAP_CACHEABLE) == 0)
			*pte &= ~L2_S_CACHE_MASK;
	}

	return (0);
}
int
i80321_bs_map(void *t, bus_addr_t bpa, bus_size_t size, int flag,
    bus_space_handle_t *bshp)
{
	const struct pmap_devmap *pd;
	paddr_t startpa, endpa, pa, pagecnt;
	vaddr_t va;
	pt_entry_t *pte;

	if ((pd = pmap_devmap_find_pa(bpa, size)) != NULL) {
		/* Device was statically mapped. */
		*bshp = pd->pd_va + (bpa - pd->pd_pa);
		return (0);
	}

#if 0
	printf("i80321_bs_map bpa %x, size %x flag %x\n", bpa, size, flag);
#endif

	endpa = round_page(bpa + size);
	startpa = trunc_page(bpa);
	pagecnt = endpa - startpa;

	va = (vaddr_t)km_alloc(endpa - startpa, &kv_any, &kp_none, &kd_nowait);
	if (va == 0)
		return (ENOMEM);

#if 0
	printf("i80321_bs_map va %x pa %x, endpa %x, sz %x\n", va, startpa,
	    endpa, endpa - startpa);
#endif

	*bshp = (bus_space_handle_t)(va + (bpa - startpa));

	for (pa = startpa; pagecnt > 0;
	    pa += PAGE_SIZE, va += PAGE_SIZE, pagecnt -= PAGE_SIZE) {
		pmap_kenter_pa(va, pa, VM_PROT_READ | VM_PROT_WRITE);
		if ((flag & BUS_SPACE_MAP_CACHEABLE) == 0) {
			pte = vtopte(va);
			*pte &= ~L2_S_CACHE_MASK;
			PTE_SYNC(pte);
		}
	}
	pmap_update(pmap_kernel());

	return (0);
}
/* mem bs */
int
ixp425_pci_mem_bs_map(void *t, bus_addr_t bpa, bus_size_t size,
    int cacheable, bus_space_handle_t *bshp)
{
	const struct pmap_devmap *pd;
	paddr_t startpa;
	paddr_t endpa;
	paddr_t pa;
	paddr_t offset;
	vaddr_t va;
	pt_entry_t *pte;

	if ((pd = pmap_devmap_find_pa(bpa, size)) != NULL) {
		/* Device was statically mapped. */
		*bshp = pd->pd_va + (bpa - pd->pd_pa);
		return 0;
	}

	endpa = round_page(bpa + size);
	offset = bpa & PAGE_MASK;
	startpa = trunc_page(bpa);

	/* Get some VM. */
	va = uvm_km_alloc(kernel_map, endpa - startpa, 0,
	    UVM_KMF_VAONLY | UVM_KMF_NOWAIT);
	if (va == 0)
		return ENOMEM;

	/* Store the bus space handle */
	*bshp = va + offset;

	/* Now map the pages */
	for (pa = startpa; pa < endpa; pa += PAGE_SIZE, va += PAGE_SIZE) {
		pmap_kenter_pa(va, pa, VM_PROT_READ | VM_PROT_WRITE, 0);
		pte = vtopte(va);
		*pte &= ~L2_S_CACHE_MASK;
		PTE_SYNC(pte);
	}
	pmap_update(pmap_kernel());

	return (0);
}
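As a quick sanity check on the rounding arithmetic shared by the dynamic paths above, here is a worked example with made-up numbers, assuming a 4 KiB PAGE_SIZE.

/*
 * Worked example (made-up values, PAGE_SIZE assumed to be 4 KiB):
 *
 *	bpa     = 0xc8009010, size = 0x30
 *	startpa = trunc_page(bpa)        = 0xc8009000
 *	endpa   = round_page(bpa + size) = 0xc800a000
 *	offset  = bpa & PAGE_MASK        = 0x010
 *
 * One page of KVA is allocated for [startpa, endpa) and the handle
 * returned is va + offset, so the sub-page alignment of the device
 * registers is preserved in the bus space handle.
 */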
int
ifpga_mem_bs_map(void *t, bus_addr_t bpa, bus_size_t size, int cacheable,
    bus_space_handle_t *bshp)
{
	bus_addr_t startpa, endpa;
	vaddr_t va;
	const struct pmap_devmap *pd;
	bus_addr_t pa = bpa + (bus_addr_t)t;

	if ((pd = pmap_devmap_find_pa(pa, size)) != NULL) {
		/* Device was statically mapped. */
		*bshp = pd->pd_va + (pa - pd->pd_pa);
		return 0;
	}

	/* Round the allocation to page boundaries. */
	startpa = trunc_page(bpa);
	endpa = round_page(bpa + size);

	/* Get some VM. */
	va = uvm_km_alloc(kernel_map, endpa - startpa, 0,
	    UVM_KMF_VAONLY | UVM_KMF_NOWAIT);
	if (va == 0)
		return ENOMEM;

	/* Store the bus space handle */
	*bshp = va + (bpa & PGOFSET);

	/* Now map the pages */
	/* The cookie is the physical base address for the I/O area */
	while (startpa < endpa) {
		/*
		 * XXX pmap_kenter_pa maps pages cacheable -- not what
		 * we want.
		 */
		pmap_enter(pmap_kernel(), va, (bus_addr_t)t + startpa,
		    VM_PROT_READ | VM_PROT_WRITE, 0);
		va += PAGE_SIZE;
		startpa += PAGE_SIZE;
	}
	pmap_update(pmap_kernel());

	return 0;
}
int
mpcore_bs_map(void *t, bus_addr_t bpa, bus_size_t size, int flag,
    bus_space_handle_t *bshp)
{
	u_long startpa, endpa, pa;
	vaddr_t va;
	const struct pmap_devmap *pd;

	if ((pd = pmap_devmap_find_pa(bpa, size)) != NULL) {
		/* Device was statically mapped. */
		*bshp = pd->pd_va + (bpa - pd->pd_pa);
		return 0;
	}

	startpa = trunc_page(bpa);
	endpa = round_page(bpa + size);

	/* XXX use extent manager to check duplicate mapping */

	va = uvm_km_alloc(kernel_map, endpa - startpa, 0,
	    UVM_KMF_VAONLY | UVM_KMF_NOWAIT);
	if (!va)
		return (ENOMEM);

	*bshp = (bus_space_handle_t)(va + (bpa - startpa));

	const int pmapflags =
	    (flag & (BUS_SPACE_MAP_CACHEABLE | BUS_SPACE_MAP_PREFETCHABLE))
	    ? 0 : PMAP_NOCACHE;

	for (pa = startpa; pa < endpa; pa += PAGE_SIZE, va += PAGE_SIZE) {
		pmap_kenter_pa(va, pa, VM_PROT_READ | VM_PROT_WRITE, pmapflags);
	}
	pmap_update(pmap_kernel());

	return (0);
}
int
generic_bs_map(void *t, bus_addr_t bpa, bus_size_t size, int flags,
    bus_space_handle_t *bshp)
{
	const struct pmap_devmap *pd;
	vm_paddr_t startpa, endpa, pa, offset;
	vm_offset_t va;
	pt_entry_t *pte;

	if ((pd = pmap_devmap_find_pa(bpa, size)) != NULL) {
		/* Device was statically mapped. */
		*bshp = pd->pd_va + (bpa - pd->pd_pa);
		return (0);
	}

	endpa = round_page(bpa + size);
	offset = bpa & PAGE_MASK;
	startpa = trunc_page(bpa);

	va = kmem_alloc(kernel_map, endpa - startpa);
	if (va == 0)
		return (ENOMEM);

	*bshp = va + offset;

	for (pa = startpa; pa < endpa; pa += PAGE_SIZE, va += PAGE_SIZE) {
		pmap_kenter(va, pa);
		pte = vtopte(va);
		if (!(flags & BUS_SPACE_MAP_CACHEABLE)) {
			*pte &= ~L2_S_CACHE_MASK;
			PTE_SYNC(pte);
		}
	}

	return (0);
}
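Finally, a hedged sketch of how a driver consumes one of these routines through the bus_space(9) interface: the tag, base address, length, and register offset below are placeholders, not values taken from any of the code above, and example_attach/EXAMPLE_REG_ID are hypothetical names.

/*
 * Hypothetical caller (placeholder names and offsets): map a register
 * window through the bus space tag, read one register, and unmap.
 * The usual headers are <sys/param.h>, <sys/systm.h> and the port's
 * <machine/bus.h>.
 */
#define EXAMPLE_REG_ID	0x00		/* hypothetical register offset */

int
example_attach(bus_space_tag_t iot, bus_addr_t base, bus_size_t len)
{
	bus_space_handle_t ioh;
	uint32_t id;

	/* Mapped uncached unless BUS_SPACE_MAP_CACHEABLE is passed. */
	if (bus_space_map(iot, base, len, 0, &ioh) != 0)
		return (ENOMEM);

	id = bus_space_read_4(iot, ioh, EXAMPLE_REG_ID);
	printf("example: id register %x\n", id);

	bus_space_unmap(iot, ioh, len);
	return (0);
}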