/*
 * Tear down the kernel mappings previously established by physaccess():
 * mark every PTE covering `size' bytes at `vaddr' invalid, then flush
 * the TLB so no stale translations remain.
 */
void
physunaccess(void *vaddr, int size)
{
	pt_entry_t *pte = kvtopte(vaddr);
	int npages;

	for (npages = btoc(size); npages != 0; npages--)
		*pte++ = PG_NV;
	TBIAS();
}
/* * Map `size' bytes of physical memory starting at `paddr' into * kernel VA space at `vaddr'. Read/write and cache-inhibit status * are specified by `prot'. */ void physaccess(void *vaddr, void *paddr, int size, int prot) { pt_entry_t *pte; u_int page; pte = kvtopte(vaddr); page = (u_int)paddr & PG_FRAME; for (size = btoc(size); size; size--) { *pte++ = PG_V | prot | page; page += PAGE_SIZE; } TBIAS(); }
/*
 * Common function for mapping DMA-safe memory.  May be called by
 * bus-specific DMA memory map functions.
 *
 * Allocates `size' (rounded up to a page multiple) bytes of kernel VA,
 * enters wired read/write mappings for every page of every segment in
 * `segs', and returns the mapped KVA through `kvap'.  When the caller
 * passes BUS_DMA_COHERENT, each page is additionally cache-inhibited
 * and the TLB is flushed afterwards.
 *
 * Returns 0 on success or ENOMEM if kernel VA could not be allocated.
 */
int
_bus_dmamem_map(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs,
    size_t size, void **kvap, int flags)
{
	vaddr_t va;
	bus_addr_t addr;
	int curseg;
	const uvm_flag_t kmflags =
	    (flags & BUS_DMA_NOWAIT) != 0 ? UVM_KMF_NOWAIT : 0;

	size = round_page(size);

	/* VA-only allocation: we supply the physical pages below. */
	va = uvm_km_alloc(kernel_map, size, 0, UVM_KMF_VAONLY | kmflags);
	if (va == 0)
		return ENOMEM;

	*kvap = (void *)va;

	for (curseg = 0; curseg < nsegs; curseg++) {
		/*
		 * Record the segment's coherency state once per segment;
		 * this update is invariant over the per-page loop below
		 * (it was previously repeated for every page).
		 */
		segs[curseg]._ds_flags &= ~BUS_DMA_COHERENT;
		segs[curseg]._ds_flags |= (flags & BUS_DMA_COHERENT);

		for (addr = segs[curseg].ds_addr;
		    addr < (segs[curseg].ds_addr + segs[curseg].ds_len);
		    addr += PAGE_SIZE, va += PAGE_SIZE, size -= PAGE_SIZE) {
			if (size == 0)
				panic("_bus_dmamem_map: size botch");
			pmap_enter(pmap_kernel(), va, addr,
			    VM_PROT_READ | VM_PROT_WRITE,
			    VM_PROT_READ | VM_PROT_WRITE | PMAP_WIRED);
			/* Cache-inhibit the page if necessary */
			if ((flags & BUS_DMA_COHERENT) != 0)
				_pmap_set_page_cacheinhibit(pmap_kernel(), va);
		}
	}
	pmap_update(pmap_kernel());
	/* Flush the TLB when any page's cacheability was changed. */
	if ((flags & BUS_DMA_COHERENT) != 0)
		TBIAS();

	return 0;
}