/*
 * IOMapPages
 *
 * Map the physical range [pa, pa + length) at virtual address va in the
 * given vm_map, applying the cache mode requested in the kIOMapCacheMask
 * bits of `options`.  kIOMapReadOnly selects a read-only protection;
 * otherwise the mapping is read/write.
 *
 * The page's WIMG cache attributes are recorded in the pmap layer and the
 * map is flagged accordingly before the block mapping is installed.
 *
 * Always returns KERN_SUCCESS.
 */
kern_return_t IOMapPages(vm_map_t map, mach_vm_address_t va, mach_vm_address_t pa, mach_vm_size_t length, unsigned int options)
{
	pmap_t		target_pmap = map->pmap;
	ppnum_t		first_page = (ppnum_t)atop_64(pa);
	unsigned int	cache_sel = options & kIOMapCacheMask;
	unsigned int	wimg_bits;
	vm_prot_t	map_prot;

	/* Read-only if requested, otherwise read/write. */
	map_prot = (options & kIOMapReadOnly) ? VM_PROT_READ : (VM_PROT_READ | VM_PROT_WRITE);

	/* Translate the requested cache mode into WIMG attribute bits. */
	if (cache_sel == kIOMapInhibitCache) {
		wimg_bits = VM_WIMG_IO;
	} else if (cache_sel == kIOMapWriteThruCache) {
		wimg_bits = VM_WIMG_WTHRU;
	} else if (cache_sel == kIOMapWriteCombineCache) {
		wimg_bits = VM_WIMG_WCOMB;
	} else if (cache_sel == kIOMapCopybackCache) {
		wimg_bits = VM_WIMG_COPYBACK;
	} else if (cache_sel == kIOMapCopybackInnerCache) {
		wimg_bits = VM_WIMG_INNERWBACK;
	} else {
		/* kIOMapDefaultCache and any unrecognized selector. */
		wimg_bits = IODefaultCacheBits(pa);
	}

	pmap_set_cache_attributes(first_page, wimg_bits);

	vm_map_set_cache_attr(map, (vm_map_offset_t)va);

	/* Install the block mapping covering the whole (page-rounded) range. */
	pmap_map_block(target_pmap, va, first_page,
	    (uint32_t)atop_64(round_page_64(length)), map_prot, 0, 0);

	return KERN_SUCCESS;
}
/*
 * chudxnu_phys_addr_wimg
 *
 * Return the default WIMG cache-attribute bits for the given physical
 * address, as determined by IODefaultCacheBits().
 */
__private_extern__ uint32_t
chudxnu_phys_addr_wimg(uint64_t phys_addr)
{
	uint32_t wimg_bits = IODefaultCacheBits(phys_addr);

	return wimg_bits;
}