/*
 * vm_contig_pg_kmap:
 *
 * Map a previously allocated (vm_contig_pg_alloc) range of pages from
 * vm_page_array[] into the KVA.  Once mapped, the pages are part of
 * the kernel and are to be freed with kmem_free(&kernel_map, addr, size).
 *
 * No requirements.
 */
static vm_offset_t
vm_contig_pg_kmap(int start, u_long size, vm_map_t map, int flags)
{
	vm_offset_t addr;
	vm_paddr_t pa;
	vm_page_t pga = vm_page_array;
	u_long offset;

	if (size == 0)
		panic("vm_contig_pg_kmap: size must not be 0");
	size = round_page(size);
	addr = kmem_alloc_pageable(&kernel_map, size);
	if (addr) {
		pa = VM_PAGE_TO_PHYS(&pga[start]);
		for (offset = 0; offset < size; offset += PAGE_SIZE)
			pmap_kenter_quick(addr + offset, pa + offset);
		smp_invltlb();
		if (flags & M_ZERO)
			bzero((void *)addr, size);
	}
	return(addr);
}
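/*
 * Illustrative sketch (not part of the original file): how a caller such
 * as a contigmalloc-style wrapper might combine the page allocator with
 * the kmap step above.  The helpers vm_contig_pg_alloc() and
 * vm_contig_pg_free() are assumed to live alongside vm_contig_pg_kmap()
 * with the signatures sketched here; treat this as a hedged example of
 * the alloc/map/unwind flow, not a definitive interface.
 */
static void *
contig_alloc_example(unsigned long size, vm_paddr_t low, vm_paddr_t high,
		     unsigned long alignment, unsigned long boundary,
		     int flags)
{
	int start;
	vm_offset_t addr;

	/* Reserve a physically contiguous run of pages; returns a page
	 * index into vm_page_array[], or a negative value on failure. */
	start = vm_contig_pg_alloc(size, low, high, alignment, boundary,
				   flags);
	if (start < 0)
		return (NULL);

	/* Map the run into KVA; undo the reservation if that fails. */
	addr = vm_contig_pg_kmap(start, size, &kernel_map, flags);
	if (addr == 0) {
		vm_contig_pg_free(start, size);
		return (NULL);
	}

	/* Per the comment above, the caller eventually releases the
	 * mapping with kmem_free(&kernel_map, addr, size). */
	return ((void *)addr);
}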
/*
 * p->p_token is held on entry.
 */
static int
procfs_rwmem(struct proc *curp, struct proc *p, struct uio *uio)
{
	int error;
	int writing;
	struct vmspace *vm;
	vm_map_t map;
	vm_offset_t pageno = 0;		/* page number */
	vm_prot_t reqprot;
	vm_offset_t kva;

	/*
	 * If the vmspace is in the midst of being allocated or deallocated,
	 * or the process is exiting, don't try to grab anything.  The
	 * page table usage in that process may be messed up.
	 */
	vm = p->p_vmspace;
	if (p->p_stat == SIDL || p->p_stat == SZOMB)
		return EFAULT;
	if ((p->p_flags & (P_WEXIT | P_INEXEC)) ||
	    sysref_isinactive(&vm->vm_sysref))
		return EFAULT;

	/*
	 * The map we want...
	 */
	vmspace_hold(vm);
	map = &vm->vm_map;
	writing = (uio->uio_rw == UIO_WRITE);
	reqprot = VM_PROT_READ;
	if (writing)
		reqprot |= VM_PROT_WRITE | VM_PROT_OVERRIDE_WRITE;

	kva = kmem_alloc_pageable(&kernel_map, PAGE_SIZE);

	/*
	 * Only map in one page at a time.  We don't have to, but it
	 * makes things easier.  This way is trivial - right?
	 */
	do {
		vm_offset_t uva;
		vm_offset_t page_offset;	/* offset into page */
		size_t len;
		vm_page_t m;

		uva = (vm_offset_t)uio->uio_offset;

		/*
		 * Get the page number of this segment.
		 */
		pageno = trunc_page(uva);
		page_offset = uva - pageno;

		/*
		 * How many bytes to copy
		 */
		len = szmin(PAGE_SIZE - page_offset, uio->uio_resid);

		/*
		 * Fault the page on behalf of the process
		 */
		m = vm_fault_page(map, pageno, reqprot,
				  VM_FAULT_NORMAL, &error);
		if (error) {
			KKASSERT(m == NULL);
			error = EFAULT;
			break;
		}

		/*
		 * Create a temporary KVA mapping and do the I/O.  We
		 * can switch between cpus so don't bother synchronizing
		 * across all cores.
		 */
		pmap_kenter_quick(kva, VM_PAGE_TO_PHYS(m));
		error = uiomove((caddr_t)(kva + page_offset), len, uio);
		pmap_kremove_quick(kva);

		/*
		 * Release the page and we are done.
		 */
		vm_page_unhold(m);
	} while (error == 0 && uio->uio_resid > 0);

	vmspace_drop(vm);
	kmem_free(&kernel_map, kva, PAGE_SIZE);
	return (error);
}
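/*
 * Illustrative sketch (not part of the original file): how a caller might
 * package a kernel buffer into a struct uio and read a target process's
 * memory through procfs_rwmem() above.  The field names follow the usual
 * BSD uio layout (uio_iov, uio_rw, uio_segflg, ...); the helper name is
 * hypothetical, and the caller is assumed to already hold p->p_token as
 * procfs_rwmem() requires.
 */
static int
procfs_read_mem_example(struct proc *curp, struct proc *p,
			vm_offset_t uva, void *buf, size_t len)
{
	struct iovec iov;
	struct uio uio;

	iov.iov_base = buf;
	iov.iov_len = len;

	uio.uio_iov = &iov;
	uio.uio_iovcnt = 1;
	uio.uio_offset = (off_t)uva;	/* target-process virtual address */
	uio.uio_resid = len;
	uio.uio_segflg = UIO_SYSSPACE;	/* buf is a kernel buffer */
	uio.uio_rw = UIO_READ;		/* read from the target process */
	uio.uio_td = curthread;

	/* procfs_rwmem() faults and copies one page per iteration until
	 * uio_resid reaches zero or an error occurs. */
	return (procfs_rwmem(curp, p, &uio));
}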