/*===========================================================================*
 *			rs_memctl_map_prealloc				     *
 *===========================================================================*/
/* Map a preallocated anonymous region into 'vmp' for RS.  On entry *len is
 * the requested size in bytes; on success it is rounded up to a whole number
 * of clicks, *addr receives the region's virtual address, and OK is
 * returned.  Returns EINVAL for a zero-length request, ENOMEM on mapping
 * failure.
 */
static int rs_memctl_map_prealloc(struct vmproc *vmp, vir_bytes *addr,
	size_t *len)
{
	struct vir_region *vr;
	vir_bytes base, top;
	int is_vm;

	/* *len is unsigned, so the only invalid size is zero.  (The
	 * original '<= 0' test was equivalent but misleading.)
	 */
	if (*len == 0)
		return EINVAL;
	*len = CLICK_CEIL(*len);

	/* VM itself maps into its own dedicated window; any other process
	 * uses the regular mmap range.
	 */
	is_vm = (vmp->vm_endpoint == VM_PROC_NR);
	base = is_vm ? VM_OWN_MMAPBASE : VM_MMAPBASE;
	top = is_vm ? VM_OWN_MMAPTOP : VM_MMAPTOP;

	/* Anonymous, writable, not zeroed, and fully preallocated so no
	 * page faults are needed later.
	 */
	if (!(vr = map_page_region(vmp, base, top, *len,
	    VR_ANON|VR_WRITABLE|VR_UNINITIALIZED, MF_PREALLOC,
	    &mem_type_anon))) {
		return ENOMEM;
	}

	/* Tag the region so it can be recognized as a prealloc mapping. */
	vr->flags |= VR_PREALLOC_MAP;

	*addr = vr->vaddr;
	return OK;
}
/* Common worker for mmap-style requests: translate the caller's mmap flags
 * into region flags/memflags, page-align the length, and create the region.
 * If 'addr' is nonzero it is tried first; with MAP_FIXED the target range is
 * unmapped beforehand and failure at that address is final.  Returns the new
 * region, or NULL on invalid arguments or allocation failure.
 */
static struct vir_region *mmap_region(struct vmproc *vmp, vir_bytes addr,
	u32_t vmm_flags, size_t len, u32_t vrflags,
	mem_type_t *mt, int execpriv)
{
	u32_t mfflags = 0;
	struct vir_region *vr = NULL;

	/* Translate caller-visible mmap flags. */
	if (vmm_flags & MAP_LOWER16M) vrflags |= VR_LOWER16MB;
	if (vmm_flags & MAP_LOWER1M)  vrflags |= VR_LOWER1MB;
	if (vmm_flags & MAP_ALIGNMENT_64KB) vrflags |= VR_PHYS64K;
	if (vmm_flags & MAP_PREALLOC) mfflags |= MF_PREALLOC;
	if (vmm_flags & MAP_UNINITIALIZED) {
		/* Only privileged callers may skip zeroing new memory. */
		if (!execpriv)
			return NULL;
		vrflags |= VR_UNINITIALIZED;
	}

	/* len is unsigned, so the only invalid size is zero.  (The
	 * original '<= 0' test was equivalent but misleading.)
	 */
	if (len == 0)
		return NULL;

	/* Round the length up to whole pages. */
	if (len % VM_PAGE_SIZE)
		len += VM_PAGE_SIZE - (len % VM_PAGE_SIZE);

	if (addr && (vmm_flags & MAP_FIXED)) {
		/* MAP_FIXED: clear whatever occupies the target range. */
		int r = map_unmap_range(vmp, addr, len);
		if (r != OK) {
			printf("mmap_region: map_unmap_range failed (%d)\n", r);
			return NULL;
		}
	}

	if (addr || (vmm_flags & MAP_FIXED)) {
		/* An address is given, first try at that address. */
		vr = map_page_region(vmp, addr, 0, len,
		    vrflags, mfflags, mt);
		/* With MAP_FIXED there is no fallback location. */
		if (!vr && (vmm_flags & MAP_FIXED))
			return NULL;
	}

	if (!vr) {
		/* No address given or address already in use: let VM pick
		 * any spot below VM_DATATOP (but never page zero).
		 */
		vr = map_page_region(vmp, VM_PAGE_SIZE, VM_DATATOP, len,
		    vrflags, mfflags, mt);
	}

	return vr;
}
/*===========================================================================*
 *				do_mmap			     		     *
 *===========================================================================*/
/* Handle an mmap request message.  Only anonymous mappings (fd == -1 or
 * MAP_ANON) are supported; anything else returns ENOSYS.  On success the
 * process-visible address of the new region is stored in VMM_RETADDR.
 */
PUBLIC int do_mmap(message *m)
{
	int r, n;
	struct vmproc *vmp;
	int mfflags = 0;
	struct vir_region *vr = NULL;

	if((r=vm_isokendpt(m->m_source, &n)) != OK) {
		panic("do_mmap: message from strange source: %d", m->m_source);
	}

	vmp = &vmproc[n];

	/* The caller must have a page table to map into. */
	if(!(vmp->vm_flags & VMF_HASPT))
		return ENXIO;

	if(m->VMM_FD == -1 || (m->VMM_FLAGS & MAP_ANON)) {
		u32_t vrflags = VR_ANON | VR_WRITABLE;
		size_t len = (vir_bytes) m->VMM_LEN;

		/* Only anonymous mappings; reject a real fd combined with
		 * MAP_ANON, and reject zero-length requests (the later
		 * versions of this call check length too).
		 */
		if(m->VMM_FD != -1 || len == 0) {
			return EINVAL;
		}

		/* Contiguous phys memory has to be preallocated. */
		if((m->VMM_FLAGS & (MAP_CONTIG|MAP_PREALLOC)) == MAP_CONTIG) {
			return EINVAL;
		}

		/* Translate caller flags into region/memflags. */
		if(m->VMM_FLAGS & MAP_PREALLOC) mfflags |= MF_PREALLOC;
		if(m->VMM_FLAGS & MAP_LOWER16M) vrflags |= VR_LOWER16MB;
		if(m->VMM_FLAGS & MAP_LOWER1M)  vrflags |= VR_LOWER1MB;
		if(m->VMM_FLAGS & MAP_ALIGN64K) vrflags |= VR_PHYS64K;
		if(m->VMM_FLAGS & MAP_SHARED)   vrflags |= VR_SHARED;
		if(m->VMM_FLAGS & MAP_CONTIG)   vrflags |= VR_CONTIG;

		/* Round the length up to whole pages. */
		if(len % VM_PAGE_SIZE)
			len += VM_PAGE_SIZE - (len % VM_PAGE_SIZE);

		/* Map at the hinted address if given, otherwise above the
		 * stack top.
		 */
		if(!(vr = map_page_region(vmp,
			arch_vir2map(vmp,
				m->VMM_ADDR ? m->VMM_ADDR : vmp->vm_stacktop),
			VM_DATATOP, len, MAP_NONE, vrflags, mfflags))) {
			return ENOMEM;
		}
	} else {
		return ENOSYS;
	}

	/* Return mapping, as seen from process. */
	assert(vr);
	m->VMM_RETADDR = arch_map2vir(vmp, vr->vaddr);

	return OK;
}
/*===========================================================================*
 *				do_map_phys		     		     *
 *===========================================================================*/
/* Map a physical memory range into a target process's address space.
 * The caller's permission is checked via map_perm_check() before any
 * rounding is done; the reply address compensates for sub-page alignment.
 */
PUBLIC int do_map_phys(message *m)
{
	int r, n;
	struct vmproc *vmp;
	endpoint_t target;
	struct vir_region *vr;
	vir_bytes len;
	phys_bytes startaddr;
	size_t offset;

	target = m->VMMP_EP;
	len = m->VMMP_LEN;
	/* NOTE(review): len is unsigned, so this only rejects len == 0. */
	if (len <= 0) return EINVAL;

	/* SELF means: map into the caller itself. */
	if(target == SELF)
		target = m->m_source;

	if((r=vm_isokendpt(target, &n)) != OK)
		return EINVAL;

	startaddr = (vir_bytes)m->VMMP_PHADDR;

	/* First check permission, then round range down/up. Caller can't
	 * help it if we can't map in lower than page granularity.
	 */
	if(map_perm_check(m->m_source, target, startaddr, len) != OK) {
		printf("VM: unauthorized mapping of 0x%lx by %d\n",
			startaddr, m->m_source);
		return EPERM;
	}

	vmp = &vmproc[n];

	/* Target must have a page table to map into. */
	if(!(vmp->vm_flags & VMF_HASPT))
		return ENXIO;

	/* Align the physical start down to a page boundary, widening the
	 * length accordingly; remember the sub-page offset so the reply
	 * still points at the exact requested byte.
	 */
	offset = startaddr % VM_PAGE_SIZE;
	len += offset;
	startaddr -= offset;

	/* Round the length up to whole pages. */
	if(len % VM_PAGE_SIZE)
		len += VM_PAGE_SIZE - (len % VM_PAGE_SIZE);

	/* Direct (non-faulting) writable mapping above the stack top.
	 * startaddr is passed as the region's 'what' (physical source).
	 */
	if(!(vr = map_page_region(vmp,
		arch_vir2map(vmp, vmp->vm_stacktop),
		VM_DATATOP, len, startaddr,
		VR_DIRECT | VR_NOPF | VR_WRITABLE, 0))) {
		return ENOMEM;
	}

	/* Reply with the process-visible address plus the sub-page offset. */
	m->VMMP_VADDR_REPLY = (void *) (arch_map2vir(vmp, vr->vaddr) + offset);

	return OK;
}
/* Allocate an anonymous, writable, uninitialized region at 'vaddr' for a
 * boot-time process being exec'd.  Panics on failure, since a boot process
 * that cannot get its memory is unrecoverable.
 */
static void boot_alloc(struct exec_info *execi, off_t vaddr, size_t len,
	int flags)
{
	struct vm_exec_info *ei;
	struct vmproc *vmp;
	struct vir_region *vr;

	/* The target process is carried in the exec_info's opaque pointer. */
	ei = (struct vm_exec_info *) execi->opaque;
	vmp = ei->vmp;

	/* Not pre-zeroed (VR_UNINITIALIZED): the loader fills it in. */
	vr = map_page_region(vmp, vaddr, 0, len,
	    VR_ANON | VR_WRITABLE | VR_UNINITIALIZED, flags, &mem_type_anon);
	if (vr == NULL)
		panic("VM: exec: map_page_region for boot process failed");
}
/*===========================================================================*
 *				do_map_phys		     		     *
 *===========================================================================*/
/* Map a physical memory range into a target process's address space, using
 * the directphys memory type.  Permission is checked before any rounding;
 * the reply address compensates for sub-page alignment.
 */
int do_map_phys(message *m)
{
	int r, n;
	struct vmproc *vmp;
	endpoint_t target;
	struct vir_region *vr;
	vir_bytes len;
	phys_bytes startaddr;
	size_t offset;

	target = m->m_lsys_vm_map_phys.ep;
	len = m->m_lsys_vm_map_phys.len;
	/* NOTE(review): len is unsigned, so this only rejects len == 0. */
	if (len <= 0) return EINVAL;

	/* SELF means: map into the caller itself. */
	if(target == SELF)
		target = m->m_source;

	if((r=vm_isokendpt(target, &n)) != OK)
		return EINVAL;

	startaddr = (vir_bytes)m->m_lsys_vm_map_phys.phaddr;

	/* First check permission, then round range down/up. Caller can't
	 * help it if we can't map in lower than page granularity.
	 */
	if(map_perm_check(m->m_source, target, startaddr, len) != OK) {
		printf("VM: unauthorized mapping of 0x%lx by %d\n",
			startaddr, m->m_source);
		return EPERM;
	}

	vmp = &vmproc[n];

	/* Align the physical start down to a page boundary, widening the
	 * length accordingly; remember the sub-page offset so the reply
	 * still points at the exact requested byte.
	 */
	offset = startaddr % VM_PAGE_SIZE;
	len += offset;
	startaddr -= offset;

	/* Round the length up to whole pages. */
	if(len % VM_PAGE_SIZE)
		len += VM_PAGE_SIZE - (len % VM_PAGE_SIZE);

	/* Direct writable mapping anywhere below VM_DATATOP. */
	if(!(vr = map_page_region(vmp, 0, VM_DATATOP, len,
		VR_DIRECT | VR_WRITABLE, 0, &mem_type_directphys))) {
		return ENOMEM;
	}

	/* Attach the physical source address to the new region. */
	phys_setphys(vr, startaddr);

	/* Reply with the virtual address plus the sub-page offset. */
	m->m_lsys_vm_map_phys.reply = (void *) (vr->vaddr + offset);

	return OK;
}
/* Handle an mmap system call message (negative-errno convention).  Only
 * anonymous mappings (fd == -1 or MAP_ANONYMOUS) are supported; anything
 * else returns -ENOSYS.  On success the process-visible address is stored
 * in VMM_RETADDR and also returned directly.
 */
int scall_mmap(kipc_msg_t *m)
{
	int err, n;
	struct vmproc *vmp;
	int mfflags = 0;
	struct vir_region *vr = NULL;

	if((err = vm_isokendpt(m->m_source, &n)) != 0) {
		vm_panic("do_mmap: message from strange source", m->m_source);
	}

	vmp = &vmproc[n];

	/* The caller must have a page table to map into. */
	if(!(vmp->vm_flags & VMF_HASPT))
		return -ENXIO;

	if(m->VMM_FD == -1 || (m->VMM_FLAGS & MAP_ANONYMOUS)) {
		u32_t vrflags = VR_ANON | VR_WRITABLE;
		size_t len = (vir_bytes) m->VMM_LEN;

		/* Only anonymous mappings; reject a real fd combined with
		 * MAP_ANONYMOUS, and reject zero-length requests.
		 */
		if(m->VMM_FD != -1 || len == 0) {
			return -EINVAL;
		}

		/* Translate caller flags into region/memflags. */
		if(m->VMM_FLAGS & MAP_CONTIG)   mfflags |= MF_CONTIG;
		if(m->VMM_FLAGS & MAP_PREALLOC) mfflags |= MF_PREALLOC;
		if(m->VMM_FLAGS & MAP_LOWER16M) vrflags |= VR_LOWER16MB;
		if(m->VMM_FLAGS & MAP_LOWER1M)  vrflags |= VR_LOWER1MB;
		if(m->VMM_FLAGS & MAP_ALIGN64K) vrflags |= VR_PHYS64K;
		if(m->VMM_FLAGS & MAP_SHARED)   vrflags |= VR_SHARED;

		/* Round the length up to whole pages. */
		if(len % VM_PAGE_SIZE)
			len += VM_PAGE_SIZE - (len % VM_PAGE_SIZE);

		/* Map at the hinted address if given, otherwise above the
		 * stack top.
		 */
		if(!(vr = map_page_region(vmp,
			arch_vir2map(vmp,
				m->VMM_ADDR ? m->VMM_ADDR : vmp->vm_stacktop),
			VM_DATATOP, len, MAP_NONE, vrflags, mfflags))) {
			return -ENOMEM;
		}
	} else {
		return -ENOSYS;
	}

	/* Return mapping, as seen from process. */
	vm_assert(vr);
	m->VMM_RETADDR = arch_map2vir(vmp, vr->vaddr);

	return m->VMM_RETADDR;
}
/*===========================================================================*
 *				do_remap		     		     *
 *===========================================================================*/
/* Map an existing region of one process into another as a shared region.
 * VM_REMAP maps it writable, VM_REMAP_RO read-only.  The source address
 * must be the exact start of a region and the size must match the whole
 * region.  On success the destination address is stored in ret_addr.
 */
int do_remap(message *m)
{
	int dn, sn;
	vir_bytes da, sa;
	size_t size;
	u32_t flags;
	struct vir_region *src_region, *vr;
	struct vmproc *dvmp, *svmp;
	int r;
	int readonly;

	/* The message type selects writability of the new mapping. */
	if(m->m_type == VM_REMAP)
		readonly = 0;
	else if(m->m_type == VM_REMAP_RO)
		readonly = 1;
	else panic("do_remap: can't be");

	da = (vir_bytes) m->m_lsys_vm_vmremap.dest_addr;
	sa = (vir_bytes) m->m_lsys_vm_vmremap.src_addr;
	size = m->m_lsys_vm_vmremap.size;
	/* NOTE(review): size is unsigned, so this only rejects size == 0. */
	if (size <= 0) return EINVAL;

	if ((r = vm_isokendpt((endpoint_t) m->m_lsys_vm_vmremap.destination,
			&dn)) != OK)
		return EINVAL;
	if ((r = vm_isokendpt((endpoint_t) m->m_lsys_vm_vmremap.source,
			&sn)) != OK)
		return EINVAL;

	dvmp = &vmproc[dn];
	svmp = &vmproc[sn];

	/* The source address must fall in an existing region ... */
	if (!(src_region = map_lookup(svmp, sa, NULL)))
		return EINVAL;

	/* ... and must be exactly its start. */
	if(src_region->vaddr != sa) {
		printf("VM: do_remap: not start of region.\n");
		return EFAULT;
	}

	/* Round the size up to whole pages before comparing. */
	if (size % VM_PAGE_SIZE)
		size += VM_PAGE_SIZE - size % VM_PAGE_SIZE;

	/* The request must cover the entire source region. */
	if(size != src_region->length) {
		printf("VM: do_remap: not size of region.\n");
		return EFAULT;
	}

	flags = VR_SHARED;
	if(!readonly)
		flags |= VR_WRITABLE;

	/* Map at the requested destination address, or anywhere if none
	 * was given.
	 */
	if(da)
		vr = map_page_region(dvmp, da, 0, size, flags, 0,
			&mem_type_shared);
	else
		vr = map_page_region(dvmp, 0, VM_DATATOP, size, flags, 0,
			&mem_type_shared);

	if(!vr) {
		printf("VM: re-map of shared area failed\n");
		return ENOMEM;
	}

	/* Link the new shared region back to its source region. */
	shared_setsource(vr, svmp->vm_endpoint, src_region);

	m->m_lsys_vm_vmremap.ret_addr = (void *) vr->vaddr;
	return OK;
}
/*===========================================================================*
 *				do_mmap			     		     *
 *===========================================================================*/
/* Handle an mmap request.  Only anonymous mappings (fd == -1 or MAP_ANON)
 * are supported here; anything else returns ENOSYS.  VFS and RS get extra
 * privileges (MAP_THIRDPARTY, MAP_UNINITIALIZED).  On success the mapping's
 * address is stored in VMM_RETADDR.
 */
int do_mmap(message *m)
{
	int r, n;
	struct vmproc *vmp;
	int mfflags = 0;
	vir_bytes addr;
	struct vir_region *vr = NULL;
	int execpriv = 0;

	/* RS and VFS can do slightly more special mmap() things */
	if(m->m_source == VFS_PROC_NR || m->m_source == RS_PROC_NR)
		execpriv = 1;

	if(m->VMM_FLAGS & MAP_THIRDPARTY) {
		/* Mapping on behalf of another process: privileged only. */
		if(!execpriv) return EPERM;
		if((r=vm_isokendpt(m->VMM_FORWHOM, &n)) != OK)
			return ESRCH;
	} else {
		/* regular mmap, i.e. for caller */
		if((r=vm_isokendpt(m->m_source, &n)) != OK) {
			panic("do_mmap: message from strange source: %d",
				m->m_source);
		}
	}

	vmp = &vmproc[n];

	if(m->VMM_FD == -1 || (m->VMM_FLAGS & MAP_ANON)) {
		mem_type_t *mt;
		u32_t vrflags = VR_ANON | VR_WRITABLE;
		size_t len = (vir_bytes) m->VMM_LEN;

		/* Anonymous only; reject a real fd combined with MAP_ANON
		 * and zero-length requests (len is unsigned, so '<= 0' is
		 * effectively '== 0').
		 */
		if(m->VMM_FD != -1 || len <= 0) {
			return EINVAL;
		}

		/* Contiguous phys memory has to be preallocated. */
		if((m->VMM_FLAGS & (MAP_CONTIG|MAP_PREALLOC)) == MAP_CONTIG) {
			return EINVAL;
		}

		/* Translate caller flags into region/memflags. */
		if(m->VMM_FLAGS & MAP_PREALLOC) mfflags |= MF_PREALLOC;
		if(m->VMM_FLAGS & MAP_LOWER16M) vrflags |= VR_LOWER16MB;
		if(m->VMM_FLAGS & MAP_LOWER1M)  vrflags |= VR_LOWER1MB;
		if(m->VMM_FLAGS & MAP_ALIGN64K) vrflags |= VR_PHYS64K;
		if(m->VMM_FLAGS & MAP_UNINITIALIZED) {
			/* Only privileged callers may skip zeroing. */
			if(!execpriv) return EPERM;
			vrflags |= VR_UNINITIALIZED;
		}

		/* Contiguous mappings use a dedicated memory type. */
		if(m->VMM_FLAGS & MAP_CONTIG) {
			vrflags |= VR_CONTIG;
			mt = &mem_type_anon_contig;
		} else	mt = &mem_type_anon;

		/* Round the length up to whole pages. */
		if(len % VM_PAGE_SIZE)
			len += VM_PAGE_SIZE - (len % VM_PAGE_SIZE);

		vr = NULL;
		if (m->VMM_ADDR || (m->VMM_FLAGS & MAP_FIXED)) {
			/* An address is given, first try at that address. */
			addr = m->VMM_ADDR;
			vr = map_page_region(vmp, addr, 0, len,
				vrflags, mfflags, mt);
			/* With MAP_FIXED there is no fallback location. */
			if(!vr && (m->VMM_FLAGS & MAP_FIXED))
				return ENOMEM;
		}

		if (!vr) {
			/* No address given or address already in use. */
			vr = map_page_region(vmp, 0, VM_DATATOP, len,
				vrflags, mfflags, mt);
		}

		if (!vr) {
			return ENOMEM;
		}
	} else {
		return ENOSYS;
	}

	/* Return mapping, as seen from process. */
	assert(vr);
	m->VMM_RETADDR = vr->vaddr;

	return OK;
}
/* Map a run of block-cache pages into the caller's address space.  Each
 * requested page must already exist in the cache (looked up by device and
 * inode offset); the whole mapping is undone and an error returned if any
 * page is missing or cannot be wired in.
 */
int do_mapcache(message *msg)
{
	dev_t dev = msg->m_u.m_vmmcp.dev;
	/* Offsets arrive in units of pages; convert to bytes. */
	u64_t dev_off = (u64_t) msg->m_u.m_vmmcp.dev_offset_pages * VM_PAGE_SIZE;
	u64_t ino_off = (u64_t) msg->m_u.m_vmmcp.ino_offset_pages * VM_PAGE_SIZE;
	int n;
	int bytes = msg->m_u.m_vmmcp.pages * VM_PAGE_SIZE;
	struct vir_region *vr;
	struct vmproc *caller;
	vir_bytes offset;
	int io = 0;

	if(vm_isokendpt(msg->m_source, &n) != OK)
		panic("bogus source");

	caller = &vmproc[n];

	/* At least one whole page must be requested. */
	if(bytes < VM_PAGE_SIZE) return EINVAL;

	/* Reserve the virtual range; pages are wired in one by one below. */
	if(!(vr = map_page_region(caller, VM_PAGE_SIZE, VM_DATATOP, bytes,
		VR_ANON | VR_WRITABLE, 0, &mem_type_cache))) {
		printf("VM: map_page_region failed\n");
		return ENOMEM;
	}

	assert(vr->length == bytes);

	for(offset = 0; offset < bytes; offset += VM_PAGE_SIZE) {
		struct cached_page *hb;

		assert(vr->length == bytes);
		assert(offset < vr->length);

		/* Each page must already be present in the cache. */
		if(!(hb = find_cached_page_bydev(dev, dev_off + offset,
			msg->m_u.m_vmmcp.ino, ino_off + offset, 1))) {
			/* Roll back the entire mapping on any miss. */
			map_unmap_region(caller, vr, 0, bytes);
			return ENOENT;
		}

		/* Hand the cached page to the region via pb_cache, then
		 * take a page fault on that offset to wire it in; map_pf
		 * consumes pb_cache, as the asserts around it verify.
		 */
		assert(!vr->param.pb_cache);
		vr->param.pb_cache = hb->page;
		assert(vr->length == bytes);
		assert(offset < vr->length);
		if(map_pf(caller, vr, offset, 1, NULL, NULL, 0, &io) != OK) {
			map_unmap_region(caller, vr, 0, bytes);
			printf("VM: map_pf failed\n");
			return ENOMEM;
		}
		assert(!vr->param.pb_cache);
	}

	/* Clear the message before filling in the reply address. */
	memset(msg, 0, sizeof(*msg));

	msg->m_u.m_vmmcp_reply.addr = (void *) vr->vaddr;

	assert(vr);

#if CACHE_SANITY
	cache_sanitycheck_internal();
#endif

	return OK;
}