/*===========================================================================*
 *				pt_ptmap				     *
 *===========================================================================*/
PUBLIC int pt_ptmap(struct vmproc *src_vmp, struct vmproc *dst_vmp)
{
/* Transfer mappings to page dir and page tables from source process and
 * destination process. Make sure all the mappings are above the stack, not
 * to corrupt valid mappings in the data segment of the destination process.
 */
	int pde, r;
	phys_bytes physaddr;
	vir_bytes viraddr;
	pt_t *pt;

	/* Both processes must share the same stack top for the "above the
	 * stack" invariant below to be meaningful in both address spaces. */
	assert(src_vmp->vm_stacktop == dst_vmp->vm_stacktop);

	pt = &src_vmp->vm_pt;

#if LU_DEBUG
	printf("VM: pt_ptmap: src = %d, dst = %d\n",
		src_vmp->vm_endpoint, dst_vmp->vm_endpoint);
#endif

	/* Transfer mapping to the page directory. */
	assert((vir_bytes) pt->pt_dir >= src_vmp->vm_stacktop);
	viraddr = arch_vir2map(src_vmp, (vir_bytes) pt->pt_dir);
	physaddr = pt->pt_dir_phys & I386_VM_ADDR_MASK;
	if((r=pt_writemap(dst_vmp, &dst_vmp->vm_pt, viraddr, physaddr,
		I386_PAGE_SIZE,
		I386_VM_PRESENT | I386_VM_USER | I386_VM_WRITE,
		WMF_OVERWRITE)) != OK) {
		return r;
	}
#if LU_DEBUG
	printf("VM: pt_ptmap: transferred mapping to page dir: 0x%08x (0x%08x)\n",
		viraddr, physaddr);
#endif

	/* Scan all non-reserved page-directory entries. */
	for(pde=proc_pde; pde < I386_VM_DIR_ENTRIES; pde++) {
		if(!(pt->pt_dir[pde] & I386_VM_PRESENT)) {
			continue;
		}

		/* Transfer mapping to the page table. The page table page
		 * itself must also live above the stack top. */
		assert((vir_bytes) pt->pt_pt[pde] >= src_vmp->vm_stacktop);
		viraddr = arch_vir2map(src_vmp, (vir_bytes) pt->pt_pt[pde]);
		physaddr = pt->pt_dir[pde] & I386_VM_ADDR_MASK;
		if((r=pt_writemap(dst_vmp, &dst_vmp->vm_pt, viraddr, physaddr,
			I386_PAGE_SIZE,
			I386_VM_PRESENT | I386_VM_USER | I386_VM_WRITE,
			WMF_OVERWRITE)) != OK) {
			return r;
		}
	}
#if LU_DEBUG
	printf("VM: pt_ptmap: transferred mappings to page tables, pde range %d - %d\n",
		proc_pde, I386_VM_DIR_ENTRIES - 1);
#endif

	return OK;
}
/*===========================================================================* * vm_pagelock * *===========================================================================*/ PUBLIC void vm_pagelock(void *vir, int lockflag) { /* Mark a page allocated by vm_allocpage() unwritable, i.e. only for VM. */ vir_bytes m; int r; u32_t flags = I386_VM_PRESENT | I386_VM_USER; pt_t *pt; pt = &vmprocess->vm_pt; m = arch_vir2map(vmprocess, (vir_bytes) vir); assert(!(m % I386_PAGE_SIZE)); if(!lockflag) flags |= I386_VM_WRITE; /* Update flags. */ if((r=pt_writemap(vmprocess, pt, m, 0, I386_PAGE_SIZE, flags, WMF_OVERWRITE | WMF_WRITEFLAGSONLY)) != OK) { panic("vm_lockpage: pt_writemap failed"); } if((r=sys_vmctl(SELF, VMCTL_FLUSHTLB, 0)) != OK) { panic("VMCTL_FLUSHTLB failed: %d", r); } return; }
/*===========================================================================*
 *				do_unmap_phys				     *
 *===========================================================================*/
PUBLIC int do_unmap_phys(message *m)
{
/* Remove a direct physical mapping (set up by do_map_phys) from the
 * target process. The whole region is unmapped.
 */
	int slot;
	struct vmproc *vmp;
	endpoint_t who;
	struct vir_region *vr;

	who = m->VMUP_EP;
	if(who == SELF)
		who = m->m_source;

	if(vm_isokendpt(who, &slot) != OK)
		return EINVAL;

	vmp = &vmproc[slot];

	/* The address must fall inside a region of the target process. */
	vr = map_lookup(vmp, arch_vir2map(vmp, (vir_bytes) m->VMUM_ADDR));
	if(vr == NULL) {
		return EINVAL;
	}

	/* Only direct (physical) mappings may be removed this way. */
	if(!(vr->flags & VR_DIRECT)) {
		return EINVAL;
	}

	if(map_unmap_region(vmp, vr, vr->length) != OK) {
		return EINVAL;
	}

	return OK;
}
/*===========================================================================*
 *				munmap (override for VM)		     *
 *===========================================================================*/
PUBLIC int minix_munmap(void *addr, size_t len)
{
/* munmap() as used by VM itself: translate the address into VM's own
 * linear address space and unmap it there.
 */
	vir_bytes linaddr;

	if(!unmap_ok)
		return ENOSYS;

	linaddr = (vir_bytes) arch_vir2map(&vmproc[VM_PROC_NR],
		(vir_bytes) addr);

	return munmap_lin(linaddr, len);
}
/*===========================================================================*
 *				do_mmap					     *
 *===========================================================================*/
PUBLIC int do_mmap(message *m)
{
/* Handle an mmap() request: create an anonymous memory region in the
 * address space of the calling process. File-backed mappings are not
 * supported (ENOSYS). On success the reply carries the mapped address
 * as seen by the process.
 */
	int r, n;
	struct vmproc *vmp;
	int mfflags = 0;
	struct vir_region *vr = NULL;

	if((r=vm_isokendpt(m->m_source, &n)) != OK) {
		panic("do_mmap: message from strange source: %d", m->m_source);
	}

	vmp = &vmproc[n];

	if(!(vmp->vm_flags & VMF_HASPT))
		return ENXIO;

	if(m->VMM_FD == -1 || (m->VMM_FLAGS & MAP_ANON)) {
		int s;
		vir_bytes v;
		u32_t vrflags = VR_ANON | VR_WRITABLE;
		size_t len = (vir_bytes) m->VMM_LEN;

		/* MAP_ANON must not come with a file descriptor. */
		if(m->VMM_FD != -1) {
			return EINVAL;
		}

		/* Contiguous phys memory has to be preallocated. */
		if((m->VMM_FLAGS & (MAP_CONTIG|MAP_PREALLOC)) == MAP_CONTIG) {
			return EINVAL;
		}

		/* Translate mmap() flags into region/memory flags. */
		if(m->VMM_FLAGS & MAP_PREALLOC) mfflags |= MF_PREALLOC;
		if(m->VMM_FLAGS & MAP_LOWER16M) vrflags |= VR_LOWER16MB;
		if(m->VMM_FLAGS & MAP_LOWER1M) vrflags |= VR_LOWER1MB;
		if(m->VMM_FLAGS & MAP_ALIGN64K) vrflags |= VR_PHYS64K;
		if(m->VMM_FLAGS & MAP_SHARED) vrflags |= VR_SHARED;
		if(m->VMM_FLAGS & MAP_CONTIG) vrflags |= VR_CONTIG;

		/* Round the length up to whole pages. */
		if(len % VM_PAGE_SIZE)
			len += VM_PAGE_SIZE - (len % VM_PAGE_SIZE);

		/* Map at the requested address, or let the system pick a
		 * spot above the stack top if none was given. */
		if(!(vr = map_page_region(vmp,
			arch_vir2map(vmp,
				m->VMM_ADDR ? m->VMM_ADDR : vmp->vm_stacktop),
			VM_DATATOP, len, MAP_NONE, vrflags, mfflags))) {
			return ENOMEM;
		}
	} else {
		/* File mappings are not implemented. */
		return ENOSYS;
	}

	/* Return mapping, as seen from process. */
	assert(vr);
	m->VMM_RETADDR = arch_map2vir(vmp, vr->vaddr);

	return OK;
}
/*===========================================================================*
 *				do_remap				     *
 *===========================================================================*/
PUBLIC int do_remap(message *m)
{
/* Map an existing shared region of the source process into the address
 * space of the destination process. The source address must mark the
 * exact start of a shared region, and the given size must match the
 * region's size after page rounding.
 */
	int dn, sn;
	vir_bytes da, sa, startv;
	size_t size;
	struct vir_region *vr, *region;
	struct vmproc *dvmp, *svmp;
	int r;

	da = (vir_bytes) m->VMRE_DA;
	sa = (vir_bytes) m->VMRE_SA;
	size = m->VMRE_SIZE;

	if ((r = vm_isokendpt((endpoint_t) m->VMRE_D, &dn)) != OK)
		return EINVAL;
	if ((r = vm_isokendpt((endpoint_t) m->VMRE_S, &sn)) != OK)
		return EINVAL;

	dvmp = &vmproc[dn];
	svmp = &vmproc[sn];

	/* da is not translated by arch_vir2map(),
	 * it's handled a little differently,
	 * since in map_remap(), we have to know
	 * about whether the user needs to bind to
	 * THAT address or be chosen by the system.
	 */
	sa = arch_vir2map(svmp, sa);

	if (!(region = map_lookup(svmp, sa)))
		return EINVAL;

	/* The source address must be the exact region start. */
	if(region->vaddr != sa) {
		printf("VM: do_remap: not start of region.\n");
		return EFAULT;
	}

	/* Only shared regions may be remapped. */
	if(!(region->flags & VR_SHARED)) {
		printf("VM: do_remap: not shared.\n");
		return EFAULT;
	}

	/* Round up to whole pages; after rounding, the size must match the
	 * region exactly. */
	if (size % VM_PAGE_SIZE)
		size += VM_PAGE_SIZE - size % VM_PAGE_SIZE;

	if(size != region->length) {
		printf("VM: do_remap: not size of region.\n");
		return EFAULT;
	}

	if ((r = map_remap(dvmp, da, size, region, &startv)) != OK)
		return r;

	/* Reply with the address as seen by the destination process. */
	m->VMRE_RETA = (char *) arch_map2vir(dvmp, startv);

	return OK;
}
/*===========================================================================*
 *				do_map_phys				     *
 *===========================================================================*/
PUBLIC int do_map_phys(message *m)
{
/* Map a range of physical memory into the address space of the target
 * process. The caller must be authorized (map_perm_check) to map the
 * given range. The reply carries the virtual address of the mapping,
 * adjusted for the sub-page offset of the requested physical address.
 */
	int r, n;
	struct vmproc *vmp;
	endpoint_t target;
	struct vir_region *vr;
	vir_bytes len;
	phys_bytes startaddr;
	size_t offset;

	target = m->VMMP_EP;
	len = m->VMMP_LEN;

	/* NOTE(review): if vir_bytes is unsigned this rejects only len == 0,
	 * and a negative VMMP_LEN would wrap to a huge value — confirm. */
	if (len <= 0) return EINVAL;

	if(target == SELF)
		target = m->m_source;

	if((r=vm_isokendpt(target, &n)) != OK)
		return EINVAL;

	startaddr = (vir_bytes)m->VMMP_PHADDR;

	/* First check permission, then round range down/up. Caller can't
	 * help it if we can't map in lower than page granularity.
	 */
	if(map_perm_check(m->m_source, target, startaddr, len) != OK) {
		printf("VM: unauthorized mapping of 0x%lx by %d\n",
			startaddr, m->m_source);
		return EPERM;
	}

	vmp = &vmproc[n];

	if(!(vmp->vm_flags & VMF_HASPT))
		return ENXIO;

	/* Align the range to page boundaries; remember the sub-page offset
	 * so the reply can point at the exact requested byte. */
	offset = startaddr % VM_PAGE_SIZE;
	len += offset;
	startaddr -= offset;

	if(len % VM_PAGE_SIZE)
		len += VM_PAGE_SIZE - (len % VM_PAGE_SIZE);

	/* Direct, non-pagefaulting, writable mapping above the stack top. */
	if(!(vr = map_page_region(vmp,
		arch_vir2map(vmp, vmp->vm_stacktop),
		VM_DATATOP, len, startaddr,
		VR_DIRECT | VR_NOPF | VR_WRITABLE, 0))) {
		return ENOMEM;
	}

	m->VMMP_VADDR_REPLY = (void *) (arch_map2vir(vmp, vr->vaddr) + offset);

	return OK;
}
int scall_mmap(kipc_msg_t *m)
{
/* Handle an mmap() system call: create an anonymous memory mapping in
 * the caller's address space. File-backed mappings are not supported.
 * Returns the mapped address (as seen by the caller) on success, or a
 * negative errno value on failure.
 */
	int err, n;
	struct vmproc *vmp;
	int mfflags = 0;
	struct vir_region *vr = NULL;

	if((err = vm_isokendpt(m->m_source, &n)) != 0) {
		vm_panic("do_mmap: message from strange source", m->m_source);
	}

	vmp = &vmproc[n];

	if(!(vmp->vm_flags & VMF_HASPT))
		return -ENXIO;

	if(m->VMM_FD == -1 || (m->VMM_FLAGS & MAP_ANONYMOUS)) {
		int s;
		vir_bytes v;
		u32_t vrflags = VR_ANON | VR_WRITABLE;
		size_t len = (vir_bytes) m->VMM_LEN;

		/* MAP_ANONYMOUS must not come with a file descriptor. */
		if(m->VMM_FD != -1) {
			return -EINVAL;
		}

		/* Translate mmap() flags into region/memory flags. */
		if(m->VMM_FLAGS & MAP_CONTIG) mfflags |= MF_CONTIG;
		if(m->VMM_FLAGS & MAP_PREALLOC) mfflags |= MF_PREALLOC;
		if(m->VMM_FLAGS & MAP_LOWER16M) vrflags |= VR_LOWER16MB;
		if(m->VMM_FLAGS & MAP_LOWER1M) vrflags |= VR_LOWER1MB;
		if(m->VMM_FLAGS & MAP_ALIGN64K) vrflags |= VR_PHYS64K;
		if(m->VMM_FLAGS & MAP_SHARED) vrflags |= VR_SHARED;

		/* Round the length up to whole pages. */
		if(len % VM_PAGE_SIZE)
			len += VM_PAGE_SIZE - (len % VM_PAGE_SIZE);

		/* Map at the requested address, or above the stack top if
		 * none was given. */
		if(!(vr = map_page_region(vmp,
			arch_vir2map(vmp,
				m->VMM_ADDR ? m->VMM_ADDR : vmp->vm_stacktop),
			VM_DATATOP, len, MAP_NONE, vrflags, mfflags))) {
			return -ENOMEM;
		}
	} else {
		/* File mappings are not implemented. */
		return -ENOSYS;
	}

	/* Return mapping, as seen from process. */
	vm_assert(vr);
	m->VMM_RETADDR = arch_map2vir(vmp, vr->vaddr);

	return m->VMM_RETADDR;
}
/*===========================================================================*
 *				vm_freepages				     *
 *===========================================================================*/
PRIVATE void vm_freepages(vir_bytes vir, vir_bytes phys, int pages, int reason)
{
/* Return 'pages' pages of VM's own memory to the free list and remove
 * their mapping from the page table. Pages below the stack top belong
 * to VM's heap and are never freed here.
 * NOTE(review): relies on a file-scope 'vmp' (presumably VM's own
 * process slot) that is not visible in this chunk — confirm.
 */
	vm_assert(reason >= 0 && reason < VMP_CATEGORIES);
	if(vir >= vmp->vm_stacktop) {
		vm_assert(!(vir % I386_PAGE_SIZE)); 
		vm_assert(!(phys % I386_PAGE_SIZE));
		FREE_MEM(ABS2CLICK(phys), pages);
		/* Remove the now-stale mapping from VM's page table. */
		if(pt_writemap(&vmp->vm_pt, arch_vir2map(vmp, vir), MAP_NONE,
			pages*I386_PAGE_SIZE, 0, WMF_OVERWRITE) != OK)
			vm_panic("vm_freepages: pt_writemap failed", NO_NUM);
	} else {
		printf("VM: vm_freepages not freeing VM heap pages (%d)\n",
			pages);
	}
}
int scall_munmap(kipc_msg_t *m)
{
/* Handle a munmap() system call: remove the memory region that starts
 * at the given address from the caller's address space. Text unmaps
 * (NNR_VM_MUNMAP_TEXT) use the text segment translation.
 * Returns 0 on success or a negative errno value on failure.
 */
	int r, n;
	struct vmproc *vmp;
	vir_bytes addr, len;
	struct vir_region *vr;

	if((r = vm_isokendpt(m->m_source, &n)) != 0) {
		/* Fixed diagnostic: this handler is munmap, not mmap. */
		vm_panic("scall_munmap: message from strange source",
			m->m_source);
	}

	vmp = &vmproc[n];

	if (!(vmp->vm_flags & VMF_HASPT))
		return -ENXIO;

	/* Translate the user address into VM's linear view. */
	if (m->m_type == NNR_VM_MUNMAP) {
		addr = (vir_bytes) arch_vir2map(vmp, (vir_bytes) m->VMUM_ADDR);
	} else if(m->m_type == NNR_VM_MUNMAP_TEXT) {
		addr = (vir_bytes) arch_vir2map_text(vmp,
			(vir_bytes) m->VMUM_ADDR);
	} else {
		vm_panic("do_munmap: strange type", NO_NUM);
	}

	if (!(vr = map_lookup(vmp, addr))) {
		printk("VM: unmap: virtual address 0x%lx not found in %d\n",
			m->VMUM_ADDR, vmp->vm_endpoint);
		return -EFAULT;
	}

	/* Round the length up to whole pages. */
	len = m->VMUM_LEN;
	if (len % VM_PAGE_SIZE)
		len += VM_PAGE_SIZE - (len % VM_PAGE_SIZE);

	/* The unmap must start at the region start and fit inside it. */
	if (addr != vr->vaddr || len > vr->length || len < VM_PAGE_SIZE) {
		return -EFAULT;
	}

	if (map_unmap_region(vmp, vr, len) != 0)
		vm_panic("do_munmap: map_unmap_region failed", NO_NUM);

	return 0;
}
/*===========================================================================*
 *				do_munmap				     *
 *===========================================================================*/
PUBLIC int do_munmap(message *m)
{
/* Handle a munmap request: remove the memory region that starts at the
 * given address from the caller's address space. VM_MUNMAP_TEXT uses
 * the text segment translation instead of the data one.
 */
	int r, n;
	struct vmproc *vmp;
	vir_bytes addr, len;
	struct vir_region *vr;

	if((r=vm_isokendpt(m->m_source, &n)) != OK) {
		/* Fixed diagnostic: this handler is munmap, not mmap. */
		panic("do_munmap: message from strange source: %d",
			m->m_source);
	}

	vmp = &vmproc[n];

	if(!(vmp->vm_flags & VMF_HASPT))
		return ENXIO;

	/* Translate the user address into VM's linear view. */
	if(m->m_type == VM_MUNMAP) {
		addr = (vir_bytes) arch_vir2map(vmp, (vir_bytes) m->VMUM_ADDR);
	} else if(m->m_type == VM_MUNMAP_TEXT) {
		addr = (vir_bytes) arch_vir2map_text(vmp,
			(vir_bytes) m->VMUM_ADDR);
	} else {
		panic("do_munmap: strange type");
	}

	if(!(vr = map_lookup(vmp, addr))) {
		printf("VM: unmap: virtual address %p not found in %d\n",
			m->VMUM_ADDR, vmp->vm_endpoint);
		return EFAULT;
	}

	/* Round the length up to whole pages. */
	len = m->VMUM_LEN;
	if (len % VM_PAGE_SIZE)
		len += VM_PAGE_SIZE - (len % VM_PAGE_SIZE);

	/* The unmap must start at the region start and fit inside it. */
	if(addr != vr->vaddr || len > vr->length || len < VM_PAGE_SIZE) {
		return EFAULT;
	}

	if(map_unmap_region(vmp, vr, len) != OK)
		panic("do_munmap: map_unmap_region failed");

	return OK;
}
/*===========================================================================*
 *				vm_addrok				     *
 *===========================================================================*/
PUBLIC int vm_addrok(void *vir, int writeflag)
{
/* Check whether the given address is present (and writable, if
 * writeflag is set) in VM's own page table. Returns 1 if the access is
 * ok, 0 otherwise. (The previous comment here described vm_pagelock and
 * was a copy-paste error.)
 */
	pt_t *pt = &vmprocess->vm_pt;
	int pde, pte;
	vir_bytes v = arch_vir2map(vmprocess, (vir_bytes) vir);

	/* No PT yet? Don't bother looking. */
	if(!(vmprocess->vm_flags & VMF_HASPT)) {
		return 1;
	}

	pde = I386_VM_PDE(v);
	pte = I386_VM_PTE(v);

	/* The page directory entry must be present... */
	if(!(pt->pt_dir[pde] & I386_VM_PRESENT)) {
		printf("addr not ok: missing pde %d\n", pde);
		return 0;
	}

	/* ...and writable if a write access is being checked. */
	if(writeflag &&
		!(pt->pt_dir[pde] & I386_VM_WRITE)) {
		printf("addr not ok: pde %d present but pde unwritable\n",
			pde);
		return 0;
	}

	/* Same checks for the page table entry itself. */
	if(!(pt->pt_pt[pde][pte] & I386_VM_PRESENT)) {
		printf("addr not ok: missing pde %d / pte %d\n",
			pde, pte);
		return 0;
	}

	if(writeflag &&
		!(pt->pt_pt[pde][pte] & I386_VM_WRITE)) {
		printf("addr not ok: pde %d / pte %d present but unwritable\n",
			pde, pte);
		return 0;
	}

	return 1;
}
/*===========================================================================*
 *				do_remap				     *
 *===========================================================================*/
int do_remap(kipc_msg_t *m)
{
/* Map a region of the source process into the destination process.
 * NOTE(review): unlike the message-based do_remap variant, this one
 * performs no start-of-region, VR_SHARED or size-match validation
 * before calling map_remap(), and 'vr' is declared but unused —
 * confirm whether that is intentional.
 */
	int d, dn, s, sn;
	vir_bytes da, sa, startv;
	size_t size;
	struct vir_region *vr, *region;
	struct vmproc *dvmp, *svmp;
	int r;

	d = m->VMRE_D;
	s = m->VMRE_S;
	da = (vir_bytes) m->VMRE_DA;
	sa = (vir_bytes) m->VMRE_SA;
	size = m->VMRE_SIZE;

	if ((r = vm_isokendpt(d, &dn)) != 0)
		return -EINVAL;
	if ((r = vm_isokendpt(s, &sn)) != 0)
		return -EINVAL;

	dvmp = &vmproc[dn];
	svmp = &vmproc[sn];

	/* da is not translated by arch_vir2map(),
	 * it's handled a little differently,
	 * since in map_remap(), we have to know
	 * about whether the user needs to bind to
	 * THAT address or be chosen by the system.
	 */
	sa = arch_vir2map(svmp, sa);

	if (!(region = map_lookup(svmp, sa)))
		return -EINVAL;

	if ((r = map_remap(dvmp, da, size, region, &startv)) != 0)
		return r;

	/* Reply with the address as seen by the destination process. */
	m->VMRE_RETA = (char *) arch_map2vir(dvmp, startv);

	return 0;
}
/*===========================================================================*
 *				do_get_refcount				     *
 *===========================================================================*/
PUBLIC int do_get_refcount(message *m)
{
/* Look up the reference count of the memory at the given virtual
 * address in the target process and return it in the reply message.
 */
	int slot;
	struct vmproc *vmp;
	endpoint_t ep;
	u8_t refcount;
	vir_bytes va;
	int r;

	ep = m->VMREFCNT_ENDPT;
	va = m->VMREFCNT_ADDR;

	if (vm_isokendpt(ep, &slot) != OK)
		return EINVAL;
	vmp = &vmproc[slot];

	/* Translate into VM's linear view of the target process. */
	va = arch_vir2map(vmp, va);

	r = map_get_ref(vmp, va, &refcount);

	m->VMREFCNT_RETC = refcount;
	return r;
}
/*===========================================================================*
 *				do_get_phys				     *
 *===========================================================================*/
PUBLIC int do_get_phys(message *m)
{
/* Look up the physical address backing the given virtual address in the
 * target process and return it in the reply message.
 */
	int slot;
	struct vmproc *vmp;
	endpoint_t ep;
	phys_bytes pa;
	vir_bytes va;
	int r;

	ep = m->VMPHYS_ENDPT;
	va = m->VMPHYS_ADDR;

	if (vm_isokendpt(ep, &slot) != OK)
		return EINVAL;
	vmp = &vmproc[slot];

	/* Translate into VM's linear view of the target process. */
	va = arch_vir2map(vmp, va);

	r = map_get_phys(vmp, va, &pa);

	m->VMPHYS_RETA = pa;
	return r;
}
/*===========================================================================*
 *				do_shared_unmap				     *
 *===========================================================================*/
PUBLIC int do_shared_unmap(message *m)
{
/* Remove an entire shared memory region from the target process. */
	int slot;
	struct vmproc *vmp;
	endpoint_t ep;
	struct vir_region *region;
	vir_bytes va;

	ep = m->VMUN_ENDPT;
	if (ep == SELF)
		ep = m->m_source;

	if (vm_isokendpt(ep, &slot) != OK)
		return EINVAL;

	vmp = &vmproc[slot];

	va = arch_vir2map(vmp, m->VMUN_ADDR);

	region = map_lookup(vmp, va);
	if(region == NULL) {
		printf("VM: addr 0x%lx not found.\n", m->VMUN_ADDR);
		return EFAULT;
	}

	/* Must point at the very start of the region. */
	if(region->vaddr != va) {
		printf("VM: wrong address for shared_unmap.\n");
		return EFAULT;
	}

	/* Only shared regions qualify. */
	if(!(region->flags & VR_SHARED)) {
		printf("VM: address does not point to shared region.\n");
		return EFAULT;
	}

	if(map_unmap_region(vmp, region, region->length) != OK)
		panic("do_shared_unmap: map_unmap_region failed");

	return OK;
}
/*===========================================================================*
 *				do_shared_unmap				     *
 *===========================================================================*/
int do_shared_unmap(kipc_msg_t *m)
{
/* Remove an entire shared memory region from the target process. */
	int slot;
	struct vmproc *vmp;
	endpoint_t ep;
	struct vir_region *region;
	vir_bytes va;

	ep = m->VMUN_ENDPT;

	if (vm_isokendpt(ep, &slot) != 0)
		return -EINVAL;

	vmp = &vmproc[slot];

	va = arch_vir2map(vmp, m->VMUN_ADDR);

	region = map_lookup(vmp, va);
	if(region == NULL) {
		printk("VM: addr 0x%lx not found.\n", m->VMUN_ADDR);
		return -EFAULT;
	}

	/* Must point at the very start of the region. */
	if(region->vaddr != va) {
		printk("VM: wrong address for shared_unmap.\n");
		return -EFAULT;
	}

	/* Only shared regions qualify. */
	if(!(region->flags & VR_SHARED)) {
		printk("VM: address does not point to shared region.\n");
		return -EFAULT;
	}

	if(map_unmap_region(vmp, region, region->length) != 0)
		vm_panic("do_shared_unmap: map_unmap_region failed", NO_NUM);

	return 0;
}
/*===========================================================================*
 *				vm_freepages				     *
 *===========================================================================*/
PRIVATE void vm_freepages(vir_bytes vir, vir_bytes phys, int pages, int reason)
{
/* Return 'pages' pages, previously allocated for VM's own use, to the
 * free list and remove their mapping from VM's page table. Pages below
 * the stack top belong to VM's heap and are never freed here.
 */
	assert(reason >= 0 && reason < VMP_CATEGORIES);

	if(vir < vmprocess->vm_stacktop) {
		printf("VM: vm_freepages not freeing VM heap pages (%d)\n",
			pages);
	} else {
		assert(!(vir % I386_PAGE_SIZE));
		assert(!(phys % I386_PAGE_SIZE));
		free_mem(ABS2CLICK(phys), pages);
		/* Remove the now-stale mapping from VM's page table. */
		if(pt_writemap(vmprocess, &vmprocess->vm_pt,
			arch_vir2map(vmprocess, vir), MAP_NONE,
			pages*I386_PAGE_SIZE, 0, WMF_OVERWRITE) != OK)
			panic("vm_freepages: pt_writemap failed");
	}

#if SANITYCHECKS
	/* If SANITYCHECKS are on, flush tlb so accessing freed pages is
	 * always trapped, also if not in tlb.
	 */
	if((sys_vmctl(SELF, VMCTL_FLUSHTLB, 0)) != OK) {
		panic("VMCTL_FLUSHTLB failed");
	}
#endif
}
/*===========================================================================*
 *				do_fork					     *
 *===========================================================================*/
PUBLIC int do_fork(message *msg)
{
/* Handle a fork request: set up the child's VM state as a copy of the
 * parent's. For full-VM processes the regions are duplicated via
 * map_proc_copy(); otherwise the text/data/stack segments are copied
 * physically. On success the reply carries the child's endpoint.
 */
	int r, proc, s, childproc, fullvm;
	struct vmproc *vmp, *vmc;
	pt_t origpt;
	vir_bytes msgaddr;

	SANITYCHECK(SCL_FUNCTIONS);

	if(vm_isokendpt(msg->VMF_ENDPOINT, &proc) != OK) {
		printf("VM: bogus endpoint VM_FORK %d\n", msg->VMF_ENDPOINT);
		SANITYCHECK(SCL_FUNCTIONS);
		return EINVAL;
	}

	childproc = msg->VMF_SLOTNO;
	if(childproc < 0 || childproc >= NR_PROCS) {
		printf("VM: bogus slotno VM_FORK %d\n", msg->VMF_SLOTNO);
		SANITYCHECK(SCL_FUNCTIONS);
		return EINVAL;
	}

	vmp = &vmproc[proc];		/* parent */
	vmc = &vmproc[childproc];	/* child */
	assert(vmc->vm_slot == childproc);

	/* DMA memory is pinned to physical addresses and cannot be forked. */
	if(vmp->vm_flags & VMF_HAS_DMA) {
		printf("VM: %d has DMA memory and may not fork\n",
			msg->VMF_ENDPOINT);
		return EINVAL;
	}

	fullvm = vmp->vm_flags & VMF_HASPT;

	/* The child is basically a copy of the parent. Preserve the child
	 * slot's own page table across the struct copy. */
	origpt = vmc->vm_pt;
	*vmc = *vmp;
	vmc->vm_slot = childproc;
	vmc->vm_regions = NULL;
	yielded_init(&vmc->vm_yielded_blocks);
	vmc->vm_endpoint = NONE;	/* In case someone tries to use it. */
	vmc->vm_pt = origpt;
	vmc->vm_flags &= ~VMF_HASPT;

#if VMSTATS
	vmc->vm_bytecopies = 0;
#endif

	if(pt_new(&vmc->vm_pt) != OK) {
		printf("VM: fork: pt_new failed\n");
		return ENOMEM;
	}

	vmc->vm_flags |= VMF_HASPT;

	if(fullvm) {
		SANITYCHECK(SCL_DETAIL);

		if(map_proc_copy(vmc, vmp) != OK) {
			printf("VM: fork: map_proc_copy failed\n");
			pt_free(&vmc->vm_pt);
			return(ENOMEM);
		}

		/* Re-resolve the child's heap pointer into its own regions. */
		if(vmp->vm_heap) {
			vmc->vm_heap = map_region_lookup_tag(vmc, VRT_HEAP);
			assert(vmc->vm_heap);
		}

		SANITYCHECK(SCL_DETAIL);
	} else {
		vir_bytes sp;
		struct vir_region *heap, *stack;
		vir_bytes text_bytes, data_bytes, stack_bytes,
			parent_gap_bytes, child_gap_bytes;

		/* Get SP of new process (using parent). */
		if(get_stack_ptr(vmp->vm_endpoint, &sp) != OK) {
			printf("VM: fork: get_stack_ptr failed for %d\n",
				vmp->vm_endpoint);
			return ENOMEM;
		}

		/* Update size of stack segment using current SP. */
		if(adjust(vmp, vmp->vm_arch.vm_seg[D].mem_len, sp) != OK) {
			printf("VM: fork: adjust failed for %d\n",
				vmp->vm_endpoint);
			return ENOMEM;
		}

		/* Copy newly adjust()ed stack segment size to child. */
		vmc->vm_arch.vm_seg[S] = vmp->vm_arch.vm_seg[S];

		text_bytes = CLICK2ABS(vmc->vm_arch.vm_seg[T].mem_len);
		data_bytes = CLICK2ABS(vmc->vm_arch.vm_seg[D].mem_len);
		stack_bytes = CLICK2ABS(vmc->vm_arch.vm_seg[S].mem_len);

		/* how much space after break and before lower end (which is
		 * the logical top) of stack for the parent
		 */
		parent_gap_bytes = CLICK2ABS(vmc->vm_arch.vm_seg[S].mem_vir -
			vmc->vm_arch.vm_seg[D].mem_len);

		/* how much space can the child stack grow downwards, below
		 * the current SP? The rest of the gap is available for the
		 * heap to grow upwards.
		 */
		child_gap_bytes = VM_PAGE_SIZE;

		if((r=proc_new(vmc, VM_PROCSTART,
			text_bytes, data_bytes, stack_bytes, child_gap_bytes,
			0, 0,
			CLICK2ABS(vmc->vm_arch.vm_seg[S].mem_vir +
				vmc->vm_arch.vm_seg[S].mem_len), 1)) != OK) {
			printf("VM: fork: proc_new failed\n");
			return r;
		}

		if(!(heap = map_region_lookup_tag(vmc, VRT_HEAP)))
			panic("couldn't lookup heap");
		assert(heap->phys);
		if(!(stack = map_region_lookup_tag(vmc, VRT_STACK)))
			panic("couldn't lookup stack");
		assert(stack->phys);

		/* Now copy the memory regions. */
		if(vmc->vm_arch.vm_seg[T].mem_len > 0) {
			struct vir_region *text;
			if(!(text = map_region_lookup_tag(vmc, VRT_TEXT)))
				panic("couldn't lookup text");
			assert(text->phys);
			if(copy_abs2region(
				CLICK2ABS(vmp->vm_arch.vm_seg[T].mem_phys),
				text, 0, text_bytes) != OK)
				panic("couldn't copy text");
		}

		if(copy_abs2region(CLICK2ABS(vmp->vm_arch.vm_seg[D].mem_phys),
			heap, 0, data_bytes) != OK)
			panic("couldn't copy heap");

		if(copy_abs2region(CLICK2ABS(vmp->vm_arch.vm_seg[D].mem_phys +
			vmc->vm_arch.vm_seg[D].mem_len) + parent_gap_bytes,
			stack, child_gap_bytes, stack_bytes) != OK)
			panic("couldn't copy stack");
	}

	/* Only inherit these flags. */
	vmc->vm_flags &= (VMF_INUSE|VMF_SEPARATE|VMF_HASPT);

	/* inherit the priv call bitmaps */
	memcpy(&vmc->vm_call_mask, &vmp->vm_call_mask,
		sizeof(vmc->vm_call_mask));

	/* Tell kernel about the (now successful) FORK. */
	if((r=sys_fork(vmp->vm_endpoint, childproc,
		&vmc->vm_endpoint, vmc->vm_arch.vm_seg,
		PFF_VMINHIBIT, &msgaddr)) != OK) {
		panic("do_fork can't sys_fork: %d", r);
	}

	if(fullvm) {
		vir_bytes vir;
		/* making these messages writable is an optimisation
		 * and its return value needn't be checked.
		 */
		vir = arch_vir2map(vmc, msgaddr);
		handle_memory(vmc, vir, sizeof(message), 1);
		vir = arch_vir2map(vmp, msgaddr);
		handle_memory(vmp, vir, sizeof(message), 1);
	}

	if((r=pt_bind(&vmc->vm_pt, vmc)) != OK)
		panic("fork can't pt_bind: %d", r);

	/* Inform caller of new child endpoint. */
	msg->VMF_CHILD_ENDPOINT = vmc->vm_endpoint;

	SANITYCHECK(SCL_FUNCTIONS);
	return OK;
}
/*===========================================================================*
 *				vm_allocpage				     *
 *===========================================================================*/
PUBLIC void *vm_allocpage(phys_bytes *phys, int reason)
{
/* Allocate a page for use by VM itself. Returns the page's virtual
 * address (as seen by VM) and stores its physical address in *phys,
 * or returns NULL on failure.
 */
	phys_bytes newpage;
	vir_bytes loc;
	pt_t *pt;
	int r;
	static int level = 0;	/* re-entrancy depth, at most 2 */
	void *ret;

	pt = &vmprocess->vm_pt;
	assert(reason >= 0 && reason < VMP_CATEGORIES);

	level++;

	assert(level >= 1);
	assert(level <= 2);

	if(level > 1 || !(vmprocess->vm_flags & VMF_HASPT) || !meminit_done) {
		/* Re-entered (pt_writemap below may need a page itself), or
		 * no page table to map into yet: hand out a reserved spare
		 * page instead of recursing further. */
		int r;
		void *s;
		s=vm_getsparepage(phys);
		level--;
		if(!s) {
			util_stacktrace();
			printf("VM: warning: out of spare pages\n");
		}
		return s;
	}

	/* VM does have a pagetable, so get a page and map it in there.
	 * Where in our virtual address space can we put it?
	 */
	loc = findhole(pt,  arch_vir2map(vmprocess, vmprocess->vm_stacktop),
		vmprocess->vm_arch.vm_data_top);
	if(loc == NO_MEM) {
		level--;
		printf("VM: vm_allocpage: findhole failed\n");
		return NULL;
	}

	/* Allocate page of memory for use by VM. As VM
	 * is trusted, we don't have to pre-clear it.
	 */
	if((newpage = alloc_mem(CLICKSPERPAGE, 0)) == NO_MEM) {
		level--;
		printf("VM: vm_allocpage: alloc_mem failed\n");
		return NULL;
	}

	*phys = CLICK2ABS(newpage);

	/* Map this page into our address space. */
	if((r=pt_writemap(vmprocess, pt, loc, *phys, I386_PAGE_SIZE,
		I386_VM_PRESENT | I386_VM_USER | I386_VM_WRITE, 0)) != OK) {
		/* Undo the allocation on mapping failure. */
		free_mem(newpage, CLICKSPERPAGE);
		printf("vm_allocpage writemap failed\n");
		level--;
		return NULL;
	}

	if((r=sys_vmctl(SELF, VMCTL_FLUSHTLB, 0)) != OK) {
		panic("VMCTL_FLUSHTLB failed: %d", r);
	}

	level--;

	/* Return user-space-ready pointer to it. */
	ret = (void *) arch_map2vir(vmprocess, loc);

	return ret;
}