/*
 *	Routine:	ipc_kobject_destroy
 *	Purpose:
 *		Release any kernel object resources associated
 *		with the port, which is being destroyed.
 */
void
ipc_kobject_destroy(
	ipc_port_t	port)
{
	switch (ip_kotype(port)) {

	case IKOT_PAGER:
		vm_object_destroy(port);
		break;

	case IKOT_PAGER_TERMINATING:
		vm_object_pager_wakeup(port);
		break;

	case IKOT_DEVICE:
	case IKOT_SEMAPHORE:
	case IKOT_LOCK_SET:
		break;

	default:
#if	MACH_ASSERT
		printf("ipc_kobject_destroy: port 0x%x, kobj 0x%x, type %d\n",
		       port, port->ip_kobject, ip_kotype(port));
#endif	/* MACH_ASSERT */
		break;
	}
}
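/*
 * A minimal sketch (not part of the original source) of how a caller
 * might use ipc_kobject_destroy when tearing down a kernel port.  The
 * helper name ipc_port_release_kernel() is hypothetical; the real Mach
 * port-deallocation path is more involved.
 */
static void
example_port_teardown(ipc_port_t port)
{
	/* Drop any kernel-object state (pager, device, ...) first... */
	ipc_kobject_destroy(port);

	/* ...then let the IPC layer free the port itself. */
	ipc_port_release_kernel(port);	/* hypothetical helper */
}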
/*
 * Allocate (or look up) a pager for a vnode.  Handle is a vnode pointer.
 *
 * MPSAFE
 */
vm_object_t
vnode_pager_alloc(void *handle, vm_ooffset_t size, vm_prot_t prot,
    vm_ooffset_t offset, struct ucred *cred)
{
	vm_object_t object;
	struct vnode *vp;

	/*
	 * Pageout to vnode, no can do yet.
	 */
	if (handle == NULL)
		return (NULL);

	vp = (struct vnode *) handle;

	/*
	 * If the object is being terminated, wait for it to
	 * go away.
	 */
retry:
	while ((object = vp->v_object) != NULL) {
		VM_OBJECT_LOCK(object);
		if ((object->flags & OBJ_DEAD) == 0)
			break;
		vm_object_set_flag(object, OBJ_DISCONNECTWNT);
		msleep(object, VM_OBJECT_MTX(object), PDROP | PVM, "vadead", 0);
	}

	if (vp->v_usecount == 0)
		panic("vnode_pager_alloc: no vnode reference");

	if (object == NULL) {
		/*
		 * Add an object of the appropriate size
		 */
		object = vm_object_allocate(OBJT_VNODE,
		    OFF_TO_IDX(round_page(size)));
		object->un_pager.vnp.vnp_size = size;
		object->un_pager.vnp.writemappings = 0;

		object->handle = handle;
		VI_LOCK(vp);
		if (vp->v_object != NULL) {
			/*
			 * Object has been created while we were sleeping
			 */
			VI_UNLOCK(vp);
			vm_object_destroy(object);
			goto retry;
		}
		vp->v_object = object;
		VI_UNLOCK(vp);
	} else {
		object->ref_count++;
		VM_OBJECT_UNLOCK(object);
	}
	vref(vp);
	return (object);
}
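/*
 * A minimal usage sketch, assuming a FreeBSD-style caller: a
 * hypothetical helper that backs a file mapping with a VM object.
 * Vnode locking and most error handling are elided.
 */
static int
example_map_vnode(struct vnode *vp, vm_ooffset_t size, vm_prot_t prot,
    vm_ooffset_t off, struct ucred *cred, vm_object_t *objp)
{
	vm_object_t obj;

	/* Returns the vnode's existing pager object or allocates one. */
	obj = vnode_pager_alloc(vp, size, prot, off, cred);
	if (obj == NULL)
		return (EINVAL);
	*objp = obj;	/* caller now holds one object ref and one vref */
	return (0);
}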
/*
 * as_destroy: wipe out an address space by destroying its components.
 *
 * Synchronization: none.
 */
void
as_destroy(struct addrspace *as)
{
	struct vm_object *vmo;
	int i;

	for (i = 0; i < array_getnum(as->as_objects); i++) {
		vmo = array_getguy(as->as_objects, i);
		vm_object_destroy(as, vmo);
	}

	array_destroy(as->as_objects);
	kfree(as);
}
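/*
 * Usage sketch (hypothetical caller): on process exit, detach the
 * address space before freeing it.  The curthread->t_vmspace field
 * mirrors the one as_copy below relies on.
 */
static void
example_proc_exit_vm(void)
{
	struct addrspace *as;

	as = curthread->t_vmspace;
	curthread->t_vmspace = NULL;	/* detach before freeing */
	as_destroy(as);
}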
/*
 * as_copy: duplicate an address space. Creates a new address space and
 * copies each vm_object in the source address space into the new one.
 * Implements the VM system part of fork().
 *
 * Synchronization: none.
 */
int
as_copy(struct addrspace *as, struct addrspace **ret)
{
	struct addrspace *newas;
	struct vm_object *vmo, *newvmo;
	int i, result;

	newas = as_create();
	if (newas == NULL) {
		return ENOMEM;
	}

	/*
	 * We assume that as belongs to curthread, and furthermore that
	 * it's not shared with any other threads. (The latter restriction
	 * is easily lifted; the former is not.)
	 *
	 * We assume that nothing is going to modify the source address
	 * space except for the usual page evictions by other processes.
	 */
	assert(as == curthread->t_vmspace);

	/* copy the vmos */
	for (i = 0; i < array_getnum(as->as_objects); i++) {
		vmo = array_getguy(as->as_objects, i);
		result = vm_object_copy(vmo, newas, &newvmo);
		if (result) {
			goto fail;
		}
		result = array_add(newas->as_objects, newvmo);
		if (result) {
			vm_object_destroy(newas, newvmo);
			goto fail;
		}
	}

	*ret = newas;
	return 0;

fail:
	as_destroy(newas);
	return result;
}
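/*
 * A minimal sketch of the fork() path this implements, with a
 * hypothetical child thread argument; thread setup and error
 * handling beyond the VM copy are elided.
 */
static int
example_fork_vm(struct thread *child)
{
	struct addrspace *newas;
	int result;

	/* Clone the current address space... */
	result = as_copy(curthread->t_vmspace, &newas);
	if (result) {
		return result;
	}

	/* ...and hand the copy to the child. */
	child->t_vmspace = newas;
	return 0;
}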
/*
 * vm_object_copy: clone a vm_object.
 *
 * Synchronization: none; lpage_copy does the hard stuff.
 */
int
vm_object_copy(struct vm_object *vmo, struct addrspace *newas,
	       struct vm_object **ret)
{
	struct vm_object *newvmo;
	struct lpage *newlp, *lp;
	unsigned j;
	int result;

	newvmo = vm_object_create(lpage_array_num(vmo->vmo_lpages));
	if (newvmo == NULL) {
		return ENOMEM;
	}

	newvmo->vmo_base = vmo->vmo_base;
	newvmo->vmo_lower_redzone = vmo->vmo_lower_redzone;

	for (j = 0; j < lpage_array_num(vmo->vmo_lpages); j++) {
		lp = lpage_array_get(vmo->vmo_lpages, j);
		newlp = lpage_array_get(newvmo->vmo_lpages, j);

		/* new guy should be initialized to all zerofill */
		KASSERT(newlp == NULL);

		if (lp == NULL) {
			/* old guy is zerofill too, don't do anything */
			continue;
		}

		result = lpage_copy(lp, &newlp);
		if (result) {
			goto fail;
		}
		lpage_array_set(newvmo->vmo_lpages, j, newlp);
	}

	*ret = newvmo;
	return 0;

fail:
	vm_object_destroy(newas, newvmo);
	return result;
}
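/*
 * Illustrative helper (not in the original source): in this design a
 * NULL slot in vmo_lpages means the page is zerofill, which is why
 * vm_object_copy can skip NULL source pages entirely.
 */
static int
example_page_is_zerofill(struct vm_object *vmo, unsigned idx)
{
	return lpage_array_get(vmo->vmo_lpages, idx) == NULL;
}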
/*
 * Set up a segment at virtual address VADDR of size MEMSIZE. The
 * segment in memory extends from VADDR up to (but not including)
 * VADDR+MEMSIZE.
 *
 * The READABLE, WRITEABLE, and EXECUTABLE flags are set if read,
 * write, or execute permission should be set on the segment. At the
 * moment, these are ignored.
 *
 * Does not allow overlapping regions.
 */
int
as_define_region(struct addrspace *as, vaddr_t vaddr, size_t sz,
		 size_t lower_redzone,
		 int readable, int writeable, int executable)
{
	struct vm_object *vmo;
	int i, result;
	vaddr_t check_vaddr;	/* vaddr to use for overlap check */

	(void)readable;
	(void)writeable;	// XYZ
	(void)executable;

	/* base address must be aligned */
	assert((vaddr & PAGE_FRAME) == vaddr);

	/* redzone must be aligned */
	assert((lower_redzone & PAGE_FRAME) == lower_redzone);

	/* redzone must fit below the base address */
	assert(vaddr >= lower_redzone);
	check_vaddr = vaddr - lower_redzone;

	/* size may not be page-aligned; round it up */
	sz = ROUNDUP(sz, PAGE_SIZE);

	/*
	 * Check for overlaps.
	 */
	for (i = 0; i < array_getnum(as->as_objects); i++) {
		vaddr_t bot, top;

		vmo = array_getguy(as->as_objects, i);
		assert(vmo != NULL);
		bot = vmo->vmo_base;
		top = bot + PAGE_SIZE*array_getnum(vmo->vmo_lpages);

		/* Check guard band, if any */
		assert(bot >= vmo->vmo_lower_redzone);
		bot = bot - vmo->vmo_lower_redzone;

		if (check_vaddr + sz > bot && check_vaddr < top) {
			/* overlap */
			return EINVAL;
		}
	}

	/* Create a new vmo. All pages are marked zerofilled. */
	vmo = vm_object_create(sz / PAGE_SIZE);
	if (vmo == NULL) {
		return ENOMEM;
	}
	vmo->vmo_base = vaddr;
	vmo->vmo_lower_redzone = lower_redzone;

	/* Add it to the parent address space. */
	result = array_add(as->as_objects, vmo);
	if (result) {
		vm_object_destroy(as, vmo);
		return result;
	}

	/* Done */
	return 0;
}
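/*
 * Usage sketch (hypothetical values): define a 16-page stack segment
 * with one guard page below it, so stray accesses just under the
 * stack fault instead of silently landing in a neighboring region.
 */
static int
example_define_stack(struct addrspace *as, vaddr_t stackbase)
{
	return as_define_region(as, stackbase, 16 * PAGE_SIZE,
				PAGE_SIZE /* lower_redzone */,
				1 /* readable */, 1 /* writeable */,
				0 /* executable */);
}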