/*
 * The object must be locked.
 */
static void
vnode_pager_dealloc(vm_object_t object)
{
	struct vnode *vp;
	int refs;

	vp = object->handle;
	if (vp == NULL)
		panic("vnode_pager_dealloc: pager already dealloced");

	VM_OBJECT_ASSERT_WLOCKED(object);
	vm_object_pip_wait(object, "vnpdea");
	refs = object->ref_count;

	object->handle = NULL;
	object->type = OBJT_DEAD;
	if (object->flags & OBJ_DISCONNECTWNT) {
		vm_object_clear_flag(object, OBJ_DISCONNECTWNT);
		wakeup(object);
	}
	ASSERT_VOP_ELOCKED(vp, "vnode_pager_dealloc");
	if (object->un_pager.vnp.writemappings > 0) {
		object->un_pager.vnp.writemappings = 0;
		VOP_ADD_WRITECOUNT(vp, -1);
		CTR3(KTR_VFS, "%s: vp %p v_writecount decreased to %d",
		    __func__, vp, vp->v_writecount);
	}
	vp->v_object = NULL;
	VOP_UNSET_TEXT(vp);
	VM_OBJECT_WUNLOCK(object);
	while (refs-- > 0)
		vunref(vp);
	VM_OBJECT_WLOCK(object);
}
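The noteworthy detail above is that the object lock is dropped around the vunref() loop, since vunref() may itself need to acquire other locks, and is retaken before returning because the caller expects the object locked. Below is a minimal userspace sketch of that unlock/call/relock pattern, assuming only pthreads; struct obj, release_ref(), and obj_teardown() are hypothetical names for illustration, not part of the kernel code above.

#include <pthread.h>
#include <stdio.h>

struct obj {
	pthread_mutex_t lock;
	int refs;
};

/* Stands in for vunref(): may sleep or take other locks internally. */
static void
release_ref(struct obj *o)
{
	printf("released one reference on %p\n", (void *)o);
}

static void
obj_teardown(struct obj *o)
{
	int refs;

	pthread_mutex_lock(&o->lock);
	refs = o->refs;			/* snapshot the count under the lock */
	o->refs = 0;
	pthread_mutex_unlock(&o->lock);	/* drop before calling out */

	while (refs-- > 0)
		release_ref(o);		/* no lock held: no lock-order risk */

	pthread_mutex_lock(&o->lock);	/* caller expects the lock held */
}

int
main(void)
{
	struct obj o = { PTHREAD_MUTEX_INITIALIZER, 3 };

	obj_teardown(&o);		/* returns with o.lock held */
	pthread_mutex_unlock(&o.lock);
	return (0);
}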
/*
 * shmfd object management including creation and reference counting
 * routines.
 */
static struct shmfd *
shm_alloc(struct ucred *ucred, mode_t mode)
{
	struct shmfd *shmfd;

	shmfd = malloc(sizeof(*shmfd), M_SHMFD, M_WAITOK | M_ZERO);
	shmfd->shm_size = 0;
	shmfd->shm_uid = ucred->cr_uid;
	shmfd->shm_gid = ucred->cr_gid;
	shmfd->shm_mode = mode;
	shmfd->shm_object = vm_pager_allocate(OBJT_DEFAULT, NULL,
	    shmfd->shm_size, VM_PROT_DEFAULT, 0, ucred);
	KASSERT(shmfd->shm_object != NULL, ("shm_create: vm_pager_allocate"));
	VM_OBJECT_LOCK(shmfd->shm_object);
	vm_object_clear_flag(shmfd->shm_object, OBJ_ONEMAPPING);
	vm_object_set_flag(shmfd->shm_object, OBJ_NOSPLIT);
	VM_OBJECT_UNLOCK(shmfd->shm_object);
	vfs_timestamp(&shmfd->shm_birthtime);
	shmfd->shm_atime = shmfd->shm_mtime = shmfd->shm_ctime =
	    shmfd->shm_birthtime;
	refcount_init(&shmfd->shm_refs, 1);
#ifdef MAC
	mac_posixshm_init(shmfd);
	mac_posixshm_create(ucred, shmfd);
#endif
	return (shmfd);
}
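shm_alloc() hands the caller the first reference via refcount_init(&shmfd->shm_refs, 1); other holders later bump and drop the count, and the last dropper frees the object. A minimal userspace sketch of that lifetime pattern follows, using C11 atomics; struct blob, blob_alloc(), blob_hold(), and blob_drop() are hypothetical names chosen for illustration.

#include <stdatomic.h>
#include <stdlib.h>

struct blob {
	atomic_int refs;
	size_t size;
};

static struct blob *
blob_alloc(size_t size)
{
	struct blob *b;

	b = calloc(1, sizeof(*b));
	if (b == NULL)
		return (NULL);
	b->size = size;
	atomic_init(&b->refs, 1);	/* creator owns the first reference */
	return (b);
}

static void
blob_hold(struct blob *b)
{
	atomic_fetch_add(&b->refs, 1);
}

static void
blob_drop(struct blob *b)
{
	/* atomic_fetch_sub() returns the old value; last dropper frees. */
	if (atomic_fetch_sub(&b->refs, 1) == 1)
		free(b);
}

int
main(void)
{
	struct blob *b;

	if ((b = blob_alloc(4096)) == NULL)
		return (1);
	blob_hold(b);	/* a second holder appears */
	blob_drop(b);	/* ...and goes away: count back to 1 */
	blob_drop(b);	/* last reference: blob is freed */
	return (0);
}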
/*
 * No requirements.
 */
static int
do_vmtotal(SYSCTL_HANDLER_ARGS)
{
	struct vmtotal total;
	struct vmtotal *totalp;
	struct vm_object marker;
	vm_object_t object;
	long collisions;
	int burst;

	bzero(&total, sizeof(total));
	totalp = &total;
	bzero(&marker, sizeof(marker));
	marker.type = OBJT_MARKER;
	collisions = vmobj_token.t_collisions;

#if 0
	/*
	 * Mark all objects as inactive.
	 */
	lwkt_gettoken(&vmobj_token);
	for (object = TAILQ_FIRST(&vm_object_list);
	     object != NULL;
	     object = TAILQ_NEXT(object, object_list)) {
		if (object->type == OBJT_MARKER)
			continue;
		vm_object_clear_flag(object, OBJ_ACTIVE);
	}
	lwkt_reltoken(&vmobj_token);
#endif

	/*
	 * Calculate process statistics.
	 */
	allproc_scan(do_vmtotal_callback, totalp);

	/*
	 * Calculate object memory usage statistics.
	 */
	lwkt_gettoken(&vmobj_token);
	TAILQ_INSERT_HEAD(&vm_object_list, &marker, object_list);
	burst = 0;

	for (object = TAILQ_FIRST(&vm_object_list);
	     object != NULL;
	     object = TAILQ_NEXT(object, object_list)) {
		/*
		 * devices, like /dev/mem, will badly skew our totals.
		 * markers aren't real objects.
		 */
		if (object->type == OBJT_MARKER)
			continue;
		if (object->type == OBJT_DEVICE)
			continue;
		if (object->size >= 0x7FFFFFFF) {
			/*
			 * Probably unbounded anonymous memory (really
			 * bounded by related vm_map_entry structures which
			 * we do not have access to in this loop).
			 */
			totalp->t_vm += object->resident_page_count;
		} else {
			/*
			 * It's questionable how useful this is but...
			 */
			totalp->t_vm += object->size;
		}
		totalp->t_rm += object->resident_page_count;
		if (object->flags & OBJ_ACTIVE) {
			totalp->t_avm += object->size;
			totalp->t_arm += object->resident_page_count;
		}
		if (object->shadow_count > 1) {
			/* shared object */
			totalp->t_vmshr += object->size;
			totalp->t_rmshr += object->resident_page_count;
			if (object->flags & OBJ_ACTIVE) {
				totalp->t_avmshr += object->size;
				totalp->t_armshr += object->resident_page_count;
			}
		}

		/*
		 * Don't waste time unnecessarily
		 */
		if (++burst < 25)
			continue;
		burst = 0;

		/*
		 * Don't hog the vmobj_token if someone else wants it.
		 * Park the marker just past the current object so the
		 * scan can resume from it after the token is released.
		 */
		TAILQ_REMOVE(&vm_object_list, &marker, object_list);
		TAILQ_INSERT_AFTER(&vm_object_list, object, &marker,
				   object_list);
		object = &marker;
		if (collisions != vmobj_token.t_collisions) {
			tsleep(&vm_object_list, 0, "breath", 1);
			collisions = vmobj_token.t_collisions;
		} else {
			lwkt_yield();
		}
	}
	TAILQ_REMOVE(&vm_object_list, &marker, object_list);
	lwkt_reltoken(&vmobj_token);

	totalp->t_free = vmstats.v_free_count + vmstats.v_cache_count;
	return (sysctl_handle_opaque(oidp, totalp, sizeof total, req));
}
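The version above keeps the scan preemptible by parking a dummy OBJT_MARKER object in vm_object_list after every burst of 25 objects, releasing the vmobj_token, and resuming from the marker, which real consumers skip. A minimal userspace sketch of that marker technique follows, assuming the <sys/queue.h> TAILQ macros and a comment standing in for the lock handoff; struct node, sum_in_bursts(), and is_marker are hypothetical names.

#include <sys/queue.h>
#include <stdio.h>

struct node {
	TAILQ_ENTRY(node) link;
	int is_marker;		/* consumers skip marker entries */
	int value;
};

TAILQ_HEAD(nodelist, node);

/*
 * Walk the list in bursts.  After each burst, park the marker just
 * past the current element and resume from it; in the kernel code
 * this is where the token would be dropped and retaken.
 */
static long
sum_in_bursts(struct nodelist *list, struct node *marker, int burst_max)
{
	struct node *n;
	long sum = 0;
	int burst = 0;

	TAILQ_INSERT_HEAD(list, marker, link);
	for (n = TAILQ_FIRST(list); n != NULL; n = TAILQ_NEXT(n, link)) {
		if (n->is_marker)
			continue;
		sum += n->value;
		if (++burst < burst_max)
			continue;
		burst = 0;
		/* Reposition the marker; the scan resumes after it. */
		TAILQ_REMOVE(list, marker, link);
		TAILQ_INSERT_AFTER(list, n, marker, link);
		n = marker;
		/* ...drop and reacquire the list lock here... */
	}
	TAILQ_REMOVE(list, marker, link);
	return (sum);
}

int
main(void)
{
	struct nodelist list = TAILQ_HEAD_INITIALIZER(list);
	struct node nodes[10], marker = { .is_marker = 1 };
	int i;

	for (i = 0; i < 10; i++) {
		nodes[i].is_marker = 0;
		nodes[i].value = i;
		TAILQ_INSERT_TAIL(&list, &nodes[i], link);
	}
	printf("sum = %ld\n", sum_in_bursts(&list, &marker, 3));
	return (0);
}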
/*
 * No requirements.
 */
static int
do_vmtotal(SYSCTL_HANDLER_ARGS)
{
	struct vmtotal total;
	struct vmtotal *totalp;
	vm_object_t object;

	bzero(&total, sizeof(total));
	totalp = &total;

	/*
	 * Mark all objects as inactive.
	 */
	lwkt_gettoken(&vmobj_token);
	for (object = TAILQ_FIRST(&vm_object_list);
	     object != NULL;
	     object = TAILQ_NEXT(object, object_list)) {
		if (object->type == OBJT_MARKER)
			continue;
		vm_object_clear_flag(object, OBJ_ACTIVE);
	}
	lwkt_reltoken(&vmobj_token);

	/*
	 * Calculate process statistics.
	 */
	allproc_scan(do_vmtotal_callback, totalp);

	/*
	 * Calculate object memory usage statistics.
	 */
	lwkt_gettoken(&vmobj_token);
	for (object = TAILQ_FIRST(&vm_object_list);
	     object != NULL;
	     object = TAILQ_NEXT(object, object_list)) {
		/*
		 * devices, like /dev/mem, will badly skew our totals.
		 * markers aren't real objects.
		 */
		if (object->type == OBJT_MARKER)
			continue;
		if (object->type == OBJT_DEVICE)
			continue;
		if (object->size >= 0x7FFFFFFF) {
			/*
			 * Probably unbounded anonymous memory (really
			 * bounded by related vm_map_entry structures which
			 * we do not have access to in this loop).
			 */
			totalp->t_vm += object->resident_page_count;
		} else {
			/*
			 * It's questionable how useful this is but...
			 */
			totalp->t_vm += object->size;
		}
		totalp->t_rm += object->resident_page_count;
		if (object->flags & OBJ_ACTIVE) {
			totalp->t_avm += object->size;
			totalp->t_arm += object->resident_page_count;
		}
		if (object->shadow_count > 1) {
			/* shared object */
			totalp->t_vmshr += object->size;
			totalp->t_rmshr += object->resident_page_count;
			if (object->flags & OBJ_ACTIVE) {
				totalp->t_avmshr += object->size;
				totalp->t_armshr += object->resident_page_count;
			}
		}
	}
	lwkt_reltoken(&vmobj_token);

	totalp->t_free = vmstats.v_free_count + vmstats.v_cache_count;
	return (sysctl_handle_opaque(oidp, totalp, sizeof total, req));
}