/*
 * shmfd object management including creation and reference counting
 * routines.
 */
static struct shmfd *
shm_alloc(struct ucred *ucred, mode_t mode)
{
	struct shmfd *shmfd;

	shmfd = malloc(sizeof(*shmfd), M_SHMFD, M_WAITOK | M_ZERO);
	shmfd->shm_size = 0;
	shmfd->shm_uid = ucred->cr_uid;
	shmfd->shm_gid = ucred->cr_gid;
	shmfd->shm_mode = mode;
	shmfd->shm_object = vm_pager_allocate(OBJT_DEFAULT, NULL,
	    shmfd->shm_size, VM_PROT_DEFAULT, 0, ucred);
	KASSERT(shmfd->shm_object != NULL, ("shm_alloc: vm_pager_allocate"));
	VM_OBJECT_LOCK(shmfd->shm_object);
	vm_object_clear_flag(shmfd->shm_object, OBJ_ONEMAPPING);
	vm_object_set_flag(shmfd->shm_object, OBJ_NOSPLIT);
	VM_OBJECT_UNLOCK(shmfd->shm_object);
	vfs_timestamp(&shmfd->shm_birthtime);
	shmfd->shm_atime = shmfd->shm_mtime = shmfd->shm_ctime =
	    shmfd->shm_birthtime;
	refcount_init(&shmfd->shm_refs, 1);
#ifdef MAC
	mac_posixshm_init(shmfd);
	mac_posixshm_create(ucred, shmfd);
#endif

	return (shmfd);
}
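The block comment above also promises reference-counting routines. For context, here is a minimal sketch of the matching release path, assuming the conventional refcount_release()/vm_object_deallocate() teardown; the helper name and exact teardown steps are illustrative, not verbatim source:

/*
 * Illustrative sketch only (assumed shm_drop()-style helper): drop one
 * reference to a shmfd and free it when the last reference goes away.
 */
static void
shm_drop(struct shmfd *shmfd)
{
	if (refcount_release(&shmfd->shm_refs)) {
#ifdef MAC
		mac_posixshm_destroy(shmfd);	/* assumed MAC teardown hook */
#endif
		/* Release the VM object allocated in shm_alloc(). */
		vm_object_deallocate(shmfd->shm_object);
		free(shmfd, M_SHMFD);
	}
}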
/*
 * Allocate (or lookup) pager for a vnode.
 * Handle is a vnode pointer.
 *
 * MPSAFE
 */
vm_object_t
vnode_pager_alloc(void *handle, vm_ooffset_t size, vm_prot_t prot,
    vm_ooffset_t offset, struct ucred *cred)
{
	vm_object_t object;
	struct vnode *vp;

	/*
	 * Pageout to vnode, no can do yet.
	 */
	if (handle == NULL)
		return (NULL);

	vp = (struct vnode *) handle;

	/*
	 * If the object is being terminated, wait for it to
	 * go away.
	 */
retry:
	while ((object = vp->v_object) != NULL) {
		VM_OBJECT_LOCK(object);
		if ((object->flags & OBJ_DEAD) == 0)
			break;
		vm_object_set_flag(object, OBJ_DISCONNECTWNT);
		msleep(object, VM_OBJECT_MTX(object), PDROP | PVM, "vadead", 0);
	}

	if (vp->v_usecount == 0)
		panic("vnode_pager_alloc: no vnode reference");

	if (object == NULL) {
		/*
		 * Add an object of the appropriate size
		 */
		object = vm_object_allocate(OBJT_VNODE,
		    OFF_TO_IDX(round_page(size)));

		object->un_pager.vnp.vnp_size = size;
		object->un_pager.vnp.writemappings = 0;

		object->handle = handle;
		VI_LOCK(vp);
		if (vp->v_object != NULL) {
			/*
			 * Object has been created while we were sleeping
			 */
			VI_UNLOCK(vp);
			vm_object_destroy(object);
			goto retry;
		}
		vp->v_object = object;
		VI_UNLOCK(vp);
	} else {
		object->ref_count++;
		VM_OBJECT_UNLOCK(object);
	}
	vref(vp);
	return (object);
}
/* Create the VM system backing object for this vnode */
int
vnode_create_vobject(struct vnode *vp, off_t isize, struct thread *td)
{
	vm_object_t object;
	vm_ooffset_t size = isize;
	struct vattr va;

	if (!vn_isdisk(vp, NULL) && vn_canvmio(vp) == FALSE)
		return (0);

	while ((object = vp->v_object) != NULL) {
		VM_OBJECT_WLOCK(object);
		if (!(object->flags & OBJ_DEAD)) {
			VM_OBJECT_WUNLOCK(object);
			return (0);
		}
		VOP_UNLOCK(vp, 0);
		vm_object_set_flag(object, OBJ_DISCONNECTWNT);
		VM_OBJECT_SLEEP(object, object, PDROP | PVM, "vodead", 0);
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
	}

	if (size == 0) {
		if (vn_isdisk(vp, NULL)) {
			size = IDX_TO_OFF(INT_MAX);
		} else {
			if (VOP_GETATTR(vp, &va, td->td_ucred))
				return (0);
			size = va.va_size;
		}
	}

	object = vnode_pager_alloc(vp, size, 0, 0, td->td_ucred);
	/*
	 * Dereference the reference we just created.  This assumes
	 * that the object is associated with the vp.
	 */
	VM_OBJECT_WLOCK(object);
	object->ref_count--;
	VM_OBJECT_WUNLOCK(object);
	vrele(vp);

	KASSERT(vp->v_object != NULL, ("vnode_create_vobject: NULL object"));

	return (0);
}
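A hypothetical caller sketch for the function above: a filesystem open path ensuring a backing VM object exists before the vnode can be mapped. The surrounding variables (vp, va) and the locked-vnode context are assumptions for illustration:

/*
 * Hypothetical usage sketch: vp is locked, as vnode_create_vobject()
 * expects (note that it may drop and reacquire the lock while sleeping
 * on a dying object).
 */
int error;

error = vnode_create_vobject(vp, va.va_size, curthread);
if (error != 0)
	return (error);
/* vp->v_object is now valid (see the KASSERT above). */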
/*
 * Allocate a VM object for a vnode, typically a regular file vnode.
 *
 * Some additional information is required to generate a properly sized
 * object which covers the entire buffer cache buffer straddling the file
 * EOF.  Userland does not see the extra pages as the VM fault code tests
 * against v_filesize.
 */
vm_object_t
vnode_pager_alloc(void *handle, off_t length, vm_prot_t prot, off_t offset,
		  int blksize, int boff)
{
	vm_object_t object;
	struct vnode *vp;
	off_t loffset;
	vm_pindex_t lsize;

	/*
	 * Pageout to vnode, no can do yet.
	 */
	if (handle == NULL)
		return (NULL);

	/*
	 * XXX hack - This initialization should be put somewhere else.
	 */
	if (vnode_pbuf_freecnt < 0) {
		vnode_pbuf_freecnt = nswbuf / 2 + 1;
	}

	/*
	 * Serialize potential vnode/object teardowns and interlocks
	 */
	vp = (struct vnode *)handle;
	lwkt_gettoken(&vp->v_token);

	/*
	 * If the object is being terminated, wait for it to
	 * go away.
	 */
	object = vp->v_object;
	if (object) {
		vm_object_hold(object);
		KKASSERT((object->flags & OBJ_DEAD) == 0);
	}

	if (VREFCNT(vp) <= 0)
		panic("vnode_pager_alloc: no vnode reference");

	/*
	 * Round up to the *next* block, then destroy the buffers in question.
	 * Since we are only removing some of the buffers we must rely on the
	 * scan count to determine whether a loop is necessary.
	 *
	 * Destroy any pages beyond the last buffer.
	 */
	if (boff < 0)
		boff = (int)(length % blksize);
	if (boff)
		loffset = length + (blksize - boff);
	else
		loffset = length;
	lsize = OFF_TO_IDX(round_page64(loffset));

	if (object == NULL) {
		/*
		 * And an object of the appropriate size
		 */
		object = vm_object_allocate_hold(OBJT_VNODE, lsize);
		object->handle = handle;
		vp->v_object = object;
		vp->v_filesize = length;
		if (vp->v_mount && (vp->v_mount->mnt_kern_flag & MNTK_NOMSYNC))
			vm_object_set_flag(object, OBJ_NOMSYNC);
		vref(vp);
	} else {
		vm_object_reference_quick(object);	/* also vref's */
		if (object->size != lsize) {
			kprintf("vnode_pager_alloc: Warning, objsize "
				"mismatch %jd/%jd vp=%p obj=%p\n",
				(intmax_t)object->size,
				(intmax_t)lsize,
				vp, object);
		}
		if (vp->v_filesize != length) {
			kprintf("vnode_pager_alloc: Warning, filesize "
				"mismatch %jd/%jd vp=%p obj=%p\n",
				(intmax_t)vp->v_filesize,
				(intmax_t)length,
				vp, object);
		}
	}
	vm_object_drop(object);
	lwkt_reltoken(&vp->v_token);

	return (object);
}
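A worked example of the block/page rounding above, with illustrative values (4 KiB pages assumed):

/*
 * Illustrative arithmetic for the rounding logic above:
 *   length = 10000, blksize = 8192, boff < 0 on entry
 *   boff    = 10000 % 8192          = 1808
 *   loffset = 10000 + (8192 - 1808) = 16384  (next block boundary)
 *   lsize   = OFF_TO_IDX(round_page64(16384)) = 4 pages
 * The object therefore covers the whole buffer cache buffer straddling
 * EOF, while userland still sees only v_filesize bytes.
 */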
/*
 * Allocates a new node of type 'type' inside the 'tmp' mount point, with
 * its owner set to 'uid', its group to 'gid' and its mode set to 'mode'.
 *
 * If the node type is set to 'VDIR', then the parent parameter must point
 * to the parent directory of the node being created.  It may only be NULL
 * while allocating the root node.
 *
 * If the node type is set to 'VBLK' or 'VCHR', then the rdev parameter
 * specifies the device the node represents.
 *
 * If the node type is set to 'VLNK', then the parameter target specifies
 * the file name of the target file for the symbolic link that is being
 * created.
 *
 * Note that new nodes are retrieved from the available list if it has
 * items or, if it is empty, from the node pool as long as there is enough
 * space to create them.
 *
 * Returns zero on success or an appropriate error code on failure.
 */
int
tmpfs_alloc_node(struct tmpfs_mount *tmp, enum vtype type,
		 uid_t uid, gid_t gid, mode_t mode,
		 char *target, int rmajor, int rminor,
		 struct tmpfs_node **node)
{
	struct tmpfs_node *nnode;
	struct timespec ts;
	udev_t rdev;

	KKASSERT(IFF(type == VLNK, target != NULL));
	KKASSERT(IFF(type == VBLK || type == VCHR, rmajor != VNOVAL));

	if (tmp->tm_nodes_inuse >= tmp->tm_nodes_max)
		return (ENOSPC);

	nnode = objcache_get(tmp->tm_node_pool, M_WAITOK | M_NULLOK);
	if (nnode == NULL)
		return (ENOSPC);

	/* Generic initialization. */
	nnode->tn_type = type;
	vfs_timestamp(&ts);
	nnode->tn_ctime = nnode->tn_mtime = nnode->tn_atime = ts.tv_sec;
	nnode->tn_ctimensec = nnode->tn_mtimensec = nnode->tn_atimensec =
	    ts.tv_nsec;
	nnode->tn_uid = uid;
	nnode->tn_gid = gid;
	nnode->tn_mode = mode;
	nnode->tn_id = tmpfs_fetch_ino(tmp);
	nnode->tn_advlock.init_done = 0;
	KKASSERT(nnode->tn_links == 0);

	/* Type-specific initialization. */
	switch (nnode->tn_type) {
	case VBLK:
	case VCHR:
		rdev = makeudev(rmajor, rminor);
		if (rdev == NOUDEV) {
			objcache_put(tmp->tm_node_pool, nnode);
			return (EINVAL);
		}
		nnode->tn_rdev = rdev;
		break;

	case VDIR:
		RB_INIT(&nnode->tn_dir.tn_dirtree);
		RB_INIT(&nnode->tn_dir.tn_cookietree);
		nnode->tn_size = 0;
		break;

	case VFIFO:
		/* FALLTHROUGH */
	case VSOCK:
		break;

	case VLNK:
		nnode->tn_size = strlen(target);
		nnode->tn_link = kmalloc(nnode->tn_size + 1, tmp->tm_name_zone,
					 M_WAITOK | M_NULLOK);
		if (nnode->tn_link == NULL) {
			objcache_put(tmp->tm_node_pool, nnode);
			return (ENOSPC);
		}
		bcopy(target, nnode->tn_link, nnode->tn_size);
		nnode->tn_link[nnode->tn_size] = '\0';
		break;

	case VREG:
		nnode->tn_reg.tn_aobj = swap_pager_alloc(NULL, 0,
							 VM_PROT_DEFAULT, 0);
		nnode->tn_reg.tn_aobj_pages = 0;
		nnode->tn_size = 0;
		vm_object_set_flag(nnode->tn_reg.tn_aobj, OBJ_NOPAGEIN);
		break;

	default:
		panic("tmpfs_alloc_node: type %p %d", nnode,
		      (int)nnode->tn_type);
	}

	TMPFS_NODE_LOCK(nnode);
	TMPFS_LOCK(tmp);
	LIST_INSERT_HEAD(&tmp->tm_nodes_used, nnode, tn_entries);
	tmp->tm_nodes_inuse++;
	TMPFS_UNLOCK(tmp);
	TMPFS_NODE_UNLOCK(nnode);

	*node = nnode;
	return (0);
}
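A hypothetical caller sketch, showing how a regular-file creation path might invoke tmpfs_alloc_node(); the cred/vap variables and the error handling are assumptions for illustration:

/*
 * Hypothetical caller sketch (not verbatim source): allocate a VREG
 * node.  VNOVAL marks rmajor/rminor as unused for non-device nodes,
 * and target is NULL because only VLNK nodes carry one.
 */
struct tmpfs_node *node;
int error;

error = tmpfs_alloc_node(tmp, VREG, cred->cr_uid, cred->cr_gid,
			 vap->va_mode & ALLPERMS, NULL, VNOVAL, VNOVAL,
			 &node);
if (error != 0)
	return (error);		/* typically ENOSPC or EINVAL */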
/*
 * The caller must hold proc_token.
 */
static int
do_vmtotal_callback(struct proc *p, void *data)
{
	struct vmtotal *totalp = data;
	struct lwp *lp;
	vm_map_entry_t entry;
	vm_map_t map;
	int paging;

	if (p->p_flag & P_SYSTEM)
		return (0);

	FOREACH_LWP_IN_PROC(lp, p) {
		switch (lp->lwp_stat) {
		case LSSTOP:
		case LSSLEEP:
			if ((p->p_flag & P_SWAPPEDOUT) == 0) {
				if ((lp->lwp_flag & LWP_SINTR) == 0)
					totalp->t_dw++;
				else if (lp->lwp_slptime < maxslp)
					totalp->t_sl++;
			} else if (lp->lwp_slptime < maxslp) {
				totalp->t_sw++;
			}
			if (lp->lwp_slptime >= maxslp)
				return (0);
			break;

		case LSRUN:
			if (p->p_flag & P_SWAPPEDOUT)
				totalp->t_sw++;
			else
				totalp->t_rq++;
			if (p->p_stat == SIDL)
				return (0);
			break;

		default:
			return (0);
		}
	}

	/*
	 * Note active objects.
	 */
	paging = 0;
	lwkt_gettoken(&vm_token);
	if (p->p_vmspace) {
		map = &p->p_vmspace->vm_map;
		vm_map_lock_read(map);
		for (entry = map->header.next;
		     entry != &map->header; entry = entry->next) {
			if (entry->maptype != VM_MAPTYPE_NORMAL &&
			    entry->maptype != VM_MAPTYPE_VPAGETABLE) {
				continue;
			}
			if (entry->object.vm_object == NULL)
				continue;
			vm_object_set_flag(entry->object.vm_object, OBJ_ACTIVE);
			paging |= entry->object.vm_object->paging_in_progress;
		}
		vm_map_unlock_read(map);
	}
	lwkt_reltoken(&vm_token);
	if (paging)
		totalp->t_pw++;
	return (0);
}
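For context, a minimal sketch of how a per-process callback like this is typically driven, assuming the two-argument allproc_scan() iterator form (which holds the needed tokens on the callback's behalf); the zeroed vmtotal accumulator is illustrative:

/*
 * Illustrative driver sketch (assumed iterator form): accumulate
 * per-process totals by walking every process with the callback above.
 */
struct vmtotal total;

bzero(&total, sizeof(total));
allproc_scan(do_vmtotal_callback, &total);
/* total.t_rq, total.t_sl, total.t_sw, ... now hold the snapshot. */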