/*
 * Return the lease time for the server that owns the given rnode.
 * If the filesystem is being unmounted, return a short (1 second)
 * lease instead.
 */
time_t
r2lease_time(rnode4_t *rp)
{
	nfs4_server_t *sp;
	time_t lease_time;
	mntinfo4_t *mi = VTOMI4(RTOV4(rp));

	(void) nfs_rw_enter_sig(&mi->mi_recovlock, RW_READER, 0);

	/* this locks down sp if it is found */
	sp = find_nfs4_server(mi);

	if (mi->mi_vfsp->vfs_flag & VFS_UNMOUNTED) {
		if (sp != NULL) {
			mutex_exit(&sp->s_lock);
			nfs4_server_rele(sp);
		}
		nfs_rw_exit(&mi->mi_recovlock);
		return (1);		/* 1 second */
	}

	ASSERT(sp != NULL);

	lease_time = sp->s_lease_time;

	mutex_exit(&sp->s_lock);
	nfs4_server_rele(sp);
	nfs_rw_exit(&mi->mi_recovlock);

	return (lease_time);
}
/*
 * Return 1 if the given filehandle is the root filehandle of the
 * filesystem to which rp belongs, 0 otherwise.
 */
static int
isrootfh(nfs4_sharedfh_t *fh, rnode4_t *rp)
{
	int isroot;

	isroot = 0;
	if (SFH4_SAME(VTOMI4(RTOV4(rp))->mi_rootfh, fh))
		isroot = 1;

	return (isroot);
}
int
nfs4_setopts(vnode_t *vp, model_t model, struct nfs_args *buf)
{
	mntinfo4_t *mi;			/* mount info, pointed at by vfs */
	STRUCT_HANDLE(nfs_args, args);
	int flags;

#ifdef lint
	model = model;
#endif

	STRUCT_SET_HANDLE(args, model, buf);

	flags = STRUCT_FGET(args, flags);

	/*
	 * Set option fields in mount info record
	 */
	mi = VTOMI4(vp);

	if (flags & NFSMNT_NOAC) {
		mutex_enter(&mi->mi_lock);
		mi->mi_flags |= MI4_NOAC;
		mutex_exit(&mi->mi_lock);
		PURGE_ATTRCACHE4(vp);
	}

	mutex_enter(&mi->mi_lock);
	if (flags & NFSMNT_NOCTO)
		mi->mi_flags |= MI4_NOCTO;
	if (flags & NFSMNT_LLOCK)
		mi->mi_flags |= MI4_LLOCK;
	if (flags & NFSMNT_GRPID)
		mi->mi_flags |= MI4_GRPID;
	mutex_exit(&mi->mi_lock);

	if (flags & NFSMNT_RETRANS) {
		if (STRUCT_FGET(args, retrans) < 0)
			return (EINVAL);
		mi->mi_retrans = STRUCT_FGET(args, retrans);
	}

	if (flags & NFSMNT_TIMEO) {
		if (STRUCT_FGET(args, timeo) <= 0)
			return (EINVAL);
		mi->mi_timeo = STRUCT_FGET(args, timeo);
	}

	if (flags & NFSMNT_RSIZE) {
		if (STRUCT_FGET(args, rsize) <= 0)
			return (EINVAL);
		mi->mi_tsize = MIN(mi->mi_tsize, STRUCT_FGET(args, rsize));
		mi->mi_curread = MIN(mi->mi_curread, mi->mi_tsize);
	}

	if (flags & NFSMNT_WSIZE) {
		if (STRUCT_FGET(args, wsize) <= 0)
			return (EINVAL);
		mi->mi_stsize = MIN(mi->mi_stsize, STRUCT_FGET(args, wsize));
		mi->mi_curwrite = MIN(mi->mi_curwrite, mi->mi_stsize);
	}

	if (flags & NFSMNT_ACREGMIN) {
		if (STRUCT_FGET(args, acregmin) < 0)
			mi->mi_acregmin = SEC2HR(ACMINMAX);
		else
			mi->mi_acregmin = SEC2HR(MIN(STRUCT_FGET(args,
			    acregmin), ACMINMAX));
	}

	if (flags & NFSMNT_ACREGMAX) {
		if (STRUCT_FGET(args, acregmax) < 0)
			mi->mi_acregmax = SEC2HR(ACMAXMAX);
		else
			mi->mi_acregmax = SEC2HR(MIN(STRUCT_FGET(args,
			    acregmax), ACMAXMAX));
	}

	if (flags & NFSMNT_ACDIRMIN) {
		if (STRUCT_FGET(args, acdirmin) < 0)
			mi->mi_acdirmin = SEC2HR(ACMINMAX);
		else
			mi->mi_acdirmin = SEC2HR(MIN(STRUCT_FGET(args,
			    acdirmin), ACMINMAX));
	}

	if (flags & NFSMNT_ACDIRMAX) {
		if (STRUCT_FGET(args, acdirmax) < 0)
			mi->mi_acdirmax = SEC2HR(ACMAXMAX);
		else
			mi->mi_acdirmax = SEC2HR(MIN(STRUCT_FGET(args,
			    acdirmax), ACMAXMAX));
	}

	return (0);
}
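/*
 * Illustrative sketch (not part of the build): each of the four
 * attribute-cache timeouts above follows the same policy.  A negative
 * user-supplied value selects the compiled-in ceiling; otherwise the
 * value is clamped to that ceiling and converted with SEC2HR().  The
 * hypothetical helper below restates the pattern for a single limit;
 * SEC2HR and MIN come from the headers this file already includes.
 */
#if 0	/* example only */
static hrtime_t
clamp_actimeo(int user_secs, int ceiling_secs)
{
	if (user_secs < 0)
		return (SEC2HR(ceiling_secs));
	return (SEC2HR(MIN(user_secs, ceiling_secs)));
}
#endif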
/*
 * Put an rnode on the free list.
 *
 * Rnodes which were allocated above and beyond the normal limit
 * are immediately freed.
 */
void
rp4_addfree(rnode4_t *rp, cred_t *cr)
{
	vnode_t *vp;
	vnode_t *xattr;
	struct vfs *vfsp;

	vp = RTOV4(rp);
	ASSERT(vp->v_count >= 1);
	ASSERT(rp->r_freef == NULL && rp->r_freeb == NULL);

	/*
	 * If we have too many rnodes allocated and there are no
	 * references to this rnode, or if the rnode is no longer
	 * accessible because it does not reside in the hash queues,
	 * or if an i/o error occurred while writing to the file,
	 * then just free it instead of putting it on the rnode
	 * freelist.
	 */
	vfsp = vp->v_vfsp;
	if (((rnode4_new > nrnode || !(rp->r_flags & R4HASHED) ||
#ifdef DEBUG
	    (nfs4_rnode_nofreelist != 0) ||
#endif
	    rp->r_error || (rp->r_flags & R4RECOVERR) ||
	    (vfsp->vfs_flag & VFS_UNMOUNTED)) && rp->r_count == 0)) {
		if (rp->r_flags & R4HASHED) {
			rw_enter(&rp->r_hashq->r_lock, RW_WRITER);
			mutex_enter(&vp->v_lock);
			if (vp->v_count > 1) {
				vp->v_count--;
				mutex_exit(&vp->v_lock);
				rw_exit(&rp->r_hashq->r_lock);
				return;
			}
			mutex_exit(&vp->v_lock);
			rp4_rmhash_locked(rp);
			rw_exit(&rp->r_hashq->r_lock);
		}

		/*
		 * Make sure we don't have a delegation on this rnode
		 * before destroying it.
		 */
		if (rp->r_deleg_type != OPEN_DELEGATE_NONE) {
			(void) nfs4delegreturn(rp,
			    NFS4_DR_FORCE|NFS4_DR_PUSH|NFS4_DR_REOPEN);
		}

		r4inactive(rp, cr);

		/*
		 * Recheck the vnode reference count.  We need to
		 * make sure that another reference has not been
		 * acquired while we were not holding v_lock.  The
		 * rnode is not in the rnode hash queues; one
		 * way for a reference to have been acquired
		 * is for a VOP_PUTPAGE because the rnode was marked
		 * with R4DIRTY or for a modified page.  This
		 * reference may have been acquired before our call
		 * to r4inactive.  The i/o may have been completed,
		 * thus allowing r4inactive to complete, but the
		 * reference to the vnode may not have been released
		 * yet.  In any case, the rnode can not be destroyed
		 * until the other references to this vnode have been
		 * released.  The other references will take care of
		 * either destroying the rnode or placing it on the
		 * rnode freelist.  If there are no other references,
		 * then the rnode may be safely destroyed.
		 */
		mutex_enter(&vp->v_lock);
		if (vp->v_count > 1) {
			vp->v_count--;
			mutex_exit(&vp->v_lock);
			return;
		}
		mutex_exit(&vp->v_lock);

		destroy_rnode4(rp);
		return;
	}

	/*
	 * Lock the hash queue and then recheck the reference count
	 * to ensure that no other threads have acquired a reference
	 * to indicate that the rnode should not be placed on the
	 * freelist.  If another reference has been acquired, then
	 * just release this one and let the other thread complete
	 * the processing of adding this rnode to the freelist.
	 */
again:
	rw_enter(&rp->r_hashq->r_lock, RW_WRITER);
	mutex_enter(&vp->v_lock);
	if (vp->v_count > 1) {
		vp->v_count--;
		mutex_exit(&vp->v_lock);
		rw_exit(&rp->r_hashq->r_lock);
		return;
	}
	mutex_exit(&vp->v_lock);

	/*
	 * Make sure we don't put an rnode with a delegation
	 * on the free list.
	 */
	if (rp->r_deleg_type != OPEN_DELEGATE_NONE) {
		rw_exit(&rp->r_hashq->r_lock);
		(void) nfs4delegreturn(rp,
		    NFS4_DR_FORCE|NFS4_DR_PUSH|NFS4_DR_REOPEN);
		goto again;
	}

	/*
	 * Now that we have the hash queue lock, and we know there
	 * are no more references on the vnode, check to make
	 * sure there aren't any open streams still on the rnode.
	 * If so, drop the hash queue lock, remove the open streams,
	 * and recheck the v_count.
	 */
	mutex_enter(&rp->r_os_lock);
	if (list_head(&rp->r_open_streams) != NULL) {
		mutex_exit(&rp->r_os_lock);
		rw_exit(&rp->r_hashq->r_lock);
		if (nfs_zone() != VTOMI4(vp)->mi_zone)
			nfs4_clear_open_streams(rp);
		else
			(void) nfs4close_all(vp, cr);
		goto again;
	}
	mutex_exit(&rp->r_os_lock);

	/*
	 * Before we put it on the freelist, make sure there are no pages.
	 * If there are, flush and commit all of the dirty and
	 * uncommitted pages, assuming the file system isn't read only.
	 */
	if (!(vp->v_vfsp->vfs_flag & VFS_RDONLY) && nfs4_dross_pages(vp)) {
		rw_exit(&rp->r_hashq->r_lock);
		r4flushpages(rp, cr);
		goto again;
	}

	/*
	 * Before we put it on the freelist, make sure there is no
	 * active xattr directory cached.  The freelist will not
	 * have its entries r4inactive'd if there is still an active
	 * rnode, thus nothing in the freelist can hold another
	 * rnode active.
	 */
	xattr = rp->r_xattr_dir;
	rp->r_xattr_dir = NULL;

	/*
	 * If there is no cached data or metadata for this file, then
	 * put the rnode on the front of the freelist so that it will
	 * be reused before other rnodes which may have cached data or
	 * metadata associated with them.
	 */
	mutex_enter(&rp4freelist_lock);
	if (rp4freelist == NULL) {
		rp->r_freef = rp;
		rp->r_freeb = rp;
		rp4freelist = rp;
	} else {
		rp->r_freef = rp4freelist;
		rp->r_freeb = rp4freelist->r_freeb;
		rp4freelist->r_freeb->r_freef = rp;
		rp4freelist->r_freeb = rp;
		if (!nfs4_has_pages(vp) && rp->r_dir == NULL &&
		    rp->r_symlink.contents == NULL && rp->r_secattr == NULL)
			rp4freelist = rp;
	}
	mutex_exit(&rp4freelist_lock);

	rw_exit(&rp->r_hashq->r_lock);

	if (xattr)
		VN_RELE(xattr);
}
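/*
 * Illustrative sketch (not part of the build): the freelist managed
 * above is a circular doubly-linked list threaded through r_freef and
 * r_freeb.  The standalone version below, using a hypothetical node_t,
 * shows the same insert-at-tail policy, with promotion to the head for
 * nodes holding no cached data so they are reclaimed first.
 */
#if 0	/* example only */
typedef struct node {
	struct node *freef;	/* forward link */
	struct node *freeb;	/* backward link */
	int cold;		/* 1 if nothing is cached; reuse first */
} node_t;

static node_t *freelist;	/* head of the circular list */

static void
addfree_sketch(node_t *np)
{
	if (freelist == NULL) {
		/* first entry: the node points at itself */
		np->freef = np;
		np->freeb = np;
		freelist = np;
	} else {
		/* splice in at the tail (just behind the head) */
		np->freef = freelist;
		np->freeb = freelist->freeb;
		freelist->freeb->freef = np;
		freelist->freeb = np;
		/* nothing cached: make it the head so it goes first */
		if (np->cold)
			freelist = np;
	}
}
#endif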
void
r4_do_attrcache(vnode_t *vp, nfs4_ga_res_t *garp, int newnode,
    hrtime_t t, cred_t *cr, int index)
{
	int is_stub;
	vattr_t *attr;

	/*
	 * Don't add to the attrcache if the time would overflow.  There
	 * is no need to check for that here, though: either attr is null
	 * or the time values in it were processed by nfs4_time_ntov(),
	 * which checks for time overflows.
	 */
	attr = garp ? &garp->n4g_va : NULL;

	if (attr) {
		if (!newnode) {
			rw_exit(&rtable4[index].r_lock);
#ifdef DEBUG
			if (vp->v_type != attr->va_type &&
			    vp->v_type != VNON && attr->va_type != VNON) {
				zcmn_err(VTOMI4(vp)->mi_zone->zone_id,
				    CE_WARN,
				    "makenfs4node: type (%d) doesn't "
				    "match type of found node at %p (%d)",
				    attr->va_type, (void *)vp, vp->v_type);
			}
#endif
			nfs4_attr_cache(vp, garp, t, cr, TRUE, NULL);
		} else {
			rnode4_t *rp = VTOR4(vp);

			vp->v_type = attr->va_type;
			vp->v_rdev = attr->va_rdev;

			/*
			 * Turn this object into a "stub" object if we
			 * crossed an underlying server fs boundary.
			 * To make this check, during mount we save the
			 * fsid of the server object being mounted.
			 * Here we compare this object's server fsid
			 * with the fsid we saved at mount.  If they
			 * are different, we crossed a server fs boundary.
			 *
			 * The stub type is set (or not) at rnode
			 * creation time and it never changes for the
			 * life of the rnode.
			 *
			 * The stub type is also set during RO failover,
			 * in nfs4_remap_file().
			 *
			 * This stub will be for a mirror-mount.
			 *
			 * We don't bother with taking r_state_lock to
			 * set the stub type because this is a new rnode
			 * and we're holding the hash bucket r_lock RW_WRITER.
			 * No other thread could have obtained access
			 * to this rnode.
			 */
			is_stub = 0;
			if (garp->n4g_fsid_valid) {
				fattr4_fsid ga_fsid = garp->n4g_fsid;
				servinfo4_t *svp = rp->r_server;

				rp->r_srv_fsid = ga_fsid;

				(void) nfs_rw_enter_sig(&svp->sv_lock,
				    RW_READER, 0);
				if (!FATTR4_FSID_EQ(&ga_fsid, &svp->sv_fsid))
					is_stub = 1;
				nfs_rw_exit(&svp->sv_lock);
			}

			if (is_stub)
				r4_stub_mirrormount(rp);
			else
				r4_stub_none(rp);

			/* Can not cache partial attr */
			if (attr->va_mask == AT_ALL)
				nfs4_attrcache_noinval(vp, garp, t);
			else
				PURGE_ATTRCACHE4(vp);

			rw_exit(&rtable4[index].r_lock);
		}
	} else {
		if (newnode) {
			PURGE_ATTRCACHE4(vp);
		}
		rw_exit(&rtable4[index].r_lock);
	}
}
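/*
 * Illustrative sketch (not part of the build): the stub check in
 * r4_do_attrcache() reduces to an fsid comparison between the GETATTR
 * result and the fsid recorded at mount time.  This assumes
 * FATTR4_FSID_EQ compares the major and minor words of a fattr4_fsid,
 * as its use above implies; the helper name is hypothetical.
 */
#if 0	/* example only */
static int
crossed_server_fs(fattr4_fsid *ga_fsid, fattr4_fsid *mnt_fsid)
{
	/* a different fsid means the object lives in another server fs */
	return (ga_fsid->major != mnt_fsid->major ||
	    ga_fsid->minor != mnt_fsid->minor);
}
#endif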