int
afs_mount(struct mount *mp, char *path, caddr_t data, struct nameidata *ndp,
          CTX_TYPE ctx)
#endif
{
    /* ndp contains the mounted-from device.  Just ignore it.
     * We also don't care about our proc struct. */
    size_t size;
    int error;
#ifdef AFS_DARWIN80_ENV
    struct vfsioattr ioattr;
    /* vfs_statfs advertised as RO, but isn't */
    /* new api will be needed to initialize this information (nfs needs to
     * set mntfromname too) */
#endif
    STATFS_TYPE *mnt_stat = vfs_statfs(mp);

    if (vfs_isupdate(mp))
        return EINVAL;

    AFS_GLOCK();
    AFS_STATCNT(afs_mount);

    if (data == 0 && afs_globalVFS) {   /* Don't allow remounts. */
        AFS_GUNLOCK();
        return (EBUSY);
    }

    afs_globalVFS = mp;
#ifdef AFS_DARWIN80_ENV
    vfs_ioattr(mp, &ioattr);
    ioattr.io_devblocksize = (16 * 32768);
    vfs_setioattr(mp, &ioattr);
    /* f_iosize is handled in VFS_GETATTR */
#else
    mp->vfs_bsize = 8192;
    mp->mnt_stat.f_iosize = 8192;
#endif
    vfs_getnewfsid(mp);
#ifndef AFS_DARWIN80_ENV
    (void)copyinstr(path, mp->mnt_stat.f_mntonname, MNAMELEN - 1, &size);
    memset(mp->mnt_stat.f_mntonname + size, 0, MNAMELEN - size);
#endif
    memset(mnt_stat->f_mntfromname, 0, MNAMELEN);

    if (data == 0) {
        strcpy(mnt_stat->f_mntfromname, "AFS");
        /* null-terminated string "AFS" will fit, just leave it be. */
        vfs_setfsprivate(mp, NULL);
    } else {
        struct VenusFid *rootFid = NULL;
        struct volume *tvp;
        char volName[MNAMELEN];

        (void)copyinstr(data, volName, MNAMELEN - 1, &size);
        memset(volName + size, 0, MNAMELEN - size);

        if (volName[0] == 0) {
            strcpy(mnt_stat->f_mntfromname, "AFS");
            vfs_setfsprivate(mp, &afs_rootFid);
        } else {
            struct cell *localcell = afs_GetPrimaryCell(READ_LOCK);
            if (localcell == NULL) {
                AFS_GUNLOCK();
                return ENODEV;
            }

            /* Set the volume identifier to "AFS:volume.name" */
            snprintf(mnt_stat->f_mntfromname, MNAMELEN - 1, "AFS:%s",
                     volName);
            tvp =
                afs_GetVolumeByName(volName, localcell->cellNum, 1,
                                    (struct vrequest *)0, READ_LOCK);

            if (tvp) {
                int volid = (tvp->roVol ? tvp->roVol : tvp->volume);
                MALLOC(rootFid, struct VenusFid *, sizeof(*rootFid),
                       M_UFSMNT, M_WAITOK);
                rootFid->Cell = localcell->cellNum;
                rootFid->Fid.Volume = volid;
                rootFid->Fid.Vnode = 1;
                rootFid->Fid.Unique = 1;
            } else {
                AFS_GUNLOCK();
                return ENODEV;
            }

            /* store the heap-allocated fid itself (cf. &afs_rootFid above,
             * which is also a struct VenusFid *); taking the address of the
             * local pointer would leave fsprivate dangling after return */
            vfs_setfsprivate(mp, rootFid);
        }
    }
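/*
 * A minimal user-space sketch of the mount-data naming convention handled
 * above, assuming nothing beyond the standard C library: an empty volume
 * name selects the global root (advertised as "AFS"), anything else is
 * advertised as "AFS:<volume>".  The names format_mntfromname and the
 * fixed MNAMELEN value are illustrative stand-ins, not openafs symbols.
 */
#include <stdio.h>
#include <string.h>

#define SKETCH_MNAMELEN 90

static void format_mntfromname(const char *volName, char *out)
{
    if (volName == NULL || volName[0] == '\0')
        strncpy(out, "AFS", SKETCH_MNAMELEN);            /* global root */
    else
        snprintf(out, SKETCH_MNAMELEN, "AFS:%s", volName); /* one volume */
    out[SKETCH_MNAMELEN - 1] = '\0';
}

int main(void)
{
    char buf[SKETCH_MNAMELEN];
    format_mntfromname("", buf);
    printf("%s\n", buf);        /* prints: AFS */
    format_mntfromname("root.cell", buf);
    printf("%s\n", buf);        /* prints: AFS:root.cell */
    return 0;
}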
afs_root(struct mount *mp, struct vnode **vpp) #endif { int error; struct vrequest treq; register struct vcache *tvp = 0; #ifdef AFS_FBSD50_ENV #ifndef AFS_FBSD53_ENV struct thread *td = curthread; #endif struct ucred *cr = td->td_ucred; #else struct proc *p = curproc; struct ucred *cr = p->p_cred->pc_ucred; #endif AFS_GLOCK(); AFS_STATCNT(afs_root); crhold(cr); if (afs_globalVp && (afs_globalVp->f.states & CStatd)) { tvp = afs_globalVp; error = 0; } else { tryagain: #ifndef AFS_FBSD80_ENV if (afs_globalVp) { afs_PutVCache(afs_globalVp); /* vrele() needed here or not? */ afs_globalVp = NULL; } #endif if (!(error = afs_InitReq(&treq, cr)) && !(error = afs_CheckInit())) { tvp = afs_GetVCache(&afs_rootFid, &treq, NULL, NULL); /* we really want this to stay around */ if (tvp) afs_globalVp = tvp; else error = ENOENT; } } if (tvp) { struct vnode *vp = AFSTOV(tvp); #ifdef AFS_FBSD50_ENV ASSERT_VI_UNLOCKED(vp, "afs_root"); #endif AFS_GUNLOCK(); /* * I'm uncomfortable about this. Shouldn't this happen at a * higher level, and shouldn't we busy the top-level directory * to prevent recycling? */ #ifdef AFS_FBSD50_ENV error = vget(vp, LK_EXCLUSIVE | LK_RETRY, td); vp->v_vflag |= VV_ROOT; #else error = vget(vp, LK_EXCLUSIVE | LK_RETRY, p); vp->v_flag |= VROOT; #endif AFS_GLOCK(); if (error != 0) goto tryagain; afs_globalVFS = mp; *vpp = vp; } afs_Trace2(afs_iclSetp, CM_TRACE_VFSROOT, ICL_TYPE_POINTER, tvp ? AFSTOV(tvp) : NULL, ICL_TYPE_INT32, error); AFS_GUNLOCK(); crfree(cr); return error; }
int osi_NetReceive(osi_socket so, struct sockaddr_in *addr, struct iovec *dvec, int nvecs, int *alength) { #ifdef AFS_DARWIN80_ENV socket_t asocket = (socket_t)so; struct msghdr msg; struct sockaddr_storage ss; int rlen; mbuf_t m; #else struct socket *asocket = (struct socket *)so; struct uio u; #endif int i; struct iovec iov[RX_MAXIOVECS]; struct sockaddr *sa = NULL; int code; size_t resid; int haveGlock = ISAFS_GLOCK(); /*AFS_STATCNT(osi_NetReceive); */ if (nvecs > RX_MAXIOVECS) osi_Panic("osi_NetReceive: %d: Too many iovecs.\n", nvecs); for (i = 0; i < nvecs; i++) iov[i] = dvec[i]; if ((afs_termState == AFSOP_STOP_RXK_LISTENER) || (afs_termState == AFSOP_STOP_COMPLETE)) return -1; if (haveGlock) AFS_GUNLOCK(); #if defined(KERNEL_FUNNEL) thread_funnel_switch(KERNEL_FUNNEL, NETWORK_FUNNEL); #endif #ifdef AFS_DARWIN80_ENV resid = *alength; memset(&msg, 0, sizeof(struct msghdr)); msg.msg_name = &ss; msg.msg_namelen = sizeof(struct sockaddr_storage); sa =(struct sockaddr *) &ss; code = sock_receivembuf(asocket, &msg, &m, 0, alength); if (!code) { size_t offset=0,sz; resid = *alength; for (i=0;i<nvecs && resid;i++) { sz=MIN(resid, iov[i].iov_len); code = mbuf_copydata(m, offset, sz, iov[i].iov_base); if (code) break; resid-=sz; offset+=sz; } } mbuf_freem(m); #else u.uio_iov = &iov[0]; u.uio_iovcnt = nvecs; u.uio_offset = 0; u.uio_resid = *alength; u.uio_segflg = UIO_SYSSPACE; u.uio_rw = UIO_READ; u.uio_procp = NULL; code = soreceive(asocket, &sa, &u, NULL, NULL, NULL); resid = u.uio_resid; #endif #if defined(KERNEL_FUNNEL) thread_funnel_switch(NETWORK_FUNNEL, KERNEL_FUNNEL); #endif if (haveGlock) AFS_GLOCK(); if (code) return code; *alength -= resid; if (sa) { if (sa->sa_family == AF_INET) { if (addr) *addr = *(struct sockaddr_in *)sa; } else printf("Unknown socket family %d in NetReceive\n", sa->sa_family); #ifndef AFS_DARWIN80_ENV FREE(sa, M_SONAME); #endif } return code; }
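/*
 * A self-contained sketch of the copy loop in the AFS_DARWIN80_ENV branch
 * above: data received as one contiguous blob (standing in for the mbuf
 * chain) is scattered into an iovec array, at most iov_len bytes per
 * vector, until the data or the vectors run out.  Plain user-space C;
 * memcpy stands in for mbuf_copydata.
 */
#include <string.h>
#include <sys/uio.h>

/* returns the number of bytes actually scattered */
static size_t scatter_to_iovecs(const char *src, size_t len,
                                struct iovec *iov, int nvecs)
{
    size_t offset = 0, resid = len;
    int i;

    for (i = 0; i < nvecs && resid; i++) {
        size_t sz = resid < iov[i].iov_len ? resid : iov[i].iov_len;
        memcpy(iov[i].iov_base, src + offset, sz);  /* one vector's worth */
        resid -= sz;
        offset += sz;
    }
    return offset;
}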
/**
 * Connects to a server by its server address.
 *
 * @param sap Server address.
 * @param aport Server port.
 * @param acell The cell the server belongs to.
 * @param tu Connect as this user.
 * @param force_if_down Return a connection even if the server is marked down.
 * @param create Create a new connection if no cached one exists.
 * @param locktype Specifies type of lock to be used for this function.
 * @param rxconn On success, set to the underlying (referenced) Rx connection.
 *
 * @return The new connection.
 */
struct afs_conn *
afs_ConnBySA(struct srvAddr *sap, unsigned short aport, afs_int32 acell,
             struct unixuser *tu, int force_if_down, afs_int32 create,
             afs_int32 locktype, struct rx_connection **rxconn)
{
    int glocked, foundvec;
    struct afs_conn *tc = NULL;
    struct sa_conn_vector *tcv = NULL;
    struct rx_securityClass *csec;      /* Security class object */
    int isec;                           /* Security index */
    int service;

    *rxconn = NULL;

    /* find cached connection */
    ObtainSharedLock(&afs_xconn, 15);
    foundvec = 0;
    for (tcv = sap->conns; tcv; tcv = tcv->next) {
        if (tcv->user == tu && tcv->port == aport) {
            /* return most eligible conn */
            if (!foundvec)
                foundvec = 1;
            UpgradeSToWLock(&afs_xconn, 37);
            tc = find_preferred_connection(tcv, create);
            ConvertWToSLock(&afs_xconn);
            break;
        }
    }

    if (!tc && !create) {
        /* Not found and can't create a new one. */
        ReleaseSharedLock(&afs_xconn);
        return NULL;
    }

    if (AFS_IS_DISCONNECTED && !AFS_IN_SYNC) {
        afs_warnuser("afs_ConnBySA: disconnected\n");
        ReleaseSharedLock(&afs_xconn);
        return NULL;
    }

    if (!foundvec && create) {
        /* No such connection vector exists.  Create one and splice it in.
         * Make sure the server record has been marked as used (for the
         * purposes of calculating up & down times, it's now considered to
         * be an ``active'' server).  Also make sure the server's
         * lastUpdateEvalTime gets set, marking the time of its ``birth''.
         */
        UpgradeSToWLock(&afs_xconn, 37);
        new_conn_vector(tcv);

        tcv->user = tu;
        tcv->port = aport;
        tcv->srvr = sap;
        tcv->next = sap->conns;
        sap->conns = tcv;

        /* all struct afs_conn ptrs come from here */
        tc = find_preferred_connection(tcv, create);

        afs_ActivateServer(sap);

        ConvertWToSLock(&afs_xconn);
    } /* end of if (!tcv) */

    if (!tc) {
        /* Not found and no alternatives. */
        ReleaseSharedLock(&afs_xconn);
        return NULL;
    }

    if (tu->states & UTokensBad) {
        /* we may still have an authenticated RPC connection here,
         * we'll have to create a new, unauthenticated, connection.
         * Perhaps a better way to do this would be to set
         * conn->forceConnectFS on all conns when the token first goes
         * bad, but that's somewhat trickier, due to locking
         * constraints (though not impossible).
         */
        if (tc->id && (rx_SecurityClassOf(tc->id) != 0)) {
            tc->forceConnectFS = 1;     /* force recreation of connection */
        }
        tu->states &= ~UHasTokens;      /* remove the authentication info */
    }

    glocked = ISAFS_GLOCK();
    if (tc->forceConnectFS) {
        UpgradeSToWLock(&afs_xconn, 38);
        csec = (struct rx_securityClass *)0;
        if (tc->id) {
            if (glocked)
                AFS_GUNLOCK();
            rx_SetConnSecondsUntilNatPing(tc->id, 0);
            rx_DestroyConnection(tc->id);
            if (glocked)
                AFS_GLOCK();
        }
        /*
         * Stupid hack to determine if using vldb service or file system
         * service.
         */
        if (aport == sap->server->cell->vlport)
            service = 52;
        else
            service = 1;
        isec = 0;

        csec = afs_pickSecurityObject(tc, &isec);

        if (glocked)
            AFS_GUNLOCK();
        tc->id = rx_NewConnection(sap->sa_ip, aport, service, csec, isec);
        if (glocked)
            AFS_GLOCK();
        if (service == 52) {
            rx_SetConnHardDeadTime(tc->id, afs_rx_harddead);
        }
        /* set to a RX_CALL_TIMEOUT error to allow MTU retry to trigger */
        rx_SetServerConnIdleDeadErr(tc->id, RX_CALL_DEAD);
        rx_SetConnIdleDeadTime(tc->id, afs_rx_idledead);

        /*
         * Only do this for the base connection, not per-user.
         * Will need to be revisited if/when CB gets security.
         */
        if ((isec == 0) && (service != 52) && !(tu->states & UTokensBad) &&
            (tu->viceId == UNDEFVID)
#ifndef UKERNEL                 /* ukernel runs as just one uid anyway */
            && (tu->uid == 0)
#endif
            )
            rx_SetConnSecondsUntilNatPing(tc->id, 20);

        tc->forceConnectFS = 0; /* apparently we're appropriately connected now */
        if (csec)
            rxs_Release(csec);
        ConvertWToSLock(&afs_xconn);
    } /* end of if (tc->forceConnectFS) */

    *rxconn = tc->id;
    rx_GetConnection(*rxconn);

    ReleaseSharedLock(&afs_xconn);
    return tc;
}
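/*
 * The lookup above follows a common find-or-create pattern: scan a linked
 * list of connection vectors keyed by (user, port), and splice a new
 * element onto the head when no match exists and creation is allowed.  A
 * minimal sketch with hypothetical types (conn_vector, find_or_create);
 * the locking and per-vector connection selection are omitted.
 */
#include <stdlib.h>

struct conn_vector {
    int user, port;
    struct conn_vector *next;
};

static struct conn_vector *find_or_create(struct conn_vector **head,
                                          int user, int port, int create)
{
    struct conn_vector *v;

    for (v = *head; v; v = v->next)
        if (v->user == user && v->port == port)
            return v;                   /* cached vector found */
    if (!create)
        return NULL;                    /* caller forbade creation */
    v = calloc(1, sizeof(*v));
    if (v) {
        v->user = user;
        v->port = port;
        v->next = *head;                /* splice in at the head */
        *head = v;
    }
    return v;
}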
int
afs_frlock(OSI_VN_DECL(vp), int cmd, struct flock *lfp, int flag,
           off_t offset,
#ifdef AFS_SGI65_ENV
           vrwlock_t vrwlock,
#endif
           cred_t * cr)
{
    int error;
    OSI_VN_CONVERT(vp);
#ifdef AFS_SGI65_ENV
    struct flid flid;
    int pid;
    get_current_flid(&flid);
    pid = flid.fl_pid;
#endif

    /*
     * Since AFS doesn't support byte-wise locks (and simply says yes!),
     * we handle byte locking locally only.  This makes lots of things
     * work much better.
     * XXX This doesn't properly handle moving from a byte-wise lock up
     * to a full file lock (we should remove the byte locks ..).  Of
     * course neither did the regular AFS way ...
     *
     * For GETLK we do a bit more - we first check any byte-wise locks;
     * if none, then check for full AFS file locks.
     */
    if (cmd == F_GETLK || lfp->l_whence != 0 || lfp->l_start != 0
        || (lfp->l_len != MAXEND && lfp->l_len != 0)) {
        AFS_RWLOCK(vp, VRWLOCK_WRITE);
        AFS_GUNLOCK();
#ifdef AFS_SGI65_ENV
        error = fs_frlock(OSI_VN_ARG(vp), cmd, lfp, flag, offset, vrwlock, cr);
#else
        error = fs_frlock(vp, cmd, lfp, flag, offset, cr);
#endif
        AFS_GLOCK();
        AFS_RWUNLOCK(vp, VRWLOCK_WRITE);
        if (error || cmd != F_GETLK)
            return error;
        if (lfp->l_type != F_UNLCK)
            /* found some blocking lock */
            return 0;
        /* fall through to check for full AFS file locks */
    }

    /* map BSD style to plain - we don't call reclock()
     * and it's only there that the difference is important */
    switch (cmd) {
    case F_GETLK:
    case F_RGETLK:
        break;
    case F_SETLK:
    case F_RSETLK:
        break;
    case F_SETBSDLK:
        cmd = F_SETLK;
        break;
    case F_SETLKW:
    case F_RSETLKW:
        break;
    case F_SETBSDLKW:
        cmd = F_SETLKW;
        break;
    default:
        return EINVAL;
    }

    AFS_GUNLOCK();

    error = convoff(vp, lfp, 0, offset, SEEKLIMIT
#ifdef AFS_SGI64_ENV
                    , OSI_GET_CURRENT_CRED()
#endif /* AFS_SGI64_ENV */
        );

    AFS_GLOCK();
    if (!error) {
#ifdef AFS_SGI65_ENV
        error = afs_lockctl(vp, lfp, cmd, cr, pid);
#else
        error = afs_lockctl(vp, lfp, cmd, cr, OSI_GET_CURRENT_PID());
#endif
    }
    return error;
}
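/*
 * Sketch of the "is this a whole-file lock?" test implied above: AFS hands
 * a request to the local byte-range machinery unless it starts at offset 0
 * (l_whence == SEEK_SET, l_start == 0) and covers the entire file
 * (l_len == 0, meaning to EOF, or l_len == MAXEND).  The MAXEND value here
 * is only an illustrative stand-in for the IRIX constant.
 */
#include <fcntl.h>

#define SKETCH_MAXEND 0x7fffffffffffffffLL      /* illustrative value only */

static int is_whole_file_lock(const struct flock *lfp)
{
    return lfp->l_whence == 0 && lfp->l_start == 0
        && (lfp->l_len == 0 || lfp->l_len == SKETCH_MAXEND);
}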
static int afs_export_get_name(struct dentry *parent, char *name, struct dentry *child) { struct afs_fakestat_state fakestate; struct get_name_data data; struct vrequest treq; struct volume *tvp; struct vcache *vcp; struct dcache *tdc; cred_t *credp; afs_size_t dirOffset, dirLen; afs_int32 code = 0; if (!parent->d_inode) { #ifdef OSI_EXPORT_DEBUG /* can't lookup name in a negative dentry */ printk("afs: get_name(%s, %s): no parent inode\n", parent->d_name.name ? (char *)parent->d_name.name : "?", child->d_name.name ? (char *)child->d_name.name : "?"); #endif return -EIO; } if (!child->d_inode) { #ifdef OSI_EXPORT_DEBUG /* can't find the FID of negative dentry */ printk("afs: get_name(%s, %s): no child inode\n", parent->d_name.name ? (char *)parent->d_name.name : "?", child->d_name.name ? (char *)child->d_name.name : "?"); #endif return -ENOENT; } afs_InitFakeStat(&fakestate); credp = crref(); AFS_GLOCK(); vcp = VTOAFS(child->d_inode); /* special case dynamic mount directory */ if (afs_IsDynrootMount(vcp)) { #ifdef OSI_EXPORT_DEBUG printk("afs: get_name(%s, 0x%08x/%d/%d.%d): this is the dynmount dir\n", parent->d_name.name ? (char *)parent->d_name.name : "?", vcp->f.fid.Cell, vcp->f.fid.Fid.Volume, vcp->f.fid.Fid.Vnode, vcp->f.fid.Fid.Unique); #endif data.fid = vcp->f.fid; if (VTOAFS(parent->d_inode) == afs_globalVp) strcpy(name, AFS_DYNROOT_MOUNTNAME); else code = -ENOENT; goto done; } /* Figure out what FID to look for */ if (vcp->mvstat == 2) { /* volume root */ tvp = afs_GetVolume(&vcp->f.fid, 0, READ_LOCK); if (!tvp) { #ifdef OSI_EXPORT_DEBUG printk("afs: get_name(%s, 0x%08x/%d/%d.%d): no volume for root\n", parent->d_name.name ? (char *)parent->d_name.name : "?", vcp->f.fid.Cell, vcp->f.fid.Fid.Volume, vcp->f.fid.Fid.Vnode, vcp->f.fid.Fid.Unique); #endif code = ENOENT; goto done; } data.fid = tvp->mtpoint; afs_PutVolume(tvp, READ_LOCK); } else { data.fid = vcp->f.fid; } vcp = VTOAFS(parent->d_inode); #ifdef OSI_EXPORT_DEBUG printk("afs: get_name(%s, 0x%08x/%d/%d.%d): parent is 0x%08x/%d/%d.%d\n", parent->d_name.name ? (char *)parent->d_name.name : "?", data.fid.Cell, data.fid.Fid.Volume, data.fid.Fid.Vnode, data.fid.Fid.Unique, vcp->f.fid.Cell, vcp->f.fid.Fid.Volume, vcp->f.fid.Fid.Vnode, vcp->f.fid.Fid.Unique); #endif code = afs_InitReq(&treq, credp); if (code) { #ifdef OSI_EXPORT_DEBUG printk("afs: get_name(%s, 0x%08x/%d/%d.%d): afs_InitReq: %d\n", parent->d_name.name ? (char *)parent->d_name.name : "?", data.fid.Cell, data.fid.Fid.Volume, data.fid.Fid.Vnode, data.fid.Fid.Unique, code); #endif goto done; } /* a dynamic mount point in the dynamic mount directory */ if (afs_IsDynrootMount(vcp) && afs_IsDynrootAnyFid(&data.fid) && VNUM_TO_VNTYPE(data.fid.Fid.Vnode) == VN_TYPE_MOUNT) { #ifdef OSI_EXPORT_DEBUG printk("afs: get_name(%s, 0x%08x/%d/%d.%d): dynamic mount point\n", parent->d_name.name ? (char *)parent->d_name.name : "?", data.fid.Cell, data.fid.Fid.Volume, data.fid.Fid.Vnode, data.fid.Fid.Unique); #endif vcp = afs_GetVCache(&data.fid, &treq, NULL, NULL); if (vcp) { ObtainReadLock(&vcp->lock); if (strlen(vcp->linkData + 1) <= NAME_MAX) strcpy(name, vcp->linkData + 1); else code = ENOENT; ReleaseReadLock(&vcp->lock); afs_PutVCache(vcp); } else { #ifdef OSI_EXPORT_DEBUG printk("afs: get_name(%s, 0x%08x/%d/%d.%d): no vcache\n", parent->d_name.name ? 
           (char *)parent->d_name.name : "?",
                   data.fid.Cell, data.fid.Fid.Volume,
                   data.fid.Fid.Vnode, data.fid.Fid.Unique);
#endif
            code = ENOENT;
        }
        goto done;
    }

    code = afs_EvalFakeStat(&vcp, &fakestate, &treq);
    if (code)
        goto done;

    if (vcp->f.fid.Cell != data.fid.Cell
        || vcp->f.fid.Fid.Volume != data.fid.Fid.Volume) {
        /* parent is not the expected cell and volume; thus it
         * cannot possibly contain the fid we are looking for */
#ifdef OSI_EXPORT_DEBUG
        printk("afs: get_name(%s, 0x%08x/%d/%d.%d): wrong parent 0x%08x/%d\n",
               parent->d_name.name ? (char *)parent->d_name.name : "?",
               data.fid.Cell, data.fid.Fid.Volume,
               data.fid.Fid.Vnode, data.fid.Fid.Unique,
               vcp->f.fid.Cell, vcp->f.fid.Fid.Volume);
#endif
        code = ENOENT;
        goto done;
    }

redo:
    if (!(vcp->f.states & CStatd)) {
        if ((code = afs_VerifyVCache2(vcp, &treq))) {
#ifdef OSI_EXPORT_DEBUG
            printk("afs: get_name(%s, 0x%08x/%d/%d.%d): VerifyVCache2(0x%08x/%d/%d.%d): %d\n",
                   parent->d_name.name ? (char *)parent->d_name.name : "?",
                   data.fid.Cell, data.fid.Fid.Volume,
                   data.fid.Fid.Vnode, data.fid.Fid.Unique,
                   vcp->f.fid.Cell, vcp->f.fid.Fid.Volume,
                   vcp->f.fid.Fid.Vnode, vcp->f.fid.Fid.Unique, code);
#endif
            goto done;
        }
    }

    tdc = afs_GetDCache(vcp, (afs_size_t) 0, &treq, &dirOffset, &dirLen, 1);
    if (!tdc) {
#ifdef OSI_EXPORT_DEBUG
        printk("afs: get_name(%s, 0x%08x/%d/%d.%d): GetDCache(0x%08x/%d/%d.%d): %d\n",
               parent->d_name.name ? (char *)parent->d_name.name : "?",
               data.fid.Cell, data.fid.Fid.Volume,
               data.fid.Fid.Vnode, data.fid.Fid.Unique,
               vcp->f.fid.Cell, vcp->f.fid.Fid.Volume,
               vcp->f.fid.Fid.Vnode, vcp->f.fid.Fid.Unique, code);
#endif
        code = EIO;
        goto done;
    }

    ObtainReadLock(&vcp->lock);
    ObtainReadLock(&tdc->lock);

    /*
     * Make sure that the data in the cache is current.  There are two
     * cases we need to worry about:
     * 1. The cache data is being fetched by another process.
     * 2. The cache data is no longer valid.
     */
    while ((vcp->f.states & CStatd)
           && (tdc->dflags & DFFetching)
           && hsame(vcp->f.m.DataVersion, tdc->f.versionNo)) {
        ReleaseReadLock(&tdc->lock);
        ReleaseReadLock(&vcp->lock);
        afs_osi_Sleep(&tdc->validPos);
        ObtainReadLock(&vcp->lock);
        ObtainReadLock(&tdc->lock);
    }
    if (!(vcp->f.states & CStatd)
        || !hsame(vcp->f.m.DataVersion, tdc->f.versionNo)) {
        ReleaseReadLock(&tdc->lock);
        ReleaseReadLock(&vcp->lock);
        afs_PutDCache(tdc);
#ifdef OSI_EXPORT_DEBUG
        printk("afs: get_name(%s, 0x%08x/%d/%d.%d): dir (0x%08x/%d/%d.%d) changed; retrying\n",
               parent->d_name.name ? (char *)parent->d_name.name : "?",
               data.fid.Cell, data.fid.Fid.Volume,
               data.fid.Fid.Vnode, data.fid.Fid.Unique,
               vcp->f.fid.Cell, vcp->f.fid.Fid.Volume,
               vcp->f.fid.Fid.Vnode, vcp->f.fid.Fid.Unique);
#endif
        goto redo;
    }

    data.name = name;
    data.found = 0;
    code = afs_dir_EnumerateDir(tdc, get_name_hook, &data);
    if (!code && !data.found) {
#ifdef OSI_EXPORT_DEBUG
        printk("afs: get_name(%s, 0x%08x/%d/%d.%d): not found\n",
               parent->d_name.name ? (char *)parent->d_name.name : "?",
               data.fid.Cell, data.fid.Fid.Volume,
               data.fid.Fid.Vnode, data.fid.Fid.Unique);
#endif
        code = ENOENT;
    } else if (code) {
#ifdef OSI_EXPORT_DEBUG
        printk("afs: get_name(%s, 0x%08x/%d/%d.%d): EnumerateDir(0x%08x/%d/%d.%d): %d\n",
               parent->d_name.name ? (char *)parent->d_name.name : "?",
               data.fid.Cell, data.fid.Fid.Volume,
               data.fid.Fid.Vnode, data.fid.Fid.Unique,
               vcp->f.fid.Cell, vcp->f.fid.Fid.Volume,
               vcp->f.fid.Fid.Vnode, vcp->f.fid.Fid.Unique, code);
#endif
    }

    ReleaseReadLock(&tdc->lock);
    ReleaseReadLock(&vcp->lock);
    afs_PutDCache(tdc);

done:
    if (!code) {
#ifdef OSI_EXPORT_DEBUG
        /* success path is debug-only, like every other printk here */
        printk("afs: get_name(%s, 0x%08x/%d/%d.%d) => %s\n",
               parent->d_name.name ? (char *)parent->d_name.name : "?",
               data.fid.Cell, data.fid.Fid.Volume,
               data.fid.Fid.Vnode, data.fid.Fid.Unique, name);
#endif
    }
    afs_PutFakeStat(&fakestate);
    AFS_GUNLOCK();
    crfree(credp);
    code = afs_CheckCode(code, &treq, 102);
    return -code;
}
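/*
 * get_name above works by enumerating the parent directory and asking a
 * hook to spot the entry whose FID matches the child.  A sketch of what
 * such a hook can look like; the sketch_* types and the hook signature are
 * simplified stand-ins for the real openafs definitions, not the actual
 * get_name_hook interface.
 */
#include <string.h>

struct sketch_fid { unsigned int vnode, unique; };
struct sketch_name_data {
    char *name;                 /* output buffer, sized by the caller */
    struct sketch_fid fid;      /* FID we are searching for */
    int found;
};

static int sketch_name_hook(void *hook_data, const char *entry_name,
                            unsigned int vnode, unsigned int unique)
{
    struct sketch_name_data *d = hook_data;

    if (vnode == d->fid.vnode && unique == d->fid.unique) {
        strcpy(d->name, entry_name);    /* caller sized the buffer */
        d->found = 1;
        return 1;                       /* stop the enumeration */
    }
    return 0;                           /* keep scanning */
}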
static struct dentry *afs_export_get_parent(struct dentry *child) { struct VenusFid tfid; struct vrequest treq; struct cell *tcell; struct vcache *vcp; struct dentry *dp = NULL; cred_t *credp; afs_uint32 cellidx; int code; if (!child->d_inode) { /* can't find the parent of a negative dentry */ #ifdef OSI_EXPORT_DEBUG printk("afs: get_parent(%s): no inode\n", child->d_name.name ? (char *)child->d_name.name : "?"); #endif return ERR_PTR(-EIO); } credp = crref(); AFS_GLOCK(); vcp = VTOAFS(child->d_inode); if (afs_IsDynrootMount(vcp)) { /* the dynmount directory; parent is always the AFS root */ tfid = afs_globalVp->f.fid; } else if (afs_IsDynrootAny(vcp) && VNUM_TO_VNTYPE(vcp->f.fid.Fid.Vnode) == VN_TYPE_MOUNT) { /* a mount point in the dynmount directory */ afs_GetDynrootMountFid(&tfid); } else if (vcp->mvstat == 2) { /* volume root */ ObtainReadLock(&vcp->lock); if (vcp->mvid && vcp->mvid->Fid.Volume) { tfid = *vcp->mvid; ReleaseReadLock(&vcp->lock); } else { ReleaseReadLock(&vcp->lock); tcell = afs_GetCell(vcp->f.fid.Cell, READ_LOCK); if (!tcell) { #ifdef OSI_EXPORT_DEBUG printk("afs: get_parent(0x%08x/%d/%d.%d): no cell\n", vcp->f.fid.Cell, vcp->f.fid.Fid.Volume, vcp->f.fid.Fid.Vnode, vcp->f.fid.Fid.Unique); #endif dp = ERR_PTR(-ENOENT); goto done; } cellidx = tcell->cellIndex; afs_PutCell(tcell, READ_LOCK); afs_GetDynrootMountFid(&tfid); tfid.Fid.Vnode = VNUM_FROM_TYPEID(VN_TYPE_MOUNT, cellidx << 2); tfid.Fid.Unique = vcp->f.fid.Fid.Volume; } } else { /* any other vnode */ if (vType(vcp) == VDIR && !vcp->f.parent.vnode && vcp->mvstat != 1) { code = afs_InitReq(&treq, credp); if (code) { #ifdef OSI_EXPORT_DEBUG printk("afs: get_parent(0x%08x/%d/%d.%d): InitReq: %d\n", vcp->f.fid.Cell, vcp->f.fid.Fid.Volume, vcp->f.fid.Fid.Vnode, vcp->f.fid.Fid.Unique, code); #endif dp = ERR_PTR(-ENOENT); goto done; } else { code = update_dir_parent(&treq, vcp); if (code) { #ifdef OSI_EXPORT_DEBUG printk("afs: get_parent(0x%08x/%d/%d.%d): update_dir_parent: %d\n", vcp->f.fid.Cell, vcp->f.fid.Fid.Volume, vcp->f.fid.Fid.Vnode, vcp->f.fid.Fid.Unique, code); #endif dp = ERR_PTR(-ENOENT); goto done; } } } tfid.Cell = vcp->f.fid.Cell; tfid.Fid.Volume = vcp->f.fid.Fid.Volume; tfid.Fid.Vnode = vcp->f.parent.vnode; tfid.Fid.Unique = vcp->f.parent.unique; } #ifdef OSI_EXPORT_DEBUG printk("afs: get_parent(0x%08x/%d/%d.%d): => 0x%08x/%d/%d.%d\n", vcp->f.fid.Cell, vcp->f.fid.Fid.Volume, vcp->f.fid.Fid.Vnode, vcp->f.fid.Fid.Unique, tfid.Cell, tfid.Fid.Volume, tfid.Fid.Vnode, tfid.Fid.Unique); #endif dp = get_dentry_from_fid(credp, &tfid); if (!dp) { #ifdef OSI_EXPORT_DEBUG printk("afs: get_parent(0x%08x/%d/%d.%d): no dentry\n", vcp->f.fid.Cell, vcp->f.fid.Fid.Volume, vcp->f.fid.Fid.Vnode, vcp->f.fid.Fid.Unique); #endif dp = ERR_PTR(-ENOENT); } done: AFS_GUNLOCK(); crfree(credp); return dp; }
/* this call, unlike osi_FlushText, is supposed to discard caches that may
   contain invalid information if a file is written remotely, but that may
   contain valid information that needs to be written back if the file is
   being written locally.  It doesn't subsume osi_FlushText, since the
   latter function may be needed to flush caches that are invalidated by
   local writes.

   avc->pvnLock is already held, avc->lock is guaranteed not to be held (by
   us, of course).
*/
void
osi_FlushPages(struct vcache *avc, afs_ucred_t *credp)
{
#ifdef AFS_FBSD70_ENV
    int vfslocked;
#endif
    afs_hyper_t origDV;
#if defined(AFS_CACHE_BYPASS)
    /* The optimization to check DV under read lock below is identical to a
     * change from the CITI cache bypass work.  The problem CITI found in
     * 1999 was that this code and the background daemon doing prefetching
     * competed for the vcache entry shared lock.  It's not clear to me from
     * the tech report, but it looks like CITI fixed the general prefetch
     * code path as a bonus when experimenting on prefetch for cache bypass,
     * see citi-tr-01-3.
     */
#endif
    ObtainReadLock(&avc->lock);
    /* If we've already purged this version, or if we're the ones
     * writing this version, don't flush it (could lose the
     * data we're writing). */
    if ((hcmp((avc->f.m.DataVersion), (avc->mapDV)) <= 0)
        || ((avc->execsOrWriters > 0) && afs_DirtyPages(avc))) {
        ReleaseReadLock(&avc->lock);
        return;
    }
    ReleaseReadLock(&avc->lock);
    ObtainWriteLock(&avc->lock, 10);
    /* Check again */
    if ((hcmp((avc->f.m.DataVersion), (avc->mapDV)) <= 0)
        || ((avc->execsOrWriters > 0) && afs_DirtyPages(avc))) {
        ReleaseWriteLock(&avc->lock);
        return;
    }
    if (hiszero(avc->mapDV)) {
        hset(avc->mapDV, avc->f.m.DataVersion);
        ReleaseWriteLock(&avc->lock);
        return;
    }

    AFS_STATCNT(osi_FlushPages);
    hset(origDV, avc->f.m.DataVersion);
    afs_Trace3(afs_iclSetp, CM_TRACE_FLUSHPAGES, ICL_TYPE_POINTER, avc,
               ICL_TYPE_INT32, origDV.low, ICL_TYPE_INT32, avc->f.m.Length);

    ReleaseWriteLock(&avc->lock);
#ifdef AFS_FBSD70_ENV
    vfslocked = VFS_LOCK_GIANT(AFSTOV(avc)->v_mount);
#endif
#ifndef AFS_FBSD70_ENV
    AFS_GUNLOCK();
#endif
    osi_VM_FlushPages(avc, credp);
#ifndef AFS_FBSD70_ENV
    AFS_GLOCK();
#endif
#ifdef AFS_FBSD70_ENV
    VFS_UNLOCK_GIANT(vfslocked);
#endif
    ObtainWriteLock(&avc->lock, 88);

    /* do this last, and to original version, since stores may occur
     * while executing above PUTPAGE call */
    hset(avc->mapDV, origDV);
    ReleaseWriteLock(&avc->lock);
}
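/*
 * The DV comparison above is done twice: once under a read lock (cheap,
 * usually lets us bail out early) and again under the write lock (the
 * answer may have changed while we upgraded).  The same double-check
 * pattern, reduced to a self-contained pthreads sketch with hypothetical
 * version counters standing in for mapDV and f.m.DataVersion:
 */
#include <pthread.h>

static pthread_rwlock_t lk = PTHREAD_RWLOCK_INITIALIZER;
static long mapped_version, current_version;

static void flush_if_stale(void (*flush)(void))
{
    long v;

    pthread_rwlock_rdlock(&lk);
    if (current_version <= mapped_version) {    /* already flushed this DV */
        pthread_rwlock_unlock(&lk);
        return;
    }
    pthread_rwlock_unlock(&lk);

    pthread_rwlock_wrlock(&lk);
    if (current_version <= mapped_version) {    /* raced: recheck and bail */
        pthread_rwlock_unlock(&lk);
        return;
    }
    v = current_version;                /* remember the version we flush */
    pthread_rwlock_unlock(&lk);         /* drop the lock around the flush */
    flush();
    pthread_rwlock_wrlock(&lk);
    mapped_version = v;                 /* record the flushed version last */
    pthread_rwlock_unlock(&lk);
}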
int osi_UFSTruncate(struct osi_file *afile, afs_int32 asize) { afs_int32 code; struct osi_stat tstat; struct iattr newattrs; struct inode *inode = OSIFILE_INODE(afile); AFS_STATCNT(osi_Truncate); /* This routine only shrinks files, and most systems * have very slow truncates, even when the file is already * small enough. Check now and save some time. */ code = afs_osi_Stat(afile, &tstat); if (code || tstat.size <= asize) return code; ObtainWriteLock(&afs_xosi, 321); AFS_GUNLOCK(); #ifdef STRUCT_INODE_HAS_I_ALLOC_SEM down_write(&inode->i_alloc_sem); #endif #ifdef STRUCT_INODE_HAS_I_MUTEX mutex_lock(&inode->i_mutex); #else down(&inode->i_sem); #endif newattrs.ia_size = asize; newattrs.ia_valid = ATTR_SIZE | ATTR_CTIME; #if defined(AFS_LINUX24_ENV) newattrs.ia_ctime = CURRENT_TIME; /* avoid notify_change() since it wants to update dentry->d_parent */ lock_kernel(); code = inode_change_ok(inode, &newattrs); if (!code) { #ifdef INODE_SETATTR_NOT_VOID code = inode_setattr(inode, &newattrs); #else inode_setattr(inode, &newattrs); #endif } unlock_kernel(); if (!code) truncate_inode_pages(&inode->i_data, asize); #else i_size_write(inode, asize); if (inode->i_sb->s_op && inode->i_sb->s_op->notify_change) { code = inode->i_sb->s_op->notify_change(&afile->dentry, &newattrs); } if (!code) { truncate_inode_pages(inode, asize); if (inode->i_op && inode->i_op->truncate) inode->i_op->truncate(inode); } #endif code = -code; #ifdef STRUCT_INODE_HAS_I_MUTEX mutex_unlock(&inode->i_mutex); #else up(&inode->i_sem); #endif #ifdef STRUCT_INODE_HAS_I_ALLOC_SEM up_write(&inode->i_alloc_sem); #endif AFS_GLOCK(); ReleaseWriteLock(&afs_xosi); return code; }
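/*
 * The stat-then-truncate guard above (this routine only shrinks files, and
 * skips the expensive truncate when the file is already small enough)
 * translates directly to user space:
 */
#include <sys/types.h>
#include <sys/stat.h>
#include <unistd.h>

static int shrink_file(int fd, off_t asize)
{
    struct stat st;

    if (fstat(fd, &st) != 0)
        return -1;
    if (st.st_size <= asize)    /* already small enough: nothing to do */
        return 0;
    return ftruncate(fd, asize);
}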
/** * Reset volume name to volume id mapping cache. * @param flags */ void afs_CheckVolumeNames(int flags) { afs_int32 i, j; struct volume *tv; unsigned int now; struct vcache *tvc; afs_int32 *volumeID, *cellID, vsize, nvols; #ifdef AFS_DARWIN80_ENV vnode_t tvp; #endif AFS_STATCNT(afs_CheckVolumeNames); nvols = 0; volumeID = cellID = NULL; vsize = 0; ObtainReadLock(&afs_xvolume); if (flags & AFS_VOLCHECK_EXPIRED) { /* * allocate space to hold the volumeIDs and cellIDs, only if * we will be invalidating the mountpoints later on */ for (i = 0; i < NVOLS; i++) for (tv = afs_volumes[i]; tv; tv = tv->next) ++vsize; volumeID = (afs_int32 *) afs_osi_Alloc(2 * vsize * sizeof(*volumeID)); cellID = (volumeID) ? volumeID + vsize : 0; } now = osi_Time(); for (i = 0; i < NVOLS; i++) { for (tv = afs_volumes[i]; tv; tv = tv->next) { if (flags & AFS_VOLCHECK_EXPIRED) { if (((tv->expireTime < (now + 10)) && (tv->states & VRO)) || (flags & AFS_VOLCHECK_FORCE)) { afs_ResetVolumeInfo(tv); /* also resets status */ if (volumeID) { volumeID[nvols] = tv->volume; cellID[nvols] = tv->cell; } ++nvols; continue; } } /* ??? */ if (flags & (AFS_VOLCHECK_BUSY | AFS_VOLCHECK_FORCE)) { for (j = 0; j < MAXHOSTS; j++) tv->status[j] = not_busy; } } } ReleaseReadLock(&afs_xvolume); /* next ensure all mt points are re-evaluated */ if (nvols || (flags & (AFS_VOLCHECK_FORCE | AFS_VOLCHECK_MTPTS))) { loop: ObtainReadLock(&afs_xvcache); for (i = 0; i < VCSIZE; i++) { for (tvc = afs_vhashT[i]; tvc; tvc = tvc->hnext) { /* if the volume of "mvid" of the vcache entry is among the * ones we found earlier, then we re-evaluate it. Also, if the * force bit is set or we explicitly asked to reevaluate the * mt-pts, we clean the cmvalid bit */ if ((flags & (AFS_VOLCHECK_FORCE | AFS_VOLCHECK_MTPTS)) || (tvc->mvid && inVolList(tvc->mvid, nvols, volumeID, cellID))) tvc->f.states &= ~CMValid; /* If the volume that this file belongs to was reset earlier, * then we should remove its callback. * Again, if forced, always do it. */ if ((tvc->f.states & CRO) && (inVolList(&tvc->f.fid, nvols, volumeID, cellID) || (flags & AFS_VOLCHECK_FORCE))) { if (tvc->f.states & CVInit) { ReleaseReadLock(&afs_xvcache); afs_osi_Sleep(&tvc->f.states); goto loop; } #ifdef AFS_DARWIN80_ENV if (tvc->f.states & CDeadVnode) { ReleaseReadLock(&afs_xvcache); afs_osi_Sleep(&tvc->f.states); goto loop; } tvp = AFSTOV(tvc); if (vnode_get(tvp)) continue; if (vnode_ref(tvp)) { AFS_GUNLOCK(); /* AFSTOV(tvc) may be NULL */ vnode_put(tvp); AFS_GLOCK(); continue; } #else AFS_FAST_HOLD(tvc); #endif ReleaseReadLock(&afs_xvcache); ObtainWriteLock(&afs_xcbhash, 485); /* LOCKXXX: We aren't holding tvc write lock? */ afs_DequeueCallback(tvc); tvc->f.states &= ~CStatd; ReleaseWriteLock(&afs_xcbhash); if (tvc->f.fid.Fid.Vnode & 1 || (vType(tvc) == VDIR)) osi_dnlc_purgedp(tvc); #ifdef AFS_DARWIN80_ENV vnode_put(AFSTOV(tvc)); /* our tvc ptr is still good until now */ AFS_FAST_RELE(tvc); ObtainReadLock(&afs_xvcache); #else ObtainReadLock(&afs_xvcache); /* our tvc ptr is still good until now */ AFS_FAST_RELE(tvc); #endif } } } osi_dnlc_purge(); /* definitely overkill, but it's safer this way. */ ReleaseReadLock(&afs_xvcache); } if (volumeID) afs_osi_Free(volumeID, 2 * vsize * sizeof(*volumeID)); } /*afs_CheckVolumeNames */
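/*
 * inVolList, used above, is just a linear membership test over the
 * (volumeID, cellID) pairs collected in the first pass over afs_volumes.
 * A sketch of the idea with a simplified fid type (the real test matches
 * on the vcache's mount-point fid):
 */
struct sketch_volfid { int cell, volume; };

static int in_vol_list(const struct sketch_volfid *fid, int nvols,
                       const int *volumeID, const int *cellID)
{
    int i;

    for (i = 0; i < nvols; i++)
        if (fid->volume == volumeID[i] && fid->cell == cellID[i])
            return 1;
    return 0;
}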
osi_UFSOpen(afs_int32 ainode) #endif { register struct osi_file *afile = NULL; extern int cacheDiskType; struct inode *tip = NULL; struct dentry *dp = NULL; struct file *filp = NULL; #if !defined(HAVE_IGET) || defined(LINUX_USE_FH) struct fid fid; #endif AFS_STATCNT(osi_UFSOpen); if (cacheDiskType != AFS_FCACHE_TYPE_UFS) { osi_Panic("UFSOpen called for non-UFS cache\n"); } if (!afs_osicred_initialized) { /* valid for alpha_osf, SunOS, Ultrix */ memset((char *)&afs_osi_cred, 0, sizeof(struct AFS_UCRED)); crhold(&afs_osi_cred); /* don't let it evaporate, since it is static */ afs_osicred_initialized = 1; } afile = (struct osi_file *)osi_AllocLargeSpace(sizeof(struct osi_file)); AFS_GUNLOCK(); if (!afile) { osi_Panic("osi_UFSOpen: Failed to allocate %d bytes for osi_file.\n", sizeof(struct osi_file)); } memset(afile, 0, sizeof(struct osi_file)); #if defined(HAVE_IGET) tip = iget(afs_cacheSBp, (u_long) ainode); if (!tip) osi_Panic("Can't get inode %d\n", ainode); dp = d_alloc_anon(tip); #else #if defined(LINUX_USE_FH) dp = afs_cacheSBp->s_export_op->fh_to_dentry(afs_cacheSBp, fh, sizeof(struct fid), fh_type); #else fid.i32.ino = ainode; fid.i32.gen = 0; dp = afs_cacheSBp->s_export_op->fh_to_dentry(afs_cacheSBp, &fid, sizeof(fid), FILEID_INO32_GEN); #endif if (!dp) osi_Panic("Can't get dentry\n"); tip = dp->d_inode; #endif tip->i_flags |= MS_NOATIME; /* Disable updating access times. */ #if defined(STRUCT_TASK_HAS_CRED) filp = dentry_open(dp, mntget(afs_cacheMnt), O_RDWR, current_cred()); #else filp = dentry_open(dp, mntget(afs_cacheMnt), O_RDWR); #endif if (IS_ERR(filp)) #if defined(LINUX_USE_FH) osi_Panic("Can't open file\n"); #else osi_Panic("Can't open inode %d\n", ainode); #endif afile->filp = filp; afile->size = i_size_read(FILE_INODE(filp)); AFS_GLOCK(); afile->offset = 0; afile->proc = (int (*)())0; #if defined(LINUX_USE_FH) afile->inum = tip->i_ino; /* for hint validity checking */ #else afile->inum = ainode; /* for hint validity checking */ #endif return (void *)afile; }
void *
osi_UfsOpen(afs_dcache_id_t *ainode)
{
#ifdef AFS_CACHE_VNODE_PATH
    struct vnode *vp;
#else
    struct inode *ip;
#endif
    struct osi_file *afile = NULL;
    afs_int32 code = 0;
    int dummy;
#ifdef AFS_CACHE_VNODE_PATH
    char namebuf[1024];
    struct pathname lookpn;
#endif
    struct osi_stat tstat;
    afile = osi_AllocSmallSpace(sizeof(struct osi_file));
    AFS_GUNLOCK();

    /*
     * AFS_CACHE_VNODE_PATH can be used with any file system, including ZFS
     * or tmpfs.  The ainode is not an inode number but a path.
     */
#ifdef AFS_CACHE_VNODE_PATH
    /* Cannot use vn_open or lookupname: they use the user's CRED(), and we
     * need to run as root.  So we must use the low-level lookuppnvp and
     * assume fname starts with / */
    code = pn_get_buf(ainode->ufs, AFS_UIOSYS, &lookpn, namebuf,
                      sizeof(namebuf));
    if (code != 0)
        osi_Panic("UfsOpen: pn_get_buf failed %ld %s", code, ainode->ufs);

    VN_HOLD(rootdir);           /* released in lookuppnvp */
    code = lookuppnvp(&lookpn, NULL, FOLLOW, NULL, &vp,
                      rootdir, rootdir, afs_osi_credp);
    if (code != 0)
        osi_Panic("UfsOpen: lookuppnvp failed %ld %s", code, ainode->ufs);

#ifdef AFS_SUN511_ENV
    code = VOP_OPEN(&vp, FREAD|FWRITE, afs_osi_credp, NULL);
#else
    code = VOP_OPEN(&vp, FREAD|FWRITE, afs_osi_credp);
#endif

    if (code != 0)
        osi_Panic("UfsOpen: VOP_OPEN failed %ld %s", code, ainode->ufs);
#else
    code =
        igetinode(afs_cacheVfsp, (dev_t) cacheDev.dev, ainode->ufs, &ip,
                  CRED(), &dummy);
#endif
    AFS_GLOCK();
    if (code) {
        osi_FreeSmallSpace(afile);
        osi_Panic("UfsOpen: igetinode failed %ld %s", code, ainode->ufs);
    }
#ifdef AFS_CACHE_VNODE_PATH
    afile->vnode = vp;
    code = afs_osi_Stat(afile, &tstat);
    afile->size = tstat.size;
#else
    afile->vnode = ITOV(ip);
    afile->size = VTOI(afile->vnode)->i_size;
#endif
    afile->offset = 0;
    afile->proc = (int (*)())0;
    return (void *)afile;
}
/* set the real time */ void afs_osi_SetTime(osi_timeval_t * atv) { #if defined(AFS_AIX32_ENV) struct timestruc_t t; t.tv_sec = atv->tv_sec; t.tv_nsec = atv->tv_usec * 1000; ksettimer(&t); /* Was -> settimer(TIMEOFDAY, &t); */ #elif defined(AFS_SUN55_ENV) stime(atv->tv_sec); #elif defined(AFS_SUN5_ENV) /* * To get more than second resolution we can use adjtime. The problem * is that the usecs from the server are wrong (by now) so it isn't * worth complicating the following code. */ struct stimea { time_t time; } sta; sta.time = atv->tv_sec; stime(&sta, NULL); #elif defined(AFS_SGI_ENV) struct stimea { sysarg_t time; } sta; AFS_GUNLOCK(); sta.time = atv->tv_sec; stime(&sta); AFS_GLOCK(); #elif defined(AFS_DARWIN_ENV) #ifndef AFS_DARWIN80_ENV AFS_GUNLOCK(); setthetime(atv); AFS_GLOCK(); #endif #else /* stolen from kern_time.c */ #ifndef AFS_AUX_ENV boottime.tv_sec += atv->tv_sec - time.tv_sec; #endif #ifdef AFS_HPUX_ENV { #if !defined(AFS_HPUX1122_ENV) /* drop the setting of the clock for now. spl7 is not * known on hpux11.22 */ register ulong_t s; struct timeval t; t.tv_sec = atv->tv_sec; t.tv_usec = atv->tv_usec; s = spl7(); time = t; (void)splx(s); resettodr(atv); #endif } #else { register int s; s = splclock(); time = *atv; (void)splx(s); } resettodr(); #endif #ifdef AFS_AUX_ENV logtchg(atv->tv_sec); #endif #endif /* AFS_DARWIN_ENV */ AFS_STATCNT(osi_SetTime); }
int
afs_UFSRead(register struct vcache *avc, struct uio *auio,
            struct AFS_UCRED *acred, daddr_t albn, struct buf **abpp,
            int noLock)
{
    afs_size_t totalLength;
    afs_size_t transferLength;
    afs_size_t filePos;
    afs_size_t offset, len, tlen;
    afs_int32 trimlen;
    struct dcache *tdc = 0;
    afs_int32 error;
#ifdef AFS_DARWIN80_ENV
    uio_t tuiop = NULL;
#else
    struct uio tuio;
    struct uio *tuiop = &tuio;
    struct iovec *tvec;
#endif
    struct osi_file *tfile;
    afs_int32 code;
    int trybusy = 1;
    struct vrequest treq;

    AFS_STATCNT(afs_UFSRead);
    if (avc && avc->vc_error)
        return EIO;

    AFS_DISCON_LOCK();

    /* check that we have the latest status info in the vnode cache */
    if ((code = afs_InitReq(&treq, acred))) {
        /* don't leak the discon lock on the error path */
        AFS_DISCON_UNLOCK();
        return code;
    }
    if (!noLock) {
        if (!avc)
            osi_Panic("null avc in afs_UFSRead");
        else {
            code = afs_VerifyVCache(avc, &treq);
            if (code) {
                code = afs_CheckCode(code, &treq, 11);  /* failed to get it */
                AFS_DISCON_UNLOCK();
                return code;
            }
        }
    }
#ifndef AFS_VM_RDWR_ENV
    if (AFS_NFSXLATORREQ(acred)) {
        if (!afs_AccessOK
            (avc, PRSFS_READ, &treq,
             CHECK_MODE_BITS | CMB_ALLOW_EXEC_AS_READ)) {
            AFS_DISCON_UNLOCK();
            return afs_CheckCode(EACCES, &treq, 12);
        }
    }
#endif

#ifndef AFS_DARWIN80_ENV
    tvec = (struct iovec *)osi_AllocSmallSpace(sizeof(struct iovec));
#endif
    totalLength = AFS_UIO_RESID(auio);
    filePos = AFS_UIO_OFFSET(auio);
    afs_Trace4(afs_iclSetp, CM_TRACE_READ, ICL_TYPE_POINTER, avc,
               ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(filePos), ICL_TYPE_INT32,
               totalLength, ICL_TYPE_OFFSET,
               ICL_HANDLE_OFFSET(avc->f.m.Length));
    error = 0;
    transferLength = 0;
    if (!noLock)
        ObtainReadLock(&avc->lock);
#if defined(AFS_TEXT_ENV) && !defined(AFS_VM_RDWR_ENV)
    if (avc->flushDV.high == AFS_MAXDV && avc->flushDV.low == AFS_MAXDV) {
        hset(avc->flushDV, avc->f.m.DataVersion);
    }
#endif

    if (filePos >= avc->f.m.Length) {
        if (len > AFS_ZEROS)
            len = sizeof(afs_zeros);    /* and in 0 buffer */
        len = 0;
#ifdef AFS_DARWIN80_ENV
        trimlen = len;
        tuiop = afsio_darwin_partialcopy(auio, trimlen);
#else
        afsio_copy(auio, &tuio, tvec);
        trimlen = len;
        afsio_trim(&tuio, trimlen);
#endif
        AFS_UIOMOVE(afs_zeros, trimlen, UIO_READ, tuiop, code);
    }

    while (avc->f.m.Length > 0 && totalLength > 0) {
        /* read all of the cached info */
        if (filePos >= avc->f.m.Length)
            break;              /* all done */
        if (noLock) {
            if (tdc) {
                ReleaseReadLock(&tdc->lock);
                afs_PutDCache(tdc);
            }
            tdc = afs_FindDCache(avc, filePos);
            if (tdc) {
                ObtainReadLock(&tdc->lock);
                offset = filePos - AFS_CHUNKTOBASE(tdc->f.chunk);
                len = tdc->validPos - filePos;
            }
        } else {
            /* a tricky question: does the presence of the DFFetching flag
             * mean that we're fetching the latest version of the file?  No.
             * The server could update the file as soon as the fetch
             * responsible for the setting of the DFFetching flag completes.
             *
             * However, the presence of the DFFetching flag (visible under
             * a dcache read lock since it is set and cleared only under a
             * dcache write lock) means that we're fetching as good a version
             * as was known to this client at the time of the last call to
             * afs_VerifyVCache, since the latter updates the stat cache's
             * m.DataVersion field under a vcache write lock, and from the
             * time that the DFFetching flag goes on in afs_GetDCache (before
             * the fetch starts), to the time it goes off (after the fetch
             * completes), afs_GetDCache keeps at least a read lock on the
             * vcache entry.
             *
             * This means that if the DFFetching flag is set, we can use that
             * data for any reads that must come from the current version of
             * the file (current == m.DataVersion).
* * Another way of looking at this same point is this: if we're * fetching some data and then try do an afs_VerifyVCache, the * VerifyVCache operation will not complete until after the * DFFetching flag is turned off and the dcache entry's f.versionNo * field is updated. * * Note, by the way, that if DFFetching is set, * m.DataVersion > f.versionNo (the latter is not updated until * after the fetch completes). */ if (tdc) { ReleaseReadLock(&tdc->lock); afs_PutDCache(tdc); /* before reusing tdc */ } tdc = afs_GetDCache(avc, filePos, &treq, &offset, &len, 2); #ifdef AFS_DISCON_ENV if (!tdc) { printf("Network down in afs_read"); error = ENETDOWN; break; } #endif /* AFS_DISCON_ENV */ ObtainReadLock(&tdc->lock); /* now, first try to start transfer, if we'll need the data. If * data already coming, we don't need to do this, obviously. Type * 2 requests never return a null dcache entry, btw. */ if (!(tdc->dflags & DFFetching) && !hsame(avc->f.m.DataVersion, tdc->f.versionNo)) { /* have cache entry, it is not coming in now, and we'll need new data */ tagain: if (trybusy && !afs_BBusy()) { struct brequest *bp; /* daemon is not busy */ ObtainSharedLock(&tdc->mflock, 667); if (!(tdc->mflags & DFFetchReq)) { UpgradeSToWLock(&tdc->mflock, 668); tdc->mflags |= DFFetchReq; bp = afs_BQueue(BOP_FETCH, avc, B_DONTWAIT, 0, acred, (afs_size_t) filePos, (afs_size_t) 0, tdc); if (!bp) { /* Bkg table full; retry deadlocks */ tdc->mflags &= ~DFFetchReq; trybusy = 0; /* Avoid bkg daemon since they're too busy */ ReleaseWriteLock(&tdc->mflock); goto tagain; } ConvertWToSLock(&tdc->mflock); } code = 0; ConvertSToRLock(&tdc->mflock); while (!code && tdc->mflags & DFFetchReq) { afs_Trace4(afs_iclSetp, CM_TRACE_DCACHEWAIT, ICL_TYPE_STRING, __FILE__, ICL_TYPE_INT32, __LINE__, ICL_TYPE_POINTER, tdc, ICL_TYPE_INT32, tdc->dflags); /* don't need waiting flag on this one */ ReleaseReadLock(&tdc->mflock); ReleaseReadLock(&tdc->lock); ReleaseReadLock(&avc->lock); code = afs_osi_SleepSig(&tdc->validPos); ObtainReadLock(&avc->lock); ObtainReadLock(&tdc->lock); ObtainReadLock(&tdc->mflock); } ReleaseReadLock(&tdc->mflock); if (code) { error = code; break; } } } /* now data may have started flowing in (if DFFetching is on). If * data is now streaming in, then wait for some interesting stuff. 
             */
            code = 0;
            while (!code && (tdc->dflags & DFFetching)
                   && tdc->validPos <= filePos) {
                /* too early: wait for DFFetching flag to vanish,
                 * or data to appear */
                afs_Trace4(afs_iclSetp, CM_TRACE_DCACHEWAIT, ICL_TYPE_STRING,
                           __FILE__, ICL_TYPE_INT32, __LINE__,
                           ICL_TYPE_POINTER, tdc, ICL_TYPE_INT32,
                           tdc->dflags);
                ReleaseReadLock(&tdc->lock);
                ReleaseReadLock(&avc->lock);
                code = afs_osi_SleepSig(&tdc->validPos);
                ObtainReadLock(&avc->lock);
                ObtainReadLock(&tdc->lock);
            }
            if (code) {
                error = code;
                break;
            }
            /* fetching flag gone, data is here, or we never tried
             * (BBusy for instance) */
            if (tdc->dflags & DFFetching) {
                /* still fetching, some new data is here:
                 * compute length and offset */
                offset = filePos - AFS_CHUNKTOBASE(tdc->f.chunk);
                len = tdc->validPos - filePos;
            } else {
                /* no longer fetching, verify data version (avoid new
                 * GetDCache call) */
                if (hsame(avc->f.m.DataVersion, tdc->f.versionNo)
                    && ((len = tdc->validPos - filePos) > 0)) {
                    offset = filePos - AFS_CHUNKTOBASE(tdc->f.chunk);
                } else {
                    /* don't have current data, so get it below */
                    afs_Trace3(afs_iclSetp, CM_TRACE_VERSIONNO,
                               ICL_TYPE_INT64, ICL_HANDLE_OFFSET(filePos),
                               ICL_TYPE_HYPER, &avc->f.m.DataVersion,
                               ICL_TYPE_HYPER, &tdc->f.versionNo);
                    ReleaseReadLock(&tdc->lock);
                    afs_PutDCache(tdc);
                    tdc = NULL;
                }
            }

            if (!tdc) {
                /* If we get here, it was not possible to start the
                 * background daemon.  With flag == 1 afs_GetDCache
                 * does the FetchData rpc synchronously.
                 */
                ReleaseReadLock(&avc->lock);
                tdc = afs_GetDCache(avc, filePos, &treq, &offset, &len, 1);
                ObtainReadLock(&avc->lock);
                if (tdc)
                    ObtainReadLock(&tdc->lock);
            }
        }

        if (!tdc) {
            error = EIO;
            break;
        }
        len = tdc->validPos - filePos;
        afs_Trace3(afs_iclSetp, CM_TRACE_VNODEREAD, ICL_TYPE_POINTER, tdc,
                   ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(offset),
                   ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(len));
        if (len > totalLength)
            len = totalLength;  /* will read len bytes */
        if (len <= 0) {         /* shouldn't get here if DFFetching is on */
            afs_Trace4(afs_iclSetp, CM_TRACE_VNODEREAD2, ICL_TYPE_POINTER,
                       tdc, ICL_TYPE_OFFSET,
                       ICL_HANDLE_OFFSET(tdc->validPos), ICL_TYPE_INT32,
                       tdc->f.chunkBytes, ICL_TYPE_INT32, tdc->dflags);
            /* read past the end of a chunk, may not be at next chunk yet,
             * and yet also not at eof, so may have to supply fake zeros */
            len = AFS_CHUNKTOSIZE(tdc->f.chunk) - offset;       /* bytes left in chunk addr space */
            if (len > totalLength)
                len = totalLength;      /* and still within xfr request */
            tlen = avc->f.m.Length - offset;    /* and still within file */
            if (len > tlen)
                len = tlen;
            if (len > AFS_ZEROS)
                len = sizeof(afs_zeros);        /* and in 0 buffer */
#ifdef AFS_DARWIN80_ENV
            trimlen = len;
            tuiop = afsio_darwin_partialcopy(auio, trimlen);
#else
            afsio_copy(auio, &tuio, tvec);
            trimlen = len;
            afsio_trim(&tuio, trimlen);
#endif
            AFS_UIOMOVE(afs_zeros, trimlen, UIO_READ, tuiop, code);
            if (code) {
                error = code;
                break;
            }
        } else {
            /* get the data from the file */
#ifdef IHINT
            if (tfile = tdc->ihint) {
                if (tdc->f.inode != tfile->inum) {
                    afs_warn("afs_UFSRead: %x hint mismatch tdc %d inum %d\n",
                             tdc, tdc->f.inode, tfile->inum);
                    osi_UFSClose(tfile);
                    tdc->ihint = tfile = 0;
                    nihints--;
                }
            }
            if (tfile != 0) {
                usedihint++;
            } else
#endif /* IHINT */
#if defined(LINUX_USE_FH)
                tfile = (struct osi_file *)osi_UFSOpen_fh(&tdc->f.fh,
                                                          tdc->f.fh_type);
#else
                tfile = (struct osi_file *)osi_UFSOpen(tdc->f.inode);
#endif
#ifdef AFS_DARWIN80_ENV
            trimlen = len;
            tuiop = afsio_darwin_partialcopy(auio, trimlen);
            uio_setoffset(tuiop, offset);
#else
            /* mung uio structure to be right for this transfer */
            afsio_copy(auio, &tuio, tvec);
            trimlen = len;
afsio_trim(&tuio, trimlen); tuio.afsio_offset = offset; #endif #if defined(AFS_AIX41_ENV) AFS_GUNLOCK(); code = VNOP_RDWR(tfile->vnode, UIO_READ, FREAD, &tuio, NULL, NULL, NULL, afs_osi_credp); AFS_GLOCK(); #elif defined(AFS_AIX32_ENV) code = VNOP_RDWR(tfile->vnode, UIO_READ, FREAD, &tuio, NULL, NULL); /* Flush all JFS pages now for big performance gain in big file cases * If we do something like this, must check to be sure that AFS file * isn't mmapped... see afs_gn_map() for why. */ /* if (tfile->vnode->v_gnode && tfile->vnode->v_gnode->gn_seg) { many different ways to do similar things: so far, the best performing one is #2, but #1 might match it if we straighten out the confusion regarding which pages to flush. It really does matter. 1. vm_flushp(tfile->vnode->v_gnode->gn_seg, 0, len/PAGESIZE - 1); 2. vm_releasep(tfile->vnode->v_gnode->gn_seg, offset/PAGESIZE, (len + PAGESIZE-1)/PAGESIZE); 3. vms_inactive(tfile->vnode->v_gnode->gn_seg) Doesn't work correctly 4. vms_delete(tfile->vnode->v_gnode->gn_seg) probably also fails tfile->vnode->v_gnode->gn_seg = NULL; 5. deletep 6. ipgrlse 7. ifreeseg Unfortunately, this seems to cause frequent "cache corruption" episodes. vm_releasep(tfile->vnode->v_gnode->gn_seg, offset/PAGESIZE, (len + PAGESIZE-1)/PAGESIZE); } */ #elif defined(AFS_AIX_ENV) code = VNOP_RDWR(tfile->vnode, UIO_READ, FREAD, (off_t) & offset, &tuio, NULL, NULL, -1); #elif defined(AFS_SUN5_ENV) AFS_GUNLOCK(); #ifdef AFS_SUN510_ENV { caller_context_t ct; VOP_RWLOCK(tfile->vnode, 0, &ct); code = VOP_READ(tfile->vnode, &tuio, 0, afs_osi_credp, &ct); VOP_RWUNLOCK(tfile->vnode, 0, &ct); } #else VOP_RWLOCK(tfile->vnode, 0); code = VOP_READ(tfile->vnode, &tuio, 0, afs_osi_credp); VOP_RWUNLOCK(tfile->vnode, 0); #endif AFS_GLOCK(); #elif defined(AFS_SGI_ENV) AFS_GUNLOCK(); AFS_VOP_RWLOCK(tfile->vnode, VRWLOCK_READ); AFS_VOP_READ(tfile->vnode, &tuio, IO_ISLOCKED, afs_osi_credp, code); AFS_VOP_RWUNLOCK(tfile->vnode, VRWLOCK_READ); AFS_GLOCK(); #elif defined(AFS_OSF_ENV) tuio.uio_rw = UIO_READ; AFS_GUNLOCK(); VOP_READ(tfile->vnode, &tuio, 0, afs_osi_credp, code); AFS_GLOCK(); #elif defined(AFS_HPUX100_ENV) AFS_GUNLOCK(); code = VOP_RDWR(tfile->vnode, &tuio, UIO_READ, 0, afs_osi_credp); AFS_GLOCK(); #elif defined(AFS_LINUX20_ENV) AFS_GUNLOCK(); code = osi_rdwr(tfile, &tuio, UIO_READ); AFS_GLOCK(); #elif defined(AFS_DARWIN80_ENV) AFS_GUNLOCK(); code = VNOP_READ(tfile->vnode, tuiop, 0, afs_osi_ctxtp); AFS_GLOCK(); #elif defined(AFS_DARWIN_ENV) AFS_GUNLOCK(); VOP_LOCK(tfile->vnode, LK_EXCLUSIVE, current_proc()); code = VOP_READ(tfile->vnode, &tuio, 0, afs_osi_credp); VOP_UNLOCK(tfile->vnode, 0, current_proc()); AFS_GLOCK(); #elif defined(AFS_FBSD80_ENV) AFS_GUNLOCK(); VOP_LOCK(tfile->vnode, LK_EXCLUSIVE); code = VOP_READ(tfile->vnode, &tuio, 0, afs_osi_credp); VOP_UNLOCK(tfile->vnode, 0); AFS_GLOCK(); #elif defined(AFS_FBSD50_ENV) AFS_GUNLOCK(); VOP_LOCK(tfile->vnode, LK_EXCLUSIVE, curthread); code = VOP_READ(tfile->vnode, &tuio, 0, afs_osi_credp); VOP_UNLOCK(tfile->vnode, 0, curthread); AFS_GLOCK(); #elif defined(AFS_XBSD_ENV) AFS_GUNLOCK(); VOP_LOCK(tfile->vnode, LK_EXCLUSIVE, curproc); code = VOP_READ(tfile->vnode, &tuio, 0, afs_osi_credp); VOP_UNLOCK(tfile->vnode, 0, curproc); AFS_GLOCK(); #else code = VOP_RDWR(tfile->vnode, &tuio, UIO_READ, 0, afs_osi_credp); #endif #ifdef IHINT if (!tdc->ihint && nihints < maxIHint) { tdc->ihint = tfile; nihints++; } else #endif /* IHINT */ osi_UFSClose(tfile); if (code) { error = code; break; } } /* otherwise we've read some, fixup length, etc and continue 
with next seg */ len = len - AFS_UIO_RESID(tuiop); /* compute amount really transferred */ trimlen = len; afsio_skip(auio, trimlen); /* update input uio structure */ totalLength -= len; transferLength += len; filePos += len; if (len <= 0) break; /* surprise eof */ #ifdef AFS_DARWIN80_ENV if (tuiop) { uio_free(tuiop); tuiop = 0; } #endif } /* if we make it here with tdc non-zero, then it is the last chunk we * dealt with, and we have to release it when we're done. We hold on * to it in case we need to do a prefetch, obviously. */ if (tdc) { ReleaseReadLock(&tdc->lock); #if !defined(AFS_VM_RDWR_ENV) /* try to queue prefetch, if needed */ if (!noLock) { if (!(tdc->mflags & DFNextStarted)) afs_PrefetchChunk(avc, tdc, acred, &treq); } #endif afs_PutDCache(tdc); } if (!noLock) ReleaseReadLock(&avc->lock); #ifdef AFS_DARWIN80_ENV if (tuiop) uio_free(tuiop); #else osi_FreeSmallSpace(tvec); #endif AFS_DISCON_UNLOCK(); error = afs_CheckCode(error, &treq, 13); return error; }
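/*
 * The offset/length bookkeeping in the read loop above is all chunk
 * arithmetic: a file position maps to a chunk, an offset within that
 * chunk, and a maximum contiguous length.  A sketch using one fixed,
 * illustrative chunk size (the real AFS_CHUNK* macros support variable
 * chunk sizes); validPos is assumed to be at or beyond filePos, as the
 * loop above guarantees.
 */
#define SK_CHUNK_SHIFT 18                         /* 256 KiB chunks, say */
#define SK_CHUNK_SIZE  (1L << SK_CHUNK_SHIFT)
#define SK_CHUNK_BASE(pos) ((pos) & ~(SK_CHUNK_SIZE - 1))

static void chunk_span(long filePos, long validPos,
                       long *offset, long *len)
{
    *offset = filePos - SK_CHUNK_BASE(filePos);  /* offset within chunk */
    *len = validPos - filePos;                   /* readable bytes so far */
    if (*len > SK_CHUNK_SIZE - *offset)          /* don't cross the chunk */
        *len = SK_CHUNK_SIZE - *offset;
}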
static struct dentry *afs_decode_fh(struct super_block *sb, __u32 *fh,
                                    int fh_len, int fh_type,
                                    int (*acceptable)(void *, struct dentry *),
                                    void *context)
#endif
{
    struct VenusFid fid;
    struct cell *tc;
    struct dentry *result;
#if defined(NEW_EXPORT_OPS)
    __u32 *fh = (__u32 *)fh_fid->raw;
#endif

    switch (fh_type) {
    case AFSFH_VENUSFID:
        if (fh_len != 4)
            return NULL;
        fid.Cell = fh[0];
        fid.Fid.Volume = fh[1];
        fid.Fid.Vnode = fh[2];
        fid.Fid.Unique = fh[3];
        break;

    case AFSFH_CELLFID:
        if (fh_len != 7)
            return NULL;
        AFS_GLOCK();
        tc = afs_GetCellByHandle((void *)fh, READ_LOCK);
        if (!tc) {
            AFS_GUNLOCK();
            return NULL;
        }
        fid.Cell = tc->cellNum;
        fid.Fid.Volume = fh[4];
        fid.Fid.Vnode = fh[5];
        fid.Fid.Unique = fh[6];
        afs_PutCell(tc, READ_LOCK);
        AFS_GUNLOCK();
        break;

    case AFSFH_NET_VENUSFID:
        if (fh_len != 4)        /* same length check as AFSFH_VENUSFID */
            return NULL;
        fid.Cell = ntohl(fh[0]);
        fid.Fid.Volume = ntohl(fh[1]);
        fid.Fid.Vnode = ntohl(fh[2]);
        fid.Fid.Unique = ntohl(fh[3]);
        break;

    case AFSFH_NET_CELLFID:
        if (fh_len != 7)
            return NULL;
        AFS_GLOCK();
        tc = afs_GetCellByHandle((void *)fh, READ_LOCK);
        if (!tc) {
            AFS_GUNLOCK();
            return NULL;
        }
        fid.Cell = tc->cellNum;
        fid.Fid.Volume = ntohl(fh[4]);
        fid.Fid.Vnode = ntohl(fh[5]);
        fid.Fid.Unique = ntohl(fh[6]);
        afs_PutCell(tc, READ_LOCK);
        AFS_GUNLOCK();
        break;

    case AFSFH_DYN_RO_CELL:
    case AFSFH_DYN_RW_CELL:
        if (fh_len != 4)
            return NULL;
        AFS_GLOCK();
        tc = afs_GetCellByHandle((void *)fh, READ_LOCK);
        if (!tc) {
            AFS_GUNLOCK();
            return NULL;
        }
        afs_GetDynrootFid(&fid);
        fid.Fid.Vnode = VNUM_FROM_CIDX_RW(tc->cellIndex, fh_type & 1);
        fid.Fid.Unique = 1;
        afs_PutCell(tc, READ_LOCK);
        AFS_GUNLOCK();
        break;

    case AFSFH_DYN_RO_LINK:
    case AFSFH_DYN_RW_LINK:
        if (fh_len != 4)
            return NULL;
        AFS_GLOCK();
        tc = afs_GetCellByHandle((void *)fh, READ_LOCK);
        if (!tc) {
            AFS_GUNLOCK();
            return NULL;
        }
        afs_GetDynrootFid(&fid);
        fid.Fid.Vnode = VNUM_FROM_CAIDX_RW(tc->cellIndex, fh_type & 1);
        fid.Fid.Unique = 1;
        afs_PutCell(tc, READ_LOCK);
        AFS_GUNLOCK();
        break;

    case AFSFH_DYN_MOUNT:
        if (fh_len != 5)
            return NULL;
        AFS_GLOCK();
        tc = afs_GetCellByHandle((void *)fh, READ_LOCK);
        if (!tc) {
            AFS_GUNLOCK();
            return NULL;
        }
        afs_GetDynrootFid(&fid);
        fid.Fid.Vnode = VNUM_FROM_TYPEID(VN_TYPE_MOUNT, tc->cellIndex << 2);
        fid.Fid.Unique = ntohl(fh[4]);
        afs_PutCell(tc, READ_LOCK);
        AFS_GUNLOCK();
        break;

    case AFSFH_DYN_SYMLINK:
        /* XXX parse dynroot symlink filehandle */
        /* break; */

    default:
        return NULL;
    }

#if defined(NEW_EXPORT_OPS)
    result = afs_export_get_dentry(sb, &fid);
#else
    result = sb->s_export_op->find_exported_dentry(sb, &fid, 0,
                                                   acceptable, context);
#endif

#ifdef OSI_EXPORT_DEBUG
    if (!result) {
        printk("afs: decode_fh(0x%08x/%d/%d.%d): no dentry\n",
               fid.Cell, fid.Fid.Volume, fid.Fid.Vnode, fid.Fid.Unique);
    } else if (IS_ERR(result)) {
        printk("afs: decode_fh(0x%08x/%d/%d.%d): error %ld\n",
               fid.Cell, fid.Fid.Volume, fid.Fid.Vnode, fid.Fid.Unique,
               PTR_ERR(result));
    }
#endif

    return result;
}
struct buf * afs_get_bioreq() { struct buf *bp = NULL; struct buf *bestbp; struct buf **bestlbpP, **lbpP; long bestage, stop; struct buf *t1P, *t2P; /* temp pointers for list manipulation */ int oldPriority; afs_uint32 wait_ret; struct afs_bioqueue *s; /* ??? Does the forward pointer of the returned buffer need to be NULL? */ /* Disable interrupts from the strategy function, and save the * prior priority level and lock access to the afs_asyncbuf. */ AFS_GUNLOCK(); oldPriority = disable_lock(INTMAX, &afs_asyncbuf_lock); while (1) { if (afs_asyncbuf) { /* look for oldest buffer */ bp = bestbp = afs_asyncbuf; bestage = (long)bestbp->av_back; bestlbpP = &afs_asyncbuf; while (1) { lbpP = &bp->av_forw; bp = *lbpP; if (!bp) break; if ((long)bp->av_back - bestage < 0) { bestbp = bp; bestlbpP = lbpP; bestage = (long)bp->av_back; } } bp = bestbp; *bestlbpP = bp->av_forw; break; } else { /* If afs_asyncbuf is null, it is necessary to go to sleep. * e_wakeup_one() ensures that only one thread wakes. */ int interrupted; /* The LOCK_HANDLER indicates to e_sleep_thread to only drop the * lock on an MP machine. */ interrupted = e_sleep_thread(&afs_asyncbuf_cv, &afs_asyncbuf_lock, LOCK_HANDLER | INTERRUPTIBLE); if (interrupted == THREAD_INTERRUPTED) { /* re-enable interrupts from strategy */ unlock_enable(oldPriority, &afs_asyncbuf_lock); AFS_GLOCK(); return (NULL); } } /* end of "else asyncbuf is empty" */ } /* end of "inner loop" */ /*assert (bp); */ unlock_enable(oldPriority, &afs_asyncbuf_lock); AFS_GLOCK(); /* For the convenience of other code, replace the gnodes in * the b_vp field of bp and the other buffers on the b_work * chain with the corresponding vnodes. * * ??? what happens to the gnodes? They're not just cut loose, * are they? */ for (t1P = bp;;) { t2P = (struct buf *)t1P->b_work; t1P->b_vp = ((struct gnode *)t1P->b_vp)->gn_vnode; if (!t2P) break; t1P = (struct buf *)t2P->b_work; t2P->b_vp = ((struct gnode *)t2P->b_vp)->gn_vnode; if (!t1P) break; } /* If the buffer does not specify I/O, it may immediately * be returned to the caller. This condition is detected * by examining the buffer's flags (the b_flags field). If * the B_PFPROT bit is set, the buffer represents a protection * violation, rather than a request for I/O. The remainder * of the outer loop handles the case where the B_PFPROT bit is clear. */ if (bp->b_flags & B_PFPROT) { return (bp); } return (bp); } /* end of function get_bioreq() */
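/*
 * The age scan above keeps a pointer to the list link of the best buffer,
 * so the winner can be unlinked without a second pass, and compares ages
 * with a subtraction so that counter wrap-around still orders correctly.
 * The same technique on a plain singly linked list, with a hypothetical
 * sketch_buf type (age stands in for the (long)bp->av_back timestamp):
 */
#include <stddef.h>

struct sketch_buf {
    long age;
    struct sketch_buf *next;
};

static struct sketch_buf *pop_oldest(struct sketch_buf **head)
{
    struct sketch_buf **bestp = head, *bp;

    if (!*head)
        return NULL;
    for (bp = *head; bp->next; bp = bp->next)   /* track link to the best */
        if (bp->next->age - (*bestp)->age < 0)  /* wrap-safe comparison */
            bestp = &bp->next;
    bp = *bestp;
    *bestp = bp->next;          /* unlink the oldest in place */
    return bp;
}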
/* * Given a FID, obtain or construct a dentry, or return an error. * This should be called with the BKL and AFS_GLOCK held. */ static struct dentry *get_dentry_from_fid(cred_t *credp, struct VenusFid *afid) { struct vrequest treq; struct vcache *vcp; struct vattr vattr; struct inode *ip; struct dentry *dp; afs_int32 code; code = afs_InitReq(&treq, credp); if (code) { #ifdef OSI_EXPORT_DEBUG printk("afs: get_dentry_from_fid(0x%08x/%d/%d.%d): afs_InitReq: %d\n", afid->Cell, afid->Fid.Volume, afid->Fid.Vnode, afid->Fid.Unique, code); #endif return ERR_PTR(-afs_CheckCode(code, &treq, 101)); } vcp = afs_GetVCache(afid, &treq, NULL, NULL); if (vcp == NULL) { #ifdef OSI_EXPORT_DEBUG printk("afs: get_dentry_from_fid(0x%08x/%d/%d.%d): no vcache\n", afid->Cell, afid->Fid.Volume, afid->Fid.Vnode, afid->Fid.Unique); #endif return NULL; } /* * Now, it might be that we just caused a directory vnode to * spring into existence, in which case its parent FID is unset. * We need to do something about that, but only because we care * in our own get_parent(), below -- the common code never looks * at parentVnode on directories, except for VIOCGETVCXSTATUS. * So, if this fails, we don't really care very much. */ if (vType(vcp) == VDIR && vcp->mvstat != 2 && !vcp->f.parent.vnode) update_dir_parent(&treq, vcp); /* * If this is a volume root directory and fakestat is enabled, * we might need to replace the directory by a mount point. */ code = UnEvalFakeStat(&treq, &vcp); if (code) { #ifdef OSI_EXPORT_DEBUG printk("afs: get_dentry_from_fid(0x%08x/%d/%d.%d): UnEvalFakeStat: %d\n", afid->Cell, afid->Fid.Volume, afid->Fid.Vnode, afid->Fid.Unique, code); #endif afs_PutVCache(vcp); return ERR_PTR(-afs_CheckCode(code, &treq, 101)); } ip = AFSTOV(vcp); afs_getattr(vcp, &vattr, credp); afs_fill_inode(ip, &vattr); /* d_alloc_anon might block, so we shouldn't hold the glock */ AFS_GUNLOCK(); dp = d_alloc_anon(ip); AFS_GLOCK(); if (!dp) { iput(ip); #ifdef OSI_EXPORT_DEBUG printk("afs: get_dentry_from_fid(0x%08x/%d/%d.%d): out of memory\n", afid->Cell, afid->Fid.Volume, afid->Fid.Vnode, afid->Fid.Unique); #endif return ERR_PTR(-ENOMEM); } dp->d_op = &afs_dentry_operations; return dp; }
int
afs_BioDaemon(afs_int32 nbiods)
{
    afs_int32 code, s, pflg = 0;
    label_t jmpbuf;
    struct buf *bp, *bp1, *tbp1, *tbp2; /* temp pointers only */
    caddr_t tmpaddr;
    struct vnode *vp;
    struct vcache *vcp;
    char tmperr;
    if (!afs_initbiod) {
        /* XXX ###1 XXX */
        afs_initbiod = 1;
        /* pin lock, since we'll be using it in an interrupt. */
        lock_alloc(&afs_asyncbuf_lock, LOCK_ALLOC_PIN, 2, 1);
        simple_lock_init(&afs_asyncbuf_lock);
        pin(&afs_asyncbuf, sizeof(struct buf *));
        pin(&afs_asyncbuf_cv, sizeof(afs_int32));
    }

    /* Ignore HUP signals... */
    {
        sigset_t sigbits, osigbits;
        /*
         * add SIGHUP to the set of already masked signals
         */
        SIGFILLSET(sigbits);            /* allow all signals */
        SIGDELSET(sigbits, SIGHUP);     /* except SIGHUP */
        limit_sigs(&sigbits, &osigbits);        /* and already masked */
    }

    /* Main body starts here -- this is an intentional infinite loop, and
     * should NEVER exit
     *
     * Now, the loop will exit if get_bioreq() returns NULL, indicating
     * that we've been interrupted.
     */
    while (1) {
        bp = afs_get_bioreq();
        if (!bp)
            break;              /* we were interrupted */
        if (code = setjmpx(&jmpbuf)) {
            /* This should not have happened, maybe a lack of resources */
            AFS_GUNLOCK();
            s = disable_lock(INTMAX, &afs_asyncbuf_lock);
            for (bp1 = bp; bp; bp = bp1) {
                if (bp1)
                    bp1 = (struct buf *)bp1->b_work;
                bp->b_actf = 0;
                bp->b_error = code;
                bp->b_flags |= B_ERROR;
                iodone(bp);
            }
            unlock_enable(s, &afs_asyncbuf_lock);
            AFS_GLOCK();
            continue;
        }
        vcp = VTOAFS(bp->b_vp);
        if (bp->b_flags & B_PFSTORE) {  /* XXXX */
            ObtainWriteLock(&vcp->lock, 404);
            if (vcp->v.v_gnode->gn_mwrcnt) {
                afs_offs_t newlength =
                    (afs_offs_t) dbtob(bp->b_blkno) + bp->b_bcount;
                if (vcp->f.m.Length < newlength) {
                    afs_Trace4(afs_iclSetp, CM_TRACE_SETLENGTH,
                               ICL_TYPE_STRING, __FILE__, ICL_TYPE_LONG,
                               __LINE__, ICL_TYPE_OFFSET,
                               ICL_HANDLE_OFFSET(vcp->f.m.Length),
                               ICL_TYPE_OFFSET,
                               ICL_HANDLE_OFFSET(newlength));
                    vcp->f.m.Length = newlength;
                }
            }
            ReleaseWriteLock(&vcp->lock);
        }
        /* If the buffer represents a protection violation, rather than
         * an actual request for I/O, no special action need be taken.
         */
        if (bp->b_flags & B_PFPROT) {
            iodone(bp);         /* Notify all users of the buffer that we're done */
            clrjmpx(&jmpbuf);
            continue;
        }
        if (DOvmlock)
            ObtainWriteLock(&vcp->pvmlock, 211);
        /*
         * First map its data area to a region in the current address space
         * by calling vm_att with the subspace identifier, and a pointer to
         * the data area.  vm_att returns a new data area pointer, but we
         * also want to hang onto the old one.
         */
        tmpaddr = bp->b_baddr;
        bp->b_baddr = (caddr_t) vm_att(bp->b_xmemd.subspace_id, tmpaddr);
        tmperr = afs_ustrategy(bp);     /* temp variable saves offset calculation */
        if (tmperr) {           /* error case */
            bp->b_flags |= B_ERROR;     /* should other flags remain set ??? */
            bp->b_error = tmperr;
        }
        /* Unmap the buffer's data area by calling vm_det.  Reset data area
         * to the value that we saved above.
         */
        vm_det(bp->b_baddr);
        bp->b_baddr = tmpaddr;

        /*
         * buffer may be linked with other buffers via the b_work field.
         * See also afs_gn_strategy.  For each buffer in the chain (including
         * bp) notify all users of the buffer that the daemon is finished
         * using it by calling iodone.
         * assumes iodone can modify the b_work field.
         */
        for (tbp1 = bp;;) {
            tbp2 = (struct buf *)tbp1->b_work;
            iodone(tbp1);
            if (!tbp2)
                break;

            tbp1 = (struct buf *)tbp2->b_work;
            iodone(tbp2);
            if (!tbp1)
                break;
        }
        if (DOvmlock)
            ReleaseWriteLock(&vcp->pvmlock);    /* Unlock the vnode. */
        clrjmpx(&jmpbuf);
    }                           /* infinite loop (unless we're interrupted) */
}                               /* end of afs_BioDaemon() */
static int afs_encode_fh(struct dentry *de, __u32 *fh, int *max_len, int connectable) { struct vcache *tvc; struct cell *tc; int vntype; if (!de->d_inode) /* encode a negative dentry?! */ return 255; if (*max_len < 4) /* not enough space */ return 255; tvc = VTOAFS(de->d_inode); #ifdef OSI_EXPORT_DEBUG printk("afs: encode_fh(0x%08x/%d/%d.%d)\n", tvc->f.fid.Cell, tvc->f.fid.Fid.Volume, tvc->f.fid.Fid.Vnode, tvc->f.fid.Fid.Unique); #endif if (afs_IsDynrootAnyFid(&tvc->f.fid)) { vntype = VNUM_TO_VNTYPE(tvc->f.fid.Fid.Vnode); switch (vntype) { case 0: /* encode as a normal filehandle */ break; case VN_TYPE_MOUNT: if (*max_len < 5) { return 255; } /* fall through */ case VN_TYPE_CELL: case VN_TYPE_ALIAS: AFS_GLOCK(); tc = afs_GetCellByIndex(VNUM_TO_CIDX(tvc->f.fid.Fid.Vnode), READ_LOCK); if (!tc) { AFS_GUNLOCK(); return 255; } memcpy((void *)fh, tc->cellHandle, 16); afs_PutCell(tc, READ_LOCK); AFS_GUNLOCK(); if (vntype == VN_TYPE_MOUNT) { fh[4] = htonl(tvc->f.fid.Fid.Unique); *max_len = 5; return AFSFH_DYN_MOUNT; } *max_len = 4; if (vntype == VN_TYPE_CELL) { return AFSFH_DYN_RO_CELL | VNUM_TO_RW(tvc->f.fid.Fid.Vnode); } else { return AFSFH_DYN_RO_LINK | VNUM_TO_RW(tvc->f.fid.Fid.Vnode); } case VN_TYPE_SYMLINK: /* XXX fill in filehandle for dynroot symlink */ /* XXX return AFSFH_DYN_SYMLINK; */ default: return 255; } } if (*max_len < 7) { /* not big enough for a migratable filehandle */ /* always encode in network order */ fh[0] = htonl(tvc->f.fid.Cell); fh[1] = htonl(tvc->f.fid.Fid.Volume); fh[2] = htonl(tvc->f.fid.Fid.Vnode); fh[3] = htonl(tvc->f.fid.Fid.Unique); *max_len = 4; return AFSFH_NET_VENUSFID; } AFS_GLOCK(); tc = afs_GetCell(tvc->f.fid.Cell, READ_LOCK); if (!tc) { AFS_GUNLOCK(); return 255; } memcpy((void *)fh, tc->cellHandle, 16); afs_PutCell(tc, READ_LOCK); AFS_GUNLOCK(); /* always encode in network order */ fh[4] = htonl(tvc->f.fid.Fid.Volume); fh[5] = htonl(tvc->f.fid.Fid.Vnode); fh[6] = htonl(tvc->f.fid.Fid.Unique); *max_len = 7; return AFSFH_NET_CELLFID; }
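/*
 * A userspace sketch of the two non-dynroot filehandle layouts that
 * afs_encode_fh() produces: a 4-word network-order VenusFid (cell
 * number, volume, vnode, unique) when the buffer is small, and a
 * 7-word "migratable" handle that replaces the cell number with the
 * 16-byte cell handle.  The struct and function names here are
 * illustrative stand-ins, not OpenAFS API.
 */
#include <stdio.h>
#include <string.h>
#include <stdint.h>
#include <arpa/inet.h>

struct venus_fid {                 /* stand-in for struct VenusFid */
    uint32_t cell, volume, vnode, unique;
};

/* the short variant; returns the number of 32-bit words used */
static int encode_net_venusfid(const struct venus_fid *fid, uint32_t *fh)
{
    fh[0] = htonl(fid->cell);
    fh[1] = htonl(fid->volume);
    fh[2] = htonl(fid->vnode);
    fh[3] = htonl(fid->unique);
    return 4;
}

/* the long variant: 16-byte cell handle, then volume/vnode/unique */
static int encode_net_cellfid(const unsigned char cellHandle[16],
                              const struct venus_fid *fid, uint32_t *fh)
{
    memcpy(fh, cellHandle, 16);    /* fills fh[0..3] */
    fh[4] = htonl(fid->volume);
    fh[5] = htonl(fid->vnode);
    fh[6] = htonl(fid->unique);
    return 7;
}

int main(void)
{
    struct venus_fid fid = { 1, 536870912, 2, 7 };
    unsigned char handle[16] = { 0 };
    uint32_t fh[7];
    int n;

    n = encode_net_venusfid(&fid, fh);
    printf("short handle: %d words, vnode=%u\n", n, ntohl(fh[2]));
    n = encode_net_cellfid(handle, &fid, fh);
    printf("long handle:  %d words, volume=%u\n", n, ntohl(fh[4]));
    return 0;
}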
/* this call, unlike osi_FlushText, is supposed to discard caches that may
   contain invalid information if a file is written remotely, but that may
   contain valid information that needs to be written back if the file is
   being written locally.  It doesn't subsume osi_FlushText, since the
   latter function may be needed to flush caches that are invalidated by
   local writes.

   avc->pvnLock is already held, avc->lock is guaranteed not to be held
   (by us, of course). */
void
osi_FlushPages(struct vcache *avc, afs_ucred_t *credp)
{
    afs_hyper_t origDV;
#if defined(AFS_CACHE_BYPASS)
    /* The optimization to check DV under read lock below is identical to a
     * change in the CITI cache bypass work.  The problem CITI found in 1999
     * was that this code and the background daemon doing prefetching
     * competed for the vcache entry shared lock.  It's not clear to me from
     * the tech report, but it looks like CITI fixed the general prefetch
     * code path as a bonus when experimenting on prefetch for cache bypass,
     * see citi-tr-01-3.
     */
#endif
    if (vType(avc) == VDIR) {
	/* not applicable to directories; they're never mapped or stored in
	 * pages */
	return;
    }
    ObtainReadLock(&avc->lock);
    /* If we've already purged this version, or if we're the ones
     * writing this version, don't flush it (could lose the
     * data we're writing). */
    if ((hcmp((avc->f.m.DataVersion), (avc->mapDV)) <= 0)
	|| ((avc->execsOrWriters > 0) && afs_DirtyPages(avc))) {
	ReleaseReadLock(&avc->lock);
	return;
    }
    ReleaseReadLock(&avc->lock);
    ObtainWriteLock(&avc->lock, 10);
    /* Check again */
    if ((hcmp((avc->f.m.DataVersion), (avc->mapDV)) <= 0)
	|| ((avc->execsOrWriters > 0) && afs_DirtyPages(avc))) {
	ReleaseWriteLock(&avc->lock);
	return;
    }

    /* At this point, you might think that we can skip trying to flush pages
     * if mapDV is zero, since a file with a zero DV will not have any data
     * in it.  However, some platforms (notably Linux 2.6.22+) will keep a
     * page full of zeroes around for an empty file.  So play it safe and
     * always flush pages. */

    AFS_STATCNT(osi_FlushPages);
    hset(origDV, avc->f.m.DataVersion);
    afs_Trace3(afs_iclSetp, CM_TRACE_FLUSHPAGES, ICL_TYPE_POINTER, avc,
	       ICL_TYPE_INT32, origDV.low, ICL_TYPE_INT32, avc->f.m.Length);

    ReleaseWriteLock(&avc->lock);
#ifndef AFS_FBSD70_ENV
    AFS_GUNLOCK();
#endif
    osi_VM_FlushPages(avc, credp);
#ifndef AFS_FBSD70_ENV
    AFS_GLOCK();
#endif
    ObtainWriteLock(&avc->lock, 88);

    /* do this last, and to original version, since stores may occur
     * while executing above PUTPAGE call */
    hset(avc->mapDV, origDV);
    ReleaseWriteLock(&avc->lock);
}
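/*
 * A userspace sketch of the check-then-recheck locking pattern in
 * osi_FlushPages(): test the condition cheaply under a read lock, and
 * only if action seems needed take the write lock and test again,
 * because another thread may have done the work in the window between
 * the two locks.  Generic pthreads; nothing here is OpenAFS API.
 */
#include <stdio.h>
#include <pthread.h>

static pthread_rwlock_t lock = PTHREAD_RWLOCK_INITIALIZER;
static int flushed_version = 0;     /* plays the role of mapDV */
static int current_version = 3;     /* plays the role of DataVersion */

static void maybe_flush(void)
{
    pthread_rwlock_rdlock(&lock);
    if (flushed_version >= current_version) {   /* nothing to do */
        pthread_rwlock_unlock(&lock);
        return;
    }
    pthread_rwlock_unlock(&lock);

    pthread_rwlock_wrlock(&lock);
    if (flushed_version >= current_version) {   /* lost the race; done */
        pthread_rwlock_unlock(&lock);
        return;
    }
    printf("flushing up to version %d\n", current_version);
    flushed_version = current_version;
    pthread_rwlock_unlock(&lock);
}

int main(void)
{
    maybe_flush();   /* performs the flush */
    maybe_flush();   /* finds it already done and returns early */
    return 0;
}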
struct vcache * osi_dnlc_lookup(struct vcache *adp, char *aname, int locktype) { struct vcache *tvc; unsigned int key, skey; char *ts = aname; struct nc *tnc; int safety; #ifdef AFS_DARWIN80_ENV vnode_t tvp; #endif if (!afs_usednlc) return 0; dnlcHash(ts, key); /* leaves ts pointing at the NULL */ if (ts - aname >= AFSNCNAMESIZE) return 0; skey = key & (NHSIZE - 1); TRACE(osi_dnlc_lookupT, skey); dnlcstats.lookups++; ObtainReadLock(&afs_xvcache); ObtainReadLock(&afs_xdnlc); for (tvc = NULL, tnc = nameHash[skey], safety = 0; tnc; tnc = tnc->next, safety++) { if ( /* (tnc->key == key) && */ (tnc->dirp == adp) && (!strcmp((char *)tnc->name, aname))) { tvc = tnc->vp; break; } else if (tnc->next == nameHash[skey]) { /* end of list */ break; } else if (safety > NCSIZE) { afs_warn("DNLC cycle"); dnlcstats.cycles++; ReleaseReadLock(&afs_xdnlc); ReleaseReadLock(&afs_xvcache); osi_dnlc_purge(); return (0); } } ReleaseReadLock(&afs_xdnlc); if (!tvc) { ReleaseReadLock(&afs_xvcache); dnlcstats.misses++; } else { if ((tvc->f.states & CVInit) #ifdef AFS_DARWIN80_ENV ||(tvc->f.states & CDeadVnode) #endif ) { ReleaseReadLock(&afs_xvcache); dnlcstats.misses++; osi_dnlc_remove(adp, aname, tvc); return 0; } #if defined(AFS_DARWIN80_ENV) tvp = AFSTOV(tvc); if (vnode_get(tvp)) { ReleaseReadLock(&afs_xvcache); dnlcstats.misses++; osi_dnlc_remove(adp, aname, tvc); return 0; } if (vnode_ref(tvp)) { ReleaseReadLock(&afs_xvcache); AFS_GUNLOCK(); vnode_put(tvp); AFS_GLOCK(); dnlcstats.misses++; osi_dnlc_remove(adp, aname, tvc); return 0; } #else osi_vnhold(tvc, 0); #endif ReleaseReadLock(&afs_xvcache); } return tvc; }
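/*
 * A userspace sketch of the circular-chain walk in osi_dnlc_lookup():
 * each nameHash bucket is a circular list, so "end of list" is detected
 * by seeing the head again, and a safety counter bounds the walk so a
 * corrupted chain degrades into an error (and, in the real code, a
 * purge) instead of a hang.  The node type and names are hypothetical.
 */
#include <stdio.h>
#include <string.h>

#define NCSIZE 4   /* deliberately tiny for the demo */

struct nc {
    const char *name;
    struct nc *next;
};

static const char *chain_lookup(struct nc *head, const char *name)
{
    struct nc *tnc;
    int safety = 0;

    for (tnc = head; tnc; tnc = tnc->next, safety++) {
        if (strcmp(tnc->name, name) == 0)
            return tnc->name;               /* hit */
        if (tnc->next == head)              /* wrapped around: end of list */
            return NULL;
        if (safety > NCSIZE) {              /* chain is corrupt: bail out */
            fprintf(stderr, "DNLC cycle\n");
            return NULL;
        }
    }
    return NULL;
}

int main(void)
{
    struct nc c = { "gamma", NULL }, b = { "beta", &c }, a = { "alpha", &b };
    c.next = &a;    /* close the circle */

    printf("found: %s\n", chain_lookup(&a, "beta"));
    printf("delta: %s\n", chain_lookup(&a, "delta") ? "hit" : "miss");
    return 0;
}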
int osi_TryEvictVCache(struct vcache *avc, int *slept, int defersleep) { int code; struct dentry *dentry; struct inode *inode = AFSTOV(avc); struct list_head *cur, *head; /* First, see if we can evict the inode from the dcache */ if (defersleep && avc != afs_globalVp && VREFCOUNT(avc) > 1 && avc->opens == 0) { *slept = 1; ReleaseWriteLock(&afs_xvcache); AFS_GUNLOCK(); #if defined(HAVE_DCACHE_LOCK) spin_lock(&dcache_lock); head = &inode->i_dentry; restart: cur = head; while ((cur = cur->next) != head) { dentry = list_entry(cur, struct dentry, d_alias); if (d_unhashed(dentry)) continue; dget_locked(dentry); spin_unlock(&dcache_lock); if (d_invalidate(dentry) == -EBUSY) { dput(dentry); /* perhaps lock and try to continue? (use cur as head?) */ goto inuse; } dput(dentry); spin_lock(&dcache_lock); goto restart; } spin_unlock(&dcache_lock); #else /* HAVE_DCACHE_LOCK */ spin_lock(&inode->i_lock); head = &inode->i_dentry; restart: cur = head; while ((cur = cur->next) != head) { dentry = list_entry(cur, struct dentry, d_alias); spin_lock(&dentry->d_lock); if (d_unhashed(dentry)) { spin_unlock(&dentry->d_lock); continue; } spin_unlock(&dentry->d_lock); dget(dentry); spin_unlock(&inode->i_lock); if (d_invalidate(dentry) == -EBUSY) { dput(dentry); /* perhaps lock and try to continue? (use cur as head?) */ goto inuse; } dput(dentry); spin_lock(&inode->i_lock); goto restart; } spin_unlock(&inode->i_lock); #endif /* HAVE_DCACHE_LOCK */ inuse: AFS_GLOCK(); ObtainWriteLock(&afs_xvcache, 733); }
/* Try to discard pages, in order to recycle a vcache entry. * * We also make some sanity checks: ref count, open count, held locks. * * We also do some non-VM-related chores, such as releasing the cred pointer * (for AIX and Solaris) and releasing the gnode (for AIX). * * Locking: afs_xvcache lock is held. It must not be dropped. */ int osi_VM_FlushVCache(struct vcache *avc) { int s, code; vnode_t *vp = &avc->v; if (avc->vrefCount != 0) return EBUSY; if (avc->opens != 0) return EBUSY; /* * Just in case someone is still referring to the vnode we give up * trying to get rid of this guy. */ if (CheckLock(&avc->lock) || LockWaiters(&avc->lock)) return EBUSY; s = VN_LOCK(vp); /* * we just need to avoid the race * in vn_rele between the ref count going to 0 and VOP_INACTIVE * finishing up. * Note that although we checked vcount above, we didn't have the lock */ if (vp->v_count > 0 || (vp->v_flag & VINACT)) { VN_UNLOCK(vp, s); return EBUSY; } VN_UNLOCK(vp, s); /* * Since we store on last close and on VOP_INACTIVE * there should be NO dirty pages * Note that we hold the xvcache lock the entire time. */ AFS_GUNLOCK(); PTOSSVP(vp, (off_t) 0, (off_t) MAXLONG); AFS_GLOCK(); /* afs_chkpgoob will drop and re-acquire the global lock. */ afs_chkpgoob(vp, 0); osi_Assert(!VN_GET_PGCNT(vp)); osi_Assert(!AFS_VN_MAPPED(vp)); osi_Assert(!AFS_VN_DIRTY(&avc->v)); #if defined(AFS_SGI65_ENV) if (vp->v_filocks) cleanlocks(vp, IGN_PID, 0); mutex_destroy(&vp->v_filocksem); #else /* AFS_SGI65_ENV */ if (vp->v_filocksem) { if (vp->v_filocks) #ifdef AFS_SGI64_ENV cleanlocks(vp, &curprocp->p_flid); #else cleanlocks(vp, IGN_PID, 0); #endif osi_Assert(vp->v_filocks == NULL); mutex_destroy(vp->v_filocksem); kmem_free(vp->v_filocksem, sizeof *vp->v_filocksem); vp->v_filocksem = NULL; } #endif /* AFS_SGI65_ENV */ if (avc->vrefCount) osi_Panic("flushVcache: vm race"); #ifdef AFS_SGI64_ENV AFS_GUNLOCK(); vnode_pcache_reclaim(vp); /* this can sleep */ vnode_pcache_free(vp); if (vp->v_op != &Afs_vnodeops) { VOP_RECLAIM(vp, FSYNC_WAIT, code); } AFS_GLOCK(); #ifdef AFS_SGI65_ENV #ifdef VNODE_TRACING ktrace_free(vp->v_trace); #endif /* VNODE_TRACING */ vn_bhv_remove(VN_BHV_HEAD(vp), &(avc->vc_bhv_desc)); vn_bhv_head_destroy(&(vp->v_bh)); destroy_bitlock(&vp->v_pcacheflag); mutex_destroy(&vp->v_buf_lock); #else bhv_remove(VN_BHV_HEAD(vp), &(avc->vc_bhv_desc)); bhv_head_destroy(&(vp->v_bh)); #endif vp->v_flag = 0; /* debug */ #if defined(DEBUG) && defined(VNODE_INIT_BITLOCK) destroy_bitlock(&vp->v_flag); #endif #ifdef INTR_KTHREADS AFS_VN_DESTROY_BUF_LOCK(vp); #endif #endif /* AFS_SGI64_ENV */ return 0; }
/*
 * This is almost exactly like the PFlush() routine in afs_pioctl.c,
 * but that routine is static.  We are about to change a file from
 * normal caching to bypassing its caching.  Therefore, we want to
 * free up any cache space in use by the file, and throw out any
 * existing VM pages for the file.  We keep track of the number of
 * times we go back and forth from caching to bypass.
 */
void
afs_TransitionToBypass(struct vcache *avc, afs_ucred_t *acred, int aflags)
{
    afs_int32 code;
    int setDesire = 0;
    int setManual = 0;

    if (!avc)
	return;

    if (aflags & TRANSChangeDesiredBit)
	setDesire = 1;
    if (aflags & TRANSSetManualBit)
	setManual = 1;

    AFS_GLOCK();

    ObtainWriteLock(&avc->lock, 925);
    /*
     * Someone may have beat us to doing the transition - we had no lock
     * when we checked the flag earlier.  No cause to panic, just return.
     */
    if (avc->cachingStates & FCSBypass)
	goto done;

    /* If we never cached this, just change the state */
    if (setDesire && (!(avc->cachingStates & FCSBypass))) {
	avc->cachingStates |= FCSBypass;
	goto done;
    }

    /* cg2v, try to store any chunks not written 20071204 */
    if (avc->execsOrWriters > 0) {
	struct vrequest *treq = NULL;

	code = afs_CreateReq(&treq, acred);
	if (!code) {
	    code = afs_StoreAllSegments(avc, treq, AFS_SYNC | AFS_LASTSTORE);
	    afs_DestroyReq(treq);
	}
    }

#if 0
    /* also cg2v, don't dequeue the callback */
    ObtainWriteLock(&afs_xcbhash, 956);
    afs_DequeueCallback(avc);
    ReleaseWriteLock(&afs_xcbhash);
#endif
    avc->f.states &= ~(CStatd | CDirty);	/* next reference will re-stat */
    /* now find the disk cache entries */
    afs_TryToSmush(avc, acred, 1);
    osi_dnlc_purgedp(avc);
    if (avc->linkData && !(avc->f.states & CCore)) {
	afs_osi_Free(avc->linkData, strlen(avc->linkData) + 1);
	avc->linkData = NULL;
    }

    avc->cachingStates |= FCSBypass;	/* Set the bypass flag */
    if (setDesire)
	avc->cachingStates |= FCSDesireBypass;
    if (setManual)
	avc->cachingStates |= FCSManuallySet;
    avc->cachingTransitions++;

  done:
    ReleaseWriteLock(&avc->lock);
    AFS_GUNLOCK();
}
int afs_fill_super(struct super_block *sb, void *data, int silent) { int code = 0; #if defined(HAVE_LINUX_BDI_INIT) int bdi_init_done = 0; #endif AFS_GLOCK(); if (afs_was_mounted) { printf ("You must reload the AFS kernel extensions before remounting AFS.\n"); AFS_GUNLOCK(); return -EINVAL; } afs_was_mounted = 1; /* Set basics of super_block */ __module_get(THIS_MODULE); afs_globalVFS = sb; sb->s_flags |= MS_NOATIME; sb->s_blocksize = 1024; sb->s_blocksize_bits = 10; sb->s_magic = AFS_VFSMAGIC; sb->s_op = &afs_sops; /* Super block (vfs) ops */ #if defined(STRUCT_SUPER_BLOCK_HAS_S_D_OP) sb->s_d_op = &afs_dentry_operations; #endif /* used for inodes backing_dev_info field, also */ afs_backing_dev_info = kzalloc(sizeof(struct backing_dev_info), GFP_NOFS); #if defined(HAVE_LINUX_BDI_INIT) code = bdi_init(afs_backing_dev_info); if (code) goto out; bdi_init_done = 1; #endif #if defined(STRUCT_BACKING_DEV_INFO_HAS_NAME) afs_backing_dev_info->name = "openafs"; #endif afs_backing_dev_info->ra_pages = 32; #if defined (STRUCT_SUPER_BLOCK_HAS_S_BDI) sb->s_bdi = afs_backing_dev_info; /* The name specified here will appear in the flushing thread name - flush-afs */ bdi_register(afs_backing_dev_info, NULL, "afs"); #endif #if !defined(AFS_NONFSTRANS) sb->s_export_op = &afs_export_ops; #endif #if defined(MAX_NON_LFS) #ifdef AFS_64BIT_CLIENT #if !defined(MAX_LFS_FILESIZE) #if BITS_PER_LONG==32 #define MAX_LFS_FILESIZE (((u64)PAGE_CACHE_SIZE << (BITS_PER_LONG-1))-1) #elif BITS_PER_LONG==64 #define MAX_LFS_FILESIZE 0x7fffffffffffffff #endif #endif sb->s_maxbytes = MAX_LFS_FILESIZE; #else sb->s_maxbytes = MAX_NON_LFS; #endif #endif code = afs_root(sb); out: if (code) { afs_globalVFS = NULL; afs_FlushAllVCaches(); #if defined(HAVE_LINUX_BDI_INIT) if (bdi_init_done) bdi_destroy(afs_backing_dev_info); #endif kfree(afs_backing_dev_info); module_put(THIS_MODULE); } AFS_GUNLOCK(); return code ? -EINVAL : 0; }
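/*
 * A quick arithmetic check of the fallback MAX_LFS_FILESIZE definition
 * in afs_fill_super() above: on a 32-bit kernel the page cache is
 * indexed by a 32-bit page offset, so the limit works out to
 * PAGE_CACHE_SIZE << (BITS_PER_LONG - 1), minus one.  A 4 KiB page is
 * assumed for the demo; standalone and illustrative only.
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
    const uint64_t page_cache_size = 4096;   /* assumed PAGE_CACHE_SIZE */
    const int bits_per_long = 32;

    /* 4096 << 31 - 1 = 2^43 - 1, i.e. just under 8 TiB */
    uint64_t max32 = (page_cache_size << (bits_per_long - 1)) - 1;

    printf("32-bit limit: 0x%llx bytes (just under 8 TiB)\n",
           (unsigned long long)max32);
    printf("64-bit limit: 0x7fffffffffffffff\n");
    return 0;
}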
/* dispatch a no-cache read request */
afs_int32
afs_ReadNoCache(struct vcache *avc,
		struct nocache_read_request *bparms,
		afs_ucred_t *acred)
{
    afs_int32 code;
    afs_int32 bcnt;
    struct brequest *breq;
    struct vrequest *areq = NULL;

    if (avc->vc_error) {
	code = EIO;
	afs_warn("afs_ReadNoCache VCache Error!\n");
	goto cleanup;
    }

    AFS_GLOCK();
    /* the receiver will free areq */
    code = afs_CreateReq(&areq, acred);
    if (code) {
	afs_warn("afs_ReadNoCache afs_CreateReq error!\n");
    } else {
	code = afs_VerifyVCache(avc, areq);
	if (code) {
	    afs_warn("afs_ReadNoCache Failed to verify VCache!\n");
	}
    }
    AFS_GUNLOCK();

    if (code) {
	code = afs_CheckCode(code, areq, 11);	/* failed to get it */
	goto cleanup;
    }

    bparms->areq = areq;

    /* and queue this one */
    bcnt = 1;
    AFS_GLOCK();
    while (bcnt < 20) {
	breq = afs_BQueue(BOP_FETCH_NOCACHE, avc, B_DONTWAIT, 0, acred, 1, 1,
			  bparms, (void *)0, (void *)0);
	if (breq != 0) {
	    code = 0;
	    break;
	}
	afs_osi_Wait(10 * bcnt, 0, 0);
	bcnt++;		/* bump the attempt count so the loop terminates */
    }
    AFS_GUNLOCK();

    if (!breq) {
	code = EBUSY;
	goto cleanup;
    }

    return code;

  cleanup:
    /* If there's a problem before we queue the request, we need to
     * do everything that would normally happen when the request was
     * processed, like unlocking the pages and freeing memory. */
    unlock_and_release_pages(bparms->auio);
    AFS_GLOCK();
    afs_DestroyReq(areq);
    AFS_GUNLOCK();
    osi_Free(bparms->auio->uio_iov,
	     bparms->auio->uio_iovcnt * sizeof(struct iovec));
    osi_Free(bparms->auio, sizeof(struct uio));
    osi_Free(bparms, sizeof(struct nocache_read_request));
    return code;
}
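/*
 * A userspace sketch of the bounded retry loop in afs_ReadNoCache():
 * keep trying to queue the work, sleeping a little longer each round
 * (10 * attempt in the daemon's tick units; milliseconds here), and
 * give up with EBUSY once the attempt budget is spent.  try_enqueue()
 * is a hypothetical stand-in for afs_BQueue().
 */
#include <stdio.h>
#include <errno.h>
#include <unistd.h>

static int try_enqueue(int attempt)
{
    return attempt >= 3;   /* pretend the queue frees up on try 3 */
}

static int queue_with_retry(void)
{
    int bcnt;

    for (bcnt = 1; bcnt < 20; bcnt++) {
        if (try_enqueue(bcnt)) {
            printf("queued on attempt %d\n", bcnt);
            return 0;
        }
        usleep(10000 * bcnt);   /* back off: 10ms, 20ms, 30ms, ... */
    }
    return EBUSY;              /* queue stayed full; caller cleans up */
}

int main(void)
{
    int code = queue_with_retry();
    if (code)
        fprintf(stderr, "giving up: %d\n", code);
    return 0;
}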
/* set the real time */ void afs_osi_SetTime(osi_timeval_t * atv) { #if defined(AFS_AIX32_ENV) struct timestruc_t t; t.tv_sec = atv->tv_sec; t.tv_nsec = atv->tv_usec * 1000; ksettimer(&t); /* Was -> settimer(TIMEOFDAY, &t); */ #elif defined(AFS_SUN5_ENV) stime(atv->tv_sec); #elif defined(AFS_SGI_ENV) struct stimea { sysarg_t time; } sta; AFS_GUNLOCK(); sta.time = atv->tv_sec; stime(&sta); AFS_GLOCK(); #elif defined(AFS_DARWIN_ENV) #ifndef AFS_DARWIN80_ENV AFS_GUNLOCK(); setthetime(atv); AFS_GLOCK(); #endif #else /* stolen from kern_time.c */ #ifndef AFS_AUX_ENV boottime.tv_sec += atv->tv_sec - time.tv_sec; #endif #ifdef AFS_HPUX_ENV { #if !defined(AFS_HPUX1122_ENV) /* drop the setting of the clock for now. spl7 is not * known on hpux11.22 */ ulong_t s; struct timeval t; t.tv_sec = atv->tv_sec; t.tv_usec = atv->tv_usec; s = spl7(); time = t; (void)splx(s); resettodr(atv); #endif } #else { int s; s = splclock(); time = *atv; (void)splx(s); } resettodr(); #endif #ifdef AFS_AUX_ENV logtchg(atv->tv_sec); #endif #endif /* AFS_DARWIN_ENV */ AFS_STATCNT(osi_SetTime); }
/**
 * Connects to a server by its server address.
 *
 * @param sap Server address.
 * @param aport Server port.
 * @param acell Cell number (not used by this routine).
 * @param tu Connect as this user.
 * @param force_if_down Connect even if the server is marked down.
 * @param create Create a new connection if none exists.
 * @param locktype Specifies type of lock to be used for this function.
 *
 * @return The new connection.
 */
struct afs_conn *
afs_ConnBySA(struct srvAddr *sap, unsigned short aport, afs_int32 acell,
	     struct unixuser *tu, int force_if_down, afs_int32 create,
	     afs_int32 locktype)
{
    struct afs_conn *tc = 0;
    struct rx_securityClass *csec;	/*Security class object */
    int isec;				/*Security index */
    int service;

    if (!sap || ((sap->sa_flags & SRVR_ISDOWN) && !force_if_down)) {
	/* sa is known down, and we don't want to force it. */
	return NULL;
    }

    ObtainSharedLock(&afs_xconn, 15);

    /* Get conn by port and user. */
    for (tc = sap->conns; tc; tc = tc->next) {
	if (tc->user == tu && tc->port == aport) {
	    break;
	}
    }

    if (!tc && !create) {
	/* Not found and can't create a new one. */
	ReleaseSharedLock(&afs_xconn);
	return NULL;
    }

    if (AFS_IS_DISCONNECTED && !AFS_IN_SYNC) {
	afs_warnuser("afs_ConnBySA: disconnected\n");
	ReleaseSharedLock(&afs_xconn);
	return NULL;
    }

    if (!tc) {
	/* No such connection structure exists.  Create one and splice it in.
	 * Make sure the server record has been marked as used (for the
	 * purposes of calculating up & down times, it's now considered to be
	 * an ``active'' server).  Also make sure the server's
	 * lastUpdateEvalTime gets set, marking the time of its ``birth''.
	 */
	UpgradeSToWLock(&afs_xconn, 37);
	tc = (struct afs_conn *)afs_osi_Alloc(sizeof(struct afs_conn));
	memset(tc, 0, sizeof(struct afs_conn));

	tc->user = tu;
	tc->port = aport;
	tc->srvr = sap;
	tc->refCount = 0;	/* bumped below */
	tc->forceConnectFS = 1;
	tc->id = (struct rx_connection *)0;
	tc->next = sap->conns;
	sap->conns = tc;
	afs_ActivateServer(sap);

	ConvertWToSLock(&afs_xconn);
    } /* end of if (!tc) */

    tc->refCount++;

    if (tu->states & UTokensBad) {
	/* we may still have an authenticated RPC connection here;
	 * we'll have to create a new, unauthenticated, connection.
	 * Perhaps a better way to do this would be to set
	 * conn->forceConnectFS on all conns when the token first goes
	 * bad, but that's somewhat trickier, due to locking
	 * constraints (though not impossible).
	 */
	if (tc->id && (rx_SecurityClassOf(tc->id) != 0)) {
	    tc->forceConnectFS = 1;	/* force recreation of connection */
	}
	tu->vid = UNDEFVID;	/* forcibly disconnect the authentication info */
    }

    if (tc->forceConnectFS) {
	UpgradeSToWLock(&afs_xconn, 38);
	csec = (struct rx_securityClass *)0;
	if (tc->id) {
	    AFS_GUNLOCK();
	    rx_DestroyConnection(tc->id);
	    AFS_GLOCK();
	}
	/*
	 * Stupid hack to determine if we are using the vldb service or the
	 * file system service.
	 */
	if (aport == sap->server->cell->vlport)
	    service = 52;
	else
	    service = 1;
	isec = 0;

	csec = afs_pickSecurityObject(tc, &isec);

	AFS_GUNLOCK();
	tc->id = rx_NewConnection(sap->sa_ip, aport, service, csec, isec);
	AFS_GLOCK();
	if (service == 52) {
	    rx_SetConnHardDeadTime(tc->id, afs_rx_harddead);
	}

	/* set to a RX_CALL_TIMEOUT error to allow MTU retry to trigger */
	rx_SetServerConnIdleDeadErr(tc->id, RX_CALL_DEAD);
	rx_SetConnIdleDeadTime(tc->id, afs_rx_idledead);
	rx_SetMsgsizeRetryErr(tc->id, RX_MSGSIZE);

	/*
	 * Only do this for the base connection, not per-user.
	 * Will need to be revisited if/when CB gets security.
	 */
	if ((isec == 0) && (service != 52) && !(tu->states & UTokensBad) &&
	    (tu->vid == UNDEFVID))
	    rx_SetConnSecondsUntilNatPing(tc->id, 20);

	tc->forceConnectFS = 0;	/* apparently we're appropriately connected now */
	if (csec)
	    rxs_Release(csec);
	ConvertWToSLock(&afs_xconn);
    } /* end of if (tc->forceConnectFS) */

    ReleaseSharedLock(&afs_xconn);
    return tc;
}
int osi_NetSend(osi_socket so, struct sockaddr_in *addr, struct iovec *dvec, int nvecs, afs_int32 alength, int istack) { #ifdef AFS_DARWIN80_ENV socket_t asocket = (socket_t)so; struct msghdr msg; size_t slen; #else struct socket *asocket = (struct socket *)so; struct uio u; #endif afs_int32 code; int i; struct iovec iov[RX_MAXIOVECS]; int haveGlock = ISAFS_GLOCK(); AFS_STATCNT(osi_NetSend); if (nvecs > RX_MAXIOVECS) osi_Panic("osi_NetSend: %d: Too many iovecs.\n", nvecs); for (i = 0; i < nvecs; i++) iov[i] = dvec[i]; addr->sin_len = sizeof(struct sockaddr_in); if ((afs_termState == AFSOP_STOP_RXK_LISTENER) || (afs_termState == AFSOP_STOP_COMPLETE)) return -1; if (haveGlock) AFS_GUNLOCK(); #if defined(KERNEL_FUNNEL) thread_funnel_switch(KERNEL_FUNNEL, NETWORK_FUNNEL); #endif #ifdef AFS_DARWIN80_ENV memset(&msg, 0, sizeof(struct msghdr)); msg.msg_name = addr; msg.msg_namelen = ((struct sockaddr *)addr)->sa_len; msg.msg_iov = &iov[0]; msg.msg_iovlen = nvecs; code = sock_send(asocket, &msg, 0, &slen); #else u.uio_iov = &iov[0]; u.uio_iovcnt = nvecs; u.uio_offset = 0; u.uio_resid = alength; u.uio_segflg = UIO_SYSSPACE; u.uio_rw = UIO_WRITE; u.uio_procp = NULL; code = sosend(asocket, (struct sockaddr *)addr, &u, NULL, NULL, 0); #endif #if defined(KERNEL_FUNNEL) thread_funnel_switch(NETWORK_FUNNEL, KERNEL_FUNNEL); #endif if (haveGlock) AFS_GLOCK(); return code; }
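/*
 * A userspace sketch of the scatter-gather send in the Darwin branch of
 * osi_NetSend(): build a struct msghdr over the caller's iovec array
 * and hand the whole datagram to the kernel in one call.  Standard BSD
 * sockets; sends a two-fragment UDP datagram to localhost:7001 (the
 * port is an arbitrary choice for the demo).
 */
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <sys/uio.h>
#include <netinet/in.h>
#include <arpa/inet.h>
#include <unistd.h>

int main(void)
{
    int fd = socket(AF_INET, SOCK_DGRAM, 0);
    if (fd < 0) {
        perror("socket");
        return 1;
    }

    struct sockaddr_in addr;
    memset(&addr, 0, sizeof(addr));
    addr.sin_family = AF_INET;
    addr.sin_port = htons(7001);
    addr.sin_addr.s_addr = htonl(INADDR_LOOPBACK);

    /* two separate buffers sent as one datagram */
    char hdr[] = "header:";
    char body[] = "payload";
    struct iovec iov[2] = {
        { hdr,  sizeof(hdr) - 1 },
        { body, sizeof(body) - 1 },
    };

    struct msghdr msg;
    memset(&msg, 0, sizeof(msg));
    msg.msg_name = &addr;
    msg.msg_namelen = sizeof(addr);
    msg.msg_iov = iov;
    msg.msg_iovlen = 2;

    if (sendmsg(fd, &msg, 0) < 0)
        perror("sendmsg");
    close(fd);
    return 0;
}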
void * osi_UFSOpen(afs_dcache_id_t *ainode) { struct vnode *vp; struct vattr va; struct osi_file *afile = NULL; extern int cacheDiskType; afs_int32 code = 0; int dummy; char fname[1024]; struct osi_stat tstat; AFS_STATCNT(osi_UFSOpen); if (cacheDiskType != AFS_FCACHE_TYPE_UFS) { osi_Panic("UFSOpen called for non-UFS cache\n"); } if (!afs_osicred_initialized) { memset(&afs_osi_cred, 0, sizeof(afs_ucred_t)); afs_osi_cred.cr_ref++; #ifndef AFS_DARWIN110_ENV afs_osi_cred.cr_ngroups = 1; #endif afs_osicred_initialized = 1; } afile = osi_AllocSmallSpace(sizeof(struct osi_file)); AFS_GUNLOCK(); #ifdef AFS_CACHE_VNODE_PATH if (!ainode->ufs) { osi_Panic("No cache inode\n"); } code = vnode_open(ainode->ufs, O_RDWR, 0, 0, &vp, afs_osi_ctxtp); #else #ifndef AFS_DARWIN80_ENV if (afs_CacheFSType == AFS_APPL_HFS_CACHE) code = igetinode(afs_cacheVfsp, (dev_t) cacheDev.dev, &ainode->ufs, &vp, &va, &dummy); /* XXX hfs is broken */ else if (afs_CacheFSType == AFS_APPL_UFS_CACHE) #endif code = igetinode(afs_cacheVfsp, (dev_t) cacheDev.dev, (ino_t) ainode->ufs, &vp, &va, &dummy); #ifndef AFS_DARWIN80_ENV else panic("osi_UFSOpen called before cacheops initialized\n"); #endif #endif AFS_GLOCK(); if (code) { osi_FreeSmallSpace(afile); osi_Panic("UFSOpen: igetinode failed"); } afile->vnode = vp; afile->offset = 0; afile->proc = (int (*)())0; #ifndef AFS_CACHE_VNODE_PATH afile->size = va.va_size; #else code = afs_osi_Stat(afile, &tstat); afile->size = tstat.size; #endif return (void *)afile; }