static void BStore(struct brequest *ab) { struct vcache *tvc; afs_int32 code; struct vrequest *treq = NULL; #if defined(AFS_SGI_ENV) struct cred *tmpcred; #endif AFS_STATCNT(BStore); if ((code = afs_CreateReq(&treq, ab->cred))) return; tvc = ab->vc; #if defined(AFS_SGI_ENV) /* * Since StoreOnLastReference can end up calling osi_SyncVM which * calls into VM code that assumes that u.u_cred has the * correct credentials, we set our to theirs for this xaction */ tmpcred = OSI_GET_CURRENT_CRED(); OSI_SET_CURRENT_CRED(ab->cred); /* * To avoid recursion since the WriteLock may be released during VM * operations, we hold the VOP_RWLOCK across this transaction as * do the other callers of StoreOnLastReference */ AFS_RWLOCK((vnode_t *) tvc, 1); #endif ObtainWriteLock(&tvc->lock, 209); code = afs_StoreOnLastReference(tvc, treq); ReleaseWriteLock(&tvc->lock); #if defined(AFS_SGI_ENV) OSI_SET_CURRENT_CRED(tmpcred); AFS_RWUNLOCK((vnode_t *) tvc, 1); #endif /* now set final return code, and wakeup anyone waiting */ if ((ab->flags & BUVALID) == 0) { /* To explain code_raw/code_checkcode: * Anyone that's waiting won't have our treq, so they won't be able to * call afs_CheckCode themselves on the return code we provide here. * But if we give back only the afs_CheckCode value, they won't know * what the "raw" value was. So give back both values, so the waiter * can know the "raw" value for interpreting the value internally, as * well as the afs_CheckCode value to give to the OS. */ ab->code_raw = code; ab->code_checkcode = afs_CheckCode(code, treq, 430); ab->flags |= BUVALID; if (ab->flags & BUWAIT) { ab->flags &= ~BUWAIT; afs_osi_Wakeup(ab); } } afs_DestroyReq(treq); }
/*
 * BStore - background-daemon handler: write back a vcache's dirty data when
 * its last reference has gone away (via afs_StoreOnLastReference).
 *
 * NOTE(review): this appears to be an older variant of the BStore defined
 * above (stack-allocated vrequest via afs_InitReq, single ab->code result
 * field instead of code_raw/code_checkcode).  Two static definitions of the
 * same name cannot coexist in one translation unit — confirm which version
 * this tree actually builds.
 */
static void
BStore(struct brequest *ab)
{
    struct vcache *tvc;
    afs_int32 code;
    struct vrequest treq;
#if defined(AFS_SGI_ENV)
    struct cred *tmpcred;
#endif

    AFS_STATCNT(BStore);
    /* Build a vrequest from the brequest's credentials; bail on failure. */
    if ((code = afs_InitReq(&treq, ab->cred)))
	return;
    code = 0;
    tvc = ab->vc;
#if defined(AFS_SGI_ENV)
    /*
     * Since StoreOnLastReference can end up calling osi_SyncVM which
     * calls into VM code that assumes that u.u_cred has the
     * correct credentials, we set our to theirs for this xaction
     */
    tmpcred = OSI_GET_CURRENT_CRED();
    OSI_SET_CURRENT_CRED(ab->cred);

    /*
     * To avoid recursion since the WriteLock may be released during VM
     * operations, we hold the VOP_RWLOCK across this transaction as
     * do the other callers of StoreOnLastReference
     */
    AFS_RWLOCK((vnode_t *) tvc, 1);
#endif
    ObtainWriteLock(&tvc->lock, 209);
    code = afs_StoreOnLastReference(tvc, &treq);
    ReleaseWriteLock(&tvc->lock);
#if defined(AFS_SGI_ENV)
    OSI_SET_CURRENT_CRED(tmpcred);
    AFS_RWUNLOCK((vnode_t *) tvc, 1);
#endif
    /* now set final return code, and wakeup anyone waiting */
    if ((ab->flags & BUVALID) == 0) {
	ab->code = afs_CheckCode(code, &treq, 43);	/* set final code, since treq doesn't go across processes */
	ab->flags |= BUVALID;
	if (ab->flags & BUWAIT) {
	    ab->flags &= ~BUWAIT;
	    afs_osi_Wakeup(ab);
	}
    }
}
/*
 * afs_frlock - SGI VOP_FRLOCK entry point: record-lock control for AFS files.
 *
 * Byte-range requests (non-zero whence/start, or a len that is neither 0 nor
 * MAXEND) are handled purely locally via fs_frlock; whole-file requests fall
 * through to afs_lockctl, which talks to the fileserver.  For F_GETLK we
 * check local byte-range locks first, and only if none block do we consult
 * the full-file AFS locks.
 */
int
afs_frlock(OSI_VN_DECL(vp), int cmd, struct flock *lfp, int flag,
	   off_t offset,
#ifdef AFS_SGI65_ENV
	   vrwlock_t vrwlock,
#endif
	   cred_t * cr)
{
    int error;
    OSI_VN_CONVERT(vp);
#ifdef AFS_SGI65_ENV
    struct flid flid;
    int pid;
    get_current_flid(&flid);
    pid = flid.fl_pid;
#endif

    /*
     * Since AFS doesn't support byte-wise locks (and simply says yes!),
     * we handle byte locking locally only.
     * This makes lots of things work much better.
     * XXX This doesn't properly handle moving from a
     * byte-wise lock up to a full file lock (we should
     * remove the byte locks ...).  Of course neither did the
     * regular AFS way ...
     *
     * For GETLK we do a bit more - we first check any byte-wise
     * locks - if none then check for full AFS file locks.
     */
    if (cmd == F_GETLK || lfp->l_whence != 0 || lfp->l_start != 0
	|| (lfp->l_len != MAXEND && lfp->l_len != 0)) {
	/* Byte-range (or GETLK probe): let the generic local fs_frlock
	 * handle it.  Drop the AFS global lock around the call; the vnode
	 * rwlock protects the lock list. */
	AFS_RWLOCK(vp, VRWLOCK_WRITE);
	AFS_GUNLOCK();
#ifdef AFS_SGI65_ENV
	error = fs_frlock(OSI_VN_ARG(vp), cmd, lfp, flag, offset, vrwlock, cr);
#else
	error = fs_frlock(vp, cmd, lfp, flag, offset, cr);
#endif
	AFS_GLOCK();
	AFS_RWUNLOCK(vp, VRWLOCK_WRITE);
	if (error || cmd != F_GETLK)
	    return error;
	if (lfp->l_type != F_UNLCK)
	    /* found some blocking lock */
	    return 0;
	/* fall through to check for full AFS file locks */
    }

    /* map BSD style to plain - we don't call reclock()
     * and its only there that the difference is important */
    switch (cmd) {
    case F_GETLK:
    case F_RGETLK:
	break;
    case F_SETLK:
    case F_RSETLK:
	break;
    case F_SETBSDLK:
	cmd = F_SETLK;
	break;
    case F_SETLKW:
    case F_RSETLKW:
	break;
    case F_SETBSDLKW:
	cmd = F_SETLKW;
	break;
    default:
	return EINVAL;
    }

    /* Normalize the flock to whence==0 coordinates before handing it to
     * afs_lockctl; convoff may sleep, so release the global lock. */
    AFS_GUNLOCK();
    error = convoff(vp, lfp, 0, offset, SEEKLIMIT
#ifdef AFS_SGI64_ENV
		    , OSI_GET_CURRENT_CRED()
#endif /* AFS_SGI64_ENV */
	);
    AFS_GLOCK();
    if (!error) {
#ifdef AFS_SGI65_ENV
	error = afs_lockctl(vp, lfp, cmd, cr, pid);
#else
	error = afs_lockctl(vp, lfp, cmd, cr, OSI_GET_CURRENT_PID());
#endif
    }
    return error;
}
/*
 * afs_lockctl - lock-control entry point for whole-file advisory locks.
 *
 * F_GETLK queries are answered via HandleGetLock; F_SETLK/F_SETLKW requests
 * are mapped to flock-style LOCK_SH/LOCK_EX/LOCK_UN and passed to
 * HandleFlock.  Byte-range set requests are NOT enforced: DoLockWarning is
 * called and success is faked (see the warning text it prints).
 *
 * NOTE(review): the matching "#if" with the SGI-variant signature (which
 * carries a pid_t clid parameter) precedes this chunk; in the non-SGI build
 * `clid` is presumably supplied by a file-level `clid = 0` definition
 * outside this view — confirm against the full file.
 */
int afs_lockctl(struct vcache * avc, struct AFS_FLOCK * af, int acmd,
		afs_ucred_t * acred)
#endif
{
    struct vrequest treq;
    afs_int32 code;
    struct afs_fakestat_state fakestate;

    AFS_STATCNT(afs_lockctl);
    if ((code = afs_InitReq(&treq, acred)))
	return code;
    afs_InitFakeStat(&fakestate);

    AFS_DISCON_LOCK();

    /* If avc is a fakestat'd mount point, swap in the real target vcache. */
    code = afs_EvalFakeStat(&avc, &fakestate, &treq);
    if (code) {
	goto done;
    }
#if defined(AFS_SGI_ENV)
    if ((acmd == F_GETLK) || (acmd == F_RGETLK)) {
#else
    if (acmd == F_GETLK) {
#endif
	if (af->l_type == F_UNLCK) {
	    code = 0;
	    goto done;
	}
	code = HandleGetLock(avc, af, &treq, clid);
	code = afs_CheckCode(code, &treq, 2);	/* defeat buggy AIX optimz */
	goto done;
    } else if ((acmd == F_SETLK) || (acmd == F_SETLKW)
#if defined(AFS_SGI_ENV)
	       || (acmd == F_RSETLK) || (acmd == F_RSETLKW)) {
#else
	) {
#endif
	if ((avc->f.states & CRO)) {
	    /* for RO volumes, don't do anything for locks; the fileserver doesn't
	     * even track them. A write lock should not be possible, though. */
	    if (af->l_type == F_WRLCK) {
		code = EBADF;
	    } else {
		code = 0;
	    }
	    goto done;
	}

	/* Java VMs ask for l_len=(long)-1 regardless of OS/CPU */
	if ((sizeof(af->l_len) == 8) && (af->l_len == 0x7fffffffffffffffLL))
	    af->l_len = 0;
	/* next line makes byte range locks always succeed,
	 * even when they should block */
	if (af->l_whence != 0 || af->l_start != 0 || af->l_len != 0) {
	    DoLockWarning(acred);
	    code = 0;
	    goto done;
	}

	/* otherwise we can turn this into a whole-file flock */
	if (af->l_type == F_RDLCK)
	    code = LOCK_SH;
	else if (af->l_type == F_WRLCK)
	    code = LOCK_EX;
	else if (af->l_type == F_UNLCK)
	    code = LOCK_UN;
	else {
	    code = EINVAL;	/* unknown lock type */
	    goto done;
	}
	/* F_SETLK (non-waiting flavor) becomes a non-blocking flock. */
	if (((acmd == F_SETLK)
#if defined(AFS_SGI_ENV)
	     || (acmd == F_RSETLK)
#endif
	    ) && code != LOCK_UN)
	    code |= LOCK_NB;	/* non-blocking, s.v.p. */
#if defined(AFS_DARWIN_ENV)
	code = HandleFlock(avc, code, &treq, clid, 0 /*!onlymine */ );
#elif defined(AFS_SGI_ENV)
	AFS_RWLOCK((vnode_t *) avc, VRWLOCK_WRITE);
	code = HandleFlock(avc, code, &treq, clid, 0 /*!onlymine */ );
	AFS_RWUNLOCK((vnode_t *) avc, VRWLOCK_WRITE);
#else
	code = HandleFlock(avc, code, &treq, 0, 0 /*!onlymine */ );
#endif
	code = afs_CheckCode(code, &treq, 3);	/* defeat AIX -O bug */
	goto done;
    }
    code = EINVAL;
  done:
    afs_PutFakeStat(&fakestate);
    AFS_DISCON_UNLOCK();
    return code;
}

/*
 * Get a description of the first lock which would
 * block the lock specified. If the specified lock
 * would succeed, fill in the lock structure with 'F_UNLCK'.
 *
 * To do that, we have to ask the server for the lock
 * count if:
 *    1. The file is not locked by this machine.
 *    2. Asking for write lock, and only the current
 *       PID has the file read locked.
 */
static int
HandleGetLock(struct vcache *avc, struct AFS_FLOCK *af,
	      struct vrequest *areq, int clid)
{
    afs_int32 code;
    struct AFS_FLOCK flock;

    lockIdSet(&flock, NULL, clid);

    ObtainWriteLock(&avc->lock, 122);
    if (avc->flockCount == 0) {
	/*
	 * We don't know ourselves, so ask the server. Unfortunately, we
	 * don't know the pid. Not even the server knows the pid. Besides,
	 * the process with the lock is on another machine
	 */
	code = GetFlockCount(avc, areq);
	if (code == 0 || (af->l_type == F_RDLCK && code > 0)) {
	    af->l_type = F_UNLCK;
	    goto unlck_leave;
	}
	if (code > 0)
	    af->l_type = F_RDLCK;
	else
	    af->l_type = F_WRLCK;
	af->l_pid = 0;
#if defined(AFS_HAVE_FLOCK_SYSID)
	af->l_sysid = 0;
#endif
	goto done;
    }

    if (af->l_type == F_RDLCK) {
	/*
	 * We want a read lock.  If there are only
	 * read locks, or we are the one with the
	 * write lock, say it is unlocked.
	 */
	if (avc->flockCount > 0 ||	/* only read locks */
	    !lockIdcmp2(&flock, avc, NULL, 1, clid)) {
	    af->l_type = F_UNLCK;
	    goto unlck_leave;
	}

	/* one write lock, but who? */
	af->l_type = F_WRLCK;	/* not us, so lock would block */
	if (avc->slocks) {	/* we know who, so tell */
	    af->l_pid = avc->slocks->pid;
#if defined(AFS_HAVE_FLOCK_SYSID)
	    af->l_sysid = avc->slocks->sysid;
#endif
	} else {
	    af->l_pid = 0;	/* XXX can't happen?? */
#if defined(AFS_HAVE_FLOCK_SYSID)
	    af->l_sysid = 0;
#endif
	}
	goto done;
    }

    /*
     * Ok, we want a write lock.  If there is a write lock
     * already, and it is not this process, we fail.
     */
    if (avc->flockCount < 0) {
	if (lockIdcmp2(&flock, avc, NULL, 1, clid)) {
	    af->l_type = F_WRLCK;
	    if (avc->slocks) {
		af->l_pid = avc->slocks->pid;
#if defined(AFS_HAVE_FLOCK_SYSID)
		af->l_sysid = avc->slocks->sysid;
#endif
	    } else {
		af->l_pid = 0;	/* XXX can't happen?? */
#if defined(AFS_HAVE_FLOCK_SYSID)
		af->l_sysid = 0;
#endif
	    }
	    goto done;
	}
	/* we are the one with the write lock */
	af->l_type = F_UNLCK;
	goto unlck_leave;
    }

    /*
     * Want a write lock, and we know there are read locks.
     * If there is more than one, or it isn't us, we cannot lock.
     */
    if ((avc->flockCount > 1)
	|| lockIdcmp2(&flock, avc, NULL, 1, clid)) {
	struct SimpleLocks *slp;

	af->l_type = F_RDLCK;
	af->l_pid = 0;
#if defined(AFS_HAVE_FLOCK_SYSID)
	af->l_sysid = 0;
#endif
	/* find a pid that isn't our own */
	for (slp = avc->slocks; slp; slp = slp->next) {
	    if (lockIdcmp2(&flock, NULL, slp, 1, clid)) {
		af->l_pid = slp->pid;
#if defined(AFS_HAVE_FLOCK_SYSID)
		/* NOTE(review): reads the list head's sysid rather than
		 * slp->sysid — looks like a copy/paste slip; confirm. */
		af->l_sysid = avc->slocks->sysid;
#endif
		break;
	    }
	}
	goto done;
    }

    /*
     * NOTE(review): the two branches below repeat, verbatim, the
     * "flockCount < 0" and "flockCount > 1 || not-us" checks just above.
     * Both conditions were already handled (each prior branch exits via
     * goto), so this second copy appears to be redundant duplicated text —
     * compare against the upstream file before removing.
     */
    /*
     * Ok, we want a write lock.  If there is a write lock
     * already, and it is not this process, we fail.
     */
    if (avc->flockCount < 0) {
	if (lockIdcmp2(&flock, avc, NULL, 1, clid)) {
	    af->l_type = F_WRLCK;
	    if (avc->slocks) {
		af->l_pid = avc->slocks->pid;
#if defined(AFS_HAVE_FLOCK_SYSID)
		af->l_sysid = avc->slocks->sysid;
#endif
	    } else {
		af->l_pid = 0;	/* XXX can't happen?? */
#if defined(AFS_HAVE_FLOCK_SYSID)
		af->l_sysid = 0;
#endif
	    }
	    goto done;
	}
	/* we are the one with the write lock */
	af->l_type = F_UNLCK;
	goto unlck_leave;
    }

    /*
     * Want a write lock, and we know there are read locks.
     * If there is more than one, or it isn't us, we cannot lock.
     */
    if ((avc->flockCount > 1)
	|| lockIdcmp2(&flock, avc, NULL, 1, clid)) {
	struct SimpleLocks *slp;
	af->l_type = F_RDLCK;
	af->l_pid = 0;
#if defined(AFS_HAVE_FLOCK_SYSID)
	af->l_sysid = 0;
#endif
	/* find a pid that isn't our own */
	for (slp = avc->slocks; slp; slp = slp->next) {
	    if (lockIdcmp2(&flock, NULL, slp, 1, clid)) {
		af->l_pid = slp->pid;
#if defined(AFS_HAVE_FLOCK_SYSID)
		af->l_sysid = avc->slocks->sysid;
#endif
		break;
	    }
	}
	goto done;
    }

    /*
     * Want a write lock, and there is just one read lock, and it
     * is this process with a read lock.  Ask the server if there
     * are any more processes with the file locked.
     */
    code = GetFlockCount(avc, areq);
    if (code == 0 || code == 1) {
	af->l_type = F_UNLCK;
	goto unlck_leave;
    }
    if (code > 0)
	af->l_type = F_RDLCK;
    else
	af->l_type = F_WRLCK;
    af->l_pid = 0;
#if defined(AFS_HAVE_FLOCK_SYSID)
    af->l_sysid = 0;
#endif

  done:
    af->l_whence = 0;
    af->l_start = 0;
    af->l_len = 0;		/* to end of file */
  unlck_leave:
    ReleaseWriteLock(&avc->lock);
    return 0;
}

/* Get the 'flock' count from the server.  This comes back in a 'spare'
 * field from a GetStatus RPC. If we have any problems with the RPC,
 * we lie and say the file is unlocked.  If we ask any 'old' fileservers,
 * the spare field will be a zero, saying the file is unlocked.  This is
 * OK, as a further 'lock' request will do the right thing.
 */
static int
GetFlockCount(struct vcache *avc, struct vrequest *areq)
{
    struct afs_conn *tc;
    afs_int32 code;
    struct AFSFetchStatus OutStatus;
    struct AFSCallBack CallBack;
    struct AFSVolSync tsync;
    struct rx_connection *rxconn;
    int temp;
    XSTATS_DECLS;
    /* Force a non-blocking connection attempt; `temp` remembers whether
     * O_NONBLOCK was already set on the request. */
    temp = areq->flags & O_NONBLOCK;
    areq->flags |= O_NONBLOCK;

    /* If we're disconnected, lie and say that we've got no locks. Ick
     * NOTE(review): this early return leaves O_NONBLOCK set on areq. */
    if (AFS_IS_DISCONNECTED)
	return 0;

    do {
	tc = afs_Conn(&avc->f.fid, areq, SHARED_LOCK, &rxconn);
	if (tc) {
	    XSTATS_START_TIME(AFS_STATS_FS_RPCIDX_FETCHSTATUS);
	    RX_AFS_GUNLOCK();
	    code =
		RXAFS_FetchStatus(rxconn, (struct AFSFid *)&avc->f.fid.Fid,
				  &OutStatus, &CallBack, &tsync);
	    RX_AFS_GLOCK();
	    XSTATS_END_TIME;
	} else
	    code = -1;
    } while (afs_Analyze
	     (tc, rxconn, code, &avc->f.fid, areq,
	      AFS_STATS_FS_RPCIDX_FETCHSTATUS, SHARED_LOCK, NULL));

    /* NOTE(review): this restore looks inverted — it clears O_NONBLOCK only
     * when the flag was ALREADY set on entry, and leaves it set when it was
     * not.  `if (!temp)` would restore the original state; confirm intent
     * against upstream before changing. */
    if (temp)
	areq->flags &= ~O_NONBLOCK;

    if (code) {
	return (0);		/* failed, say it is 'unlocked' */
    } else {
	return ((int)OutStatus.lockCount);
    }
}
/* clid - nonzero on sgi sunos osf1 only */
/*
 * HandleFlock - apply a whole-file flock-style operation (acom is a mask of
 * LOCK_SH/LOCK_EX/LOCK_UN, optionally LOCK_NB) to avc on behalf of the
 * caller identified by (clid, current pid) via lockIdSet/lockIdcmp2.
 *
 * Local state: avc->flockCount is >0 for that many shared locks, -1 for one
 * exclusive lock, 0 for none; avc->slocks lists the holders.  The fileserver
 * is only contacted for the first lock (RXAFS_SetLock) and the last unlock
 * (RXAFS_ReleaseLock).  Returns 0 or an errno, filtered by afs_CheckCode.
 */
int
HandleFlock(struct vcache *avc, int acom, struct vrequest *areq, pid_t clid,
	    int onlymine)
{
    struct afs_conn *tc;
    struct SimpleLocks *slp, *tlp, **slpp;
    afs_int32 code;
    struct AFSVolSync tsync;
    afs_int32 lockType;
    struct AFS_FLOCK flock;
    XSTATS_DECLS;
    AFS_STATCNT(HandleFlock);
    code = 0;			/* default when we don't make any network calls */
    lockIdSet(&flock, NULL, clid);

#if defined(AFS_SGI_ENV)
    /* Callers on SGI must already hold the vnode rwlock (see afs_lockctl). */
    osi_Assert(valusema(&avc->vc_rwlock) <= 0);
    osi_Assert(OSI_GET_LOCKID() == avc->vc_rwlockid);
#endif
    ObtainWriteLock(&avc->lock, 118);
    if (acom & LOCK_UN) {
	int stored_segments = 0;
     retry_unlock:

/* defect 3083 */
#ifdef AFS_AIX_ENV
	/* If the lock is held exclusive, then only the owning process
	 * or a child can unlock it. Use pid and ppid because they are
	 * unique identifiers.
	 */
	if ((avc->flockCount < 0) && (getpid() != avc->ownslock)) {
#ifdef AFS_AIX41_ENV
	    if (onlymine || (getppid() != avc->ownslock)) {
#else
	    if (onlymine || (u.u_procp->p_ppid != avc->ownslock)) {
#endif
		ReleaseWriteLock(&avc->lock);
		return 0;
	    }
	}
#endif
	/* If we don't hold any matching lock, the unlock is a no-op. */
	if (lockIdcmp2(&flock, avc, NULL, onlymine, clid)) {
	    ReleaseWriteLock(&avc->lock);
	    return 0;
	}
#ifdef AFS_AIX_ENV
	avc->ownslock = 0;
#endif
	if (avc->flockCount == 0) {
	    ReleaseWriteLock(&avc->lock);
	    return 0 /*ENOTTY*/;	/* no lock held */
	}
	/* unlock the lock */
	if (avc->flockCount > 0) {
	    /* shared locks: drop every entry that matches the caller */
	    slpp = &avc->slocks;
	    for (slp = *slpp; slp;) {
		if (!lockIdcmp2(&flock, avc, slp, onlymine, clid)) {
		    avc->flockCount--;
		    tlp = *slpp = slp->next;
		    osi_FreeSmallSpace(slp);
		    slp = tlp;
		} else {
		    slpp = &slp->next;
		    slp = *slpp;
		}
	    }
	} else if (avc->flockCount == -1) {
	    if (!stored_segments) {
		afs_StoreAllSegments(avc, areq, AFS_SYNC | AFS_VMSYNC);	/* fsync file early */
		/* afs_StoreAllSegments can drop and reacquire the write lock
		 * on avc and GLOCK, so the flocks may be completely different
		 * now. Go back and perform all checks again. */
		stored_segments = 1;
		goto retry_unlock;
	    }
	    avc->flockCount = 0;
	    /* And remove the (only) exclusive lock entry from the list... */
	    osi_FreeSmallSpace(avc->slocks);
	    avc->slocks = 0;
	}
	/* Last local lock gone: tell the fileserver (unless disconnected). */
	if (avc->flockCount == 0) {
	    if (!AFS_IS_DISCONNECTED) {
		struct rx_connection *rxconn;
		do {
		    tc = afs_Conn(&avc->f.fid, areq, SHARED_LOCK, &rxconn);
		    if (tc) {
			XSTATS_START_TIME(AFS_STATS_FS_RPCIDX_RELEASELOCK);
			RX_AFS_GUNLOCK();
			code = RXAFS_ReleaseLock(rxconn, (struct AFSFid *)
						 &avc->f.fid.Fid, &tsync);
			RX_AFS_GLOCK();
			XSTATS_END_TIME;
		    } else
			code = -1;
		} while (afs_Analyze
			 (tc, rxconn, code, &avc->f.fid, areq,
			  AFS_STATS_FS_RPCIDX_RELEASELOCK, SHARED_LOCK,
			  NULL));
	    } else {
		/*printf("Network is dooooooowwwwwwwnnnnnnn\n");*/
		code = ENETDOWN;
	    }
	}
    } else {
	while (1) {		/* set a new lock */
	    /*
	     * Upgrading from shared locks to Exclusive and vice versa
	     * is a bit tricky and we don't really support it yet. But
	     * we try to support the common used one which is upgrade
	     * a shared lock to an exclusive for the same process...
	     */
	    if ((avc->flockCount > 0 && (acom & LOCK_EX))
		|| (avc->flockCount == -1 && (acom & LOCK_SH))) {
		/*
		 * Upgrading from shared locks to an exclusive one:
		 * For now if all the shared locks belong to the
		 * same process then we unlock them on the server
		 * and proceed with the upgrade.  Unless we change the
		 * server's locking interface impl we prohibit from
		 * unlocking other processes's shared locks...
		 * Upgrading from an exclusive lock to a shared one:
		 * Again only allowed to be done by the same process.
		 */
		slpp = &avc->slocks;
		for (slp = *slpp; slp;) {
		    if (!lockIdcmp2
			(&flock, avc, slp, 1 /*!onlymine */ , clid)) {
			if (acom & LOCK_EX)
			    avc->flockCount--;
			else
			    avc->flockCount = 0;
			tlp = *slpp = slp->next;
			osi_FreeSmallSpace(slp);
			slp = tlp;
		    } else {
			/* a lock belonging to someone else blocks us */
			code = EWOULDBLOCK;
			slpp = &slp->next;
			slp = *slpp;
		    }
		}
		if (!code && avc->flockCount == 0) {
		    /* all our locks released locally; release on server too */
		    if (!AFS_IS_DISCONNECTED) {
			struct rx_connection *rxconn;
			do {
			    tc = afs_Conn(&avc->f.fid, areq, SHARED_LOCK, &rxconn);
			    if (tc) {
				XSTATS_START_TIME
				    (AFS_STATS_FS_RPCIDX_RELEASELOCK);
				RX_AFS_GUNLOCK();
				code =
				    RXAFS_ReleaseLock(rxconn,
						      (struct AFSFid *)&avc->
						      f.fid.Fid, &tsync);
				RX_AFS_GLOCK();
				XSTATS_END_TIME;
			    } else
				code = -1;
			} while (afs_Analyze
				 (tc, rxconn, code, &avc->f.fid, areq,
				  AFS_STATS_FS_RPCIDX_RELEASELOCK, SHARED_LOCK,
				  NULL));
		    }
		}
	    } else if (avc->flockCount == -1 && (acom & LOCK_EX)) {
		if (lockIdcmp2(&flock, avc, NULL, 1, clid)) {
		    code = EWOULDBLOCK;
		} else {
		    code = 0;
		    /* We've just re-grabbed an exclusive lock, so we don't
		     * need to contact the fileserver, and we don't need to
		     * add the lock to avc->slocks (since we already have a
		     * lock there). So, we are done. */
		    break;
		}
	    }
	    if (code == 0) {
		/* compatible here, decide if needs to go to file server.  If
		 * we've already got the file locked (and thus read-locked, since
		 * we've already checked for compatibility), we shouldn't send
		 * the call through to the server again */
		if (avc->flockCount == 0) {
		    struct rx_connection *rxconn;
		    /* we're the first on our block, send the call through */
		    lockType = ((acom & LOCK_EX) ? LockWrite : LockRead);
		    if (!AFS_IS_DISCONNECTED) {
			do {
			    tc = afs_Conn(&avc->f.fid, areq, SHARED_LOCK, &rxconn);
			    if (tc) {
				XSTATS_START_TIME(AFS_STATS_FS_RPCIDX_SETLOCK);
				RX_AFS_GUNLOCK();
				code = RXAFS_SetLock(rxconn, (struct AFSFid *)
						     &avc->f.fid.Fid, lockType,
						     &tsync);
				RX_AFS_GLOCK();
				XSTATS_END_TIME;
			    } else
				code = -1;
			} while (afs_Analyze
				 (tc, rxconn, code, &avc->f.fid, areq,
				  AFS_STATS_FS_RPCIDX_SETLOCK, SHARED_LOCK,
				  NULL));
			if ((lockType == LockWrite) && (code == VREADONLY))
			    code = EBADF;	/* per POSIX; VREADONLY == EROFS */
		    } else
			/* XXX - Should probably try and log this when we're
			 * XXX - running with logging enabled. But it's horrid
			 */
			code = 0;	/* pretend we worked - ick!!! */
		} else
		    code = 0;	/* otherwise, pretend things worked */
	    }
	    if (code == 0) {
		/* success: record the lock locally */
		slp = (struct SimpleLocks *)
		    osi_AllocSmallSpace(sizeof(struct SimpleLocks));
		if (acom & LOCK_EX) {

/* defect 3083 */
#ifdef AFS_AIX_ENV
		    /* Record unique id of process owning exclusive lock. */
		    avc->ownslock = getpid();
#endif
		    slp->type = LockWrite;
		    slp->next = NULL;
		    avc->slocks = slp;
		    avc->flockCount = -1;
		} else {
		    slp->type = LockRead;
		    slp->next = avc->slocks;
		    avc->slocks = slp;
		    avc->flockCount++;
		}

		lockIdSet(&flock, slp, clid);
		break;
	    }
	    /* now, if we got EWOULDBLOCK, and we're supposed to wait, we do */
	    if (((code == EWOULDBLOCK) || (code == EAGAIN)
		 || (code == UAEWOULDBLOCK) || (code == UAEAGAIN))
		&& !(acom & LOCK_NB)) {
		/* sleep for a second, allowing interrupts */
		ReleaseWriteLock(&avc->lock);
#if defined(AFS_SGI_ENV)
		AFS_RWUNLOCK((vnode_t *) avc, VRWLOCK_WRITE);
#endif
		code = afs_osi_Wait(1000, NULL, 1);
#if defined(AFS_SGI_ENV)
		AFS_RWLOCK((vnode_t *) avc, VRWLOCK_WRITE);
#endif
		ObtainWriteLock(&avc->lock, 120);
		if (code) {
		    code = EINTR;	/* return this if ^C typed */
		    break;
		}
	    } else
		break;
	}			/* while loop */
    }
    ReleaseWriteLock(&avc->lock);
    code = afs_CheckCode(code, areq, 1);	/* defeat a buggy AIX optimization */
    return code;
}


/* warn a user that a lock has been ignored */
/* timestamp of the last warning we emitted (rate limit: 120s per pid) */
afs_int32 lastWarnTime = 0;	/*
this is used elsewhere */
static afs_int32 lastWarnPid = 0;

/*
 * DoLockWarning - nag the user that a byte-range lock request was not
 * enforced.  Rate-limited: a given pid is warned at most once every two
 * minutes (tracked in the file-level lastWarnTime/lastWarnPid).
 */
static void
DoLockWarning(afs_ucred_t * acred)
{
    afs_int32 now;
    pid_t pid = MyPidxx2Pid(MyPidxx);
    char *procname;

    now = osi_Time();
    AFS_STATCNT(DoLockWarning);

    /* Already warned this pid within the last 120 seconds?  Stay quiet. */
    if ((now < lastWarnTime + 120) && (lastWarnPid == pid))
	return;

    procname = afs_osi_Alloc(256);
    if (!procname)
	return;

    /* Copies process name to allocated procname, see osi_machdeps for
     * details of macro */
    osi_procname(procname, 256);
    procname[255] = '\0';

    /* time to nag the user: remember who and when */
    lastWarnTime = now;
    lastWarnPid = pid;

#ifdef AFS_LINUX26_ENV
    afs_warnuser
	("afs: byte-range locks only enforced for processes on this machine (pid %d (%s), user %ld).\n",
	 pid, procname, (long)afs_cr_uid(acred));
#else
    afs_warnuser
	("afs: byte-range lock/unlock ignored; make sure no one else is running this program (pid %d (%s), user %ld).\n",
	 pid, procname, (long)afs_cr_uid(acred));
#endif

    afs_osi_Free(procname, 256);
}