Example #1
0
int
SRXAFSCB_GetCE(struct rx_call *a_call, afs_int32 a_index,
	       struct AFSDBCacheEntry *a_result)
{

    int i;		/*Loop variable */
    struct vcache *tvc;	/*Ptr to current cache entry */
    int code;			/*Return code */
    XSTATS_DECLS;

    RX_AFS_GLOCK();

    XSTATS_START_CMTIME(AFS_STATS_CM_RPCIDX_GETCE);

    AFS_STATCNT(SRXAFSCB_GetCE);
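    /*
     * Walk the vcache hash chains, counting down a_index, until we land on
     * the a_index'th cache entry; tvc is left NULL if a_index runs past the
     * last entry.
     */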
    for (i = 0; i < VCSIZE; i++) {
	for (tvc = afs_vhashT[i]; tvc; tvc = tvc->hnext) {
	    if (a_index == 0)
		goto searchDone;
	    a_index--;
	}			/*Zip through current hash chain */
    }				/*Zip through hash chains */

  searchDone:
    if (tvc == NULL) {
	/*Past EOF */
	code = 1;
	goto fcnDone;
    }

    /*
     * Copy out the located entry.
     */
    a_result->addr = afs_data_pointer_to_int32(tvc);
    a_result->cell = tvc->f.fid.Cell;
    a_result->netFid.Volume = tvc->f.fid.Fid.Volume;
    a_result->netFid.Vnode = tvc->f.fid.Fid.Vnode;
    a_result->netFid.Unique = tvc->f.fid.Fid.Unique;
    a_result->lock.waitStates = tvc->lock.wait_states;
    a_result->lock.exclLocked = tvc->lock.excl_locked;
    a_result->lock.readersReading = tvc->lock.readers_reading;
    a_result->lock.numWaiting = tvc->lock.num_waiting;
#if defined(INSTRUMENT_LOCKS)
    a_result->lock.pid_last_reader = MyPidxx2Pid(tvc->lock.pid_last_reader);
    a_result->lock.pid_writer = MyPidxx2Pid(tvc->lock.pid_writer);
    a_result->lock.src_indicator = tvc->lock.src_indicator;
#else
    /* On osf20, the vcache does not maintain these three fields */
    a_result->lock.pid_last_reader = 0;
    a_result->lock.pid_writer = 0;
    a_result->lock.src_indicator = 0;
#endif /* INSTRUMENT_LOCKS */
#ifdef AFS_64BIT_CLIENT
    a_result->Length = (afs_int32) tvc->f.m.Length & 0xffffffff;
#else /* AFS_64BIT_CLIENT */
    a_result->Length = tvc->f.m.Length;
#endif /* AFS_64BIT_CLIENT */
    a_result->DataVersion = hgetlo(tvc->f.m.DataVersion);
    a_result->callback = afs_data_pointer_to_int32(tvc->callback);	/* XXXX Now a pointer; change it XXXX */
    a_result->cbExpires = tvc->cbExpires;
    if (tvc->f.states & CVInit) {
	a_result->refCount = 1;
    } else {
#ifdef AFS_DARWIN80_ENV
	a_result->refCount = vnode_isinuse(AFSTOV(tvc), 0) ? 1 : 0;	/* XXX fix */
#else
	a_result->refCount = VREFCOUNT(tvc);
#endif
    }
    a_result->opens = tvc->opens;
    a_result->writers = tvc->execsOrWriters;
    a_result->mvstat = tvc->mvstat;
    a_result->states = tvc->f.states;
    code = 0;

    /*
     * Return our results.
     */
  fcnDone:
    XSTATS_END_TIME;

    RX_AFS_GUNLOCK();

    return (code);

}				/*SRXAFSCB_GetCE */
Example #2
0
/* clid - nonzero on sgi sunos osf1 only */
int
HandleFlock(struct vcache *avc, int acom, struct vrequest *areq,
	    pid_t clid, int onlymine)
{
    struct afs_conn *tc;
    struct SimpleLocks *slp, *tlp, **slpp;
    afs_int32 code;
    struct AFSVolSync tsync;
    afs_int32 lockType;
    struct AFS_FLOCK flock;
    XSTATS_DECLS;
    AFS_STATCNT(HandleFlock);
    code = 0;			/* default when we don't make any network calls */
    lockIdSet(&flock, NULL, clid);
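    /* flock now carries the caller's lock identity (pid, or clid on the
     * platforms that pass one); the ownership checks below compare against it. */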

#if defined(AFS_SGI_ENV)
    osi_Assert(valusema(&avc->vc_rwlock) <= 0);
    osi_Assert(OSI_GET_LOCKID() == avc->vc_rwlockid);
#endif
    ObtainWriteLock(&avc->lock, 118);
    if (acom & LOCK_UN) {
	int stored_segments = 0;
     retry_unlock:

/* defect 3083 */

#ifdef AFS_AIX_ENV
	/* If the lock is held exclusive, then only the owning process
	 * or a child can unlock it. Use pid and ppid because they are
	 * unique identifiers.
	 */
	if ((avc->flockCount < 0) && (getpid() != avc->ownslock)) {
#ifdef	AFS_AIX41_ENV
	    if (onlymine || (getppid() != avc->ownslock)) {
#else
	    if (onlymine || (u.u_procp->p_ppid != avc->ownslock)) {
#endif
		ReleaseWriteLock(&avc->lock);
		return 0;
	    }
	}
#endif
	if (lockIdcmp2(&flock, avc, NULL, onlymine, clid)) {
	    ReleaseWriteLock(&avc->lock);
	    return 0;
	}
#ifdef AFS_AIX_ENV
	avc->ownslock = 0;
#endif
	if (avc->flockCount == 0) {
	    ReleaseWriteLock(&avc->lock);
	    return 0 /*ENOTTY*/;
	    /* no lock held */
	}
	/* Unlock: drop the matching shared-lock entries, or the single
	 * exclusive entry, from this vcache's lock list. */
	if (avc->flockCount > 0) {
	    slpp = &avc->slocks;
	    for (slp = *slpp; slp;) {
		if (!lockIdcmp2(&flock, avc, slp, onlymine, clid)) {
		    avc->flockCount--;
		    tlp = *slpp = slp->next;
		    osi_FreeSmallSpace(slp);
		    slp = tlp;
		} else {
		    slpp = &slp->next;
		    slp = *slpp;
		}
	    }
	} else if (avc->flockCount == -1) {
	    if (!stored_segments) {
		afs_StoreAllSegments(avc, areq, AFS_SYNC | AFS_VMSYNC);	/* fsync file early */
		/* afs_StoreAllSegments can drop and reacquire the write lock
		 * on avc and GLOCK, so the flocks may be completely different
		 * now. Go back and perform all checks again. */
		 stored_segments = 1;
		 goto retry_unlock;
	    }
	    avc->flockCount = 0;
	    /* And remove the (only) exclusive lock entry from the list... */
	    osi_FreeSmallSpace(avc->slocks);
	    avc->slocks = 0;
	}
	if (avc->flockCount == 0) {
	    if (!AFS_IS_DISCONNECTED) {
		struct rx_connection *rxconn;
	        do {
		    tc = afs_Conn(&avc->f.fid, areq, SHARED_LOCK, &rxconn);
		    if (tc) {
		        XSTATS_START_TIME(AFS_STATS_FS_RPCIDX_RELEASELOCK);
		        RX_AFS_GUNLOCK();
		        code = RXAFS_ReleaseLock(rxconn, (struct AFSFid *)
					         &avc->f.fid.Fid, &tsync);
		        RX_AFS_GLOCK();
		        XSTATS_END_TIME;
		    } else
			code = -1;
	        } while (afs_Analyze
		         (tc, rxconn, code, &avc->f.fid, areq,
		          AFS_STATS_FS_RPCIDX_RELEASELOCK, SHARED_LOCK, NULL));
	    } else {
	  	/*printf("Network is dooooooowwwwwwwnnnnnnn\n");*/
	       code = ENETDOWN;
	    }
	}
    } else {
	while (1) {		/* set a new lock */
	    /*
	     * Upgrading from shared locks to exclusive and vice versa
	     * is a bit tricky and we don't really support it yet.  But
	     * we do try to support the most common case: upgrading a
	     * shared lock to an exclusive one for the same process.
	     */
	    if ((avc->flockCount > 0 && (acom & LOCK_EX))
		|| (avc->flockCount == -1 && (acom & LOCK_SH))) {
		/*
		 * Upgrading from shared locks to an exclusive one:
		 * For now, if all the shared locks belong to the
		 * same process, we unlock them on the server and
		 * proceed with the upgrade.  Until the server's
		 * locking interface implementation changes, we do
		 * not unlock other processes' shared locks.
		 * Converting an exclusive lock to a shared one:
		 * again, this is only allowed for the same process.
		 */
		slpp = &avc->slocks;
		for (slp = *slpp; slp;) {
		    if (!lockIdcmp2
			(&flock, avc, slp, 1 /*!onlymine */ , clid)) {
			if (acom & LOCK_EX)
			    avc->flockCount--;
			else
			    avc->flockCount = 0;
			tlp = *slpp = slp->next;
			osi_FreeSmallSpace(slp);
			slp = tlp;
		    } else {
			code = EWOULDBLOCK;
			slpp = &slp->next;
			slp = *slpp;
		    }
		}
		if (!code && avc->flockCount == 0) {
		    if (!AFS_IS_DISCONNECTED) {
			struct rx_connection *rxconn;
		        do {
			    tc = afs_Conn(&avc->f.fid, areq, SHARED_LOCK, &rxconn);
			    if (tc) {
			        XSTATS_START_TIME
				    (AFS_STATS_FS_RPCIDX_RELEASELOCK);
			        RX_AFS_GUNLOCK();
			        code =
				    RXAFS_ReleaseLock(rxconn,
						      (struct AFSFid *)&avc->
						      f.fid.Fid, &tsync);
			        RX_AFS_GLOCK();
			       XSTATS_END_TIME;
			    } else
			        code = -1;
		        } while (afs_Analyze
			         (tc, rxconn, code, &avc->f.fid, areq,
			          AFS_STATS_FS_RPCIDX_RELEASELOCK, SHARED_LOCK,
			          NULL));
		    }
		}
	    } else if (avc->flockCount == -1 && (acom & LOCK_EX)) {
		if (lockIdcmp2(&flock, avc, NULL, 1, clid)) {
		    code = EWOULDBLOCK;
		} else {
		    code = 0;
		    /* We've just re-grabbed an exclusive lock, so we don't
		     * need to contact the fileserver, and we don't need to
		     * add the lock to avc->slocks (since we already have a
		     * lock there). So, we are done. */
		    break;
		}
	    }
	    if (code == 0) {
		/* The request is compatible with the locks already held;
		 * decide whether it needs to go to the fileserver.  If
		 * we've already got the file locked (and thus read-locked,
		 * since we've already checked for compatibility), we
		 * shouldn't send the call through to the server again. */
		if (avc->flockCount == 0) {
		    struct rx_connection *rxconn;
		    /* we're the first on our block, send the call through */
		    lockType = ((acom & LOCK_EX) ? LockWrite : LockRead);
		    if (!AFS_IS_DISCONNECTED) {
		        do {
			    tc = afs_Conn(&avc->f.fid, areq, SHARED_LOCK, &rxconn);
			    if (tc) {
			        XSTATS_START_TIME(AFS_STATS_FS_RPCIDX_SETLOCK);
			        RX_AFS_GUNLOCK();
			        code = RXAFS_SetLock(rxconn, (struct AFSFid *)
						     &avc->f.fid.Fid, lockType,
						     &tsync);
			        RX_AFS_GLOCK();
			        XSTATS_END_TIME;
			    } else
			        code = -1;
		        } while (afs_Analyze
			         (tc, rxconn, code, &avc->f.fid, areq,
			          AFS_STATS_FS_RPCIDX_SETLOCK, SHARED_LOCK,
			          NULL));
			if ((lockType == LockWrite) && (code == VREADONLY))
			    code = EBADF; /* per POSIX; VREADONLY == EROFS */
		    } else
		        /* XXX - Should probably try and log this when we're
		         * XXX - running with logging enabled. But it's horrid
		         */
		        code = 0; /* pretend we worked - ick!!! */
		} else
		    code = 0;	/* otherwise, pretend things worked */
	    }
	    if (code == 0) {
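		/* The lock has been granted (locally, and on the server when
		 * needed); record it in this vcache's SimpleLocks list. */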
		slp = (struct SimpleLocks *)
		    osi_AllocSmallSpace(sizeof(struct SimpleLocks));
		if (acom & LOCK_EX) {

/* defect 3083 */

#ifdef AFS_AIX_ENV
		    /* Record unique id of process owning exclusive lock. */
		    avc->ownslock = getpid();
#endif

		    slp->type = LockWrite;
		    slp->next = NULL;
		    avc->slocks = slp;
		    avc->flockCount = -1;
		} else {
		    slp->type = LockRead;
		    slp->next = avc->slocks;
		    avc->slocks = slp;
		    avc->flockCount++;
		}

		lockIdSet(&flock, slp, clid);
		break;
	    }
	    /* now, if we got EWOULDBLOCK, and we're supposed to wait, we do */
	    if (((code == EWOULDBLOCK) || (code == EAGAIN) || 
		 (code == UAEWOULDBLOCK) || (code == UAEAGAIN))
		&& !(acom & LOCK_NB)) {
		/* sleep for a second, allowing interrupts */
		ReleaseWriteLock(&avc->lock);
#if defined(AFS_SGI_ENV)
		AFS_RWUNLOCK((vnode_t *) avc, VRWLOCK_WRITE);
#endif
		code = afs_osi_Wait(1000, NULL, 1);
#if defined(AFS_SGI_ENV)
		AFS_RWLOCK((vnode_t *) avc, VRWLOCK_WRITE);
#endif
		ObtainWriteLock(&avc->lock, 120);
		if (code) {
		    code = EINTR;	/* return this if ^C typed */
		    break;
		}
	    } else
		break;
	}			/* while loop */
    }
    ReleaseWriteLock(&avc->lock);
    code = afs_CheckCode(code, areq, 1);	/* defeat a buggy AIX optimization */
    return code;
}


/* warn a user that a lock has been ignored */
afs_int32 lastWarnTime = 0;	/* this is used elsewhere */
static afs_int32 lastWarnPid = 0;
static void
DoLockWarning(afs_ucred_t * acred)
{
    afs_int32 now;
    pid_t pid = MyPidxx2Pid(MyPidxx);
    char *procname;

    now = osi_Time();

    AFS_STATCNT(DoLockWarning);
    /* rate-limit: warn at most once every 120 seconds for a given pid */
    if (!((now < lastWarnTime + 120) && (lastWarnPid == pid))) {
	procname = afs_osi_Alloc(256);

	if (!procname)
	    return;

	/* Copy the process name into the allocated procname buffer; see osi_machdeps for details of the macro */
	osi_procname(procname, 256);
	procname[255] = '\0';

	/* otherwise, it is time to nag the user */
	lastWarnTime = now;
	lastWarnPid = pid;
#ifdef AFS_LINUX26_ENV
	afs_warnuser
	    ("afs: byte-range locks only enforced for processes on this machine (pid %d (%s), user %ld).\n", pid, procname, (long)afs_cr_uid(acred));
#else
	afs_warnuser
	    ("afs: byte-range lock/unlock ignored; make sure no one else is running this program (pid %d (%s), user %ld).\n", pid, procname, (long)afs_cr_uid(acred));
#endif
	afs_osi_Free(procname, 256);
    }
    return;
}
Example #3
0
int
SRXAFSCB_GetLock(struct rx_call *a_call, afs_int32 a_index,
		 struct AFSDBLock *a_result)
{
    struct ltable *tl;		/*Ptr to lock table entry */
    int nentries;		/*Num entries in table */
    int code;			/*Return code */
    XSTATS_DECLS;

    RX_AFS_GLOCK();

    XSTATS_START_CMTIME(AFS_STATS_CM_RPCIDX_GETLOCK);

    AFS_STATCNT(SRXAFSCB_GetLock);
    nentries = sizeof(ltable) / sizeof(struct ltable);
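    /* Indices [0, nentries) refer to the statically declared locks in ltable;
     * indices [nentries, nentries + afs_cellindex) refer to per-cell locks. */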
    if (a_index < 0 || a_index >= nentries + afs_cellindex) {
	/*
	 * Past EOF
	 */
	code = 1;
    } else if (a_index >= nentries) {
	struct cell *tc = afs_GetCellByIndex(a_index - nentries, 0);
	strcpy(a_result->name, tc->cellName);
	a_result->lock.waitStates =
	    ((struct afs_lock *)&(tc->lock))->wait_states;
	a_result->lock.exclLocked =
	    ((struct afs_lock *)&(tc->lock))->excl_locked;
	a_result->lock.readersReading =
	    ((struct afs_lock *)&(tc->lock))->readers_reading;
	a_result->lock.numWaiting =
	    ((struct afs_lock *)&(tc->lock))->num_waiting;
#ifdef INSTRUMENT_LOCKS
	a_result->lock.pid_last_reader =
	    MyPidxx2Pid(((struct afs_lock *)&(tc->lock))->pid_last_reader);
	a_result->lock.pid_writer =
	    MyPidxx2Pid(((struct afs_lock *)&(tc->lock))->pid_writer);
	a_result->lock.src_indicator =
	    ((struct afs_lock *)&(tc->lock))->src_indicator;
#else
	a_result->lock.pid_last_reader = 0;
	a_result->lock.pid_writer = 0;
	a_result->lock.src_indicator = 0;
#endif
	code = 0;
    } else {
	/*
	 * Found it - copy out its contents.
	 */
	tl = &ltable[a_index];
	strcpy(a_result->name, tl->name);
	a_result->lock.waitStates =
	    ((struct afs_lock *)(tl->addr))->wait_states;
	a_result->lock.exclLocked =
	    ((struct afs_lock *)(tl->addr))->excl_locked;
	a_result->lock.readersReading =
	    ((struct afs_lock *)(tl->addr))->readers_reading;
	a_result->lock.numWaiting =
	    ((struct afs_lock *)(tl->addr))->num_waiting;
#ifdef INSTRUMENT_LOCKS
	a_result->lock.pid_last_reader =
	    MyPidxx2Pid(((struct afs_lock *)(tl->addr))->pid_last_reader);
	a_result->lock.pid_writer =
	    MyPidxx2Pid(((struct afs_lock *)(tl->addr))->pid_writer);
	a_result->lock.src_indicator =
	    ((struct afs_lock *)(tl->addr))->src_indicator;
#else
	a_result->lock.pid_last_reader = 0;
	a_result->lock.pid_writer = 0;
	a_result->lock.src_indicator = 0;
#endif
	code = 0;
    }

    XSTATS_END_TIME;

    RX_AFS_GUNLOCK();

    return (code);

}				/*SRXAFSCB_GetLock */