/* Exemplo n.º 1
 * 0
 */
/* Try to invalidate pages, for "fs flush" or "fs flushv"; or
 * try to free pages, when deleting a file.
 *
 * avc   - the vcache entry whose cached pages should be discarded.
 * acred - caller's credentials (unused here).
 * sync  - sync flag (unused here).
 *
 * Locking:  the vcache entry's lock is held.  It may be dropped and 
 * re-obtained.
 *
 * Since we drop and re-obtain the lock, we can't guarantee that there won't
 * be some pages around when we return, newly created by concurrent activity.
 */
void
osi_VM_TryToSmush(struct vcache *avc, afs_ucred_t *acred, int sync)
{
    /* Drop the vcache write lock and the AFS global lock before calling
     * into the VM layer; the page routines below may block and must not
     * be entered with these held. */
    ReleaseWriteLock(&avc->lock);
    AFS_GUNLOCK();
    /* current remapf restriction - cannot have VOP_RWLOCK */
    osi_Assert(OSI_GET_LOCKID() != avc->vc_rwlockid);
    /* If the file is a mapped regular file, tear down user mappings first
     * so that the pages can actually be tossed. */
    if (((vnode_t *) avc)->v_type == VREG && AFS_VN_MAPPED(((vnode_t *) avc)))
	remapf(((vnode_t *) avc), 0, 0);
    /* Toss every cached page for the whole file range [0, MAXLONG). */
    PTOSSVP(AFSTOV(avc), (off_t) 0, (off_t) MAXLONG);
    /* Re-acquire the locks in the same order callers expect. */
    AFS_GLOCK();
    ObtainWriteLock(&avc->lock, 62);
}
/* Exemplo n.º 2
 * 0
 */
/* Try to store pages to cache, in order to store a file back to the server.
 *
 * avc - the vcache entry whose dirty pages should be pushed back.
 *
 * Locking:  the vcache entry's lock is held.  It will usually be dropped and
 * re-obtained.
 */
void
osi_VM_StoreAllSegments(struct vcache *avc)
{
    int error;
    /* Caller must hold the vnode rwlock (as this thread) and a reference. */
    osi_Assert(valusema(&avc->vc_rwlock) <= 0);
    osi_Assert(OSI_GET_LOCKID() == avc->vc_rwlockid);
    osi_Assert(avc->vrefCount > 0);
    ReleaseWriteLock(&avc->lock);
    /* We may call back into AFS via:
     * pflushvp->chunkpush->do_pdflush->mp_afs_bmap
     */
    AFS_GUNLOCK();

    /* Write out dirty pages list to avoid B_DELWRI buffers. */
    while (VN_GET_DPAGES((vnode_t *) avc)) {
	pdflush(AFSTOV(avc), 0);
    }

    /* Flush all pages in [0, Length) back through the AFS store path;
     * error receives the result of the flush. */
    PFLUSHVP(AFSTOV(avc), (off_t) avc->f.m.Length, (off_t) 0, error);
    AFS_GLOCK();
    if (error) {
	/*
	 * If this fails (due to quota overage, etc.)
	 * what can we do?? we need to sure that
	 * that the VM cache is cleared of dirty pages
	 * We note that pinvalfree ignores write errors & otherwise
	 * does what we want (we don't use this normally since
	 * it also unhashes pages ..)
	 */
	PINVALFREE((vnode_t *) avc, avc->f.m.Length);
    }
    ObtainWriteLock(&avc->lock, 121);
    /* Only warn if the file still has a link; a failed push on a deleted
     * file is expected and uninteresting.
     * NOTE(review): avc is a pointer printed with %x — on 64-bit this
     * truncates the address; consider %p if IRIX cmn_err supports it. */
    if (error && avc->f.m.LinkCount)
	cmn_err(CE_WARN,
		"AFS:Failed to push back pages for vnode 0x%x error %d (from afs_StoreOnLastReference)",
		avc, error);
}
/* Exemplo n.º 3
 * 0
 */
/* HandleFlock: implement BSD-style whole-file advisory locking (flock)
 * for an AFS file, coordinating with the fileserver's lock RPCs.
 *
 * avc      - vcache entry for the file (we take/drop avc->lock here).
 * acom     - LOCK_SH / LOCK_EX / LOCK_UN, possibly ORed with LOCK_NB.
 * areq     - vrequest for making server RPCs on the caller's behalf.
 * clid     - lock-owner id; nonzero on sgi sunos osf1 only.
 * onlymine - if set, only act on locks owned by this caller.
 *
 * Lock bookkeeping (from the code below): avc->flockCount > 0 is the
 * number of shared locks on avc->slocks; flockCount == -1 means one
 * exclusive lock; 0 means no locks held.
 *
 * Returns 0 on success or an errno-style code (EWOULDBLOCK, EINTR, ...),
 * filtered through afs_CheckCode.
 */
int
HandleFlock(struct vcache *avc, int acom, struct vrequest *areq,
	    pid_t clid, int onlymine)
{
    struct afs_conn *tc;
    struct SimpleLocks *slp, *tlp, **slpp;
    afs_int32 code;
    struct AFSVolSync tsync;
    afs_int32 lockType;
    struct AFS_FLOCK flock;
    XSTATS_DECLS;
    AFS_STATCNT(HandleFlock);
    code = 0;			/* default when we don't make any network calls */
    lockIdSet(&flock, NULL, clid);

#if defined(AFS_SGI_ENV)
    osi_Assert(valusema(&avc->vc_rwlock) <= 0);
    osi_Assert(OSI_GET_LOCKID() == avc->vc_rwlockid);
#endif
    ObtainWriteLock(&avc->lock, 118);
    if (acom & LOCK_UN) {
	/* --- Unlock request --- */
	int stored_segments = 0;
     retry_unlock:

/* defect 3083 */

#ifdef AFS_AIX_ENV
	/* If the lock is held exclusive, then only the owning process
	 * or a child can unlock it. Use pid and ppid because they are
	 * unique identifiers.
	 */
	if ((avc->flockCount < 0) && (getpid() != avc->ownslock)) {
#ifdef	AFS_AIX41_ENV
	    if (onlymine || (getppid() != avc->ownslock)) {
#else
	    if (onlymine || (u.u_procp->p_ppid != avc->ownslock)) {
#endif
		ReleaseWriteLock(&avc->lock);
		return 0;
	    }
	}
#endif
	/* If no lock matching this caller exists, there is nothing to do. */
	if (lockIdcmp2(&flock, avc, NULL, onlymine, clid)) {
	    ReleaseWriteLock(&avc->lock);
	    return 0;
	}
#ifdef AFS_AIX_ENV
	avc->ownslock = 0;
#endif
	if (avc->flockCount == 0) {
	    ReleaseWriteLock(&avc->lock);
	    return 0 /*ENOTTY*/;
	    /* no lock held */
	}
	/* unlock the lock */
	if (avc->flockCount > 0) {
	    /* Shared locks: walk the list and drop every entry that
	     * belongs to this caller. */
	    slpp = &avc->slocks;
	    for (slp = *slpp; slp;) {
		if (!lockIdcmp2(&flock, avc, slp, onlymine, clid)) {
		    avc->flockCount--;
		    tlp = *slpp = slp->next;
		    osi_FreeSmallSpace(slp);
		    slp = tlp;
		} else {
		    slpp = &slp->next;
		    slp = *slpp;
		}
	    }
	} else if (avc->flockCount == -1) {
	    /* Exclusive lock: flush dirty data before releasing it, once. */
	    if (!stored_segments) {
		afs_StoreAllSegments(avc, areq, AFS_SYNC | AFS_VMSYNC);	/* fsync file early */
		/* afs_StoreAllSegments can drop and reacquire the write lock
		 * on avc and GLOCK, so the flocks may be completely different
		 * now. Go back and perform all checks again. */
		 stored_segments = 1;
		 goto retry_unlock;
	    }
	    avc->flockCount = 0;
	    /* And remove the (only) exclusive lock entry from the list... */
	    osi_FreeSmallSpace(avc->slocks);
	    avc->slocks = 0;
	}
	/* If the last local lock is gone, release the server-side lock. */
	if (avc->flockCount == 0) {
	    if (!AFS_IS_DISCONNECTED) {
		struct rx_connection *rxconn;
	        do {
		    tc = afs_Conn(&avc->f.fid, areq, SHARED_LOCK, &rxconn);
		    if (tc) {
		        XSTATS_START_TIME(AFS_STATS_FS_RPCIDX_RELEASELOCK);
		        RX_AFS_GUNLOCK();
		        code = RXAFS_ReleaseLock(rxconn, (struct AFSFid *)
					         &avc->f.fid.Fid, &tsync);
		        RX_AFS_GLOCK();
		        XSTATS_END_TIME;
		    } else
		    code = -1;
	        } while (afs_Analyze
		         (tc, rxconn, code, &avc->f.fid, areq,
		          AFS_STATS_FS_RPCIDX_RELEASELOCK, SHARED_LOCK, NULL));
	    } else {
	  	/*printf("Network is dooooooowwwwwwwnnnnnnn\n");*/
	       code = ENETDOWN;
	    }
	}
    } else {
	/* --- Lock request (LOCK_SH or LOCK_EX) --- */
	while (1) {		/* set a new lock */
	    /*
	     * Upgrading from shared locks to Exclusive and vice versa
	     * is a bit tricky and we don't really support it yet. But
	     * we try to support the common used one which is upgrade
	     * a shared lock to an exclusive for the same process...
	     */
	    if ((avc->flockCount > 0 && (acom & LOCK_EX))
		|| (avc->flockCount == -1 && (acom & LOCK_SH))) {
		/*
		 * Upgrading from shared locks to an exclusive one:
		 * For now if all the shared locks belong to the
		 * same process then we unlock them on the server
		 * and proceed with the upgrade.  Unless we change the
		 * server's locking interface impl we prohibit from
		 * unlocking other processes's shared locks...
		 * Upgrading from an exclusive lock to a shared one:
		 * Again only allowed to be done by the same process.
		 */
		slpp = &avc->slocks;
		for (slp = *slpp; slp;) {
		    if (!lockIdcmp2
			(&flock, avc, slp, 1 /*!onlymine */ , clid)) {
			/* This entry is ours: drop it locally. */
			if (acom & LOCK_EX)
			    avc->flockCount--;
			else
			    avc->flockCount = 0;
			tlp = *slpp = slp->next;
			osi_FreeSmallSpace(slp);
			slp = tlp;
		    } else {
			/* Someone else holds a lock: the upgrade blocks. */
			code = EWOULDBLOCK;
			slpp = &slp->next;
			slp = *slpp;
		    }
		}
		/* All local locks were ours and are gone: release the
		 * server lock before re-acquiring with the new type. */
		if (!code && avc->flockCount == 0) {
		    if (!AFS_IS_DISCONNECTED) {
			struct rx_connection *rxconn;
		        do {
			    tc = afs_Conn(&avc->f.fid, areq, SHARED_LOCK, &rxconn);
			    if (tc) {
			        XSTATS_START_TIME
				    (AFS_STATS_FS_RPCIDX_RELEASELOCK);
			        RX_AFS_GUNLOCK();
			        code =
				    RXAFS_ReleaseLock(rxconn,
						      (struct AFSFid *)&avc->
						      f.fid.Fid, &tsync);
			        RX_AFS_GLOCK();
			       XSTATS_END_TIME;
			    } else
			        code = -1;
		        } while (afs_Analyze
			         (tc, rxconn, code, &avc->f.fid, areq,
			          AFS_STATS_FS_RPCIDX_RELEASELOCK, SHARED_LOCK,
			          NULL));
		    }
		}
	    } else if (avc->flockCount == -1 && (acom & LOCK_EX)) {
		/* Exclusive requested while an exclusive is held. */
		if (lockIdcmp2(&flock, avc, NULL, 1, clid)) {
		    code = EWOULDBLOCK;
		} else {
		    code = 0;
		    /* We've just re-grabbed an exclusive lock, so we don't
		     * need to contact the fileserver, and we don't need to
		     * add the lock to avc->slocks (since we already have a
		     * lock there). So, we are done. */
		    break;
		}
	    }
	    if (code == 0) {
		/* compatible here, decide if needs to go to file server.  If
		 * we've already got the file locked (and thus read-locked, since
		 * we've already checked for compatibility), we shouldn't send
		 * the call through to the server again */
		if (avc->flockCount == 0) {
		    struct rx_connection *rxconn;
		    /* we're the first on our block, send the call through */
		    lockType = ((acom & LOCK_EX) ? LockWrite : LockRead);
		    if (!AFS_IS_DISCONNECTED) {
		        do {
			    tc = afs_Conn(&avc->f.fid, areq, SHARED_LOCK, &rxconn);
			    if (tc) {
			        XSTATS_START_TIME(AFS_STATS_FS_RPCIDX_SETLOCK);
			        RX_AFS_GUNLOCK();
			        code = RXAFS_SetLock(rxconn, (struct AFSFid *)
						     &avc->f.fid.Fid, lockType,
						     &tsync);
			        RX_AFS_GLOCK();
			        XSTATS_END_TIME;
			    } else
			        code = -1;
		        } while (afs_Analyze
			         (tc, rxconn, code, &avc->f.fid, areq,
			          AFS_STATS_FS_RPCIDX_SETLOCK, SHARED_LOCK,
			          NULL));
			if ((lockType == LockWrite) && (code == VREADONLY))
			    code = EBADF; /* per POSIX; VREADONLY == EROFS */
		    } else
		        /* XXX - Should probably try and log this when we're
		         * XXX - running with logging enabled. But it's horrid
		         */
		        code = 0; /* pretend we worked - ick!!! */
		} else
		    code = 0;	/* otherwise, pretend things worked */
	    }
	    if (code == 0) {
		/* Record the new lock locally on avc->slocks. */
		slp = (struct SimpleLocks *)
		    osi_AllocSmallSpace(sizeof(struct SimpleLocks));
		if (acom & LOCK_EX) {

/* defect 3083 */

#ifdef AFS_AIX_ENV
		    /* Record unique id of process owning exclusive lock. */
		    avc->ownslock = getpid();
#endif

		    slp->type = LockWrite;
		    slp->next = NULL;
		    avc->slocks = slp;
		    avc->flockCount = -1;
		} else {
		    slp->type = LockRead;
		    slp->next = avc->slocks;
		    avc->slocks = slp;
		    avc->flockCount++;
		}

		lockIdSet(&flock, slp, clid);
		break;
	    }
	    /* now, if we got EWOULDBLOCK, and we're supposed to wait, we do */
	    if (((code == EWOULDBLOCK) || (code == EAGAIN) || 
		 (code == UAEWOULDBLOCK) || (code == UAEAGAIN))
		&& !(acom & LOCK_NB)) {
		/* sleep for a second, allowing interrupts */
		ReleaseWriteLock(&avc->lock);
#if defined(AFS_SGI_ENV)
		AFS_RWUNLOCK((vnode_t *) avc, VRWLOCK_WRITE);
#endif
		code = afs_osi_Wait(1000, NULL, 1);
#if defined(AFS_SGI_ENV)
		AFS_RWLOCK((vnode_t *) avc, VRWLOCK_WRITE);
#endif
		ObtainWriteLock(&avc->lock, 120);
		if (code) {
		    code = EINTR;	/* return this if ^C typed */
		    break;
		}
	    } else
		break;
	}			/* while loop */
    }
    ReleaseWriteLock(&avc->lock);
    code = afs_CheckCode(code, areq, 1);	/* defeat a buggy AIX optimization */
    return code;
}


/* warn a user that a lock has been ignored */
afs_int32 lastWarnTime = 0;	/* this is used elsewhere */
static afs_int32 lastWarnPid = 0;
/* Emit a rate-limited warning that byte-range locks are not (fully)
 * enforced.  A given pid is warned at most once every two minutes. */
static void
DoLockWarning(afs_ucred_t * acred)
{
    afs_int32 now = osi_Time();
    pid_t pid = MyPidxx2Pid(MyPidxx);
    char *procname;

    AFS_STATCNT(DoLockWarning);

    /* Already nagged this process recently?  Then stay quiet. */
    if (now < lastWarnTime + 120 && lastWarnPid == pid)
	return;

    procname = afs_osi_Alloc(256);
    if (!procname)
	return;

    /* Copies process name to allocated procname, see osi_machdeps for
     * details of macro. */
    osi_procname(procname, 256);
    procname[255] = '\0';

    /* Remember who we warned and when, then nag the user. */
    lastWarnTime = now;
    lastWarnPid = pid;
#ifdef AFS_LINUX26_ENV
    afs_warnuser
	("afs: byte-range locks only enforced for processes on this machine (pid %d (%s), user %ld).\n", pid, procname, (long)afs_cr_uid(acred));
#else
    afs_warnuser
	("afs: byte-range lock/unlock ignored; make sure no one else is running this program (pid %d (%s), user %ld).\n", pid, procname, (long)afs_cr_uid(acred));
#endif
    afs_osi_Free(procname, 256);
}