Example #1
/* Generic write interface */
int
afs_osi_Write(register struct osi_file *afile, afs_int32 offset, void *aptr,
	      afs_int32 asize)
{
    struct AFS_UCRED *oldCred;
    long resid;
    register afs_int32 code;
    AFS_STATCNT(osi_Write);
    if (!afile)
	osi_Panic("afs_osi_Write called with null param");
    if (offset != -1)
	afile->offset = offset;
    AFS_GUNLOCK();
    code =
	gop_rdwr(UIO_WRITE, afile->vnode, (caddr_t) aptr, asize,
		 afile->offset, AFS_UIOSYS, IO_UNIT, &resid);
    AFS_GLOCK();
    if (code == 0) {
	code = asize - resid;
	afile->offset += code;
    } else {
	if (code == ENOSPC)
	    afs_warnuser
		("\n\n\n*** Cache partition is FULL - Decrease cachesize!!! ***\n\n");
	setuerror(code);
	code = -1;
    }
    if (afile->proc) {
	(*afile->proc) (afile, code);
    }
    return code;
}
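
All of the platform variants of afs_osi_Write shown on this page share the same calling convention: an offset of -1 means "write at the handle's current offset", and the return value is the byte count on success or an error indication (-1 or a negative errno, depending on the platform). A minimal caller sketch (a hypothetical helper for illustration, not part of the tree) that checks for short writes might look like this:

/* Hypothetical helper (illustration only): write a whole buffer
 * through the generic interface and report short writes as EIO.
 * afile is assumed to be an already-opened cache file handle. */
static int
osi_write_all(struct osi_file *afile, void *buf, afs_int32 len)
{
    afs_int32 nbytes;

    /* -1 keeps afile->offset; any other value seeks there first */
    nbytes = afs_osi_Write(afile, -1, buf, len);
    if (nbytes < 0)
	return EIO;		/* platform code returns -1 or -errno */
    return (nbytes == len) ? 0 : EIO;
}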
Example #2
/* Generic write interface */
int
afs_osi_Write(struct osi_file *afile, afs_int32 offset, void *aptr,
	      afs_int32 asize)
{
    afs_ucred_t *oldCred;
    ssize_t resid;
    afs_int32 code;
    AFS_STATCNT(osi_Write);
    if (!afile)
	osi_Panic("afs_osi_Write called with null param");
    if (offset != -1)
	afile->offset = offset;
    AFS_GUNLOCK();
    code =
	gop_rdwr(UIO_WRITE, afile->vnode, (caddr_t) aptr, asize,
		 afile->offset, AFS_UIOSYS, 0, 0x7fffffff, &afs_osi_cred,
		 &resid);
    AFS_GLOCK();
    if (code == 0) {
	code = asize - resid;
	afile->offset += code;
    } else {
	if (code == ENOSPC)
	    afs_warnuser
		("\n\n\n*** Cache partition is FULL - Decrease cachesize!!! ***\n\n");
	if (code > 0) {
	    code = -code;
	}
    }
    if (afile->proc) {
	(*afile->proc) (afile, code);
    }
    return code;
}
Example #3
/**
 * forceConnectFS is set whenever we must recompute the connection. UTokensBad
 * is true only if we know that the tokens are bad; we therefore clear this
 * flag when we get a new set of tokens. Having forceConnectFS true and
 * UTokensBad true simultaneously means that the tokens went bad and we're
 * supposed to create a new, unauthenticated, connection.
 *
 * @param aserver Server to connect to.
 * @param aport Connection port.
 * @param acell The cell where all of this happens.
 * @param areq The request.
 * @param aforce Force connection?
 * @param locktype Type of lock to be used.
 * @param rxconn Returns the Rx connection to be used for the call.
 *
 * @return The established connection, or NULL on failure.
 */
struct afs_conn *
afs_ConnByHost(struct server *aserver, unsigned short aport, afs_int32 acell,
	       struct vrequest *areq, int aforce, afs_int32 locktype,
	       struct rx_connection **rxconn)
{
    struct unixuser *tu;
    struct afs_conn *tc = NULL;
    struct srvAddr *sa = NULL;

    *rxconn = NULL;

    AFS_STATCNT(afs_ConnByHost);

    if (AFS_IS_DISCONNECTED && !AFS_IN_SYNC) {
        afs_warnuser("afs_ConnByHost: disconnected\n");
        return NULL;
    }

/*
  1.  look for an existing connection
  2.  create a connection at an address believed to be up
      (if aforce is true, create a connection at the first address)
*/

    tu = afs_GetUser(areq->uid, acell, SHARED_LOCK);

    for (sa = aserver->addr; sa; sa = sa->next_sa) {
	tc = afs_ConnBySA(sa, aport, acell, tu, aforce,
			  0 /*don't create one */ ,
			  locktype, rxconn);
	if (tc)
	    break;
    }

    if (!tc) {
	for (sa = aserver->addr; sa; sa = sa->next_sa) {
	    tc = afs_ConnBySA(sa, aport, acell, tu, aforce,
			      1 /*create one */ ,
			      locktype, rxconn);
	    if (tc)
		break;
	}
    }

    afs_PutUser(tu, SHARED_LOCK);
    return tc;

}				/*afs_ConnByHost */
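
A hedged sketch of a typical caller follows (illustrative only; the AFS_FSPORT constant and the three-argument afs_PutConn() release shown here are assumptions, and connection-release signatures vary between releases, as the other examples on this page show):

/* Illustrative caller, not part of the tree: obtain a connection to
 * aserver's file service and release it again.  AFS_FSPORT and the
 * afs_PutConn() signature used here are assumptions; check the release
 * you are building against. */
static void
conn_by_host_example(struct server *aserver, afs_int32 acell,
		     struct vrequest *areq)
{
    struct afs_conn *tc;
    struct rx_connection *rxconn;

    tc = afs_ConnByHost(aserver, AFS_FSPORT, acell, areq,
			0 /* don't force a down server */,
			SHARED_LOCK, &rxconn);
    if (!tc)
	return;			/* disconnected, or no usable address */

    /* ... issue an RPC over rxconn here ... */

    afs_PutConn(tc, rxconn, SHARED_LOCK);
}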
Example #4
/* Generic write interface */
int
afs_osi_Write(struct osi_file *afile, afs_int32 offset, void *aptr,
	      afs_int32 asize)
{
    afs_ucred_t *oldCred;
    unsigned int resid;
    afs_int32 code;
    AFS_STATCNT(osi_Write);
    if (!afile)
	osi_Panic("afs_osi_Write called with null param");
    if (offset != -1)
	afile->offset = offset;
    /* Note the difference in the way afile->offset is passed (see the comments in gop_rdwr() in afs_aix_subr.c) */
    AFS_GUNLOCK();
#ifdef AFS_64BIT_KERNEL
    code =
	gop_rdwr(UIO_WRITE, afile->vnode, (caddr_t) aptr, asize,
		 &afile->offset, AFS_UIOSYS, NULL, &resid);
#else
    code =
	gop_rdwr(UIO_WRITE, afile->vnode, (caddr_t) aptr, asize,
		 (off_t) & afile->offset, AFS_UIOSYS, NULL, &resid);
#endif
    AFS_GLOCK();
    if (code == 0) {
	if (resid)
	    afs_Trace3(afs_iclSetp, CM_TRACE_WRITEFAILED, ICL_TYPE_INT32,
		       asize, ICL_TYPE_INT32, resid, ICL_TYPE_INT32, code);
	code = asize - resid;
	afile->offset += code;
    } else {
	afs_Trace3(afs_iclSetp, CM_TRACE_WRITEFAILED, ICL_TYPE_INT32, asize,
		   ICL_TYPE_INT32, resid, ICL_TYPE_INT32, code);
	if (code == ENOSPC)
	    afs_warnuser
		("\n\n\n*** Cache partition is FULL - Decrease cachesize!!! ***\n\n");
	setuerror(code);
	if (code > 0) {
	    code = -code;
	}
    }
    if (afile->proc) {
	(*afile->proc) (afile, code);
    }
    return code;
}
Example #5
/* Generic write interface */
int
afs_osi_Write(struct osi_file *afile, afs_int32 offset, void *aptr,
	      afs_int32 asize)
{
    struct uio auio;
    struct iovec iov;
    afs_int32 code;

    memset(&auio, 0, sizeof(auio));
    memset(&iov, 0, sizeof(iov));

    AFS_STATCNT(osi_Write);

    if (!afile) {
	if (afs_shuttingdown == AFS_RUNNING)
	    osi_Panic("afs_osi_Write called with null param");
	else
	    return -EIO;
    }

    if (offset != -1)
	afile->offset = offset;
    setup_uio(&auio, &iov, aptr, afile->offset, asize, UIO_WRITE, AFS_UIOSYS);
    AFS_GUNLOCK();
    code = osi_rdwr(afile, &auio, UIO_WRITE);
    AFS_GLOCK();
    if (code == 0) {
	code = asize - auio.uio_resid;
	afile->offset += code;
    } else {
	if (code == ENOSPC)
	    afs_warnuser
		("\n\n\n*** Cache partition is FULL - Decrease cachesize!!! ***\n\n");
	if (code > 0) {
	    code = -code;
	}
    }

    if (afile->proc)
	(*afile->proc)(afile, code);

    return code;
}
Example #6
/**
 * Connects to a server by its address.
 *
 * @param sap Server address.
 * @param aport Server port.
 * @param acell The cell in which the connection is made.
 * @param tu Connect as this user.
 * @param force_if_down Connect even if the server is marked down.
 * @param create If true, create a new connection when none is cached.
 * @param locktype Specifies type of lock to be used for this function.
 * @param rxconn Returns the Rx connection to be used for the call.
 *
 * @return The connection, or NULL on failure.
 */
struct afs_conn *
afs_ConnBySA(struct srvAddr *sap, unsigned short aport, afs_int32 acell,
	     struct unixuser *tu, int force_if_down, afs_int32 create,
	     afs_int32 locktype, struct rx_connection **rxconn)
{
    int glocked, foundvec;
    struct afs_conn *tc = NULL;
    struct sa_conn_vector *tcv = NULL;
    struct rx_securityClass *csec; /*Security class object */
    int isec; /*Security index */
    int service;

    *rxconn = NULL;

    /* find cached connection */
    ObtainSharedLock(&afs_xconn, 15);
    foundvec = 0;
    for (tcv = sap->conns; tcv; tcv = tcv->next) {
        if (tcv->user == tu && tcv->port == aport) {
            /* return most eligible conn */
            if (!foundvec)
                foundvec = 1;
            UpgradeSToWLock(&afs_xconn, 37);
            tc = find_preferred_connection(tcv, create);
            ConvertWToSLock(&afs_xconn);
            break;
        }
    }

    if (!tc && !create) {
        /* Not found and can't create a new one. */
        ReleaseSharedLock(&afs_xconn);
        return NULL;
    }

    if (AFS_IS_DISCONNECTED && !AFS_IN_SYNC) {
        afs_warnuser("afs_ConnBySA: disconnected\n");
        ReleaseSharedLock(&afs_xconn);
        return NULL;
    }

    if (!foundvec && create) {
	/* No such connection vector exists.  Create one and splice it in.
	 * Make sure the server record has been marked as used (for the purposes
	 * of calculating up & down times, it's now considered to be an
	 * ``active'' server).  Also make sure the server's lastUpdateEvalTime
	 * gets set, marking the time of its ``birth''.
	 */
	UpgradeSToWLock(&afs_xconn, 37);
        new_conn_vector(tcv);

        tcv->user = tu;
        tcv->port = aport;
        tcv->srvr = sap;
        tcv->next = sap->conns;
        sap->conns = tcv;

        /* all struct afs_conn ptrs come from here */
        tc = find_preferred_connection(tcv, create);

	afs_ActivateServer(sap);

	ConvertWToSLock(&afs_xconn);
    } /* end of if (!tcv) */

    if (!tc) {
        /* Not found and no alternatives. */
        ReleaseSharedLock(&afs_xconn);
        return NULL;
    }

    if (tu->states & UTokensBad) {
	/* we may still have an authenticated RPC connection here,
	 * we'll have to create a new, unauthenticated, connection.
	 * Perhaps a better way to do this would be to set
	 * conn->forceConnectFS on all conns when the token first goes
	 * bad, but that's somewhat trickier, due to locking
	 * constraints (though not impossible).
	 */
	if (tc->id && (rx_SecurityClassOf(tc->id) != 0)) {
	    tc->forceConnectFS = 1;	/* force recreation of connection */
	}
	tu->states &= ~UHasTokens;      /* remove the authentication info */
    }

    glocked = ISAFS_GLOCK();
    if (tc->forceConnectFS) {
	UpgradeSToWLock(&afs_xconn, 38);
	csec = (struct rx_securityClass *)0;
	if (tc->id) {
	    if (glocked)
                AFS_GUNLOCK();
            rx_SetConnSecondsUntilNatPing(tc->id, 0);
            rx_DestroyConnection(tc->id);
	    if (glocked)
                AFS_GLOCK();
	}
	/*
	 * Stupid hack to determine if using vldb service or file system
	 * service.
	 */
	if (aport == sap->server->cell->vlport)
	    service = 52;
	else
	    service = 1;
	isec = 0;

	csec = afs_pickSecurityObject(tc, &isec);

	if (glocked)
            AFS_GUNLOCK();
	tc->id = rx_NewConnection(sap->sa_ip, aport, service, csec, isec);
	if (glocked)
            AFS_GLOCK();
	if (service == 52) {
	    rx_SetConnHardDeadTime(tc->id, afs_rx_harddead);
	}
	/* set to a RX_CALL_TIMEOUT error to allow MTU retry to trigger */
	rx_SetServerConnIdleDeadErr(tc->id, RX_CALL_DEAD);
	rx_SetConnIdleDeadTime(tc->id, afs_rx_idledead);

	/*
	 * Only do this for the base connection, not per-user.
	 * Will need to be revisited if/when CB gets security.
	 */
	if ((isec == 0) && (service != 52) && !(tu->states & UTokensBad) &&
	    (tu->viceId == UNDEFVID)
#ifndef UKERNEL /* ukernel runs as just one uid anyway */
	    && (tu->uid == 0)
#endif
	    )
	    rx_SetConnSecondsUntilNatPing(tc->id, 20);

	tc->forceConnectFS = 0;	/* apparently we're appropriately connected now */
	if (csec)
	    rxs_Release(csec);
	ConvertWToSLock(&afs_xconn);
    } /* end of if (tc->forceConnectFS)*/

    *rxconn = tc->id;
    rx_GetConnection(*rxconn);

    ReleaseSharedLock(&afs_xconn);
    return tc;
}
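
When a single address is already known, the vector can be consulted directly; a hedged sketch of such a call follows (this is essentially what afs_ConnByHost() in Example #3 does in its loop; AFS_VLPORT and the afs_PutConn() signature are assumptions):

/* Illustrative only: ask for a connection at one specific address,
 * creating it if it is not already cached.  AFS_VLPORT and the
 * afs_PutConn() call shown here are assumptions. */
static void
conn_by_sa_example(struct srvAddr *sap, afs_int32 acell, struct unixuser *tu)
{
    struct afs_conn *tc;
    struct rx_connection *rxconn;

    tc = afs_ConnBySA(sap, AFS_VLPORT, acell, tu,
		      1 /* force_if_down */, 1 /* create */,
		      SHARED_LOCK, &rxconn);
    if (tc) {
	/* ... use rxconn for the RPC ... */
	afs_PutConn(tc, rxconn, SHARED_LOCK);
    }
}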
Example #7
/*------------------------------------------------------------------------
 * EXPORTED afs_Analyze
 *
 * Description:
 *	Analyze the outcome of an RPC operation, taking whatever support
 *	actions are necessary.
 *
 * Arguments:
 *	aconn : Ptr to the relevant connection on which the call was made.
 *	acode : The return code experienced by the RPC.
 *	afid  : The FID of the file involved in the action.  This argument
 *		may be null if none was involved.
 *	areq  : The request record associated with this operation.
 *      op    : which RPC we are analyzing.
 *      locktype : lock type used on aconn; passed to afs_PutConn() on return.
 *      cellp : pointer to a cell struct.  Must provide either fid or cell.
 *
 * Returns:
 *	Non-zero value if the related RPC operation should be retried,
 *	zero otherwise.
 *
 * Environment:
 *	This routine is typically called in a do-while loop, causing the
 *	embedded RPC operation to be called repeatedly if appropriate
 *	until whatever error condition (if any) is intolerable.
 *
 * Side Effects:
 *	As advertised.
 *
 * NOTE:
 *	The retry return value is used by afs_StoreAllSegments to determine
 *	if this is a temporary or permanent error.
 *------------------------------------------------------------------------*/
int
afs_Analyze(register struct afs_conn *aconn, afs_int32 acode,
	    struct VenusFid *afid, register struct vrequest *areq, int op,
	    afs_int32 locktype, struct cell *cellp)
{
    afs_int32 i;
    struct srvAddr *sa;
    struct server *tsp;
    struct volume *tvp;
    afs_int32 shouldRetry = 0;
    afs_int32 serversleft = 1;
    struct afs_stats_RPCErrors *aerrP;
    afs_int32 markeddown;

 
 
    if (AFS_IS_DISCONNECTED && !AFS_IN_SYNC) {
	/* On reconnection, act as connected. XXX: for now.... */
        /* SXW - This may get very tired after a while. We should try and
	 *       intercept all RPCs before they get here ... */
	/*printf("afs_Analyze: disconnected\n");*/
	afs_FinalizeReq(areq);
	if (aconn) {
	    /* SXW - I suspect that this will _never_ happen - we shouldn't
	     *       get a connection because we're disconnected !!!*/
	    afs_PutConn(aconn, locktype);
	}
	return 0;
    }
    
    AFS_STATCNT(afs_Analyze);
    afs_Trace4(afs_iclSetp, CM_TRACE_ANALYZE, ICL_TYPE_INT32, op,
	       ICL_TYPE_POINTER, aconn, ICL_TYPE_INT32, acode, ICL_TYPE_LONG,
	       areq->uid);

    aerrP = (struct afs_stats_RPCErrors *)0;

    if ((op >= 0) && (op < AFS_STATS_NUM_FS_RPC_OPS))
	aerrP = &(afs_stats_cmfullperf.rpc.fsRPCErrors[op]);

    afs_FinalizeReq(areq);
    if (!aconn && areq->busyCount) {	/* one RPC or more got VBUSY/VRESTARTING */

	tvp = afs_FindVolume(afid, READ_LOCK);
	if (tvp) {
	    afs_warnuser("afs: Waiting for busy volume %u (%s) in cell %s\n",
			 (afid ? afid->Fid.Volume : 0),
			 (tvp->name ? tvp->name : ""),
			 ((tvp->serverHost[0]
			   && tvp->serverHost[0]->cell) ? tvp->serverHost[0]->
			  cell->cellName : ""));

	    for (i = 0; i < MAXHOSTS; i++) {
		if (tvp->status[i] != not_busy && tvp->status[i] != offline) {
		    tvp->status[i] = not_busy;
		}
		if (tvp->status[i] == not_busy)
		    shouldRetry = 1;
	    }
	    afs_PutVolume(tvp, READ_LOCK);
	} else {
	    afs_warnuser("afs: Waiting for busy volume %u\n",
			 (afid ? afid->Fid.Volume : 0));
	}

	if (areq->busyCount > 100) {
	    if (aerrP)
		(aerrP->err_Volume)++;
	    areq->volumeError = VOLBUSY;
	    shouldRetry = 0;
	} else {
	    VSleep(afs_BusyWaitPeriod);	/* poll periodically */
	}
	if (shouldRetry != 0)
	    areq->busyCount++;

	return shouldRetry;	/* should retry */
    }

    if (!aconn || !aconn->srvr) {
	if (!areq->volumeError) {
	    if (aerrP)
		(aerrP->err_Network)++;
	    if (hm_retry_int && !(areq->flags & O_NONBLOCK) &&	/* "hard" mount */
		((afid && afs_IsPrimaryCellNum(afid->Cell))
		 || (cellp && afs_IsPrimaryCell(cellp)))) {
		if (!afid) {
		    afs_warnuser
			("afs: hard-mount waiting for a vlserver to return to service\n");
		    VSleep(hm_retry_int);
		    afs_CheckServers(1, cellp);
		    shouldRetry = 1;
		} else {
		    tvp = afs_FindVolume(afid, READ_LOCK);
		    if (!tvp || (tvp->states & VRO)) {
			shouldRetry = hm_retry_RO;
		    } else {
			shouldRetry = hm_retry_RW;
		    }
		    if (tvp)
			afs_PutVolume(tvp, READ_LOCK);
		    if (shouldRetry) {
			afs_warnuser
			    ("afs: hard-mount waiting for volume %u\n",
			     afid->Fid.Volume);
			VSleep(hm_retry_int);
			afs_CheckServers(1, cellp);
		    }
		}
	    } /* if (hm_retry_int ... */
	    else {
		areq->networkError = 1;
	    }
	}
	return shouldRetry;
    }

    /* Find server associated with this connection. */
    sa = aconn->srvr;
    tsp = sa->server;

    /* Before we do anything with acode, make sure we translate it back to
     * a system error */
    if ((acode & ~0xff) == ERROR_TABLE_BASE_uae)
	acode = et_to_sys_error(acode);

    if (acode == 0) {
	/* If we previously took an error, mark this volume not busy */
	if (areq->volumeError) {
	    tvp = afs_FindVolume(afid, READ_LOCK);
	    if (tvp) {
		for (i = 0; i < MAXHOSTS; i++) {
		    if (tvp->serverHost[i] == tsp) {
			tvp->status[i] = not_busy;
		    }
		}
		afs_PutVolume(tvp, READ_LOCK);
	    }
	}

	afs_PutConn(aconn, locktype);
	return 0;
    }

    /* If network troubles, mark server as having bogued out again. */
    /* VRESTARTING is < 0 because of backward compatibility issues 
     * with 3.4 file servers and older cache managers */
#ifdef AFS_64BIT_CLIENT
    if (acode == -455)
	acode = 455;
#endif /* AFS_64BIT_CLIENT */
    if ((acode < 0) && (acode != VRESTARTING)) {
	if (acode == RX_CALL_TIMEOUT) {
	    serversleft = afs_BlackListOnce(areq, afid, tsp);
	    areq->idleError++;
	    if (serversleft) {
		shouldRetry = 1;
	    } else {
		shouldRetry = 0;
	    }
	    /* By doing this, we avoid ever marking a server down
	     * in an idle timeout case. That's because the server is 
	     * still responding and may only be letting a single vnode
	     * time out. We otherwise risk having the server continually
	     * be marked down, then up, then down again... 
	     */
	    goto out;
	} 
	markeddown = afs_ServerDown(sa);
	ForceNewConnections(sa); /**multi homed clients lock:afs_xsrvAddr? */
	if (aerrP)
	    (aerrP->err_Server)++;
#if 0
	/* retry *once* when the server is timed out in case of NAT */
	if (markeddown && acode == RX_CALL_DEAD) {
	    aconn->forceConnectFS = 1;
	    shouldRetry = 1;
	}
#endif
    }

    if (acode == VBUSY || acode == VRESTARTING) {
	if (acode == VBUSY) {
	    areq->busyCount++;
	    if (aerrP)
		(aerrP->err_VolumeBusies)++;
	} else
	    areq->busyCount = 1;

	tvp = afs_FindVolume(afid, READ_LOCK);
	if (tvp) {
	    for (i = 0; i < MAXHOSTS; i++) {
		if (tvp->serverHost[i] == tsp) {
		    tvp->status[i] = rdwr_busy;	/* can't tell which yet */
		    /* to tell which, have to look at the op code. */
		}
	    }
	    afs_PutVolume(tvp, READ_LOCK);
	} else {
	    afs_warnuser("afs: Waiting for busy volume %u in cell %s\n",
			 (afid ? afid->Fid.Volume : 0), tsp->cell->cellName);
	    VSleep(afs_BusyWaitPeriod);	/* poll periodically */
	}
	shouldRetry = 1;
	acode = 0;
    } else if (acode == VICETOKENDEAD
	       || (acode & ~0xff) == ERROR_TABLE_BASE_RXK) {
	/* any rxkad error is treated as token expiration */
	struct unixuser *tu;
	/*
	 * I'm calling these errors protection errors, since they involve
	 * faulty authentication.
	 */
	if (aerrP)
	    (aerrP->err_Protection)++;

	tu = afs_FindUser(areq->uid, tsp->cell->cellNum, READ_LOCK);
	if (tu) {
	    if (acode == VICETOKENDEAD) {
		aconn->forceConnectFS = 1;
	    } else if (acode == RXKADEXPIRED) {
		aconn->forceConnectFS = 0;	/* don't check until new tokens set */
		aconn->user->states |= UTokensBad;
		afs_warnuser
		    ("afs: Tokens for user of AFS id %d for cell %s have expired\n",
		     tu->vid, aconn->srvr->server->cell->cellName);
	    } else {
		serversleft = afs_BlackListOnce(areq, afid, tsp);
		areq->tokenError++;

		if (serversleft) {
		    afs_warnuser
			("afs: Tokens for user of AFS id %d for cell %s: rxkad error=%d\n",
			 tu->vid, aconn->srvr->server->cell->cellName, acode);
		    shouldRetry = 1;
		} else {
		    areq->tokenError = 0;
		    aconn->forceConnectFS = 0;	/* don't check until new tokens set */
		    aconn->user->states |= UTokensBad;
		    afs_warnuser
			("afs: Tokens for user of AFS id %d for cell %s are discarded (rxkad error=%d)\n",
			 tu->vid, aconn->srvr->server->cell->cellName, acode);
		}
	    }
	    afs_PutUser(tu, READ_LOCK);
	} else {
	    /* The else case shouldn't be possible and should probably be replaced by a panic? */
	    if (acode == VICETOKENDEAD) {
		aconn->forceConnectFS = 1;
	    } else if (acode == RXKADEXPIRED) {
		aconn->forceConnectFS = 0;	/* don't check until new tokens set */
		aconn->user->states |= UTokensBad;
		afs_warnuser
		    ("afs: Tokens for user %d for cell %s have expired\n",
		     areq->uid, aconn->srvr->server->cell->cellName);
	    } else {
		aconn->forceConnectFS = 0;	/* don't check until new tokens set */
		aconn->user->states |= UTokensBad;
		afs_warnuser
		    ("afs: Tokens for user %d for cell %s are discarded (rxkad error = %d)\n",
		     areq->uid, aconn->srvr->server->cell->cellName, acode);
	    }
	}
	shouldRetry = 1;	/* Try again (as root). */
    }
    /* Check for access violation. */
    else if (acode == EACCES) {
	/* should mark access error in non-existent per-user global structure */
	if (aerrP)
	    (aerrP->err_Protection)++;
	areq->accessError = 1;
	if (op == AFS_STATS_FS_RPCIDX_STOREDATA)
	    areq->permWriteError = 1;
	shouldRetry = 0;
    }
    /* check for ubik errors; treat them like crashed servers */
    else if (acode >= ERROR_TABLE_BASE_U && acode < ERROR_TABLE_BASE_U + 255) {
	afs_ServerDown(sa);
	if (aerrP)
	    (aerrP->err_Server)++;
	shouldRetry = 1;	/* retryable (maybe one is working) */
	VSleep(1);		/* just in case */
    }
    /* Check for bad volume data base / missing volume. */
    else if (acode == VSALVAGE || acode == VOFFLINE || acode == VNOVOL
	     || acode == VNOSERVICE || acode == VMOVED) {
	struct cell *tcell;
	int same;

	shouldRetry = 1;
	areq->volumeError = VOLMISSING;
	if (aerrP)
	    (aerrP->err_Volume)++;
	if (afid && (tcell = afs_GetCell(afid->Cell, 0))) {
	    same = VLDB_Same(afid, areq);
	    tvp = afs_FindVolume(afid, READ_LOCK);
	    if (tvp) {
		for (i = 0; i < MAXHOSTS && tvp->serverHost[i]; i++) {
		    if (tvp->serverHost[i] == tsp) {
			if (tvp->status[i] == end_not_busy)
			    tvp->status[i] = offline;
			else
			    tvp->status[i]++;
		    } else if (!same) {
			tvp->status[i] = not_busy;	/* reset the others */
		    }
		}
		afs_PutVolume(tvp, READ_LOCK);
	    }
	}
    } else if (acode >= ERROR_TABLE_BASE_VL && acode <= ERROR_TABLE_BASE_VL + 255) {	/* vlserver errors */
	shouldRetry = 0;
	areq->volumeError = VOLMISSING;
    } else if (acode >= 0) {
	if (aerrP)
	    (aerrP->err_Other)++;
	if (op == AFS_STATS_FS_RPCIDX_STOREDATA)
	    areq->permWriteError = 1;
	shouldRetry = 0;	/* Other random Vice error. */
    } else if (acode == RX_MSGSIZE) {	/* same meaning as EMSGSIZE... */
	VSleep(1);		/* Just a hack for desperate times. */
	if (aerrP)
	    (aerrP->err_Other)++;
	shouldRetry = 1;	/* packet was too big, please retry call */
    }

    if (acode < 0 && acode != RX_MSGSIZE && acode != VRESTARTING) {
	/* If we get here, code < 0 and we have network/Server troubles.
	 * areq->networkError is not set here, since we always
	 * retry in case there is another server.  However, if we find
	 * no connection (aconn == 0) we set the networkError flag.
	 */
	afs_MarkServerUpOrDown(sa, SRVR_ISDOWN);
	if (aerrP)
	    (aerrP->err_Server)++;
	VSleep(1);		/* Just a hack for desperate times. */
	shouldRetry = 1;
    }
out:
    /* now unlock the connection and return */
    afs_PutConn(aconn, locktype);
    return (shouldRetry);
}				/*afs_Analyze */
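
The "do-while loop" idiom described in the header comment looks roughly like this in practice. The sketch below targets this example's rxconn-less signature; the wrapper function is illustrative, RXAFS_FetchStatus stands in for an arbitrary RPC, and newer releases also pass an rx_connection to afs_Analyze, as Example #9 shows.

/* Sketch of the retry idiom described above.  The wrapper is not part
 * of the tree; afs_Conn(), RXAFS_FetchStatus() and the statistics
 * index are used as they appear in cache-manager code of this vintage. */
static int
fetch_status_with_retry(struct VenusFid *afid, struct vrequest *areq,
			struct AFSFetchStatus *aoutstatus)
{
    struct afs_conn *tc;
    struct AFSCallBack callback;
    struct AFSVolSync tsync;
    afs_int32 code;

    do {
	tc = afs_Conn(afid, areq, SHARED_LOCK);
	if (tc) {
	    RX_AFS_GUNLOCK();
	    code = RXAFS_FetchStatus(tc->id, (struct AFSFid *)&afid->Fid,
				     aoutstatus, &callback, &tsync);
	    RX_AFS_GLOCK();
	} else
	    code = -1;
    } while (afs_Analyze(tc, code, afid, areq,
			 AFS_STATS_FS_RPCIDX_FETCHSTATUS, SHARED_LOCK, NULL));

    return code;
}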
Example #8
/**
 * Connects to a server by its address.
 *
 * @param sap Server address.
 * @param aport Server port.
 * @param acell The cell in which the connection is made.
 * @param tu Connect as this user.
 * @param force_if_down Connect even if the server is marked down.
 * @param create If true, create a new connection when none is cached.
 * @param locktype Specifies type of lock to be used for this function.
 *
 * @return The connection, or NULL on failure.
 */
struct afs_conn *
afs_ConnBySA(struct srvAddr *sap, unsigned short aport, afs_int32 acell,
	     struct unixuser *tu, int force_if_down, afs_int32 create,
	     afs_int32 locktype)
{
    struct afs_conn *tc = 0;
    struct rx_securityClass *csec;	/*Security class object */
    int isec;			/*Security index */
    int service;

    if (!sap || ((sap->sa_flags & SRVR_ISDOWN) && !force_if_down)) {
	/* sa is known down, and we don't want to force it.  */
	return NULL;
    }

    ObtainSharedLock(&afs_xconn, 15);
    /* Get conn by port and user. */
    for (tc = sap->conns; tc; tc = tc->next) {
	if (tc->user == tu && tc->port == aport) {
	    break;
	}
    }

    if (!tc && !create) {
	/* Not found and can't create a new one. */
	ReleaseSharedLock(&afs_xconn);
	return NULL;
    }
    
    if (AFS_IS_DISCONNECTED && !AFS_IN_SYNC) {
        afs_warnuser("afs_ConnBySA: disconnected\n");
        ReleaseSharedLock(&afs_xconn);
        return NULL;
    }

    if (!tc) {
	/* No such connection structure exists.  Create one and splice it in.
	 * Make sure the server record has been marked as used (for the purposes
	 * of calculating up & down times, it's now considered to be an
	 * ``active'' server).  Also make sure the server's lastUpdateEvalTime
	 * gets set, marking the time of its ``birth''.
	 */
	UpgradeSToWLock(&afs_xconn, 37);
	tc = (struct afs_conn *)afs_osi_Alloc(sizeof(struct afs_conn));
	memset(tc, 0, sizeof(struct afs_conn));

	tc->user = tu;
	tc->port = aport;
	tc->srvr = sap;
	tc->refCount = 0;	/* bumped below */
	tc->forceConnectFS = 1;
	tc->id = (struct rx_connection *)0;
	tc->next = sap->conns;
	sap->conns = tc;
	afs_ActivateServer(sap);

	ConvertWToSLock(&afs_xconn);
    } /* end of if (!tc) */
    tc->refCount++;

    if (tu->states & UTokensBad) {
	/* we may still have an authenticated RPC connection here,
	 * we'll have to create a new, unauthenticated, connection.
	 * Perhaps a better way to do this would be to set
	 * conn->forceConnectFS on all conns when the token first goes
	 * bad, but that's somewhat trickier, due to locking
	 * constraints (though not impossible).
	 */
	if (tc->id && (rx_SecurityClassOf(tc->id) != 0)) {
	    tc->forceConnectFS = 1;	/* force recreation of connection */
	}
	tu->vid = UNDEFVID;	/* forcibly disconnect the authentication info */
    }

    if (tc->forceConnectFS) {
	UpgradeSToWLock(&afs_xconn, 38);
	csec = (struct rx_securityClass *)0;
	if (tc->id) {
	    AFS_GUNLOCK();
	    rx_DestroyConnection(tc->id);
	    AFS_GLOCK();
	}
	/*
	 * Stupid hack to determine if using vldb service or file system
	 * service.
	 */
	if (aport == sap->server->cell->vlport)
	    service = 52;
	else
	    service = 1;
	isec = 0;

	csec = afs_pickSecurityObject(tc, &isec);

	AFS_GUNLOCK();
	tc->id = rx_NewConnection(sap->sa_ip, aport, service, csec, isec);
	AFS_GLOCK();
	if (service == 52) {
	    rx_SetConnHardDeadTime(tc->id, afs_rx_harddead);
	}
	/* set to a RX_CALL_TIMEOUT error to allow MTU retry to trigger */
	rx_SetServerConnIdleDeadErr(tc->id, RX_CALL_DEAD);
	rx_SetConnIdleDeadTime(tc->id, afs_rx_idledead);
	rx_SetMsgsizeRetryErr(tc->id, RX_MSGSIZE);

	/*
	 * Only do this for the base connection, not per-user.
	 * Will need to be revisited if/when CB gets security.
	 */
	if ((isec == 0) && (service != 52) && !(tu->states & UTokensBad) &&
	    (tu->vid == UNDEFVID))
	    rx_SetConnSecondsUntilNatPing(tc->id, 20);

	tc->forceConnectFS = 0;	/* apparently we're appropriately connected now */
	if (csec)
	    rxs_Release(csec);
	ConvertWToSLock(&afs_xconn);
    } /* end of if (tc->forceConnectFS)*/

    ReleaseSharedLock(&afs_xconn);
    return tc;
}
Example #9
/* clid - nonzero on sgi sunos osf1 only */
int
HandleFlock(struct vcache *avc, int acom, struct vrequest *areq,
	    pid_t clid, int onlymine)
{
    struct afs_conn *tc;
    struct SimpleLocks *slp, *tlp, **slpp;
    afs_int32 code;
    struct AFSVolSync tsync;
    afs_int32 lockType;
    struct AFS_FLOCK flock;
    XSTATS_DECLS;
    AFS_STATCNT(HandleFlock);
    code = 0;			/* default when we don't make any network calls */
    lockIdSet(&flock, NULL, clid);

#if defined(AFS_SGI_ENV)
    osi_Assert(valusema(&avc->vc_rwlock) <= 0);
    osi_Assert(OSI_GET_LOCKID() == avc->vc_rwlockid);
#endif
    ObtainWriteLock(&avc->lock, 118);
    if (acom & LOCK_UN) {
	int stored_segments = 0;
     retry_unlock:

/* defect 3083 */

#ifdef AFS_AIX_ENV
	/* If the lock is held exclusive, then only the owning process
	 * or a child can unlock it. Use pid and ppid because they are
	 * unique identifiers.
	 */
	if ((avc->flockCount < 0) && (getpid() != avc->ownslock)) {
#ifdef	AFS_AIX41_ENV
	    if (onlymine || (getppid() != avc->ownslock)) {
#else
	    if (onlymine || (u.u_procp->p_ppid != avc->ownslock)) {
#endif
		ReleaseWriteLock(&avc->lock);
		return 0;
	    }
	}
#endif
	if (lockIdcmp2(&flock, avc, NULL, onlymine, clid)) {
	    ReleaseWriteLock(&avc->lock);
	    return 0;
	}
#ifdef AFS_AIX_ENV
	avc->ownslock = 0;
#endif
	if (avc->flockCount == 0) {
	    ReleaseWriteLock(&avc->lock);
	    return 0 /*ENOTTY*/;
	    /* no lock held */
	}
	/* unlock the lock */
	if (avc->flockCount > 0) {
	    slpp = &avc->slocks;
	    for (slp = *slpp; slp;) {
		if (!lockIdcmp2(&flock, avc, slp, onlymine, clid)) {
		    avc->flockCount--;
		    tlp = *slpp = slp->next;
		    osi_FreeSmallSpace(slp);
		    slp = tlp;
		} else {
		    slpp = &slp->next;
		    slp = *slpp;
		}
	    }
	} else if (avc->flockCount == -1) {
	    if (!stored_segments) {
		afs_StoreAllSegments(avc, areq, AFS_SYNC | AFS_VMSYNC);	/* fsync file early */
		/* afs_StoreAllSegments can drop and reacquire the write lock
		 * on avc and GLOCK, so the flocks may be completely different
		 * now. Go back and perform all checks again. */
		 stored_segments = 1;
		 goto retry_unlock;
	    }
	    avc->flockCount = 0;
	    /* And remove the (only) exclusive lock entry from the list... */
	    osi_FreeSmallSpace(avc->slocks);
	    avc->slocks = 0;
	}
	if (avc->flockCount == 0) {
	    if (!AFS_IS_DISCONNECTED) {
		struct rx_connection *rxconn;
	        do {
		    tc = afs_Conn(&avc->f.fid, areq, SHARED_LOCK, &rxconn);
		    if (tc) {
		        XSTATS_START_TIME(AFS_STATS_FS_RPCIDX_RELEASELOCK);
		        RX_AFS_GUNLOCK();
		        code = RXAFS_ReleaseLock(rxconn, (struct AFSFid *)
					         &avc->f.fid.Fid, &tsync);
		        RX_AFS_GLOCK();
		        XSTATS_END_TIME;
		    } else
			code = -1;
	        } while (afs_Analyze
		         (tc, rxconn, code, &avc->f.fid, areq,
		          AFS_STATS_FS_RPCIDX_RELEASELOCK, SHARED_LOCK, NULL));
	    } else {
	  	/*printf("Network is dooooooowwwwwwwnnnnnnn\n");*/
	       code = ENETDOWN;
	    }
	}
    } else {
	while (1) {		/* set a new lock */
	    /*
	     * Upgrading from shared locks to exclusive and vice versa
	     * is a bit tricky and we don't really support it yet. But
	     * we try to support the most common case, which is upgrading
	     * a shared lock to an exclusive one for the same process...
	     */
	    if ((avc->flockCount > 0 && (acom & LOCK_EX))
		|| (avc->flockCount == -1 && (acom & LOCK_SH))) {
		/*
		 * Upgrading from shared locks to an exclusive one:
		 * For now, if all the shared locks belong to the
		 * same process, we unlock them on the server
		 * and proceed with the upgrade.  Unless we change the
		 * server's locking interface implementation, we do not
		 * allow unlocking other processes' shared locks...
		 * Downgrading from an exclusive lock to a shared one:
		 * Again, only allowed to be done by the same process.
		 */
		slpp = &avc->slocks;
		for (slp = *slpp; slp;) {
		    if (!lockIdcmp2
			(&flock, avc, slp, 1 /*!onlymine */ , clid)) {
			if (acom & LOCK_EX)
			    avc->flockCount--;
			else
			    avc->flockCount = 0;
			tlp = *slpp = slp->next;
			osi_FreeSmallSpace(slp);
			slp = tlp;
		    } else {
			code = EWOULDBLOCK;
			slpp = &slp->next;
			slp = *slpp;
		    }
		}
		if (!code && avc->flockCount == 0) {
		    if (!AFS_IS_DISCONNECTED) {
			struct rx_connection *rxconn;
		        do {
			    tc = afs_Conn(&avc->f.fid, areq, SHARED_LOCK, &rxconn);
			    if (tc) {
			        XSTATS_START_TIME
				    (AFS_STATS_FS_RPCIDX_RELEASELOCK);
			        RX_AFS_GUNLOCK();
			        code =
				    RXAFS_ReleaseLock(rxconn,
						      (struct AFSFid *)&avc->
						      f.fid.Fid, &tsync);
			        RX_AFS_GLOCK();
			       XSTATS_END_TIME;
			    } else
			        code = -1;
		        } while (afs_Analyze
			         (tc, rxconn, code, &avc->f.fid, areq,
			          AFS_STATS_FS_RPCIDX_RELEASELOCK, SHARED_LOCK,
			          NULL));
		    }
		}
	    } else if (avc->flockCount == -1 && (acom & LOCK_EX)) {
		if (lockIdcmp2(&flock, avc, NULL, 1, clid)) {
		    code = EWOULDBLOCK;
		} else {
		    code = 0;
		    /* We've just re-grabbed an exclusive lock, so we don't
		     * need to contact the fileserver, and we don't need to
		     * add the lock to avc->slocks (since we already have a
		     * lock there). So, we are done. */
		    break;
		}
	    }
	    if (code == 0) {
		/* compatible here, decide if needs to go to file server.  If
		 * we've already got the file locked (and thus read-locked, since
		 * we've already checked for compatibility), we shouldn't send
		 * the call through to the server again */
		if (avc->flockCount == 0) {
		    struct rx_connection *rxconn;
		    /* we're the first on our block, send the call through */
		    lockType = ((acom & LOCK_EX) ? LockWrite : LockRead);
		    if (!AFS_IS_DISCONNECTED) {
		        do {
			    tc = afs_Conn(&avc->f.fid, areq, SHARED_LOCK, &rxconn);
			    if (tc) {
			        XSTATS_START_TIME(AFS_STATS_FS_RPCIDX_SETLOCK);
			        RX_AFS_GUNLOCK();
			        code = RXAFS_SetLock(rxconn, (struct AFSFid *)
						     &avc->f.fid.Fid, lockType,
						     &tsync);
			        RX_AFS_GLOCK();
			        XSTATS_END_TIME;
			    } else
			        code = -1;
		        } while (afs_Analyze
			         (tc, rxconn, code, &avc->f.fid, areq,
			          AFS_STATS_FS_RPCIDX_SETLOCK, SHARED_LOCK,
			          NULL));
			if ((lockType == LockWrite) && (code == VREADONLY))
			    code = EBADF; /* per POSIX; VREADONLY == EROFS */
		    } else
		        /* XXX - Should probably try and log this when we're
		         * XXX - running with logging enabled. But it's horrid
		         */
		        code = 0; /* pretend we worked - ick!!! */
		} else
		    code = 0;	/* otherwise, pretend things worked */
	    }
	    if (code == 0) {
		slp = (struct SimpleLocks *)
		    osi_AllocSmallSpace(sizeof(struct SimpleLocks));
		if (acom & LOCK_EX) {

/* defect 3083 */

#ifdef AFS_AIX_ENV
		    /* Record unique id of process owning exclusive lock. */
		    avc->ownslock = getpid();
#endif

		    slp->type = LockWrite;
		    slp->next = NULL;
		    avc->slocks = slp;
		    avc->flockCount = -1;
		} else {
		    slp->type = LockRead;
		    slp->next = avc->slocks;
		    avc->slocks = slp;
		    avc->flockCount++;
		}

		lockIdSet(&flock, slp, clid);
		break;
	    }
	    /* now, if we got EWOULDBLOCK, and we're supposed to wait, we do */
	    if (((code == EWOULDBLOCK) || (code == EAGAIN) || 
		 (code == UAEWOULDBLOCK) || (code == UAEAGAIN))
		&& !(acom & LOCK_NB)) {
		/* sleep for a second, allowing interrupts */
		ReleaseWriteLock(&avc->lock);
#if defined(AFS_SGI_ENV)
		AFS_RWUNLOCK((vnode_t *) avc, VRWLOCK_WRITE);
#endif
		code = afs_osi_Wait(1000, NULL, 1);
#if defined(AFS_SGI_ENV)
		AFS_RWLOCK((vnode_t *) avc, VRWLOCK_WRITE);
#endif
		ObtainWriteLock(&avc->lock, 120);
		if (code) {
		    code = EINTR;	/* return this if ^C typed */
		    break;
		}
	    } else
		break;
	}			/* while loop */
    }
    ReleaseWriteLock(&avc->lock);
    code = afs_CheckCode(code, areq, 1);	/* defeat a buggy AIX optimization */
    return code;
}
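
A hedged sketch of how a platform lock entry point might drive HandleFlock() follows; the wrapper name and the way clid is obtained are illustrative, and only calls shown in the example above are used.

/* Illustrative wrapper, not part of the tree: take a non-blocking
 * exclusive whole-file lock via HandleFlock() and drop it again. */
static int
flock_example(struct vcache *avc, struct vrequest *areq, pid_t clid)
{
    afs_int32 code;

    /* LOCK_NB turns a conflicting lock into EWOULDBLOCK instead of
     * sleeping in the retry loop above. */
    code = HandleFlock(avc, LOCK_EX | LOCK_NB, areq, clid, 0 /*onlymine*/);
    if (code)
	return code;

    /* ... the caller now holds the (advisory) whole-file lock ... */

    return HandleFlock(avc, LOCK_UN, areq, clid, 0 /*onlymine*/);
}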


/* warn a user that a lock has been ignored */
afs_int32 lastWarnTime = 0;	/* this is used elsewhere */
static afs_int32 lastWarnPid = 0;
static void
DoLockWarning(afs_ucred_t * acred)
{
    afs_int32 now;
    pid_t pid = MyPidxx2Pid(MyPidxx);
    char *procname;

    now = osi_Time();

    AFS_STATCNT(DoLockWarning);
    /* check if we've already warned this user recently */
    if (!((now < lastWarnTime + 120) && (lastWarnPid == pid))) {
	procname = afs_osi_Alloc(256);

	if (!procname)
	    return;

	/* Copies process name to allocated procname, see osi_machdeps for details of macro */
	osi_procname(procname, 256);
	procname[255] = '\0';

	/* otherwise, it is time to nag the user */
	lastWarnTime = now;
	lastWarnPid = pid;
#ifdef AFS_LINUX26_ENV
	afs_warnuser
	    ("afs: byte-range locks only enforced for processes on this machine (pid %d (%s), user %ld).\n", pid, procname, (long)afs_cr_uid(acred));
#else
	afs_warnuser
	    ("afs: byte-range lock/unlock ignored; make sure no one else is running this program (pid %d (%s), user %ld).\n", pid, procname, (long)afs_cr_uid(acred));
#endif
	afs_osi_Free(procname, 256);
    }
    return;
}