Example No. 1
/**
 * Release all connections for unix user xu at server xs.
 * @param xu The unix user whose connections are to be released.
 * @param xs The server on which to release them.
 */
static void
release_conns_user_server(struct unixuser *xu, struct server *xs)
{
    int cix, glocked;
    struct srvAddr *sa;
    struct afs_conn *tc;
    struct sa_conn_vector *tcv, **lcv;
    for (sa = (xs)->addr; sa; sa = sa->next_sa) {
        lcv = &sa->conns;
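        /* lcv tracks the link that points at tcv, so an unreferenced
         * vector can be spliced out of the list in place */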
        for (tcv = *lcv; tcv; lcv = &tcv->next, tcv = *lcv) {
            if (tcv->user == (xu) && tcv->refCount == 0) {
                *lcv = tcv->next;
                /* our old friend, the GLOCK */
                glocked = ISAFS_GLOCK();
                if (glocked)
                    AFS_GUNLOCK();
                for(cix = 0; cix < CVEC_LEN; ++cix) {
                    tc = &(tcv->cvec[cix]);
                    if (tc->activated) {
                        rx_SetConnSecondsUntilNatPing(tc->id, 0);
                        rx_DestroyConnection(tc->id);
                    }
                }
                if (glocked)
                    AFS_GLOCK();
                afs_osi_Free(tcv, sizeof(struct sa_conn_vector));
                break;    /* at most one instance per server */
            } /*Found unreferenced connection for user */
        }
    } /*For each connection on the server */

}        /* release_conns_user_server */
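
A note on the idiom shared by this and the examples that follow: capture
whether the current thread holds the AFS global lock with ISAFS_GLOCK(), drop
it around any operation that may sleep, and restore it before returning. A
minimal sketch of the pattern; the blocking call is a placeholder, not a real
Rx or OSI routine:

static void
glock_safe_blocking_op(void)
{
    int glocked = ISAFS_GLOCK();    /* do we hold the glock right now? */

    if (glocked)
        AFS_GUNLOCK();              /* drop it: the call below may sleep */
    some_call_that_may_block();     /* placeholder, e.g. rx_DestroyConnection() */
    if (glocked)
        AFS_GLOCK();                /* restore the caller's locking state */
}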
Example No. 2
static void
release_conns_vector(struct sa_conn_vector *xcv)
{
    int cix, glocked;
    struct afs_conn *tc;
    struct sa_conn_vector *tcv = NULL;
    struct sa_conn_vector *next = NULL;
    for (tcv = xcv; tcv; tcv = next) {
        next = tcv->next;	/* grab the next link before tcv is freed below */
        /* you know it, you love it, the GLOCK */
        glocked = ISAFS_GLOCK();
        if (glocked)
            AFS_GUNLOCK();
        for(cix = 0; cix < CVEC_LEN; ++cix) {
            tc = &(tcv->cvec[cix]);
            if (tc->activated) {
                rx_SetConnSecondsUntilNatPing(tc->id, 0);
                rx_DestroyConnection(tc->id);
            }
        }
        if (glocked)
            AFS_GLOCK();
        afs_osi_Free(tcv, sizeof(struct sa_conn_vector));
    }

}        /* release_conns_vector */
Example No. 3
void
afs_cv_timedwait(afs_kcondvar_t * cv, afs_kmutex_t * l, int waittime)
{
    int seq, isAFSGlocked = ISAFS_GLOCK();
    long t = waittime * HZ / 1000;
#ifdef DECLARE_WAITQUEUE
    DECLARE_WAITQUEUE(wait, current);
#else
    struct wait_queue wait = { current, NULL };
#endif
    seq = cv->seq;
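    /* cv->seq advances on every signal/broadcast; sampling it before we
     * sleep means a wakeup that fires after the mutex is dropped is not lost */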

    set_current_state(TASK_INTERRUPTIBLE);
    add_wait_queue(&cv->waitq, &wait);

    if (isAFSGlocked)
	AFS_GUNLOCK();
    MUTEX_EXIT(l);

    while(seq == cv->seq) {
	t = schedule_timeout(t);
	if (!t)         /* timeout */
	    break;
    }
    
    remove_wait_queue(&cv->waitq, &wait);
    set_current_state(TASK_RUNNING);

    if (isAFSGlocked)
	AFS_GLOCK();
    MUTEX_ENTER(l);
}
Example No. 4
int
osi_NetReceive(osi_socket asocket, struct sockaddr_in *addr,
	       struct iovec *dvec, int nvecs, int *alength)
{
    struct uio u;
    int i;
    struct iovec iov[RX_MAXIOVECS];
    struct sockaddr *sa = NULL;
    int code;

    int haveGlock = ISAFS_GLOCK();
    /*AFS_STATCNT(osi_NetReceive); */

    if (nvecs > RX_MAXIOVECS)
	osi_Panic("osi_NetReceive: %d: Too many iovecs.\n", nvecs);

    for (i = 0; i < nvecs; i++)
	iov[i] = dvec[i];

    u.uio_iov = &iov[0];
    u.uio_iovcnt = nvecs;
    u.uio_offset = 0;
    u.uio_resid = *alength;
    u.uio_segflg = UIO_SYSSPACE;
    u.uio_rw = UIO_READ;
#ifdef AFS_FBSD50_ENV
    u.uio_td = NULL;
#else
    u.uio_procp = NULL;
#endif

    if (haveGlock)
	AFS_GUNLOCK();
    code = soreceive(asocket, &sa, &u, NULL, NULL, NULL);
    if (haveGlock)
	AFS_GLOCK();

    if (code) {
#if KNET_DEBUG
	if (code == EINVAL)
	    Debugger("afs NetReceive busted");
	else
	    printf("y");
#else
	return code;
#endif
    }
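    /* uio_resid is what soreceive left untransferred; the difference is
     * the byte count actually received */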
    *alength -= u.uio_resid;
    if (sa) {
	if (sa->sa_family == AF_INET) {
	    if (addr)
		*addr = *(struct sockaddr_in *)sa;
	} else
	    printf("Unknown socket family %d in NetReceive\n", sa->sa_family);
	FREE(sa, M_SONAME);
    }
    return code;
}
Example No. 5
int
osi_NetSend(osi_socket asocket, struct sockaddr_in *addr, struct iovec *dvec,
	    int nvecs, afs_int32 alength, int istack)
{
    register afs_int32 code;
    int i;
    struct iovec iov[RX_MAXIOVECS];
    struct uio u;
    int haveGlock = ISAFS_GLOCK();

    AFS_STATCNT(osi_NetSend);
    if (nvecs > RX_MAXIOVECS)
	osi_Panic("osi_NetSend: %d: Too many iovecs.\n", nvecs);

    for (i = 0; i < nvecs; i++)
	iov[i] = dvec[i];

    u.uio_iov = &iov[0];
    u.uio_iovcnt = nvecs;
    u.uio_offset = 0;
    u.uio_resid = alength;
    u.uio_segflg = UIO_SYSSPACE;
    u.uio_rw = UIO_WRITE;
#ifdef AFS_FBSD50_ENV
    u.uio_td = NULL;
#else
    u.uio_procp = NULL;
#endif

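    /* BSD sockaddrs carry an explicit length; sosend expects sin_len to be set */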
    addr->sin_len = sizeof(struct sockaddr_in);

    if (haveGlock)
	AFS_GUNLOCK();
#if KNET_DEBUG
    printf("+");
#endif
#ifdef AFS_FBSD50_ENV
    code =
	sosend(asocket, (struct sockaddr *)addr, &u, NULL, NULL, 0,
	       curthread);
#else
    code =
	sosend(asocket, (struct sockaddr *)addr, &u, NULL, NULL, 0, curproc);
#endif
#if KNET_DEBUG
    if (code) {
	if (code == EINVAL)
	    Debugger("afs NetSend busted");
	else
	    printf("z");
    }
#endif
    if (haveGlock)
	AFS_GLOCK();
    return code;
}
Example No. 6
int
osi_NetReceive(osi_socket asocket, struct sockaddr_in *addr,
	       struct iovec *dvec, int nvecs, int *alength)
{
    struct uio u;
    int i, code;
    struct iovec iov[RX_MAXIOVECS];
    struct mbuf *nam = NULL;

    int haveGlock = ISAFS_GLOCK();

    memset(&u, 0, sizeof(u));
    memset(&iov, 0, sizeof(iov));

    if (nvecs > RX_MAXIOVECS)
	osi_Panic("osi_NetReceive: %d: too many iovecs\n", nvecs);

    for (i = 0; i < nvecs; i++)
	iov[i] = dvec[i];

    u.uio_iov = &iov[0];
    u.uio_iovcnt = nvecs;
    u.uio_offset = 0;
    u.uio_resid = *alength;
    u.uio_segflg = UIO_SYSSPACE;
    u.uio_rw = UIO_READ;
    u.uio_procp = NULL;

    if (haveGlock)
	AFS_GUNLOCK();
    code = soreceive(asocket, (addr ? &nam : NULL), &u, NULL, NULL, NULL
#if defined(AFS_OBSD45_ENV)
		     , 0
#endif
		     );
    if (haveGlock)
	AFS_GLOCK();

    if (code) {
#ifdef RXKNET_DEBUG
	printf("rx code %d termState %d\n", code, afs_termState);
#endif
	while (afs_termState == AFSOP_STOP_RXEVENT)
	    afs_osi_Sleep(&afs_termState);
	return code;
    }

    *alength -= u.uio_resid;
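    /* soreceive hands back the sender's address in an mbuf; copy it out and free it */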
    if (addr && nam) {
	memcpy(addr, mtod(nam, caddr_t), nam->m_len);
	m_freem(nam);
    }

    return code;
}
Example No. 7
int
osi_UFSTruncate(register struct osi_file *afile, afs_int32 asize)
{
    struct vattr tvattr;
    struct vnode *vp;
    register afs_int32 code, glocked;
    AFS_STATCNT(osi_Truncate);

    ObtainWriteLock(&afs_xosi, 321);
    vp = afile->vnode;
    /*
     * This routine only shrinks files, and most systems
     * have very slow truncates, even when the file is already
     * small enough.  Check now and save some time.
     */
    glocked = ISAFS_GLOCK();
    if (glocked)
      AFS_GUNLOCK();
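    /* vn_lock() and the VOP calls below may sleep; keep the glock dropped
     * for the whole getattr/setattr sequence */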
#if defined(AFS_FBSD80_ENV)
    vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
    code = VOP_GETATTR(afile->vnode, &tvattr, afs_osi_credp);
#elif defined(AFS_FBSD50_ENV)
    vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, curthread);
    code = VOP_GETATTR(afile->vnode, &tvattr, afs_osi_credp, curthread);
#else
    vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, curproc);
    code = VOP_GETATTR(afile->vnode, &tvattr, afs_osi_credp, curproc);
#endif
    if (code != 0 || tvattr.va_size <= asize)
	goto out;

    VATTR_NULL(&tvattr);
    tvattr.va_size = asize;
#if defined(AFS_FBSD80_ENV)
    code = VOP_SETATTR(vp, &tvattr, afs_osi_credp);
#elif defined(AFS_FBSD50_ENV)
    code = VOP_SETATTR(vp, &tvattr, afs_osi_credp, curthread);
#else
    code = VOP_SETATTR(vp, &tvattr, afs_osi_credp, curproc);
#endif

out:
#if defined(AFS_FBSD80_ENV)
    VOP_UNLOCK(vp, 0);
#elif defined(AFS_FBSD50_ENV)
    VOP_UNLOCK(vp, LK_EXCLUSIVE, curthread);
#else
    VOP_UNLOCK(vp, LK_EXCLUSIVE, curproc);
#endif
    if (glocked)
      AFS_GLOCK();
    ReleaseWriteLock(&afs_xosi);
    return code;
}
Example No. 8
void
osi_StopListener(void)
{
    struct proc *p;

    /*
     * Have to drop global lock to safely do this.
     * soclose() is currently protected by Giant,
     * but pfind and psignal are MPSAFE.
     */
    int haveGlock = ISAFS_GLOCK();
    if (haveGlock)
	AFS_GUNLOCK();
    soshutdown(rx_socket, 2);
#ifndef AFS_FBSD70_ENV
    soclose(rx_socket);
#endif
    p = pfind(rxk_ListenerPid);
    afs_warn("osi_StopListener: rxk_ListenerPid %lx\n", p);
    if (p) {
	psignal(p, SIGUSR1);
#ifdef AFS_FBSD50_ENV
	PROC_UNLOCK(p);	/* pfind returns the proc locked on 5.x */
#endif
    }
#ifdef AFS_FBSD70_ENV
    {
      /* Avoid destroying socket until osi_NetReceive has
       * had a chance to clean up */
      int tries;
      struct mtx s_mtx;

      MUTEX_INIT(&s_mtx, "rx_shutdown_mutex", MUTEX_DEFAULT, 0);
      MUTEX_ENTER(&s_mtx);
      tries = 3;
      while ((tries > 0) && (!so_is_disconn(rx_socket))) {
	msleep(&osi_StopListener, &s_mtx, PSOCK | PCATCH,
	       "rx_shutdown_timedwait", 1 * hz);
	--tries;
      }
      if (so_is_disconn(rx_socket))
	soclose(rx_socket);
      MUTEX_EXIT(&s_mtx);
      MUTEX_DESTROY(&s_mtx);
    }
#endif
    if (haveGlock)
	AFS_GLOCK();
}
Example No. 9
int
osi_lookupname(char *aname, enum uio_seg seg, int followlink,
	       struct vnode **vpp)
{
    struct nameidata n;
    int flags, error, glocked;

#ifdef AFS_FBSD50_ENV
    glocked = ISAFS_GLOCK();
    if (glocked)
	AFS_GUNLOCK();
#endif

    flags = LOCKLEAF;
    if (followlink)
	flags |= FOLLOW;
    else
	flags |= NOFOLLOW;
#ifdef AFS_FBSD80_ENV
    flags |= MPSAFE; /* namei must take GIANT if needed */
#endif
    NDINIT(&n, LOOKUP, flags, seg, aname, curproc);
    if ((error = namei(&n)) != 0) {
#ifdef AFS_FBSD50_ENV
	if (glocked)
	    AFS_GLOCK();
#endif
	return error;
    }
    *vpp = n.ni_vp;
    /* XXX should we do this?  Usually NOT (matt) */
#if defined(AFS_FBSD80_ENV)
    /*VOP_UNLOCK(n.ni_vp, 0);*/
#elif defined(AFS_FBSD50_ENV)
    VOP_UNLOCK(n.ni_vp, 0, curthread);
#else
    VOP_UNLOCK(n.ni_vp, 0, curproc);
#endif
    NDFREE(&n, NDF_ONLY_PNBUF);
#ifdef AFS_FBSD50_ENV
    if (glocked)
	AFS_GLOCK();
#endif
    return 0;
}
Example No. 10
/*
 * Replace all of the bogus special-purpose memory allocators...
 */
void *
osi_fbsd_alloc(size_t size, int dropglobal)
{
	void *rv;
	int glocked;

	if (dropglobal) {
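	    /* M_WAITOK may sleep waiting for memory, so the glock is
	     * dropped around it; the M_NOWAIT path below never sleeps */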
	    glocked = ISAFS_GLOCK();
	    if (glocked)
		AFS_GUNLOCK();
	    rv = malloc(size, M_AFS, M_WAITOK);
	    if (glocked)
		AFS_GLOCK();
	} else
	    rv = malloc(size, M_AFS, M_NOWAIT);

	return (rv);
}
Example No. 11
int
osi_NetSend(osi_socket asocket, struct sockaddr_in *addr, struct iovec *dvec,
	    int nvecs, afs_int32 alength, int istack)
{
    int i, code;
    struct iovec iov[RX_MAXIOVECS];
    struct uio u;
    struct mbuf *nam;
    int haveGlock = ISAFS_GLOCK();

    memset(&u, 0, sizeof(u));
    memset(&iov, 0, sizeof(iov));

    AFS_STATCNT(osi_NetSend);
    if (nvecs > RX_MAXIOVECS)
	osi_Panic("osi_NetSend: %d: Too many iovecs.\n", nvecs);

    for (i = 0; i < nvecs; i++)
	iov[i] = dvec[i];

    u.uio_iov = &iov[0];
    u.uio_iovcnt = nvecs;
    u.uio_offset = 0;
    u.uio_resid = alength;
    u.uio_segflg = UIO_SYSSPACE;
    u.uio_rw = UIO_WRITE;
    u.uio_procp = NULL;

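    /* on OpenBSD the destination address goes to sosend wrapped in an MT_SONAME mbuf */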
    nam = m_get(M_DONTWAIT, MT_SONAME);
    if (!nam)
	return ENOBUFS;
    nam->m_len = addr->sin_len = sizeof(struct sockaddr_in);
    memcpy(mtod(nam, caddr_t), addr, addr->sin_len);

    if (haveGlock)
	AFS_GUNLOCK();
    code = sosend(asocket, nam, &u, NULL, NULL, 0);
    if (haveGlock)
	AFS_GLOCK();
    m_freem(nam);

    return code;
}
Example No. 12
void
put_vfs_context(void)
{
    int isglock = ISAFS_GLOCK();

    if (!isglock)
	AFS_GLOCK();
    if (afs_osi_ctxtp_initialized) {
	if (!isglock)
	    AFS_GUNLOCK();
	return;
    }
    if (vfs_context_owner == current_thread())
	vfs_context_owner = (thread_t)0;
    vfs_context_ref--;
    afs_osi_Wakeup(&afs_osi_ctxtp);
    if (!isglock)
        AFS_GUNLOCK();
}
Example No. 13
/*
 * Replace all of the bogus special-purpose memory allocators...
 */
void *
osi_fbsd_alloc(size_t size, int dropglobal)
{
	void *rv;
#ifdef AFS_FBSD50_ENV
	int glocked;

	if (dropglobal) {
	    glocked = ISAFS_GLOCK();
	    if (glocked)
		AFS_GUNLOCK();
	    rv = malloc(size, M_AFS, M_WAITOK);
	    if (glocked)
		AFS_GLOCK();
	} else
#endif
	    rv = malloc(size, M_AFS, M_NOWAIT);

	return (rv);
}
Example No. 14
int
osi_lookupname(char *aname, enum uio_seg seg, int followlink,
	       struct vnode **vpp)
{
    struct nameidata n;
    int flags, error, glocked;

    glocked = ISAFS_GLOCK();
    if (glocked)
	AFS_GUNLOCK();

#if __FreeBSD_version >= 1000021 /* MPSAFE is gone for good! */
    flags = 0;
#else
    flags = MPSAFE; /* namei must take Giant if needed */
#endif
    if (followlink)
	flags |= FOLLOW;
    else
	flags |= NOFOLLOW;
    NDINIT(&n, LOOKUP, flags, seg, aname, curthread);
    if ((error = namei(&n)) != 0) {
	if (glocked)
	    AFS_GLOCK();
	return error;
    }
    *vpp = n.ni_vp;
    /* XXX should we do this?  Usually NOT (matt) */
#if defined(AFS_FBSD80_ENV)
    /*VOP_UNLOCK(n.ni_vp, 0);*/
#else
    VOP_UNLOCK(n.ni_vp, 0, curthread);
#endif
    NDFREE(&n, NDF_ONLY_PNBUF);
    if (glocked)
	AFS_GLOCK();
    return 0;
}
Example No. 15
void
get_vfs_context(void)
{
    int isglock = ISAFS_GLOCK();

    if (!isglock)
	AFS_GLOCK();
    if (afs_osi_ctxtp_initialized) {
	if (!isglock)
	    AFS_GUNLOCK();
	return;
    }
    osi_Assert(vfs_context_owner != current_thread());
    if (afs_osi_ctxtp && current_proc() == vfs_context_curproc) {
	vfs_context_ref++;
	vfs_context_owner = current_thread();
	if (!isglock)
	    AFS_GUNLOCK();
	return;
    }
    while (afs_osi_ctxtp && vfs_context_ref) {
	afs_osi_Sleep(&afs_osi_ctxtp);
	if (afs_osi_ctxtp_initialized) {
	    if (!isglock)
		AFS_GUNLOCK();
	    return;
	}
    }
    vfs_context_rele(afs_osi_ctxtp);
    vfs_context_ref = 1;
    afs_osi_ctxtp = vfs_context_create(NULL);
    vfs_context_owner = current_thread();
    vfs_context_curproc = current_proc();
    if (!isglock)
	AFS_GUNLOCK();
}
Example No. 16
/**
 * Connects to a server by its server address.
 *
 * @param sap Server address.
 * @param aport Server port.
 * @param acell Cell number.
 * @param tu Connect as this user.
 * @param force_if_down Connect even if the server is marked down.
 * @param create If set, create a new connection when none is cached.
 * @param locktype Specifies type of lock to be used for this function.
 *
 * @return The new connection.
 */
struct afs_conn *
afs_ConnBySA(struct srvAddr *sap, unsigned short aport, afs_int32 acell,
	     struct unixuser *tu, int force_if_down, afs_int32 create,
	     afs_int32 locktype, struct rx_connection **rxconn)
{
    int glocked, foundvec;
    struct afs_conn *tc = NULL;
    struct sa_conn_vector *tcv = NULL;
    struct rx_securityClass *csec; /*Security class object */
    int isec; /*Security index */
    int service;

    *rxconn = NULL;

    /* find cached connection */
    ObtainSharedLock(&afs_xconn, 15);
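    /* scan under the shared lock; upgrade to write only when the cache must change */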
    foundvec = 0;
    for (tcv = sap->conns; tcv; tcv = tcv->next) {
        if (tcv->user == tu && tcv->port == aport) {
            /* return most eligible conn */
            foundvec = 1;
            UpgradeSToWLock(&afs_xconn, 37);
            tc = find_preferred_connection(tcv, create);
            ConvertWToSLock(&afs_xconn);
            break;
        }
    }

    if (!tc && !create) {
        /* Not found and can't create a new one. */
        ReleaseSharedLock(&afs_xconn);
        return NULL;
    }

    if (AFS_IS_DISCONNECTED && !AFS_IN_SYNC) {
        afs_warnuser("afs_ConnBySA: disconnected\n");
        ReleaseSharedLock(&afs_xconn);
        return NULL;
    }

    if (!foundvec && create) {
	/* No such connection vector exists.  Create one and splice it in.
	 * Make sure the server record has been marked as used (for the purposes
	 * of calculating up & down times, it's now considered to be an
	 * ``active'' server).  Also make sure the server's lastUpdateEvalTime
	 * gets set, marking the time of its ``birth''.
	 */
	UpgradeSToWLock(&afs_xconn, 37);
        new_conn_vector(tcv);

        tcv->user = tu;
        tcv->port = aport;
        tcv->srvr = sap;
        tcv->next = sap->conns;
        sap->conns = tcv;

        /* all struct afs_conn ptrs come from here */
        tc = find_preferred_connection(tcv, create);

	afs_ActivateServer(sap);

	ConvertWToSLock(&afs_xconn);
    } /* end of if (!tcv) */

    if (!tc) {
        /* Not found and no alternatives. */
        ReleaseSharedLock(&afs_xconn);
        return NULL;
    }

    if (tu->states & UTokensBad) {
	/* we may still have an authenticated RPC connection here,
	 * we'll have to create a new, unauthenticated, connection.
	 * Perhaps a better way to do this would be to set
	 * conn->forceConnectFS on all conns when the token first goes
	 * bad, but that's somewhat trickier, due to locking
	 * constraints (though not impossible).
	 */
	if (tc->id && (rx_SecurityClassOf(tc->id) != 0)) {
	    tc->forceConnectFS = 1;	/* force recreation of connection */
	}
	tu->states &= ~UHasTokens;      /* remove the authentication info */
    }

    glocked = ISAFS_GLOCK();
    if (tc->forceConnectFS) {
	UpgradeSToWLock(&afs_xconn, 38);
	csec = (struct rx_securityClass *)0;
	if (tc->id) {
	    if (glocked)
                AFS_GUNLOCK();
            rx_SetConnSecondsUntilNatPing(tc->id, 0);
            rx_DestroyConnection(tc->id);
	    if (glocked)
                AFS_GLOCK();
	}
	/*
	 * Stupid hack to determine if using vldb service or file system
	 * service.
	 */
	if (aport == sap->server->cell->vlport)
	    service = 52;
	else
	    service = 1;
	isec = 0;

	csec = afs_pickSecurityObject(tc, &isec);

	if (glocked)
            AFS_GUNLOCK();
	tc->id = rx_NewConnection(sap->sa_ip, aport, service, csec, isec);
	if (glocked)
            AFS_GLOCK();
	if (service == 52) {
	    rx_SetConnHardDeadTime(tc->id, afs_rx_harddead);
	}
	/* set to a RX_CALL_TIMEOUT error to allow MTU retry to trigger */
	rx_SetServerConnIdleDeadErr(tc->id, RX_CALL_DEAD);
	rx_SetConnIdleDeadTime(tc->id, afs_rx_idledead);

	/*
	 * Only do this for the base connection, not per-user.
	 * Will need to be revisited if/when CB gets security.
	 */
	if ((isec == 0) && (service != 52) && !(tu->states & UTokensBad) &&
	    (tu->viceId == UNDEFVID)
#ifndef UKERNEL /* ukernel runs as just one uid anyway */
	    && (tu->uid == 0)
#endif
	    )
	    rx_SetConnSecondsUntilNatPing(tc->id, 20);

	tc->forceConnectFS = 0;	/* apparently we're appropriately connected now */
	if (csec)
	    rxs_Release(csec);
	ConvertWToSLock(&afs_xconn);
    } /* end of if (tc->forceConnectFS)*/

    *rxconn = tc->id;
    rx_GetConnection(*rxconn);

    ReleaseSharedLock(&afs_xconn);
    return tc;
}
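
A hypothetical call site for afs_ConnBySA, showing how the create flag and the
rxconn out-parameter are used (the variable names and the afs_PutConn release
path are assumptions for illustration, not taken from this example):

    struct rx_connection *rxconn;
    struct afs_conn *tc;

    tc = afs_ConnBySA(sap, fsport, cellnum, tu, 0 /* force_if_down */,
                      1 /* create */, SHARED_LOCK, &rxconn);
    if (tc) {
        /* ... issue RPCs over rxconn ... */
        afs_PutConn(tc, rxconn, SHARED_LOCK);   /* assumed release path */
    }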
Example No. 17
/* CV_WAIT and CV_TIMEDWAIT sleep until the specified event occurs, or, in the
 * case of CV_TIMEDWAIT, until the specified timeout occurs.
 * - NOTE: that on Linux, there are circumstances in which TASK_INTERRUPTIBLE
 *   can wake up, even if all signals are blocked
 * - TODO: handle signals correctly by passing an indication back to the
 *   caller that the wait has been interrupted and the stack should be cleaned
 *   up preparatory to signal delivery
 */
int
afs_cv_wait(afs_kcondvar_t * cv, afs_kmutex_t * l, int sigok)
{
    int seq, isAFSGlocked = ISAFS_GLOCK();
    sigset_t saved_set;
#ifdef DECLARE_WAITQUEUE
    DECLARE_WAITQUEUE(wait, current);
#else
    struct wait_queue wait = { current, NULL };
#endif
    sigemptyset(&saved_set);
    seq = cv->seq;
    
    set_current_state(TASK_INTERRUPTIBLE);
    add_wait_queue(&cv->waitq, &wait);

    if (isAFSGlocked)
	AFS_GUNLOCK();
    MUTEX_EXIT(l);

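    /* if the caller disallows interruption, block all signals and restore
     * the old mask after the wait */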
    if (!sigok) {
	SIG_LOCK(current);
	saved_set = current->blocked;
	sigfillset(&current->blocked);
	RECALC_SIGPENDING(current);
	SIG_UNLOCK(current);
    }

    while(seq == cv->seq) {
	schedule();
#ifdef AFS_LINUX26_ENV
#ifdef CONFIG_PM
	if (
#ifdef PF_FREEZE
	    current->flags & PF_FREEZE
#else
#if defined(STRUCT_TASK_STRUCT_HAS_TODO)
	    !current->todo
#else
#if defined(STRUCT_TASK_STRUCT_HAS_THREAD_INFO)
	    test_ti_thread_flag(current->thread_info, TIF_FREEZE)
#else
	    test_ti_thread_flag(task_thread_info(current), TIF_FREEZE)
#endif
#endif
#endif
	    )
#ifdef LINUX_REFRIGERATOR_TAKES_PF_FREEZE
	    refrigerator(PF_FREEZE);
#else
	    refrigerator();
#endif
	    set_current_state(TASK_INTERRUPTIBLE);
#endif
#endif
    }

    remove_wait_queue(&cv->waitq, &wait);
    set_current_state(TASK_RUNNING);

    if (!sigok) {
	SIG_LOCK(current);
	current->blocked = saved_set;
	RECALC_SIGPENDING(current);
	SIG_UNLOCK(current);
    }

    if (isAFSGlocked)
	AFS_GLOCK();
    MUTEX_ENTER(l);

    return (sigok && signal_pending(current)) ? EINTR : 0;
}
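
For context, a caller is expected to hold the mutex, test its predicate, and
wait in a loop, since (per the note above) the wait can return spuriously. A
hypothetical caller; the queue names are invented for illustration:

    MUTEX_ENTER(&q->lock);
    while (queue_IsEmpty(&q->items))
        afs_cv_wait(&q->nonempty, &q->lock, 0); /* sigok = 0: block signals */
    /* ... consume an item with q->lock held ... */
    MUTEX_EXIT(&q->lock);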
Example No. 18
int
osi_NetReceive(osi_socket so, struct sockaddr_in *addr, struct iovec *dvec,
	       int nvecs, int *alength)
{
#ifdef AFS_DARWIN80_ENV
    socket_t asocket = (socket_t)so;
    struct msghdr msg;
    struct sockaddr_storage ss;
    int rlen;
    mbuf_t m = NULL;	/* so the unconditional mbuf_freem() below is safe on error */
#else
    struct socket *asocket = (struct socket *)so;
    struct uio u;
#endif
    int i;
    struct iovec iov[RX_MAXIOVECS];
    struct sockaddr *sa = NULL;
    int code;
    size_t resid;

    int haveGlock = ISAFS_GLOCK();
    /*AFS_STATCNT(osi_NetReceive); */

    if (nvecs > RX_MAXIOVECS)
	osi_Panic("osi_NetReceive: %d: Too many iovecs.\n", nvecs);

    for (i = 0; i < nvecs; i++)
	iov[i] = dvec[i];

    if ((afs_termState == AFSOP_STOP_RXK_LISTENER) ||
	(afs_termState == AFSOP_STOP_COMPLETE))
	return -1;

    if (haveGlock)
	AFS_GUNLOCK();
#if defined(KERNEL_FUNNEL)
    thread_funnel_switch(KERNEL_FUNNEL, NETWORK_FUNNEL);
#endif
#ifdef AFS_DARWIN80_ENV
    resid = *alength;
    memset(&msg, 0, sizeof(struct msghdr));
    msg.msg_name = &ss;
    msg.msg_namelen = sizeof(struct sockaddr_storage);
    sa = (struct sockaddr *)&ss;
    code = sock_receivembuf(asocket, &msg, &m, 0, alength);
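    /* on success, hand-scatter the received mbuf chain into the caller's iovecs */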
    if (!code) {
        size_t offset = 0, sz;
        resid = *alength;
        for (i = 0; i < nvecs && resid; i++) {
            sz = MIN(resid, iov[i].iov_len);
            code = mbuf_copydata(m, offset, sz, iov[i].iov_base);
            if (code)
                break;
            resid -= sz;
            offset += sz;
        }
    }
    mbuf_freem(m);
#else

    u.uio_iov = &iov[0];
    u.uio_iovcnt = nvecs;
    u.uio_offset = 0;
    u.uio_resid = *alength;
    u.uio_segflg = UIO_SYSSPACE;
    u.uio_rw = UIO_READ;
    u.uio_procp = NULL;
    code = soreceive(asocket, &sa, &u, NULL, NULL, NULL);
    resid = u.uio_resid;
#endif

#if defined(KERNEL_FUNNEL)
    thread_funnel_switch(NETWORK_FUNNEL, KERNEL_FUNNEL);
#endif
    if (haveGlock)
	AFS_GLOCK();

    if (code)
	return code;
    *alength -= resid;
    if (sa) {
	if (sa->sa_family == AF_INET) {
	    if (addr)
		*addr = *(struct sockaddr_in *)sa;
	} else
	    printf("Unknown socket family %d in NetReceive\n", sa->sa_family);
#ifndef AFS_DARWIN80_ENV
	FREE(sa, M_SONAME);
#endif
    }
    return code;
}
Example No. 19
/* works like PFlushVolumeData */
void
darwin_notify_perms(struct unixuser *auser, int event)
{
    int i;
    struct afs_q *tq, *uq = NULL;
    struct vcache *tvc, *hnext;
    int isglock = ISAFS_GLOCK();
    struct vnode *vp;
    struct vnode_attr va;
    int isctxtowner = 0;

    if (!afs_darwin_fsevents)
	return;

    VATTR_INIT(&va);
    VATTR_SET(&va, va_mode, 0777);
    if (event & UTokensObtained)
	VATTR_SET(&va, va_uid, auser->uid);
    else
	VATTR_SET(&va, va_uid, -2); /* nobody */

    if (!isglock)
	AFS_GLOCK();
    if (!(vfs_context_owner == current_thread())) {
	get_vfs_context();
	isctxtowner = 1;
    }
loop:
    ObtainReadLock(&afs_xvcache);
    for (i = 0; i < VCSIZE; i++) {
	for (tq = afs_vhashTV[i].prev; tq != &afs_vhashTV[i]; tq = uq) {
	    uq = QPrev(tq);
	    tvc = QTOVH(tq);
	    if (tvc->f.states & CDeadVnode) {
		/* we can afford to be best-effort */
		continue;
	    }
	    /* no per-file acls, so only notify on directories */
	    if (!(vp = AFSTOV(tvc)) || !vnode_isdir(AFSTOV(tvc)))
		continue;
	    /* dynroot object. no callbacks. anonymous ACL. just no. */
	    if (afs_IsDynrootFid(&tvc->f.fid))
		continue;
	    /* no fake fsevents on mount point sources. leaks refs */
	    if (tvc->mvstat == 1)
		continue;
	    /* if it's being reclaimed, just pass */
	    if (vnode_get(vp))
		continue;
	    if (vnode_ref(vp)) {
		AFS_GUNLOCK();
		vnode_put(vp);
		AFS_GLOCK();
		continue;
	    }
	    ReleaseReadLock(&afs_xvcache);
	    /* Avoid potentially re-entering on this lock */
	    if (0 == NBObtainWriteLock(&tvc->lock, 234)) {
		tvc->f.states |= CEvent;
		AFS_GUNLOCK();
		vnode_setattr(vp, &va, afs_osi_ctxtp);
		tvc->f.states &= ~CEvent;
		vnode_put(vp);
		AFS_GLOCK();
		ReleaseWriteLock(&tvc->lock);
	    }
	    ObtainReadLock(&afs_xvcache);
	    uq = QPrev(tq);
	    /* our tvc ptr is still good until now */
	    AFS_FAST_RELE(tvc);
	}
    }
    ReleaseReadLock(&afs_xvcache);
    if (isctxtowner)
	put_vfs_context();
    if (!isglock)
	AFS_GUNLOCK();
}
Example No. 20
int
osi_NetSend(osi_socket so, struct sockaddr_in *addr, struct iovec *dvec,
	    int nvecs, afs_int32 alength, int istack)
{
#ifdef AFS_DARWIN80_ENV
    socket_t asocket = (socket_t)so;
    struct msghdr msg;
    size_t slen;
#else
    struct socket *asocket = (struct socket *)so;
    struct uio u;
#endif
    afs_int32 code;
    int i;
    struct iovec iov[RX_MAXIOVECS];
    int haveGlock = ISAFS_GLOCK();

    AFS_STATCNT(osi_NetSend);
    if (nvecs > RX_MAXIOVECS)
	osi_Panic("osi_NetSend: %d: Too many iovecs.\n", nvecs);

    for (i = 0; i < nvecs; i++)
	iov[i] = dvec[i];

    addr->sin_len = sizeof(struct sockaddr_in);

    if ((afs_termState == AFSOP_STOP_RXK_LISTENER) ||
	(afs_termState == AFSOP_STOP_COMPLETE))
	return -1;

    if (haveGlock)
	AFS_GUNLOCK();

#if defined(KERNEL_FUNNEL)
    thread_funnel_switch(KERNEL_FUNNEL, NETWORK_FUNNEL);
#endif
#ifdef AFS_DARWIN80_ENV
    memset(&msg, 0, sizeof(struct msghdr));
    msg.msg_name = addr;
    msg.msg_namelen = ((struct sockaddr *)addr)->sa_len;
    msg.msg_iov = &iov[0];
    msg.msg_iovlen = nvecs;
    code = sock_send(asocket, &msg, 0, &slen);
#else
    u.uio_iov = &iov[0];
    u.uio_iovcnt = nvecs;
    u.uio_offset = 0;
    u.uio_resid = alength;
    u.uio_segflg = UIO_SYSSPACE;
    u.uio_rw = UIO_WRITE;
    u.uio_procp = NULL;
    code = sosend(asocket, (struct sockaddr *)addr, &u, NULL, NULL, 0);
#endif

#if defined(KERNEL_FUNNEL)
    thread_funnel_switch(NETWORK_FUNNEL, KERNEL_FUNNEL);
#endif
    if (haveGlock)
	AFS_GLOCK();
    return code;
}
Example No. 21
/* Flush any buffered data to the stream, switch to read mode
 * (clients) or to EOF mode (servers)
 *
 * LOCKS HELD: called at netpri.
 */
void
rxi_FlushWrite(struct rx_call *call)
{
    struct rx_packet *cp = NULL;

    /* Free any packets from the last call to ReadvProc/WritevProc */
    if (queue_IsNotEmpty(&call->iovq)) {
#ifdef RXDEBUG_PACKET
        call->iovqc -=
#endif /* RXDEBUG_PACKET */
            rxi_FreePackets(0, &call->iovq);
    }

    if (call->mode == RX_MODE_SENDING) {

	call->mode =
	    (call->conn->type ==
	     RX_CLIENT_CONNECTION ? RX_MODE_RECEIVING : RX_MODE_EOF);

#ifdef RX_KERNEL_TRACE
	{
	    int glockOwner = ISAFS_GLOCK();
	    if (!glockOwner)
		AFS_GLOCK();
	    afs_Trace3(afs_iclSetp, CM_TRACE_WASHERE, ICL_TYPE_STRING,
		       __FILE__, ICL_TYPE_INT32, __LINE__, ICL_TYPE_POINTER,
		       call);
	    if (!glockOwner)
		AFS_GUNLOCK();
	}
#endif

        MUTEX_ENTER(&call->lock);
#ifdef AFS_GLOBAL_RXLOCK_KERNEL
        rxi_WaitforTQBusy(call);
#endif /* AFS_GLOBAL_RXLOCK_KERNEL */
        if (call->error)
            call->mode = RX_MODE_ERROR;

        cp = call->currentPacket;

	if (cp) {
	    /* cp->length is only supposed to be the user's data */
	    /* cp->length was already set to (then-current)
	     * MaxUserDataSize or less. */
#ifdef RX_TRACK_PACKETS
	    cp->flags &= ~RX_PKTFLAG_CP;
#endif
	    cp->length -= call->nFree;
	    call->currentPacket = (struct rx_packet *)0;
	    call->nFree = 0;
	} else {
	    cp = rxi_AllocSendPacket(call, 0);
	    if (!cp) {
		/* Mode can no longer be MODE_SENDING */
		return;
	    }
	    cp->length = 0;
	    cp->niovecs = 2;	/* header + space for rxkad stuff */
	    call->nFree = 0;
	}

	/* The 1 specifies that this is the last packet */
	hadd32(call->bytesSent, cp->length);
	rxi_PrepareSendPacket(call, cp, 1);
#ifdef RX_TRACK_PACKETS
	cp->flags |= RX_PKTFLAG_TQ;
#endif
	queue_Append(&call->tq, cp);
#ifdef RXDEBUG_PACKET
        call->tqc++;
#endif /* RXDEBUG_PACKET */
	if (!(call->flags & (RX_CALL_FAST_RECOVER | RX_CALL_FAST_RECOVER_WAIT))) {
	    rxi_Start(0, call, 0, 0);
	}
        MUTEX_EXIT(&call->lock);
    }
}