Example #1
bool
rpc_ctx_xfer_replymsg(struct x_vc_data *xd, struct rpc_msg *msg)
{
    rpc_ctx_t ctx_k, *ctx;
    struct opr_rbtree_node *nv;
    rpc_dplx_lock_t *lk = &xd->rec->recv.lock;
    
    ctx_k.xid = msg->rm_xid;
    mutex_lock(&xd->rec->mtx);
    nv = opr_rbtree_lookup(&xd->cx.calls.t, &ctx_k.node_k);
    if (nv) {
        ctx = opr_containerof(nv, rpc_ctx_t, node_k);
        opr_rbtree_remove(&xd->cx.calls.t, &ctx->node_k);
        free_rpc_msg(ctx->msg); /* free call header */
        ctx->msg = msg; /* and stash reply header */
        ctx->flags |= RPC_CTX_FLAG_SYNCDONE;
        mutex_unlock(&xd->rec->mtx);
        cond_signal(&lk->we.cv); /* XXX we hold lk->we.mtx */

        /* now we must ourselves wait for the other side to run */
        while (!(ctx->flags & RPC_CTX_FLAG_ACKSYNC))
            cond_wait(&lk->we.cv, &lk->we.mtx);

        /* ctx-specific signal: indicates we will make no further
         * references to ctx whatsoever */
        mutex_lock(&ctx->we.mtx);
        ctx->flags &= ~RPC_CTX_FLAG_WAITSYNC;
        cond_signal(&ctx->we.cv);
        mutex_unlock(&ctx->we.mtx);

        return (TRUE);
    }
    mutex_unlock(&xd->rec->mtx);
    return (FALSE);
}
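
The reply transfer above is one half of a two-phase handshake on the shared duplex lock: the reply thread stashes the reply header, sets RPC_CTX_FLAG_SYNCDONE, and then blocks until the original caller acknowledges with RPC_CTX_FLAG_ACKSYNC. The sketch below shows what the waiting side plausibly looks like, reusing the flag and lock names from Example #1; the helper name rpc_ctx_await_reply is hypothetical and not an ntirpc API.

/* Hypothetical sketch of the caller side of the SYNCDONE/ACKSYNC
 * handshake in Example #1. */
static void
rpc_ctx_await_reply(rpc_ctx_t *ctx, rpc_dplx_lock_t *lk)
{
    mutex_lock(&lk->we.mtx);

    /* sleep until rpc_ctx_xfer_replymsg() stashes the reply header */
    while (!(ctx->flags & RPC_CTX_FLAG_SYNCDONE))
        cond_wait(&lk->we.cv, &lk->we.mtx);

    /* let the reply thread resume; it makes no further references
     * to ctx once it observes ACKSYNC */
    ctx->flags |= RPC_CTX_FLAG_ACKSYNC;
    cond_signal(&lk->we.cv);

    mutex_unlock(&lk->we.mtx);
}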
Example #2
void
free_rpc_call_ctx(rpc_ctx_t *ctx, uint32_t flags)
{
    struct x_vc_data *xd = (struct x_vc_data *) ctx->ctx_u.clnt.clnt->cl_p1;
    struct rpc_dplx_rec *rec = xd->rec;
    struct timespec ts;

    /* wait for commit of any xfer (ctx specific) */
    mutex_lock(&ctx->we.mtx);
    if (ctx->flags & RPC_CTX_FLAG_WAITSYNC) {
        /* WAITSYNC is already cleared if the call timed out, but it is
         * incorrect to wait forever */
        (void) clock_gettime(CLOCK_MONOTONIC_FAST, &ts);
        timespecadd(&ts, &ctx->ctx_u.clnt.timeout);
        (void) cond_timedwait(&ctx->we.cv, &ctx->we.mtx, &ts);
    }

    mutex_lock(&rec->mtx);
    opr_rbtree_remove(&xd->cx.calls.t, &ctx->node_k);
    /* interlock */
    mutex_unlock(&ctx->we.mtx);
    mutex_unlock(&rec->mtx);

    if (ctx->msg)
        free_rpc_msg(ctx->msg);
    mem_free(ctx, sizeof(rpc_ctx_t));
}
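
free_rpc_call_ctx() bounds its wait by converting the call's relative timeout into an absolute deadline for cond_timedwait(). Here is a self-contained sketch of that pattern in plain pthreads, assuming CLOCK_MONOTONIC in place of ntirpc's CLOCK_MONOTONIC_FAST and with timespecadd() expanded by hand. Note that pthread_cond_timedwait() measures the deadline against the clock the condition variable was initialized with (CLOCK_REALTIME unless changed via pthread_condattr_setclock()).

#include <errno.h>
#include <pthread.h>
#include <time.h>

/* Wait on cv (protected by mtx) for at most *rel; returns 0 when
 * signaled, ETIMEDOUT on deadline.  Assumes cv was created with
 * pthread_condattr_setclock(..., CLOCK_MONOTONIC). */
static int
cond_timedwait_rel(pthread_cond_t *cv, pthread_mutex_t *mtx,
                   const struct timespec *rel)
{
    struct timespec ts;

    (void) clock_gettime(CLOCK_MONOTONIC, &ts);
    ts.tv_sec += rel->tv_sec;            /* timespecadd(), by hand */
    ts.tv_nsec += rel->tv_nsec;
    if (ts.tv_nsec >= 1000000000L) {
        ts.tv_sec += 1;
        ts.tv_nsec -= 1000000000L;
    }
    return pthread_cond_timedwait(cv, mtx, &ts);
}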
Example #3
/**
 * @brief Decrement the call path refcnt on a cache entry.
 *
 * We assert req->rq_u1 now points to the corresponding duplicate request
 * cache entry (dv).
 *
 * In the common case, a refcnt of 0 indicates that dv is cached.  If,
 * in addition, dv->state == DUPREQ_DELETED, the request entry has been
 * discarded and should be destroyed here.
 *
 * @param[in] req  The svc_req structure.
 * @param[in] func The function descriptor for this request type
 */
void nfs_dupreq_rele(struct svc_req *req, const nfs_function_desc_t *func)
{
	dupreq_entry_t *dv = (dupreq_entry_t *) req->rq_u1;

	/* no-cache cleanup */
	if (dv == (void *)DUPREQ_NOCACHE) {
		LogFullDebug(COMPONENT_DUPREQ, "releasing no-cache res %p",
			     req->rq_u2);
		func->free_function(req->rq_u2);
		free_nfs_res(req->rq_u2);
		goto out;
	}

	pthread_mutex_lock(&dv->mtx);

	LogFullDebug(COMPONENT_DUPREQ,
		     "releasing dv=%p xid=%u on DRC=%p state=%s, refcnt=%d",
		     dv, dv->hin.tcp.rq_xid, dv->hin.drc,
		     dupreq_state_table[dv->state], dv->refcnt);

	if (--(dv->refcnt) == 0 && dv->state == DUPREQ_DELETED) {
		pthread_mutex_unlock(&dv->mtx);
		/* deep free */
		nfs_dupreq_free_dupreq(dv);
		return;
	}
	pthread_mutex_unlock(&dv->mtx);

 out:
	/* dispose RPC header */
	if (req->rq_auth)
		SVCAUTH_RELEASE(req->rq_auth, req);

	/* XXX */
	if (req->rq_rtaddr.len)
		mem_free(req->rq_rtaddr.buf, req->rq_rtaddr.len);

	(void)free_rpc_msg(req->rq_msg);

	return;
}
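
Stripped of the DRC specifics, nfs_dupreq_rele() follows a common refcount-release pattern: decrement under the entry's mutex, and let whichever releaser drops the count to zero on an already-deleted entry perform the deep free. A minimal, self-contained sketch with hypothetical types (refobj and its fields are illustrative, not nfs-ganesha structures):

#include <pthread.h>
#include <stdbool.h>
#include <stdlib.h>

enum obj_state { OBJ_CACHED, OBJ_DELETED };

struct refobj {
	pthread_mutex_t mtx;
	int refcnt;
	enum obj_state state;
};

static void
refobj_rele(struct refobj *o)
{
	bool destroy;

	pthread_mutex_lock(&o->mtx);
	destroy = (--o->refcnt == 0 && o->state == OBJ_DELETED);
	pthread_mutex_unlock(&o->mtx);

	if (destroy) {
		/* no other holder can exist now; deep free is safe */
		pthread_mutex_destroy(&o->mtx);
		free(o);
	}
}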