Example #1
/* Randomly remove nodes from the tree; this is really, really inefficient,
 * but hey.
 */
int
destroyTree(struct opr_rbtree *head)
{
    int counter;

    for (counter = 1000; counter>0; counter--) {
	struct opr_rbtree_node *node;
	int remove, i;

	remove = random() % counter;
	node = opr_rbtree_first(head);
	for (i=0; i<remove; i++)
	    node = opr_rbtree_next(node);

	opr_rbtree_remove(head, node);
	if (countNodes(head) != counter-1) {
	    printf("Tree has lost nodes after %d deletions", 1001 - counter);
	    return 0;
	}

	if (!checkTree(head)) {
	    printf("Tree check failed at %d removals\n", 1001 - counter);
	    return 0;
	}
    }
    return 1;
}
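The test above leans on two helpers that are not shown, countNodes and checkTree. A minimal sketch of countNodes, assuming opr_rbtree_next returns NULL past the last node and using only the opr_rbtree_first/opr_rbtree_next iteration already seen above (checkTree, which validates the red-black invariants, is omitted):

/* Count nodes by walking the tree in order; returns the number of
 * nodes reachable from head. */
static int
countNodes(struct opr_rbtree *head)
{
    struct opr_rbtree_node *node;
    int count = 0;

    for (node = opr_rbtree_first(head); node != NULL;
         node = opr_rbtree_next(node))
        count++;

    return count;
}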
Example #2
/**
 * @brief Check for expired TCP DRCs.
 */
static inline void drc_free_expired(void)
{
	drc_t *drc;
	time_t now = time(NULL);
	struct rbtree_x_part *t;
	struct opr_rbtree_node *odrc = NULL;

	DRC_ST_LOCK();

	if ((drc_st->tcp_drc_recycle_qlen < 1) ||
	    (now - drc_st->last_expire_check) < 600) /* 10m */
		goto unlock;

	do {
		drc = TAILQ_FIRST(&drc_st->tcp_drc_recycle_q);
		if (drc && (drc->d_u.tcp.recycle_time > 0)
		    && ((now - drc->d_u.tcp.recycle_time) >
			drc_st->expire_delta) && (drc->refcnt == 0)) {
			LogFullDebug(COMPONENT_DUPREQ,
				     "remove expired drc %p from recycle queue",
				     drc);
			t = rbtx_partition_of_scalar(&drc_st->tcp_drc_recycle_t,
						     drc->d_u.tcp.hk);

			odrc =
			    opr_rbtree_lookup(&t->t, &drc->d_u.tcp.recycle_k);
			if (!odrc) {
				LogCrit(COMPONENT_DUPREQ,
					"BUG: asked to dequeue DRC not on queue");
			} else {
				(void)opr_rbtree_remove(&t->t,
							&drc->d_u.tcp.recycle_k);
			}
			TAILQ_REMOVE(&drc_st->tcp_drc_recycle_q, drc,
				     d_u.tcp.recycle_q);
			--(drc_st->tcp_drc_recycle_qlen);
			/* expect DRC to be reachable from some xprt(s) */
			PTHREAD_MUTEX_lock(&drc->mtx);
			drc->flags &= ~DRC_FLAG_RECYCLE;
			/* but if not, dispose it */
			if (drc->refcnt == 0) {
				PTHREAD_MUTEX_unlock(&drc->mtx);
				free_tcp_drc(drc);
				continue;
			}
			PTHREAD_MUTEX_unlock(&drc->mtx);
		} else {
			LogFullDebug(COMPONENT_DUPREQ,
				     "unexpired drc %p in recycle queue expire check (nothing happens)",
				     drc);
			drc_st->last_expire_check = now;
			break;
		}

	} while (1);

 unlock:
	DRC_ST_UNLOCK();
}
Example #3
void
free_rpc_call_ctx(rpc_ctx_t *ctx, uint32_t flags)
{
    struct x_vc_data *xd = (struct x_vc_data *) ctx->ctx_u.clnt.clnt->cl_p1;
    struct rpc_dplx_rec *rec  = xd->rec;
    struct timespec ts;

    /* wait for commit of any xfer (ctx specific) */
    mutex_lock(&ctx->we.mtx);
    if (ctx->flags & RPC_CTX_FLAG_WAITSYNC) {
        /* WAITSYNC is already cleared if the call timed out, but it is
         * incorrect to wait forever */
        (void) clock_gettime(CLOCK_MONOTONIC_FAST, &ts);
        timespecadd(&ts, &ctx->ctx_u.clnt.timeout);
        (void) cond_timedwait(&ctx->we.cv, &ctx->we.mtx, &ts);
    }

    mutex_lock(&rec->mtx);
    opr_rbtree_remove(&xd->cx.calls.t, &ctx->node_k);
    /* interlock */
    mutex_unlock(&ctx->we.mtx);
    mutex_unlock(&rec->mtx);

    if (ctx->msg)
        free_rpc_msg(ctx->msg);
    mem_free(ctx, sizeof(rpc_ctx_t));
}
Example #4
bool
rpc_ctx_xfer_replymsg(struct x_vc_data *xd, struct rpc_msg *msg)
{
    rpc_ctx_t ctx_k, *ctx;
    struct opr_rbtree_node *nv;
    rpc_dplx_lock_t *lk = &xd->rec->recv.lock;
    
    ctx_k.xid = msg->rm_xid;
    mutex_lock(&xd->rec->mtx);
    nv = opr_rbtree_lookup(&xd->cx.calls.t, &ctx_k.node_k);
    if (nv) {
        ctx = opr_containerof(nv, rpc_ctx_t, node_k);
        opr_rbtree_remove(&xd->cx.calls.t, &ctx->node_k);
        free_rpc_msg(ctx->msg); /* free call header */
        ctx->msg = msg; /* and stash reply header */
        ctx->flags |= RPC_CTX_FLAG_SYNCDONE;
        mutex_unlock(&xd->rec->mtx);
        cond_signal(&lk->we.cv); /* XXX we hold lk->we.mtx */
	/* now, we must ourselves wait for the other side to run */
	while (! (ctx->flags & RPC_CTX_FLAG_ACKSYNC))
	    cond_wait(&lk->we.cv, &lk->we.mtx);

	/* ctx-specific signal--indicates we will make no further
	 * references to ctx whatsoever */
	mutex_lock(&ctx->we.mtx);
	ctx->flags &= ~RPC_CTX_FLAG_WAITSYNC;
        cond_signal(&ctx->we.cv);
	mutex_unlock(&ctx->we.mtx);

        return (TRUE);
    }
    mutex_unlock(&xd->rec->mtx);
    return (FALSE);
}
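Example #4 finds the call context for an incoming reply by filling only the xid of a stack-allocated key (ctx_k) and handing its embedded node to opr_rbtree_lookup, so the calls tree must be ordered by a node-to-node comparator that reads just the xid. That comparator is outside the excerpt; a minimal sketch of what it might look like, with the name call_xid_cmpf and the exact signature as assumptions:

/* Hypothetical comparator for the calls tree: recovers the enclosing
 * rpc_ctx_t from each node with opr_containerof and orders by xid. */
static int
call_xid_cmpf(const struct opr_rbtree_node *lhs,
              const struct opr_rbtree_node *rhs)
{
    rpc_ctx_t *lk = opr_containerof(lhs, rpc_ctx_t, node_k);
    rpc_ctx_t *rk = opr_containerof(rhs, rpc_ctx_t, node_k);

    if (lk->xid < rk->xid)
        return (-1);
    if (lk->xid > rk->xid)
        return (1);
    return (0);
}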
Example #5
int
rpc_ctx_wait_reply(rpc_ctx_t *ctx, uint32_t flags)
{
    struct x_vc_data *xd = (struct x_vc_data *) ctx->ctx_u.clnt.clnt->cl_p1;
    struct rpc_dplx_rec *rec = xd->rec;
    rpc_dplx_lock_t *lk = &rec->recv.lock;
    struct timespec ts;
    int code = 0;

    /* we hold recv channel lock */
    ctx->flags |= RPC_CTX_FLAG_WAITSYNC;
    while (! (ctx->flags & RPC_CTX_FLAG_SYNCDONE)) {
        (void) clock_gettime(CLOCK_MONOTONIC_FAST, &ts);
        timespecadd(&ts, &ctx->ctx_u.clnt.timeout);
        code = cond_timedwait(&lk->we.cv, &lk->we.mtx, &ts);
        /* if we timed out, check for xprt destroyed (no more receives) */
        if (code == ETIMEDOUT) {
            SVCXPRT *xprt = rec->hdl.xprt;
            uint32_t xp_flags;

            /* dequeue the call */
            mutex_lock(&rec->mtx);
            opr_rbtree_remove(&xd->cx.calls.t, &ctx->node_k);
            mutex_unlock(&rec->mtx);

            mutex_lock(&xprt->xp_lock);
            xp_flags = xprt->xp_flags;
            mutex_unlock(&xprt->xp_lock);

            if (xp_flags & SVC_XPRT_FLAG_DESTROYED) {
                /* XXX should also set error.re_why, but the facility is not
                 * well developed. */
                ctx->error.re_status = RPC_TIMEDOUT;
            }
            ctx->flags &= ~RPC_CTX_FLAG_WAITSYNC;
            goto out;
        }
    }
    ctx->flags &= ~RPC_CTX_FLAG_SYNCDONE;

    /* switch on direction */
    switch (ctx->msg->rm_direction) {
    case REPLY:
        if (ctx->msg->rm_xid == ctx->xid)
            return (RPC_SUCCESS);
        break;
    case CALL:
        /* XXX cond transfer control to svc */
        /* */
        break;
    default:
        break;
    }

out:
    return (code);
}
Example #6
int32_t
rpc_dplx_unref(struct rpc_dplx_rec *rec, u_int flags)
{
    struct rbtree_x_part *t;
    struct opr_rbtree_node *nv;
    int32_t refcnt;

    if (! (flags & RPC_DPLX_FLAG_LOCKED))
        REC_LOCK(rec);

    refcnt = --(rec->refcnt);

    __warnx(TIRPC_DEBUG_FLAG_REFCNT,
            "%s: postunref %p rec->refcnt %u",
            __func__, rec, refcnt);

    if (rec->refcnt == 0) {
        t = rbtx_partition_of_scalar(&rpc_dplx_rec_set.xt, rec->fd_k);
        REC_UNLOCK(rec);
        rwlock_wrlock(&t->lock);
        nv = opr_rbtree_lookup(&t->t, &rec->node_k);
        rec = NULL;
        if (nv) {
            rec = opr_containerof(nv, struct rpc_dplx_rec, node_k);
            REC_LOCK(rec);
            if (rec->refcnt == 0) {
                (void) opr_rbtree_remove(&t->t, &rec->node_k);
                REC_UNLOCK(rec);
                __warnx(TIRPC_DEBUG_FLAG_REFCNT,
                        "%s: free rec %p rec->refcnt %u",
                        __func__, rec, refcnt);

                free_dplx_rec(rec);
                rec = NULL;
            } else {
                refcnt = rec->refcnt;
            }
        }
        rwlock_unlock(&t->lock);
    }

    /* if the record survived, drop the lock before returning */
    if (rec)
        REC_UNLOCK(rec);

    return (refcnt);
}
Example #7
void rpc_ctx_next_xid(rpc_ctx_t *ctx, uint32_t flags)
{
    struct x_vc_data *xd = (struct x_vc_data *) ctx->ctx_u.clnt.clnt->cl_p1;
    struct rpc_dplx_rec *rec = xd->rec;

    assert (flags & RPC_CTX_FLAG_LOCKED);

    mutex_lock(&rec->mtx);
    opr_rbtree_remove(&xd->cx.calls.t, &ctx->node_k);
    ctx->xid = ++(xd->cx.calls.xid);
    if (opr_rbtree_insert(&xd->cx.calls.t, &ctx->node_k)) {
        mutex_unlock(&rec->mtx);
        __warnx(TIRPC_DEBUG_FLAG_RPC_CTX,
                "%s: call ctx insert failed (xid %d client %p)",
                __func__,
                ctx->xid,
                ctx->ctx_u.clnt.clnt);
        goto out;
    }
    mutex_unlock(&rec->mtx);
out:
    return;
}
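Example #7 re-keys a context that is already in the calls tree. Registering a brand-new context would use the same locked insert; a sketch under the same assumptions (register_call_ctx is a hypothetical helper, not an existing library call):

/* Hypothetical helper: claim the next xid and insert a new call
 * context into the per-connection calls tree under rec->mtx. */
static int
register_call_ctx(struct x_vc_data *xd, rpc_ctx_t *ctx)
{
    int code = 0;

    mutex_lock(&xd->rec->mtx);
    ctx->xid = ++(xd->cx.calls.xid);
    if (opr_rbtree_insert(&xd->cx.calls.t, &ctx->node_k))
        code = -1;    /* collision on xid; caller must handle */
    mutex_unlock(&xd->rec->mtx);

    return (code);
}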