struct rpc_dplx_rec *
rpc_dplx_lookup_rec(int fd, uint32_t iflags, uint32_t *oflags)
{
	struct rbtree_x_part *t;
	struct rpc_dplx_rec rk, *rec = NULL;
	struct opr_rbtree_node *nv;

	cond_init_rpc_dplx();

	rk.fd_k = fd;
	t = rbtx_partition_of_scalar(&(rpc_dplx_rec_set.xt), fd);

	rwlock_rdlock(&t->lock);
	nv = opr_rbtree_lookup(&t->t, &rk.node_k);

	/* XXX rework lock+insert case, so that new entries are inserted
	 * locked, and t->lock critical section is reduced */

	if (!nv) {
		/* not found under the read lock; upgrade by drop+reacquire
		 * and look up again, since another thread may have inserted
		 * the record while no lock was held */
		rwlock_unlock(&t->lock);
		rwlock_wrlock(&t->lock);
		nv = opr_rbtree_lookup(&t->t, &rk.node_k);
		if (!nv) {
			rec = alloc_dplx_rec();
			if (!rec) {
				__warnx(TIRPC_DEBUG_FLAG_LOCK,
					"%s: failed allocating rpc_dplx_rec",
					__func__);
				goto unlock;
			}

			/* tell the caller */
			*oflags = RPC_DPLX_LKP_OFLAG_ALLOC;

			rec->fd_k = fd;

			if (opr_rbtree_insert(&t->t, &rec->node_k)) {
				/* can't happen: we re-checked under the
				 * write lock */
				__warnx(TIRPC_DEBUG_FLAG_LOCK,
					"%s: collision inserting in locked rbtree partition",
					__func__);
				free_dplx_rec(rec);
				rec = NULL;
				goto unlock;
			}
		} else {
			/* raced: another thread inserted it; use that record */
			rec = opr_containerof(nv, struct rpc_dplx_rec, node_k);
			*oflags = RPC_DPLX_LKP_FLAG_NONE;
		}
	} else {
		rec = opr_containerof(nv, struct rpc_dplx_rec, node_k);
		*oflags = RPC_DPLX_LKP_FLAG_NONE;
	}

	rpc_dplx_ref(rec, (iflags & RPC_DPLX_LKP_IFLAG_LOCKREC) ?
		     RPC_DPLX_FLAG_LOCK : RPC_DPLX_FLAG_NONE);

unlock:
	rwlock_unlock(&t->lock);

	return (rec);
}
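/* A minimal, self-contained sketch (not the libntirpc code) of the
 * "double-checked" lookup-or-insert pattern rpc_dplx_lookup_rec uses:
 * look up under a read lock, and if the key is absent, drop the read
 * lock, take the write lock, and look up again before inserting,
 * because another thread may have inserted in the unlocked window.
 * The list, the fd_rec type, and all names here are hypothetical;
 * a flat list and an atomic refcount stand in for the partitioned
 * red-black tree and per-record lock. */

#include <pthread.h>
#include <stdatomic.h>
#include <stdlib.h>

struct fd_rec {
	int fd;
	_Atomic int refcnt;	/* atomic: bumped under the read lock */
	struct fd_rec *next;
};

static pthread_rwlock_t table_lock = PTHREAD_RWLOCK_INITIALIZER;
static struct fd_rec *table_head;

static struct fd_rec *find_locked(int fd)	/* caller holds table_lock */
{
	struct fd_rec *r;

	for (r = table_head; r; r = r->next)
		if (r->fd == fd)
			return r;
	return NULL;
}

struct fd_rec *fd_rec_lookup_or_insert(int fd)
{
	struct fd_rec *r;

	pthread_rwlock_rdlock(&table_lock);
	r = find_locked(fd);
	if (!r) {
		/* not found: upgrade by drop + reacquire, then re-check,
		 * since a racing thread may have inserted in the gap */
		pthread_rwlock_unlock(&table_lock);
		pthread_rwlock_wrlock(&table_lock);
		r = find_locked(fd);
		if (!r) {
			r = calloc(1, sizeof(*r));
			if (!r) {
				pthread_rwlock_unlock(&table_lock);
				return NULL;
			}
			r->fd = fd;
			r->next = table_head;
			table_head = r;
		}
	}
	atomic_fetch_add(&r->refcnt, 1);  /* reference taken before unlock */
	pthread_rwlock_unlock(&table_lock);
	return r;
}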
/**
 * @brief Check for expired TCP DRCs.
 */
static inline void drc_free_expired(void)
{
	drc_t *drc;
	time_t now = time(NULL);
	struct rbtree_x_part *t;
	struct opr_rbtree_node *odrc = NULL;

	DRC_ST_LOCK();

	if ((drc_st->tcp_drc_recycle_qlen < 1) ||
	    (now - drc_st->last_expire_check) < 600) /* 10m */
		goto unlock;

	do {
		drc = TAILQ_FIRST(&drc_st->tcp_drc_recycle_q);
		if (drc && (drc->d_u.tcp.recycle_time > 0)
		    && ((now - drc->d_u.tcp.recycle_time) >
			drc_st->expire_delta)
		    && (drc->refcnt == 0)) {
			LogFullDebug(COMPONENT_DUPREQ,
				     "remove expired drc %p from recycle queue",
				     drc);
			t = rbtx_partition_of_scalar(&drc_st->tcp_drc_recycle_t,
						     drc->d_u.tcp.hk);

			odrc = opr_rbtree_lookup(&t->t,
						 &drc->d_u.tcp.recycle_k);
			if (!odrc) {
				LogCrit(COMPONENT_DUPREQ,
					"BUG: asked to dequeue DRC not on queue");
			} else {
				(void)opr_rbtree_remove(&t->t,
							&drc->d_u.tcp.recycle_k);
			}
			TAILQ_REMOVE(&drc_st->tcp_drc_recycle_q, drc,
				     d_u.tcp.recycle_q);
			--(drc_st->tcp_drc_recycle_qlen);
			/* expect DRC to be reachable from some xprt(s) */
			PTHREAD_MUTEX_lock(&drc->mtx);
			drc->flags &= ~DRC_FLAG_RECYCLE;
			/* but if not, dispose it */
			if (drc->refcnt == 0) {
				PTHREAD_MUTEX_unlock(&drc->mtx);
				free_tcp_drc(drc);
				continue;
			}
			PTHREAD_MUTEX_unlock(&drc->mtx);
		} else {
			LogFullDebug(COMPONENT_DUPREQ,
				     "unexpired drc %p in recycle queue expire check (nothing happens)",
				     drc);
			drc_st->last_expire_check = now;
			break;
		}
	} while (1);

unlock:
	DRC_ST_UNLOCK();
}
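/* Self-contained sketch (hypothetical types and names, not the Ganesha
 * code) of the lazy-expiry idea in drc_free_expired: a "last check"
 * timestamp gates the sweep to at most once per interval, and the
 * recycle queue is walked from the head, freeing entries that have sat
 * unreferenced past expire_delta. Entries are queued in recycle order,
 * so the head is the oldest and the sweep can stop at the first entry
 * that is still young or still referenced. */

#include <sys/queue.h>
#include <pthread.h>
#include <stdlib.h>
#include <time.h>

struct idle_obj {
	time_t recycle_time;		/* when the object became idle */
	int refcnt;
	TAILQ_ENTRY(idle_obj) q;
};

static TAILQ_HEAD(, idle_obj) recycle_q = TAILQ_HEAD_INITIALIZER(recycle_q);
static pthread_mutex_t recycle_lock = PTHREAD_MUTEX_INITIALIZER;
static time_t last_expire_check;
static const time_t check_interval = 600;	/* 10m between sweeps */
static const time_t expire_delta = 1200;	/* idle time before disposal */

void recycle_free_expired(void)
{
	struct idle_obj *o;
	time_t now = time(NULL);

	pthread_mutex_lock(&recycle_lock);
	if (TAILQ_EMPTY(&recycle_q) ||
	    (now - last_expire_check) < check_interval)
		goto unlock;
	last_expire_check = now;

	/* head is oldest; stop at the first young or referenced entry */
	while ((o = TAILQ_FIRST(&recycle_q)) != NULL) {
		if ((now - o->recycle_time) <= expire_delta || o->refcnt != 0)
			break;
		TAILQ_REMOVE(&recycle_q, o, q);
		free(o);
	}
unlock:
	pthread_mutex_unlock(&recycle_lock);
}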
bool
rpc_ctx_xfer_replymsg(struct x_vc_data *xd, struct rpc_msg *msg)
{
	rpc_ctx_t ctx_k, *ctx;
	struct opr_rbtree_node *nv;
	rpc_dplx_lock_t *lk = &xd->rec->recv.lock;

	ctx_k.xid = msg->rm_xid;
	mutex_lock(&xd->rec->mtx);
	nv = opr_rbtree_lookup(&xd->cx.calls.t, &ctx_k.node_k);
	if (nv) {
		ctx = opr_containerof(nv, rpc_ctx_t, node_k);
		opr_rbtree_remove(&xd->cx.calls.t, &ctx->node_k);
		free_rpc_msg(ctx->msg);	/* free call header */
		ctx->msg = msg;		/* and stash reply header */
		ctx->flags |= RPC_CTX_FLAG_SYNCDONE;
		mutex_unlock(&xd->rec->mtx);
		cond_signal(&lk->we.cv);	/* XXX we hold lk->we.mtx */

		/* now, we must ourselves wait for the other side to run */
		while (!(ctx->flags & RPC_CTX_FLAG_ACKSYNC))
			cond_wait(&lk->we.cv, &lk->we.mtx);

		/* ctx-specific signal--indicates we will make no further
		 * references to ctx whatsoever */
		mutex_lock(&ctx->we.mtx);
		ctx->flags &= ~RPC_CTX_FLAG_WAITSYNC;
		cond_signal(&ctx->we.cv);
		mutex_unlock(&ctx->we.mtx);

		return (TRUE);
	}
	mutex_unlock(&xd->rec->mtx);

	return (FALSE);
}
int32_t
rpc_dplx_unref(struct rpc_dplx_rec *rec, u_int flags)
{
	struct rbtree_x_part *t;
	struct opr_rbtree_node *nv;
	int32_t refcnt;

	if (!(flags & RPC_DPLX_FLAG_LOCKED))
		REC_LOCK(rec);

	refcnt = --(rec->refcnt);

	__warnx(TIRPC_DEBUG_FLAG_REFCNT,
		"%s: postunref %p rec->refcnt %u",
		__func__, rec, refcnt);

	if (rec->refcnt == 0) {
		t = rbtx_partition_of_scalar(&rpc_dplx_rec_set.xt, rec->fd_k);
		REC_UNLOCK(rec);
		rwlock_wrlock(&t->lock);
		nv = opr_rbtree_lookup(&t->t, &rec->node_k);
		rec = NULL;
		if (nv) {
			rec = opr_containerof(nv, struct rpc_dplx_rec, node_k);
			REC_LOCK(rec);
			/* re-check under the partition write lock: a
			 * concurrent lookup may have re-referenced rec */
			if (rec->refcnt == 0) {
				(void)opr_rbtree_remove(&t->t, &rec->node_k);
				REC_UNLOCK(rec);
				__warnx(TIRPC_DEBUG_FLAG_REFCNT,
					"%s: free rec %p rec->refcnt %u",
					__func__, rec, refcnt);
				free_dplx_rec(rec);
				rec = NULL;
			} else {
				refcnt = rec->refcnt;
			}
		}
		rwlock_unlock(&t->lock);
	}

	/* release the record lock unless the caller holds it */
	if (rec && (!(flags & RPC_DPLX_FLAG_LOCKED)))
		REC_UNLOCK(rec);

	return (refcnt);
}
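/* Companion to the hypothetical fd_rec sketch above (same table, lock and
 * struct), illustrating the drop-to-zero pattern rpc_dplx_unref follows:
 * decrement first, and only if the count reached zero take the table's
 * write lock, look the record up again, and re-check the count before
 * unlinking and freeing it, because a concurrent fd_rec_lookup_or_insert
 * may have revived the record in the window where no lock was held. */

int fd_rec_release(struct fd_rec *rec)
{
	int fd = rec->fd;
	int refcnt = atomic_fetch_sub(&rec->refcnt, 1) - 1;

	if (refcnt == 0) {
		struct fd_rec **pp, *r;

		/* zero is only advisory: re-find and re-check under the
		 * table's write lock before freeing */
		pthread_rwlock_wrlock(&table_lock);
		for (pp = &table_head; (r = *pp) != NULL; pp = &r->next) {
			if (r->fd == fd) {
				if (atomic_load(&r->refcnt) == 0) {
					*pp = r->next;	/* unlink */
					free(r);
				} else {
					/* revived by a racing lookup */
					refcnt = atomic_load(&r->refcnt);
				}
				break;
			}
		}
		pthread_rwlock_unlock(&table_lock);
	}
	return refcnt;
}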
/**
 * @brief Find and reference a DRC to process the supplied svc_req.
 *
 * @param[in] req  The svc_req being processed.
 *
 * @return The ref'd DRC if successfully located, else NULL.
 */
static /* inline */ drc_t *
nfs_dupreq_get_drc(struct svc_req *req)
{
	enum drc_type dtype = get_drc_type(req);
	drc_t *drc = NULL;
	bool drc_check_expired = false;

	switch (dtype) {
	case DRC_UDP_V234:
		LogFullDebug(COMPONENT_DUPREQ, "ref shared UDP DRC");
		drc = &(drc_st->udp_drc);
		DRC_ST_LOCK();
		(void)nfs_dupreq_ref_drc(drc);
		DRC_ST_UNLOCK();
		goto out;
	case DRC_TCP_V4:
	case DRC_TCP_V3:
		/* Idempotent address, no need for lock;
		 * xprt will be valid as long as svc_req. */
		drc = (drc_t *)req->rq_xprt->xp_u2;
		if (drc) {
			/* found, no danger of removal */
			LogFullDebug(COMPONENT_DUPREQ,
				     "ref DRC=%p for xprt=%p",
				     drc, req->rq_xprt);
			PTHREAD_MUTEX_lock(&drc->mtx);	/* LOCKED */
		} else {
			drc_t drc_k;
			struct rbtree_x_part *t = NULL;
			struct opr_rbtree_node *ndrc = NULL;
			drc_t *tdrc = NULL;

			memset(&drc_k, 0, sizeof(drc_k));
			drc_k.type = dtype;

			/* Since the drc can last longer than the xprt,
			 * copy the address. Read operation of constant data,
			 * no xprt lock required. */
			(void)copy_xprt_addr(&drc_k.d_u.tcp.addr, req->rq_xprt);

			drc_k.d_u.tcp.hk =
			    CityHash64WithSeed((char *)&drc_k.d_u.tcp.addr,
					       sizeof(sockaddr_t), 911);
			{
				char str[SOCK_NAME_MAX];

				sprint_sockaddr(&drc_k.d_u.tcp.addr, str,
						sizeof(str));
				LogFullDebug(COMPONENT_DUPREQ,
					     "get drc for addr: %s", str);
			}

			t = rbtx_partition_of_scalar(&drc_st->tcp_drc_recycle_t,
						     drc_k.d_u.tcp.hk);
			DRC_ST_LOCK();
			ndrc = opr_rbtree_lookup(&t->t,
						 &drc_k.d_u.tcp.recycle_k);
			if (ndrc) {
				/* reuse old DRC */
				tdrc = opr_containerof(ndrc, drc_t,
						       d_u.tcp.recycle_k);
				PTHREAD_MUTEX_lock(&tdrc->mtx);	/* LOCKED */
				if (tdrc->flags & DRC_FLAG_RECYCLE) {
					TAILQ_REMOVE(&drc_st->tcp_drc_recycle_q,
						     tdrc, d_u.tcp.recycle_q);
					--(drc_st->tcp_drc_recycle_qlen);
					tdrc->flags &= ~DRC_FLAG_RECYCLE;
				}
				drc = tdrc;
				LogFullDebug(COMPONENT_DUPREQ,
					     "recycle TCP DRC=%p for xprt=%p",
					     tdrc, req->rq_xprt);
			}

			if (!drc) {
				drc = alloc_tcp_drc(dtype);
				LogFullDebug(COMPONENT_DUPREQ,
					     "alloc new TCP DRC=%p for xprt=%p",
					     drc, req->rq_xprt);
				/* assign addr */
				memcpy(&drc->d_u.tcp.addr, &drc_k.d_u.tcp.addr,
				       sizeof(sockaddr_t));
				/* assign already-computed hash */
				drc->d_u.tcp.hk = drc_k.d_u.tcp.hk;
				PTHREAD_MUTEX_lock(&drc->mtx);	/* LOCKED */
				/* xprt ref */
				drc->refcnt = 1;
				/* insert dict */
				opr_rbtree_insert(&t->t,
						  &drc->d_u.tcp.recycle_k);
			}
			DRC_ST_UNLOCK();
			drc->d_u.tcp.recycle_time = 0;

			(void)nfs_dupreq_ref_drc(drc);	/* xprt ref */

			/* try to expire unused DRCs somewhat in proportion to
			 * new connection arrivals */
			drc_check_expired = true;

			LogFullDebug(COMPONENT_DUPREQ,
				     "after ref drc %p refcnt==%u ",
				     drc, drc->refcnt);

			/* Idempotent address, no need for lock;
			 * set once here, never changes.
			 * No other fields are modified.
			 * Assumes address stores are atomic. */
			req->rq_xprt->xp_u2 = (void *)drc;
		}
		break;
	default:
		/* XXX error */
		break;
	}

	/* call path ref */
	(void)nfs_dupreq_ref_drc(drc);
	PTHREAD_MUTEX_unlock(&drc->mtx);

	if (drc_check_expired)
		drc_free_expired();

out:
	return drc;
}
/**
 * @brief Find and reference a DRC to process the supplied svc_req.
 *
 * @param[in] req  The svc_req being processed.
 *
 * @return The ref'd DRC if successfully located, else NULL.
 */
static /* inline */ drc_t *
nfs_dupreq_get_drc(struct svc_req *req)
{
	enum drc_type dtype = get_drc_type(req);
	gsh_xprt_private_t *xu = (gsh_xprt_private_t *)req->rq_xprt->xp_u1;
	drc_t *drc = NULL;
	bool drc_check_expired = false;

	switch (dtype) {
	case DRC_UDP_V234:
		LogFullDebug(COMPONENT_DUPREQ, "ref shared UDP DRC");
		drc = &(drc_st->udp_drc);
		DRC_ST_LOCK();
		(void)nfs_dupreq_ref_drc(drc);
		DRC_ST_UNLOCK();
		goto out;
	case DRC_TCP_V4:
	case DRC_TCP_V3:
		pthread_mutex_lock(&req->rq_xprt->xp_lock);
		if (xu->drc) {
			drc = xu->drc;
			LogFullDebug(COMPONENT_DUPREQ,
				     "ref DRC=%p for xprt=%p",
				     drc, req->rq_xprt);
			pthread_mutex_lock(&drc->mtx);	/* LOCKED */
		} else {
			drc_t drc_k;
			struct rbtree_x_part *t = NULL;
			struct opr_rbtree_node *ndrc = NULL;
			drc_t *tdrc = NULL;

			memset(&drc_k, 0, sizeof(drc_k));
			drc_k.type = dtype;
			(void)copy_xprt_addr(&drc_k.d_u.tcp.addr, req->rq_xprt);

			drc_k.d_u.tcp.hk =
			    CityHash64WithSeed((char *)&drc_k.d_u.tcp.addr,
					       sizeof(sockaddr_t), 911);
			{
				char str[512];

				sprint_sockaddr(&drc_k.d_u.tcp.addr, str, 512);
				LogFullDebug(COMPONENT_DUPREQ,
					     "get drc for addr: %s", str);
			}

			t = rbtx_partition_of_scalar(&drc_st->tcp_drc_recycle_t,
						     drc_k.d_u.tcp.hk);
			DRC_ST_LOCK();
			ndrc = opr_rbtree_lookup(&t->t,
						 &drc_k.d_u.tcp.recycle_k);
			if (ndrc) {
				/* reuse old DRC */
				tdrc = opr_containerof(ndrc, drc_t,
						       d_u.tcp.recycle_k);
				pthread_mutex_lock(&tdrc->mtx);	/* LOCKED */
				if (tdrc->flags & DRC_FLAG_RECYCLE) {
					TAILQ_REMOVE(&drc_st->tcp_drc_recycle_q,
						     tdrc, d_u.tcp.recycle_q);
					--(drc_st->tcp_drc_recycle_qlen);
					tdrc->flags &= ~DRC_FLAG_RECYCLE;
				}
				drc = tdrc;
				LogFullDebug(COMPONENT_DUPREQ,
					     "recycle TCP DRC=%p for xprt=%p",
					     tdrc, req->rq_xprt);
			}

			if (!drc) {
				drc = alloc_tcp_drc(dtype);
				LogFullDebug(COMPONENT_DUPREQ,
					     "alloc new TCP DRC=%p for xprt=%p",
					     drc, req->rq_xprt);
				/* assign addr */
				memcpy(&drc->d_u.tcp.addr, &drc_k.d_u.tcp.addr,
				       sizeof(sockaddr_t));
				/* assign already-computed hash */
				drc->d_u.tcp.hk = drc_k.d_u.tcp.hk;
				pthread_mutex_lock(&drc->mtx);	/* LOCKED */
				/* xprt ref */
				drc->refcnt = 1;
				/* insert dict */
				opr_rbtree_insert(&t->t,
						  &drc->d_u.tcp.recycle_k);
			}
			DRC_ST_UNLOCK();
			drc->d_u.tcp.recycle_time = 0;

			/* xprt drc */
			(void)nfs_dupreq_ref_drc(drc);	/* xu ref */

			/* try to expire unused DRCs somewhat in proportion to
			 * new connection arrivals */
			drc_check_expired = true;

			LogFullDebug(COMPONENT_DUPREQ,
				     "after ref drc %p refcnt==%u ",
				     drc, drc->refcnt);

			xu->drc = drc;
		}
		pthread_mutex_unlock(&req->rq_xprt->xp_lock);
		break;
	default:
		/* XXX error */
		break;
	}

	/* call path ref */
	(void)nfs_dupreq_ref_drc(drc);
	pthread_mutex_unlock(&drc->mtx);

	if (drc_check_expired)
		drc_free_expired();

out:
	return drc;
}
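/* Self-contained sketch (hypothetical names; a seeded FNV-1a stands in for
 * CityHash64WithSeed, a flat list for the partitioned rbtree) of the
 * per-connection caching both versions of nfs_dupreq_get_drc perform: the
 * first request on a connection hashes the peer address, finds or creates
 * the shared object in a table under the table lock, takes a reference,
 * and stashes the pointer in the connection's user-data slot so later
 * requests skip the lookup. Like the xp_u2 variant, it assumes requests on
 * one connection reach this point serially, so the slot is set once. */

#include <pthread.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <sys/socket.h>

struct conn_cache {
	uint64_t hk;			/* hash of the peer address */
	struct sockaddr_storage addr;
	int refcnt;
	struct conn_cache *next;
};

struct connection {
	struct sockaddr_storage peer;
	void *u_data;			/* per-connection slot, like xp_u2 */
};

static pthread_mutex_t cache_lock = PTHREAD_MUTEX_INITIALIZER;
static struct conn_cache *cache_head;

static uint64_t hash_addr(const void *buf, size_t len, uint64_t seed)
{
	/* FNV-1a with a seed mixed in; a stand-in for CityHash64WithSeed */
	const unsigned char *p = buf;
	uint64_t h = 14695981039346656037ULL ^ seed;
	size_t i;

	for (i = 0; i < len; i++) {
		h ^= p[i];
		h *= 1099511628211ULL;
	}
	return h;
}

struct conn_cache *conn_cache_get(struct connection *conn)
{
	struct conn_cache *cc = conn->u_data;
	uint64_t hk;

	if (cc)
		return cc;	/* fast path: already cached on this connection */

	hk = hash_addr(&conn->peer, sizeof(conn->peer), 911);

	pthread_mutex_lock(&cache_lock);
	for (cc = cache_head; cc; cc = cc->next)
		if (cc->hk == hk &&
		    memcmp(&cc->addr, &conn->peer, sizeof(conn->peer)) == 0)
			break;
	if (!cc) {
		cc = calloc(1, sizeof(*cc));
		if (cc) {
			cc->hk = hk;
			memcpy(&cc->addr, &conn->peer, sizeof(conn->peer));
			cc->next = cache_head;
			cache_head = cc;
		}
	}
	if (cc)
		cc->refcnt++;	/* reference held by the connection slot */
	pthread_mutex_unlock(&cache_lock);

	conn->u_data = cc;	/* set once; later requests read it unlocked */
	return cc;
}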