/**
 * @brief Allocate and initialize a TCP-style duplicate request cache
 *
 * Sizing parameters (maxsize, cachesz, npart, hiwat) are taken from
 * nfs_param.core_param.drc.tcp; the caller supplies only the DRC style.
 *
 * @param[in] dtype  Style DRC to allocate (DRC_TCP_V3 or DRC_TCP_V4)
 *
 * @return the drc, if successfully allocated, else NULL.
 */
static inline drc_t *alloc_tcp_drc(enum drc_type dtype)
{
	drc_t *drc = pool_alloc(tcp_drc_pool, NULL);
	int ix, code __attribute__ ((unused)) = 0;

	if (unlikely(!drc)) {
		LogCrit(COMPONENT_DUPREQ, "alloc TCP DRC failed");
		goto out;
	}

	drc->type = dtype;	/* DRC_TCP_V3 or DRC_TCP_V4 */
	drc->refcnt = 0;
	drc->retwnd = 0;
	drc->d_u.tcp.recycle_time = 0;
	drc->maxsize = nfs_param.core_param.drc.tcp.size;
	drc->cachesz = nfs_param.core_param.drc.tcp.cachesz;
	drc->npart = nfs_param.core_param.drc.tcp.npart;
	/* BUG FIX: previously read drc.udp.hiwat (copy/paste from the UDP
	 * path); a TCP DRC must honor the TCP high-water mark */
	drc->hiwat = nfs_param.core_param.drc.tcp.hiwat;

	PTHREAD_MUTEX_init(&drc->mtx, NULL);

	/* init dict */
	code =
	    rbtx_init(&drc->xt, dupreq_tcp_cmpf, drc->npart,
		      RBT_X_FLAG_ALLOC | RBT_X_FLAG_CACHE_WT);
	assert(!code);

	/* completed requests */
	TAILQ_INIT(&drc->dupreq_q);

	/* recycling DRC */
	TAILQ_INIT_ENTRY(drc, d_u.tcp.recycle_q);

	/* init "cache" partitions; xt.cachesz is the same for every
	 * partition, so set it once rather than per iteration */
	drc->xt.cachesz = drc->cachesz;
	for (ix = 0; ix < drc->npart; ++ix) {
		struct rbtree_x_part *xp = &(drc->xt.tree[ix]);

		xp->cache =
		    gsh_calloc(drc->cachesz, sizeof(struct opr_rbtree_node *));
		if (unlikely(!xp->cache)) {
			/* disable the per-partition cache on failure; the
			 * rbtree itself still works without it */
			LogCrit(COMPONENT_DUPREQ,
				"TCP DRC hash partition allocation failed (ix=%d)",
				ix);
			drc->cachesz = 0;
			break;
		}
	}

 out:
	return drc;
}
/**
 * @brief Construct a duplicate request cache entry.
 *
 * Entries are allocated from dupreq_pool.  Since dupreq_entry_t
 * presently contains an expanded nfs_arg_t, the entry is zeroed so that
 * XDR allocation sees NULL value pointers.
 *
 * @return The newly allocated dupreq entry or NULL.
 */
static inline dupreq_entry_t *alloc_dupreq(void)
{
	dupreq_entry_t *dv = pool_alloc(dupreq_pool, NULL);

	if (!dv) {
		LogCrit(COMPONENT_DUPREQ, "alloc dupreq_entry_t failed");
		return NULL;
	}

	memset(dv, 0, sizeof(dupreq_entry_t));	/* XXX pool_zalloc */
	gsh_mutex_init(&dv->mtx, NULL);
	TAILQ_INIT_ENTRY(dv, fifo_q);

	return dv;
}
/* Example #3 */
/**
 * @brief Allocate and initialize an rpc_msg.
 *
 * The call credential/verifier oa_base pointers are aimed at inline
 * storage within the message to avoid separate alloc/free cycles, and
 * the accepted-reply fields are primed as required for REPLY decodes.
 *
 * @return The new message, or NULL if allocation failed.
 */
struct rpc_msg *
alloc_rpc_msg(void)
{
	struct rpc_msg *msg = mem_alloc(sizeof(struct rpc_msg));

	/* BUG FIX: guard the allocation before dereferencing.
	 * NOTE(review): assumes mem_alloc can return NULL here — some
	 * builds abort on OOM, in which case this guard is dead but
	 * harmless; confirm against the mem_alloc in use. */
	if (msg == NULL)
		return (NULL);

	TAILQ_INIT_ENTRY(msg, msg_q);

	/* avoid separate alloc/free */
	msg->rm_call.cb_cred.oa_base = msg->cb_cred_body;
	msg->rm_call.cb_verf.oa_base = msg->cb_verf_body;

	/* required for REPLY decodes */
	msg->acpted_rply.ar_verf = _null_auth;
	msg->acpted_rply.ar_results.where = NULL;
	msg->acpted_rply.ar_results.proc = (xdrproc_t) xdr_void;

	return (msg);
}