Example 1
/**
 * @brief Construct a duplicate request cache entry.
 *
 * Entries are allocated from the dupreq_pool.  Since dupreq_entry_t
 * presently contains an expanded nfs_arg_t, at least the corresponding
 * value pointers must be zeroed before XDR allocation.
 *
 * @return The newly allocated dupreq entry or NULL.
 */
static inline dupreq_entry_t *alloc_dupreq(void)
{
	dupreq_entry_t *dv;

	dv = pool_alloc(dupreq_pool, NULL);
	if (!dv) {
		LogCrit(COMPONENT_DUPREQ, "alloc dupreq_entry_t failed");
		goto out;
	}
	memset(dv, 0, sizeof(dupreq_entry_t));	/* XXX pool_zalloc */
	gsh_mutex_init(&dv->mtx, NULL);
	TAILQ_INIT_ENTRY(dv, fifo_q);
out:
	return dv;
}
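The "XXX pool_zalloc" note hints at the natural cleanup: fold the allocation and the memset into a single zero-initializing call. Below is a minimal, self-contained sketch of that pattern; struct entry, alloc_entry_zeroed(), and the calloc stand-in are hypothetical illustrations, not Ganesha APIs.

#include <stdlib.h>
#include <pthread.h>
#include <sys/queue.h>

/* Hypothetical stand-ins for illustration; the real code uses
 * dupreq_entry_t and the dupreq_pool allocator. */
struct entry {
	pthread_mutex_t mtx;
	TAILQ_ENTRY(entry) fifo_q;
};

/* Allocate and zero in one step, as the "XXX pool_zalloc" note suggests;
 * calloc stands in for a zero-initializing pool allocator. */
static struct entry *alloc_entry_zeroed(void)
{
	struct entry *e = calloc(1, sizeof(*e));

	if (!e)
		return NULL;	/* caller logs and handles the failure */

	pthread_mutex_init(&e->mtx, NULL);
	/* calloc already left the fifo_q link pointers NULL, which is what
	 * TAILQ_INIT_ENTRY() arranges in the original. */
	return e;
}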
Example 2
/**
 * @brief Initialize the DRC package.
 */
void dupreq2_pkginit(void)
{
	int code __attribute__ ((unused)) = 0;

	dupreq_pool = pool_init("Duplicate Request Pool",
				sizeof(dupreq_entry_t),
				pool_basic_substrate, NULL, NULL, NULL);
	if (unlikely(!(dupreq_pool)))
		LogFatal(COMPONENT_INIT,
			 "Error while allocating duplicate request pool");

	nfs_res_pool = pool_init("nfs_res_t pool", sizeof(nfs_res_t),
				 pool_basic_substrate,
				 NULL, NULL, NULL);
	if (unlikely(!(nfs_res_pool)))
		LogFatal(COMPONENT_INIT,
			 "Error while allocating nfs_res_t pool");

	tcp_drc_pool = pool_init("TCP DRC Pool", sizeof(drc_t),
				 pool_basic_substrate,
				 NULL, NULL, NULL);
	if (unlikely(!(tcp_drc_pool)))
		LogFatal(COMPONENT_INIT,
			 "Error while allocating TCP DRC pool");

	drc_st = gsh_calloc(1, sizeof(struct drc_st));

	/* init shared statics */
	gsh_mutex_init(&drc_st->mtx, NULL);

	/* recycle_t */
	code =
	    rbtx_init(&drc_st->tcp_drc_recycle_t, drc_recycle_cmpf,
		      nfs_param.core_param.drc.tcp.recycle_npart,
		      RBT_X_FLAG_ALLOC);
	/* XXX error? */

	/* init recycle_q */
	TAILQ_INIT(&drc_st->tcp_drc_recycle_q);
	drc_st->tcp_drc_recycle_qlen = 0;
	drc_st->last_expire_check = time(NULL);
	drc_st->expire_delta = nfs_param.core_param.drc.tcp.recycle_expire_s;

	/* UDP DRC is global, shared */
	init_shared_drc();
}
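The "/* XXX error? */" note marks that the rbtx_init() return code is discarded here, even though init_shared_drc() in Example 3 treats a non-zero return as fatal with assert(). A hedged, self-contained sketch of the missing check follows; recycle_tree_init_stub() and the fprintf/abort pair are stand-ins for rbtx_init() and LogFatal(), not real Ganesha calls.

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical stand-in for rbtx_init(); like the real call it returns 0
 * on success and non-zero on failure (Example 3 asserts on that). */
static int recycle_tree_init_stub(unsigned npart)
{
	return npart ? 0 : -1;
}

/* Sketch of resolving the "XXX error?" note: treat a failed tree init as
 * fatal, the same way the pool failures above are handled. */
static void pkginit_sketch(void)
{
	int code = recycle_tree_init_stub(7 /* e.g. recycle_npart */);

	if (code != 0) {
		fprintf(stderr, "recycle tree init failed (%d)\n", code);
		abort();	/* stands in for LogFatal() */
	}
}

int main(void)
{
	pkginit_sketch();
	return 0;
}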
Example 3
/**
 * @brief Initialize the shared (UDP) duplicate request cache.
 */
static inline void init_shared_drc(void)
{
	drc_t *drc = &drc_st->udp_drc;
	int ix, code __attribute__ ((unused)) = 0;

	drc->type = DRC_UDP_V234;
	drc->refcnt = 0;
	drc->retwnd = 0;
	drc->d_u.tcp.recycle_time = 0;
	drc->maxsize = nfs_param.core_param.drc.udp.size;
	drc->cachesz = nfs_param.core_param.drc.udp.cachesz;
	drc->npart = nfs_param.core_param.drc.udp.npart;
	drc->hiwat = nfs_param.core_param.drc.udp.hiwat;

	gsh_mutex_init(&drc->mtx, NULL);

	/* init dict */
	code =
	    rbtx_init(&drc->xt, dupreq_shared_cmpf, drc->npart,
		      RBT_X_FLAG_ALLOC | RBT_X_FLAG_CACHE_WT);
	assert(!code);

	/* completed requests */
	TAILQ_INIT(&drc->dupreq_q);

	/* init closed-form "cache" partition */
	for (ix = 0; ix < drc->npart; ++ix) {
		struct rbtree_x_part *xp = &(drc->xt.tree[ix]);
		drc->xt.cachesz = drc->cachesz;
		xp->cache =
		    gsh_calloc(drc->cachesz, sizeof(struct opr_rbtree_node *));
		if (unlikely(!xp->cache)) {
			LogCrit(COMPONENT_DUPREQ,
				"UDP DRC hash partition allocation "
				"failed (ix=%d)", ix);
			drc->cachesz = 0;
			break;
		}
	}

	return;
}
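The per-partition loop wires up the "closed-form cache": a flat array of cachesz opr_rbtree_node pointers sitting in front of each red-black tree partition (the RBT_X_FLAG_CACHE_WT flag suggests it is maintained write-through). A minimal, self-contained sketch of that slot-cache idea follows; struct slot_cache, cache_insert(), cache_lookup(), and the fixed CACHESZ are hypothetical illustrations, not the rbtree_x API.

#include <stddef.h>
#include <stdint.h>

#define CACHESZ 127	/* stands in for drc->cachesz */

/* Hypothetical node type; the real cache stores struct opr_rbtree_node *. */
struct node {
	uint64_t hk;	/* hash key */
	/* payload ... */
};

/* One partition's closed-form cache: an array of CACHESZ slots. */
struct slot_cache {
	struct node *slot[CACHESZ];
};

static inline unsigned slot_of(uint64_t hk)
{
	return (unsigned)(hk % CACHESZ);
}

/* Write-through: remember the most recently inserted node in its slot. */
static void cache_insert(struct slot_cache *c, struct node *n)
{
	c->slot[slot_of(n->hk)] = n;
}

/* Lookup tries the slot first; a miss (or a colliding key) falls back to
 * searching the rbtree partition, omitted here. */
static struct node *cache_lookup(struct slot_cache *c, uint64_t hk)
{
	struct node *n = c->slot[slot_of(hk)];

	if (n && n->hk == hk)
		return n;	/* cache hit */
	return NULL;		/* miss: search the tree */
}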