Example #1
void test_clist(void)
{
    int i = 0;
    int test_cnt = 10000;
    clist *list = clist_new();

    CU_ASSERT(0 == clist_len(list));
    for(i = 0; i < test_cnt; ++i) {
        clist_append(list, cobj_int_new(i));
    }

    CU_ASSERT(test_cnt == clist_len(list));
    CU_ASSERT(false == clist_is_empty(list));
    CU_ASSERT(0 == cobj_int_val(clist_at_obj(list, 0)));
    CU_ASSERT(1 == cobj_int_val(clist_at_obj(list, 1)));
    CU_ASSERT(test_cnt - 1 == cobj_int_val(clist_at_obj(list, test_cnt - 1)));

    for(i = 0; i < test_cnt; ++i) {
        void *obj = clist_pop_front(list);
        CU_ASSERT(i == cobj_int_val(obj));
        cobj_free(obj);
    }
    CU_ASSERT(0 == clist_len(list));
    CU_ASSERT(true == clist_is_empty(list));

    for(i = 0; i < test_cnt; ++i) {
        clist_append(list, cobj_int_new(i));
    }
    clist_clear(list);
    CU_ASSERT(0 == clist_len(list));
    for(i = 0; i < test_cnt; ++i) {
        clist_append(list, cobj_int_new(i));
    }

    clist_node *node = NULL;
    void *obj = NULL;
    i = 0;
    clist_foreach_val(list, node, obj) {
        CU_ASSERT(i == cobj_int_val(obj));
        ++i;
    }
}
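For reference, a minimal sketch of how a test like this could be wired into a CUnit runner. The header name "clist.h" and the suite/test names are assumptions for illustration, not part of the original example:

#include <CUnit/Basic.h>
#include "clist.h"              /* assumed header for the clist/cobj API */

void test_clist(void);          /* the test shown above */

int main(void)
{
    if (CU_initialize_registry() != CUE_SUCCESS)
        return CU_get_error();

    CU_pSuite suite = CU_add_suite("clist_suite", NULL, NULL);
    if (suite == NULL || CU_add_test(suite, "test_clist", test_clist) == NULL) {
        CU_cleanup_registry();
        return CU_get_error();
    }

    CU_basic_set_mode(CU_BRM_VERBOSE);
    CU_basic_run_tests();
    CU_cleanup_registry();
    return CU_get_error();
}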
Example #2
/*
 * If xp_wcl is NULL, then the RPC payload will NOT carry an
 * RDMA WRITE chunk list; in this case we insert FALSE into the
 * XDR stream. Otherwise we use the clist, RDMA-register the
 * memory, and encode the clist into the outbound XDR stream.
 */
static int
clnt_setup_wlist(CONN *conn, XDR *xdrs, XDR *call_xdrp, rdma_buf_t *rndbuf)
{
	int status;
	struct clist *wlist, *rndcl;
	int wlen, rndlen = 0;	/* no roundup chunk unless computed below */
	int32_t xdr_flag = XDR_RDMA_WLIST_REG;

	XDR_CONTROL(call_xdrp, XDR_RDMA_GET_WLIST, &wlist);

	if (wlist != NULL) {
		/*
		 * If we are sending a length that is not 4-byte
		 * aligned, the server will round the length up to a
		 * 4-byte boundary. In that case, a trailing chunk is
		 * added to absorb any spill-over roundup bytes.
		 */
		wlen = clist_len(wlist);
		rndlen = (roundup(wlen, BYTES_PER_XDR_UNIT) - wlen);
		if (rndlen) {
			rndcl = clist_alloc();
			/*
			 * calc_length() will allocate a PAGESIZE
			 * buffer below.
			 */
			rndcl->c_len = calc_length(rndlen);
			rndcl->rb_longbuf.type = RDMA_LONG_BUFFER;
			rndcl->rb_longbuf.len = rndcl->c_len;
			if (rdma_buf_alloc(conn, &rndcl->rb_longbuf)) {
				clist_free(rndcl);
				return (CLNT_RDMA_FAIL);
			}

			/* Roundup buffer freed back in caller */
			*rndbuf = rndcl->rb_longbuf;

			rndcl->u.c_daddr3 = rndcl->rb_longbuf.addr;
			rndcl->c_next = NULL;
			rndcl->c_dmemhandle = rndcl->rb_longbuf.handle;
			wlist->c_next = rndcl;
		}

		status = clist_register(conn, wlist, CLIST_REG_DST);
		if (status != RDMA_SUCCESS) {
			rdma_buf_free(conn, rndbuf);
			bzero(rndbuf, sizeof (rdma_buf_t));
			return (CLNT_RDMA_FAIL);
		}
		XDR_CONTROL(call_xdrp, XDR_RDMA_SET_FLAGS, &xdr_flag);
	}

	if (!xdr_encode_wlist(xdrs, wlist)) {
		if (rndlen) {
			rdma_buf_free(conn, rndbuf);
			bzero(rndbuf, sizeof (rdma_buf_t));
		}
		return (CLNT_RDMA_FAIL);
	}

	return (CLNT_RDMA_SUCCESS);
}
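As a quick sanity check on the roundup arithmetic used above, a small user-space sketch; roundup and BYTES_PER_XDR_UNIT are defined locally here as stand-ins for the kernel/XDR macros (assumed to expand to the usual values):

#include <stdio.h>

/* Local stand-ins for the kernel/XDR macros (assumption). */
#define BYTES_PER_XDR_UNIT	4
#define roundup(x, y)		((((x) + ((y) - 1)) / (y)) * (y))

int
main(void)
{
	int wlen;

	for (wlen = 8; wlen <= 12; wlen++) {
		int rndlen = roundup(wlen, BYTES_PER_XDR_UNIT) - wlen;
		/* e.g. wlen = 10 needs rndlen = 2 trailing pad bytes */
		printf("wlen=%d rndlen=%d\n", wlen, rndlen);
	}
	return (0);
}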
Example #3
bool_t
xdrrdma_send_read_data(XDR *xdrs, uint_t data_len, struct clist *wcl)
{
	int status;
	xrdma_private_t	*xdrp = (xrdma_private_t *)(xdrs->x_private);
	struct xdr_ops *xops = xdrrdma_xops();
	struct clist *tcl, *wrcl, *cl;
	struct clist fcl;
	int rndup_present, rnduplen;

	rndup_present = 0;
	wrcl = NULL;

	/* caller is doing a sizeof */
	if (xdrs->x_ops != &xdrrdma_ops || xdrs->x_ops == xops)
		return (TRUE);

	/* copy of the first chunk */
	fcl = *wcl;
	fcl.c_next = NULL;

	/*
	 * The entire buffer is registered with the first chunk.
	 * Later chunks will use the same registered memory handle.
	 */

	status = clist_register(xdrp->xp_conn, &fcl, CLIST_REG_SOURCE);
	if (status != RDMA_SUCCESS) {
		return (FALSE);
	}

	wcl->c_regtype = CLIST_REG_SOURCE;
	wcl->c_smemhandle = fcl.c_smemhandle;
	wcl->c_ssynchandle = fcl.c_ssynchandle;

	/*
	 * Only transfer the read data ignoring any trailing
	 * roundup chunks. A bit of work, but it saves an
	 * unnecessary extra RDMA_WRITE containing only
	 * roundup bytes.
	 */

	rnduplen = clist_len(wcl) - data_len;

	if (rnduplen) {

		tcl = wcl->c_next;

		/*
		 * Check if there is a trailing roundup chunk
		 */
		while (tcl) {
			if ((tcl->c_next == NULL) && (tcl->c_len == rnduplen)) {
				rndup_present = 1;
				break;
			}
			tcl = tcl->c_next;
		}

		/*
		 * Make a copy of the chunk list, skipping the last chunk.
		 */
		if (rndup_present) {
			cl = wcl;
			tcl = NULL;
			while (cl) {
				if (tcl == NULL) {
					tcl = clist_alloc();
					wrcl = tcl;
				} else {
					tcl->c_next = clist_alloc();
					tcl = tcl->c_next;
				}

				*tcl = *cl;
				cl = cl->c_next;
				/* cl is now the trailing roundup chunk; do not copy it */
				if (cl->c_next == NULL)
					break;
			}
			tcl->c_next = NULL;
		}
	}

	if (wrcl == NULL) {
		/* No roundup chunks */
		wrcl = wcl;
	}

	/*
	 * Set the registered memory handles of the remaining
	 * chunks to the same values as the first chunk.
	 */
	tcl = wrcl->c_next;
	while (tcl) {
		tcl->c_smemhandle = fcl.c_smemhandle;
		tcl->c_ssynchandle = fcl.c_ssynchandle;
		tcl = tcl->c_next;
	}

	/*
	 * Sync the total len beginning from the first chunk.
	 */
	fcl.c_len = clist_len(wrcl);
	status = clist_syncmem(xdrp->xp_conn, &fcl, CLIST_REG_SOURCE);
	if (status != RDMA_SUCCESS) {
		return (FALSE);
	}

	status = RDMA_WRITE(xdrp->xp_conn, wrcl, WAIT);

	if (rndup_present)
		clist_free(wrcl);

	if (status != RDMA_SUCCESS) {
		return (FALSE);
	}

	return (TRUE);
}
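The rndup_present branch above copies a singly linked chunk list while dropping its trailing roundup chunk. A simplified, self-contained sketch of that pattern, using a hypothetical node type instead of the real struct clist:

#include <stdlib.h>

/* Hypothetical, simplified stand-in for struct clist. */
struct node {
	int		len;
	struct node	*next;
};

/*
 * Copy every node of src except the last one (the trailing
 * roundup chunk).  Assumes src has at least two nodes, as the
 * kernel code guarantees when rndup_present is set.
 */
static struct node *
copy_without_last(const struct node *src)
{
	struct node *head = NULL, *tail = NULL;
	const struct node *cl = src;

	while (cl->next != NULL) {
		struct node *n = malloc(sizeof (*n));

		if (n == NULL)
			return (head);	/* sketch only; a real version would unwind */
		*n = *cl;
		n->next = NULL;
		if (tail == NULL)
			head = n;
		else
			tail->next = n;
		tail = n;
		cl = cl->next;
	}
	return (head);
}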