Example #1
void test_pkt_mbuf(){

    rte_mempool_t * mp1=utl_rte_mempool_create("big-const",
                                          CONST_NB_MBUF, 
                                          CONST_MBUF_SIZE,
                                          32);
    rte_mbuf_t * m1 = rte_pktmbuf_alloc(mp1);
    rte_mbuf_t * m2 = rte_pktmbuf_alloc(mp1);
    assert(m1 && m2);

    char *p=rte_pktmbuf_append(m1, 10);
    assert(p);
    int i;
    
    for (i=0; i<10;i++) {
        p[i]=i;
    }

    p=rte_pktmbuf_append(m2, 10);
    assert(p);

    for (i=0; i<10;i++) {
        p[i]=0x55+i;
    }

    rte_pktmbuf_dump(m1, m1->pkt_len);
    rte_pktmbuf_dump(m2, m2->pkt_len);

    rte_pktmbuf_free(m1);
    rte_pktmbuf_free(m2);
}
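
Note on the pattern above: rte_pktmbuf_append() returns NULL when the mbuf
lacks tailroom, so code outside a test should check the pointer before
writing. A minimal hedged sketch (the pool name is reused from the example):

    struct rte_mbuf *m = rte_pktmbuf_alloc(mp1);
    if (m != NULL) {
        char *p = rte_pktmbuf_append(m, 10);
        if (p == NULL)               /* not enough tailroom */
            rte_pktmbuf_free(m);
        else
            memset(p, 0, 10);        /* safe to fill the reserved bytes */
    }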
Example #2
/*
 * Allocate an mbuf for sending flow stat (and latency) info.
 * m - original mbuf (may be a complex mbuf chain)
 * fsp_head - returned pointer through which the flow stat info should be filled
 * is_const - whether the given mbuf is const
 * Returns a new mbuf structure into which fsp_head can be written. If needed, the original mbuf is freed.
 */
rte_mbuf_t * CGenNodeStateless::alloc_flow_stat_mbuf(rte_mbuf_t *m, struct flow_stat_payload_header *&fsp_head
                                                     , bool is_const) {
    rte_mbuf_t *m_ret = NULL, *m_lat = NULL;
    uint16_t fsp_head_size = sizeof(struct flow_stat_payload_header);

    if (is_const) {
        // const mbuf case
        if (rte_pktmbuf_data_len(m) > 128) {
            m_ret = CGlobalInfo::pktmbuf_alloc_small(get_socket_id());
            assert(m_ret);
            // alloc mbuf just for the latency header
            m_lat = CGlobalInfo::pktmbuf_alloc( get_socket_id(), fsp_head_size);
            assert(m_lat);
            fsp_head = (struct flow_stat_payload_header *)rte_pktmbuf_append(m_lat, fsp_head_size);
            rte_pktmbuf_attach(m_ret, m);
            rte_pktmbuf_trim(m_ret, sizeof(struct flow_stat_payload_header));
            utl_rte_pktmbuf_add_after2(m_ret, m_lat);
            // The ref count was updated when we took the (const) mbuf, and again in rte_pktmbuf_attach,
            // so decrease it now to avoid a leak.
            rte_pktmbuf_refcnt_update(m, -1);
            return m_ret;
        } else {
            // Short packet. Just copy all bytes.
            m_ret = CGlobalInfo::pktmbuf_alloc( get_socket_id(), rte_pktmbuf_data_len(m) );
            assert(m_ret);
            char *p = rte_pktmbuf_mtod(m, char*);
            char *p_new = rte_pktmbuf_append(m_ret, rte_pktmbuf_data_len(m));
            memcpy(p_new , p, rte_pktmbuf_data_len(m));
            fsp_head = (struct flow_stat_payload_header *)(p_new + rte_pktmbuf_data_len(m) - fsp_head_size);
            rte_pktmbuf_free(m);
            return m_ret;
        }
    } else {
        // Field engine (vm)
        if (rte_pktmbuf_is_contiguous(m)) {
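
The const branch above hinges on mbuf indirection. A simplified hedged
sketch of that pattern with plain DPDK calls, assuming a header pool and a
const payload mbuf m (utl_rte_pktmbuf_add_after2() is a TRex-side helper
assumed to chain the two segments):

    /* Share a read-only payload through an indirect mbuf. */
    struct rte_mbuf *mi = rte_pktmbuf_alloc(header_pool);
    rte_pktmbuf_attach(mi, m);            /* mi now references m's data */
    rte_pktmbuf_trim(mi, fsp_head_size);  /* make room for the new tail */
    rte_pktmbuf_refcnt_update(m, -1);     /* attach() took a reference too */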
Example #3
/*
 * test data manipulation in mbuf with non-ascii data
 */
static int
test_pktmbuf_with_non_ascii_data(void)
{
	struct rte_mbuf *m = NULL;
	char *data;

	m = rte_pktmbuf_alloc(pktmbuf_pool);
	if (m == NULL)
		GOTO_FAIL("Cannot allocate mbuf");
	if (rte_pktmbuf_pkt_len(m) != 0)
		GOTO_FAIL("Bad length");

	data = rte_pktmbuf_append(m, MBUF_TEST_DATA_LEN);
	if (data == NULL)
		GOTO_FAIL("Cannot append data");
	if (rte_pktmbuf_pkt_len(m) != MBUF_TEST_DATA_LEN)
		GOTO_FAIL("Bad pkt length");
	if (rte_pktmbuf_data_len(m) != MBUF_TEST_DATA_LEN)
		GOTO_FAIL("Bad data length");
	memset(data, 0xff, rte_pktmbuf_pkt_len(m));
	if (!rte_pktmbuf_is_contiguous(m))
		GOTO_FAIL("Buffer should be continuous");
	rte_pktmbuf_dump(m, MBUF_TEST_DATA_LEN);

	rte_pktmbuf_free(m);

	return 0;

fail:
	if(m) {
		rte_pktmbuf_free(m);
	}
	return -1;
}
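
GOTO_FAIL is a local helper from the DPDK mbuf unit tests; a plausible
definition matching its use here (the exact form may differ by version):

    #define GOTO_FAIL(str, ...) do {                            \
            printf("mbuf test FAILED (l.%d): <" str ">\n",      \
                   __LINE__, ##__VA_ARGS__);                    \
            goto fail;                                          \
    } while (0)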
Example #4
uint8_t add_tcp_data(struct rte_mbuf *mbuf, unsigned char *data, uint8_t len)
{
   //char *src = (char *)rte_pktmbuf_prepend (mbuf, len);
   if((MBUF_BUFFER_LEN - rte_pktmbuf_headroom(mbuf) - rte_pktmbuf_tailroom(mbuf)) < len) {
        logger(LOG_TCP, CRITICAL, "critical overflow: buffer is at its limit.\n");
        logger(LOG_TCP, CRITICAL, "total avail %d, head %d, tail %d, len %d\n", MBUF_BUFFER_LEN, rte_pktmbuf_headroom(mbuf), rte_pktmbuf_tailroom(mbuf), len);
   }
   }
   if(MBUF_BUFFER_LEN) {
        // Initially all free space is tailroom; append advances the tail and
        // reserves room for the new data. Do not write via mtod; call append
        // first, then fill in the data.
        char *src = rte_pktmbuf_append (mbuf, len);
        if(src == NULL) {
            assert(0);
        }
        memcpy(src, data, len);
        if(len > 0) {
             FILE *fp = fopen(DATA_SEND_DEBUG_FILE, "a");
             if(fp) {
                 fprintf(fp, "added %.*s to mbuf\n", len, src); // only for debugging
                 fclose(fp);
             }
        }
   }
   else{
        assert(0);
   }
   return len;
}
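
The capacity check above reconstructs the free space from MBUF_BUFFER_LEN;
a more direct hedged sketch consults the same tailroom that
rte_pktmbuf_append() checks internally (names reused from the example):

    if (rte_pktmbuf_tailroom(mbuf) < len) {
        logger(LOG_TCP, CRITICAL, "not enough tailroom for %d bytes\n", len);
        return 0;
    }
    char *dst = rte_pktmbuf_append(mbuf, len); /* cannot fail after the check */
    if (dst != NULL)
        memcpy(dst, data, len);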
Example #5
/**
 * Process a crypto operation and complete a JOB_AES_HMAC job structure for
 * submission to the multi buffer library for processing.
 *
 * @param	qp	queue pair
 * @param	op	crypto operation to process
 * @param	session	session associated with the operation
 *
 * @return
 * - Completed JOB_AES_HMAC structure pointer on success
 * - NULL pointer if completion of JOB_AES_HMAC structure isn't possible
 */
static JOB_AES_HMAC *
process_crypto_op(struct aesni_mb_qp *qp, struct rte_crypto_op *op,
		struct aesni_mb_session *session)
{
	JOB_AES_HMAC *job;

	struct rte_mbuf *m_src = op->sym->m_src, *m_dst;
	uint16_t m_offset = 0;

	job = (*qp->ops->job.get_next)(&qp->mb_mgr);
	if (unlikely(job == NULL))
		return job;

	/* Set crypto operation */
	job->chain_order = session->chain_order;

	/* Set cipher parameters */
	job->cipher_direction = session->cipher.direction;
	job->cipher_mode = session->cipher.mode;

	job->aes_key_len_in_bytes = session->cipher.key_length_in_bytes;
	job->aes_enc_key_expanded = session->cipher.expanded_aes_keys.encode;
	job->aes_dec_key_expanded = session->cipher.expanded_aes_keys.decode;


	/* Set authentication parameters */
	job->hash_alg = session->auth.algo;
	if (job->hash_alg == AES_XCBC) {
		job->_k1_expanded = session->auth.xcbc.k1_expanded;
		job->_k2 = session->auth.xcbc.k2;
		job->_k3 = session->auth.xcbc.k3;
	} else {
		job->hashed_auth_key_xor_ipad = session->auth.pads.inner;
		job->hashed_auth_key_xor_opad = session->auth.pads.outer;
	}

	/* Mutable crypto operation parameters */
	if (op->sym->m_dst) {
		m_src = m_dst = op->sym->m_dst;

		/* append space for output data to mbuf */
		char *odata = rte_pktmbuf_append(m_dst,
				rte_pktmbuf_data_len(op->sym->m_src));
		if (odata == NULL) {
			MB_LOG_ERR("failed to allocate space in destination "
					"mbuf for source data");
			return NULL;
		}

		memcpy(odata, rte_pktmbuf_mtod(op->sym->m_src, void*),
				rte_pktmbuf_data_len(op->sym->m_src));
	} else {
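
The listing is cut off where the in-place branch begins; with no separate
destination mbuf, a hedged sketch of what such a branch typically does is:

    /* No out-of-place destination: operate on the source mbuf. */
    m_dst = m_src;
    m_offset = op->sym->cipher.data.offset;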
Example #6
/**
 * Given a Netmap ring and a slot index for that ring, construct a dpdk mbuf
 * from the data held in the buffer associated with the slot.
 * Allocation/deallocation of the dpdk mbuf are the responsibility of the
 * caller.
 * Note that mbuf chains are not supported.
 */
static void
slot_to_mbuf(struct netmap_ring *r, uint32_t index, struct rte_mbuf *mbuf)
{
	char *data;
	uint16_t length;

	rte_pktmbuf_reset(mbuf);
	length = r->slot[index].len;
	data = rte_pktmbuf_append(mbuf, length);

	if (data != NULL)
	    rte_memcpy(data, NETMAP_BUF(r, r->slot[index].buf_idx), length);
}
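
A hedged usage sketch for slot_to_mbuf(), draining a netmap RX ring into
freshly allocated mbufs (the pool name is illustrative; nm_ring_empty() and
nm_ring_next() come from netmap_user.h):

	while (!nm_ring_empty(r)) {
		struct rte_mbuf *m = rte_pktmbuf_alloc(pool);
		if (m == NULL)
			break;                  /* out of mbufs, retry later */
		slot_to_mbuf(r, r->cur, m);
		r->head = r->cur = nm_ring_next(r, r->cur);
		/* ... hand m to the forwarding path ... */
	}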
Example #7
/*
 * To send, we copy the data from the TCP/IP stack memory into DPDK
 * memory.  TODO: share TCP/IP stack mbufs with DPDK mbufs to avoid
 * data copy.
 */
void
VIFHYPER_SEND(struct virtif_user *viu,
	struct iovec *iov, size_t iovlen)
{
	struct rte_mbuf *m;
	void *dptr;
	unsigned i;

	m = rte_pktmbuf_alloc(mbpool);
	if (m == NULL)
		return;
	for (i = 0; i < iovlen; i++) {
		dptr = rte_pktmbuf_append(m, iov[i].iov_len);
		if (dptr == NULL) {
			/* log error somehow? */
			rte_pktmbuf_free(m);
			return; /* don't transmit a freed mbuf */
		}
		memcpy(dptr, iov[i].iov_base, iov[i].iov_len);
	}
	rte_eth_tx_burst(IF_PORTID, 0, &m, 1);
}
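
One caveat: rte_eth_tx_burst() returns how many mbufs it actually queued,
and the caller keeps ownership of the rest. A hedged follow-up sketch:

	if (rte_eth_tx_burst(IF_PORTID, 0, &m, 1) != 1)
		rte_pktmbuf_free(m);    /* TX ring full, reclaim the mbuf */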
Example #8
/*
 * Arrange for mbuf to be transmitted.
 *
 * TODO: use bulk transfers.  This should not be too difficult and will
 * have a big performance impact.
 */
void
VIFHYPER_SENDMBUF(struct virtif_user *viu, struct mbuf *m0, int pktlen, void *d, int dlen)
{
	struct rte_mbuf *rm;
	struct mbuf *m;
	void *rmdptr;

	rm = rte_pktmbuf_alloc(mbpool_tx);
	if (rm == NULL) {
		VIF_MBUF_FREE(m0);
		return;
	}
	for (m = m0; m; ) {
		rmdptr = rte_pktmbuf_append(rm, dlen);
		if (rmdptr == NULL) {
			/* log error somehow? */
			rte_pktmbuf_free(rm);
			VIF_MBUF_FREE(m0);
			return; /* don't transmit a freed mbuf */
		}
		memcpy(rmdptr, d, dlen); /* XXX */
		VIF_MBUF_NEXT(m, &m, &d, &dlen);
	}
	VIF_MBUF_FREE(m0);
	
	rte_eth_tx_burst(IF_PORTID, 0, &rm, 1);
}
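
Toward the bulk-transfer TODO above, a hedged sketch of the usual pattern:
collect the packetized mbufs into an array and transmit them in one burst,
freeing whatever the ring did not accept (rms and nb are illustrative):

	uint16_t sent = rte_eth_tx_burst(IF_PORTID, 0, rms, nb);
	while (sent < nb)
		rte_pktmbuf_free(rms[sent++]);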
Example #9
void sendtcpack(struct tcb *ptcb, struct rte_mbuf *mbuf, unsigned char *data, int len)
{
   //uint8_t tcp_len = 0x50 + add_mss_option(mbuf, 1300);// + add_winscale_option(mbuf, 7);
   uint8_t data_len = add_tcp_data(mbuf, data, len);
   uint8_t option_len = add_winscale_option(mbuf, 7) + add_mss_option(mbuf, 1300) + add_timestamp_option(mbuf, 203032, 0);

   uint8_t tcp_len = 20 + option_len;
   uint8_t pad = (tcp_len%4) ? 4 - (tcp_len % 4): 0;
   tcp_len += pad;
   logger(LOG_TCP, NORMAL, "padding option %d\n",  pad); 
   char *nop = rte_pktmbuf_append (mbuf, pad); // pad the options so the header length is a multiple of 4.
   if(nop != NULL)
       memset(nop, 0, pad);

   tcp_len = (tcp_len + 3) / 4;  // data offset is counted in 32-bit words; 20 bytes becomes 5
   tcp_len = tcp_len << 4; // the offset occupies the upper 4 bits of the data_off field.
   logger(LOG_TCP, NORMAL, "sending tcp packet\n");
   struct tcp_hdr *ptcphdr = (struct tcp_hdr *)rte_pktmbuf_prepend (mbuf, sizeof(struct tcp_hdr));
   if(ptcphdr == NULL) {
       logger(LOG_TCP, CRITICAL, "no headroom left for the tcp header\n");
       return;
   }
   ptcphdr->src_port = htons(ptcb->dport);
   ptcphdr->dst_port = htons(ptcb->sport);
   ptcphdr->sent_seq = htonl(ptcb->next_seq);
   ptcb->next_seq += data_len;
  // ptcb->next_seq ++;  // for syn 
   ptcphdr->recv_ack = htonl(ptcb->ack);
   ptcphdr->data_off = tcp_len;
   ptcphdr->tcp_flags =  TCP_FLAG_ACK;
   ptcphdr->rx_win = htons(12000);
//   ptcphdr->cksum = 0x0001;
   ptcphdr->tcp_urp = 0; 
   //mbuf->ol_flags |=  PKT_TX_IP_CKSUM; // someday we will calculate the checksum here.
   
 //  printf(" null\n");
  // fflush(stdout);
   ip_out(ptcb, mbuf, ptcphdr, data_len); 

}
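
The commented-out ol_flags line above hints at checksum offload; a hedged
sketch with the pre-19.11 flag and struct names this example already uses
(the NIC then fills in the IPv4 header checksum):

   mbuf->l2_len = sizeof(struct ether_hdr);
   mbuf->l3_len = sizeof(struct ipv4_hdr);
   mbuf->ol_flags |= PKT_TX_IPV4 | PKT_TX_IP_CKSUM;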
Example #10
/*
 * To send, we copy the data from the TCP/IP stack memory into DPDK
 * memory.  TODO: share TCP/IP stack mbufs with DPDK mbufs to avoid
 * data copy.
 */
void
rumpcomp_virtif_send(struct virtif_user *viu,
                     struct iovec *iov, size_t iovlen)
{
    void *cookie = rumpuser_component_unschedule();
    struct rte_mbuf *m;
    void *dptr;
    unsigned i;

    m = rte_pktmbuf_alloc(mbpool);
    if (m == NULL)
        goto out;
    for (i = 0; i < iovlen; i++) {
        dptr = rte_pktmbuf_append(m, iov[i].iov_len);
        if (dptr == NULL) {
            /* log error somehow? */
            rte_pktmbuf_free(m);
            goto out;
        }
        memcpy(dptr, iov[i].iov_base, iov[i].iov_len);
    }
    rte_eth_tx_burst(IF_PORTID, 0, &m, 1);

out:
    rumpuser_component_schedule(cookie);
}
Example #11
void
TrexStatelessDpCore::add_cont_stream(TrexStatelessDpPerPort * lp_port,
                                     TrexStream * stream,
                                     TrexStreamsCompiledObj *comp) {

    CGenNodeStateless *node = m_core->create_node_sl();

    /* add periodic */
    node->m_type = CGenNode::STATELESS_PKT;

    node->m_ref_stream_info  =   stream->clone_as_dp();

    node->m_next_stream=0; /* will be fixed later */


    if ( stream->m_self_start ){
        /* if self start it is in active mode */
        node->m_state =CGenNodeStateless::ss_ACTIVE;
        lp_port->m_active_streams++;
    }else{
        node->m_state =CGenNodeStateless::ss_INACTIVE;
    }

    node->m_time = m_core->m_cur_time_sec + usec_to_sec(stream->m_isg_usec);

    pkt_dir_t dir = m_core->m_node_gen.m_v_if->port_id_to_dir(stream->m_port_id);
    node->m_flags = 0; 

    /* set socket id */
    node->set_socket_id(m_core->m_node_gen.m_socket_id);

    /* build a mbuf from a packet */
    
    uint16_t pkt_size = stream->m_pkt.len;
    const uint8_t *stream_pkt = stream->m_pkt.binary;

    node->m_pause =0;
    node->m_stream_type = stream->m_type;
    node->m_next_time_offset =  1.0 / (stream->get_pps() * comp->get_multiplier()) ;


    /* stateless specific fields */
    switch ( stream->m_type ) {

    case TrexStream::stCONTINUOUS :
        node->m_single_burst=0;
        node->m_single_burst_refill=0;
        node->m_multi_bursts=0;
        node->m_ibg_sec                 = 0.0;
        break;

    case TrexStream::stSINGLE_BURST :
        node->m_stream_type             = TrexStream::stMULTI_BURST;
        node->m_single_burst            = stream->m_burst_total_pkts;
        node->m_single_burst_refill     = stream->m_burst_total_pkts;
        node->m_multi_bursts            = 1;  /* single burst in multi burst of 1 */
        node->m_ibg_sec                 = 0.0;
        break;

    case TrexStream::stMULTI_BURST :
        node->m_single_burst        = stream->m_burst_total_pkts;
        node->m_single_burst_refill = stream->m_burst_total_pkts;
        node->m_multi_bursts        = stream->m_num_bursts;
        node->m_ibg_sec             = usec_to_sec( stream->m_ibg_usec );
        break;
    default:

        assert(0);
    };

    node->m_port_id = stream->m_port_id;

    /* allocate const mbuf */
    rte_mbuf_t *m = CGlobalInfo::pktmbuf_alloc(node->get_socket_id(), pkt_size);
    assert(m);

    char *p = rte_pktmbuf_append(m, pkt_size);
    assert(p);
    /* copy the packet */
    memcpy(p,stream_pkt,pkt_size);

    /* set dir 0 or 1 client or server */
    node->set_mbuf_cache_dir(dir);

    /* TBD: replace the MAC if required; we should add a flag */
    m_core->m_node_gen.m_v_if->update_mac_addr_from_global_cfg(dir, m);

    /* set the packet as read-only */
    node->set_cache_mbuf(m);

    CDpOneStream one_stream;

    one_stream.m_dp_stream = node->m_ref_stream_info;
    one_stream.m_node =node;

    lp_port->m_active_nodes.push_back(one_stream);

    /* schedule only if active */
    if (node->m_state == CGenNodeStateless::ss_ACTIVE) {
        m_core->m_node_gen.add_node((CGenNode *)node);
    }
}
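
A hedged sketch of how such a cached const mbuf is typically consumed at
transmit time: the reference count is bumped per send instead of copying
the packet (get_cache_mbuf() is the matching TRex-side accessor; the port
and queue ids are illustrative):

    rte_mbuf_t *mc = node->get_cache_mbuf();
    rte_pktmbuf_refcnt_update(mc, 1);       /* one more in-flight user */
    rte_eth_tx_burst(port_id, queue_id, &mc, 1);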
Example #12
/*
 * test data manipulation in mbuf
 */
static int
test_one_pktmbuf(void)
{
	struct rte_mbuf *m = NULL;
	char *data, *data2, *hdr;
	unsigned i;

	printf("Test pktmbuf API\n");

	/* alloc a mbuf */

	m = rte_pktmbuf_alloc(pktmbuf_pool);
	if (m == NULL)
		GOTO_FAIL("Cannot allocate mbuf");
	if (rte_pktmbuf_pkt_len(m) != 0)
		GOTO_FAIL("Bad length");

	rte_pktmbuf_dump(m, 0);

	/* append data */

	data = rte_pktmbuf_append(m, MBUF_TEST_DATA_LEN);
	if (data == NULL)
		GOTO_FAIL("Cannot append data");
	if (rte_pktmbuf_pkt_len(m) != MBUF_TEST_DATA_LEN)
		GOTO_FAIL("Bad pkt length");
	if (rte_pktmbuf_data_len(m) != MBUF_TEST_DATA_LEN)
		GOTO_FAIL("Bad data length");
	memset(data, 0x66, rte_pktmbuf_pkt_len(m));
	if (!rte_pktmbuf_is_contiguous(m))
		GOTO_FAIL("Buffer should be continuous");
	rte_pktmbuf_dump(m, MBUF_TEST_DATA_LEN);
	rte_pktmbuf_dump(m, 2*MBUF_TEST_DATA_LEN);

	/* this append should fail */

	data2 = rte_pktmbuf_append(m, (uint16_t)(rte_pktmbuf_tailroom(m) + 1));
	if (data2 != NULL)
		GOTO_FAIL("Append should not succeed");

	/* append some more data */

	data2 = rte_pktmbuf_append(m, MBUF_TEST_DATA_LEN2);
	if (data2 == NULL)
		GOTO_FAIL("Cannot append data");
	if (rte_pktmbuf_pkt_len(m) != MBUF_TEST_DATA_LEN + MBUF_TEST_DATA_LEN2)
		GOTO_FAIL("Bad pkt length");
	if (rte_pktmbuf_data_len(m) != MBUF_TEST_DATA_LEN + MBUF_TEST_DATA_LEN2)
		GOTO_FAIL("Bad data length");
	if (!rte_pktmbuf_is_contiguous(m))
		GOTO_FAIL("Buffer should be continuous");

	/* trim data at the end of mbuf */

	if (rte_pktmbuf_trim(m, MBUF_TEST_DATA_LEN2) < 0)
		GOTO_FAIL("Cannot trim data");
	if (rte_pktmbuf_pkt_len(m) != MBUF_TEST_DATA_LEN)
		GOTO_FAIL("Bad pkt length");
	if (rte_pktmbuf_data_len(m) != MBUF_TEST_DATA_LEN)
		GOTO_FAIL("Bad data length");
	if (!rte_pktmbuf_is_contiguous(m))
		GOTO_FAIL("Buffer should be continuous");

	/* this trim should fail */

	if (rte_pktmbuf_trim(m, (uint16_t)(rte_pktmbuf_data_len(m) + 1)) == 0)
		GOTO_FAIL("trim should not succeed");

	/* prepend one header */

	hdr = rte_pktmbuf_prepend(m, MBUF_TEST_HDR1_LEN);
	if (hdr == NULL)
		GOTO_FAIL("Cannot prepend");
	if (data - hdr != MBUF_TEST_HDR1_LEN)
		GOTO_FAIL("Prepend failed");
	if (rte_pktmbuf_pkt_len(m) != MBUF_TEST_DATA_LEN + MBUF_TEST_HDR1_LEN)
		GOTO_FAIL("Bad pkt length");
	if (rte_pktmbuf_data_len(m) != MBUF_TEST_DATA_LEN + MBUF_TEST_HDR1_LEN)
		GOTO_FAIL("Bad data length");
	if (!rte_pktmbuf_is_contiguous(m))
		GOTO_FAIL("Buffer should be continuous");
	memset(hdr, 0x55, MBUF_TEST_HDR1_LEN);

	/* prepend another header */

	hdr = rte_pktmbuf_prepend(m, MBUF_TEST_HDR2_LEN);
	if (hdr == NULL)
		GOTO_FAIL("Cannot prepend");
	if (data - hdr != MBUF_TEST_ALL_HDRS_LEN)
		GOTO_FAIL("Prepend failed");
	if (rte_pktmbuf_pkt_len(m) != MBUF_TEST_DATA_LEN + MBUF_TEST_ALL_HDRS_LEN)
		GOTO_FAIL("Bad pkt length");
	if (rte_pktmbuf_data_len(m) != MBUF_TEST_DATA_LEN + MBUF_TEST_ALL_HDRS_LEN)
		GOTO_FAIL("Bad data length");
	if (!rte_pktmbuf_is_contiguous(m))
		GOTO_FAIL("Buffer should be continuous");
	memset(hdr, 0x55, MBUF_TEST_HDR2_LEN);

	rte_mbuf_sanity_check(m, RTE_MBUF_PKT, 1);
	rte_mbuf_sanity_check(m, RTE_MBUF_PKT, 0);
	rte_pktmbuf_dump(m, 0);

	/* this prepend should fail */

	hdr = rte_pktmbuf_prepend(m, (uint16_t)(rte_pktmbuf_headroom(m) + 1));
	if (hdr != NULL)
		GOTO_FAIL("prepend should not succeed");

	/* remove data at beginning of mbuf (adj) */

	if (data != rte_pktmbuf_adj(m, MBUF_TEST_ALL_HDRS_LEN))
		GOTO_FAIL("rte_pktmbuf_adj failed");
	if (rte_pktmbuf_pkt_len(m) != MBUF_TEST_DATA_LEN)
		GOTO_FAIL("Bad pkt length");
	if (rte_pktmbuf_data_len(m) != MBUF_TEST_DATA_LEN)
		GOTO_FAIL("Bad data length");
	if (!rte_pktmbuf_is_contiguous(m))
		GOTO_FAIL("Buffer should be continuous");

	/* this adj should fail */

	if (rte_pktmbuf_adj(m, (uint16_t)(rte_pktmbuf_data_len(m) + 1)) != NULL)
		GOTO_FAIL("rte_pktmbuf_adj should not succeed");

	/* check data */

	if (!rte_pktmbuf_is_contiguous(m))
		GOTO_FAIL("Buffer should be continuous");

	for (i=0; i<MBUF_TEST_DATA_LEN; i++) {
		if (data[i] != 0x66)
			GOTO_FAIL("Data corrupted at offset %u", i);
	}

	/* free mbuf */

	rte_pktmbuf_free(m);
	m = NULL;
	return 0;

fail:
	if (m)
		rte_pktmbuf_free(m);
	return -1;
}
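
The append/prepend/trim checks above all exercise one layout invariant:
in a single-segment pktmbuf the headroom, data and tailroom tile the whole
buffer. A hedged one-line check:

	assert(rte_pktmbuf_headroom(m) + rte_pktmbuf_data_len(m) +
	       rte_pktmbuf_tailroom(m) == m->buf_len);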
Example #13
/**
 * Process a crypto operation and complete a JOB_AES_HMAC job structure for
 * submission to the multi buffer library for processing.
 *
 * @param	job	JOB_AES_HMAC structure to fill
 * @param	qp	queue pair
 * @param	op	crypto operation to process
 * @param	digest_idx	digest index within the queue pair's scratch area
 *
 * @return
 * - 0 on success, with the job structure filled for submission
 * - -1 on failure (the operation status is updated accordingly)
 */
static inline int
set_mb_job_params(JOB_AES_HMAC *job, struct aesni_mb_qp *qp,
		struct rte_crypto_op *op, uint8_t *digest_idx)
{
	struct rte_mbuf *m_src = op->sym->m_src, *m_dst;
	struct aesni_mb_session *session;
	uint16_t m_offset = 0;

	session = get_session(qp, op);
	if (session == NULL) {
		op->status = RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
		return -1;
	}

	/* Set crypto operation */
	job->chain_order = session->chain_order;

	/* Set cipher parameters */
	job->cipher_direction = session->cipher.direction;
	job->cipher_mode = session->cipher.mode;

	job->aes_key_len_in_bytes = session->cipher.key_length_in_bytes;

	/* Set authentication parameters */
	job->hash_alg = session->auth.algo;

	switch (job->hash_alg) {
	case AES_XCBC:
		job->u.XCBC._k1_expanded = session->auth.xcbc.k1_expanded;
		job->u.XCBC._k2 = session->auth.xcbc.k2;
		job->u.XCBC._k3 = session->auth.xcbc.k3;

		job->aes_enc_key_expanded =
				session->cipher.expanded_aes_keys.encode;
		job->aes_dec_key_expanded =
				session->cipher.expanded_aes_keys.decode;
		break;

	case AES_CCM:
		job->u.CCM.aad = op->sym->aead.aad.data + 18;
		job->u.CCM.aad_len_in_bytes = session->aead.aad_len;
		job->aes_enc_key_expanded =
				session->cipher.expanded_aes_keys.encode;
		job->aes_dec_key_expanded =
				session->cipher.expanded_aes_keys.decode;
		break;

	case AES_CMAC:
		job->u.CMAC._key_expanded = session->auth.cmac.expkey;
		job->u.CMAC._skey1 = session->auth.cmac.skey1;
		job->u.CMAC._skey2 = session->auth.cmac.skey2;
		job->aes_enc_key_expanded =
				session->cipher.expanded_aes_keys.encode;
		job->aes_dec_key_expanded =
				session->cipher.expanded_aes_keys.decode;
		break;

	case AES_GMAC:
		job->u.GCM.aad = op->sym->aead.aad.data;
		job->u.GCM.aad_len_in_bytes = session->aead.aad_len;
		job->aes_enc_key_expanded = &session->cipher.gcm_key;
		job->aes_dec_key_expanded = &session->cipher.gcm_key;
		break;

	default:
		job->u.HMAC._hashed_auth_key_xor_ipad = session->auth.pads.inner;
		job->u.HMAC._hashed_auth_key_xor_opad = session->auth.pads.outer;

		if (job->cipher_mode == DES3) {
			job->aes_enc_key_expanded =
				session->cipher.exp_3des_keys.ks_ptr;
			job->aes_dec_key_expanded =
				session->cipher.exp_3des_keys.ks_ptr;
		} else {
			job->aes_enc_key_expanded =
				session->cipher.expanded_aes_keys.encode;
			job->aes_dec_key_expanded =
				session->cipher.expanded_aes_keys.decode;
		}
	}

	/* Mutable crypto operation parameters */
	if (op->sym->m_dst) {
		m_src = m_dst = op->sym->m_dst;

		/* append space for output data to mbuf */
		char *odata = rte_pktmbuf_append(m_dst,
				rte_pktmbuf_data_len(op->sym->m_src));
		if (odata == NULL) {
			AESNI_MB_LOG(ERR, "failed to allocate space in destination "
					"mbuf for source data");
			op->status = RTE_CRYPTO_OP_STATUS_ERROR;
			return -1;
		}

		memcpy(odata, rte_pktmbuf_mtod(op->sym->m_src, void*),
				rte_pktmbuf_data_len(op->sym->m_src));
	} else {
Example #14
static int
test_blockcipher_one_case(const struct blockcipher_test_case *t,
	struct rte_mempool *mbuf_pool,
	struct rte_mempool *op_mpool,
	uint8_t dev_id,
	enum rte_cryptodev_type cryptodev_type,
	char *test_msg)
{
	struct rte_mbuf *ibuf = NULL;
	struct rte_mbuf *obuf = NULL;
	struct rte_mbuf *iobuf;
	struct rte_crypto_sym_xform *cipher_xform = NULL;
	struct rte_crypto_sym_xform *auth_xform = NULL;
	struct rte_crypto_sym_xform *init_xform = NULL;
	struct rte_crypto_sym_op *sym_op = NULL;
	struct rte_crypto_op *op = NULL;
	struct rte_cryptodev_sym_session *sess = NULL;
	struct rte_cryptodev_info dev_info;

	int status = TEST_SUCCESS;
	const struct blockcipher_test_data *tdata = t->test_data;
	uint8_t cipher_key[tdata->cipher_key.len];
	uint8_t auth_key[tdata->auth_key.len];
	uint32_t buf_len = tdata->ciphertext.len;
	uint32_t digest_len = 0;
	char *buf_p = NULL;
	uint8_t src_pattern = 0xa5;
	uint8_t dst_pattern = 0xb6;
	uint8_t tmp_src_buf[MBUF_SIZE];
	uint8_t tmp_dst_buf[MBUF_SIZE];

	int nb_segs = 1;

	if (t->feature_mask & BLOCKCIPHER_TEST_FEATURE_SG) {
		rte_cryptodev_info_get(dev_id, &dev_info);
		if (!(dev_info.feature_flags &
				RTE_CRYPTODEV_FF_MBUF_SCATTER_GATHER)) {
			printf("Device doesn't support scatter-gather. "
					"Test Skipped.\n");
			return 0;
		}
		nb_segs = 3;
	}

	if (tdata->cipher_key.len)
		memcpy(cipher_key, tdata->cipher_key.data,
			tdata->cipher_key.len);
	if (tdata->auth_key.len)
		memcpy(auth_key, tdata->auth_key.data,
			tdata->auth_key.len);

	switch (cryptodev_type) {
	case RTE_CRYPTODEV_QAT_SYM_PMD:
	case RTE_CRYPTODEV_OPENSSL_PMD:
	case RTE_CRYPTODEV_ARMV8_PMD: /* Fall through */
		digest_len = tdata->digest.len;
		break;
	case RTE_CRYPTODEV_AESNI_MB_PMD:
	case RTE_CRYPTODEV_SCHEDULER_PMD:
		digest_len = tdata->digest.truncated_len;
		break;
	default:
		snprintf(test_msg, BLOCKCIPHER_TEST_MSG_LEN,
			"line %u FAILED: %s",
			__LINE__, "Unsupported PMD type");
		status = TEST_FAILED;
		goto error_exit;
	}

	/* preparing data */
	if (t->op_mask & BLOCKCIPHER_TEST_OP_CIPHER)
		buf_len += tdata->iv.len;
	if (t->op_mask & BLOCKCIPHER_TEST_OP_AUTH)
		buf_len += digest_len;

	/* for contiguous mbuf, nb_segs is 1 */
	ibuf = create_segmented_mbuf(mbuf_pool,
			tdata->ciphertext.len, nb_segs, src_pattern);
	if (ibuf == NULL) {
		snprintf(test_msg, BLOCKCIPHER_TEST_MSG_LEN,
			"line %u FAILED: %s",
			__LINE__, "Cannot create source mbuf");
		status = TEST_FAILED;
		goto error_exit;
	}

	/* only encryption requires plaintext.data as input;
	 * decryption/(digest gen)/(digest verify) compute over
	 * ciphertext.data
	 */
	if (t->op_mask & BLOCKCIPHER_TEST_OP_ENCRYPT)
		pktmbuf_write(ibuf, 0, tdata->plaintext.len,
				tdata->plaintext.data);
	else
		pktmbuf_write(ibuf, 0, tdata->ciphertext.len,
				tdata->ciphertext.data);

	if (t->op_mask & BLOCKCIPHER_TEST_OP_CIPHER) {
		rte_memcpy(rte_pktmbuf_prepend(ibuf, tdata->iv.len),
				tdata->iv.data, tdata->iv.len);
	}
	buf_p = rte_pktmbuf_append(ibuf, digest_len);
	if (t->op_mask & BLOCKCIPHER_TEST_OP_AUTH_VERIFY)
		rte_memcpy(buf_p, tdata->digest.data, digest_len);
	else
		memset(buf_p, 0, digest_len);

	if (t->feature_mask & BLOCKCIPHER_TEST_FEATURE_OOP) {
		obuf = rte_pktmbuf_alloc(mbuf_pool);
		if (!obuf) {
			snprintf(test_msg, BLOCKCIPHER_TEST_MSG_LEN, "line %u "
				"FAILED: %s", __LINE__,
				"Allocation of rte_mbuf failed");
			status = TEST_FAILED;
			goto error_exit;
		}
		memset(obuf->buf_addr, dst_pattern, obuf->buf_len);

		buf_p = rte_pktmbuf_append(obuf, buf_len);
		if (!buf_p) {
			snprintf(test_msg, BLOCKCIPHER_TEST_MSG_LEN, "line %u "
				"FAILED: %s", __LINE__,
				"No room to append mbuf");
			status = TEST_FAILED;
			goto error_exit;
		}
		memset(buf_p, 0, buf_len);
	}

	/* Generate Crypto op data structure */
	op = rte_crypto_op_alloc(op_mpool, RTE_CRYPTO_OP_TYPE_SYMMETRIC);
	if (!op) {
		snprintf(test_msg, BLOCKCIPHER_TEST_MSG_LEN,
			"line %u FAILED: %s",
			__LINE__, "Failed to allocate symmetric crypto "
			"operation struct");
		status = TEST_FAILED;
		goto error_exit;
	}

	sym_op = op->sym;

	sym_op->m_src = ibuf;

	if (t->feature_mask & BLOCKCIPHER_TEST_FEATURE_OOP) {
		sym_op->m_dst = obuf;
		iobuf = obuf;
	} else {
		sym_op->m_dst = NULL;
		iobuf = ibuf;
	}

	/* a sessionless op requires allocating the xforms with
	 * rte_crypto_op_sym_xforms_alloc(); otherwise rte_zmalloc()
	 * is used
	 */
	if (t->feature_mask & BLOCKCIPHER_TEST_FEATURE_SESSIONLESS) {
		uint32_t n_xforms = 0;

		if (t->op_mask & BLOCKCIPHER_TEST_OP_CIPHER)
			n_xforms++;
		if (t->op_mask & BLOCKCIPHER_TEST_OP_AUTH)
			n_xforms++;

		if (rte_crypto_op_sym_xforms_alloc(op, n_xforms)
			== NULL) {
			snprintf(test_msg, BLOCKCIPHER_TEST_MSG_LEN, "line %u "
				"FAILED: %s", __LINE__, "Failed to "
				"allocate space for crypto transforms");
			status = TEST_FAILED;
			goto error_exit;
		}
	} else {
		cipher_xform = rte_zmalloc(NULL,
			sizeof(struct rte_crypto_sym_xform), 0);

		auth_xform = rte_zmalloc(NULL,
			sizeof(struct rte_crypto_sym_xform), 0);

		if (!cipher_xform || !auth_xform) {
			snprintf(test_msg, BLOCKCIPHER_TEST_MSG_LEN, "line %u "
				"FAILED: %s", __LINE__, "Failed to "
				"allocate memory for crypto transforms");
			status = TEST_FAILED;
			goto error_exit;
		}
	}

	/* prepare the xforms; for a sessioned op, init_xform is initialized
	 * here and later passed as a param to rte_cryptodev_sym_session_create()
	 */
	if (t->op_mask == BLOCKCIPHER_TEST_OP_ENC_AUTH_GEN) {
		if (t->feature_mask & BLOCKCIPHER_TEST_FEATURE_SESSIONLESS) {
			cipher_xform = op->sym->xform;
			auth_xform = cipher_xform->next;
			auth_xform->next = NULL;
		} else {
			cipher_xform->next = auth_xform;
			auth_xform->next = NULL;
			init_xform = cipher_xform;
		}
	} else if (t->op_mask == BLOCKCIPHER_TEST_OP_AUTH_VERIFY_DEC) {
		if (t->feature_mask & BLOCKCIPHER_TEST_FEATURE_SESSIONLESS) {
			auth_xform = op->sym->xform;
			cipher_xform = auth_xform->next;
			cipher_xform->next = NULL;
		} else {
			auth_xform->next = cipher_xform;
			cipher_xform->next = NULL;
			init_xform = auth_xform;
		}
	} else if ((t->op_mask == BLOCKCIPHER_TEST_OP_ENCRYPT) ||
			(t->op_mask == BLOCKCIPHER_TEST_OP_DECRYPT)) {
		if (t->feature_mask & BLOCKCIPHER_TEST_FEATURE_SESSIONLESS)
			cipher_xform = op->sym->xform;
		else
			init_xform = cipher_xform;
		cipher_xform->next = NULL;
	} else if ((t->op_mask == BLOCKCIPHER_TEST_OP_AUTH_GEN) ||
			(t->op_mask == BLOCKCIPHER_TEST_OP_AUTH_VERIFY)) {
		if (t->feature_mask & BLOCKCIPHER_TEST_FEATURE_SESSIONLESS)
			auth_xform = op->sym->xform;
		else
			init_xform = auth_xform;
		auth_xform->next = NULL;
	} else {
		snprintf(test_msg, BLOCKCIPHER_TEST_MSG_LEN,
			"line %u FAILED: %s",
			__LINE__, "Unrecognized operation");
		status = TEST_FAILED;
		goto error_exit;
	}

	/*configure xforms & sym_op cipher and auth data*/
	if (t->op_mask & BLOCKCIPHER_TEST_OP_CIPHER) {
		cipher_xform->type = RTE_CRYPTO_SYM_XFORM_CIPHER;
		cipher_xform->cipher.algo = tdata->crypto_algo;
		if (t->op_mask & BLOCKCIPHER_TEST_OP_ENCRYPT)
			cipher_xform->cipher.op =
				RTE_CRYPTO_CIPHER_OP_ENCRYPT;
		else
			cipher_xform->cipher.op =
				RTE_CRYPTO_CIPHER_OP_DECRYPT;
		cipher_xform->cipher.key.data = cipher_key;
		cipher_xform->cipher.key.length = tdata->cipher_key.len;

		sym_op->cipher.data.offset = tdata->iv.len;
		sym_op->cipher.data.length = tdata->ciphertext.len;
		sym_op->cipher.iv.data = rte_pktmbuf_mtod(sym_op->m_src,
			uint8_t *);
		sym_op->cipher.iv.length = tdata->iv.len;
		sym_op->cipher.iv.phys_addr = rte_pktmbuf_mtophys(
			sym_op->m_src);
	}
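
The listing stops after the cipher configuration; a heavily hedged sketch
of the auth configuration such a test typically pairs with it (field names
follow the same-era rte_crypto_sym_xform layout, and tdata->auth_algo is
an assumption):

	if (t->op_mask & BLOCKCIPHER_TEST_OP_AUTH) {
		auth_xform->type = RTE_CRYPTO_SYM_XFORM_AUTH;
		auth_xform->auth.algo = tdata->auth_algo;
		auth_xform->auth.key.data = auth_key;
		auth_xform->auth.key.length = tdata->auth_key.len;
		auth_xform->auth.digest_length = digest_len;
	}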
Example #15
/*
 * dpdk_virtio_from_vm_rx - receive packets from a virtio client so that
 * the packets can be handed to vrouter for forwarding. The virtio client is
 * usually a VM.
 *
 * Returns the number of packets received from the virtio queue.
 */
static int
dpdk_virtio_from_vm_rx(void *port, struct rte_mbuf **pkts, uint32_t max_pkts)
{
    struct dpdk_virtio_reader *p = (struct dpdk_virtio_reader *)port;
    vr_dpdk_virtioq_t *vq = p->rx_virtioq;
    uint16_t vq_hard_avail_idx, i;
    uint16_t avail_pkts, next_desc_idx, next_avail_idx;
    struct vring_desc *desc;
    char *pkt_addr, *tail_addr;
    struct rte_mbuf *mbuf;
    uint32_t pkt_len, nb_pkts = 0;
    vr_uvh_client_t *vru_cl;

    if (unlikely(vq->vdv_ready_state == VQ_NOT_READY)) {
        DPDK_UDEBUG(VROUTER, &vq->vdv_hash, "%s: queue %p is not ready\n",
                __func__, vq);
        return 0;
    }

    vru_cl = vr_dpdk_virtio_get_vif_client(vq->vdv_vif_idx);
    if (unlikely(vru_cl == NULL))
        return 0;

    vq_hard_avail_idx = (*((volatile uint16_t *)&vq->vdv_avail->idx));

    /* Unsigned subtraction gives the right result even with wrap around. */
    avail_pkts = vq_hard_avail_idx - vq->vdv_last_used_idx;
    avail_pkts = RTE_MIN(avail_pkts, max_pkts);
    if (unlikely(avail_pkts == 0)) {
        DPDK_UDEBUG(VROUTER, &vq->vdv_hash, "%s: queue %p has no packets\n",
                    __func__, vq);
        return 0;
    }

    DPDK_UDEBUG(VROUTER, &vq->vdv_hash, "%s: queue %p AVAILABLE %u packets\n",
            __func__, vq, avail_pkts);
    for (i = 0; i < avail_pkts; i++) {
        /* Allocate a mbuf. */
        mbuf = rte_pktmbuf_alloc(vr_dpdk.rss_mempool);
        if (unlikely(mbuf == NULL)) {
            p->nb_nombufs++;
            DPDK_UDEBUG(VROUTER, &vq->vdv_hash, "%s: queue %p no_mbufs=%"PRIu64"\n",
                    __func__, vq, p->nb_nombufs);
            break;
        }

        next_avail_idx = (vq->vdv_last_used_idx + i) & (vq->vdv_size - 1);
        next_desc_idx = vq->vdv_avail->ring[next_avail_idx];
        /*
         * Move the (chain of) descriptors to the used list. The used
         * index will, however, only be updated at the end of the loop.
         */
        vq->vdv_used->ring[next_avail_idx].id = next_desc_idx;
        vq->vdv_used->ring[next_avail_idx].len = 0;

        desc = &vq->vdv_desc[next_desc_idx];
        pkt_len = desc->len;
        pkt_addr = vr_dpdk_guest_phys_to_host_virt(vru_cl, desc->addr);
        /* Check the descriptor is sane. */
        if (unlikely(desc->len < sizeof(struct virtio_net_hdr) ||
                desc->addr == 0 || pkt_addr == NULL)) {
            goto free_mbuf;
        }
        /* Now pkt_addr points to the virtio_net_hdr. */

        if (((struct virtio_net_hdr *)pkt_addr)->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM)
                mbuf->ol_flags |= PKT_RX_IP_CKSUM_BAD;

        /* Skip virtio_net_hdr as we don't support mergeable receive buffers. */
        if (likely(desc->flags & VRING_DESC_F_NEXT &&
                pkt_len == sizeof(struct virtio_net_hdr))) {
            DPDK_UDEBUG(VROUTER, &vq->vdv_hash, "%s: queue %p pkt %u F_NEXT\n",
                __func__, vq, i);
            desc = &vq->vdv_desc[desc->next];
            pkt_len = desc->len;
            pkt_addr = vr_dpdk_guest_phys_to_host_virt(vru_cl, desc->addr);
        } else {
            DPDK_UDEBUG(VROUTER, &vq->vdv_hash, "%s: queue %p pkt %u no F_NEXT\n",
                __func__, vq, i);
            pkt_addr += sizeof(struct virtio_net_hdr);
            pkt_len -= sizeof(struct virtio_net_hdr);
        }
        /* Now pkt_addr points to the packet data. */

        tail_addr = rte_pktmbuf_append(mbuf, pkt_len);
        /* Check we are ready to copy the data. */
        if (unlikely(desc->addr == 0 || pkt_addr == NULL ||
                tail_addr == NULL)) {
            goto free_mbuf;
        }
        /* Copy first descriptor data. */
        rte_memcpy(tail_addr, pkt_addr, pkt_len);

        /*
         * Gather mbuf from several virtio buffers. We do not support mbuf
         * chains, so all virtio buffers should fit into one mbuf.
         */
        while (unlikely(desc->flags & VRING_DESC_F_NEXT)) {
            desc = &vq->vdv_desc[desc->next];
            pkt_len = desc->len;
            pkt_addr = vr_dpdk_guest_phys_to_host_virt(vru_cl, desc->addr);
            tail_addr = rte_pktmbuf_append(mbuf, pkt_len);
            /* Check we are ready to copy the data. */
            if (unlikely(desc->addr == 0 || pkt_addr == NULL ||
                    tail_addr == NULL)) {
                goto free_mbuf;
            }
            /* Append next descriptor(s) data. */
            rte_memcpy(tail_addr, pkt_addr, pkt_len);
        }

        pkts[nb_pkts] = mbuf;
        nb_pkts++;
        continue;

    free_mbuf:
        DPDK_VIRTIO_READER_STATS_PKTS_DROP_ADD(p, 1);
        rte_pktmbuf_free(mbuf);
    }

    /*
     * Do not interrupt the guest if no descriptors were processed.
     *
     * If there are no free mbufs on the host, the TX queue in the guest
     * fills up. This makes the guest kernel switch to interrupt mode
     * and clear the VRING_AVAIL_F_NO_INTERRUPT flag.
     *
     * Meanwhile the host polls the virtio queue, sees the available
     * descriptors and interrupts the guest. Those interrupts go unhandled
     * by the guest virtio driver, so after 100K of them the IRQ gets
     * reported and disabled by the guest kernel.
     */
    if (likely(i > 0)) {
        vq->vdv_last_used_idx += i;
        rte_wmb();
        vq->vdv_used->idx += i;
        RTE_LOG(DEBUG, VROUTER,
                "%s: vif %d vq %p last_used_idx %d used->idx %u avail->idx %u\n",
                __func__, vq->vdv_vif_idx, vq, vq->vdv_last_used_idx,
                vq->vdv_used->idx, vq->vdv_avail->idx);

        /* Call guest if required. */
        if (unlikely(!(vq->vdv_avail->flags & VRING_AVAIL_F_NO_INTERRUPT))) {
            p->nb_syscalls++;
            eventfd_write(vq->vdv_callfd, 1);
        }
    }

    DPDK_UDEBUG(VROUTER, &vq->vdv_hash, "%s: queue %p RETURNS %u pkts\n",
            __func__, vq, nb_pkts);

    DPDK_VIRTIO_READER_STATS_PKTS_IN_ADD(p, nb_pkts);

    return nb_pkts;
}
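
A worked example of the wraparound comment in the code above: with 16-bit
ring indices, the unsigned subtraction stays correct across a wrap.

    uint16_t avail_idx = 2, last_used = 65534;
    uint16_t pending = avail_idx - last_used;   /* == 4, as expected */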
Example #16
/* this test harnesses a Linux guest to check that packets are sent and received
 * by the vhost brick. An ethernet bridge inside the guest will forward packets
 * between the two vhost-user virtio interfaces.
 */
static void test_vhost_flow_(int qemu_exit_signal)
{
	const char mac_addr_0[18] = "52:54:00:12:34:11";
	const char mac_addr_1[18] = "52:54:00:12:34:12";
	struct rte_mempool *mbuf_pool = pg_get_mempool();
	struct pg_brick *vhost_0, *vhost_1, *collect;
	struct rte_mbuf *pkts[PG_MAX_PKTS_BURST];
	const char *socket_path_0, *socket_path_1;
	struct pg_error *error = NULL;
	struct rte_mbuf **result_pkts;
	int ret, qemu_pid, i;
	uint64_t pkts_mask;

	/* start vhost */
	ret = pg_vhost_start("/tmp", &error);
	g_assert(ret == 0);
	g_assert(!error);

	/* instantiate bricks */
	vhost_0 = pg_vhost_new("vhost-0", &error);
	g_assert(!error);
	g_assert(vhost_0);

	vhost_1 = pg_vhost_new("vhost-1", &error);
	g_assert(!error);
	g_assert(vhost_1);

	collect = pg_collect_new("collect", &error);
	g_assert(!error);
	g_assert(collect);

	/* build the graph */
	pg_brick_link(collect, vhost_1, &error);
	g_assert(!error);

	/* spawn first QEMU */
	socket_path_0 = pg_vhost_socket_path(vhost_0, &error);
	g_assert(!error);
	g_assert(socket_path_0);
	socket_path_1 = pg_vhost_socket_path(vhost_1, &error);
	g_assert(!error);
	g_assert(socket_path_1);

	qemu_pid = pg_util_spawn_qemu(socket_path_0, socket_path_1,
				      mac_addr_0, mac_addr_1,
				      glob_vm_path,
				      glob_vm_key_path,
				      glob_hugepages_path, &error);

	g_assert(!error);
	g_assert(qemu_pid);

	/* Prepare VM's bridge. */
#	define SSH(c) \
		g_assert(pg_util_ssh("localhost", ssh_port_id, glob_vm_key_path, c) == 0)
	SSH("brctl addbr br0");
	SSH("ifconfig br0 up");
	SSH("ifconfig ens4 up");
	SSH("ifconfig ens5 up");
	SSH("brctl addif br0 ens4");
	SSH("brctl addif br0 ens5");
	SSH("brctl setfd br0 0");
	SSH("brctl stp br0 off");
#	undef SSH
	ssh_port_id++;

	/* prepare packets to send */
	for (i = 0; i < NB_PKTS; i++) {
		pkts[i] = rte_pktmbuf_alloc(mbuf_pool);
		g_assert(pkts[i]);
		rte_pktmbuf_append(pkts[i], ETHER_MIN_LEN);
		/* set random dst/src mac address so the linux guest bridge
		 * will not filter them
		 */
		pg_set_mac_addrs(pkts[i],
			      "52:54:00:12:34:15", "52:54:00:12:34:16");
		/* set size */
		pg_set_ether_type(pkts[i], ETHER_MIN_LEN - ETHER_HDR_LEN - 4);
	}

	/* send packet to the guest via one interface */
	pg_brick_burst_to_east(vhost_0, 0, pkts,
			       pg_mask_firsts(NB_PKTS), &error);
	g_assert(!error);

	/* let the packet propagate and flow */
	for (i = 0; i < 10; i++) {
		uint16_t count = 0;

		usleep(100000);
		pg_brick_poll(vhost_1, &count, &error);
		g_assert(!error);
		if (count)
			break;
	}

	result_pkts = pg_brick_east_burst_get(collect, &pkts_mask, &error);
	g_assert(!error);
	g_assert(result_pkts);
	g_assert(pg_brick_rx_bytes(vhost_0) == 0);
	g_assert(pg_brick_tx_bytes(vhost_0) != 0);
	g_assert(pg_brick_rx_bytes(vhost_1) != 0);
	g_assert(pg_brick_tx_bytes(vhost_1) == 0);

	/* kill QEMU */
	pg_util_stop_qemu(qemu_pid, qemu_exit_signal);

	/* free result packets */
	pg_packets_free(result_pkts, pkts_mask);

	/* free sent packet */
	for (i = 0; i < NB_PKTS; i++)
		rte_pktmbuf_free(pkts[i]);

	/* break the graph */
	pg_brick_unlink(collect, &error);
	g_assert(!error);

	/* clean up */
	/* pg_brick_decref(vhost_0, &error); */
	pg_brick_destroy(vhost_0);
	g_assert(!error);
	pg_brick_destroy(vhost_1);
	/* pg_brick_decref(vhost_1, &error); */
	g_assert(!error);
	pg_brick_decref(collect, &error);
	g_assert(!error);

	/* stop vhost */
	pg_vhost_stop();
}
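
Such a parameterized helper is usually wrapped by the actual test entry
points; a hedged sketch (wrapper names assumed):

	static void test_vhost_flow(void)
	{
		test_vhost_flow_(SIGKILL);
	}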
Example #17
/**
 * Process a crypto operation and complete a JOB_AES_HMAC job structure for
 * submission to the multi buffer library for processing.
 *
 * @param	qp	queue pair
 * @param	m	mbuf to process
 * @param	c_op	crypto operation to process
 * @param	session	session associated with the operation
 *
 * @return
 * - Completed JOB_AES_HMAC structure pointer on success
 * - NULL pointer if completion of JOB_AES_HMAC structure isn't possible
 */
static JOB_AES_HMAC *
process_crypto_op(struct aesni_mb_qp *qp, struct rte_mbuf *m,
		struct rte_crypto_op *c_op, struct aesni_mb_session *session)
{
	JOB_AES_HMAC *job;

	job = (*qp->ops->job.get_next)(&qp->mb_mgr);
	if (unlikely(job == NULL))
		return job;

	/* Set crypto operation */
	job->chain_order = session->chain_order;

	/* Set cipher parameters */
	job->cipher_direction = session->cipher.direction;
	job->cipher_mode = session->cipher.mode;

	job->aes_key_len_in_bytes = session->cipher.key_length_in_bytes;
	job->aes_enc_key_expanded = session->cipher.expanded_aes_keys.encode;
	job->aes_dec_key_expanded = session->cipher.expanded_aes_keys.decode;


	/* Set authentication parameters */
	job->hash_alg = session->auth.algo;
	if (job->hash_alg == AES_XCBC) {
		job->_k1_expanded = session->auth.xcbc.k1_expanded;
		job->_k2 = session->auth.xcbc.k2;
		job->_k3 = session->auth.xcbc.k3;
	} else {
		job->hashed_auth_key_xor_ipad = session->auth.pads.inner;
		job->hashed_auth_key_xor_opad = session->auth.pads.outer;
	}

	/* Mutable crypto operation parameters */

	/* Set digest output location */
	if (job->cipher_direction == DECRYPT) {
		job->auth_tag_output = (uint8_t *)rte_pktmbuf_append(m,
				get_digest_byte_length(job->hash_alg));

		if (job->auth_tag_output)
			memset(job->auth_tag_output, 0,
				get_digest_byte_length(job->hash_alg));
		else
			return NULL;
	} else {
		job->auth_tag_output = c_op->digest.data;
	}

	/*
	 * The multi-buffer library currently only supports returning a
	 * truncated digest length, as specified in the relevant IPsec RFCs
	 */
	job->auth_tag_output_len_in_bytes =
			get_truncated_digest_byte_length(job->hash_alg);

	/* Set IV parameters */
	job->iv = c_op->iv.data;
	job->iv_len_in_bytes = c_op->iv.length;

	/* Data  Parameter */
	job->src = rte_pktmbuf_mtod(m, uint8_t *);
	job->dst = c_op->dst.m ?
			rte_pktmbuf_mtod(c_op->dst.m, uint8_t *) +
			c_op->dst.offset :
			rte_pktmbuf_mtod(m, uint8_t *) +
			c_op->data.to_cipher.offset;

	job->cipher_start_src_offset_in_bytes = c_op->data.to_cipher.offset;
	job->msg_len_to_cipher_in_bytes = c_op->data.to_cipher.length;

	job->hash_start_src_offset_in_bytes = c_op->data.to_hash.offset;
	job->msg_len_to_hash_in_bytes = c_op->data.to_hash.length;

	/* Set user data to be crypto operation data struct */
	job->user_data = m;
	job->user_data2 = c_op;

	return job;
}
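
A hedged sketch of the verify step the DECRYPT scratch area above feeds:
the freshly computed tag is compared against the digest carried in the
operation, and the scratch bytes are trimmed back off the mbuf (status
propagation elided; assumes a single-segment m):

	int auth_ok = (memcmp(job->auth_tag_output, c_op->digest.data,
			      job->auth_tag_output_len_in_bytes) == 0);
	rte_pktmbuf_trim(m, get_digest_byte_length(job->hash_alg));
	/* ... propagate !auth_ok as an authentication failure ... */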