Example #1
File: enic_ethdev.c Project: btw616/dpdk
static int enicpmd_dev_rx_queue_setup(struct rte_eth_dev *eth_dev,
	uint16_t queue_idx,
	uint16_t nb_desc,
	unsigned int socket_id,
	const struct rte_eth_rxconf *rx_conf,
	struct rte_mempool *mp)
{
	int ret;
	struct enic *enic = pmd_priv(eth_dev);

	ENICPMD_FUNC_TRACE();

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return -E_RTE_SECONDARY;
	RTE_ASSERT(enic_rte_rq_idx_to_sop_idx(queue_idx) < enic->conf_rq_count);
	eth_dev->data->rx_queues[queue_idx] =
		(void *)&enic->rq[enic_rte_rq_idx_to_sop_idx(queue_idx)];

	ret = enic_alloc_rq(enic, queue_idx, socket_id, mp, nb_desc,
			    rx_conf->rx_free_thresh);
	if (ret) {
		dev_err(enic, "error in allocating rq\n");
		return ret;
	}

	return enicpmd_dev_setup_intr(enic);
}
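The function above is the enic PMD's rx_queue_setup callback; applications never call it directly but reach it through the generic ethdev API. Below is a minimal, hypothetical caller sketch: the queue index, descriptor count and mbuf_pool are illustrative assumptions.

static int
setup_one_rx_queue(uint16_t port_id, struct rte_mempool *mbuf_pool)
{
	struct rte_eth_dev_info dev_info;
	struct rte_eth_rxconf rxconf;

	rte_eth_dev_info_get(port_id, &dev_info);
	rxconf = dev_info.default_rxconf;

	/* Dispatches to the PMD callback (enicpmd_dev_rx_queue_setup when
	 * the port is an enic device). */
	return rte_eth_rx_queue_setup(port_id, 0 /* queue */, 512 /* descs */,
				      rte_eth_dev_socket_id(port_id),
				      &rxconf, mbuf_pool);
}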
Example #2
/*
 * Initialize a pool of keys
 * These are unique tokens that can be obtained by threads
 * calling lthread_key_create()
 */
void _lthread_key_pool_init(void)
{
	static struct rte_ring *pool;
	struct lthread_key *new_key;
	char name[MAX_LTHREAD_NAME_SIZE];

	bzero(key_table, sizeof(key_table));

	/* only one lcore should do this */
	if (rte_atomic64_cmpset(&key_pool_init, 0, 1)) {

		snprintf(name,
			MAX_LTHREAD_NAME_SIZE,
			"lthread_key_pool_%d",
			getpid());

		pool = rte_ring_create(name,
					LTHREAD_MAX_KEYS, 0, 0);
		RTE_ASSERT(pool);

		int i;

		for (i = 1; i < LTHREAD_MAX_KEYS; i++) {
			new_key = &key_table[i];
			rte_ring_mp_enqueue((struct rte_ring *)pool,
						(void *)new_key);
		}
		key_pool = pool;
	}
	/* other lcores wait here till done */
	while (key_pool == NULL) {
		rte_compiler_barrier();
		sched_yield();
	};
}
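For context, a minimal sketch of how an lthread would consume one of the pooled keys, assuming the pthread-like TLS calls declared alongside this code (lthread_key_create(), lthread_setspecific(), lthread_getspecific()); the function and its argument are hypothetical.

static void tls_demo(void *arg)
{
	unsigned int key;

	/* draws a token from the pool initialized above */
	if (lthread_key_create(&key, NULL) != 0)
		return;

	lthread_setspecific(key, arg);
	/* ... later, anywhere on this lthread ... */
	void *p = lthread_getspecific(key);
	(void)p;
}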
Example #3
File: enic_ethdev.c Project: btw616/dpdk
static int enicpmd_dev_tx_queue_setup(struct rte_eth_dev *eth_dev,
	uint16_t queue_idx,
	uint16_t nb_desc,
	unsigned int socket_id,
	const struct rte_eth_txconf *tx_conf)
{
	int ret;
	struct enic *enic = pmd_priv(eth_dev);
	struct vnic_wq *wq;

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return -E_RTE_SECONDARY;

	ENICPMD_FUNC_TRACE();
	RTE_ASSERT(queue_idx < enic->conf_wq_count);
	wq = &enic->wq[queue_idx];
	wq->offloads = tx_conf->offloads |
		eth_dev->data->dev_conf.txmode.offloads;
	eth_dev->data->tx_queues[queue_idx] = (void *)wq;

	ret = enic_alloc_wq(enic, queue_idx, socket_id, nb_desc);
	if (ret) {
		dev_err(enic, "error in allocating wq\n");
		return ret;
	}

	return enicpmd_dev_setup_intr(enic);
}
Example #4
/*
 * Unlock a mutex
 */
int lthread_mutex_unlock(struct lthread_mutex *m)
{
	struct lthread *lt = THIS_LTHREAD;
	struct lthread *unblocked;

	if ((m == NULL) || (m->blocked == NULL)) {
		DIAG_EVENT(m, LT_DIAG_MUTEX_UNLOCKED, m, POSIX_ERRNO(EINVAL));
		return POSIX_ERRNO(EINVAL);
	}

	/* fail if the calling lthread does not own the mutex */
	if (m->owner != lt || m->owner == NULL) {
		DIAG_EVENT(m, LT_DIAG_MUTEX_UNLOCKED, m, POSIX_ERRNO(EPERM));
		return POSIX_ERRNO(EPERM);
	}

	rte_atomic64_dec(&m->count);
	/* if there are blocked threads then make one ready */
	while (rte_atomic64_read(&m->count) > 0) {
		unblocked = _lthread_queue_remove(m->blocked);

		if (unblocked != NULL) {
			rte_atomic64_dec(&m->count);
			DIAG_EVENT(m, LT_DIAG_MUTEX_UNLOCKED, m, unblocked);
			RTE_ASSERT(unblocked->sched != NULL);
			_ready_queue_insert((struct lthread_sched *)
					    unblocked->sched, unblocked);
			break;
		}
	}
	/* release the lock */
	m->owner = NULL;
	return 0;
}
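A minimal usage sketch of the lock/unlock pair, assuming m was created earlier with lthread_mutex_init() and the caller is running as an lthread; locked_update() is hypothetical.

static void locked_update(struct lthread_mutex *m)
{
	lthread_mutex_lock(m);		/* may block and deschedule this lthread */
	/* ... critical section ... */
	lthread_mutex_unlock(m);	/* makes one blocked lthread ready, as above */
}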
Example #5
void
activate_slave(struct rte_eth_dev *eth_dev, uint8_t port_id)
{
	struct bond_dev_private *internals = eth_dev->data->dev_private;
	uint8_t active_count = internals->active_slave_count;

	if (internals->mode == BONDING_MODE_8023AD)
		bond_mode_8023ad_activate_slave(eth_dev, port_id);

	if (internals->mode == BONDING_MODE_TLB
			|| internals->mode == BONDING_MODE_ALB) {

		internals->tlb_slaves_order[active_count] = port_id;
	}

	RTE_ASSERT(internals->active_slave_count <
			(RTE_DIM(internals->active_slaves) - 1));

	internals->active_slaves[internals->active_slave_count] = port_id;
	internals->active_slave_count++;

	if (internals->mode == BONDING_MODE_TLB)
		bond_tlb_activate_slave(internals);
	if (internals->mode == BONDING_MODE_ALB)
		bond_mode_alb_client_list_upd(eth_dev);
}
Example #6
uint16_t
failsafe_rx_burst_fast(void *queue,
			 struct rte_mbuf **rx_pkts,
			 uint16_t nb_pkts)
{
	struct fs_priv *priv;
	struct sub_device *sdev;
	struct rxq *rxq;
	void *sub_rxq;
	uint16_t nb_rx;
	uint8_t nb_polled, nb_subs;
	uint8_t i;

	rxq = queue;
	priv = rxq->priv;
	nb_subs = priv->subs_tail - priv->subs_head;
	nb_polled = 0;
	for (i = rxq->last_polled; nb_polled < nb_subs; nb_polled++) {
		i++;
		if (i == priv->subs_tail)
			i = priv->subs_head;
		sdev = &priv->subs[i];
		RTE_ASSERT(!fs_rx_unsafe(sdev));
		sub_rxq = ETH(sdev)->data->rx_queues[rxq->qid];
		FS_ATOMIC_P(rxq->refcnt[sdev->sid]);
		nb_rx = ETH(sdev)->
			rx_pkt_burst(sub_rxq, rx_pkts, nb_pkts);
		FS_ATOMIC_V(rxq->refcnt[sdev->sid]);
		if (nb_rx) {
			rxq->last_polled = i;
			return nb_rx;
		}
	}
	return 0;
}
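From the application's side this is simply the port's Rx path: rte_eth_rx_burst() on a fail-safe port ends up in the function above once the PMD has switched to its "fast" burst variant. A hypothetical polling sketch, with the queue id and burst size chosen only for illustration:

static uint16_t poll_failsafe_port(uint16_t port_id)
{
	struct rte_mbuf *pkts[32];
	uint16_t i, nb;

	nb = rte_eth_rx_burst(port_id, 0, pkts, RTE_DIM(pkts));
	for (i = 0; i < nb; i++)
		rte_pktmbuf_free(pkts[i]);	/* a real app would process them */
	return nb;
}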
Example #7
/**
 * Process the received mbox message.
 */
int
lio_mbox_process_message(struct lio_mbox *mbox)
{
	struct lio_mbox_cmd mbox_cmd;

	if (mbox->state & LIO_MBOX_STATE_ERROR) {
		if (mbox->state & (LIO_MBOX_STATE_RES_PENDING |
				   LIO_MBOX_STATE_RES_RECEIVING)) {
			rte_memcpy(&mbox_cmd, &mbox->mbox_resp,
				   sizeof(struct lio_mbox_cmd));
			mbox->state = LIO_MBOX_STATE_IDLE;
			rte_write64(LIO_PFVFSIG, mbox->mbox_read_reg);
			mbox_cmd.recv_status = 1;
			if (mbox_cmd.fn)
				mbox_cmd.fn(mbox->lio_dev, &mbox_cmd,
					    mbox_cmd.fn_arg);

			return 0;
		}

		mbox->state = LIO_MBOX_STATE_IDLE;
		rte_write64(LIO_PFVFSIG, mbox->mbox_read_reg);

		return 0;
	}

	if (mbox->state & LIO_MBOX_STATE_RES_RECEIVED) {
		rte_memcpy(&mbox_cmd, &mbox->mbox_resp,
			   sizeof(struct lio_mbox_cmd));
		mbox->state = LIO_MBOX_STATE_IDLE;
		rte_write64(LIO_PFVFSIG, mbox->mbox_read_reg);
		mbox_cmd.recv_status = 0;
		if (mbox_cmd.fn)
			mbox_cmd.fn(mbox->lio_dev, &mbox_cmd, mbox_cmd.fn_arg);

		return 0;
	}

	if (mbox->state & LIO_MBOX_STATE_REQ_RECEIVED) {
		rte_memcpy(&mbox_cmd, &mbox->mbox_req,
			   sizeof(struct lio_mbox_cmd));
		if (!mbox_cmd.msg.s.resp_needed) {
			mbox->state &= ~LIO_MBOX_STATE_REQ_RECEIVED;
			if (!(mbox->state & LIO_MBOX_STATE_RES_PENDING))
				mbox->state = LIO_MBOX_STATE_IDLE;
			rte_write64(LIO_PFVFSIG, mbox->mbox_read_reg);
		}

		lio_mbox_process_cmd(mbox, &mbox_cmd);

		return 0;
	}

	RTE_ASSERT(0);

	return 0;
}
Example #8
/*
 * Create a scheduler on the current lcore
 */
struct lthread_sched *_lthread_sched_create(size_t stack_size)
{
	int status;
	struct lthread_sched *new_sched;
	unsigned lcoreid = rte_lcore_id();

	RTE_ASSERT(stack_size <= LTHREAD_MAX_STACK_SIZE);

	if (stack_size == 0)
		stack_size = LTHREAD_MAX_STACK_SIZE;

	new_sched =
	     rte_calloc_socket(NULL, 1, sizeof(struct lthread_sched),
				RTE_CACHE_LINE_SIZE,
				rte_socket_id());
	if (new_sched == NULL) {
		RTE_LOG(CRIT, LTHREAD,
			"Failed to allocate memory for scheduler\n");
		return NULL;
	}

	_lthread_key_pool_init();

	new_sched->stack_size = stack_size;
	new_sched->birth = rte_rdtsc();
	THIS_SCHED = new_sched;

	status = _lthread_sched_alloc_resources(new_sched);
	if (status != SCHED_ALLOC_OK) {
		RTE_LOG(CRIT, LTHREAD,
			"Failed to allocate resources for scheduler code = %d\n",
			status);
		rte_free(new_sched);
		return NULL;
	}

	bzero(&new_sched->ctx, sizeof(struct ctx));

	new_sched->lcore_id = lcoreid;

	schedcore[lcoreid] = new_sched;

	new_sched->run_flag = 1;

	DIAG_EVENT(new_sched, LT_DIAG_SCHED_CREATE, rte_lcore_id(), 0);

	rte_wmb();
	return new_sched;
}
Example #9
/*
 * Allocate data for TLS cache
*/
void _lthread_tls_alloc(struct lthread *lt)
{
	struct lthread_tls *tls;

	tls = _lthread_objcache_alloc((THIS_SCHED)->tls_cache);

	RTE_ASSERT(tls != NULL);

	tls->root_sched = (THIS_SCHED);
	lt->tls = tls;

	/* allocate data for TLS variables using RTE_PER_LTHREAD macros */
	if (sizeof(void *) < (uint64_t)RTE_PER_LTHREAD_SECTION_SIZE) {
		lt->per_lthread_data =
		    _lthread_objcache_alloc((THIS_SCHED)->per_lthread_cache);
	}
}
Example #10
uint16_t
failsafe_tx_burst_fast(void *queue,
			 struct rte_mbuf **tx_pkts,
			 uint16_t nb_pkts)
{
	struct sub_device *sdev;
	struct txq *txq;
	void *sub_txq;
	uint16_t nb_tx;

	txq = queue;
	sdev = TX_SUBDEV(txq->priv->dev);
	RTE_ASSERT(!fs_tx_unsafe(sdev));
	sub_txq = ETH(sdev)->data->tx_queues[txq->qid];
	FS_ATOMIC_P(txq->refcnt[sdev->sid]);
	nb_tx = ETH(sdev)->tx_pkt_burst(sub_txq, tx_pkts, nb_pkts);
	FS_ATOMIC_V(txq->refcnt[sdev->sid]);
	return nb_tx;
}
Example #11
void
deactivate_slave(struct rte_eth_dev *eth_dev, uint8_t port_id)
{
	uint8_t slave_pos;
	struct bond_dev_private *internals = eth_dev->data->dev_private;
	uint8_t active_count = internals->active_slave_count;

	if (internals->mode == BONDING_MODE_8023AD) {
		bond_mode_8023ad_stop(eth_dev);
		bond_mode_8023ad_deactivate_slave(eth_dev, port_id);
	} else if (internals->mode == BONDING_MODE_TLB
			|| internals->mode == BONDING_MODE_ALB)
		bond_tlb_disable(internals);

	slave_pos = find_slave_by_id(internals->active_slaves, active_count,
			port_id);

	/* If the slave was not at the end of the list,
	 * shift the remaining active slaves up in the array */
	if (slave_pos < active_count) {
		active_count--;
		memmove(internals->active_slaves + slave_pos,
				internals->active_slaves + slave_pos + 1,
				(active_count - slave_pos) *
					sizeof(internals->active_slaves[0]));
	}

	RTE_ASSERT(active_count < RTE_DIM(internals->active_slaves));
	internals->active_slave_count = active_count;

	if (eth_dev->data->dev_started) {
		if (internals->mode == BONDING_MODE_8023AD) {
			bond_mode_8023ad_start(eth_dev);
		} else if (internals->mode == BONDING_MODE_TLB) {
			bond_tlb_enable(internals);
		} else if (internals->mode == BONDING_MODE_ALB) {
			bond_tlb_enable(internals);
			bond_mode_alb_client_list_upd(eth_dev);
		}
	}
}
Example #12
static uint16_t
schedule_enqueue(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops)
{
	struct scheduler_qp_ctx *qp_ctx = qp;
	struct psd_scheduler_qp_ctx *psd_qp_ctx = qp_ctx->private_qp_ctx;
	struct rte_crypto_op *sched_ops[NB_PKT_SIZE_SLAVES][nb_ops];
	struct scheduler_session *sess;
	uint32_t in_flight_ops[NB_PKT_SIZE_SLAVES] = {
			psd_qp_ctx->primary_slave.nb_inflight_cops,
			psd_qp_ctx->secondary_slave.nb_inflight_cops
	};
	struct psd_schedule_op enq_ops[NB_PKT_SIZE_SLAVES] = {
		{PRIMARY_SLAVE_IDX, 0}, {SECONDARY_SLAVE_IDX, 0}
	};
	struct psd_schedule_op *p_enq_op;
	uint16_t i, processed_ops_pri = 0, processed_ops_sec = 0;
	uint32_t job_len;

	if (unlikely(nb_ops == 0))
		return 0;

	for (i = 0; i < nb_ops && i < 4; i++) {
		rte_prefetch0(ops[i]->sym);
		rte_prefetch0(ops[i]->sym->session);
	}

	for (i = 0; (i < (nb_ops - 8)) && (nb_ops > 8); i += 4) {
		rte_prefetch0(ops[i + 4]->sym);
		rte_prefetch0(ops[i + 4]->sym->session);
		rte_prefetch0(ops[i + 5]->sym);
		rte_prefetch0(ops[i + 5]->sym->session);
		rte_prefetch0(ops[i + 6]->sym);
		rte_prefetch0(ops[i + 6]->sym->session);
		rte_prefetch0(ops[i + 7]->sym);
		rte_prefetch0(ops[i + 7]->sym->session);

		sess = (struct scheduler_session *)
				ops[i]->sym->session->_private;
		/* job_len starts as the cipher data length; if that is 0,
		 * fall back to the auth data length
		 */
		job_len = ops[i]->sym->cipher.data.length;
		job_len += (ops[i]->sym->cipher.data.length == 0) *
				ops[i]->sym->auth.data.length;
		/* decide the target op based on the job length */
		p_enq_op = &enq_ops[!(job_len & psd_qp_ctx->threshold)];

		/* stop scheduling crypto ops before a slave queue fills up;
		 * this prevents the enqueue below from failing
		 */
		if (p_enq_op->pos + in_flight_ops[p_enq_op->slave_idx] ==
				qp_ctx->max_nb_objs) {
			i = nb_ops;
			break;
		}

		sched_ops[p_enq_op->slave_idx][p_enq_op->pos] = ops[i];
		ops[i]->sym->session = sess->sessions[p_enq_op->slave_idx];
		p_enq_op->pos++;

		sess = (struct scheduler_session *)
				ops[i+1]->sym->session->_private;
		job_len = ops[i+1]->sym->cipher.data.length;
		job_len += (ops[i+1]->sym->cipher.data.length == 0) *
				ops[i+1]->sym->auth.data.length;
		p_enq_op = &enq_ops[!(job_len & psd_qp_ctx->threshold)];

		if (p_enq_op->pos + in_flight_ops[p_enq_op->slave_idx] ==
				qp_ctx->max_nb_objs) {
			i = nb_ops;
			break;
		}

		sched_ops[p_enq_op->slave_idx][p_enq_op->pos] = ops[i+1];
		ops[i+1]->sym->session = sess->sessions[p_enq_op->slave_idx];
		p_enq_op->pos++;

		sess = (struct scheduler_session *)
				ops[i+2]->sym->session->_private;
		job_len = ops[i+2]->sym->cipher.data.length;
		job_len += (ops[i+2]->sym->cipher.data.length == 0) *
				ops[i+2]->sym->auth.data.length;
		p_enq_op = &enq_ops[!(job_len & psd_qp_ctx->threshold)];

		if (p_enq_op->pos + in_flight_ops[p_enq_op->slave_idx] ==
				qp_ctx->max_nb_objs) {
			i = nb_ops;
			break;
		}

		sched_ops[p_enq_op->slave_idx][p_enq_op->pos] = ops[i+2];
		ops[i+2]->sym->session = sess->sessions[p_enq_op->slave_idx];
		p_enq_op->pos++;

		sess = (struct scheduler_session *)
				ops[i+3]->sym->session->_private;

		job_len = ops[i+3]->sym->cipher.data.length;
		job_len += (ops[i+3]->sym->cipher.data.length == 0) *
				ops[i+3]->sym->auth.data.length;
		p_enq_op = &enq_ops[!(job_len & psd_qp_ctx->threshold)];

		if (p_enq_op->pos + in_flight_ops[p_enq_op->slave_idx] ==
				qp_ctx->max_nb_objs) {
			i = nb_ops;
			break;
		}

		sched_ops[p_enq_op->slave_idx][p_enq_op->pos] = ops[i+3];
		ops[i+3]->sym->session = sess->sessions[p_enq_op->slave_idx];
		p_enq_op->pos++;
	}

	for (; i < nb_ops; i++) {
		sess = (struct scheduler_session *)
				ops[i]->sym->session->_private;

		job_len = ops[i]->sym->cipher.data.length;
		job_len += (ops[i]->sym->cipher.data.length == 0) *
				ops[i]->sym->auth.data.length;
		p_enq_op = &enq_ops[!(job_len & psd_qp_ctx->threshold)];

		if (p_enq_op->pos + in_flight_ops[p_enq_op->slave_idx] ==
				qp_ctx->max_nb_objs) {
			i = nb_ops;
			break;
		}

		sched_ops[p_enq_op->slave_idx][p_enq_op->pos] = ops[i];
		ops[i]->sym->session = sess->sessions[p_enq_op->slave_idx];
		p_enq_op->pos++;
	}

	processed_ops_pri = rte_cryptodev_enqueue_burst(
			psd_qp_ctx->primary_slave.dev_id,
			psd_qp_ctx->primary_slave.qp_id,
			sched_ops[PRIMARY_SLAVE_IDX],
			enq_ops[PRIMARY_SLAVE_IDX].pos);
	/* enqueue shall not fail as the slave queue is monitored */
	RTE_ASSERT(processed_ops_pri == enq_ops[PRIMARY_SLAVE_IDX].pos);

	psd_qp_ctx->primary_slave.nb_inflight_cops += processed_ops_pri;

	processed_ops_sec = rte_cryptodev_enqueue_burst(
			psd_qp_ctx->secondary_slave.dev_id,
			psd_qp_ctx->secondary_slave.qp_id,
			sched_ops[SECONDARY_SLAVE_IDX],
			enq_ops[SECONDARY_SLAVE_IDX].pos);
	RTE_ASSERT(processed_ops_sec == enq_ops[SECONDARY_SLAVE_IDX].pos);

	psd_qp_ctx->secondary_slave.nb_inflight_cops += processed_ops_sec;

	return processed_ops_pri + processed_ops_sec;
}
Example #13
/*
 * Write scattered channel packet to TX bufring.
 *
 * The offset of this channel packet is written as a 64bits value
 * immediately after this channel packet.
 *
 * The write goes through three stages:
 *  1. Reserve space in ring buffer for the new data.
 *     Writer atomically moves priv_write_index.
 *  2. Copy the new data into the ring.
 *  3. Update the tail of the ring (visible to host) that indicates
 *     next read location. Writer updates write_index
 */
int
vmbus_txbr_write(struct vmbus_br *tbr, const struct iovec iov[], int iovlen,
		 bool *need_sig)
{
	struct vmbus_bufring *vbr = tbr->vbr;
	uint32_t ring_size = tbr->dsize;
	uint32_t old_windex, next_windex, windex, total;
	uint64_t save_windex;
	int i;

	total = 0;
	for (i = 0; i < iovlen; i++)
		total += iov[i].iov_len;
	total += sizeof(save_windex);

	/* Reserve space in ring */
	do {
		uint32_t avail;

		/* Get current free location */
		old_windex = tbr->windex;

		/* Prevent compiler reordering this with calculation */
		rte_compiler_barrier();

		avail = vmbus_br_availwrite(tbr, old_windex);

		/* If not enough space in ring, then tell caller. */
		if (avail <= total)
			return -EAGAIN;

		next_windex = vmbus_br_idxinc(old_windex, total, ring_size);

		/* Atomic update of next write_index for other threads */
	} while (!rte_atomic32_cmpset(&tbr->windex, old_windex, next_windex));

	/* Space from old..new is now reserved */
	windex = old_windex;
	for (i = 0; i < iovlen; i++) {
		windex = vmbus_txbr_copyto(tbr, windex,
					   iov[i].iov_base, iov[i].iov_len);
	}

	/* Set the offset of the current channel packet. */
	save_windex = ((uint64_t)old_windex) << 32;
	windex = vmbus_txbr_copyto(tbr, windex, &save_windex,
				   sizeof(save_windex));

	/* The region reserved should match region used */
	RTE_ASSERT(windex == next_windex);

	/* Ensure that data is available before updating host index */
	rte_smp_wmb();

	/* Check in for our reservation; wait for our turn to update the host */
	while (!rte_atomic32_cmpset(&vbr->windex, old_windex, next_windex))
		rte_pause();

	/* If host had read all data before this, then need to signal */
	*need_sig |= vmbus_txbr_need_signal(tbr, old_windex);
	return 0;
}
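A hypothetical caller sketch for the scattered write above: gather a descriptor and a payload into one reserved slot and let the caller decide whether to signal the host. The helper name and its arguments are assumptions.

static int send_two_part_packet(struct vmbus_br *tbr,
				void *desc, size_t desc_len,
				void *payload, size_t payload_len)
{
	struct iovec iov[2] = {
		{ .iov_base = desc, .iov_len = desc_len },
		{ .iov_base = payload, .iov_len = payload_len },
	};
	bool need_sig = false;
	int ret;

	ret = vmbus_txbr_write(tbr, iov, 2, &need_sig);
	if (ret == -EAGAIN)
		return ret;		/* ring full, try again later */
	if (need_sig) {
		/* host had read everything; the caller should signal it */
	}
	return ret;
}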
Example #14
/**
 * IPv4 fragmentation.
 *
 * This function implements the fragmentation of IPv4 packets.
 *
 * @param pkt_in
 *   The input packet.
 * @param pkts_out
 *   Array storing the output fragments.
 * @param nb_pkts_out
 *   Number of elements in the pkts_out array.
 * @param mtu_size
 *   Size in bytes of the Maximum Transfer Unit (MTU) for the outgoing IPv4
 *   datagrams. This value includes the size of the IPv4 header.
 * @param pool_direct
 *   MBUF pool used for allocating direct buffers for the output fragments.
 * @param pool_indirect
 *   MBUF pool used for allocating indirect buffers for the output fragments.
 * @return
 *   Upon successful completion - number of output fragments placed
 *   in the pkts_out array.
 *   Otherwise - (-1) * <errno>.
 */
int32_t
rte_ipv4_fragment_packet(struct rte_mbuf *pkt_in,
	struct rte_mbuf **pkts_out,
	uint16_t nb_pkts_out,
	uint16_t mtu_size,
	struct rte_mempool *pool_direct,
	struct rte_mempool *pool_indirect)
{
	struct rte_mbuf *in_seg = NULL;
	struct ipv4_hdr *in_hdr;
	uint32_t out_pkt_pos, in_seg_data_pos;
	uint32_t more_in_segs;
	uint16_t fragment_offset, flag_offset, frag_size;

	frag_size = (uint16_t)(mtu_size - sizeof(struct ipv4_hdr));

	/* Fragment size should be a multiple of 8. */
	RTE_ASSERT((frag_size & IPV4_HDR_FO_MASK) == 0);

	in_hdr = rte_pktmbuf_mtod(pkt_in, struct ipv4_hdr *);
	flag_offset = rte_cpu_to_be_16(in_hdr->fragment_offset);

	/* If Don't Fragment flag is set */
	if (unlikely ((flag_offset & IPV4_HDR_DF_MASK) != 0))
		return -ENOTSUP;

	/* Check that pkts_out is big enough to hold all fragments */
	if (unlikely(frag_size * nb_pkts_out <
	    (uint16_t)(pkt_in->pkt_len - sizeof (struct ipv4_hdr))))
		return -EINVAL;

	in_seg = pkt_in;
	in_seg_data_pos = sizeof(struct ipv4_hdr);
	out_pkt_pos = 0;
	fragment_offset = 0;

	more_in_segs = 1;
	while (likely(more_in_segs)) {
		struct rte_mbuf *out_pkt = NULL, *out_seg_prev = NULL;
		uint32_t more_out_segs;
		struct ipv4_hdr *out_hdr;

		/* Allocate direct buffer */
		out_pkt = rte_pktmbuf_alloc(pool_direct);
		if (unlikely(out_pkt == NULL)) {
			__free_fragments(pkts_out, out_pkt_pos);
			return -ENOMEM;
		}

		/* Reserve space for the IP header that will be built later */
		out_pkt->data_len = sizeof(struct ipv4_hdr);
		out_pkt->pkt_len = sizeof(struct ipv4_hdr);

		out_seg_prev = out_pkt;
		more_out_segs = 1;
		while (likely(more_out_segs && more_in_segs)) {
			struct rte_mbuf *out_seg = NULL;
			uint32_t len;

			/* Allocate indirect buffer */
			out_seg = rte_pktmbuf_alloc(pool_indirect);
			if (unlikely(out_seg == NULL)) {
				rte_pktmbuf_free(out_pkt);
				__free_fragments(pkts_out, out_pkt_pos);
				return -ENOMEM;
			}
			out_seg_prev->next = out_seg;
			out_seg_prev = out_seg;

			/* Prepare indirect buffer */
			rte_pktmbuf_attach(out_seg, in_seg);
			len = mtu_size - out_pkt->pkt_len;
			if (len > (in_seg->data_len - in_seg_data_pos)) {
				len = in_seg->data_len - in_seg_data_pos;
			}
			out_seg->data_off = in_seg->data_off + in_seg_data_pos;
			out_seg->data_len = (uint16_t)len;
			out_pkt->pkt_len = (uint16_t)(len +
			    out_pkt->pkt_len);
			out_pkt->nb_segs += 1;
			in_seg_data_pos += len;

			/* Current output packet (i.e. fragment) done ? */
			if (unlikely(out_pkt->pkt_len >= mtu_size))
				more_out_segs = 0;

			/* Current input segment done ? */
			if (unlikely(in_seg_data_pos == in_seg->data_len)) {
				in_seg = in_seg->next;
				in_seg_data_pos = 0;

				if (unlikely(in_seg == NULL))
					more_in_segs = 0;
			}
		}

		/* Build the IP header */

		out_hdr = rte_pktmbuf_mtod(out_pkt, struct ipv4_hdr *);

		__fill_ipv4hdr_frag(out_hdr, in_hdr,
		    (uint16_t)out_pkt->pkt_len,
		    flag_offset, fragment_offset, more_in_segs);

		fragment_offset = (uint16_t)(fragment_offset +
		    out_pkt->pkt_len - sizeof(struct ipv4_hdr));

		out_pkt->ol_flags |= PKT_TX_IP_CKSUM;
		out_pkt->l3_len = sizeof(struct ipv4_hdr);

		/* Write the fragment to the output list */
		pkts_out[out_pkt_pos] = out_pkt;
		out_pkt_pos ++;
	}

	return out_pkt_pos;
}
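A hypothetical caller sketch: split one oversized IPv4 packet into at most four 1500-byte fragments. The two mempools are assumptions and would normally come from rte_pktmbuf_pool_create(); a real caller would transmit the fragments instead of freeing them.

static int fragment_one(struct rte_mbuf *pkt,
			struct rte_mempool *direct_pool,
			struct rte_mempool *indirect_pool)
{
	struct rte_mbuf *frags[4];
	int32_t nb, i;

	nb = rte_ipv4_fragment_packet(pkt, frags, RTE_DIM(frags), 1500,
				      direct_pool, indirect_pool);
	if (nb < 0)
		return nb;	/* -EINVAL, -ENOTSUP or -ENOMEM, see above */

	for (i = 0; i < nb; i++)
		rte_pktmbuf_free(frags[i]);
	rte_pktmbuf_free(pkt);	/* the input mbuf stays owned by the caller */
	return nb;
}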
Example #15
static int
fs_execute_cmd(struct sub_device *sdev, char *cmdline)
{
	FILE *fp;
	/* store possible newline as well */
	char output[DEVARGS_MAXLEN + 1];
	size_t len;
	int old_err;
	int ret, pclose_ret;

	RTE_ASSERT(cmdline != NULL || sdev->cmdline != NULL);
	if (sdev->cmdline == NULL) {
		size_t i;

		len = strlen(cmdline) + 1;
		sdev->cmdline = calloc(1, len);
		if (sdev->cmdline == NULL) {
			ERROR("Command line allocation failed");
			return -ENOMEM;
		}
		snprintf(sdev->cmdline, len, "%s", cmdline);
		/* Replace all commas in the command line by spaces */
		for (i = 0; i < len; i++)
			if (sdev->cmdline[i] == ',')
				sdev->cmdline[i] = ' ';
	}
	DEBUG("'%s'", sdev->cmdline);
	old_err = errno;
	fp = popen(sdev->cmdline, "r");
	if (fp == NULL) {
		ret = errno;
		ERROR("popen: %s", strerror(errno));
		errno = old_err;
		return ret;
	}
	/* We only read one line */
	if (fgets(output, sizeof(output) - 1, fp) == NULL) {
		DEBUG("Could not read command output");
		ret = -ENODEV;
		goto ret_pclose;
	}
	fs_sanitize_cmdline(output);
	if (output[0] == '\0') {
		ret = -ENODEV;
		goto ret_pclose;
	}
	ret = fs_parse_device(sdev, output);
	if (ret) {
		ERROR("Parsing device '%s' failed", output);
		goto ret_pclose;
	}
ret_pclose:
	pclose_ret = pclose(fp);
	if (pclose_ret) {
		pclose_ret = errno;
		ERROR("pclose: %s", strerror(errno));
		errno = old_err;
		return pclose_ret;
	}
	return ret;
}
Example #16
/**
 * lio_mbox_read:
 * @mbox: Pointer to the mailbox
 *
 * Reads the 8-bytes of data from the mbox register
 * Writes back the acknowledgment indicating completion of read
 */
int
lio_mbox_read(struct lio_mbox *mbox)
{
	union lio_mbox_message msg;
	int ret = 0;

	msg.mbox_msg64 = rte_read64(mbox->mbox_read_reg);

	if ((msg.mbox_msg64 == LIO_PFVFACK) || (msg.mbox_msg64 == LIO_PFVFSIG))
		return 0;

	if (mbox->state & LIO_MBOX_STATE_REQ_RECEIVING) {
		mbox->mbox_req.data[mbox->mbox_req.recv_len - 1] =
					msg.mbox_msg64;
		mbox->mbox_req.recv_len++;
	} else {
		if (mbox->state & LIO_MBOX_STATE_RES_RECEIVING) {
			mbox->mbox_resp.data[mbox->mbox_resp.recv_len - 1] =
					msg.mbox_msg64;
			mbox->mbox_resp.recv_len++;
		} else {
			if ((mbox->state & LIO_MBOX_STATE_IDLE) &&
					(msg.s.type == LIO_MBOX_REQUEST)) {
				mbox->state &= ~LIO_MBOX_STATE_IDLE;
				mbox->state |= LIO_MBOX_STATE_REQ_RECEIVING;
				mbox->mbox_req.msg.mbox_msg64 = msg.mbox_msg64;
				mbox->mbox_req.q_no = mbox->q_no;
				mbox->mbox_req.recv_len = 1;
			} else {
				if ((mbox->state &
				     LIO_MBOX_STATE_RES_PENDING) &&
				    (msg.s.type == LIO_MBOX_RESPONSE)) {
					mbox->state &=
						~LIO_MBOX_STATE_RES_PENDING;
					mbox->state |=
						LIO_MBOX_STATE_RES_RECEIVING;
					mbox->mbox_resp.msg.mbox_msg64 =
								msg.mbox_msg64;
					mbox->mbox_resp.q_no = mbox->q_no;
					mbox->mbox_resp.recv_len = 1;
				} else {
					rte_write64(LIO_PFVFERR,
						    mbox->mbox_read_reg);
					mbox->state |= LIO_MBOX_STATE_ERROR;
					return -1;
				}
			}
		}
	}

	if (mbox->state & LIO_MBOX_STATE_REQ_RECEIVING) {
		if (mbox->mbox_req.recv_len < msg.s.len) {
			ret = 0;
		} else {
			mbox->state &= ~LIO_MBOX_STATE_REQ_RECEIVING;
			mbox->state |= LIO_MBOX_STATE_REQ_RECEIVED;
			ret = 1;
		}
	} else {
		if (mbox->state & LIO_MBOX_STATE_RES_RECEIVING) {
			if (mbox->mbox_resp.recv_len < msg.s.len) {
				ret = 0;
			} else {
				mbox->state &= ~LIO_MBOX_STATE_RES_RECEIVING;
				mbox->state |= LIO_MBOX_STATE_RES_RECEIVED;
				ret = 1;
			}
		} else {
			RTE_ASSERT(0);
		}
	}

	rte_write64(LIO_PFVFACK, mbox->mbox_read_reg);

	return ret;
}
Example #17
/**
 * Process a crypto operation and complete an AES-GCM job for submission
 * to the multi buffer library for processing.
 *
 * @param	op		symmetric crypto operation
 * @param	session		GCM session
 *
 * @return
 *  - 0 on success, -1 on error
 */
static int
process_gcm_crypto_op(struct rte_crypto_sym_op *op,
		struct aesni_gcm_session *session)
{
	uint8_t *src, *dst;
	struct rte_mbuf *m_src = op->m_src;
	uint32_t offset = op->cipher.data.offset;
	uint32_t part_len, total_len, data_len;

	RTE_ASSERT(m_src != NULL);

	while (offset >= m_src->data_len) {
		offset -= m_src->data_len;
		m_src = m_src->next;

		RTE_ASSERT(m_src != NULL);
	}

	data_len = m_src->data_len - offset;
	part_len = (data_len < op->cipher.data.length) ? data_len :
			op->cipher.data.length;

	/* A destination buffer is required when the source buffer is segmented */
	RTE_ASSERT((part_len == op->cipher.data.length) ||
			((part_len != op->cipher.data.length) &&
					(op->m_dst != NULL)));
	/* Segmented destination buffer is not supported */
	RTE_ASSERT((op->m_dst == NULL) ||
			((op->m_dst != NULL) &&
					rte_pktmbuf_is_contiguous(op->m_dst)));


	dst = op->m_dst ?
			rte_pktmbuf_mtod_offset(op->m_dst, uint8_t *,
					op->cipher.data.offset) :
			rte_pktmbuf_mtod_offset(op->m_src, uint8_t *,
					op->cipher.data.offset);

	src = rte_pktmbuf_mtod_offset(m_src, uint8_t *, offset);

	/* sanity checks */
	if (op->cipher.iv.length != 16 && op->cipher.iv.length != 12 &&
			op->cipher.iv.length != 0) {
		GCM_LOG_ERR("iv");
		return -1;
	}

	/*
	 * GCM in 12-byte IV mode uses a 16-byte pre-counter block: set its
	 * big-endian LSB to 1. The driver expects all 16 bytes to be allocated.
	 */
	if (op->cipher.iv.length == 12) {
		uint32_t *iv_padd = (uint32_t *)&op->cipher.iv.data[12];
		*iv_padd = rte_bswap32(1);
	}

	if (op->auth.digest.length != 16 &&
			op->auth.digest.length != 12 &&
			op->auth.digest.length != 8) {
		GCM_LOG_ERR("digest");
		return -1;
	}

	if (session->op == AESNI_GCM_OP_AUTHENTICATED_ENCRYPTION) {

		aesni_gcm_enc[session->key].init(&session->gdata,
				op->cipher.iv.data,
				op->auth.aad.data,
				(uint64_t)op->auth.aad.length);

		aesni_gcm_enc[session->key].update(&session->gdata, dst, src,
				(uint64_t)part_len);
		total_len = op->cipher.data.length - part_len;

		while (total_len) {
			dst += part_len;
			m_src = m_src->next;

			RTE_ASSERT(m_src != NULL);

			src = rte_pktmbuf_mtod(m_src, uint8_t *);
			part_len = (m_src->data_len < total_len) ?
					m_src->data_len : total_len;

			aesni_gcm_enc[session->key].update(&session->gdata,
					dst, src,
					(uint64_t)part_len);
			total_len -= part_len;
		}

		aesni_gcm_enc[session->key].finalize(&session->gdata,
				op->auth.digest.data,
				(uint64_t)op->auth.digest.length);
	} else { /* session->op == AESNI_GCM_OP_AUTHENTICATED_DECRYPTION */