Example No. 1
0
int bnxt_init_rx_ring_struct(struct bnxt_rx_queue *rxq, unsigned int socket_id)
{
	struct bnxt *bp = rxq->bp;
	struct bnxt_cp_ring_info *cpr;
	struct bnxt_rx_ring_info *rxr;
	struct bnxt_ring *ring;

	rxq->rx_buf_use_size = bp->eth_dev->data->mtu +
			       ETHER_HDR_LEN + ETHER_CRC_LEN +
			       (2 * VLAN_TAG_SIZE);
	rxq->rx_buf_size = rxq->rx_buf_use_size + sizeof(struct rte_mbuf);

	rxr = rte_zmalloc_socket("bnxt_rx_ring",
				 sizeof(struct bnxt_rx_ring_info),
				 RTE_CACHE_LINE_SIZE, socket_id);
	if (rxr == NULL)
		return -ENOMEM;
	rxq->rx_ring = rxr;

	ring = rte_zmalloc_socket("bnxt_rx_ring_struct",
				   sizeof(struct bnxt_ring),
				   RTE_CACHE_LINE_SIZE, socket_id);
	if (ring == NULL)
		return -ENOMEM;
	rxr->rx_ring_struct = ring;
	ring->ring_size = rte_align32pow2(rxq->nb_rx_desc);
	ring->ring_mask = ring->ring_size - 1;
	ring->bd = (void *)rxr->rx_desc_ring;
	ring->bd_dma = rxr->rx_desc_mapping;
	ring->vmem_size = ring->ring_size * sizeof(struct bnxt_sw_rx_bd);
	ring->vmem = (void **)&rxr->rx_buf_ring;

	cpr = rte_zmalloc_socket("bnxt_rx_ring",
				 sizeof(struct bnxt_cp_ring_info),
				 RTE_CACHE_LINE_SIZE, socket_id);
	if (cpr == NULL)
		return -ENOMEM;
	rxq->cp_ring = cpr;

	ring = rte_zmalloc_socket("bnxt_rx_ring_struct",
				   sizeof(struct bnxt_ring),
				   RTE_CACHE_LINE_SIZE, socket_id);
	if (ring == NULL)
		return -ENOMEM;
	cpr->cp_ring_struct = ring;
	ring->ring_size = rxr->rx_ring_struct->ring_size * 2;
	ring->ring_mask = ring->ring_size - 1;
	ring->bd = (void *)cpr->cp_desc_ring;
	ring->bd_dma = cpr->cp_desc_mapping;
	ring->vmem_size = 0;
	ring->vmem = NULL;

	return 0;
}
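The creator above returns -ENOMEM without unwinding allocations that already succeeded; in the bnxt driver the caller is expected to release the queue on failure. As a hedged illustration only (the helper name and its placement are assumptions, not the driver's API), a teardown that mirrors the allocations with rte_free could look like this:

static void bnxt_free_rx_ring_structs(struct bnxt_rx_queue *rxq)
{
	/* rte_free(NULL) is a no-op, so a partially built queue is safe. */
	if (rxq->cp_ring != NULL)
		rte_free(rxq->cp_ring->cp_ring_struct);
	rte_free(rxq->cp_ring);
	rxq->cp_ring = NULL;

	if (rxq->rx_ring != NULL)
		rte_free(rxq->rx_ring->rx_ring_struct);
	rte_free(rxq->rx_ring);
	rxq->rx_ring = NULL;
}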
Example No. 2
0
int bnxt_init_tx_ring_struct(struct bnxt_tx_queue *txq, unsigned int socket_id)
{
	struct bnxt_cp_ring_info *cpr;
	struct bnxt_tx_ring_info *txr;
	struct bnxt_ring *ring;

	txr = rte_zmalloc_socket("bnxt_tx_ring",
				 sizeof(struct bnxt_tx_ring_info),
				 RTE_CACHE_LINE_SIZE, socket_id);
	if (txr == NULL)
		return -ENOMEM;
	txq->tx_ring = txr;

	ring = rte_zmalloc_socket("bnxt_tx_ring_struct",
				  sizeof(struct bnxt_ring),
				  RTE_CACHE_LINE_SIZE, socket_id);
	if (ring == NULL)
		return -ENOMEM;
	txr->tx_ring_struct = ring;
	ring->ring_size = rte_align32pow2(txq->nb_tx_desc + 1);
	ring->ring_mask = ring->ring_size - 1;
	ring->bd = (void *)txr->tx_desc_ring;
	ring->bd_dma = txr->tx_desc_mapping;
	ring->vmem_size = ring->ring_size * sizeof(struct bnxt_sw_tx_bd);
	ring->vmem = (void **)&txr->tx_buf_ring;

	cpr = rte_zmalloc_socket("bnxt_tx_ring",
				 sizeof(struct bnxt_cp_ring_info),
				 RTE_CACHE_LINE_SIZE, socket_id);
	if (cpr == NULL)
		return -ENOMEM;
	txq->cp_ring = cpr;

	ring = rte_zmalloc_socket("bnxt_tx_ring_struct",
				  sizeof(struct bnxt_ring),
				  RTE_CACHE_LINE_SIZE, socket_id);
	if (ring == NULL)
		return -ENOMEM;
	cpr->cp_ring_struct = ring;
	ring->ring_size = txr->tx_ring_struct->ring_size;
	ring->ring_mask = ring->ring_size - 1;
	ring->bd = (void *)cpr->cp_desc_ring;
	ring->bd_dma = cpr->cp_desc_mapping;
	ring->vmem_size = 0;
	ring->vmem = NULL;

	return 0;
}
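Both ring sizes are forced to a power of two so that ring_mask (ring_size - 1) can replace a modulo when producer and consumer indices wrap. A minimal sketch of that index arithmetic (illustrative, not taken from the driver):

static inline uint32_t bnxt_ring_next_idx(const struct bnxt_ring *ring,
					  uint32_t idx)
{
	/* Valid only because ring_size is a power of two and
	 * ring_mask == ring_size - 1, as set up above. */
	return (idx + 1) & ring->ring_mask;
}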
Example No. 3
0
static void *
rte_table_array_create(void *params, int socket_id, uint32_t entry_size)
{
	struct rte_table_array_params *p = params;
	struct rte_table_array *t;
	uint32_t total_cl_size, total_size;

	/* Check input parameters */
	if ((p == NULL) ||
	    (p->n_entries == 0) ||
	    (!rte_is_power_of_2(p->n_entries)))
		return NULL;

	/* Memory allocation */
	total_cl_size = (sizeof(struct rte_table_array) +
			RTE_CACHE_LINE_SIZE) / RTE_CACHE_LINE_SIZE;
	total_cl_size += (p->n_entries * entry_size +
			RTE_CACHE_LINE_SIZE) / RTE_CACHE_LINE_SIZE;
	total_size = total_cl_size * RTE_CACHE_LINE_SIZE;
	t = rte_zmalloc_socket("TABLE", total_size, RTE_CACHE_LINE_SIZE, socket_id);
	if (t == NULL) {
		RTE_LOG(ERR, TABLE,
			"%s: Cannot allocate %u bytes for array table\n",
			__func__, total_size);
		return NULL;
	}

	/* Memory initialization */
	t->entry_size = entry_size;
	t->n_entries = p->n_entries;
	t->offset = p->offset;
	t->entry_pos_mask = t->n_entries - 1;

	return t;
}
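The sizing above rounds each component up by adding a full RTE_CACHE_LINE_SIZE before dividing, which costs one extra cache line whenever a component is already a multiple of the line size. A sketch of the tight equivalent with RTE_ALIGN_CEIL, assuming the same inputs (an alternative formulation, not the library's code):

	uint32_t hdr_size = RTE_ALIGN_CEIL(sizeof(struct rte_table_array),
			RTE_CACHE_LINE_SIZE);
	uint32_t data_size = RTE_ALIGN_CEIL(p->n_entries * entry_size,
			RTE_CACHE_LINE_SIZE);
	uint32_t total_size = hdr_size + data_size;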
Example No. 4
0
static void *
dpdk_knidev_writer_create(void *params, int socket_id)
{
    struct dpdk_knidev_writer_params *conf =
            (struct dpdk_knidev_writer_params *) params;
    struct dpdk_knidev_writer *port;

    /* Check input parameters */
    if ((conf == NULL) ||
        (conf->tx_burst_sz == 0) ||
        (conf->tx_burst_sz > VR_DPDK_TX_BURST_SZ) ||
        (!rte_is_power_of_2(conf->tx_burst_sz))) {
        RTE_LOG(ERR, PORT, "%s: Invalid input parameters\n", __func__);
        return NULL;
    }

    /* Memory allocation */
    port = rte_zmalloc_socket("PORT", sizeof(*port),
            RTE_CACHE_LINE_SIZE, socket_id);
    if (port == NULL) {
        RTE_LOG(ERR, PORT, "%s: Failed to allocate port\n", __func__);
        return NULL;
    }

    /* Initialization */
    port->kni = conf->kni;
    port->tx_burst_sz = conf->tx_burst_sz;
    port->tx_buf_count = 0;
    port->bsz_mask = 1LLU << (conf->tx_burst_sz - 1);

    return port;
}
Example No. 5
0
int vmbus_chan_create(const struct rte_vmbus_device *device,
		      uint16_t relid, uint16_t subid, uint8_t monitor_id,
		      struct vmbus_channel **new_chan)
{
	struct vmbus_channel *chan;
	int err;

	chan = rte_zmalloc_socket("VMBUS", sizeof(*chan), RTE_CACHE_LINE_SIZE,
				  device->device.numa_node);
	if (!chan)
		return -ENOMEM;

	STAILQ_INIT(&chan->subchannel_list);
	chan->device = device;
	chan->subchannel_id = subid;
	chan->relid = relid;
	chan->monitor_id = monitor_id;
	*new_chan = chan;

	err = vmbus_uio_map_rings(chan);
	if (err) {
		rte_free(chan);
		return err;
	}

	return 0;
}
Example No. 6
0
static void *
dpdk_knidev_reader_create(void *params, int socket_id)
{
    struct dpdk_knidev_reader_params *conf =
            (struct dpdk_knidev_reader_params *) params;
    struct dpdk_knidev_reader *port;

    /* Check input parameters */
    if (conf == NULL) {
        RTE_LOG(ERR, PORT, "%s: params is NULL\n", __func__);
        return NULL;
    }

    /* Memory allocation */
    port = rte_zmalloc_socket("PORT", sizeof(*port),
            RTE_CACHE_LINE_SIZE, socket_id);
    if (port == NULL) {
        RTE_LOG(ERR, PORT, "%s: Failed to allocate port\n", __func__);
        return NULL;
    }

    /* Initialization */
    port->kni = conf->kni;

    return port;
}
Example No. 7
0
static void *
rte_port_sink_create(void *params, int socket_id)
{
	struct rte_port_sink *port;
	struct rte_port_sink_params *p = params;

	/* Memory allocation */
	port = rte_zmalloc_socket("PORT", sizeof(*port),
			RTE_CACHE_LINE_SIZE, socket_id);
	if (port == NULL) {
		RTE_LOG(ERR, PORT, "%s: Failed to allocate port\n", __func__);
		return NULL;
	}

	if (!p)
		return port;

	if (p->file_name) {
		int status = PCAP_SINK_OPEN(port, p->file_name,
			p->max_n_pkts);

		if (status < 0) {
			rte_free(port);
			port = NULL;
		}
	}

	return port;
}
Example No. 8
0
static void *
rte_port_ring_writer_create_internal(void *params, int socket_id,
	uint32_t is_multi)
{
	struct rte_port_ring_writer_params *conf =
			(struct rte_port_ring_writer_params *) params;
	struct rte_port_ring_writer *port;

	/* Check input parameters */
	if ((conf == NULL) ||
		(conf->ring == NULL) ||
		(conf->ring->prod.sp_enqueue && is_multi) ||
		(!(conf->ring->prod.sp_enqueue) && !is_multi) ||
		(conf->tx_burst_sz > RTE_PORT_IN_BURST_SIZE_MAX)) {
		RTE_LOG(ERR, PORT, "%s: Invalid Parameters\n", __func__);
		return NULL;
	}

	/* Memory allocation */
	port = rte_zmalloc_socket("PORT", sizeof(*port),
			RTE_CACHE_LINE_SIZE, socket_id);
	if (port == NULL) {
		RTE_LOG(ERR, PORT, "%s: Failed to allocate port\n", __func__);
		return NULL;
	}

	/* Initialization */
	port->ring = conf->ring;
	port->tx_burst_sz = conf->tx_burst_sz;
	port->tx_buf_count = 0;
	port->bsz_mask = 1LLU << (conf->tx_burst_sz - 1);
	port->is_multi = is_multi;

	return port;
}
Example No. 9
0
static void *
rte_port_fd_writer_create(void *params, int socket_id)
{
	struct rte_port_fd_writer_params *conf =
		params;
	struct rte_port_fd_writer *port;

	/* Check input parameters */
	if ((conf == NULL) ||
		(conf->tx_burst_sz == 0) ||
		(conf->tx_burst_sz > RTE_PORT_IN_BURST_SIZE_MAX) ||
		(!rte_is_power_of_2(conf->tx_burst_sz))) {
		RTE_LOG(ERR, PORT, "%s: Invalid input parameters\n", __func__);
		return NULL;
	}

	/* Memory allocation */
	port = rte_zmalloc_socket("PORT", sizeof(*port),
		RTE_CACHE_LINE_SIZE, socket_id);
	if (port == NULL) {
		RTE_LOG(ERR, PORT, "%s: Failed to allocate port\n", __func__);
		return NULL;
	}

	/* Initialization */
	port->fd = conf->fd;
	port->tx_burst_sz = conf->tx_burst_sz;
	port->tx_buf_count = 0;

	return port;
}
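A hedged caller sketch for the creator above. In librte_port this function is normally reached through the port ops table rather than called directly, and the field names below simply follow the accesses made in the function (conf->fd, conf->tx_burst_sz); tap_fd is a placeholder descriptor:

	struct rte_port_fd_writer_params params = {
		.fd = tap_fd,      /* an already-open file descriptor (assumption) */
		.tx_burst_sz = 32, /* power of 2, <= RTE_PORT_IN_BURST_SIZE_MAX */
	};
	void *port = rte_port_fd_writer_create(&params, rte_socket_id());
	if (port == NULL)
		RTE_LOG(ERR, PORT, "fd writer port creation failed\n");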
Example No. 10
0
static void *
rte_port_ring_reader_create_internal(void *params, int socket_id,
	uint32_t is_multi)
{
	struct rte_port_ring_reader_params *conf =
			(struct rte_port_ring_reader_params *) params;
	struct rte_port_ring_reader *port;

	/* Check input parameters */
	if ((conf == NULL) ||
		(conf->ring == NULL) ||
		(conf->ring->cons.sc_dequeue && is_multi) ||
		(!(conf->ring->cons.sc_dequeue) && !is_multi)) {
		RTE_LOG(ERR, PORT, "%s: Invalid Parameters\n", __func__);
		return NULL;
	}

	/* Memory allocation */
	port = rte_zmalloc_socket("PORT", sizeof(*port),
			RTE_CACHE_LINE_SIZE, socket_id);
	if (port == NULL) {
		RTE_LOG(ERR, PORT, "%s: Failed to allocate port\n", __func__);
		return NULL;
	}

	/* Initialization */
	port->ring = conf->ring;

	return port;
}
Example No. 11
0
static void *
rte_port_source_create(void *params, int socket_id)
{
	struct rte_port_source_params *p =
			(struct rte_port_source_params *) params;
	struct rte_port_source *port;

	/* Check input arguments*/
	if ((p == NULL) || (p->mempool == NULL)) {
		RTE_LOG(ERR, PORT, "%s: Invalid params\n", __func__);
		return NULL;
	}

	/* Memory allocation */
	port = rte_zmalloc_socket("PORT", sizeof(*port),
			CACHE_LINE_SIZE, socket_id);
	if (port == NULL) {
		RTE_LOG(ERR, PORT, "%s: Failed to allocate port\n", __func__);
		return NULL;
	}

	/* Initialization */
	port->mempool = (struct rte_mempool *) p->mempool;

	return port;
}
Example No. 12
0
static void *
rte_port_ring_writer_ras_create(void *params, int socket_id, int is_ipv4)
{
	struct rte_port_ring_writer_ras_params *conf =
			params;
	struct rte_port_ring_writer_ras *port;
	uint64_t frag_cycles;

	/* Check input parameters */
	if (conf == NULL) {
		RTE_LOG(ERR, PORT, "%s: Parameter conf is NULL\n", __func__);
		return NULL;
	}
	if (conf->ring == NULL) {
		RTE_LOG(ERR, PORT, "%s: Parameter ring is NULL\n", __func__);
		return NULL;
	}
	if ((conf->tx_burst_sz == 0) ||
	    (conf->tx_burst_sz > RTE_PORT_IN_BURST_SIZE_MAX)) {
		RTE_LOG(ERR, PORT, "%s: Parameter tx_burst_sz is invalid\n",
			__func__);
		return NULL;
	}

	/* Memory allocation */
	port = rte_zmalloc_socket("PORT", sizeof(*port),
			RTE_CACHE_LINE_SIZE, socket_id);
	if (port == NULL) {
		RTE_LOG(ERR, PORT, "%s: Failed to allocate socket\n", __func__);
		return NULL;
	}

	/* Create fragmentation table */
	frag_cycles = (rte_get_tsc_hz() + MS_PER_S - 1) / MS_PER_S * MS_PER_S;
	frag_cycles *= 100;

	port->frag_tbl = rte_ip_frag_table_create(
		RTE_PORT_RAS_N_BUCKETS,
		RTE_PORT_RAS_N_ENTRIES_PER_BUCKET,
		RTE_PORT_RAS_N_ENTRIES,
		frag_cycles,
		socket_id);

	if (port->frag_tbl == NULL) {
		RTE_LOG(ERR, PORT, "%s: rte_ip_frag_table_create failed\n",
			__func__);
		rte_free(port);
		return NULL;
	}

	/* Initialization */
	port->ring = conf->ring;
	port->tx_burst_sz = conf->tx_burst_sz;
	port->tx_buf_count = 0;

	port->f_ras = (is_ipv4 == 1) ? process_ipv4 : process_ipv6;

	return port;
}
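For reference, the frag_cycles expression above first rounds rte_get_tsc_hz() up to a multiple of MS_PER_S (so it is still roughly cycles per second) and then scales it by 100, i.e. reassembly entries live for about 100 seconds' worth of TSC cycles. The same arithmetic written out step by step (a restatement, not a change in behaviour):

	uint64_t cycles_per_ms = (rte_get_tsc_hz() + MS_PER_S - 1) / MS_PER_S;
	uint64_t frag_cycles = cycles_per_ms * MS_PER_S; /* ~cycles per second */
	frag_cycles *= 100;                              /* ~100 s entry lifetime */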
Example No. 13
0
/*
 * Allocates memory for LPM object
 */
struct rte_lpm *
rte_lpm_create(const char *name, int socket_id, int max_rules,
		__rte_unused int flags)
{
	char mem_name[RTE_LPM_NAMESIZE];
	struct rte_lpm *lpm = NULL;
	uint32_t mem_size;
	struct rte_lpm_list *lpm_list;

	/* check that we have an initialised tail queue */
	if ((lpm_list =
	     RTE_TAILQ_LOOKUP_BY_IDX(RTE_TAILQ_LPM, rte_lpm_list)) == NULL) {
		rte_errno = E_RTE_NO_TAILQ;
		return NULL;
	}

	RTE_BUILD_BUG_ON(sizeof(struct rte_lpm_tbl24_entry) != 2);
	RTE_BUILD_BUG_ON(sizeof(struct rte_lpm_tbl8_entry) != 2);

	/* Check user arguments. */
	if ((name == NULL) || (socket_id < -1) || (max_rules == 0)){
		rte_errno = EINVAL;
		return NULL;
	}

	rte_snprintf(mem_name, sizeof(mem_name), "LPM_%s", name);

	/* Determine the amount of memory to allocate. */
	mem_size = sizeof(*lpm) + (sizeof(lpm->rules_tbl[0]) * max_rules);

	rte_rwlock_write_lock(RTE_EAL_TAILQ_RWLOCK);

	/* guarantee there's no existing */
	TAILQ_FOREACH(lpm, lpm_list, next) {
		if (strncmp(name, lpm->name, RTE_LPM_NAMESIZE) == 0)
			break;
	}
	if (lpm != NULL)
		goto exit;

	/* Allocate memory to store the LPM data structures. */
	lpm = (struct rte_lpm *)rte_zmalloc_socket(mem_name, mem_size,
			CACHE_LINE_SIZE, socket_id);
	if (lpm == NULL) {
		RTE_LOG(ERR, LPM, "LPM memory allocation failed\n");
		goto exit;
	}

	/* Save user arguments. */
	lpm->max_rules = max_rules;
	rte_snprintf(lpm->name, sizeof(lpm->name), "%s", name);

	TAILQ_INSERT_TAIL(lpm_list, lpm, next);

exit:	
	rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK);

	return lpm;
}
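A hedged caller sketch for this (older) rte_lpm_create signature, using the argument order shown above; the table name and rule count are illustrative:

	struct rte_lpm *lpm = rte_lpm_create("route_table", rte_socket_id(),
			1024 /* max_rules */, 0 /* flags, unused */);
	if (lpm == NULL) {
		/* rte_errno is set for invalid arguments or a missing tail queue;
		 * allocation failures are only logged by the library. */
		RTE_LOG(ERR, LPM, "LPM table creation failed\n");
	}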
Example No. 14
0
/** Setup a queue pair */
static int
null_crypto_pmd_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id,
		const struct rte_cryptodev_qp_conf *qp_conf,
		int socket_id, struct rte_mempool *session_pool)
{
	struct null_crypto_private *internals = dev->data->dev_private;
	struct null_crypto_qp *qp;
	int retval;

	if (qp_id >= internals->max_nb_qpairs) {
		NULL_CRYPTO_LOG_ERR("Invalid qp_id %u, greater than maximum "
				"number of queue pairs supported (%u).",
				qp_id, internals->max_nb_qpairs);
		return (-EINVAL);
	}

	/* Free memory prior to re-allocation if needed. */
	if (dev->data->queue_pairs[qp_id] != NULL)
		null_crypto_pmd_qp_release(dev, qp_id);

	/* Allocate the queue pair data structure. */
	qp = rte_zmalloc_socket("Null Crypto PMD Queue Pair", sizeof(*qp),
					RTE_CACHE_LINE_SIZE, socket_id);
	if (qp == NULL) {
		NULL_CRYPTO_LOG_ERR("Failed to allocate queue pair memory");
		return (-ENOMEM);
	}

	qp->id = qp_id;
	dev->data->queue_pairs[qp_id] = qp;

	retval = null_crypto_pmd_qp_set_unique_name(dev, qp);
	if (retval) {
		NULL_CRYPTO_LOG_ERR("Failed to create unique name for null "
				"crypto device");
		goto qp_setup_cleanup;
	}

	qp->processed_pkts = null_crypto_pmd_qp_create_processed_pkts_ring(qp,
			qp_conf->nb_descriptors, socket_id);
	if (qp->processed_pkts == NULL) {
		NULL_CRYPTO_LOG_ERR("Failed to create unique name for null "
				"crypto device");
		goto qp_setup_cleanup;
	}

	qp->sess_mp = session_pool;

	memset(&qp->qp_stats, 0, sizeof(qp->qp_stats));

	return 0;

qp_setup_cleanup:
	if (qp)
		rte_free(qp);

	return -1;
}
Example No. 15
0
static inline int
rte_event_dev_queue_config(struct rte_eventdev *dev, uint8_t nb_queues)
{
	uint8_t old_nb_queues = dev->data->nb_queues;
	struct rte_event_queue_conf *queues_cfg;
	unsigned int i;

	RTE_EDEV_LOG_DEBUG("Setup %d queues on device %u", nb_queues,
			 dev->data->dev_id);

	/* First time configuration */
	if (dev->data->queues_cfg == NULL && nb_queues != 0) {
		/* Allocate memory to store queue configuration */
		dev->data->queues_cfg = rte_zmalloc_socket(
				"eventdev->data->queues_cfg",
				sizeof(dev->data->queues_cfg[0]) * nb_queues,
				RTE_CACHE_LINE_SIZE, dev->data->socket_id);
		if (dev->data->queues_cfg == NULL) {
			dev->data->nb_queues = 0;
			RTE_EDEV_LOG_ERR("failed to get mem for queue cfg,"
					"nb_queues %u", nb_queues);
			return -(ENOMEM);
		}
	/* Re-configure */
	} else if (dev->data->queues_cfg != NULL && nb_queues != 0) {
		RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_release, -ENOTSUP);

		for (i = nb_queues; i < old_nb_queues; i++)
			(*dev->dev_ops->queue_release)(dev, i);

		/* Re allocate memory to store queue configuration */
		queues_cfg = dev->data->queues_cfg;
		queues_cfg = rte_realloc(queues_cfg,
				sizeof(queues_cfg[0]) * nb_queues,
				RTE_CACHE_LINE_SIZE);
		if (queues_cfg == NULL) {
			RTE_EDEV_LOG_ERR("failed to realloc queue cfg memory,"
						" nb_queues %u", nb_queues);
			return -(ENOMEM);
		}
		dev->data->queues_cfg = queues_cfg;

		if (nb_queues > old_nb_queues) {
			uint8_t new_qs = nb_queues - old_nb_queues;

			memset(queues_cfg + old_nb_queues, 0,
				sizeof(queues_cfg[0]) * new_qs);
		}
	} else if (dev->data->queues_cfg != NULL && nb_queues == 0) {
		RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_release, -ENOTSUP);

		for (i = nb_queues; i < old_nb_queues; i++)
			(*dev->dev_ops->queue_release)(dev, i);
	}

	dev->data->nb_queues = nb_queues;
	return 0;
}
Example No. 16
0
struct rte_acl_ctx *
rte_acl_create(const struct rte_acl_param *param)
{
	size_t sz;
	struct rte_acl_ctx *ctx;
	struct rte_acl_list *acl_list;
	char name[sizeof(ctx->name)];

	/* check that we have an initialised tail queue */
	acl_list = RTE_TAILQ_LOOKUP_BY_IDX(RTE_TAILQ_ACL, rte_acl_list);
	if (acl_list == NULL) {
		rte_errno = E_RTE_NO_TAILQ;
		return NULL;
	}

	/* check that input parameters are valid. */
	if (param == NULL || param->name == NULL) {
		rte_errno = EINVAL;
		return NULL;
	}

	snprintf(name, sizeof(name), "ACL_%s", param->name);

	/* calculate amount of memory required for pattern set. */
	sz = sizeof(*ctx) + param->max_rule_num * param->rule_size;

	/* get EAL TAILQ lock. */
	rte_rwlock_write_lock(RTE_EAL_TAILQ_RWLOCK);

	/* if we already have one with that name */
	TAILQ_FOREACH(ctx, acl_list, next) {
		if (strncmp(param->name, ctx->name, sizeof(ctx->name)) == 0)
			break;
	}

	/* if ACL with such name doesn't exist, then create a new one. */
	if (ctx == NULL && (ctx = rte_zmalloc_socket(name, sz, CACHE_LINE_SIZE,
			param->socket_id)) != NULL) {

		/* init new allocated context. */
		ctx->rules = ctx + 1;
		ctx->max_rules = param->max_rule_num;
		ctx->rule_sz = param->rule_size;
		ctx->socket_id = param->socket_id;
		snprintf(ctx->name, sizeof(ctx->name), "%s", param->name);

		TAILQ_INSERT_TAIL(acl_list, ctx, next);

	} else if (ctx == NULL) {
		RTE_LOG(ERR, ACL,
			"allocation of %zu bytes on socket %d for %s failed\n",
			sz, param->socket_id, name);
	}

	rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK);
	return ctx;
}
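A hedged caller sketch for rte_acl_create, filling only the rte_acl_param fields the function reads above (name, socket_id, rule_size, max_rule_num); the rule size is illustrative and would normally be derived from the rule layout in use:

	struct rte_acl_param param = {
		.name = "acl_ipv4",
		.socket_id = SOCKET_ID_ANY,
		.rule_size = 64,       /* illustrative */
		.max_rule_num = 1024,
	};
	struct rte_acl_ctx *ctx = rte_acl_create(&param);
	if (ctx == NULL)
		RTE_LOG(ERR, ACL, "ACL context creation failed\n");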
Example No. 17
0
static void *
rte_table_lpm_create(void *params, int socket_id, uint32_t entry_size)
{
	struct rte_table_lpm_params *p = (struct rte_table_lpm_params *) params;
	struct rte_table_lpm *lpm;
	uint32_t total_size, nht_size;

	/* Check input parameters */
	if (p == NULL) {
		RTE_LOG(ERR, TABLE, "%s: NULL input parameters\n", __func__);
		return NULL;
	}
	if (p->n_rules == 0) {
		RTE_LOG(ERR, TABLE, "%s: Invalid n_rules\n", __func__);
		return NULL;
	}
	if (p->entry_unique_size == 0) {
		RTE_LOG(ERR, TABLE, "%s: Invalid entry_unique_size\n",
			__func__);
		return NULL;
	}
	if (p->entry_unique_size > entry_size) {
		RTE_LOG(ERR, TABLE, "%s: Invalid entry_unique_size\n",
			__func__);
		return NULL;
	}

	entry_size = RTE_ALIGN(entry_size, sizeof(uint64_t));

	/* Memory allocation */
	nht_size = RTE_TABLE_LPM_MAX_NEXT_HOPS * entry_size;
	total_size = sizeof(struct rte_table_lpm) + nht_size;
	lpm = rte_zmalloc_socket("TABLE", total_size, RTE_CACHE_LINE_SIZE,
		socket_id);
	if (lpm == NULL) {
		RTE_LOG(ERR, TABLE,
			"%s: Cannot allocate %u bytes for LPM table\n",
			__func__, total_size);
		return NULL;
	}

	/* LPM low-level table creation */
	lpm->lpm = rte_lpm_create("LPM", socket_id, p->n_rules, 0);
	if (lpm->lpm == NULL) {
		rte_free(lpm);
		RTE_LOG(ERR, TABLE, "Unable to create low-level LPM table\n");
		return NULL;
	}

	/* Memory initialization */
	lpm->entry_size = entry_size;
	lpm->entry_unique_size = p->entry_unique_size;
	lpm->n_rules = p->n_rules;
	lpm->offset = p->offset;

	return lpm;
}
Example No. 18
0
static void *
rte_port_ring_reader_frag_create(void *params, int socket_id, int is_ipv4)
{
	struct rte_port_ring_reader_frag_params *conf =
			params;
	struct rte_port_ring_reader_frag *port;

	/* Check input parameters */
	if (conf == NULL) {
		RTE_LOG(ERR, PORT, "%s: Parameter conf is NULL\n", __func__);
		return NULL;
	}
	if (conf->ring == NULL) {
		RTE_LOG(ERR, PORT, "%s: Parameter ring is NULL\n", __func__);
		return NULL;
	}
	if (conf->mtu == 0) {
		RTE_LOG(ERR, PORT, "%s: Parameter mtu is invalid\n", __func__);
		return NULL;
	}
	if (conf->pool_direct == NULL) {
		RTE_LOG(ERR, PORT, "%s: Parameter pool_direct is NULL\n",
			__func__);
		return NULL;
	}
	if (conf->pool_indirect == NULL) {
		RTE_LOG(ERR, PORT, "%s: Parameter pool_indirect is NULL\n",
			__func__);
		return NULL;
	}

	/* Memory allocation */
	port = rte_zmalloc_socket("PORT", sizeof(*port), RTE_CACHE_LINE_SIZE,
		socket_id);
	if (port == NULL) {
		RTE_LOG(ERR, PORT, "%s: port is NULL\n", __func__);
		return NULL;
	}

	/* Initialization */
	port->ring = conf->ring;
	port->mtu = conf->mtu;
	port->metadata_size = conf->metadata_size;
	port->pool_direct = conf->pool_direct;
	port->pool_indirect = conf->pool_indirect;

	port->n_pkts = 0;
	port->pos_pkts = 0;
	port->n_frags = 0;
	port->pos_frags = 0;

	port->f_frag = (is_ipv4) ?
			rte_ipv4_fragment_packet : rte_ipv6_fragment_packet;

	return port;
}
Example No. 19
0
int enic_alloc_rq(struct enic *enic, uint16_t queue_idx,
	unsigned int socket_id, struct rte_mempool *mp,
	uint16_t nb_desc)
{
	int rc;
	struct vnic_rq *rq = &enic->rq[queue_idx];

	rq->socket_id = socket_id;
	rq->mp = mp;

	if (nb_desc) {
		if (nb_desc > enic->config.rq_desc_count) {
			dev_warning(enic,
				"RQ %d - number of rx desc in cmd line (%d)"\
				"is greater than that in the UCSM/CIMC adapter"\
				"policy.  Applying the value in the adapter "\
				"policy (%d).\n",
				queue_idx, nb_desc, enic->config.rq_desc_count);
			nb_desc = enic->config.rq_desc_count;
		}
		dev_info(enic, "RX Queues - effective number of descs:%d\n",
			 nb_desc);
	}

	/* Allocate queue resources */
	rc = vnic_rq_alloc(enic->vdev, rq, queue_idx,
		nb_desc, sizeof(struct rq_enet_desc));
	if (rc) {
		dev_err(enic, "error in allocation of rq\n");
		goto err_exit;
	}

	rc = vnic_cq_alloc(enic->vdev, &enic->cq[queue_idx], queue_idx,
		socket_id, nb_desc,
		sizeof(struct cq_enet_rq_desc));
	if (rc) {
		dev_err(enic, "error in allocation of cq for rq\n");
		goto err_free_rq_exit;
	}

	/* Allocate the mbuf ring */
	rq->mbuf_ring = (struct rte_mbuf **)rte_zmalloc_socket("rq->mbuf_ring",
			sizeof(struct rte_mbuf *) * nb_desc,
			RTE_CACHE_LINE_SIZE, rq->socket_id);

	if (rq->mbuf_ring != NULL)
		return 0;

	/* cleanup on error */
	vnic_cq_free(&enic->cq[queue_idx]);
err_free_rq_exit:
	vnic_rq_free(rq);
err_exit:
	return -ENOMEM;
}
Example No. 20
0
static int add_rx_queue(struct rte_event_eth_rx_adapter *rx_adapter,
		uint8_t eth_dev_id,
		int rx_queue_id,
		const struct rte_event_eth_rx_adapter_queue_conf *queue_conf)
{
	struct eth_device_info *dev_info = &rx_adapter->eth_devices[eth_dev_id];
	struct rte_event_eth_rx_adapter_queue_conf temp_conf;
	uint32_t i;
	int ret;

	if (queue_conf->servicing_weight == 0) {

		struct rte_eth_dev_data *data = dev_info->dev->data;
		if (data->dev_conf.intr_conf.rxq) {
			RTE_EDEV_LOG_ERR("Interrupt driven queues"
					" not supported");
			return -ENOTSUP;
		}
		temp_conf = *queue_conf;

		/* If Rx interrupts are disabled set wt = 1 */
		temp_conf.servicing_weight = 1;
		queue_conf = &temp_conf;
	}

	if (dev_info->rx_queue == NULL) {
		dev_info->rx_queue =
		    rte_zmalloc_socket(rx_adapter->mem_name,
				       dev_info->dev->data->nb_rx_queues *
				       sizeof(struct eth_rx_queue_info), 0,
				       rx_adapter->socket_id);
		if (dev_info->rx_queue == NULL)
			return -ENOMEM;
	}

	if (rx_queue_id == -1) {
		for (i = 0; i < dev_info->dev->data->nb_rx_queues; i++)
			event_eth_rx_adapter_queue_add(rx_adapter,
						dev_info, i,
						queue_conf);
	} else {
		event_eth_rx_adapter_queue_add(rx_adapter, dev_info,
					  (uint16_t)rx_queue_id,
					  queue_conf);
	}

	ret = eth_poll_wrr_calc(rx_adapter);
	if (ret) {
		event_eth_rx_adapter_queue_del(rx_adapter,
					dev_info, rx_queue_id);
		return ret;
	}

	return ret;
}
Example No. 21
0
static int
ssovf_port_setup(struct rte_eventdev *dev, uint8_t port_id,
				const struct rte_event_port_conf *port_conf)
{
	struct ssows *ws;
	uint32_t reg_off;
	uint8_t q;
	struct ssovf_evdev *edev = ssovf_pmd_priv(dev);

	ssovf_func_trace("port=%d", port_id);
	RTE_SET_USED(port_conf);

	/* Free memory prior to re-allocation if needed */
	if (dev->data->ports[port_id] != NULL) {
		ssovf_port_release(dev->data->ports[port_id]);
		dev->data->ports[port_id] = NULL;
	}

	/* Allocate event port memory */
	ws = rte_zmalloc_socket("eventdev ssows",
			sizeof(struct ssows), RTE_CACHE_LINE_SIZE,
			dev->data->socket_id);
	if (ws == NULL) {
		ssovf_log_err("Failed to alloc memory for port=%d", port_id);
		return -ENOMEM;
	}

	ws->base = ssovf_bar(OCTEONTX_SSO_HWS, port_id, 0);
	if (ws->base == NULL) {
		rte_free(ws);
		ssovf_log_err("Failed to get hws base addr port=%d", port_id);
		return -EINVAL;
	}

	reg_off = SSOW_VHWS_OP_GET_WORK0;
	reg_off |= 1 << 4; /* Index_ggrp_mask (Use maskset zero) */
	reg_off |= 1 << 16; /* Wait */
	ws->getwork = ws->base + reg_off;
	ws->port = port_id;

	for (q = 0; q < edev->nb_event_queues; q++) {
		ws->grps[q] = ssovf_bar(OCTEONTX_SSO_GROUP, q, 2);
		if (ws->grps[q] == NULL) {
			rte_free(ws);
			ssovf_log_err("Failed to get grp%d base addr", q);
			return -EINVAL;
		}
	}

	dev->data->ports[port_id] = ws;
	ssovf_log_dbg("port=%d ws=%p", port_id, ws);
	return 0;
}
Example No. 22
0
static int vnic_wq_alloc_bufs(struct vnic_wq *wq)
{
	unsigned int count = wq->ring.desc_count;
	/* Allocate the mbuf ring */
	wq->bufs = (struct vnic_wq_buf *)rte_zmalloc_socket("wq->bufs",
		    sizeof(struct vnic_wq_buf) * count,
		    RTE_CACHE_LINE_SIZE, wq->socket_id);
	wq->head_idx = 0;
	wq->tail_idx = 0;
	if (wq->bufs == NULL)
		return -ENOMEM;
	return 0;
}
Example No. 23
0
static void init_task_qinq_encap4(struct task_base *tbase, struct task_args *targ)
{
	struct task_qinq_encap4 *task = (struct task_qinq_encap4 *)(tbase);
	int socket_id = rte_lcore_to_socket_id(targ->lconf->id);

	task->qinq_tag = targ->qinq_tag;
	task->cpe_table = targ->cpe_table;
	task->cpe_timeout = rte_get_tsc_hz()/1000*targ->cpe_table_timeout_ms;

	if (!strcmp(targ->task_init->sub_mode_str, "pe")) {
		PROX_PANIC(!strcmp(targ->cpe_table_name, ""), "CPE table not configured\n");
		fill_table(targ, task->cpe_table);
	}

#ifdef ENABLE_EXTRA_USER_STATISTICS
	task->n_users = targ->n_users;
	task->stats_per_user = rte_zmalloc_socket(NULL, targ->n_users * sizeof(uint32_t),
						  RTE_CACHE_LINE_SIZE, rte_lcore_to_socket_id(targ->lconf->id));
#endif
	if (targ->runtime_flags & TASK_CLASSIFY) {
		PROX_PANIC(!strcmp(targ->dscp, ""), "DSCP table not specified\n");
		task->dscp = prox_sh_find_socket(socket_id, targ->dscp);
		if (!task->dscp) {
			int ret = lua_to_dscp(prox_lua(), GLOBAL, targ->dscp, socket_id, &task->dscp);
			PROX_PANIC(ret, "Failed to create dscp table from config:\n%s\n",
				   get_lua_to_errors());
			prox_sh_add_socket(socket_id, targ->dscp, task->dscp);
		}
	}

	task->runtime_flags = targ->runtime_flags;

	for (uint32_t i = 0; i < 64; ++i) {
		task->fake_packets[i] = (struct rte_mbuf*)((uint8_t*)&task->keys[i] - sizeof (struct rte_mbuf));
	}

	targ->lconf->ctrl_timeout = rte_get_tsc_hz()/targ->ctrl_freq;
	targ->lconf->ctrl_func_m[targ->task] = arp_msg;

	/* TODO: check whether the reverse mapping should be limited to the
	   elements whose mapping has changed. */

	for (uint32_t i = 0; i < sizeof(targ->mapping)/sizeof(targ->mapping[0]); ++i) {
		task->src_mac[targ->mapping[i]] = *(uint64_t*)&prox_port_cfg[i].eth_addr;
	}

	/* task->src_mac[entry->port_idx] = *(uint64_t*)&prox_port_cfg[entry->port_idx].eth_addr; */
}
Example No. 24
0
/**
 * Give the library a mempool of rte_mbufs with which it can do the
 * rte_mbuf <--> netmap slot conversions.
 */
int
rte_netmap_init(const struct rte_netmap_conf *conf)
{
	size_t buf_ofs, nmif_sz, sz;
	size_t port_rings, port_slots, port_bufs;
	uint32_t i, port_num;

	port_num = RTE_MAX_ETHPORTS;
	port_rings = 2 * conf->max_rings;
	port_slots = port_rings * conf->max_slots;
	port_bufs = port_slots;

	nmif_sz = NETMAP_IF_RING_OFS(port_rings, port_rings, port_slots);
	sz = nmif_sz * port_num;

	buf_ofs = RTE_ALIGN_CEIL(sz, CACHE_LINE_SIZE);
	sz = buf_ofs + port_bufs * conf->max_bufsz * port_num;

	if (sz > UINT32_MAX ||
			(netmap.mem = rte_zmalloc_socket(__func__, sz,
			CACHE_LINE_SIZE, conf->socket_id)) == NULL) {
		RTE_LOG(ERR, USER1, "%s: failed to allocate %zu bytes\n",
			__func__, sz);
		return (-ENOMEM);
	}

	netmap.mem_sz = sz;
	netmap.netif_memsz = nmif_sz;
	netmap.buf_start = (uintptr_t)netmap.mem + buf_ofs;
	netmap.conf = *conf;

	rte_spinlock_init(&netmap_lock);

	/* Mark all ports as unused and set NETIF pointer. */
	for (i = 0; i != RTE_DIM(ports); i++) {
		ports[i].fd = UINT32_MAX;
		ports[i].nmif = (struct netmap_if *)
			((uintptr_t)netmap.mem + nmif_sz * i);
	}

	/* Mark all fd_ports as unused. */
	for (i = 0; i != RTE_DIM(fd_port); i++) {
		fd_port[i].port = FD_PORT_FREE;
	}

	return (0);
}
Example No. 25
0
/**
 * Internal helper to allocate memory once for several disparate objects.
 *
 * The most restrictive alignment constraint for standard objects is assumed
 * to be sizeof(double) and is used as a default value.
 *
 * C11 code would include stdalign.h and use alignof(max_align_t) however
 * we'll stick with C99 for the time being.
 */
static inline size_t
mlx4_mallocv_inline(const char *type, const struct mlx4_malloc_vec *vec,
		    unsigned int cnt, int zero, int socket)
{
	unsigned int i;
	size_t size;
	size_t least;
	uint8_t *data = NULL;
	int fill = !vec[0].addr;

fill:
	size = 0;
	least = 0;
	for (i = 0; i < cnt; ++i) {
		size_t align = (uintptr_t)vec[i].align;

		if (!align) {
			align = sizeof(double);
		} else if (!rte_is_power_of_2(align)) {
			rte_errno = EINVAL;
			goto error;
		}
		if (least < align)
			least = align;
		align = RTE_ALIGN_CEIL(size, align);
		size = align + vec[i].size;
		if (fill && vec[i].addr)
			*vec[i].addr = data + align;
	}
	if (fill)
		return size;
	if (!zero)
		data = rte_malloc_socket(type, size, least, socket);
	else
		data = rte_zmalloc_socket(type, size, least, socket);
	if (data) {
		fill = 1;
		goto fill;
	}
	rte_errno = ENOMEM;
error:
	for (i = 0; i != cnt; ++i)
		if (vec[i].addr)
			*vec[i].addr = NULL;
	return 0;
}
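The helper runs in two passes over the same vector: when vec[0].addr is NULL it only returns the total size; otherwise it allocates once and, on the second pass, points every *addr at its aligned offset inside the block. A hypothetical caller (the variable names, sizes and "mlx4_example" tag are assumptions) that carves one zeroed block into two objects:

	uint64_t *stats = NULL;
	uint8_t *scratch = NULL;
	struct mlx4_malloc_vec vec[] = {
		{ .align = RTE_CACHE_LINE_SIZE, .size = 64 * sizeof(*stats),
		  .addr = (void **)&stats },
		{ .align = 0, /* 0 falls back to sizeof(double) */
		  .size = 256, .addr = (void **)&scratch },
	};

	if (mlx4_mallocv_inline("mlx4_example", vec, RTE_DIM(vec), 1,
				SOCKET_ID_ANY) == 0)
		RTE_LOG(ERR, PMD, "allocation failed, rte_errno=%d\n", rte_errno);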
Example No. 26
0
static int
scheduler_config_qp(struct rte_cryptodev *dev, uint16_t qp_id)
{
	struct scheduler_qp_ctx *qp_ctx = dev->data->queue_pairs[qp_id];
	struct psd_scheduler_qp_ctx *ps_qp_ctx;

	ps_qp_ctx = rte_zmalloc_socket(NULL, sizeof(*ps_qp_ctx), 0,
			rte_socket_id());
	if (!ps_qp_ctx) {
		CS_LOG_ERR("failed allocate memory for private queue pair");
		return -ENOMEM;
	}

	qp_ctx->private_qp_ctx = (void *)ps_qp_ctx;

	return 0;
}
Example No. 27
0
int
sfc_ev_qinit(struct sfc_adapter *sa,
	     enum sfc_evq_type type, unsigned int type_index,
	     unsigned int entries, int socket_id, struct sfc_evq **evqp)
{
	struct sfc_evq *evq;
	int rc;

	sfc_log_init(sa, "type=%s type_index=%u",
		     sfc_evq_type2str(type), type_index);

	SFC_ASSERT(rte_is_power_of_2(entries));

	rc = ENOMEM;
	evq = rte_zmalloc_socket("sfc-evq", sizeof(*evq), RTE_CACHE_LINE_SIZE,
				 socket_id);
	if (evq == NULL)
		goto fail_evq_alloc;

	evq->sa = sa;
	evq->type = type;
	evq->entries = entries;

	/* Allocate DMA space */
	rc = sfc_dma_alloc(sa, sfc_evq_type2str(type), type_index,
			   EFX_EVQ_SIZE(evq->entries), socket_id, &evq->mem);
	if (rc != 0)
		goto fail_dma_alloc;

	evq->init_state = SFC_EVQ_INITIALIZED;

	sa->evq_count++;

	*evqp = evq;

	return 0;

fail_dma_alloc:
	rte_free(evq);

fail_evq_alloc:

	sfc_log_init(sa, "failed %d", rc);
	return rc;
}
Example No. 28
0
/** Setup a queue pair */
static int
aesni_gcm_pmd_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id,
		const struct rte_cryptodev_qp_conf *qp_conf,
		int socket_id, struct rte_mempool *session_pool)
{
	struct aesni_gcm_qp *qp = NULL;
	struct aesni_gcm_private *internals = dev->data->dev_private;

	/* Free memory prior to re-allocation if needed. */
	if (dev->data->queue_pairs[qp_id] != NULL)
		aesni_gcm_pmd_qp_release(dev, qp_id);

	/* Allocate the queue pair data structure. */
	qp = rte_zmalloc_socket("AES-NI PMD Queue Pair", sizeof(*qp),
					RTE_CACHE_LINE_SIZE, socket_id);
	if (qp == NULL)
		return (-ENOMEM);

	qp->id = qp_id;
	dev->data->queue_pairs[qp_id] = qp;

	if (aesni_gcm_pmd_qp_set_unique_name(dev, qp))
		goto qp_setup_cleanup;

	qp->ops = (const struct aesni_gcm_ops *)gcm_ops[internals->vector_mode];

	qp->processed_pkts = aesni_gcm_pmd_qp_create_processed_pkts_ring(qp,
			qp_conf->nb_descriptors, socket_id);
	if (qp->processed_pkts == NULL)
		goto qp_setup_cleanup;

	qp->sess_mp = session_pool;

	memset(&qp->qp_stats, 0, sizeof(qp->qp_stats));

	return 0;

qp_setup_cleanup:
	if (qp)
		rte_free(qp);

	return -1;
}
Example No. 29
0
static void *
rte_port_ring_writer_nodrop_create_internal(void *params, int socket_id,
	uint32_t is_multi)
{
	struct rte_port_ring_writer_nodrop_params *conf =
			(struct rte_port_ring_writer_nodrop_params *) params;
	struct rte_port_ring_writer_nodrop *port;

	/* Check input parameters */
	if ((conf == NULL) ||
		(conf->ring == NULL) ||
		(conf->ring->prod.sp_enqueue && is_multi) ||
		(!(conf->ring->prod.sp_enqueue) && !is_multi) ||
		(conf->tx_burst_sz > RTE_PORT_IN_BURST_SIZE_MAX)) {
		RTE_LOG(ERR, PORT, "%s: Invalid Parameters\n", __func__);
		return NULL;
	}

	/* Memory allocation */
	port = rte_zmalloc_socket("PORT", sizeof(*port),
			RTE_CACHE_LINE_SIZE, socket_id);
	if (port == NULL) {
		RTE_LOG(ERR, PORT, "%s: Failed to allocate port\n", __func__);
		return NULL;
	}

	/* Initialization */
	port->ring = conf->ring;
	port->tx_burst_sz = conf->tx_burst_sz;
	port->tx_buf_count = 0;
	port->bsz_mask = 1LLU << (conf->tx_burst_sz - 1);
	port->is_multi = is_multi;

	/*
	 * When n_retries is 0 it means that we should wait for every packet to
	 * send no matter how many retries should it take. To limit number of
	 * branches in fast path, we use UINT64_MAX instead of branching.
	 */
	port->n_retries = (conf->n_retries == 0) ? UINT64_MAX : conf->n_retries;

	return port;
}
Example No. 30
0
/* create fragmentation table */
struct rte_ip_frag_tbl *
rte_ip_frag_table_create(uint32_t bucket_num, uint32_t bucket_entries,
	uint32_t max_entries, uint64_t max_cycles, int socket_id)
{
	struct rte_ip_frag_tbl *tbl;
	size_t sz;
	uint64_t nb_entries;

	nb_entries = rte_align32pow2(bucket_num);
	nb_entries *= bucket_entries;
	nb_entries *= IP_FRAG_HASH_FNUM;

	/* check input parameters. */
	if (rte_is_power_of_2(bucket_entries) == 0 ||
			nb_entries > UINT32_MAX || nb_entries == 0 ||
			nb_entries < max_entries) {
		RTE_LOG(ERR, USER1, "%s: invalid input parameter\n", __func__);
		return (NULL);
	}

	sz = sizeof (*tbl) + nb_entries * sizeof (tbl->pkt[0]);
	if ((tbl = rte_zmalloc_socket(__func__, sz, CACHE_LINE_SIZE,
			socket_id)) == NULL) {
		RTE_LOG(ERR, USER1,
			"%s: allocation of %zu bytes at socket %d failed do\n",
			__func__, sz, socket_id);
		return (NULL);
	}

	RTE_LOG(INFO, USER1, "%s: allocated of %zu bytes at socket %d\n",
		__func__, sz, socket_id);

	tbl->max_cycles = max_cycles;
	tbl->max_entries = max_entries;
	tbl->nb_entries = (uint32_t)nb_entries;
	tbl->nb_buckets = bucket_num;
	tbl->bucket_entries = bucket_entries;
	tbl->entry_mask = (tbl->nb_entries - 1) & ~(tbl->bucket_entries - 1);

	TAILQ_INIT(&(tbl->lru));
	return (tbl);
}
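A hedged usage sketch; the timeout argument is in TSC cycles (here roughly 100 ms worth), and the bucket and entry counts are illustrative:

	uint64_t frag_cycles = (rte_get_tsc_hz() + MS_PER_S - 1) / MS_PER_S *
			100; /* ~100 ms lifetime per fragment set */
	struct rte_ip_frag_tbl *tbl = rte_ip_frag_table_create(
			1024 /* bucket_num */, 16 /* bucket_entries */,
			1024 /* max_entries */, frag_cycles, rte_socket_id());
	if (tbl == NULL)
		RTE_LOG(ERR, USER1, "cannot create IP fragmentation table\n");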