Example #1
static void
skeleton_eventdev_stop(struct rte_eventdev *dev)
{
	struct skeleton_eventdev *skel = skeleton_pmd_priv(dev);

	PMD_DRV_FUNC_TRACE();

	RTE_SET_USED(skel);
}
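All of these examples revolve around one macro. For reference, RTE_SET_USED lives in DPDK's rte_common.h and is essentially the following one-liner (paraphrased):

/* Paraphrased from DPDK's rte_common.h: casting to void "uses" the
 * argument, silencing -Wunused-parameter / -Wunused-variable while
 * generating no code at all. */
#define RTE_SET_USED(x) (void)(x)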
Example #2
static int
ssovf_queue_setup(struct rte_eventdev *dev, uint8_t queue_id,
			      const struct rte_event_queue_conf *queue_conf)
{
	RTE_SET_USED(dev);
	ssovf_func_trace("queue=%d prio=%d", queue_id, queue_conf->priority);

	return ssovf_mbox_priority_set(queue_id, queue_conf->priority);
}
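DPDK also offers the __rte_unused attribute macro as an alternative to RTE_SET_USED. A minimal sketch of that style, with a hypothetical callback (dummy_cb is illustrative, not a DPDK symbol):

#include <rte_common.h>   /* RTE_SET_USED, __rte_unused */

/* Hypothetical callback: the unused parameter is annotated in the
 * signature with __rte_unused rather than consumed in the body. */
static int
dummy_cb(void *ctx __rte_unused, int value)
{
	return value * 2;
}

Either form works; RTE_SET_USED is handy when a parameter is unused only on some build configurations, as several examples below show.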
Example #3
/* check the consistency of mempool (size, cookies, ...) */
void
rte_mempool_audit(const struct rte_mempool *mp)
{
	mempool_audit_cache(mp);
	mempool_audit_cookies(mp);

	/* For case where mempool DEBUG is not set, and cache size is 0 */
	RTE_SET_USED(mp);
}
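The closing comment explains why the macro is needed here: when mempool debugging is disabled and the cache size is 0, both audit helpers can expand to empty statements, leaving mp otherwise unreferenced. A minimal sketch of that gating pattern (the macro bodies are illustrative, not the exact DPDK internals):

#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
#define mempool_audit_cookies(mp) mempool_audit_cookies_impl(mp) /* hypothetical name */
#else
#define mempool_audit_cookies(mp) do {} while (0) /* expands to nothing */
#endif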
Example #4
File: ssovf_evdev.c  Project: btw616/dpdk
static int
ssovf_port_link(struct rte_eventdev *dev, void *port, const uint8_t queues[],
		const uint8_t priorities[], uint16_t nb_links)
{
	uint16_t link;
	uint64_t val;
	struct ssows *ws = port;

	ssovf_func_trace("port=%d nb_links=%d", ws->port, nb_links);
	RTE_SET_USED(dev);
	RTE_SET_USED(priorities);

	for (link = 0; link < nb_links; link++) {
		val = queues[link];
		val |= (1ULL << 24); /* Set membership */
		ssovf_write64(val, ws->base + SSOW_VHWS_GRPMSK_CHGX(0));
	}
	return (int)nb_links;
}
Example #5
int
opdl_xstats_reset(struct rte_eventdev *dev,
		enum rte_event_dev_xstats_mode mode,
		int16_t queue_port_id, const uint32_t ids[],
		uint32_t nb_ids)
{
	struct opdl_evdev *device = opdl_pmd_priv(dev);

	if (!device->do_validation)
		return -ENOTSUP;

	RTE_SET_USED(dev);
	RTE_SET_USED(mode);
	RTE_SET_USED(queue_port_id);
	RTE_SET_USED(ids);
	RTE_SET_USED(nb_ids);

	return -ENOTSUP;
}
Example #6
static int
skeleton_eventdev_close(struct rte_eventdev *dev)
{
	struct skeleton_eventdev *skel = skeleton_pmd_priv(dev);

	PMD_DRV_FUNC_TRACE();

	RTE_SET_USED(skel);

	return 0;
}
Example #7
static void
ssovf_port_def_conf(struct rte_eventdev *dev, uint8_t port_id,
				 struct rte_event_port_conf *port_conf)
{
	struct ssovf_evdev *edev = ssovf_pmd_priv(dev);

	RTE_SET_USED(port_id);
	port_conf->new_event_threshold = edev->max_num_events;
	port_conf->dequeue_depth = 1;
	port_conf->enqueue_depth = 1;
}
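Driver ops like the one above back the public rte_event_port_default_conf_get() API. A hedged usage sketch from the application side (error handling kept minimal; dev_id and port_id are assumed to come from the caller):

#include <rte_eventdev.h>

/* Query the PMD's default port configuration, then set the port up
 * with it. */
static int
setup_one_port(uint8_t dev_id, uint8_t port_id)
{
	struct rte_event_port_conf conf;
	int ret;

	ret = rte_event_port_default_conf_get(dev_id, port_id, &conf);
	if (ret < 0)
		return ret;
	/* conf.new_event_threshold etc. now hold the PMD defaults,
	 * i.e. the values a ssovf_port_def_conf()-style op filled in. */
	return rte_event_port_setup(dev_id, port_id, &conf);
}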
Example #8
File: ssovf_evdev.c  Project: btw616/dpdk
static int
ssovf_port_setup(struct rte_eventdev *dev, uint8_t port_id,
				const struct rte_event_port_conf *port_conf)
{
	struct ssows *ws;
	uint32_t reg_off;
	uint8_t q;
	struct ssovf_evdev *edev = ssovf_pmd_priv(dev);

	ssovf_func_trace("port=%d", port_id);
	RTE_SET_USED(port_conf);

	/* Free memory prior to re-allocation if needed */
	if (dev->data->ports[port_id] != NULL) {
		ssovf_port_release(dev->data->ports[port_id]);
		dev->data->ports[port_id] = NULL;
	}

	/* Allocate event port memory */
	ws = rte_zmalloc_socket("eventdev ssows",
			sizeof(struct ssows), RTE_CACHE_LINE_SIZE,
			dev->data->socket_id);
	if (ws == NULL) {
		ssovf_log_err("Failed to alloc memory for port=%d", port_id);
		return -ENOMEM;
	}

	ws->base = ssovf_bar(OCTEONTX_SSO_HWS, port_id, 0);
	if (ws->base == NULL) {
		rte_free(ws);
		ssovf_log_err("Failed to get hws base addr port=%d", port_id);
		return -EINVAL;
	}

	reg_off = SSOW_VHWS_OP_GET_WORK0;
	reg_off |= 1 << 4; /* Index_ggrp_mask (Use maskset zero) */
	reg_off |= 1 << 16; /* Wait */
	ws->getwork = ws->base + reg_off;
	ws->port = port_id;

	for (q = 0; q < edev->nb_event_queues; q++) {
		ws->grps[q] = ssovf_bar(OCTEONTX_SSO_GROUP, q, 2);
		if (ws->grps[q] == NULL) {
			rte_free(ws);
			ssovf_log_err("Failed to get grp%d base addr", q);
			return -EINVAL;
		}
	}

	dev->data->ports[port_id] = ws;
	ssovf_log_dbg("port=%d ws=%p", port_id, ws);
	return 0;
}
Example #9
static int cxgbe_dev_tx_queue_setup(struct rte_eth_dev *eth_dev,
				    uint16_t queue_idx,	uint16_t nb_desc,
				    unsigned int socket_id,
				    const struct rte_eth_txconf *tx_conf)
{
	struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
	struct adapter *adapter = pi->adapter;
	struct sge *s = &adapter->sge;
	struct sge_eth_txq *txq = &s->ethtxq[pi->first_qset + queue_idx];
	int err = 0;
	unsigned int temp_nb_desc;

	RTE_SET_USED(tx_conf);

	dev_debug(adapter, "%s: eth_dev->data->nb_tx_queues = %d; queue_idx = %d; nb_desc = %d; socket_id = %d; pi->first_qset = %u\n",
		  __func__, eth_dev->data->nb_tx_queues, queue_idx, nb_desc,
		  socket_id, pi->first_qset);

	/*  Free up the existing queue  */
	if (eth_dev->data->tx_queues[queue_idx]) {
		cxgbe_dev_tx_queue_release(eth_dev->data->tx_queues[queue_idx]);
		eth_dev->data->tx_queues[queue_idx] = NULL;
	}

	eth_dev->data->tx_queues[queue_idx] = (void *)txq;

	/* Sanity Checking
	 *
	 * nb_desc should be > 1023 and <= CXGBE_MAX_RING_DESC_SIZE
	 */
	temp_nb_desc = nb_desc;
	if (nb_desc < CXGBE_MIN_RING_DESC_SIZE) {
		dev_warn(adapter, "%s: number of descriptors must be >= %d. Using default [%d]\n",
			 __func__, CXGBE_MIN_RING_DESC_SIZE,
			 CXGBE_DEFAULT_TX_DESC_SIZE);
		temp_nb_desc = CXGBE_DEFAULT_TX_DESC_SIZE;
	} else if (nb_desc > CXGBE_MAX_RING_DESC_SIZE) {
		dev_err(adapter, "%s: number of descriptors must be between %d and %d inclusive. Default [%d]\n",
			__func__, CXGBE_MIN_RING_DESC_SIZE,
			CXGBE_MAX_RING_DESC_SIZE, CXGBE_DEFAULT_TX_DESC_SIZE);
		return -(EINVAL);
	}

	txq->q.size = temp_nb_desc;

	err = t4_sge_alloc_eth_txq(adapter, txq, eth_dev, queue_idx,
				   s->fw_evtq.cntxt_id, socket_id);

	dev_debug(adapter, "%s: txq->q.cntxt_id= %d err = %d\n",
		  __func__, txq->q.cntxt_id, err);

	return err;
}
Example #10
static int
skeleton_eventdev_timeout_ticks(struct rte_eventdev *dev, uint64_t ns,
				 uint64_t *timeout_ticks)
{
	struct skeleton_eventdev *skel = skeleton_pmd_priv(dev);
	uint32_t scale = 1;

	PMD_DRV_FUNC_TRACE();

	RTE_SET_USED(skel);
	*timeout_ticks = ns * scale;

	return 0;
}
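This op serves rte_event_dequeue_timeout_ticks(), the public call applications use to convert a nanosecond timeout into the device-specific tick value accepted by rte_event_dequeue_burst(). A short usage sketch:

#include <rte_eventdev.h>

/* Convert a nanosecond timeout into device-specific dequeue ticks;
 * fall back to 0 (non-blocking dequeue) if the PMD cannot convert. */
static uint64_t
dequeue_ticks_from_ns(uint8_t dev_id, uint64_t ns)
{
	uint64_t ticks = 0;

	if (rte_event_dequeue_timeout_ticks(dev_id, ns, &ticks) < 0)
		ticks = 0;
	return ticks;
}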
Example #11
static int
ssovf_eth_rx_adapter_stop(const struct rte_eventdev *dev,
		const struct rte_eth_dev *eth_dev)
{
	int ret;
	const struct octeontx_nic *nic = eth_dev->data->dev_private;
	RTE_SET_USED(dev);

	ret = strncmp(eth_dev->data->name, "eth_octeontx", 12);
	if (ret)
		return 0;
	octeontx_pki_port_stop(nic->port_id);
	return 0;
}
Example #12
static int
ssovf_eth_rx_adapter_queue_del(const struct rte_eventdev *dev,
		const struct rte_eth_dev *eth_dev, int32_t rx_queue_id)
{
	int ret = 0;
	const struct octeontx_nic *nic = eth_dev->data->dev_private;
	pki_del_qos_t pki_qos;
	RTE_SET_USED(dev);
	RTE_SET_USED(rx_queue_id);

	ret = strncmp(eth_dev->data->name, "eth_octeontx", 12);
	if (ret)
		return -EINVAL;

	memset(&pki_qos, 0, sizeof(pki_del_qos_t));  /* zero first, then fill */
	pki_qos.port_type = 0;
	pki_qos.index = 0;
	ret = octeontx_pki_port_delete_qos(nic->port_id, &pki_qos);
	if (ret < 0)
		ssovf_log_err("Failed to delete QOS port=%d, q=%d",
				nic->port_id, rx_queue_id);
	return ret;
}
Example #13
File: ssovf_evdev.c  Project: btw616/dpdk
static int
ssovf_eth_tx_adapter_caps_get(const struct rte_eventdev *dev,
		const struct rte_eth_dev *eth_dev, uint32_t *caps)
{
	int ret;
	RTE_SET_USED(dev);

	ret = strncmp(eth_dev->data->name, "eth_octeontx", 12);
	if (ret)
		*caps = 0;
	else
		*caps = RTE_EVENT_ETH_TX_ADAPTER_CAP_INTERNAL_PORT;

	return 0;
}
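The op above implements rte_event_eth_tx_adapter_caps_get(); applications typically test the INTERNAL_PORT bit to pick a transmit path. A hedged usage sketch:

#include <stdbool.h>
#include <rte_eventdev.h>

/* True if the Tx adapter can use the event device's internal port. */
static bool
tx_adapter_has_internal_port(uint8_t evdev_id, uint16_t eth_port_id)
{
	uint32_t caps = 0;

	if (rte_event_eth_tx_adapter_caps_get(evdev_id, eth_port_id, &caps) < 0)
		return false;
	return (caps & RTE_EVENT_ETH_TX_ADAPTER_CAP_INTERNAL_PORT) != 0;
}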
Example #14
static int
skeleton_eventdev_port_setup(struct rte_eventdev *dev, uint8_t port_id,
				const struct rte_event_port_conf *port_conf)
{
	struct skeleton_port *sp;
	struct skeleton_eventdev *skel = skeleton_pmd_priv(dev);

	PMD_DRV_FUNC_TRACE();

	RTE_SET_USED(skel);
	RTE_SET_USED(port_conf);

	/* Free memory prior to re-allocation if needed */
	if (dev->data->ports[port_id] != NULL) {
		PMD_DRV_LOG(DEBUG, "Freeing memory prior to re-allocation %d",
				port_id);
		skeleton_eventdev_port_release(dev->data->ports[port_id]);
		dev->data->ports[port_id] = NULL;
	}

	/* Allocate event port memory */
	sp = rte_zmalloc_socket("eventdev port",
			sizeof(struct skeleton_port), RTE_CACHE_LINE_SIZE,
			dev->data->socket_id);
	if (sp == NULL) {
		PMD_DRV_ERR("Failed to allocate sp port_id=%d", port_id);
		return -ENOMEM;
	}

	sp->port_id = port_id;

	PMD_DRV_LOG(DEBUG, "[%d] sp=%p", port_id, sp);

	dev->data->ports[port_id] = sp;
	return 0;
}
Example #15
static void
usock_close(struct vr_usocket *usockp)
{
    int i;
    struct vr_usocket *parent;

    RTE_SET_USED(parent);

    if (!usockp)
        return;

    RTE_LOG(DEBUG, USOCK, "%s[%lx]: FD %d\n", __func__, pthread_self(), usockp->usock_fd);
    usock_unbind(usockp);
    usock_deinit_poll(usockp);

    for (i = 0; i < usockp->usock_cfds; i++) {
        usock_close(usockp->usock_children[i]);
    }

    RTE_LOG(DEBUG, USOCK, "%s: closing FD %d\n", __func__, usockp->usock_fd);
    close(usockp->usock_fd);

    if (!usockp->usock_mbuf_pool && usockp->usock_rx_buf) {
        vr_free(usockp->usock_rx_buf, VR_USOCK_BUF_OBJECT);
        usockp->usock_rx_buf = NULL;
    }

    if (usockp->usock_iovec) {
        vr_free(usockp->usock_iovec, VR_USOCK_IOVEC_OBJECT);
        usockp->usock_iovec = NULL;
    }

    if (usockp->usock_mbuf_pool) {
        /* no api to destroy a pool */
    }

    if (usockp->usock_proto == PACKET) {
        RTE_LOG(DEBUG, USOCK, "%s[%lx]: unlinking %s\n", __func__,
            pthread_self(), VR_PACKET_UNIX_FILE);
        unlink(VR_PACKET_UNIX_FILE);
    }

    usockp->usock_io_in_progress = 0;

    vr_free(usockp, VR_USOCK_OBJECT);

    return;
}
Example #16
File: ssovf_evdev.c  Project: btw616/dpdk
static int
ssovf_eth_rx_adapter_queue_add(const struct rte_eventdev *dev,
		const struct rte_eth_dev *eth_dev, int32_t rx_queue_id,
		const struct rte_event_eth_rx_adapter_queue_conf *queue_conf)
{
	int ret = 0;
	const struct octeontx_nic *nic = eth_dev->data->dev_private;
	pki_mod_qos_t pki_qos;
	RTE_SET_USED(dev);

	ret = strncmp(eth_dev->data->name, "eth_octeontx", 12);
	if (ret)
		return -EINVAL;

	if (rx_queue_id >= 0)
		return -EINVAL;

	if (queue_conf->ev.sched_type == RTE_SCHED_TYPE_PARALLEL)
		return -ENOTSUP;

	memset(&pki_qos, 0, sizeof(pki_mod_qos_t));

	pki_qos.port_type = 0;
	pki_qos.index = 0;
	pki_qos.mmask.f_tag_type = 1;
	pki_qos.mmask.f_port_add = 1;
	pki_qos.mmask.f_grp_ok = 1;
	pki_qos.mmask.f_grp_bad = 1;
	pki_qos.mmask.f_grptag_ok = 1;
	pki_qos.mmask.f_grptag_bad = 1;

	pki_qos.tag_type = queue_conf->ev.sched_type;
	pki_qos.qos_entry.port_add = 0;
	pki_qos.qos_entry.ggrp_ok = queue_conf->ev.queue_id;
	pki_qos.qos_entry.ggrp_bad = queue_conf->ev.queue_id;
	pki_qos.qos_entry.grptag_bad = 0;
	pki_qos.qos_entry.grptag_ok = 0;

	ret = octeontx_pki_port_modify_qos(nic->port_id, &pki_qos);
	if (ret < 0)
		ssovf_log_err("failed to modify QOS, port=%d, q=%d",
				nic->port_id, queue_conf->ev.queue_id);

	return ret;
}
Example #17
static void
lsi_event_callback(uint8_t port_id, enum rte_eth_event_type type, void *param)
{
    struct rte_eth_link link;

    RTE_SET_USED(param);

    printf("\n\nIn registered callback...\n");
    printf("Event type: %s\n", type == RTE_ETH_EVENT_INTR_LSC ? "LSC interrupt" : "unknown event");
    rte_eth_link_get_nowait(port_id, &link);
    if (link.link_status) {
        printf("Port %d Link Up - speed %u Mbps - %s\n\n",
                port_id, (unsigned)link.link_speed,
            (link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
                ("full-duplex") : ("half-duplex"));
    } else
        printf("Port %d Link Down\n\n", port_id);
}
Example #18
File: ssovf_evdev.c  Project: btw616/dpdk
static int
ssovf_port_unlink(struct rte_eventdev *dev, void *port, uint8_t queues[],
			uint16_t nb_unlinks)
{
	uint16_t unlink;
	uint64_t val;
	struct ssows *ws = port;

	ssovf_func_trace("port=%d nb_links=%d", ws->port, nb_unlinks);
	RTE_SET_USED(dev);

	for (unlink = 0; unlink < nb_unlinks; unlink++) {
		val = queues[unlink];
		val &= ~(1ULL << 24); /* Clear membership */
		ssovf_write64(val, ws->base + SSOW_VHWS_GRPMSK_CHGX(0));
	}
	return (int)nb_unlinks;
}
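Examples #4 and #18 are the two halves of one mechanism, driven from the application side by rte_event_port_link() and rte_event_port_unlink(). A hedged usage sketch:

#include <rte_eventdev.h>

/* Link one queue to a port at normal priority, then unlink it again.
 * Both calls return the number of queues actually (un)linked. */
static int
relink_queue(uint8_t dev_id, uint8_t port_id, uint8_t queue_id)
{
	uint8_t q[1] = { queue_id };
	uint8_t prio[1] = { RTE_EVENT_DEV_PRIORITY_NORMAL };

	if (rte_event_port_link(dev_id, port_id, q, prio, 1) != 1)
		return -1;
	return rte_event_port_unlink(dev_id, port_id, q, 1) == 1 ? 0 : -1;
}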
Example #19
int
perf_test_result(struct evt_test *test, struct evt_options *opt)
{
	RTE_SET_USED(opt);
	int i;
	uint64_t total = 0;
	struct test_perf *t = evt_test_priv(test);

	printf("Packet distribution across worker cores :\n");
	for (i = 0; i < t->nb_workers; i++)
		total += t->worker[i].processed_pkts;
	for (i = 0; i < t->nb_workers; i++)
		printf("Worker %d packets: "CLGRN"%"PRIx64" "CLNRM"percentage:"
				CLGRN" %3.2f\n"CLNRM, i,
				t->worker[i].processed_pkts,
				(((double)t->worker[i].processed_pkts)/total)
				* 100);

	return t->result;
}
Example #20
static int32_t
rte_service_runner_func(void *arg)
{
	RTE_SET_USED(arg);
	uint32_t i;
	const int lcore = rte_lcore_id();
	struct core_state *cs = &lcore_states[lcore];

	while (lcore_states[lcore].runstate == RUNSTATE_RUNNING) {
		const uint64_t service_mask = cs->service_mask;

		for (i = 0; i < RTE_SERVICE_NUM_MAX; i++) {
			/* return value ignored as no change to code flow */
			service_run(i, cs, service_mask);
		}

		rte_smp_rmb();
	}

	lcore_config[lcore].state = WAIT;

	return 0;
}
Example #21
/* dump the cache status */
static unsigned
rte_mempool_dump_cache(const struct rte_mempool *mp)
{
#if RTE_MEMPOOL_CACHE_MAX_SIZE > 0
	unsigned lcore_id;
	unsigned count = 0;
	unsigned cache_count;

	printf("  cache infos:\n");
	printf("    cache_size=%"PRIu32"\n", mp->cache_size);
	for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
		cache_count = mp->local_cache[lcore_id].len;
		printf("    cache_count[%u]=%u\n", lcore_id, cache_count);
		count += cache_count;
	}
	printf("    total_cache_count=%u\n", count);
	return count;
#else
	RTE_SET_USED(mp);
	printf("  cache disabled\n");
	return 0;
#endif
}
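Example #21 shows the other common placement of the macro: an #else branch where a feature is compiled out but the parameter still has to be consumed. A distilled sketch of the pattern, with hypothetical names throughout:

#include <rte_common.h>

struct widget;                          /* hypothetical type */

static unsigned int
widget_stats_report(const struct widget *w)
{
#ifdef WIDGET_STATS                     /* hypothetical build flag */
	return widget_count_entries(w); /* hypothetical helper */
#else
	RTE_SET_USED(w);                /* feature compiled out */
	return 0;
#endif
}

Example #22 below uses the same shape for its RTE_LIBRTE_IEEE1588 branch.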
Example #22
int __attribute__((cold))
ixgbe_rxq_vec_setup(struct ixgbe_rx_queue *rxq)
{
	uintptr_t p;
	struct rte_mbuf mb_def = { .buf_addr = 0 }; /* zeroed mbuf */

	mb_def.nb_segs = 1;
	mb_def.data_off = RTE_PKTMBUF_HEADROOM;
	mb_def.port = rxq->port_id;
	rte_mbuf_refcnt_set(&mb_def, 1);

	/* prevent compiler reordering: rearm_data covers previous fields */
	rte_compiler_barrier();
	p = (uintptr_t)&mb_def.rearm_data;
	rxq->mbuf_initializer = *(uint64_t *)p;
	return 0;
}

int __attribute__((cold))
ixgbe_txq_vec_setup(struct ixgbe_tx_queue *txq)
{
	if (txq->sw_ring_v == NULL)
		return -1;

	/* leave the first one for overflow */
	txq->sw_ring_v = txq->sw_ring_v + 1;
	txq->ops = &vec_txq_ops;

	return 0;
}

int __attribute__((cold))
ixgbe_rx_vec_dev_conf_condition_check(struct rte_eth_dev *dev)
{
#ifndef RTE_LIBRTE_IEEE1588
	struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
	struct rte_fdir_conf *fconf = &dev->data->dev_conf.fdir_conf;

#ifndef RTE_IXGBE_RX_OLFLAGS_ENABLE
	/* without Rx ol_flags, no VP flag report */
	if (rxmode->hw_vlan_strip != 0 ||
	    rxmode->hw_vlan_extend != 0)
		return -1;
#endif

	/* no fdir support */
	if (fconf->mode != RTE_FDIR_MODE_NONE)
		return -1;

	/*
	 * - no csum error report support
	 * - no header split support
	 */
	if (rxmode->hw_ip_checksum == 1 ||
	    rxmode->header_split == 1)
		return -1;

	return 0;
#else
	RTE_SET_USED(dev);
	return -1;
#endif
}
Example #23
static struct vr_usocket *
usock_alloc(unsigned short proto, unsigned short type)
{
    int sock_fd = -1, domain, ret;
    /* socket TX buffer size = (hold flow table entries * size of jumbo frame) */
    int setsocksndbuff = vr_flow_hold_limit * VR_DPDK_MAX_PACKET_SZ;
    int getsocksndbuff;
    socklen_t getsocksndbufflen = sizeof(getsocksndbuff);
    int error = 0, flags;
    unsigned int buf_len;
    struct vr_usocket *usockp = NULL, *child;
    bool is_socket = true;
    unsigned short sock_type;

    RTE_SET_USED(child);

    RTE_LOG(DEBUG, USOCK, "%s[%lx]: proto %u type %u\n", __func__,
                pthread_self(), proto, type);
    switch (type) {
    case TCP:
        domain = AF_INET;
        sock_type = SOCK_STREAM;
        break;

    case UNIX:
    case RAW:
        domain = AF_UNIX;
        sock_type = SOCK_DGRAM;
        break;

    default:
        return NULL;
    }

    if (proto == EVENT) {
        is_socket = false;
        sock_fd = eventfd(0, 0);
        RTE_LOG(DEBUG, USOCK, "%s[%lx]: new event FD %d\n", __func__,
                pthread_self(), sock_fd);
        if (sock_fd < 0)
            return NULL;
    }

    if (is_socket) {
        sock_fd = socket(domain, sock_type, 0);
        RTE_LOG(INFO, USOCK, "%s[%lx]: new socket FD %d\n", __func__,
                pthread_self(), sock_fd);
        if (sock_fd < 0)
            return NULL;

        /* set socket send buffer size */
        ret = setsockopt(sock_fd, SOL_SOCKET, SO_SNDBUF, &setsocksndbuff,
                         sizeof(setsocksndbuff));
        if (ret == 0) {
            /* check if setting buffer succeeded */
            ret = getsockopt(sock_fd, SOL_SOCKET, SO_SNDBUF, &getsocksndbuff,
                             &getsocksndbufflen);
            if (ret == 0) {
                if (getsocksndbuff >= setsocksndbuff) {
                    RTE_LOG(INFO, USOCK, "%s[%lx]: setting socket FD %d send buff size.\n"
                            "Buffer size set to %d (requested %d)\n", __func__,
                            pthread_self(), sock_fd, getsocksndbuff, setsocksndbuff);
                } else { /* set other than requested */
                    RTE_LOG(ERR, USOCK, "%s[%lx]: setting socket FD %d send buff size failed.\n"
                            "Buffer size set to %d (requested %d)\n", __func__,
                            pthread_self(), sock_fd, getsocksndbuff, setsocksndbuff);
                }
            } else { /* requesting buffer size failed */
                RTE_LOG(ERR, USOCK, "%s[%lx]: getting socket FD %d send buff size failed (%d)\n",
                         __func__, pthread_self(), sock_fd, errno);
            }
        } else { /* setting buffer size failed */
            RTE_LOG(ERR, USOCK, "%s[%lx]: setting socket FD %d send buff size %d failed (%d)\n",
                     __func__, pthread_self(), sock_fd, setsocksndbuff, errno);
        }
    }

    usockp = vr_zalloc(sizeof(*usockp), VR_USOCK_OBJECT);
    if (!usockp)
        goto error_exit;

    usockp->usock_type = type;
    usockp->usock_proto = proto;
    usockp->usock_fd = sock_fd;
    usockp->usock_state = INITED;

    if (is_socket) {
        error = vr_usocket_bind(usockp);
        if (error < 0)
            goto error_exit;

        if (usockp->usock_proto == PACKET) {
            error = vr_usocket_connect(usockp);
            if (error < 0)
                goto error_exit;
        }
    }

    switch (proto) {
    case NETLINK:
        usockp->usock_max_cfds = USOCK_MAX_CHILD_FDS;
        buf_len = 0;
        break;

    case PACKET:
        usockp->usock_max_cfds = USOCK_MAX_CHILD_FDS;
        buf_len = 0;
        break;

    case EVENT:
        /* TODO: we don't need the buf since we use stack to send an event */
        buf_len = USOCK_EVENT_BUF_LEN;
        break;

    default:
        buf_len = 0;
        break;
    }

    if (buf_len) {
        usockp->usock_rx_buf = vr_zalloc(buf_len, VR_USOCK_BUF_OBJECT);
        if (!usockp->usock_rx_buf)
            goto error_exit;

        usockp->usock_buf_len = buf_len;
        usock_read_init(usockp);
    }

    if (proto == PACKET) {
        usockp->usock_mbuf_pool = rte_mempool_lookup("packet_mbuf_pool");
        if (!usockp->usock_mbuf_pool) {
            usockp->usock_mbuf_pool = rte_mempool_create("packet_mbuf_pool",
                    PKT0_MBUF_POOL_SIZE, PKT0_MBUF_PACKET_SIZE,
                    PKT0_MBUF_POOL_CACHE_SZ, sizeof(struct rte_pktmbuf_pool_private),
                    vr_dpdk_pktmbuf_pool_init, NULL, vr_dpdk_pktmbuf_init, NULL,
                    rte_socket_id(), 0);
            if (!usockp->usock_mbuf_pool)
                goto error_exit;
        }

        usockp->usock_iovec = vr_zalloc(sizeof(struct iovec) *
                PKT0_MAX_IOV_LEN, VR_USOCK_IOVEC_OBJECT);
        if (!usockp->usock_iovec)
            goto error_exit;

        usock_read_init(usockp);
    }

    RTE_LOG(DEBUG, USOCK, "%s[%lx]: FD %d F_GETFL\n", __func__, pthread_self(),
                usockp->usock_fd);
    flags = fcntl(usockp->usock_fd, F_GETFL);
    if (flags == -1)
        goto error_exit;

    RTE_LOG(DEBUG, USOCK, "%s[%lx]: FD %d F_SETFL\n", __func__, pthread_self(),
                usockp->usock_fd);
    error = fcntl(usockp->usock_fd, F_SETFL, flags | O_NONBLOCK);
    if (error == -1)
        goto error_exit;

    usockp->usock_poll_block = 1;

    return usockp;

error_exit:

    error = errno;
    if (sock_fd >= 0) {
        close(sock_fd);
        sock_fd = -1;
    }

    usock_close(usockp);
    usockp = NULL;
    errno = error;

    return usockp;
}
Example #24
File: cxgbe_ethdev.c  Project: Leon555/dpdk
static int cxgbe_get_eeprom_length(struct rte_eth_dev *dev)
{
	RTE_SET_USED(dev);
	return EEPROMSIZE;
}
Example #25
File: test_debug.c  Project: YBorn/OVDK-QoS
static void
dummy_app_usage(const char *progname)
{
	RTE_SET_USED(progname);
}
Example #26
static int cxgbe_dev_rx_queue_setup(struct rte_eth_dev *eth_dev,
				    uint16_t queue_idx,	uint16_t nb_desc,
				    unsigned int socket_id,
				    const struct rte_eth_rxconf *rx_conf,
				    struct rte_mempool *mp)
{
	struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
	struct adapter *adapter = pi->adapter;
	struct sge *s = &adapter->sge;
	struct sge_eth_rxq *rxq = &s->ethrxq[pi->first_qset + queue_idx];
	int err = 0;
	int msi_idx = 0;
	unsigned int temp_nb_desc;
	struct rte_eth_dev_info dev_info;
	unsigned int pkt_len = eth_dev->data->dev_conf.rxmode.max_rx_pkt_len;

	RTE_SET_USED(rx_conf);

	dev_debug(adapter, "%s: eth_dev->data->nb_rx_queues = %d; queue_idx = %d; nb_desc = %d; socket_id = %d; mp = %p\n",
		  __func__, eth_dev->data->nb_rx_queues, queue_idx, nb_desc,
		  socket_id, mp);

	cxgbe_dev_info_get(eth_dev, &dev_info);

	/* Must accommodate at least ETHER_MIN_MTU */
	if ((pkt_len < dev_info.min_rx_bufsize) ||
	    (pkt_len > dev_info.max_rx_pktlen)) {
		dev_err(adapter, "%s: max pkt len must be > %d and <= %d\n",
			__func__, dev_info.min_rx_bufsize,
			dev_info.max_rx_pktlen);
		return -EINVAL;
	}

	/*  Free up the existing queue  */
	if (eth_dev->data->rx_queues[queue_idx]) {
		cxgbe_dev_rx_queue_release(eth_dev->data->rx_queues[queue_idx]);
		eth_dev->data->rx_queues[queue_idx] = NULL;
	}

	eth_dev->data->rx_queues[queue_idx] = (void *)rxq;

	/* Sanity Checking
	 *
	 * nb_desc should be > 0 and <= CXGBE_MAX_RING_DESC_SIZE
	 */
	temp_nb_desc = nb_desc;
	if (nb_desc < CXGBE_MIN_RING_DESC_SIZE) {
		dev_warn(adapter, "%s: number of descriptors must be >= %d. Using default [%d]\n",
			 __func__, CXGBE_MIN_RING_DESC_SIZE,
			 CXGBE_DEFAULT_RX_DESC_SIZE);
		temp_nb_desc = CXGBE_DEFAULT_RX_DESC_SIZE;
	} else if (nb_desc > CXGBE_MAX_RING_DESC_SIZE) {
		dev_err(adapter, "%s: number of descriptors must be between %d and %d inclusive. Default [%d]\n",
			__func__, CXGBE_MIN_RING_DESC_SIZE,
			CXGBE_MAX_RING_DESC_SIZE, CXGBE_DEFAULT_RX_DESC_SIZE);
		return -(EINVAL);
	}

	rxq->rspq.size = temp_nb_desc;
	/* the free list is embedded in rxq, so size it directly
	 * (the original "&rxq->fl != NULL" test was always true) */
	rxq->fl.size = temp_nb_desc;

	/* Set to jumbo mode if necessary */
	if (pkt_len > ETHER_MAX_LEN)
		eth_dev->data->dev_conf.rxmode.jumbo_frame = 1;
	else
		eth_dev->data->dev_conf.rxmode.jumbo_frame = 0;

	err = t4_sge_alloc_rxq(adapter, &rxq->rspq, false, eth_dev, msi_idx,
			       &rxq->fl, t4_ethrx_handler,
			       t4_get_mps_bg_map(adapter, pi->tx_chan), mp,
			       queue_idx, socket_id);

	dev_debug(adapter, "%s: err = %d; port_id = %d; cntxt_id = %u\n",
		  __func__, err, pi->port_id, rxq->rspq.cntxt_id);
	return err;
}
Example #27
/*
 * Mmap all hugepages of hugepage table: it first opens a file in
 * hugetlbfs, then mmap()s hugepage_sz bytes of it. If orig is set, the
 * virtual address is stored in hugepg_tbl[i].orig_va, else it is stored
 * in hugepg_tbl[i].final_va. The second mapping (when orig is 0) tries to
 * map contiguous physical blocks in contiguous virtual blocks.
 */
static int
map_all_hugepages(struct hugepage_file *hugepg_tbl,
		struct hugepage_info *hpi, int orig)
{
	int fd;
	unsigned i;
	void *virtaddr;
	void *vma_addr = NULL;
	size_t vma_len = 0;

#ifdef RTE_EAL_SINGLE_FILE_SEGMENTS
	RTE_SET_USED(vma_len);
#endif

	for (i = 0; i < hpi->num_pages[0]; i++) {
		uint64_t hugepage_sz = hpi->hugepage_sz;

		if (orig) {
			hugepg_tbl[i].file_id = i;
			hugepg_tbl[i].size = hugepage_sz;
#ifdef RTE_EAL_SINGLE_FILE_SEGMENTS
			eal_get_hugefile_temp_path(hugepg_tbl[i].filepath,
					sizeof(hugepg_tbl[i].filepath), hpi->hugedir,
					hugepg_tbl[i].file_id);
#else
			eal_get_hugefile_path(hugepg_tbl[i].filepath,
					sizeof(hugepg_tbl[i].filepath), hpi->hugedir,
					hugepg_tbl[i].file_id);
#endif
			hugepg_tbl[i].filepath[sizeof(hugepg_tbl[i].filepath) - 1] = '\0';
		}
#ifndef RTE_ARCH_64
		/* for 32-bit systems, don't remap 1G and 16G pages, just reuse
		 * original map address as final map address.
		 */
		else if ((hugepage_sz == RTE_PGSIZE_1G)
			|| (hugepage_sz == RTE_PGSIZE_16G)) {
			hugepg_tbl[i].final_va = hugepg_tbl[i].orig_va;
			hugepg_tbl[i].orig_va = NULL;
			continue;
		}
#endif

#ifndef RTE_EAL_SINGLE_FILE_SEGMENTS
		else if (vma_len == 0) {
			unsigned j, num_pages;

			/* reserve a virtual area for next contiguous
			 * physical block: count the number of
			 * contiguous physical pages. */
			for (j = i+1; j < hpi->num_pages[0] ; j++) {
#ifdef RTE_ARCH_PPC_64
				/* The physical addresses are sorted in
				 * descending order on PPC64 */
				if (hugepg_tbl[j].physaddr !=
				    hugepg_tbl[j-1].physaddr - hugepage_sz)
					break;
#else
				if (hugepg_tbl[j].physaddr !=
				    hugepg_tbl[j-1].physaddr + hugepage_sz)
					break;
#endif
			}
			num_pages = j - i;
			vma_len = num_pages * hugepage_sz;

			/* get the biggest virtual memory area up to
			 * vma_len. If it fails, vma_addr is NULL, so
			 * let the kernel provide the address. */
			vma_addr = get_virtual_area(&vma_len, hpi->hugepage_sz);
			if (vma_addr == NULL)
				vma_len = hugepage_sz;
		}
#endif

		/* try to create hugepage file */
		fd = open(hugepg_tbl[i].filepath, O_CREAT | O_RDWR, 0755);
		if (fd < 0) {
			RTE_LOG(ERR, EAL, "%s(): open failed: %s\n", __func__,
					strerror(errno));
			return -1;
		}

		virtaddr = mmap(vma_addr, hugepage_sz, PROT_READ | PROT_WRITE,
				MAP_SHARED, fd, 0);
		if (virtaddr == MAP_FAILED) {
			RTE_LOG(ERR, EAL, "%s(): mmap failed: %s\n", __func__,
					strerror(errno));
			close(fd);
			return -1;
		}

		if (orig) {
			hugepg_tbl[i].orig_va = virtaddr;
			memset(virtaddr, 0, hugepage_sz);
		}
		else {
			hugepg_tbl[i].final_va = virtaddr;
		}

		/* set shared flock on the file. */
		if (flock(fd, LOCK_SH | LOCK_NB) == -1) {
			RTE_LOG(ERR, EAL, "%s(): Locking file failed:%s \n",
				__func__, strerror(errno));
			close(fd);
			return -1;
		}

		close(fd);

		vma_addr = (char *)vma_addr + hugepage_sz;
		vma_len -= hugepage_sz;
	}
	return 0;
}
Example #28
File: ssovf_evdev.c  Project: btw616/dpdk
static void
ssovf_queue_release(struct rte_eventdev *dev, uint8_t queue_id)
{
	RTE_SET_USED(dev);
	RTE_SET_USED(queue_id);
}