Example #1
/*
 * emit jmp <ofs>
 * where 'ofs' is the target offset for the native code.
 */
static void
emit_abs_jmp(struct bpf_jit_state *st, int32_t ofs)
{
	int32_t joff;
	uint32_t imsz;

	const uint8_t op8 = 0xEB;
	const uint8_t op32 = 0xE9;

	const int32_t sz8 = sizeof(op8) + sizeof(uint8_t);
	const int32_t sz32 = sizeof(op32) + sizeof(uint32_t);

	/* max possible jmp instruction size */
	const int32_t iszm = RTE_MAX(sz8, sz32);

	joff = ofs - st->sz;
	imsz = RTE_MAX(imm_size(joff), imm_size(joff + iszm));

	if (imsz == 1) {
		emit_bytes(st, &op8, sizeof(op8));
		joff -= sz8;
	} else {
		emit_bytes(st, &op32, sizeof(op32));
		joff -= sz32;
	}

	emit_imm(st, joff, imsz);
}
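
emit_bytes() and emit_imm() write raw bytes into the JIT buffer; imm_size() is not shown, but it presumably just picks the narrowest encoding for the offset, along these lines (an assumed sketch, not the verbatim helper):

/* Assumed sketch of imm_size(): return 1 if the offset fits in a
 * signed byte (short jmp encoding), otherwise 4 (near jmp). */
static uint32_t
imm_size(int32_t v)
{
	if (v == (int8_t)v)
		return sizeof(int8_t);
	return sizeof(int32_t);
}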
Example #2
/* this is really a sanity check */
static int
test_macros(int __rte_unused unused_parm)
{
#define SMALLER 0x1000U
#define BIGGER 0x2000U
#define PTR_DIFF (BIGGER - SMALLER)
#define FAIL_MACRO(x) do {\
	printf(#x "() test failed!\n");\
	return -1;\
} while (0)

	uintptr_t unused = 0;

	RTE_SET_USED(unused);

	if ((uintptr_t)RTE_PTR_ADD(SMALLER, PTR_DIFF) != BIGGER)
		FAIL_MACRO(RTE_PTR_ADD);
	if ((uintptr_t)RTE_PTR_SUB(BIGGER, PTR_DIFF) != SMALLER)
		FAIL_MACRO(RTE_PTR_SUB);
	if (RTE_PTR_DIFF(BIGGER, SMALLER) != PTR_DIFF)
		FAIL_MACRO(RTE_PTR_DIFF);
	if (RTE_MAX(SMALLER, BIGGER) != BIGGER)
		FAIL_MACRO(RTE_MAX);
	if (RTE_MIN(SMALLER, BIGGER) != SMALLER)
		FAIL_MACRO(RTE_MIN);

	if (strncmp(RTE_STR(test), "test", sizeof("test")))
		FAIL_MACRO(RTE_STR);

	return 0;
}
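
For reference, RTE_MAX and RTE_MIN are statement-expression macros in rte_common.h, so each argument is evaluated exactly once (paraphrased from the DPDK header):

/* Paraphrased from rte_common.h: the statement-expression form
 * evaluates each argument once, unlike a naive ternary macro. */
#define RTE_MAX(a, b) \
	__extension__ ({ \
		typeof (a) _a = (a); \
		typeof (b) _b = (b); \
		_a > _b ? _a : _b; \
	})

#define RTE_MIN(a, b) \
	__extension__ ({ \
		typeof (a) _a = (a); \
		typeof (b) _b = (b); \
		_a < _b ? _a : _b; \
	})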
Example #3
/*
 * Helper function for memzone_reserve_aligned_thread_unsafe().
 * Calculate the address offset from the start of the segment.
 * Align the offset so that it satisfies the requested alignment and
 * a buffer of the requested length does not cross the specified boundary.
 */
static inline phys_addr_t
align_phys_boundary(const struct rte_memseg *ms, size_t len, size_t align,
	size_t bound)
{
	phys_addr_t addr_offset, bmask, end, start;
	size_t step;

	step = RTE_MAX(align, bound);
	bmask = ~((phys_addr_t)bound - 1);

	/* calculate offset to closest alignment */
	start = RTE_ALIGN_CEIL(ms->phys_addr, align);
	addr_offset = start - ms->phys_addr;

	while (addr_offset + len < ms->len) {

		/* check, do we meet boundary condition */
		end = start + len - (len != 0);
		if ((start & bmask) == (end & bmask))
			break;

		/* calculate next offset */
		start = RTE_ALIGN_CEIL(start + 1, step);
		addr_offset = start - ms->phys_addr;
	}

	return (addr_offset);
}
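
To make the search concrete, here is a standalone re-creation of the loop with RTE_ALIGN_CEIL expanded by hand and purely illustrative values; it prints offset = 0x1100 because the first aligned candidate (0x1000) would cross the 0x2000 boundary:

#include <inttypes.h>
#include <stdio.h>

/* Hand-expanded RTE_ALIGN_CEIL for a power-of-two alignment. */
static uint64_t align_ceil(uint64_t v, uint64_t a)
{
	return (v + a - 1) & ~(a - 1);
}

int main(void)
{
	/* illustrative values: segment at 0x0f00, buffer of 0x1800 bytes,
	 * 0x1000 alignment, must not cross a 0x2000 boundary */
	uint64_t seg_start = 0x0f00, seg_len = 0x10000;
	uint64_t len = 0x1800, align = 0x1000, bound = 0x2000;
	uint64_t step = align > bound ? align : bound;	/* RTE_MAX(align, bound) */
	uint64_t bmask = ~(bound - 1);
	uint64_t start = align_ceil(seg_start, align);

	while (start - seg_start + len < seg_len) {
		uint64_t end = start + len - 1;
		if ((start & bmask) == (end & bmask))
			break;	/* buffer stays inside one boundary window */
		start = align_ceil(start + 1, step);	/* try the next slot */
	}
	printf("offset = 0x%" PRIx64 "\n", start - seg_start);
	return 0;
}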
Example #4
int reg_input(struct input *in)
{
    if (n_inputs == sizeof(inputs)/sizeof(inputs[0]))
        return -1;

    for (int i = 0; i < n_inputs; ++i) {
        if (inputs[i] == in)
            return -1;
    }
    inputs[n_inputs++] = in;
    max_input_fd = RTE_MAX(in->fd, max_input_fd);

    return 0;
}
Example #5
void unreg_input(struct input *in)
{
    int rm, i;

    for (rm = 0; rm < n_inputs; ++rm) {
        if (inputs[rm] == in) {
            break;
        }
    }

    if (rm == n_inputs)
        return;

    for (i = rm + 1; i < n_inputs; ++i) {
        inputs[i - 1] = inputs[i];
    }

    n_inputs--;
    max_input_fd = 0;
    for (i = 0; i < n_inputs; ++i) {
        max_input_fd = RTE_MAX(inputs[i]->fd, max_input_fd);
    }
}
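
Both snippets assume file-scope state along these lines (a hedged sketch; MAX_INPUTS and the callback member are hypothetical names). Tracking max_input_fd with RTE_MAX keeps the first argument of select() current:

#include <sys/select.h>

#define MAX_INPUTS 32                        /* hypothetical capacity */

struct input {
    int fd;                                  /* descriptor watched by the event loop */
    void (*proc_input)(struct input *);      /* hypothetical callback */
};

static struct input *inputs[MAX_INPUTS];
static int n_inputs;
static int max_input_fd;                     /* used as select(max_input_fd + 1, ...) */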
Example #6
/*
 * Initialize all enabled ports: configure per-port RX/TX queues,
 * create the mbuf pool, and set up the RX queues assigned to each lcore.
 */
static int odp_init_ports(unsigned short nb_ports,
    struct odp_user_config *user_conf, struct odp_lcore_config *lcore_conf)
{
    int ret;
    uint8_t portid;
    uint16_t queueid;
    unsigned lcore_id;
    uint8_t nb_rx_queue = 0;
    uint8_t max_rx_queue = 0;
    uint8_t queue, socketid;
    uint32_t n_tx_queue, nb_lcores, nb_mbuf;
    struct ether_addr eth_addr;


    nb_lcores = rte_lcore_count();
    n_tx_queue = nb_lcores;
    if (n_tx_queue > MAX_TX_QUEUE_PER_PORT)
    	n_tx_queue = MAX_TX_QUEUE_PER_PORT;

    printf("\nStart to Init port \n" );

    /* initialize all ports */
    for (portid = 0; portid < nb_ports; portid++) 
    {
        /* skip ports that are not enabled */
        if ((user_conf->port_mask & (1 << portid)) == 0) 
        {
            printf("\nSkipping disabled port %d\n", portid);
            continue;
        }

        /* init port */
        printf("\t port %d:\n", portid);

        nb_rx_queue = odp_get_port_rx_queues_nb(portid, user_conf);

        if (max_rx_queue < nb_rx_queue)
            max_rx_queue = nb_rx_queue;

        printf("\t Creating queues: rx queue number=%d tx queue number=%u...\n",
            nb_rx_queue, (unsigned)n_tx_queue);

        ret = rte_eth_dev_configure(portid, nb_rx_queue, (uint16_t)n_tx_queue, &odp_port_conf);
        if (ret < 0)
        	rte_exit(EXIT_FAILURE, "Cannot configure device: err=%d, port=%d\n", ret, portid);

        rte_eth_macaddr_get(portid, &eth_addr);

        printf ("\t MAC Address:%02X:%02X:%02X:%02X:%02X:%02X \n", 
        	eth_addr.addr_bytes[0], eth_addr.addr_bytes[1],
        	eth_addr.addr_bytes[2], eth_addr.addr_bytes[3],
        	eth_addr.addr_bytes[4], eth_addr.addr_bytes[5]);

        /* init one TX queue per couple (lcore,port) */
        queueid = 0;
        for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++)
        {
            if (rte_lcore_is_enabled(lcore_id) == 0)
            	continue;

            if (user_conf->numa_on)
            	socketid = (uint8_t)rte_lcore_to_socket_id(lcore_id);
            else
            	socketid = 0;

            printf("\t lcore id:%u, tx queue id:%d, socket id:%d \n", lcore_id, queueid, socketid);
            
            ret = rte_eth_tx_queue_setup(portid, queueid, ODP_TX_DESC_DEFAULT, socketid, &odp_tx_conf);
            if (ret < 0)
            	rte_exit(EXIT_FAILURE, "rte_eth_tx_queue_setup: err=%d, " "port=%d\n", ret, portid);

            lcore_conf[lcore_id].tx_queue_id[portid] = queueid;
            
            queueid++;
        }
        
        printf("\n");

    }

    /*
     * Size the pool for the worst case: use the maximum per-port RX queue
     * count (nb_rx_queue only holds the last enabled port's value) and
     * never go below 8192 mbufs.
     */
    nb_mbuf = RTE_MAX((nb_ports * max_rx_queue * ODP_RX_DESC_DEFAULT +
                nb_ports * nb_lcores * MAX_PKT_BURST +
                nb_ports * n_tx_queue * ODP_TX_DESC_DEFAULT +
                nb_lcores * MEMPOOL_CACHE_SIZE), (unsigned)8192);

    /* init memory */
    ret = odp_init_mbuf_pool(nb_mbuf, user_conf);
    if (ret < 0)
    	rte_exit(EXIT_FAILURE, "init_mem failed\n");

    for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) 
    {
        if (rte_lcore_is_enabled(lcore_id) == 0)
            continue;
        
        printf("\nInitializing rx queues on lcore %u ... \n", lcore_id );

        /* init RX queues */
        for(queue = 0; queue < lcore_conf[lcore_id].n_rx_queue; ++queue) 
        {
            portid = lcore_conf[lcore_id].rx_queue_list[queue].port_id;
            queueid = lcore_conf[lcore_id].rx_queue_list[queue].queue_id;

            if (user_conf->numa_on)
                socketid = (uint8_t)rte_lcore_to_socket_id(lcore_id);
            else
                socketid = 0;

            printf("port id:%d, rx queue id: %d, socket id:%d \n", portid, queueid, socketid);

            ret = rte_eth_rx_queue_setup(portid, queueid, ODP_RX_DESC_DEFAULT, socketid, &odp_rx_conf, odp_pktmbuf_pool[socketid]);
            if (ret < 0)
                rte_exit(EXIT_FAILURE, "rte_eth_rx_queue_setup: err=%d," "port=%d\n", ret, portid);
        }
    }

    return 0;
}
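
A quick numeric check of the sizing formula, with assumed values for the configuration constants (all the ODP_* and MEMPOOL_* values below are illustrative, not taken from the original source):

#include <stdio.h>

#define ODP_RX_DESC_DEFAULT 128   /* assumption */
#define ODP_TX_DESC_DEFAULT 512   /* assumption */
#define MAX_PKT_BURST        32   /* assumption */
#define MEMPOOL_CACHE_SIZE  256   /* assumption */

int main(void)
{
    unsigned nb_ports = 2, max_rx_queue = 4, nb_lcores = 4, n_tx_queue = 4;
    unsigned need = nb_ports * max_rx_queue * ODP_RX_DESC_DEFAULT +
            nb_ports * nb_lcores * MAX_PKT_BURST +
            nb_ports * n_tx_queue * ODP_TX_DESC_DEFAULT +
            nb_lcores * MEMPOOL_CACHE_SIZE;
    unsigned nb_mbuf = need > 8192 ? need : 8192;  /* RTE_MAX(need, 8192) */

    /* prints need=6400 -> nb_mbuf=8192: the floor dominates small setups */
    printf("need=%u -> nb_mbuf=%u\n", need, nb_mbuf);
    return 0;
}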
Example #7
static const struct rte_memzone *
memzone_reserve_aligned_thread_unsafe(const char *name, size_t len,
		int socket_id, unsigned flags, unsigned align, unsigned bound)
{
	struct rte_memzone *mz;
	struct rte_mem_config *mcfg;
	size_t requested_len;
	int socket, i;

	/* get pointer to global configuration */
	mcfg = rte_eal_get_configuration()->mem_config;

	/* no more room in config */
	if (mcfg->memzone_cnt >= RTE_MAX_MEMZONE) {
		RTE_LOG(ERR, EAL, "%s(): No more room in config\n", __func__);
		rte_errno = ENOSPC;
		return NULL;
	}

	/* zone already exists */
	if ((memzone_lookup_thread_unsafe(name)) != NULL) {
		RTE_LOG(DEBUG, EAL, "%s(): memzone <%s> already exists\n",
			__func__, name);
		rte_errno = EEXIST;
		return NULL;
	}

	if (strlen(name) >= sizeof(mz->name) - 1) {
		RTE_LOG(DEBUG, EAL, "%s(): memzone <%s>: name too long\n",
			__func__, name);
		rte_errno = ENAMETOOLONG;
		return NULL;
	}

	/* if alignment is not a power of two */
	if (align && !rte_is_power_of_2(align)) {
		RTE_LOG(ERR, EAL, "%s(): Invalid alignment: %u\n", __func__,
				align);
		rte_errno = EINVAL;
		return NULL;
	}

	/* alignment less than cache size is not allowed */
	if (align < RTE_CACHE_LINE_SIZE)
		align = RTE_CACHE_LINE_SIZE;

	/* align length on cache boundary. Check for overflow before doing so */
	if (len > SIZE_MAX - RTE_CACHE_LINE_MASK) {
		rte_errno = EINVAL; /* requested size too big */
		return NULL;
	}

	len += RTE_CACHE_LINE_MASK;
	len &= ~((size_t) RTE_CACHE_LINE_MASK);

	/* save minimal requested length */
	requested_len = RTE_MAX((size_t)RTE_CACHE_LINE_SIZE, len);

	/* check that boundary condition is valid */
	if (bound != 0 && (requested_len > bound || !rte_is_power_of_2(bound))) {
		rte_errno = EINVAL;
		return NULL;
	}

	if ((socket_id != SOCKET_ID_ANY) && (socket_id >= RTE_MAX_NUMA_NODES)) {
		rte_errno = EINVAL;
		return NULL;
	}

	if (!rte_eal_has_hugepages())
		socket_id = SOCKET_ID_ANY;

	if (len == 0) {
		if (bound != 0)
			requested_len = bound;
		else {
			requested_len = find_heap_max_free_elem(&socket_id, align);
			if (requested_len == 0) {
				rte_errno = ENOMEM;
				return NULL;
			}
		}
	}

	if (socket_id == SOCKET_ID_ANY)
		socket = malloc_get_numa_socket();
	else
		socket = socket_id;

	/* allocate memory on heap */
	void *mz_addr = malloc_heap_alloc(&mcfg->malloc_heaps[socket], NULL,
			requested_len, flags, align, bound);

	if ((mz_addr == NULL) && (socket_id == SOCKET_ID_ANY)) {
		/* try other heaps */
		for (i = 0; i < RTE_MAX_NUMA_NODES; i++) {
			if (socket == i)
				continue;

			mz_addr = malloc_heap_alloc(&mcfg->malloc_heaps[i],
					NULL, requested_len, flags, align, bound);
			if (mz_addr != NULL)
				break;
		}
	}

	if (mz_addr == NULL) {
		rte_errno = ENOMEM;
		return NULL;
	}

	const struct malloc_elem *elem = malloc_elem_from_data(mz_addr);

	/* fill the zone in config */
	mz = get_next_free_memzone();

	if (mz == NULL) {
		RTE_LOG(ERR, EAL, "%s(): Cannot find free memzone but there is room "
				"in config!\n", __func__);
		rte_errno = ENOSPC;
		return NULL;
	}

	mcfg->memzone_cnt++;
	snprintf(mz->name, sizeof(mz->name), "%s", name);
	mz->phys_addr = rte_malloc_virt2phy(mz_addr);
	mz->addr = mz_addr;
	mz->len = (requested_len == 0 ? elem->size : requested_len);
	mz->hugepage_sz = elem->ms->hugepage_sz;
	mz->socket_id = elem->ms->socket_id;
	mz->flags = 0;
	mz->memseg_id = elem->ms - rte_eal_get_configuration()->mem_config->memseg;

	return mz;
}
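
For context, a minimal usage sketch of the public rte_memzone_reserve_aligned() wrapper, which takes the memzone lock and then calls the thread-unsafe helper above (zone name and sizes are illustrative):

#include <stdio.h>
#include <rte_memzone.h>
#include <rte_errno.h>

/* Hypothetical caller: reserve 1 MB aligned to 4 KB on any socket. */
static const struct rte_memzone *
app_reserve_zone(void)
{
	const struct rte_memzone *mz;

	mz = rte_memzone_reserve_aligned("app_zone", 1 << 20,
					 SOCKET_ID_ANY, 0, 4096);
	if (mz == NULL)
		printf("reserve failed: %s\n", rte_strerror(rte_errno));
	return mz;
}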
Example #8
static const struct rte_memzone *
memzone_reserve_aligned_thread_unsafe(const char *name, size_t len,
		int socket_id, unsigned flags, unsigned align, unsigned bound)
{
	struct rte_mem_config *mcfg;
	unsigned i = 0;
	int memseg_idx = -1;
	uint64_t addr_offset, seg_offset = 0;
	size_t requested_len;
	size_t memseg_len = 0;
	phys_addr_t memseg_physaddr;
	void *memseg_addr;

	/* get pointer to global configuration */
	mcfg = rte_eal_get_configuration()->mem_config;

	/* no more room in config */
	if (mcfg->memzone_idx >= RTE_MAX_MEMZONE) {
		RTE_LOG(ERR, EAL, "%s(): No more room in config\n", __func__);
		rte_errno = ENOSPC;
		return NULL;
	}

	/* zone already exists */
	if ((memzone_lookup_thread_unsafe(name)) != NULL) {
		RTE_LOG(DEBUG, EAL, "%s(): memzone <%s> already exists\n",
			__func__, name);
		rte_errno = EEXIST;
		return NULL;
	}

	/* if alignment is not a power of two */
	if (align && !rte_is_power_of_2(align)) {
		RTE_LOG(ERR, EAL, "%s(): Invalid alignment: %u\n", __func__,
				align);
		rte_errno = EINVAL;
		return NULL;
	}

	/* alignment less than cache size is not allowed */
	if (align < RTE_CACHE_LINE_SIZE)
		align = RTE_CACHE_LINE_SIZE;


	/* align length on cache boundary. Check for overflow before doing so */
	if (len > SIZE_MAX - RTE_CACHE_LINE_MASK) {
		rte_errno = EINVAL; /* requested size too big */
		return NULL;
	}

	len += RTE_CACHE_LINE_MASK;
	len &= ~((size_t) RTE_CACHE_LINE_MASK);

	/* save minimal requested length */
	requested_len = RTE_MAX((size_t)RTE_CACHE_LINE_SIZE, len);

	/* check that boundary condition is valid */
	if (bound != 0 &&
			(requested_len > bound || !rte_is_power_of_2(bound))) {
		rte_errno = EINVAL;
		return NULL;
	}

	/* find the smallest segment matching requirements */
	for (i = 0; i < RTE_MAX_MEMSEG; i++) {
		/* last segment */
		if (free_memseg[i].addr == NULL)
			break;

		/* empty segment, skip it */
		if (free_memseg[i].len == 0)
			continue;

		/* bad socket ID */
		if (socket_id != SOCKET_ID_ANY &&
		    free_memseg[i].socket_id != SOCKET_ID_ANY &&
		    socket_id != free_memseg[i].socket_id)
			continue;

		/*
		 * calculate offset to closest alignment that
		 * meets boundary conditions.
		 */
		addr_offset = align_phys_boundary(free_memseg + i,
			requested_len, align, bound);

		/* check len */
		if ((requested_len + addr_offset) > free_memseg[i].len)
			continue;

		/* check flags for hugepage sizes */
		if ((flags & RTE_MEMZONE_2MB) &&
				free_memseg[i].hugepage_sz == RTE_PGSIZE_1G)
			continue;
		if ((flags & RTE_MEMZONE_1GB) &&
				free_memseg[i].hugepage_sz == RTE_PGSIZE_2M)
			continue;
		if ((flags & RTE_MEMZONE_16MB) &&
				free_memseg[i].hugepage_sz == RTE_PGSIZE_16G)
			continue;
		if ((flags & RTE_MEMZONE_16GB) &&
				free_memseg[i].hugepage_sz == RTE_PGSIZE_16M)
			continue;

		/* this segment is the best until now */
		if (memseg_idx == -1) {
			memseg_idx = i;
			memseg_len = free_memseg[i].len;
			seg_offset = addr_offset;
		}
		/* find the biggest contiguous zone */
		else if (len == 0) {
			if (free_memseg[i].len > memseg_len) {
				memseg_idx = i;
				memseg_len = free_memseg[i].len;
				seg_offset = addr_offset;
			}
		}
		/*
		 * find the smallest (we already checked that the current
		 * zone length is >= len)
		 */
		else if (free_memseg[i].len + align < memseg_len ||
				(free_memseg[i].len <= memseg_len + align &&
				addr_offset < seg_offset)) {
			memseg_idx = i;
			memseg_len = free_memseg[i].len;
			seg_offset = addr_offset;
		}
	}

	/* no segment found */
	if (memseg_idx == -1) {
		/*
		 * If the RTE_MEMZONE_SIZE_HINT_ONLY flag is specified,
		 * try allocating again without the page-size flags;
		 * otherwise fail.
		 */
		if ((flags & RTE_MEMZONE_SIZE_HINT_ONLY)  &&
		    ((flags & RTE_MEMZONE_1GB) || (flags & RTE_MEMZONE_2MB)
		|| (flags & RTE_MEMZONE_16MB) || (flags & RTE_MEMZONE_16GB)))
			return memzone_reserve_aligned_thread_unsafe(name,
				len, socket_id, 0, align, bound);

		rte_errno = ENOMEM;
		return NULL;
	}

	/* save aligned physical and virtual addresses */
	memseg_physaddr = free_memseg[memseg_idx].phys_addr + seg_offset;
	memseg_addr = RTE_PTR_ADD(free_memseg[memseg_idx].addr,
			(uintptr_t) seg_offset);

	/* if we are looking for a biggest memzone */
	if (len == 0) {
		if (bound == 0)
			requested_len = memseg_len - seg_offset;
		else
			requested_len = RTE_ALIGN_CEIL(memseg_physaddr + 1,
				bound) - memseg_physaddr;
	}

	/* set length to correct value */
	len = (size_t)seg_offset + requested_len;

	/* update our internal state */
	free_memseg[memseg_idx].len -= len;
	free_memseg[memseg_idx].phys_addr += len;
	free_memseg[memseg_idx].addr =
		(char *)free_memseg[memseg_idx].addr + len;

	/* fill the zone in config */
	struct rte_memzone *mz = &mcfg->memzone[mcfg->memzone_idx++];
	snprintf(mz->name, sizeof(mz->name), "%s", name);
	mz->phys_addr = memseg_physaddr;
	mz->addr = memseg_addr;
	mz->len = requested_len;
	mz->hugepage_sz = free_memseg[memseg_idx].hugepage_sz;
	mz->socket_id = free_memseg[memseg_idx].socket_id;
	mz->flags = 0;
	mz->memseg_id = memseg_idx;

	return mz;
}
Example #9
/* Precalculate WRR polling sequence for all queues in rx_adapter */
static int
eth_poll_wrr_calc(struct rte_event_eth_rx_adapter *rx_adapter)
{
	uint8_t d;
	uint16_t q;
	unsigned int i;

	/* Initialize variables for calculation of wrr schedule */
	uint16_t max_wrr_pos = 0;
	unsigned int poll_q = 0;
	uint16_t max_wt = 0;
	uint16_t gcd = 0;

	struct eth_rx_poll_entry *rx_poll = NULL;
	uint32_t *rx_wrr = NULL;

	if (rx_adapter->num_rx_polled) {
		size_t len = RTE_ALIGN(rx_adapter->num_rx_polled *
				sizeof(*rx_adapter->eth_rx_poll),
				RTE_CACHE_LINE_SIZE);
		rx_poll = rte_zmalloc_socket(rx_adapter->mem_name,
					     len,
					     RTE_CACHE_LINE_SIZE,
					     rx_adapter->socket_id);
		if (rx_poll == NULL)
			return -ENOMEM;

		/* Generate array of all queues to poll, the size of this
		 * array is poll_q
		 */
		for (d = 0; d < rte_eth_dev_count(); d++) {
			uint16_t nb_rx_queues;
			struct eth_device_info *dev_info =
					&rx_adapter->eth_devices[d];
			nb_rx_queues = dev_info->dev->data->nb_rx_queues;
			if (dev_info->rx_queue == NULL)
				continue;
			for (q = 0; q < nb_rx_queues; q++) {
				struct eth_rx_queue_info *queue_info =
					&dev_info->rx_queue[q];
				if (queue_info->queue_enabled == 0)
					continue;

				uint16_t wt = queue_info->wt;
				rx_poll[poll_q].eth_dev_id = d;
				rx_poll[poll_q].eth_rx_qid = q;
				max_wrr_pos += wt;
				max_wt = RTE_MAX(max_wt, wt);
				gcd = (gcd) ? gcd_u16(gcd, wt) : wt;
				poll_q++;
			}
		}

		len = RTE_ALIGN(max_wrr_pos * sizeof(*rx_wrr),
				RTE_CACHE_LINE_SIZE);
		rx_wrr = rte_zmalloc_socket(rx_adapter->mem_name,
					    len,
					    RTE_CACHE_LINE_SIZE,
					    rx_adapter->socket_id);
		if (rx_wrr == NULL) {
			rte_free(rx_poll);
			return -ENOMEM;
		}

		/* Generate polling sequence based on weights */
		int prev = -1;
		int cw = -1;
		for (i = 0; i < max_wrr_pos; i++) {
			rx_wrr[i] = wrr_next(rx_adapter, poll_q, &cw,
					     rx_poll, max_wt, gcd, prev);
			prev = rx_wrr[i];
		}
	}

	rte_free(rx_adapter->eth_rx_poll);
	rte_free(rx_adapter->wrr_sched);

	rx_adapter->eth_rx_poll = rx_poll;
	rx_adapter->wrr_sched = rx_wrr;
	rx_adapter->wrr_len = max_wrr_pos;

	return 0;
}
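
gcd_u16() and wrr_next() are helpers from the same adapter file; below is a minimal standalone sketch of the gcd-based interleaved weighted round-robin they presumably implement (assumed semantics), with a small demo of the resulting sequence. On every wrap of the queue index the current weight cw drops by gcd(all weights), and a queue is scheduled whenever its weight is >= cw:

#include <stdint.h>
#include <stdio.h>

static uint16_t gcd_u16(uint16_t a, uint16_t b)
{
	while (b) { uint16_t t = a % b; a = b; b = t; }
	return a;
}

/* Pick the next queue index after 'prev' for weights wt[0..n-1]. */
static int wrr_next(const uint16_t *wt, unsigned n, int *cw,
		    uint16_t max_wt, uint16_t gcd, int prev)
{
	int i = prev;

	for (;;) {
		i = (i + 1) % n;
		if (i == 0) {
			*cw -= gcd;
			if (*cw <= 0)
				*cw = max_wt;
		}
		if (wt[i] >= *cw)
			return i;
	}
}

int main(void)
{
	const uint16_t wt[] = { 4, 2, 1 };
	int cw = -1, prev = -1;
	uint16_t gcd = gcd_u16(gcd_u16(wt[0], wt[1]), wt[2]);  /* = 1 */

	for (int i = 0; i < 7; i++) {       /* 4 + 2 + 1 slots per cycle */
		prev = wrr_next(wt, 3, &cw, 4, gcd, prev);
		printf("%d ", prev);        /* prints: 0 0 0 1 0 1 2 */
	}
	printf("\n");
	return 0;
}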
Example #10
/*
 * Initialise a given port using global settings, with the rx buffers
 * coming from the mbuf_pool passed as a parameter.
 */
static inline int
port_init(uint16_t port, struct rte_mempool *mbuf_pool)
{
	struct rte_eth_dev_info dev_info;
	struct rte_eth_conf port_conf = {0};
	uint16_t rxRingSize = RTE_TEST_RX_DESC_DEFAULT;
	uint16_t txRingSize = RTE_TEST_TX_DESC_DEFAULT;
	int retval;
	uint16_t q;
	uint16_t queues_per_pool;
	uint32_t max_nb_pools;

	/*
	 * The max pool number from dev_info will be used to validate the pool
	 * number specified in cmd line
	 */
	rte_eth_dev_info_get(port, &dev_info);
	max_nb_pools = (uint32_t)dev_info.max_vmdq_pools;
	/*
	 * We allow processing only part of the VMDQ pools, as specified
	 * by num_pools on the command line.
	 */
	if (num_pools > max_nb_pools) {
		printf("num_pools %d > max_nb_pools %d\n",
			num_pools, max_nb_pools);
		return -1;
	}

	/*
	 * NIC queues are divided into pf queues and vmdq queues.
	 * The assumption here is that all ports have the same configuration!
	 */
	vmdq_queue_base = dev_info.vmdq_queue_base;
	vmdq_pool_base  = dev_info.vmdq_pool_base;
	printf("vmdq queue base: %d pool base %d\n",
		vmdq_queue_base, vmdq_pool_base);
	if (vmdq_pool_base == 0) {
		num_vmdq_queues = dev_info.max_rx_queues;
		num_queues = dev_info.max_rx_queues;
		if (num_tcs != num_vmdq_queues / num_pools) {
			printf("nb_tcs %d is invalid considering with"
				" nb_pools %d, nb_tcs * nb_pools should = %d\n",
				num_tcs, num_pools, num_vmdq_queues);
			return -1;
		}
	} else {
		queues_per_pool = dev_info.vmdq_queue_num /
				  dev_info.max_vmdq_pools;
		if (num_tcs > queues_per_pool) {
			printf("num_tcs %d > num of queues per pool %d\n",
				num_tcs, queues_per_pool);
			return -1;
		}
		num_vmdq_queues = num_pools * queues_per_pool;
		num_queues = vmdq_queue_base + num_vmdq_queues;
		printf("Configured vmdq pool num: %u,"
			" each vmdq pool has %u queues\n",
			num_pools, queues_per_pool);
	}

	if (port >= rte_eth_dev_count())
		return -1;

	retval = get_eth_conf(&port_conf);
	if (retval < 0)
		return retval;

	/*
	 * In this example all queues, including pf queues, are set up.
	 * This is because VMDQ queues don't always start from zero, and the
	 * PMD layer doesn't support selectively initialising only part of
	 * the rx/tx queues.
	 */
	retval = rte_eth_dev_configure(port, num_queues, num_queues, &port_conf);
	if (retval != 0)
		return retval;

	retval = rte_eth_dev_adjust_nb_rx_tx_desc(port, &rxRingSize,
				&txRingSize);
	if (retval != 0)
		return retval;
	if (RTE_MAX(rxRingSize, txRingSize) >
	    RTE_MAX(RTE_TEST_RX_DESC_DEFAULT, RTE_TEST_TX_DESC_DEFAULT)) {
		printf("Mbuf pool has an insufficient size for port %u.\n",
			port);
		return -1;
	}

	for (q = 0; q < num_queues; q++) {
		retval = rte_eth_rx_queue_setup(port, q, rxRingSize,
					rte_eth_dev_socket_id(port),
					NULL,
					mbuf_pool);
		if (retval < 0) {
			printf("initialize rx queue %d failed\n", q);
			return retval;
		}
	}

	for (q = 0; q < num_queues; q++) {
		retval = rte_eth_tx_queue_setup(port, q, txRingSize,
					rte_eth_dev_socket_id(port),
					NULL);
		if (retval < 0) {
			printf("initialize tx queue %d failed\n", q);
			return retval;
		}
	}

	retval  = rte_eth_dev_start(port);
	if (retval < 0) {
		printf("port %d start failed\n", port);
		return retval;
	}

	rte_eth_macaddr_get(port, &vmdq_ports_eth_addr[port]);
	printf("Port %u MAC: %02"PRIx8" %02"PRIx8" %02"PRIx8
			" %02"PRIx8" %02"PRIx8" %02"PRIx8"\n",
			(unsigned)port,
			vmdq_ports_eth_addr[port].addr_bytes[0],
			vmdq_ports_eth_addr[port].addr_bytes[1],
			vmdq_ports_eth_addr[port].addr_bytes[2],
			vmdq_ports_eth_addr[port].addr_bytes[3],
			vmdq_ports_eth_addr[port].addr_bytes[4],
			vmdq_ports_eth_addr[port].addr_bytes[5]);

	/* Set mac for each pool.*/
	for (q = 0; q < num_pools; q++) {
		struct ether_addr mac;

		mac = pool_addr_template;
		mac.addr_bytes[4] = port;
		mac.addr_bytes[5] = q;
		printf("Port %u vmdq pool %u set mac %02x:%02x:%02x:%02x:%02x:%02x\n",
			port, q,
			mac.addr_bytes[0], mac.addr_bytes[1],
			mac.addr_bytes[2], mac.addr_bytes[3],
			mac.addr_bytes[4], mac.addr_bytes[5]);
		retval = rte_eth_dev_mac_addr_add(port, &mac,
				q + vmdq_pool_base);
		if (retval) {
			printf("mac addr add failed at pool %d\n", q);
			return retval;
		}
	}

	return 0;
}
Example #11
/*
 * Parse an elem; the elem can be a single number/range or a '(' ')' group.
 * 1) A single number elem is just a plain digit, e.g. 9.
 * 2) A single range elem is two numbers with a '-' between them, e.g. 2-6.
 * 3) A group elem combines multiple 1) or 2) with '( )', e.g. (0,2-4,6).
 *    Within a group elem, '-' is used as a range separator and
 *    ',' separates single numbers.
 */
static int
eal_parse_set(const char *input, uint16_t set[], unsigned num)
{
	unsigned idx;
	const char *str = input;
	char *end = NULL;
	unsigned min, max;

	memset(set, 0, num * sizeof(uint16_t));

	while (isblank(*str))
		str++;

	/* only a digit or a left bracket qualifies as a start point */
	if ((!isdigit(*str) && *str != '(') || *str == '\0')
		return -1;

	/* process single number or single range of number */
	if (*str != '(') {
		errno = 0;
		idx = strtoul(str, &end, 10);
		if (errno || end == NULL || idx >= num)
			return -1;
		else {
			while (isblank(*end))
				end++;

			min = idx;
			max = idx;
			if (*end == '-') {
				/* process single <number>-<number> */
				end++;
				while (isblank(*end))
					end++;
				if (!isdigit(*end))
					return -1;

				errno = 0;
				idx = strtoul(end, &end, 10);
				if (errno || end == NULL || idx >= num)
					return -1;
				max = idx;
				while (isblank(*end))
					end++;
				if (*end != ',' && *end != '\0')
					return -1;
			}

			if (*end != ',' && *end != '\0' &&
			    *end != '@')
				return -1;

			for (idx = RTE_MIN(min, max);
			     idx <= RTE_MAX(min, max); idx++)
				set[idx] = 1;

			return end - input;
		}
	}

	/* process set within bracket */
	str++;
	while (isblank(*str))
		str++;
	if (*str == '\0')
		return -1;

	min = RTE_MAX_LCORE;
	do {

		/* advance to the first digit */
		while (isblank(*str))
			str++;
		if (!isdigit(*str))
			return -1;

		/* get the digit value */
		errno = 0;
		idx = strtoul(str, &end, 10);
		if (errno || end == NULL || idx >= num)
			return -1;

		/* advance to a separator: '-', ',' or ')' */
		while (isblank(*end))
			end++;
		if (*end == '-') {
			if (min == RTE_MAX_LCORE)
				min = idx;
			else /* avoid continuous '-' */
				return -1;
		} else if ((*end == ',') || (*end == ')')) {
			max = idx;
			if (min == RTE_MAX_LCORE)
				min = idx;
			for (idx = RTE_MIN(min, max);
			     idx <= RTE_MAX(min, max); idx++)
				set[idx] = 1;

			min = RTE_MAX_LCORE;
		} else
			return -1;

		str = end + 1;
	} while (*end != '\0' && *end != ')');

	return str - input;
}
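
Since eal_parse_set() is static, any caller sits in the same file; a hedged sketch of what it produces for a bracketed group:

/* Illustration only: would live in the same translation unit
 * as the static parser above. */
static void parse_demo(void)
{
	uint16_t set[RTE_MAX_LCORE];
	int consumed = eal_parse_set("(0,2-4,6)", set, RTE_DIM(set));

	/* consumed == 9, the length of the parsed prefix;
	 * set[0] == set[2] == set[3] == set[4] == set[6] == 1, rest 0 */
	(void)consumed;
}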
Example #12
static int
vfio_spapr_dma_map(int vfio_container_fd)
{
	const struct rte_memseg *ms = rte_eal_get_physmem_layout();
	int i, ret;

	struct vfio_iommu_spapr_register_memory reg = {
		.argsz = sizeof(reg),
		.flags = 0
	};
	struct vfio_iommu_spapr_tce_info info = {
		.argsz = sizeof(info),
	};
	struct vfio_iommu_spapr_tce_create create = {
		.argsz = sizeof(create),
	};
	struct vfio_iommu_spapr_tce_remove remove = {
		.argsz = sizeof(remove),
	};

	/* query spapr iommu info */
	ret = ioctl(vfio_container_fd, VFIO_IOMMU_SPAPR_TCE_GET_INFO, &info);
	if (ret) {
		RTE_LOG(ERR, EAL, "  cannot get iommu info, "
				"error %i (%s)\n", errno, strerror(errno));
		return -1;
	}

	/* remove default DMA of 32 bit window */
	remove.start_addr = info.dma32_window_start;
	ret = ioctl(vfio_container_fd, VFIO_IOMMU_SPAPR_TCE_REMOVE, &remove);
	if (ret) {
		RTE_LOG(ERR, EAL, "  cannot remove default DMA window, "
				"error %i (%s)\n", errno, strerror(errno));
		return -1;
	}

	/* create DMA window from 0 to max(phys_addr + len) */
	for (i = 0; i < RTE_MAX_MEMSEG; i++) {
		if (ms[i].addr == NULL)
			break;

		create.window_size = RTE_MAX(create.window_size,
				ms[i].iova + ms[i].len);
	}

	/* sPAPR requires window size to be a power of 2 */
	create.window_size = rte_align64pow2(create.window_size);
	create.page_shift = __builtin_ctzll(ms->hugepage_sz);
	create.levels = 1;

	ret = ioctl(vfio_container_fd, VFIO_IOMMU_SPAPR_TCE_CREATE, &create);
	if (ret) {
		RTE_LOG(ERR, EAL, "  cannot create new DMA window, "
				"error %i (%s)\n", errno, strerror(errno));
		return -1;
	}

	if (create.start_addr != 0) {
		RTE_LOG(ERR, EAL, "  DMA window start address != 0\n");
		return -1;
	}

	/* map all DPDK segments for DMA. use 1:1 PA to IOVA mapping */
	for (i = 0; i < RTE_MAX_MEMSEG; i++) {
		struct vfio_iommu_type1_dma_map dma_map;

		if (ms[i].addr == NULL)
			break;

		reg.vaddr = (uintptr_t) ms[i].addr;
		reg.size = ms[i].len;
		ret = ioctl(vfio_container_fd,
			VFIO_IOMMU_SPAPR_REGISTER_MEMORY, &reg);
		if (ret) {
			RTE_LOG(ERR, EAL, "  cannot register vaddr for IOMMU, "
				"error %i (%s)\n", errno, strerror(errno));
			return -1;
		}

		memset(&dma_map, 0, sizeof(dma_map));
		dma_map.argsz = sizeof(struct vfio_iommu_type1_dma_map);
		dma_map.vaddr = ms[i].addr_64;
		dma_map.size = ms[i].len;
		dma_map.iova = ms[i].iova;
		dma_map.flags = VFIO_DMA_MAP_FLAG_READ |
				 VFIO_DMA_MAP_FLAG_WRITE;

		ret = ioctl(vfio_container_fd, VFIO_IOMMU_MAP_DMA, &dma_map);

		if (ret) {
			RTE_LOG(ERR, EAL, "  cannot set up DMA remapping, "
				"error %i (%s)\n", errno, strerror(errno));
			return -1;
		}

	}

	return 0;
}

static int
vfio_noiommu_dma_map(int __rte_unused vfio_container_fd)
{
	/* No-IOMMU mode does not need DMA mapping */
	return 0;
}

int
rte_vfio_noiommu_is_enabled(void)
{
	int fd, ret;
	ssize_t cnt;
	char c;

	ret = -1;
	fd = open(VFIO_NOIOMMU_MODE, O_RDONLY);
	if (fd < 0)
		return -1;

	cnt = read(fd, &c, 1);
	/* only trust 'c' if the read actually returned one byte */
	if (cnt == 1 && c == 'Y')
		ret = 1;

	close(fd);
	return ret;
}
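
The window sizing above leans on rte_align64pow2(); its rte_common.h implementation is essentially the classic bit-smearing round-up (paraphrased):

/* Paraphrased from rte_common.h: round v up to the next power of two
 * by smearing the highest set bit into all lower positions. */
static inline uint64_t
rte_align64pow2(uint64_t v)
{
	v--;
	v |= v >> 1;
	v |= v >> 2;
	v |= v >> 4;
	v |= v >> 8;
	v |= v >> 16;
	v |= v >> 32;
	return v + 1;
}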
Example #13
/*
 * Initialise a given port using global settings, with the rx buffers
 * coming from the mbuf_pool passed as a parameter.
 */
static inline int
port_init(uint8_t port, struct rte_mempool *mbuf_pool)
{
	struct rte_eth_dev_info dev_info;
	struct rte_eth_rxconf *rxconf;
	struct rte_eth_conf port_conf;
	uint16_t rxRings, txRings;
	uint16_t rxRingSize = RTE_TEST_RX_DESC_DEFAULT;
	uint16_t txRingSize = RTE_TEST_TX_DESC_DEFAULT;
	int retval;
	uint16_t q;
	uint16_t queues_per_pool;
	uint32_t max_nb_pools;

	/*
	 * The max pool number from dev_info will be used to validate the pool
	 * number specified in cmd line
	 */
	rte_eth_dev_info_get(port, &dev_info);
	max_nb_pools = (uint32_t)dev_info.max_vmdq_pools;
	/*
	 * We allow processing only part of the VMDQ pools, as specified
	 * by num_pools on the command line.
	 */
	if (num_pools > max_nb_pools) {
		printf("num_pools %d > max_nb_pools %d\n",
			num_pools, max_nb_pools);
		return -1;
	}
	retval = get_eth_conf(&port_conf, max_nb_pools);
	if (retval < 0)
		return retval;

	/*
	 * NIC queues are divided into pf queues and vmdq queues.
	 * The assumption here is that all ports have the same configuration!
	 */
	num_pf_queues = dev_info.max_rx_queues - dev_info.vmdq_queue_num;
	queues_per_pool = dev_info.vmdq_queue_num / dev_info.max_vmdq_pools;
	num_vmdq_queues = num_pools * queues_per_pool;
	num_queues = num_pf_queues + num_vmdq_queues;
	vmdq_queue_base = dev_info.vmdq_queue_base;
	vmdq_pool_base  = dev_info.vmdq_pool_base;

	printf("pf queue num: %u, configured vmdq pool num: %u,"
		" each vmdq pool has %u queues\n",
		num_pf_queues, num_pools, queues_per_pool);
	printf("vmdq queue base: %d pool base %d\n",
		vmdq_queue_base, vmdq_pool_base);
	if (port >= rte_eth_dev_count())
		return -1;

	/*
	 * Though in this example we only receive packets from the first queue
	 * of each pool and send packets through the first rte_lcore_count() tx
	 * queues of the vmdq queues, all queues including pf queues are set up.
	 * This is because VMDQ queues don't always start from zero, and the
	 * PMD layer doesn't support selectively initialising only part of
	 * the rx/tx queues.
	 */
	rxRings = (uint16_t)dev_info.max_rx_queues;
	txRings = (uint16_t)dev_info.max_tx_queues;
	retval = rte_eth_dev_configure(port, rxRings, txRings, &port_conf);
	if (retval != 0)
		return retval;

	retval = rte_eth_dev_adjust_nb_rx_tx_desc(port, &rxRingSize,
				&txRingSize);
	if (retval != 0)
		return retval;
	if (RTE_MAX(rxRingSize, txRingSize) > RTE_MAX(RTE_TEST_RX_DESC_DEFAULT,
			RTE_TEST_TX_DESC_DEFAULT)) {
		printf("Mbuf pool has an insufficient size for port %u.\n",
			port);
		return -1;
	}

	rte_eth_dev_info_get(port, &dev_info);
	rxconf = &dev_info.default_rxconf;
	rxconf->rx_drop_en = 1;
	for (q = 0; q < rxRings; q++) {
		retval = rte_eth_rx_queue_setup(port, q, rxRingSize,
					rte_eth_dev_socket_id(port),
					rxconf,
					mbuf_pool);
		if (retval < 0) {
			printf("initialise rx queue %d failed\n", q);
			return retval;
		}
	}

	for (q = 0; q < txRings; q++) {
		retval = rte_eth_tx_queue_setup(port, q, txRingSize,
					rte_eth_dev_socket_id(port),
					NULL);
		if (retval < 0) {
			printf("initialise tx queue %d failed\n", q);
			return retval;
		}
	}

	retval  = rte_eth_dev_start(port);
	if (retval < 0) {
		printf("port %d start failed\n", port);
		return retval;
	}

	rte_eth_macaddr_get(port, &vmdq_ports_eth_addr[port]);
	printf("Port %u MAC: %02"PRIx8" %02"PRIx8" %02"PRIx8
			" %02"PRIx8" %02"PRIx8" %02"PRIx8"\n",
			(unsigned)port,
			vmdq_ports_eth_addr[port].addr_bytes[0],
			vmdq_ports_eth_addr[port].addr_bytes[1],
			vmdq_ports_eth_addr[port].addr_bytes[2],
			vmdq_ports_eth_addr[port].addr_bytes[3],
			vmdq_ports_eth_addr[port].addr_bytes[4],
			vmdq_ports_eth_addr[port].addr_bytes[5]);

	/*
	 * Set mac for each pool.
	 * There is no default mac for the pools in i40e.
	 * Remove this once i40e fixes the issue.
	 */
	for (q = 0; q < num_pools; q++) {
		struct ether_addr mac;
		mac = pool_addr_template;
		mac.addr_bytes[4] = port;
		mac.addr_bytes[5] = q;
		printf("Port %u vmdq pool %u set mac %02x:%02x:%02x:%02x:%02x:%02x\n",
			port, q,
			mac.addr_bytes[0], mac.addr_bytes[1],
			mac.addr_bytes[2], mac.addr_bytes[3],
			mac.addr_bytes[4], mac.addr_bytes[5]);
		retval = rte_eth_dev_mac_addr_add(port, &mac,
				q + vmdq_pool_base);
		if (retval) {
			printf("mac addr add failed at pool %d\n", q);
			return retval;
		}
	}

	return 0;
}
Example #14
static int
avf_dev_start(struct rte_eth_dev *dev)
{
	struct avf_adapter *adapter =
		AVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
	struct avf_info *vf = AVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
	struct avf_hw *hw = AVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = dev->intr_handle;

	PMD_INIT_FUNC_TRACE();

	hw->adapter_stopped = 0;

	vf->max_pkt_len = dev->data->dev_conf.rxmode.max_rx_pkt_len;
	vf->num_queue_pairs = RTE_MAX(dev->data->nb_rx_queues,
				      dev->data->nb_tx_queues);

	if (avf_init_queues(dev) != 0) {
		PMD_DRV_LOG(ERR, "failed to do Queue init");
		return -1;
	}

	if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF) {
		if (avf_init_rss(adapter) != 0) {
			PMD_DRV_LOG(ERR, "configure rss failed");
			goto err_rss;
		}
	}

	if (avf_configure_queues(adapter) != 0) {
		PMD_DRV_LOG(ERR, "configure queues failed");
		goto err_queue;
	}

	if (avf_config_rx_queues_irqs(dev, intr_handle) != 0) {
		PMD_DRV_LOG(ERR, "configure irq failed");
		goto err_queue;
	}
	/* re-enable the interrupt, because the efd assignment may have changed */
	if (dev->data->dev_conf.intr_conf.rxq != 0) {
		rte_intr_disable(intr_handle);
		rte_intr_enable(intr_handle);
	}

	/* Set all mac addrs */
	avf_add_del_all_mac_addr(adapter, TRUE);

	if (avf_start_queues(dev) != 0) {
		PMD_DRV_LOG(ERR, "enable queues failed");
		goto err_mac;
	}

	return 0;

err_mac:
	avf_add_del_all_mac_addr(adapter, FALSE);
err_queue:
err_rss:
	return -1;
}