Example #1
/* Check the link status of all ports in up to 9s, and print them finally */
static void
check_all_ports_link_status(uint8_t port_num)
{
#define CHECK_INTERVAL 100 /* 100ms */
#define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
    uint8_t portid, count, all_ports_up, print_flag = 0;
    struct rte_eth_link link;

    printf("\nChecking link status");
    fflush(stdout);
    rte_delay_ms(15000);
    for (count = 0; count <= MAX_CHECK_TIME; count++) {
        all_ports_up = 1;
        for (portid = 0; portid < port_num; portid++) {
            memset(&link, 0, sizeof(link));
            rte_eth_link_get_nowait(portid, &link);
            /* print link status if flag set */
            if (print_flag == 1) {
                if (link.link_status)
                    printf("Port %d Link Up - speed %u "
                           "Mbps - %s\n", (uint8_t)portid,
                           (unsigned)link.link_speed,
                           (link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
                           ("full-duplex") : ("half-duplex\n"));
                else
                    printf("Port %d Link Down\n",
                           (uint8_t)portid);
                continue;
            }
            /* clear all_ports_up flag if any link down */
            if (link.link_status == 0) {
                all_ports_up = 0;
                break;
            }
        }
        /* after finally printing all link status, get out */
        if (print_flag == 1)
            break;

        if (all_ports_up == 0) {
            printf(".");
            fflush(stdout);
            rte_delay_ms(CHECK_INTERVAL);
        }

        /* set the print_flag if all ports up or timeout */
        if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) {
            print_flag = 1;
            printf("done\n");
        }
    }
}
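
The link-status helpers in Example #1 and the variants below all share one idiom: poll a condition in a bounded loop and back off with rte_delay_ms() between attempts. The generic sketch below is not taken from any of these examples; wait_for_condition() and cond() are illustrative names only. Note that rte_delay_ms() (declared in rte_cycles.h) busy-waits, so the calling lcore does nothing else while delaying.

#include <stdbool.h>
#include <rte_cycles.h>   /* rte_delay_ms() */

/* Poll cond() every interval_ms milliseconds, giving up after max_checks tries. */
static bool
wait_for_condition(bool (*cond)(void), unsigned int interval_ms,
                   unsigned int max_checks)
{
    unsigned int i;

    for (i = 0; i < max_checks; i++) {
        if (cond())
            return true;            /* condition met */
        rte_delay_ms(interval_ms);  /* busy-wait between checks */
    }
    return false;                   /* timed out */
}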
Example #2
/* Check the link status of all ports in up to 9s, and print them finally */
static void
check_all_ports_link_status(uint16_t port_num, uint32_t port_mask)
{
#define CHECK_INTERVAL 100 /* 100ms */
#define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
	uint16_t portid;
	uint8_t count, all_ports_up, print_flag = 0;
	struct rte_eth_link link;
	uint32_t n_rx_queues, n_tx_queues;

	printf("\nChecking link status");
	fflush(stdout);
	for (count = 0; count <= MAX_CHECK_TIME; count++) {
		all_ports_up = 1;
		for (portid = 0; portid < port_num; portid++) {
			if ((port_mask & (1 << portid)) == 0)
				continue;
			n_rx_queues = app_get_nic_rx_queues_per_port(portid);
			n_tx_queues = app.nic_tx_port_mask[portid];
			if ((n_rx_queues == 0) && (n_tx_queues == 0))
				continue;
			memset(&link, 0, sizeof(link));
			rte_eth_link_get_nowait(portid, &link);
			/* print link status if flag set */
			if (print_flag == 1) {
				if (link.link_status)
					printf(
					"Port%d Link Up - speed %uMbps - %s\n",
						portid, link.link_speed,
				(link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
					("full-duplex") : ("half-duplex\n"));
				else
					printf("Port %d Link Down\n", portid);
				continue;
			}
			/* clear all_ports_up flag if any link down */
			if (link.link_status == ETH_LINK_DOWN) {
				all_ports_up = 0;
				break;
			}
		}
		/* after finally printing all link status, get out */
		if (print_flag == 1)
			break;

		if (all_ports_up == 0) {
			printf(".");
			fflush(stdout);
			rte_delay_ms(CHECK_INTERVAL);
		}

		/* set the print_flag if all ports up or timeout */
		if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) {
			print_flag = 1;
			printf("done\n");
		}
	}
}
Example #3
/* Check the link status of all ports in up to 9s, and print them finally */
static void
check_all_ports_link_status()
{
#define CHECK_INTERVAL 100 /* 100ms */
#define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
    uint8_t portid, count, all_ports_up, print_flag = 0;
    struct rte_eth_link link;

    LOG_RAW(INFO, "\nChecking link status");
    fflush(stdout);
    for (count = 0; count <= MAX_CHECK_TIME; count++) {
        if (sk.force_quit)
            return;
        all_ports_up = 1;

        for (int i = 0; i < sk.nr_ports; i++) {
            if (sk.force_quit)
                return;
            portid = (uint8_t )sk.port_ids[i];
            memset(&link, 0, sizeof(link));
            rte_eth_link_get_nowait(portid, &link);
            /* print link status if flag set */
            if (print_flag == 1) {
                if (link.link_status)
                    LOG_RAW(INFO,
                            "Port %d Link Up - speed %u "
                            "Mbps - %s\n", (uint8_t)portid,
                            (unsigned)link.link_speed,
                            (link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
                            ("full-duplex") : ("half-duplex\n"));
                else
                    LOG_RAW(INFO, "Port %d Link Down\n", (uint8_t)portid);
                continue;
            }
            /* clear all_ports_up flag if any link down */
            if (link.link_status == ETH_LINK_DOWN) {
                all_ports_up = 0;
                break;
            }
        }
        /* after finally printing all link status, get out */
        if (print_flag == 1)
            break;

        if (all_ports_up == 0) {
            LOG_RAW(INFO, ".");
            fflush(sk.log_fp);
            rte_delay_ms(CHECK_INTERVAL);
        }

        /* set the print_flag if all ports up or timeout */
        if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) {
            print_flag = 1;
            LOG_RAW(INFO, "done\n");
        }
    }
}
Example #4
/**********************************************************************
*@description:
* Check the link status of all ports in up to 9s, and print them finally
*
*@parameters:
* [in]: port_num  - number of Ethernet ports to check
* [in]: port_mask - bitmask selecting which ports to check
*
*@return values:
* none
**********************************************************************/
static void odp_check_ports_link_status(uint8_t port_num, uint32_t port_mask)
{
    uint8_t check_interval = 100; /* 100ms */
    uint8_t max_check_time = 90; /* 9s (90 * 100ms) in total */
    uint8_t portid, count, all_ports_up, print_flag = 0;
    struct rte_eth_link link;

    printf("\nChecking link status ");

    for (count = 0; count <= max_check_time; count++) {
        all_ports_up = 1;
        for (portid = 0; portid < port_num; portid++) {
            if ((port_mask & (1 << portid)) == 0)
                continue;

            memset(&link, 0, sizeof(link));
            rte_eth_link_get_nowait(portid, &link);
            /* print link status if flag set */
            if (print_flag == 1) {
                if (link.link_status)
                    printf("Port %d Link Up - speed %u Mbps - %s\n",
                           (uint8_t)portid, (unsigned)link.link_speed,
                           (link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
                           ("full-duplex") : ("half-duplex"));
                else
                    printf("Port %d Link Down\n", (uint8_t)portid);

                continue;
            }
            /* clear all_ports_up flag if any link down */
            if (link.link_status == 0) {
                all_ports_up = 0;
                break;
            }
        }

        /* after finally printing all link status, get out */
        if (print_flag == 1)
            break;

        if (all_ports_up == 0) {
            printf(".");
            rte_delay_ms(check_interval);
        }

        /* set the print_flag if all ports up or timeout */
        if (all_ports_up == 1 || count == (max_check_time - 1)) {
            print_flag = 1;
            printf("done\n");
        }
    }
}
int
test_alarm(void)
{
	int count = 0;

	/* check if the callback will be called */
	printf("check if the callback will be called\n");
	flag = 0;
	if (rte_eal_alarm_set(RTE_TEST_ALARM_TIMEOUT * US_PER_MS,
			test_alarm_callback, NULL) < 0) {
		printf("fail to set alarm callback\n");
		return -1;
	}
	while (flag == 0 && count++ < 6)
		rte_delay_ms(RTE_TEST_CHECK_PERIOD);

	if (flag == 0) {
		printf("Callback not called\n");
		return -1;
	}

	/* check if it will fail to set alarm with wrong us value */
	printf("check if it will fail to set alarm with wrong ms values\n");
	if (rte_eal_alarm_set(0, test_alarm_callback,
						NULL) >= 0) {
		printf("should not be successful with 0 us value\n");
		return -1;
	}
	if (rte_eal_alarm_set(UINT64_MAX - 1, test_alarm_callback,
						NULL) >= 0) {
		printf("should not be successful with (UINT64_MAX-1) us value\n");
		return -1;
	}

	/* check if it will fail to set alarm with null callback parameter */
	printf("check if it will fail to set alarm with null callback parameter\n");
	if (rte_eal_alarm_set(RTE_TEST_ALARM_TIMEOUT, NULL, NULL) >= 0) {
		printf("should not be successful to set alarm with null callback parameter\n");
		return -1;
	}

	/* check if it will fail to remove alarm with null callback parameter */
	printf("check if it will fail to remove alarm with null callback parameter\n");
	if (rte_eal_alarm_cancel(NULL, NULL) == 0) {
		printf("should not be successful to remove alarm with null callback parameter");
		return -1;
	}

	if (test_multi_alarms() != 0)
		return -1;

	return 0;
}
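
test_alarm() above relies on a global flag and a test_alarm_callback() that are not part of this excerpt. A minimal sketch of what they plausibly look like, inferred only from how test_alarm() polls flag (the exact upstream definitions may differ):

#include <rte_alarm.h>

static volatile int flag;

/* Called from the EAL alarm thread with the argument passed to rte_eal_alarm_set(). */
static void
test_alarm_callback(void *arg)
{
	(void)arg;	/* unused in this sketch */
	flag = 1;	/* let the polling loop in test_alarm() see that the alarm fired */
}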
Example #6
/**
 * Check the full path of a specified type of interrupt simulated.
 */
static int
test_interrupt_full_path_check(enum test_interrupt_handle_type intr_type)
{
	int count;
	struct rte_intr_handle test_intr_handle;

	flag = 0;
	test_intr_handle = intr_handles[intr_type];
	test_intr_type = intr_type;
	if (rte_intr_callback_register(&test_intr_handle,
			test_interrupt_callback, &test_intr_handle) < 0) {
		printf("fail to register callback\n");
		return -1;
	}

	if (test_interrupt_trigger_interrupt() < 0)
		return -1;

	/* check flag */
	for (count = 0; flag == 0 && count < 3; count++)
		rte_delay_ms(TEST_INTERRUPT_CHECK_INTERVAL);

	rte_delay_ms(TEST_INTERRUPT_CHECK_INTERVAL);
	if (rte_intr_callback_unregister(&test_intr_handle,
			test_interrupt_callback, &test_intr_handle) < 0)
		return -1;

	if (flag == 0) {
		printf("callback has not been called\n");
		return -1;
	} else if (flag < 0) {
		printf("it has internal error in callback\n");
		return -1;
	}

	return 0;
}
void
wr_logo(int row, int col, const char * appname)
{
	int		i;
	static const char * logo[] = {
		"#     #",
		"#  #  #     #    #    #  #####",
		"#  #  #     #    ##   #  #    #",
		"#  #  #     #    # #  #  #    #",
		"#  #  #     #    #  # #  #    #",
		"#  #  #     #    #   ##  #    #",
		" ## ##      #    #    #  #####",
		"",
		"######",
		"#     #     #    #    #  ######  #####",
		"#     #     #    #    #  #       #    #",
		"######      #    #    #  #####   #    #",
		"#   #       #    #    #  #       #####",
		"#    #      #     #  #   #       #   #",
		"#     #     #      ##    ######  #    #",
		"",
		" #####",
		"#     #   #   #   ####    #####  ######  #    #   ####",
		"#          # #   #          #    #       ##  ##  #",
		" #####      #     ####      #    #####   # ## #   ####",
		"      #     #         #     #    #       #    #       #",
		"#     #     #    #    #     #    #       #    #  #    #",
		" #####      #     ####      #    ######  #    #   ####",
		NULL
	};

	wr_scrn_cls();
	wr_scrn_color(GREEN, NO_CHANGE, BOLD);
	for(i=0, row++; logo[i] != NULL; i++)
		wr_scrn_printf(row++, 7, "%s", logo[i]);

	wr_scrn_color(MAGENTA, NO_CHANGE, OFF);
	wr_scrn_printf(++row, col, "%s", COPYRIGHT_MSG);
	wr_scrn_color(BLUE, NO_CHANGE, BOLD);
	wr_scrn_printf(++row, col+6, ">>> %s is %s <<<", appname, POWERED_BY_DPDK);
	wr_scrn_color(BLACK, NO_CHANGE, OFF);
	wr_scrn_pos(++row, 1);

	rte_delay_ms(1500);

    wr_scrn_cls();
    wr_scrn_pos(100, 1);
}
int main(int argc, char **argv)
{
    int32_t ret;
    uint8_t lcore_id;

    /* Signal */
    signal(SIGINT,(void *)netflow_print);
 

    clrscr();
    // call before the rte_eal_init()
    (void)rte_set_application_usage_hook(netflow_usage);

    init_probe(&probe);
    
    netflow_logo(8, 0, NETFLOW_APP_NAME); 
    sleep(2);
    
    ret = rte_eal_init(argc, argv);
    if (ret < 0)
        rte_exit(EXIT_FAILURE, "Failed in rte_eal_init\n");
    argc -= ret;
    argv += ret;
    
    ret = netflow_parse_args(argc, argv);
    if (ret < 0)
        rte_exit(EXIT_FAILURE, "Invalid arguments\n");
  
    netflow_init(&probe);

    RTE_LCORE_FOREACH_SLAVE(lcore_id) {
        rte_eal_remote_launch(launch_probe, NULL, lcore_id);
    }
    rte_delay_ms(5000);     // wait for the lcores to start up

    // Wait for all of the cores to stop running and exit.

    process_hashtable();
    rte_eal_mp_wait_lcore(); 

    return 0;
}
void
wr_splash_screen(int row, int col, const char * appname, const char * created_by)
{
	int		i;

	row = 3;
	wr_scrn_color(BLUE, NO_CHANGE, OFF);
	wr_scrn_printf(row++, col, "%s", COPYRIGHT_MSG);
	wr_scrn_color(GREEN, NO_CHANGE, BOLD);
	for(i=0, row++; wr_copyright[i] != NULL; i++)
		wr_scrn_printf(row++, 7, "%s", wr_copyright[i]);
	wr_scrn_color(BLUE, NO_CHANGE, BOLD);
	wr_scrn_printf(row++, col, "%s created by %s -- >>> %s <<<", appname, created_by, POWERED_BY_DPDK);
	wr_scrn_color(BLACK, NO_CHANGE, OFF);
	wr_scrn_pos(++row, 1);

	rte_delay_ms(1500);

    wr_scrn_cls();
    wr_scrn_pos(100, 1);
}
Example #10
void cxgbe_dev_info_get(struct rte_eth_dev *eth_dev,
			struct rte_eth_dev_info *device_info)
{
	struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
	struct adapter *adapter = pi->adapter;
	int max_queues = adapter->sge.max_ethqsets / adapter->params.nports;

	static const struct rte_eth_desc_lim cxgbe_desc_lim = {
		.nb_max = CXGBE_MAX_RING_DESC_SIZE,
		.nb_min = CXGBE_MIN_RING_DESC_SIZE,
		.nb_align = 1,
	};

	device_info->min_rx_bufsize = CXGBE_MIN_RX_BUFSIZE;
	device_info->max_rx_pktlen = CXGBE_MAX_RX_PKTLEN;
	device_info->max_rx_queues = max_queues;
	device_info->max_tx_queues = max_queues;
	device_info->max_mac_addrs = 1;
	/* XXX: For now we support one MAC/port */
	device_info->max_vfs = adapter->params.arch.vfcount;
	device_info->max_vmdq_pools = 0; /* XXX: For now no support for VMDQ */

	device_info->rx_queue_offload_capa = 0UL;
	device_info->rx_offload_capa = CXGBE_RX_OFFLOADS;

	device_info->tx_queue_offload_capa = 0UL;
	device_info->tx_offload_capa = CXGBE_TX_OFFLOADS;

	device_info->reta_size = pi->rss_size;
	device_info->hash_key_size = CXGBE_DEFAULT_RSS_KEY_LEN;
	device_info->flow_type_rss_offloads = CXGBE_RSS_HF_ALL;

	device_info->rx_desc_lim = cxgbe_desc_lim;
	device_info->tx_desc_lim = cxgbe_desc_lim;
	cxgbe_get_speed_caps(pi, &device_info->speed_capa);
}

void cxgbe_dev_promiscuous_enable(struct rte_eth_dev *eth_dev)
{
	struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
	struct adapter *adapter = pi->adapter;

	t4_set_rxmode(adapter, adapter->mbox, pi->viid, -1,
		      1, -1, 1, -1, false);
}

void cxgbe_dev_promiscuous_disable(struct rte_eth_dev *eth_dev)
{
	struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
	struct adapter *adapter = pi->adapter;

	t4_set_rxmode(adapter, adapter->mbox, pi->viid, -1,
		      0, -1, 1, -1, false);
}

void cxgbe_dev_allmulticast_enable(struct rte_eth_dev *eth_dev)
{
	struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
	struct adapter *adapter = pi->adapter;

	/* TODO: address filters ?? */

	t4_set_rxmode(adapter, adapter->mbox, pi->viid, -1,
		      -1, 1, 1, -1, false);
}

void cxgbe_dev_allmulticast_disable(struct rte_eth_dev *eth_dev)
{
	struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
	struct adapter *adapter = pi->adapter;

	/* TODO: address filters ?? */

	t4_set_rxmode(adapter, adapter->mbox, pi->viid, -1,
		      -1, 0, 1, -1, false);
}

int cxgbe_dev_link_update(struct rte_eth_dev *eth_dev,
			  int wait_to_complete)
{
	struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
	struct adapter *adapter = pi->adapter;
	struct sge *s = &adapter->sge;
	struct rte_eth_link new_link = { 0 };
	unsigned int i, work_done, budget = 32;
	u8 old_link = pi->link_cfg.link_ok;

	for (i = 0; i < CXGBE_LINK_STATUS_POLL_CNT; i++) {
		cxgbe_poll(&s->fw_evtq, NULL, budget, &work_done);

		/* Exit if link status changed or always forced up */
		if (pi->link_cfg.link_ok != old_link || force_linkup(adapter))
			break;

		if (!wait_to_complete)
			break;

		rte_delay_ms(CXGBE_LINK_STATUS_POLL_MS);
	}

	new_link.link_status = force_linkup(adapter) ?
			       ETH_LINK_UP : pi->link_cfg.link_ok;
	new_link.link_autoneg = pi->link_cfg.autoneg;
	new_link.link_duplex = ETH_LINK_FULL_DUPLEX;
	new_link.link_speed = pi->link_cfg.speed;

	return rte_eth_linkstatus_set(eth_dev, &new_link);
}

/**
 * Set device link up.
 */
int cxgbe_dev_set_link_up(struct rte_eth_dev *dev)
{
	struct port_info *pi = (struct port_info *)(dev->data->dev_private);
	struct adapter *adapter = pi->adapter;
	unsigned int work_done, budget = 32;
	struct sge *s = &adapter->sge;
	int ret;

	/* Flush all link events */
	cxgbe_poll(&s->fw_evtq, NULL, budget, &work_done);

	/* If link already up, nothing to do */
	if (pi->link_cfg.link_ok)
		return 0;

	ret = cxgbe_set_link_status(pi, true);
	if (ret)
		return ret;

	cxgbe_dev_link_update(dev, 1);
	return 0;
}

/**
 * Set device link down.
 */
int cxgbe_dev_set_link_down(struct rte_eth_dev *dev)
{
	struct port_info *pi = (struct port_info *)(dev->data->dev_private);
	struct adapter *adapter = pi->adapter;
	unsigned int work_done, budget = 32;
	struct sge *s = &adapter->sge;
	int ret;

	/* Flush all link events */
	cxgbe_poll(&s->fw_evtq, NULL, budget, &work_done);

	/* If link already down, nothing to do */
	if (!pi->link_cfg.link_ok)
		return 0;

	ret = cxgbe_set_link_status(pi, false);
	if (ret)
		return ret;

	cxgbe_dev_link_update(dev, 0);
	return 0;
}

int cxgbe_dev_mtu_set(struct rte_eth_dev *eth_dev, uint16_t mtu)
{
	struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
	struct adapter *adapter = pi->adapter;
	struct rte_eth_dev_info dev_info;
	int err;
	uint16_t new_mtu = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;

	cxgbe_dev_info_get(eth_dev, &dev_info);

	/* Must accommodate at least ETHER_MIN_MTU */
	if ((new_mtu < ETHER_MIN_MTU) || (new_mtu > dev_info.max_rx_pktlen))
		return -EINVAL;

	/* set to jumbo mode if needed */
	if (new_mtu > ETHER_MAX_LEN)
		eth_dev->data->dev_conf.rxmode.offloads |=
			DEV_RX_OFFLOAD_JUMBO_FRAME;
	else
		eth_dev->data->dev_conf.rxmode.offloads &=
			~DEV_RX_OFFLOAD_JUMBO_FRAME;

	err = t4_set_rxmode(adapter, adapter->mbox, pi->viid, new_mtu, -1, -1,
			    -1, -1, true);
	if (!err)
		eth_dev->data->dev_conf.rxmode.max_rx_pkt_len = new_mtu;

	return err;
}

/*
 * Stop device.
 */
void cxgbe_dev_close(struct rte_eth_dev *eth_dev)
{
	struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
	struct adapter *adapter = pi->adapter;

	CXGBE_FUNC_TRACE();

	if (!(adapter->flags & FULL_INIT_DONE))
		return;

	cxgbe_down(pi);

	/*
	 *  We clear queues only if both tx and rx path of the port
	 *  have been disabled
	 */
	t4_sge_eth_clear_queues(pi);
}

/* Start the device.
 * It returns 0 on success.
 */
int cxgbe_dev_start(struct rte_eth_dev *eth_dev)
{
	struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
	struct adapter *adapter = pi->adapter;
	int err = 0, i;

	CXGBE_FUNC_TRACE();

	/*
	 * If we don't have a connection to the firmware there's nothing we
	 * can do.
	 */
	if (!(adapter->flags & FW_OK)) {
		err = -ENXIO;
		goto out;
	}

	if (!(adapter->flags & FULL_INIT_DONE)) {
		err = cxgbe_up(adapter);
		if (err < 0)
			goto out;
	}

	cxgbe_enable_rx_queues(pi);

	err = setup_rss(pi);
	if (err)
		goto out;

	for (i = 0; i < pi->n_tx_qsets; i++) {
		err = cxgbe_dev_tx_queue_start(eth_dev, i);
		if (err)
			goto out;
	}

	for (i = 0; i < pi->n_rx_qsets; i++) {
		err = cxgbe_dev_rx_queue_start(eth_dev, i);
		if (err)
			goto out;
	}

	err = link_start(pi);
	if (err)
		goto out;

out:
	return err;
}

/*
 * Stop device: disable rx and tx functions to allow for reconfiguring.
 */
void cxgbe_dev_stop(struct rte_eth_dev *eth_dev)
{
	struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
	struct adapter *adapter = pi->adapter;

	CXGBE_FUNC_TRACE();

	if (!(adapter->flags & FULL_INIT_DONE))
		return;

	cxgbe_down(pi);

	/*
	 *  We clear queues only if both tx and rx path of the port
	 *  have been disabled
	 */
	t4_sge_eth_clear_queues(pi);
}

int cxgbe_dev_configure(struct rte_eth_dev *eth_dev)
{
	struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
	struct adapter *adapter = pi->adapter;
	uint64_t configured_offloads;
	int err;

	CXGBE_FUNC_TRACE();
	configured_offloads = eth_dev->data->dev_conf.rxmode.offloads;

	/* KEEP_CRC offload flag is not supported by PMD
	 * can remove the below block when DEV_RX_OFFLOAD_CRC_STRIP removed
	 */
	if (rte_eth_dev_must_keep_crc(configured_offloads)) {
		dev_info(adapter, "can't disable hw crc strip\n");
		eth_dev->data->dev_conf.rxmode.offloads |=
			DEV_RX_OFFLOAD_CRC_STRIP;
	}

	if (!(adapter->flags & FW_QUEUE_BOUND)) {
		err = setup_sge_fwevtq(adapter);
		if (err)
			return err;
		adapter->flags |= FW_QUEUE_BOUND;
		err = setup_sge_ctrl_txq(adapter);
		if (err)
			return err;
	}

	err = cfg_queue_count(eth_dev);
	if (err)
		return err;

	return 0;
}

int cxgbe_dev_tx_queue_start(struct rte_eth_dev *eth_dev, uint16_t tx_queue_id)
{
	int ret;
	struct sge_eth_txq *txq = (struct sge_eth_txq *)
				  (eth_dev->data->tx_queues[tx_queue_id]);

	dev_debug(NULL, "%s: tx_queue_id = %d\n", __func__, tx_queue_id);

	ret = t4_sge_eth_txq_start(txq);
	if (ret == 0)
		eth_dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;

	return ret;
}
Example #11
void
sfc_tx_qstop(struct sfc_adapter *sa, unsigned int sw_index)
{
	struct rte_eth_dev_data *dev_data;
	struct sfc_txq_info *txq_info;
	struct sfc_txq *txq;
	unsigned int retry_count;
	unsigned int wait_count;

	sfc_log_init(sa, "TxQ = %u", sw_index);

	SFC_ASSERT(sw_index < sa->txq_count);
	txq_info = &sa->txq_info[sw_index];

	txq = txq_info->txq;

	if (txq->state == SFC_TXQ_INITIALIZED)
		return;

	SFC_ASSERT(txq->state & SFC_TXQ_STARTED);

	sa->dp_tx->qstop(txq->dp, &txq->evq->read_ptr);

	/*
	 * Retry TX queue flushing in case of flush failed or
	 * timeout; in the worst case it can delay for 6 seconds
	 */
	for (retry_count = 0;
	     ((txq->state & SFC_TXQ_FLUSHED) == 0) &&
	     (retry_count < SFC_TX_QFLUSH_ATTEMPTS);
	     ++retry_count) {
		if (efx_tx_qflush(txq->common) != 0) {
			txq->state |= SFC_TXQ_FLUSHING;
			break;
		}

		/*
		 * Wait for TX queue flush done or flush failed event at least
		 * SFC_TX_QFLUSH_POLL_WAIT_MS milliseconds and not more
		 * than 2 seconds (SFC_TX_QFLUSH_POLL_WAIT_MS multiplied
		 * by SFC_TX_QFLUSH_POLL_ATTEMPTS)
		 */
		wait_count = 0;
		do {
			rte_delay_ms(SFC_TX_QFLUSH_POLL_WAIT_MS);
			sfc_ev_qpoll(txq->evq);
		} while ((txq->state & SFC_TXQ_FLUSHING) &&
			 wait_count++ < SFC_TX_QFLUSH_POLL_ATTEMPTS);

		if (txq->state & SFC_TXQ_FLUSHING)
			sfc_err(sa, "TxQ %u flush timed out", sw_index);

		if (txq->state & SFC_TXQ_FLUSHED)
			sfc_info(sa, "TxQ %u flushed", sw_index);
	}

	sa->dp_tx->qreap(txq->dp);

	txq->state = SFC_TXQ_INITIALIZED;

	efx_tx_qdestroy(txq->common);

	sfc_ev_qstop(txq->evq);

	/*
	 * It seems to be used by DPDK for debug purposes only ('rte_ether')
	 */
	dev_data = sa->eth_dev->data;
	dev_data->tx_queue_state[sw_index] = RTE_ETH_QUEUE_STATE_STOPPED;
}
Example #12
int
main(int argc, char **argv)
{
	uint32_t i;
	int32_t ret;

	printf("\n%s %s\n", wr_copyright_msg(), wr_powered_by()); fflush(stdout);

	wr_scrn_setw(1);/* Reset the window size */

	/* call before the rte_eal_init() */
	(void)rte_set_application_usage_hook(pktgen_usage);

	memset(&pktgen, 0, sizeof(pktgen));

	pktgen.flags            = PRINT_LABELS_FLAG;
	pktgen.ident            = 0x1234;
	pktgen.nb_rxd           = DEFAULT_RX_DESC;
	pktgen.nb_txd           = DEFAULT_TX_DESC;
	pktgen.nb_ports_per_page = DEFAULT_PORTS_PER_PAGE;

	if ( (pktgen.l2p = wr_l2p_create()) == NULL)
		pktgen_log_panic("Unable to create l2p");

	pktgen.portdesc_cnt = wr_get_portdesc(pktgen.portlist, pktgen.portdesc, RTE_MAX_ETHPORTS, 0);

	/* Initialize the screen and logging */
	pktgen_init_log();
	pktgen_cpu_init();

	/* initialize EAL */
	ret = rte_eal_init(argc, argv);
	if (ret < 0)
		return -1;
	argc -= ret;
	argv += ret;

	pktgen.hz = rte_get_timer_hz();	/* Get the starting HZ value. */

	/* parse application arguments (after the EAL ones) */
	ret = pktgen_parse_args(argc, argv);
	if (ret < 0)
		return -1;

	pktgen_init_screen((pktgen.flags & ENABLE_THEME_FLAG) ? THEME_ON : THEME_OFF);

	rte_delay_ms(100);	/* Wait a bit for things to settle. */

	wr_print_copyright(PKTGEN_APP_NAME, PKTGEN_CREATED_BY);

	lua_newlib_add(_lua_openlib);

	/* Open the Lua script handler. */
	if ( (pktgen.L = lua_create_instance()) == NULL) {
		pktgen_log_error("Failed to open Lua pktgen support library");
		return -1;
	}

	pktgen_log_info(">>> Packet Burst %d, RX Desc %d, TX Desc %d, mbufs/port %d, mbuf cache %d",
	                DEFAULT_PKT_BURST, DEFAULT_RX_DESC, DEFAULT_TX_DESC, MAX_MBUFS_PER_PORT, MBUF_CACHE_SIZE);

	/* Configure and initialize the ports */
	pktgen_config_ports();

	pktgen_log_info("");
	pktgen_log_info("=== Display processing on lcore %d", rte_lcore_id());

	/* launch per-lcore init on every lcore except master and master + 1 lcores */
	for (i = 0; i < RTE_MAX_LCORE; i++) {
		if ( (i == rte_get_master_lcore()) || !rte_lcore_is_enabled(i) )
			continue;
		ret = rte_eal_remote_launch(pktgen_launch_one_lcore, NULL, i);
		if (ret != 0)
			pktgen_log_error("Failed to start lcore %d, return %d", i, ret);
	}
	rte_delay_ms(1000);	/* Wait for the lcores to start up. */

	/* Disable printing log messages of level info and below to screen, */
	/* erase the screen and start updating the screen again. */
	pktgen_log_set_screen_level(LOG_LEVEL_WARNING);
	wr_scrn_erase(pktgen.scrn->nrows);

	wr_logo(3, 16, PKTGEN_APP_NAME);
	wr_splash_screen(3, 16, PKTGEN_APP_NAME, PKTGEN_CREATED_BY);

	wr_scrn_resume();

	pktgen_redisplay(1);

	rte_timer_setup();

	if (pktgen.flags & ENABLE_GUI_FLAG) {
		if (!wr_scrn_is_paused() ) {
			wr_scrn_pause();
			wr_scrn_cls();
			wr_scrn_setw(1);
			wr_scrn_pos(pktgen.scrn->nrows, 1);
		}

		lua_init_socket(pktgen.L, &pktgen.thread, pktgen.hostname, pktgen.socket_port);
	}

	pktgen_cmdline_start();

	execute_lua_close(pktgen.L);
	pktgen_stop_running();

	wr_scrn_pause();

	wr_scrn_setw(1);
	wr_scrn_printf(100, 1, "\n");	/* Put the cursor on the last row and do a newline. */

	/* Wait for all of the cores to stop running and exit. */
	rte_eal_mp_wait_lcore();

	return 0;
}
Example #13
/**
 * lio_mbox_write:
 * @lio_dev: Pointer lio device
 * @mbox_cmd: Cmd to send to mailbox.
 *
 * Populates the queue specific mbox structure
 * with cmd information.
 * Write the cmd to mbox register
 */
int
lio_mbox_write(struct lio_device *lio_dev,
	       struct lio_mbox_cmd *mbox_cmd)
{
	struct lio_mbox *mbox = lio_dev->mbox[mbox_cmd->q_no];
	uint32_t count, i, ret = LIO_MBOX_STATUS_SUCCESS;

	if ((mbox_cmd->msg.s.type == LIO_MBOX_RESPONSE) &&
			!(mbox->state & LIO_MBOX_STATE_REQ_RECEIVED))
		return LIO_MBOX_STATUS_FAILED;

	if ((mbox_cmd->msg.s.type == LIO_MBOX_REQUEST) &&
			!(mbox->state & LIO_MBOX_STATE_IDLE))
		return LIO_MBOX_STATUS_BUSY;

	if (mbox_cmd->msg.s.type == LIO_MBOX_REQUEST) {
		rte_memcpy(&mbox->mbox_resp, mbox_cmd,
			   sizeof(struct lio_mbox_cmd));
		mbox->state = LIO_MBOX_STATE_RES_PENDING;
	}

	count = 0;

	while (rte_read64(mbox->mbox_write_reg) != LIO_PFVFSIG) {
		rte_delay_ms(1);
		if (count++ == 1000) {
			ret = LIO_MBOX_STATUS_FAILED;
			break;
		}
	}

	if (ret == LIO_MBOX_STATUS_SUCCESS) {
		rte_write64(mbox_cmd->msg.mbox_msg64, mbox->mbox_write_reg);
		for (i = 0; i < (uint32_t)(mbox_cmd->msg.s.len - 1); i++) {
			count = 0;
			while (rte_read64(mbox->mbox_write_reg) !=
					LIO_PFVFACK) {
				rte_delay_ms(1);
				if (count++ == 1000) {
					ret = LIO_MBOX_STATUS_FAILED;
					break;
				}
			}
			rte_write64(mbox_cmd->data[i], mbox->mbox_write_reg);
		}
	}

	if (mbox_cmd->msg.s.type == LIO_MBOX_RESPONSE) {
		mbox->state = LIO_MBOX_STATE_IDLE;
		rte_write64(LIO_PFVFSIG, mbox->mbox_read_reg);
	} else {
		if ((!mbox_cmd->msg.s.resp_needed) ||
				(ret == LIO_MBOX_STATUS_FAILED)) {
			mbox->state &= ~LIO_MBOX_STATE_RES_PENDING;
			if (!(mbox->state & (LIO_MBOX_STATE_REQ_RECEIVING |
					     LIO_MBOX_STATE_REQ_RECEIVED)))
				mbox->state = LIO_MBOX_STATE_IDLE;
		}
	}

	return ret;
}
static int
test_multi_alarms(void)
{
	int rm_count = 0;
	cb_count.cnt = 0;

	printf("Expect 6 callbacks in order...\n");
	/* add two alarms in order */
	rte_eal_alarm_set(1000 * US_PER_MS, test_multi_cb, (void *)1);
	rte_eal_alarm_set(2000 * US_PER_MS, test_multi_cb, (void *)2);

	/* now add in reverse order */
	rte_eal_alarm_set(6000 * US_PER_MS, test_multi_cb, (void *)6);
	rte_eal_alarm_set(5000 * US_PER_MS, test_multi_cb, (void *)5);
	rte_eal_alarm_set(4000 * US_PER_MS, test_multi_cb, (void *)4);
	rte_eal_alarm_set(3000 * US_PER_MS, test_multi_cb, (void *)3);

	/* wait for expiry */
	rte_delay_ms(6500);
	if (cb_count.cnt != 6) {
		printf("Missing callbacks\n");
		/* remove any callbacks that might remain */
		rte_eal_alarm_cancel(test_multi_cb, (void *)-1);
		return -1;
	}

	cb_count.cnt = 0;
	printf("Expect only callbacks with args 1 and 3...\n");
	/* Add 3 flags, then delete one */
	rte_eal_alarm_set(3000 * US_PER_MS, test_multi_cb, (void *)3);
	rte_eal_alarm_set(2000 * US_PER_MS, test_multi_cb, (void *)2);
	rte_eal_alarm_set(1000 * US_PER_MS, test_multi_cb, (void *)1);
	rm_count = rte_eal_alarm_cancel(test_multi_cb, (void *)2);

	rte_delay_ms(3500);
	if (cb_count.cnt != 2 || rm_count != 1) {
		printf("Error: invalid flags count or alarm removal failure"
				" -  flags value = %d, expected = %d\n",
				(int)cb_count.cnt, 2);
		/* remove any callbacks that might remain */
		rte_eal_alarm_cancel(test_multi_cb, (void *)-1);
		return -1;
	}

	printf("Testing adding and then removing multiple alarms\n");
	/* finally test that no callbacks are called if we delete them all*/
	rte_eal_alarm_set(1000 * US_PER_MS, test_multi_cb, (void *)1);
	rte_eal_alarm_set(1000 * US_PER_MS, test_multi_cb, (void *)2);
	rte_eal_alarm_set(1000 * US_PER_MS, test_multi_cb, (void *)3);
	rm_count = rte_eal_alarm_cancel(test_alarm_callback, (void *)-1);
	if (rm_count != 0) {
		printf("Error removing non-existant alarm succeeded\n");
		rte_eal_alarm_cancel(test_multi_cb, (void *) -1);
		return -1;
	}
	rm_count = rte_eal_alarm_cancel(test_multi_cb, (void *) -1);
	if (rm_count != 3) {
		printf("Error removing all pending alarm callbacks\n");
		return -1;
	}

	/* Test that we cannot cancel an alarm from within the callback itself
	 * Also test that we can cancel head-of-line callbacks ok.*/
	flag = 0;
	recursive_error = 0;
	rte_eal_alarm_set(1000 * US_PER_MS, test_remove_in_callback, (void *)1);
	rte_eal_alarm_set(2000 * US_PER_MS, test_remove_in_callback, (void *)2);
	rm_count = rte_eal_alarm_cancel(test_remove_in_callback, (void *)1);
	if (rm_count != 1) {
		printf("Error cancelling head-of-list callback\n");
		return -1;
	}
	rte_delay_ms(1500);
	if (flag != 0) {
		printf("Error, cancelling head-of-list leads to premature callback\n");
		return -1;
	}
	rte_delay_ms(1000);
	if (flag != 2) {
		printf("Error - expected callback not called\n");
		rte_eal_alarm_cancel(test_remove_in_callback, (void *)-1);
		return -1;
	}
	if (recursive_error == 1)
		return -1;

	/* Check if it can cancel all for the same callback */
	printf("Testing canceling all for the same callback\n");
	flag_2 = 0;
	rte_eal_alarm_set(1000 * US_PER_MS, test_remove_in_callback, (void *)1);
	rte_eal_alarm_set(2000 * US_PER_MS, test_remove_in_callback_2, (void *)2);
	rte_eal_alarm_set(3000 * US_PER_MS, test_remove_in_callback_2, (void *)3);
	rte_eal_alarm_set(4000 * US_PER_MS, test_remove_in_callback, (void *)4);
	rm_count = rte_eal_alarm_cancel(test_remove_in_callback_2, (void *)-1);
	if (rm_count != 2) {
		printf("Error, cannot cancel all for the same callback\n");
		return -1;
	}
	rm_count = rte_eal_alarm_cancel(test_remove_in_callback, (void *)-1);
	if (rm_count != 2) {
		printf("Error, cannot cancel all for the same callback\n");
		return -1;
	}

	return 0;
}
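
test_multi_alarms() counts callback invocations through cb_count.cnt, but cb_count and test_multi_cb() are not defined in this excerpt. Because .cnt is read directly, cb_count is presumably an rte_atomic32_t; the sketch below is an assumption of the counting callback's shape, not the upstream definition.

#include <rte_atomic.h>

static rte_atomic32_t cb_count;

/* Each expiring alarm increments the shared counter; arg identifies which alarm fired. */
static void
test_multi_cb(void *arg)
{
	(void)arg;
	rte_atomic32_inc(&cb_count);
}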
Example #15
/**
 * Main function of testing interrupt.
 */
static int
test_interrupt(void)
{
	int ret = -1;
	struct rte_intr_handle test_intr_handle;

	if (test_interrupt_init() < 0) {
		printf("fail to initialize for testing interrupt\n");
		return -1;
	}

	printf("Check unknown valid interrupt full path\n");
	if (test_interrupt_full_path_check(TEST_INTERRUPT_HANDLE_VALID) < 0) {
		printf("failure occurred during checking unknown valid "
						"interrupt full path\n");
		goto out;
	}

	printf("Check valid UIO interrupt full path\n");
	if (test_interrupt_full_path_check(TEST_INTERRUPT_HANDLE_VALID_UIO)
									< 0) {
		printf("failure occurred during checking valid UIO interrupt "
								"full path\n");
		goto out;
	}

	printf("Check valid device event interrupt full path\n");
	if (test_interrupt_full_path_check(
		TEST_INTERRUPT_HANDLE_VALID_DEV_EVENT) < 0) {
		printf("failure occurred during checking valid device event "
						"interrupt full path\n");
		goto out;
	}

	printf("Check valid alarm interrupt full path\n");
	if (test_interrupt_full_path_check(
		TEST_INTERRUPT_HANDLE_VALID_ALARM) < 0) {
		printf("failure occurred during checking valid alarm "
						"interrupt full path\n");
		goto out;
	}

	printf("start register/unregister test\n");
	/* check if it will fail to register cb with intr_handle = NULL */
	if (rte_intr_callback_register(NULL, test_interrupt_callback,
							NULL) == 0) {
		printf("unexpectedly register successfully with null "
			"intr_handle\n");
		goto out;
	}

	/* check if it will fail to register cb with invalid intr_handle */
	test_intr_handle = intr_handles[TEST_INTERRUPT_HANDLE_INVALID];
	if (rte_intr_callback_register(&test_intr_handle,
			test_interrupt_callback, &test_intr_handle) == 0) {
		printf("unexpectedly register successfully with invalid "
			"intr_handle\n");
		goto out;
	}

	/* check if it will fail to register without callback */
	test_intr_handle = intr_handles[TEST_INTERRUPT_HANDLE_VALID];
	if (rte_intr_callback_register(&test_intr_handle, NULL, &test_intr_handle) == 0) {
		printf("unexpectedly register successfully with "
			"null callback\n");
		goto out;
	}

	/* check if it will fail to unregister cb with intr_handle = NULL */
	if (rte_intr_callback_unregister(NULL,
			test_interrupt_callback, NULL) > 0) {
		printf("unexpectedly unregister successfully with "
			"null intr_handle\n");
		goto out;
	}

	/* check if it will fail to unregister cb with invalid intr_handle */
	test_intr_handle = intr_handles[TEST_INTERRUPT_HANDLE_INVALID];
	if (rte_intr_callback_unregister(&test_intr_handle,
			test_interrupt_callback, &test_intr_handle) > 0) {
		printf("unexpectedly unregister successfully with "
			"invalid intr_handle\n");
		goto out;
	}

	/* check if it is ok to register the same intr_handle twice */
	test_intr_handle = intr_handles[TEST_INTERRUPT_HANDLE_VALID];
	if (rte_intr_callback_register(&test_intr_handle,
			test_interrupt_callback, &test_intr_handle) < 0) {
		printf("it fails to register test_interrupt_callback\n");
		goto out;
	}
	if (rte_intr_callback_register(&test_intr_handle,
			test_interrupt_callback_1, &test_intr_handle) < 0) {
		printf("it fails to register test_interrupt_callback_1\n");
		goto out;
	}
	/* check if it will fail to unregister with invalid parameter */
	if (rte_intr_callback_unregister(&test_intr_handle,
			test_interrupt_callback, (void *)0xff) != 0) {
		printf("unexpectedly unregisters successfully with "
							"invalid arg\n");
		goto out;
	}
	if (rte_intr_callback_unregister(&test_intr_handle,
			test_interrupt_callback, &test_intr_handle) <= 0) {
		printf("it fails to unregister test_interrupt_callback\n");
		goto out;
	}
	if (rte_intr_callback_unregister(&test_intr_handle,
			test_interrupt_callback_1, (void *)-1) <= 0) {
		printf("it fails to unregister test_interrupt_callback_1 "
			"for all\n");
		goto out;
	}
	rte_delay_ms(TEST_INTERRUPT_CHECK_INTERVAL);

	printf("start interrupt enable/disable test\n");
	/* check interrupt enable/disable functions */
	if (test_interrupt_enable() < 0) {
		printf("fail to check interrupt enabling\n");
		goto out;
	}
	rte_delay_ms(TEST_INTERRUPT_CHECK_INTERVAL);

	if (test_interrupt_disable() < 0) {
		printf("fail to check interrupt disabling\n");
		goto out;
	}
	rte_delay_ms(TEST_INTERRUPT_CHECK_INTERVAL);

	ret = 0;

out:
	printf("Clearing for interrupt tests\n");
	/* clear registered callbacks */
	test_intr_handle = intr_handles[TEST_INTERRUPT_HANDLE_VALID];
	rte_intr_callback_unregister(&test_intr_handle,
			test_interrupt_callback, (void *)-1);
	rte_intr_callback_unregister(&test_intr_handle,
			test_interrupt_callback_1, (void *)-1);

	test_intr_handle = intr_handles[TEST_INTERRUPT_HANDLE_VALID_UIO];
	rte_intr_callback_unregister(&test_intr_handle,
			test_interrupt_callback, (void *)-1);
	rte_intr_callback_unregister(&test_intr_handle,
			test_interrupt_callback_1, (void *)-1);

	test_intr_handle = intr_handles[TEST_INTERRUPT_HANDLE_VALID_ALARM];
	rte_intr_callback_unregister(&test_intr_handle,
			test_interrupt_callback, (void *)-1);
	rte_intr_callback_unregister(&test_intr_handle,
			test_interrupt_callback_1, (void *)-1);

	test_intr_handle = intr_handles[TEST_INTERRUPT_HANDLE_VALID_DEV_EVENT];
	rte_intr_callback_unregister(&test_intr_handle,
			test_interrupt_callback, (void *)-1);
	rte_intr_callback_unregister(&test_intr_handle,
			test_interrupt_callback_1, (void *)-1);

	rte_delay_ms(2 * TEST_INTERRUPT_CHECK_INTERVAL);
	/* deinit */
	test_interrupt_deinit();

	return ret;
}
Example #16
int
cn23xx_pfvf_handshake(struct lio_device *lio_dev)
{
	struct lio_mbox_cmd mbox_cmd;
	struct lio_version *lio_ver = (struct lio_version *)&mbox_cmd.data[0];
	uint32_t q_no, count = 0;
	rte_atomic64_t status;
	uint32_t pfmajor;
	uint32_t vfmajor;
	uint32_t ret;

	PMD_INIT_FUNC_TRACE();

	/* Sending VF_ACTIVE indication to the PF driver */
	lio_dev_dbg(lio_dev, "requesting info from PF\n");

	mbox_cmd.msg.mbox_msg64 = 0;
	mbox_cmd.msg.s.type = LIO_MBOX_REQUEST;
	mbox_cmd.msg.s.resp_needed = 1;
	mbox_cmd.msg.s.cmd = LIO_VF_ACTIVE;
	mbox_cmd.msg.s.len = 2;
	mbox_cmd.data[0] = 0;
	lio_ver->major = LIO_BASE_MAJOR_VERSION;
	lio_ver->minor = LIO_BASE_MINOR_VERSION;
	lio_ver->micro = LIO_BASE_MICRO_VERSION;
	mbox_cmd.q_no = 0;
	mbox_cmd.recv_len = 0;
	mbox_cmd.recv_status = 0;
	mbox_cmd.fn = (lio_mbox_callback)cn23xx_pfvf_hs_callback;
	mbox_cmd.fn_arg = (void *)&status;

	if (lio_mbox_write(lio_dev, &mbox_cmd)) {
		lio_dev_err(lio_dev, "Write to mailbox failed\n");
		return -1;
	}

	rte_atomic64_set(&status, 0);

	do {
		rte_delay_ms(1);
	} while ((rte_atomic64_read(&status) == 0) && (count++ < 10000));

	ret = rte_atomic64_read(&status);
	if (ret == 0) {
		lio_dev_err(lio_dev, "cn23xx_pfvf_handshake timeout\n");
		return -1;
	}

	for (q_no = 0; q_no < lio_dev->num_iqs; q_no++)
		lio_dev->instr_queue[q_no]->txpciq.s.pkind =
						lio_dev->pfvf_hsword.pkind;

	vfmajor = LIO_BASE_MAJOR_VERSION;
	pfmajor = ret >> 16;
	if (pfmajor != vfmajor) {
		lio_dev_err(lio_dev,
			    "VF LiquidIO driver (major version %d) is not compatible with LiquidIO PF driver (major version %d)\n",
			    vfmajor, pfmajor);
		ret = -EPERM;
	} else {
		lio_dev_dbg(lio_dev,
			    "VF LiquidIO driver (major version %d), LiquidIO PF driver (major version %d)\n",
			    vfmajor, pfmajor);
		ret = 0;
	}

	lio_dev_dbg(lio_dev, "got data from PF pkind is %d\n",
		    lio_dev->pfvf_hsword.pkind);

	return ret;
}
Example #17
static void
avf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
	struct avf_adapter *adapter =
		AVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
	struct avf_info *vf = AVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);

	memset(dev_info, 0, sizeof(*dev_info));
	dev_info->pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	dev_info->max_rx_queues = vf->vsi_res->num_queue_pairs;
	dev_info->max_tx_queues = vf->vsi_res->num_queue_pairs;
	dev_info->min_rx_bufsize = AVF_BUF_SIZE_MIN;
	dev_info->max_rx_pktlen = AVF_FRAME_SIZE_MAX;
	dev_info->hash_key_size = vf->vf_res->rss_key_size;
	dev_info->reta_size = vf->vf_res->rss_lut_size;
	dev_info->flow_type_rss_offloads = AVF_RSS_OFFLOAD_ALL;
	dev_info->max_mac_addrs = AVF_NUM_MACADDR_MAX;
	dev_info->rx_offload_capa =
		DEV_RX_OFFLOAD_VLAN_STRIP |
		DEV_RX_OFFLOAD_IPV4_CKSUM |
		DEV_RX_OFFLOAD_UDP_CKSUM |
		DEV_RX_OFFLOAD_TCP_CKSUM;
	dev_info->tx_offload_capa =
		DEV_TX_OFFLOAD_VLAN_INSERT |
		DEV_TX_OFFLOAD_IPV4_CKSUM |
		DEV_TX_OFFLOAD_UDP_CKSUM |
		DEV_TX_OFFLOAD_TCP_CKSUM |
		DEV_TX_OFFLOAD_SCTP_CKSUM |
		DEV_TX_OFFLOAD_TCP_TSO;

	dev_info->default_rxconf = (struct rte_eth_rxconf) {
		.rx_free_thresh = AVF_DEFAULT_RX_FREE_THRESH,
		.rx_drop_en = 0,
	};

	dev_info->default_txconf = (struct rte_eth_txconf) {
		.tx_free_thresh = AVF_DEFAULT_TX_FREE_THRESH,
		.tx_rs_thresh = AVF_DEFAULT_TX_RS_THRESH,
		.txq_flags = ETH_TXQ_FLAGS_NOMULTSEGS |
				ETH_TXQ_FLAGS_NOOFFLOADS,
	};

	dev_info->rx_desc_lim = (struct rte_eth_desc_lim) {
		.nb_max = AVF_MAX_RING_DESC,
		.nb_min = AVF_MIN_RING_DESC,
		.nb_align = AVF_ALIGN_RING_DESC,
	};

	dev_info->tx_desc_lim = (struct rte_eth_desc_lim) {
		.nb_max = AVF_MAX_RING_DESC,
		.nb_min = AVF_MIN_RING_DESC,
		.nb_align = AVF_ALIGN_RING_DESC,
	};
}

static const uint32_t *
avf_dev_supported_ptypes_get(struct rte_eth_dev *dev)
{
	static const uint32_t ptypes[] = {
		RTE_PTYPE_L2_ETHER,
		RTE_PTYPE_L3_IPV4_EXT_UNKNOWN,
		RTE_PTYPE_L4_FRAG,
		RTE_PTYPE_L4_ICMP,
		RTE_PTYPE_L4_NONFRAG,
		RTE_PTYPE_L4_SCTP,
		RTE_PTYPE_L4_TCP,
		RTE_PTYPE_L4_UDP,
		RTE_PTYPE_UNKNOWN
	};
	return ptypes;
}

int
avf_dev_link_update(struct rte_eth_dev *dev,
		    __rte_unused int wait_to_complete)
{
	struct rte_eth_link new_link;
	struct avf_info *vf = AVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);

	/* Only read the link status info stored in the VF; it is updated
	 *  when a LINK_CHANGE event is received from the PF via virtchnl.
	 */
	switch (vf->link_speed) {
	case VIRTCHNL_LINK_SPEED_100MB:
		new_link.link_speed = ETH_SPEED_NUM_100M;
		break;
	case VIRTCHNL_LINK_SPEED_1GB:
		new_link.link_speed = ETH_SPEED_NUM_1G;
		break;
	case VIRTCHNL_LINK_SPEED_10GB:
		new_link.link_speed = ETH_SPEED_NUM_10G;
		break;
	case VIRTCHNL_LINK_SPEED_20GB:
		new_link.link_speed = ETH_SPEED_NUM_20G;
		break;
	case VIRTCHNL_LINK_SPEED_25GB:
		new_link.link_speed = ETH_SPEED_NUM_25G;
		break;
	case VIRTCHNL_LINK_SPEED_40GB:
		new_link.link_speed = ETH_SPEED_NUM_40G;
		break;
	default:
		new_link.link_speed = ETH_SPEED_NUM_NONE;
		break;
	}

	new_link.link_duplex = ETH_LINK_FULL_DUPLEX;
	new_link.link_status = vf->link_up ? ETH_LINK_UP :
					     ETH_LINK_DOWN;
	new_link.link_autoneg = !!(dev->data->dev_conf.link_speeds &
				ETH_LINK_SPEED_FIXED);

	if (rte_atomic64_cmpset((uint64_t *)&dev->data->dev_link,
				*(uint64_t *)&dev->data->dev_link,
				*(uint64_t *)&new_link) == 0)
		return -1;

	return 0;
}

static void
avf_dev_promiscuous_enable(struct rte_eth_dev *dev)
{
	struct avf_adapter *adapter =
		AVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
	struct avf_info *vf = AVF_DEV_PRIVATE_TO_VF(adapter);
	int ret;

	if (vf->promisc_unicast_enabled)
		return;

	ret = avf_config_promisc(adapter, TRUE, vf->promisc_multicast_enabled);
	if (!ret)
		vf->promisc_unicast_enabled = TRUE;
}

static void
avf_dev_promiscuous_disable(struct rte_eth_dev *dev)
{
	struct avf_adapter *adapter =
		AVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
	struct avf_info *vf = AVF_DEV_PRIVATE_TO_VF(adapter);
	int ret;

	if (!vf->promisc_unicast_enabled)
		return;

	ret = avf_config_promisc(adapter, FALSE, vf->promisc_multicast_enabled);
	if (!ret)
		vf->promisc_unicast_enabled = FALSE;
}

static void
avf_dev_allmulticast_enable(struct rte_eth_dev *dev)
{
	struct avf_adapter *adapter =
		AVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
	struct avf_info *vf = AVF_DEV_PRIVATE_TO_VF(adapter);
	int ret;

	if (vf->promisc_multicast_enabled)
		return;

	ret = avf_config_promisc(adapter, vf->promisc_unicast_enabled, TRUE);
	if (!ret)
		vf->promisc_multicast_enabled = TRUE;
}

static void
avf_dev_allmulticast_disable(struct rte_eth_dev *dev)
{
	struct avf_adapter *adapter =
		AVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
	struct avf_info *vf = AVF_DEV_PRIVATE_TO_VF(adapter);
	int ret;

	if (!vf->promisc_multicast_enabled)
		return;

	ret = avf_config_promisc(adapter, vf->promisc_unicast_enabled, FALSE);
	if (!ret)
		vf->promisc_multicast_enabled = FALSE;
}

static int
avf_dev_add_mac_addr(struct rte_eth_dev *dev, struct ether_addr *addr,
		     __rte_unused uint32_t index,
		     __rte_unused uint32_t pool)
{
	struct avf_adapter *adapter =
		AVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
	struct avf_info *vf = AVF_DEV_PRIVATE_TO_VF(adapter);
	int err;

	if (is_zero_ether_addr(addr)) {
		PMD_DRV_LOG(ERR, "Invalid Ethernet Address");
		return -EINVAL;
	}

	err = avf_add_del_eth_addr(adapter, addr, TRUE);
	if (err) {
		PMD_DRV_LOG(ERR, "fail to add MAC address");
		return -EIO;
	}

	vf->mac_num++;

	return 0;
}

static void
avf_dev_del_mac_addr(struct rte_eth_dev *dev, uint32_t index)
{
	struct avf_adapter *adapter =
		AVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
	struct avf_info *vf = AVF_DEV_PRIVATE_TO_VF(adapter);
	struct ether_addr *addr;
	int err;

	addr = &dev->data->mac_addrs[index];

	err = avf_add_del_eth_addr(adapter, addr, FALSE);
	if (err)
		PMD_DRV_LOG(ERR, "fail to delete MAC address");

	vf->mac_num--;
}

static int
avf_dev_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
{
	struct avf_adapter *adapter =
		AVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
	struct avf_info *vf = AVF_DEV_PRIVATE_TO_VF(adapter);
	int err;

	if (!(vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN))
		return -ENOTSUP;

	err = avf_add_del_vlan(adapter, vlan_id, on);
	if (err)
		return -EIO;
	return 0;
}

static int
avf_dev_vlan_offload_set(struct rte_eth_dev *dev, int mask)
{
	struct avf_adapter *adapter =
		AVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
	struct avf_info *vf = AVF_DEV_PRIVATE_TO_VF(adapter);
	struct rte_eth_conf *dev_conf = &dev->data->dev_conf;
	int err;

	if (!(vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN))
		return -ENOTSUP;

	/* Vlan stripping setting */
	if (mask & ETH_VLAN_STRIP_MASK) {
		/* Enable or disable VLAN stripping */
		if (dev_conf->rxmode.hw_vlan_strip)
			err = avf_enable_vlan_strip(adapter);
		else
			err = avf_disable_vlan_strip(adapter);

		if (err)
			return -EIO;
	}
	return 0;
}

static int
avf_dev_rss_reta_update(struct rte_eth_dev *dev,
			struct rte_eth_rss_reta_entry64 *reta_conf,
			uint16_t reta_size)
{
	struct avf_adapter *adapter =
		AVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
	struct avf_info *vf = AVF_DEV_PRIVATE_TO_VF(adapter);
	uint8_t *lut;
	uint16_t i, idx, shift;
	int ret;

	if (!(vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF))
		return -ENOTSUP;

	if (reta_size != vf->vf_res->rss_lut_size) {
		PMD_DRV_LOG(ERR, "The size of hash lookup table configured "
			"(%d) doesn't match the number of hardware can "
			"support (%d)", reta_size, vf->vf_res->rss_lut_size);
		return -EINVAL;
	}

	lut = rte_zmalloc("rss_lut", reta_size, 0);
	if (!lut) {
		PMD_DRV_LOG(ERR, "No memory can be allocated");
		return -ENOMEM;
	}
	/* store the old lut table temporarily */
	rte_memcpy(lut, vf->rss_lut, reta_size);

	for (i = 0; i < reta_size; i++) {
		idx = i / RTE_RETA_GROUP_SIZE;
		shift = i % RTE_RETA_GROUP_SIZE;
		if (reta_conf[idx].mask & (1ULL << shift))
			lut[i] = reta_conf[idx].reta[shift];
	}

	rte_memcpy(vf->rss_lut, lut, reta_size);
	/* send virtchnl ops to configure RSS */
	ret = avf_configure_rss_lut(adapter);
	if (ret) /* revert back */
		rte_memcpy(vf->rss_lut, lut, reta_size);
	rte_free(lut);

	return ret;
}

static int
avf_dev_rss_reta_query(struct rte_eth_dev *dev,
		       struct rte_eth_rss_reta_entry64 *reta_conf,
		       uint16_t reta_size)
{
	struct avf_adapter *adapter =
		AVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
	struct avf_info *vf = AVF_DEV_PRIVATE_TO_VF(adapter);
	uint16_t i, idx, shift;

	if (!(vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF))
		return -ENOTSUP;

	if (reta_size != vf->vf_res->rss_lut_size) {
		PMD_DRV_LOG(ERR, "The size of hash lookup table configured "
			"(%d) doesn't match the number of hardware can "
			"support (%d)", reta_size, vf->vf_res->rss_lut_size);
		return -EINVAL;
	}

	for (i = 0; i < reta_size; i++) {
		idx = i / RTE_RETA_GROUP_SIZE;
		shift = i % RTE_RETA_GROUP_SIZE;
		if (reta_conf[idx].mask & (1ULL << shift))
			reta_conf[idx].reta[shift] = vf->rss_lut[i];
	}

	return 0;
}

static int
avf_dev_rss_hash_update(struct rte_eth_dev *dev,
			struct rte_eth_rss_conf *rss_conf)
{
	struct avf_adapter *adapter =
		AVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
	struct avf_info *vf = AVF_DEV_PRIVATE_TO_VF(adapter);

	if (!(vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF))
		return -ENOTSUP;

	/* HENA setting, it is enabled by default, no change */
	if (!rss_conf->rss_key || rss_conf->rss_key_len == 0) {
		PMD_DRV_LOG(DEBUG, "No key to be configured");
		return 0;
	} else if (rss_conf->rss_key_len != vf->vf_res->rss_key_size) {
		PMD_DRV_LOG(ERR, "The size of hash key configured "
			"(%d) doesn't match the size of hardware can "
			"support (%d)", rss_conf->rss_key_len,
			vf->vf_res->rss_key_size);
		return -EINVAL;
	}

	rte_memcpy(vf->rss_key, rss_conf->rss_key, rss_conf->rss_key_len);

	return avf_configure_rss_key(adapter);
}

static int
avf_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
			  struct rte_eth_rss_conf *rss_conf)
{
	struct avf_adapter *adapter =
		AVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
	struct avf_info *vf = AVF_DEV_PRIVATE_TO_VF(adapter);

	if (!(vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF))
		return -ENOTSUP;

	 /* Just set it to default value now. */
	rss_conf->rss_hf = AVF_RSS_OFFLOAD_ALL;

	if (!rss_conf->rss_key)
		return 0;

	rss_conf->rss_key_len = vf->vf_res->rss_key_size;
	rte_memcpy(rss_conf->rss_key, vf->rss_key, rss_conf->rss_key_len);

	return 0;
}

static int
avf_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
{
	struct avf_info *vf = AVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
	uint32_t frame_size = mtu + AVF_ETH_OVERHEAD;
	int ret = 0;

	if (mtu < ETHER_MIN_MTU || frame_size > AVF_FRAME_SIZE_MAX)
		return -EINVAL;

	/* MTU setting is forbidden while the port is started */
	if (dev->data->dev_started) {
		PMD_DRV_LOG(ERR, "port must be stopped before configuration");
		return -EBUSY;
	}

	if (frame_size > ETHER_MAX_LEN)
		dev->data->dev_conf.rxmode.offloads |=
				DEV_RX_OFFLOAD_JUMBO_FRAME;
	else
		dev->data->dev_conf.rxmode.offloads &=
				~DEV_RX_OFFLOAD_JUMBO_FRAME;

	dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;

	return ret;
}

static void
avf_dev_set_default_mac_addr(struct rte_eth_dev *dev,
			     struct ether_addr *mac_addr)
{
	struct avf_adapter *adapter =
		AVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
	struct avf_hw *hw = AVF_DEV_PRIVATE_TO_HW(adapter);
	struct ether_addr *perm_addr, *old_addr;
	int ret;

	old_addr = (struct ether_addr *)hw->mac.addr;
	perm_addr = (struct ether_addr *)hw->mac.perm_addr;

	if (is_same_ether_addr(mac_addr, old_addr))
		return;

	/* If the MAC address is configured by host, skip the setting */
	if (is_valid_assigned_ether_addr(perm_addr))
		return;

	ret = avf_add_del_eth_addr(adapter, old_addr, FALSE);
	if (ret)
		PMD_DRV_LOG(ERR, "Fail to delete old MAC:"
			    " %02X:%02X:%02X:%02X:%02X:%02X",
			    old_addr->addr_bytes[0],
			    old_addr->addr_bytes[1],
			    old_addr->addr_bytes[2],
			    old_addr->addr_bytes[3],
			    old_addr->addr_bytes[4],
			    old_addr->addr_bytes[5]);

	ret = avf_add_del_eth_addr(adapter, mac_addr, TRUE);
	if (ret)
		PMD_DRV_LOG(ERR, "Fail to add new MAC:"
			    " %02X:%02X:%02X:%02X:%02X:%02X",
			    mac_addr->addr_bytes[0],
			    mac_addr->addr_bytes[1],
			    mac_addr->addr_bytes[2],
			    mac_addr->addr_bytes[3],
			    mac_addr->addr_bytes[4],
			    mac_addr->addr_bytes[5]);

	ether_addr_copy(mac_addr, (struct ether_addr *)hw->mac.addr);
}

static int
avf_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
{
	struct avf_adapter *adapter =
		AVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
	struct virtchnl_eth_stats *pstats = NULL;
	int ret;

	ret = avf_query_stats(adapter, &pstats);
	if (ret == 0) {
		stats->ipackets = pstats->rx_unicast + pstats->rx_multicast +
						pstats->rx_broadcast;
		stats->opackets = pstats->tx_broadcast + pstats->tx_multicast +
						pstats->tx_unicast;
		stats->imissed = pstats->rx_discards;
		stats->oerrors = pstats->tx_errors + pstats->tx_discards;
		stats->ibytes = pstats->rx_bytes;
		stats->obytes = pstats->tx_bytes;
	} else {
		PMD_DRV_LOG(ERR, "Get statistics failed");
		return -EIO;
	}

	return 0;
}

static int
avf_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
{
	struct avf_adapter *adapter =
		AVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct avf_hw *hw = AVF_DEV_PRIVATE_TO_HW(adapter);
	uint16_t msix_intr;

	msix_intr = pci_dev->intr_handle.intr_vec[queue_id];
	if (msix_intr == AVF_MISC_VEC_ID) {
		PMD_DRV_LOG(INFO, "MISC is also enabled for control");
		AVF_WRITE_REG(hw, AVFINT_DYN_CTL01,
			      AVFINT_DYN_CTL01_INTENA_MASK |
			      AVFINT_DYN_CTL01_ITR_INDX_MASK);
	} else {
		AVF_WRITE_REG(hw,
			      AVFINT_DYN_CTLN1(msix_intr - AVF_RX_VEC_START),
			      AVFINT_DYN_CTLN1_INTENA_MASK |
			      AVFINT_DYN_CTLN1_ITR_INDX_MASK);
	}

	AVF_WRITE_FLUSH(hw);

	rte_intr_enable(&pci_dev->intr_handle);

	return 0;
}

static int
avf_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
{
	struct avf_adapter *adapter =
		AVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct avf_hw *hw = AVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint16_t msix_intr;

	msix_intr = pci_dev->intr_handle.intr_vec[queue_id];
	if (msix_intr == AVF_MISC_VEC_ID) {
		PMD_DRV_LOG(ERR, "MISC is used for control, cannot disable it");
		return -EIO;
	}

	AVF_WRITE_REG(hw,
		      AVFINT_DYN_CTLN1(msix_intr - AVF_RX_VEC_START),
		      0);

	AVF_WRITE_FLUSH(hw);
	return 0;
}

static int
avf_check_vf_reset_done(struct avf_hw *hw)
{
	int i, reset;

	for (i = 0; i < AVF_RESET_WAIT_CNT; i++) {
		reset = AVF_READ_REG(hw, AVFGEN_RSTAT) &
			AVFGEN_RSTAT_VFR_STATE_MASK;
		reset = reset >> AVFGEN_RSTAT_VFR_STATE_SHIFT;
		if (reset == VIRTCHNL_VFR_VFACTIVE ||
		    reset == VIRTCHNL_VFR_COMPLETED)
			break;
		rte_delay_ms(20);
	}

	if (i >= AVF_RESET_WAIT_CNT)
		return -1;

	return 0;
}

static int
avf_init_vf(struct rte_eth_dev *dev)
{
	int i, err, bufsz;
	struct avf_adapter *adapter =
		AVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
	struct avf_hw *hw = AVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct avf_info *vf = AVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);

	err = avf_set_mac_type(hw);
	if (err) {
		PMD_INIT_LOG(ERR, "set_mac_type failed: %d", err);
		goto err;
	}

	err = avf_check_vf_reset_done(hw);
	if (err) {
		PMD_INIT_LOG(ERR, "VF is still resetting");
		goto err;
	}

	avf_init_adminq_parameter(hw);
	err = avf_init_adminq(hw);
	if (err) {
		PMD_INIT_LOG(ERR, "init_adminq failed: %d", err);
		goto err;
	}

	vf->aq_resp = rte_zmalloc("vf_aq_resp", AVF_AQ_BUF_SZ, 0);
	if (!vf->aq_resp) {
		PMD_INIT_LOG(ERR, "unable to allocate vf_aq_resp memory");
		goto err_aq;
	}
	if (avf_check_api_version(adapter) != 0) {
		PMD_INIT_LOG(ERR, "check_api version failed");
		goto err_api;
	}

	bufsz = sizeof(struct virtchnl_vf_resource) +
		(AVF_MAX_VF_VSI * sizeof(struct virtchnl_vsi_resource));
	vf->vf_res = rte_zmalloc("vf_res", bufsz, 0);
	if (!vf->vf_res) {
		PMD_INIT_LOG(ERR, "unable to allocate vf_res memory");
		goto err_api;
	}
	if (avf_get_vf_resource(adapter) != 0) {
		PMD_INIT_LOG(ERR, "avf_get_vf_config failed");
		goto err_alloc;
	}
	/* Allocate memory for RSS info */
	if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF) {
		vf->rss_key = rte_zmalloc("rss_key",
					  vf->vf_res->rss_key_size, 0);
		if (!vf->rss_key) {
			PMD_INIT_LOG(ERR, "unable to allocate rss_key memory");
			goto err_rss;
		}
		vf->rss_lut = rte_zmalloc("rss_lut",
					  vf->vf_res->rss_lut_size, 0);
		if (!vf->rss_lut) {
			PMD_INIT_LOG(ERR, "unable to allocate rss_lut memory");
			goto err_rss;
		}
	}
	return 0;
err_rss:
	rte_free(vf->rss_key);
	rte_free(vf->rss_lut);
err_alloc:
	rte_free(vf->vf_res);
	vf->vsi_res = NULL;
err_api:
	rte_free(vf->aq_resp);
err_aq:
	avf_shutdown_adminq(hw);
err:
	return -1;
}