Example 1
void mlx4_ib_invalidate_all_guid_record(struct mlx4_ib_dev *dev, int port)
{
	int i;
	unsigned long flags, flags1;

	pr_debug("port %d\n", port);

	spin_lock_irqsave(&dev->sriov.going_down_lock, flags);
	spin_lock_irqsave(&dev->sriov.alias_guid.ag_work_lock, flags1);
	for (i = 0; i < NUM_ALIAS_GUID_REC_IN_PORT; i++)
		invalidate_guid_record(dev, port, i);

	if (mlx4_is_master(dev->dev) && !dev->sriov.is_going_down) {
		/*
		 * Make sure no work waits in the queue.  If the work is
		 * already queued (not just pending on the timer), the cancel
		 * will fail; that is not a problem, because we just want the
		 * work started.
		 */
		__cancel_delayed_work(&dev->sriov.alias_guid.
				      ports_guid[port - 1].alias_guid_work);
		queue_delayed_work(dev->sriov.alias_guid.ports_guid[port - 1].wq,
				   &dev->sriov.alias_guid.ports_guid[port - 1].alias_guid_work,
				   0);
	}
	spin_unlock_irqrestore(&dev->sriov.alias_guid.ag_work_lock, flags1);
	spin_unlock_irqrestore(&dev->sriov.going_down_lock, flags);
}
Example 2
void mlx4_ib_update_cache_on_guid_change(struct mlx4_ib_dev *dev, int block_num,
					 u8 port_num, u8 *p_data)
{
	int i;
	u64 guid_indexes;
	int slave_id;
	int port_index = port_num - 1;

	if (!mlx4_is_master(dev->dev))
		return;

	guid_indexes = be64_to_cpu((__force __be64) dev->sriov.alias_guid.
				   ports_guid[port_num - 1].
				   all_rec_per_port[block_num].guid_indexes);
	pr_debug("port: %d, guid_indexes: 0x%llx\n", port_num, guid_indexes);

	for (i = 0; i < NUM_ALIAS_GUID_IN_REC; i++) {
		/* The change flags for the record's GUID indexes occupy
		 * bits 4 through 11 of guid_indexes. */
		if (test_bit(i + 4, (unsigned long *)&guid_indexes)) {
			slave_id = (block_num * NUM_ALIAS_GUID_IN_REC) + i;
			if (slave_id >= dev->dev->num_slaves) {
				pr_debug("The last slave: %d\n", slave_id);
				return;
			}

			/* cache the guid: */
			memcpy(&dev->sriov.demux[port_index].guid_cache[slave_id],
			       &p_data[i * GUID_REC_SIZE],
			       GUID_REC_SIZE);
		} else {
			pr_debug("Guid number: %d in block: %d was not updated\n",
				 i, block_num);
		}
	}
}
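
The bit test in mlx4_ib_update_cache_on_guid_change is terse: guid_indexes packs one change flag per GUID of the record into bits 4 through 11, and the flag's slot combines with block_num to yield the slave ID. Below is a minimal userspace sketch of the same decoding, assuming NUM_ALIAS_GUID_IN_REC is 8 (the definition is not shown in this excerpt) and using plain shifts in place of the driver's test_bit() on a cast to unsigned long *:

#include <stdio.h>
#include <stdint.h>

/* Assumed value mirroring the driver's NUM_ALIAS_GUID_IN_REC (eight GUIDs
 * per record; the definition is not shown in the excerpt). */
#define NUM_ALIAS_GUID_IN_REC 8

int main(void)
{
	/* Hypothetical guid_indexes with bits 5 and 8 set: slots 1 and 4 of
	 * the record changed. */
	uint64_t guid_indexes = (1ULL << 5) | (1ULL << 8);
	int block_num = 2;	/* hypothetical block index */
	int i;

	for (i = 0; i < NUM_ALIAS_GUID_IN_REC; i++) {
		/* Same condition as test_bit(i + 4, ...) in the driver. */
		if (guid_indexes & (1ULL << (i + 4))) {
			int slave_id = block_num * NUM_ALIAS_GUID_IN_REC + i;
			printf("slot %d changed -> slave %d\n", i, slave_id);
		}
	}
	return 0;
}

The shift form makes the bit layout explicit without depending on how test_bit() views a u64 as an array of unsigned long.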
Example 3
void mlx4_ib_init_alias_guid_work(struct mlx4_ib_dev *dev, int port)
{
	unsigned long flags, flags1;

	if (!mlx4_is_master(dev->dev))
		return;
	spin_lock_irqsave(&dev->sriov.going_down_lock, flags);
	spin_lock_irqsave(&dev->sriov.alias_guid.ag_work_lock, flags1);
	if (!dev->sriov.is_going_down) {
		queue_delayed_work(dev->sriov.alias_guid.ports_guid[port].wq,
			   &dev->sriov.alias_guid.ports_guid[port].alias_guid_work, 0);
	}
	spin_unlock_irqrestore(&dev->sriov.alias_guid.ag_work_lock, flags1);
	spin_unlock_irqrestore(&dev->sriov.going_down_lock, flags);
}
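
Note that this function and mlx4_ib_invalidate_all_guid_record (Example 1) nest the two spinlocks in the same order: going_down_lock first, then ag_work_lock. A single global acquisition order is what keeps the two paths deadlock-free. A generic userspace sketch of that rule, with pthread mutexes standing in for the spinlocks:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t going_down_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t ag_work_lock = PTHREAD_MUTEX_INITIALIZER;

/* Every caller takes going_down_lock before ag_work_lock, so no two
 * threads can each hold one lock while waiting for the other. */
static void locked_section(const char *who)
{
	pthread_mutex_lock(&going_down_lock);
	pthread_mutex_lock(&ag_work_lock);
	printf("%s: both locks held\n", who);
	pthread_mutex_unlock(&ag_work_lock);
	pthread_mutex_unlock(&going_down_lock);
}

int main(void)
{
	locked_section("init path");
	locked_section("invalidate path");
	return 0;
}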
Example 4
/*
 * Snoop SM MADs for port info, GUID info, and P_Key table sets, so we can
 * synthesize LID change, Client-Rereg, GID change, and P_Key change events.
 */
static void smp_snoop(struct ib_device *ibdev, u8 port_num, struct ib_mad *mad,
		      u16 prev_lid)
{
	struct ib_port_info *pinfo;
	u16 lid;

	struct mlx4_ib_dev *dev = to_mdev(ibdev);
	if ((mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED ||
	     mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) &&
	    mad->mad_hdr.method == IB_MGMT_METHOD_SET)
		switch (mad->mad_hdr.attr_id) {
		case IB_SMP_ATTR_PORT_INFO:
			pinfo = (struct ib_port_info *) ((struct ib_smp *) mad)->data;
			lid = be16_to_cpu(pinfo->lid);

			update_sm_ah(dev, port_num,
				     be16_to_cpu(pinfo->sm_lid),
				     pinfo->neighbormtu_mastersmsl & 0xf);

			if (pinfo->clientrereg_resv_subnetto & 0x80)
				mlx4_ib_dispatch_event(dev, port_num,
						       IB_EVENT_CLIENT_REREGISTER);

			if (prev_lid != lid)
				mlx4_ib_dispatch_event(dev, port_num,
						       IB_EVENT_LID_CHANGE);
			break;

		case IB_SMP_ATTR_PKEY_TABLE:
			mlx4_ib_dispatch_event(dev, port_num,
					       IB_EVENT_PKEY_CHANGE);
			break;

		case IB_SMP_ATTR_GUID_INFO:
			/* paravirtualized master's guid is guid 0 -- does not change */
			if (!mlx4_is_master(dev->dev))
				mlx4_ib_dispatch_event(dev, port_num,
						       IB_EVENT_GID_CHANGE);
			break;
		default:
			break;
		}
}
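
The PortInfo handling above hinges on three packed fields: a big-endian LID, the client-reregistration flag in the top bit of clientrereg_resv_subnetto, and the master SM SL in the low nibble of neighbormtu_mastersmsl. A standalone sketch of those extractions (the sample values are made up, and ntohs() stands in for the kernel's be16_to_cpu()):

#include <stdio.h>
#include <stdint.h>
#include <arpa/inet.h>	/* ntohs()/htons(), standing in for be16_to_cpu() */

int main(void)
{
	/* Hypothetical on-the-wire values. */
	uint16_t lid_be = htons(0x0042);
	uint8_t clientrereg_resv_subnetto = 0x92;	/* top bit set */
	uint8_t neighbormtu_mastersmsl = 0x43;		/* low nibble = SL 3 */

	printf("lid = 0x%x\n", ntohs(lid_be));
	/* Same test as (pinfo->clientrereg_resv_subnetto & 0x80). */
	if (clientrereg_resv_subnetto & 0x80)
		printf("client reregistration requested\n");
	/* Same mask as (pinfo->neighbormtu_mastersmsl & 0xf). */
	printf("master SM SL = %u\n", neighbormtu_mastersmsl & 0xf);
	return 0;
}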
Example 5
static int mlx4_reset_master(struct mlx4_dev *dev)
{
	int err = 0;

	if (mlx4_is_master(dev))
		mlx4_report_internal_err_comm_event(dev);

	if (!pci_channel_offline(dev->persist->pdev)) {
		err = read_vendor_id(dev);
		/* If PCI can't be accessed to read vendor ID we assume that its
		 * link was disabled and chip was already reset.
		 */
		if (err)
			return 0;

		err = mlx4_reset(dev);
		if (err)
			mlx4_err(dev, "Failed to reset HCA\n");
	}

	return err;
}
Example 6
int mlx4_ib_init_alias_guid_service(struct mlx4_ib_dev *dev)
{
	char alias_wq_name[15];
	int ret = 0;
	int i, j, k;
	union ib_gid gid;

	if (!mlx4_is_master(dev->dev))
		return 0;
	dev->sriov.alias_guid.sa_client =
		kzalloc(sizeof *dev->sriov.alias_guid.sa_client, GFP_KERNEL);
	if (!dev->sriov.alias_guid.sa_client)
		return -ENOMEM;

	ib_sa_register_client(dev->sriov.alias_guid.sa_client);

	spin_lock_init(&dev->sriov.alias_guid.ag_work_lock);

	for (i = 1; i <= dev->num_ports; ++i) {
		if (dev->ib_dev.query_gid(&dev->ib_dev, i, 0, &gid)) {
			ret = -EFAULT;
			goto err_unregister;
		}
	}

	for (i = 0; i < dev->num_ports; i++) {
		memset(&dev->sriov.alias_guid.ports_guid[i], 0,
		       sizeof (struct mlx4_sriov_alias_guid_port_rec_det));
		/* Check if the SM doesn't need to assign the GUIDs */
		for (j = 0; j < NUM_ALIAS_GUID_REC_IN_PORT; j++) {
			if (mlx4_ib_sm_guid_assign) {
				dev->sriov.alias_guid.ports_guid[i].
					all_rec_per_port[j].
					ownership = MLX4_GUID_DRIVER_ASSIGN;
				continue;
			}
			dev->sriov.alias_guid.ports_guid[i].all_rec_per_port[j].
					ownership = MLX4_GUID_NONE_ASSIGN;
			/* Mark each value as deleted until the sysadmin
			 * assigns it a valid value. */
			for (k = 0; k < NUM_ALIAS_GUID_IN_REC; k++) {
				*(__be64 *)&dev->sriov.alias_guid.ports_guid[i].
					all_rec_per_port[j].all_recs[GUID_REC_SIZE * k] =
						cpu_to_be64(MLX4_GUID_FOR_DELETE_VAL);
			}
		}
		INIT_LIST_HEAD(&dev->sriov.alias_guid.ports_guid[i].cb_list);
		/* Prepare the records; mark them to be allocated by the SM */
		for (j = 0 ; j < NUM_ALIAS_GUID_REC_IN_PORT; j++)
			invalidate_guid_record(dev, i + 1, j);

		dev->sriov.alias_guid.ports_guid[i].parent = &dev->sriov.alias_guid;
		dev->sriov.alias_guid.ports_guid[i].port = i;
		if (mlx4_ib_sm_guid_assign)
			set_all_slaves_guids(dev, i);

		snprintf(alias_wq_name, sizeof alias_wq_name, "alias_guid%d", i);
		dev->sriov.alias_guid.ports_guid[i].wq =
			create_singlethread_workqueue(alias_wq_name);
		if (!dev->sriov.alias_guid.ports_guid[i].wq) {
			ret = -ENOMEM;
			goto err_thread;
		}
		INIT_DELAYED_WORK(&dev->sriov.alias_guid.ports_guid[i].alias_guid_work,
			  alias_guid_work);
	}
	return 0;

err_thread:
	for (--i; i >= 0; i--) {
		destroy_workqueue(dev->sriov.alias_guid.ports_guid[i].wq);
		dev->sriov.alias_guid.ports_guid[i].wq = NULL;
	}

err_unregister:
	ib_sa_unregister_client(dev->sriov.alias_guid.sa_client);
	kfree(dev->sriov.alias_guid.sa_client);
	dev->sriov.alias_guid.sa_client = NULL;
	pr_err("init_alias_guid_service: Failed. (ret:%d)\n", ret);
	return ret;
}
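
The err_thread path above is the kernel's standard unwind-in-reverse idiom: on a failure at port i, only the workqueues created for ports 0 through i - 1 are destroyed, and control then falls through to the earlier cleanup stage. A minimal userspace analogue of the same shape, with malloc() standing in for create_singlethread_workqueue():

#include <stdlib.h>

#define NPORTS 2

/* Hypothetical per-port setup mirroring the unwind pattern above. */
static int init_ports(void)
{
	void *res[NPORTS];
	int i, ret = 0;

	for (i = 0; i < NPORTS; i++) {
		res[i] = malloc(64);	/* stands in for a workqueue */
		if (!res[i]) {
			ret = -1;
			goto err_unwind;
		}
	}
	return 0;

err_unwind:
	/* Free only what was successfully acquired, in reverse order. */
	for (--i; i >= 0; i--)
		free(res[i]);
	return ret;
}

int main(void)
{
	return init_ports() ? 1 : 0;
}

The for (--i; i >= 0; i--) decrement before the test is the detail that keeps the loop from freeing the slot whose allocation just failed.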
Example 7
/*
 * Whenever a new GUID is set/unset (guid table change), create an event and
 * notify the relevant slave (the master should also be notified).
 * If the GUID value is not the same as we have in the cache, the slave will
 * not be updated; in this case it waits for smp_snoop or the port management
 * event to call the function and update the slave.
 * block_num - the index of the block (16 blocks available)
 * port_num - 1 or 2
 */
void mlx4_ib_notify_slaves_on_guid_change(struct mlx4_ib_dev *dev,
					  int block_num, u8 port_num,
					  u8 *p_data)
{
	int i;
	u64 guid_indexes;
	int slave_id;
	enum slave_port_state new_state;
	enum slave_port_state prev_state;
	__be64 tmp_cur_ag, from_cache_ag;
	enum slave_port_gen_event gen_event;

	if (!mlx4_is_master(dev->dev))
		return;

	guid_indexes = be64_to_cpu((__force __be64) dev->sriov.alias_guid.
				   ports_guid[port_num - 1].
				   all_rec_per_port[block_num].guid_indexes);
	pr_debug("port: %d, guid_indexes: 0x%llx\n", port_num, guid_indexes);

	/*calculate the slaves and notify them*/
	for (i = 0; i < NUM_ALIAS_GUID_IN_REC; i++) {
		/* the location of the specific index runs from bits 4..11 */
		if (!(test_bit(i + 4, (unsigned long *)&guid_indexes)))
			continue;

		slave_id = (block_num * NUM_ALIAS_GUID_IN_REC) + i;
		if (slave_id >= dev->dev->num_slaves)
			return;
		tmp_cur_ag = *(__be64 *)&p_data[i * GUID_REC_SIZE];
		from_cache_ag = get_cached_alias_guid(dev, port_num,
					(NUM_ALIAS_GUID_IN_REC * block_num) + i);
		/*
		 * Check whether the GUID is the same as in the cache.
		 * If it is different, wait for smp_snoop or the port mgmt
		 * change event to update the slave on its port state change.
		 */
		if (tmp_cur_ag != from_cache_ag)
			continue;
		mlx4_gen_guid_change_eqe(dev->dev, slave_id, port_num);

		/* Two cases: valid GUID, and invalid GUID */

		if (tmp_cur_ag != MLX4_NOT_SET_GUID) { /* valid GUID */
			prev_state = mlx4_get_slave_port_state(dev->dev, slave_id, port_num);
			new_state = set_and_calc_slave_port_state(dev->dev, slave_id, port_num,
								  MLX4_PORT_STATE_IB_PORT_STATE_EVENT_GID_VALID,
								  &gen_event);
			pr_debug("slave: %d, port: %d prev_port_state: %d,"
				 " new_port_state: %d, gen_event: %d\n",
				 slave_id, port_num, prev_state, new_state, gen_event);
			if (gen_event == SLAVE_PORT_GEN_EVENT_UP) {
				pr_debug("sending PORT_UP event to slave: %d, port: %d\n",
					 slave_id, port_num);
				mlx4_gen_port_state_change_eqe(dev->dev, slave_id,
							       port_num, MLX4_PORT_CHANGE_SUBTYPE_ACTIVE);
			}
		} else { /* request to invalidate GUID */
			set_and_calc_slave_port_state(dev->dev, slave_id, port_num,
						      MLX4_PORT_STATE_IB_EVENT_GID_INVALID,
						      &gen_event);
			pr_debug("sending PORT DOWN event to slave: %d, port: %d\n",
				 slave_id, port_num);
			mlx4_gen_port_state_change_eqe(dev->dev, slave_id, port_num,
						       MLX4_PORT_CHANGE_SUBTYPE_DOWN);
		}
	}
}
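
The tail of the loop reduces to a two-way decision on the new GUID value: a real GUID may produce a PORT_UP event when the slave's port state machine reports SLAVE_PORT_GEN_EVENT_UP, while the not-set sentinel always produces PORT_DOWN. A toy sketch of that mapping, assuming the driver's MLX4_NOT_SET_GUID sentinel is zero (an assumption; its definition is not shown here):

#include <stdio.h>
#include <stdint.h>

/* Assumed sentinel mirroring the driver's MLX4_NOT_SET_GUID (value not
 * shown in the excerpt; zero is assumed here). */
#define NOT_SET_GUID 0ULL

/* Map a newly written alias GUID to the port event class it can trigger. */
static const char *guid_event(uint64_t guid)
{
	if (guid != NOT_SET_GUID)
		return "valid GUID: PORT_UP if the state machine reports UP";
	return "invalidate request: PORT_DOWN";
}

int main(void)
{
	printf("0x1234 -> %s\n", guid_event(0x1234ULL));
	printf("0x0    -> %s\n", guid_event(NOT_SET_GUID));
	return 0;
}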
Example 8
void mlx4_cleanup_uar_table(struct mlx4_dev *dev)
{
	if (mlx4_is_master(dev))
		return;
	mlx4_bitmap_cleanup(&mlx4_priv(dev)->uar_table.bitmap);
}
Example 9
int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset)
{
	struct mlx4_counter tmp_counter_stats;
	struct mlx4_en_stat_out_mbox *mlx4_en_stats;
	struct mlx4_en_stat_out_flow_control_mbox *flowstats;
	struct mlx4_en_priv *priv = netdev_priv(mdev->pndev[port]);
	struct net_device_stats *stats = &priv->stats;
	struct mlx4_cmd_mailbox *mailbox;
	u64 in_mod = reset << 8 | port;
	int err;
	int i, counter_index;
	unsigned long sw_rx_dropped = 0;

	mailbox = mlx4_alloc_cmd_mailbox(mdev->dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);
	err = mlx4_cmd_box(mdev->dev, 0, mailbox->dma, in_mod, 0,
			   MLX4_CMD_DUMP_ETH_STATS, MLX4_CMD_TIME_CLASS_B,
			   MLX4_CMD_WRAPPED);
	if (err)
		goto out;

	mlx4_en_stats = mailbox->buf;

	spin_lock_bh(&priv->stats_lock);

	stats->rx_packets = 0;
	stats->rx_bytes = 0;
	priv->port_stats.rx_chksum_good = 0;
	priv->port_stats.rx_chksum_none = 0;
	priv->port_stats.rx_chksum_complete = 0;
	for (i = 0; i < priv->rx_ring_num; i++) {
		stats->rx_packets += priv->rx_ring[i]->packets;
		stats->rx_bytes += priv->rx_ring[i]->bytes;
		sw_rx_dropped += priv->rx_ring[i]->dropped;
		priv->port_stats.rx_chksum_good += priv->rx_ring[i]->csum_ok;
		priv->port_stats.rx_chksum_none += priv->rx_ring[i]->csum_none;
		priv->port_stats.rx_chksum_complete += priv->rx_ring[i]->csum_complete;
	}
	stats->tx_packets = 0;
	stats->tx_bytes = 0;
	priv->port_stats.tx_chksum_offload = 0;
	priv->port_stats.queue_stopped = 0;
	priv->port_stats.wake_queue = 0;
	priv->port_stats.tso_packets = 0;
	priv->port_stats.xmit_more = 0;

	for (i = 0; i < priv->tx_ring_num; i++) {
		const struct mlx4_en_tx_ring *ring = priv->tx_ring[i];

		stats->tx_packets += ring->packets;
		stats->tx_bytes += ring->bytes;
		priv->port_stats.tx_chksum_offload += ring->tx_csum;
		priv->port_stats.queue_stopped     += ring->queue_stopped;
		priv->port_stats.wake_queue        += ring->wake_queue;
		priv->port_stats.tso_packets       += ring->tso_packets;
		priv->port_stats.xmit_more         += ring->xmit_more;
	}
	if (mlx4_is_master(mdev->dev)) {
		stats->rx_packets = en_stats_adder(&mlx4_en_stats->RTOT_prio_0,
						   &mlx4_en_stats->RTOT_prio_1,
						   NUM_PRIORITIES);
		stats->tx_packets = en_stats_adder(&mlx4_en_stats->TTOT_prio_0,
						   &mlx4_en_stats->TTOT_prio_1,
						   NUM_PRIORITIES);
		stats->rx_bytes = en_stats_adder(&mlx4_en_stats->ROCT_prio_0,
						 &mlx4_en_stats->ROCT_prio_1,
						 NUM_PRIORITIES);
		stats->tx_bytes = en_stats_adder(&mlx4_en_stats->TOCT_prio_0,
						 &mlx4_en_stats->TOCT_prio_1,
						 NUM_PRIORITIES);
	}

	/* net device stats */
	stats->rx_errors = be64_to_cpu(mlx4_en_stats->PCS) +
			   be32_to_cpu(mlx4_en_stats->RJBBR) +
			   be32_to_cpu(mlx4_en_stats->RCRC) +
			   be32_to_cpu(mlx4_en_stats->RRUNT) +
			   be64_to_cpu(mlx4_en_stats->RInRangeLengthErr) +
			   be64_to_cpu(mlx4_en_stats->ROutRangeLengthErr) +
			   be32_to_cpu(mlx4_en_stats->RSHORT) +
			   en_stats_adder(&mlx4_en_stats->RGIANT_prio_0,
					  &mlx4_en_stats->RGIANT_prio_1,
					  NUM_PRIORITIES);
	stats->tx_errors = en_stats_adder(&mlx4_en_stats->TGIANT_prio_0,
					  &mlx4_en_stats->TGIANT_prio_1,
					  NUM_PRIORITIES);
	stats->multicast = en_stats_adder(&mlx4_en_stats->MCAST_prio_0,
					  &mlx4_en_stats->MCAST_prio_1,
					  NUM_PRIORITIES);
	stats->collisions = 0;
	stats->rx_dropped = be32_to_cpu(mlx4_en_stats->RDROP) +
			    sw_rx_dropped;
	stats->rx_length_errors = be32_to_cpu(mlx4_en_stats->RdropLength);
	stats->rx_over_errors = 0;
	stats->rx_crc_errors = be32_to_cpu(mlx4_en_stats->RCRC);
	stats->rx_frame_errors = 0;
	stats->rx_fifo_errors = be32_to_cpu(mlx4_en_stats->RdropOvflw);
	stats->rx_missed_errors = 0;
	stats->tx_aborted_errors = 0;
	stats->tx_carrier_errors = 0;
	stats->tx_fifo_errors = 0;
	stats->tx_heartbeat_errors = 0;
	stats->tx_window_errors = 0;
	stats->tx_dropped = be32_to_cpu(mlx4_en_stats->TDROP);

	/* RX stats */
	priv->pkstats.rx_multicast_packets = stats->multicast;
	priv->pkstats.rx_broadcast_packets =
			en_stats_adder(&mlx4_en_stats->RBCAST_prio_0,
				       &mlx4_en_stats->RBCAST_prio_1,
				       NUM_PRIORITIES);
	priv->pkstats.rx_jabbers = be32_to_cpu(mlx4_en_stats->RJBBR);
	priv->pkstats.rx_in_range_length_error =
		be64_to_cpu(mlx4_en_stats->RInRangeLengthErr);
	priv->pkstats.rx_out_range_length_error =
		be64_to_cpu(mlx4_en_stats->ROutRangeLengthErr);

	/* Tx stats */
	priv->pkstats.tx_multicast_packets =
		en_stats_adder(&mlx4_en_stats->TMCAST_prio_0,
			       &mlx4_en_stats->TMCAST_prio_1,
			       NUM_PRIORITIES);
	priv->pkstats.tx_broadcast_packets =
		en_stats_adder(&mlx4_en_stats->TBCAST_prio_0,
			       &mlx4_en_stats->TBCAST_prio_1,
			       NUM_PRIORITIES);

	priv->pkstats.rx_prio[0][0] = be64_to_cpu(mlx4_en_stats->RTOT_prio_0);
	priv->pkstats.rx_prio[0][1] = be64_to_cpu(mlx4_en_stats->ROCT_prio_0);
	priv->pkstats.rx_prio[1][0] = be64_to_cpu(mlx4_en_stats->RTOT_prio_1);
	priv->pkstats.rx_prio[1][1] = be64_to_cpu(mlx4_en_stats->ROCT_prio_1);
	priv->pkstats.rx_prio[2][0] = be64_to_cpu(mlx4_en_stats->RTOT_prio_2);
	priv->pkstats.rx_prio[2][1] = be64_to_cpu(mlx4_en_stats->ROCT_prio_2);
	priv->pkstats.rx_prio[3][0] = be64_to_cpu(mlx4_en_stats->RTOT_prio_3);
	priv->pkstats.rx_prio[3][1] = be64_to_cpu(mlx4_en_stats->ROCT_prio_3);
	priv->pkstats.rx_prio[4][0] = be64_to_cpu(mlx4_en_stats->RTOT_prio_4);
	priv->pkstats.rx_prio[4][1] = be64_to_cpu(mlx4_en_stats->ROCT_prio_4);
	priv->pkstats.rx_prio[5][0] = be64_to_cpu(mlx4_en_stats->RTOT_prio_5);
	priv->pkstats.rx_prio[5][1] = be64_to_cpu(mlx4_en_stats->ROCT_prio_5);
	priv->pkstats.rx_prio[6][0] = be64_to_cpu(mlx4_en_stats->RTOT_prio_6);
	priv->pkstats.rx_prio[6][1] = be64_to_cpu(mlx4_en_stats->ROCT_prio_6);
	priv->pkstats.rx_prio[7][0] = be64_to_cpu(mlx4_en_stats->RTOT_prio_7);
	priv->pkstats.rx_prio[7][1] = be64_to_cpu(mlx4_en_stats->ROCT_prio_7);
	priv->pkstats.rx_prio[8][0] = be64_to_cpu(mlx4_en_stats->RTOT_novlan);
	priv->pkstats.rx_prio[8][1] = be64_to_cpu(mlx4_en_stats->ROCT_novlan);
	priv->pkstats.tx_prio[0][0] = be64_to_cpu(mlx4_en_stats->TTOT_prio_0);
	priv->pkstats.tx_prio[0][1] = be64_to_cpu(mlx4_en_stats->TOCT_prio_0);
	priv->pkstats.tx_prio[1][0] = be64_to_cpu(mlx4_en_stats->TTOT_prio_1);
	priv->pkstats.tx_prio[1][1] = be64_to_cpu(mlx4_en_stats->TOCT_prio_1);
	priv->pkstats.tx_prio[2][0] = be64_to_cpu(mlx4_en_stats->TTOT_prio_2);
	priv->pkstats.tx_prio[2][1] = be64_to_cpu(mlx4_en_stats->TOCT_prio_2);
	priv->pkstats.tx_prio[3][0] = be64_to_cpu(mlx4_en_stats->TTOT_prio_3);
	priv->pkstats.tx_prio[3][1] = be64_to_cpu(mlx4_en_stats->TOCT_prio_3);
	priv->pkstats.tx_prio[4][0] = be64_to_cpu(mlx4_en_stats->TTOT_prio_4);
	priv->pkstats.tx_prio[4][1] = be64_to_cpu(mlx4_en_stats->TOCT_prio_4);
	priv->pkstats.tx_prio[5][0] = be64_to_cpu(mlx4_en_stats->TTOT_prio_5);
	priv->pkstats.tx_prio[5][1] = be64_to_cpu(mlx4_en_stats->TOCT_prio_5);
	priv->pkstats.tx_prio[6][0] = be64_to_cpu(mlx4_en_stats->TTOT_prio_6);
	priv->pkstats.tx_prio[6][1] = be64_to_cpu(mlx4_en_stats->TOCT_prio_6);
	priv->pkstats.tx_prio[7][0] = be64_to_cpu(mlx4_en_stats->TTOT_prio_7);
	priv->pkstats.tx_prio[7][1] = be64_to_cpu(mlx4_en_stats->TOCT_prio_7);
	priv->pkstats.tx_prio[8][0] = be64_to_cpu(mlx4_en_stats->TTOT_novlan);
	priv->pkstats.tx_prio[8][1] = be64_to_cpu(mlx4_en_stats->TOCT_novlan);

	spin_unlock_bh(&priv->stats_lock);

	memset(&tmp_counter_stats, 0, sizeof(tmp_counter_stats));
	counter_index = mlx4_get_default_counter_index(mdev->dev, port);
	err = mlx4_get_counter_stats(mdev->dev, counter_index,
				     &tmp_counter_stats, reset);

	/* 0xff bytes indicate an invalid value */
	memset(mailbox->buf, 0xff, sizeof(*flowstats) * MLX4_NUM_PRIORITIES);

	if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_FLOWSTATS_EN) {
		memset(mailbox->buf, 0,
		       sizeof(*flowstats) * MLX4_NUM_PRIORITIES);
		err = mlx4_cmd_box(mdev->dev, 0, mailbox->dma,
				   in_mod | MLX4_DUMP_ETH_STATS_FLOW_CONTROL,
				   0, MLX4_CMD_DUMP_ETH_STATS,
				   MLX4_CMD_TIME_CLASS_B, MLX4_CMD_WRAPPED);
		if (err)
			goto out;
	}

	flowstats = mailbox->buf;

	spin_lock_bh(&priv->stats_lock);

	if (tmp_counter_stats.counter_mode == 0) {
		priv->pf_stats.rx_bytes   = be64_to_cpu(tmp_counter_stats.rx_bytes);
		priv->pf_stats.tx_bytes   = be64_to_cpu(tmp_counter_stats.tx_bytes);
		priv->pf_stats.rx_packets = be64_to_cpu(tmp_counter_stats.rx_frames);
		priv->pf_stats.tx_packets = be64_to_cpu(tmp_counter_stats.tx_frames);
	}

	for (i = 0; i < MLX4_NUM_PRIORITIES; i++) {
		priv->rx_priority_flowstats[i].rx_pause =
			be64_to_cpu(flowstats[i].rx_pause);
		priv->rx_priority_flowstats[i].rx_pause_duration =
			be64_to_cpu(flowstats[i].rx_pause_duration);
		priv->rx_priority_flowstats[i].rx_pause_transition =
			be64_to_cpu(flowstats[i].rx_pause_transition);
		priv->tx_priority_flowstats[i].tx_pause =
			be64_to_cpu(flowstats[i].tx_pause);
		priv->tx_priority_flowstats[i].tx_pause_duration =
			be64_to_cpu(flowstats[i].tx_pause_duration);
		priv->tx_priority_flowstats[i].tx_pause_transition =
			be64_to_cpu(flowstats[i].tx_pause_transition);
	}

	/* if PFC is not in use, the counters of all priorities have the same value */
	priv->rx_flowstats.rx_pause =
		be64_to_cpu(flowstats[0].rx_pause);
	priv->rx_flowstats.rx_pause_duration =
		be64_to_cpu(flowstats[0].rx_pause_duration);
	priv->rx_flowstats.rx_pause_transition =
		be64_to_cpu(flowstats[0].rx_pause_transition);
	priv->tx_flowstats.tx_pause =
		be64_to_cpu(flowstats[0].tx_pause);
	priv->tx_flowstats.tx_pause_duration =
		be64_to_cpu(flowstats[0].tx_pause_duration);
	priv->tx_flowstats.tx_pause_transition =
		be64_to_cpu(flowstats[0].tx_pause_transition);

	spin_unlock_bh(&priv->stats_lock);

out:
	mlx4_free_cmd_mailbox(mdev->dev, mailbox);
	return err;
}
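
One detail worth spelling out is the command argument packing at the top of the function: the port number occupies the low byte of in_mod and the reset flag sits at bit 8; the later flow-control dump ORs in MLX4_DUMP_ETH_STATS_FLOW_CONTROL, whose value the excerpt does not show. A sketch of the visible part of the encoding:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint8_t port = 2, reset = 1;
	/* Same packing as in mlx4_en_DUMP_ETH_STATS: reset flag at bit 8,
	 * port number in the low byte. */
	uint64_t in_mod = (uint64_t)reset << 8 | port;

	printf("in_mod = 0x%llx\n", (unsigned long long)in_mod);	/* 0x102 */
	return 0;
}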