Example #1
static int qede_set_ringparam(struct net_device *dev,
			      struct ethtool_ringparam *ering)
{
	struct qede_dev *edev = netdev_priv(dev);

	DP_VERBOSE(edev, (NETIF_MSG_IFUP | NETIF_MSG_IFDOWN),
		   "Set ring params command parameters: rx_pending = %d, tx_pending = %d\n",
		   ering->rx_pending, ering->tx_pending);

	/* Validate legality of configuration */
	if (ering->rx_pending > NUM_RX_BDS_MAX ||
	    ering->rx_pending < NUM_RX_BDS_MIN ||
	    ering->tx_pending > NUM_TX_BDS_MAX ||
	    ering->tx_pending < NUM_TX_BDS_MIN) {
		DP_VERBOSE(edev, (NETIF_MSG_IFUP | NETIF_MSG_IFDOWN),
			   "Can only support Rx Buffer size [0%08x,...,0x%08x] and Tx Buffer size [0x%08x,...,0x%08x]\n",
			   NUM_RX_BDS_MIN, NUM_RX_BDS_MAX,
			   NUM_TX_BDS_MIN, NUM_TX_BDS_MAX);
		return -EINVAL;
	}

	/* Change ring size and re-load */
	edev->q_num_rx_buffers = ering->rx_pending;
	edev->q_num_tx_buffers = ering->tx_pending;

	if (netif_running(edev->ndev))
		qede_reload(edev, NULL, NULL);

	return 0;
}
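All of the examples in this listing funnel their debug output through the driver's DP_VERBOSE macro, which prints only when the given module bit is set in the device's debug mask and the verbosity level permits it. The stand-alone sketch below mimics that gating for illustration; it is not the qed/ecore definition, and every type, constant and helper name in it is invented for this listing.

/* Hedged sketch (not driver code): simplified stand-in for DP_VERBOSE. */
#include <stdio.h>

struct dbg_dev {
	unsigned int dp_module;	/* bitmask of enabled message groups */
	int dp_level;		/* 0 == verbose, larger values are quieter */
	const char *name;
};

#define DP_VERBOSE_SKETCH(dev, module, fmt, ...)			   \
	do {								   \
		if ((dev)->dp_level <= 0 && ((dev)->dp_module & (module))) \
			printf("[%s:%d(%s)] " fmt, __func__, __LINE__,	   \
			       (dev)->name, ##__VA_ARGS__);		   \
	} while (0)

int main(void)
{
	struct dbg_dev dev = { 0x2, 0, "eth0" };

	/* Prints: module bit 0x2 is enabled and the level is verbose. */
	DP_VERBOSE_SKETCH(&dev, 0x2, "rx_pending = %d, tx_pending = %d\n",
			  1024, 1024);
	/* Silent: module bit 0x4 is not enabled on this device. */
	DP_VERBOSE_SKETCH(&dev, 0x4, "this message is filtered out\n");
	return 0;
}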
Example #2
static void
ecore_dcbx_get_operational_params(struct ecore_hwfn *p_hwfn,
				  struct ecore_dcbx_get *params)
{
	struct ecore_dcbx_operational_params *p_operational;
	struct ecore_dcbx_results *p_results;
	struct dcbx_features *p_feat;
	bool enabled, err;
	u32 flags;
	bool val;

	flags = p_hwfn->p_dcbx_info->operational.flags;

	/* If DCBx version is non-zero, then negotiation
	 * was successfully performed
	 */
	p_operational = &params->operational;
	enabled = !!(GET_MFW_FIELD(flags, DCBX_CONFIG_VERSION) !=
		     DCBX_CONFIG_VERSION_DISABLED);
	if (!enabled) {
		p_operational->enabled = enabled;
		p_operational->valid = false;
		DP_VERBOSE(p_hwfn, ECORE_MSG_DCB, "Dcbx is disabled\n");
		return;
	}

	p_feat = &p_hwfn->p_dcbx_info->operational.features;
	p_results = &p_hwfn->p_dcbx_info->results;

	val = !!(GET_MFW_FIELD(flags, DCBX_CONFIG_VERSION) ==
		 DCBX_CONFIG_VERSION_IEEE);
	p_operational->ieee = val;

	val = !!(GET_MFW_FIELD(flags, DCBX_CONFIG_VERSION) ==
		 DCBX_CONFIG_VERSION_CEE);
	p_operational->cee = val;

	val = !!(GET_MFW_FIELD(flags, DCBX_CONFIG_VERSION) ==
		 DCBX_CONFIG_VERSION_STATIC);
	p_operational->local = val;

	DP_VERBOSE(p_hwfn, ECORE_MSG_DCB,
		   "Version support: ieee %d, cee %d, static %d\n",
		   p_operational->ieee, p_operational->cee,
		   p_operational->local);

	ecore_dcbx_get_common_params(p_hwfn, &p_feat->app,
				     p_feat->app.app_pri_tbl, &p_feat->ets,
				     p_feat->pfc, &params->operational.params,
				     p_operational->ieee);
	ecore_dcbx_get_priority_info(p_hwfn, &p_operational->app_prio,
				     p_results);
	err = GET_MFW_FIELD(p_feat->app.flags, DCBX_APP_ERROR);
	p_operational->err = err;
	p_operational->enabled = enabled;
	p_operational->valid = true;
}
Example #3
static int
qed_start_txq(struct ecore_dev *edev,
              uint8_t rss_id, uint16_t tx_queue_id,
              uint8_t vport_id, uint16_t sb,
              uint8_t sb_index,
              dma_addr_t pbl_addr,
              uint16_t pbl_size, void OSAL_IOMEM * *pp_doorbell)
{
    struct ecore_hwfn *p_hwfn;
    int rc, hwfn_index;

    hwfn_index = rss_id % edev->num_hwfns;
    p_hwfn = &edev->hwfns[hwfn_index];

    rc = ecore_sp_eth_tx_queue_start(p_hwfn,
                                     p_hwfn->hw_info.opaque_fid,
                                     tx_queue_id / edev->num_hwfns,
                                     vport_id,
                                     vport_id,
                                     sb,
                                     sb_index,
                                     pbl_addr, pbl_size, pp_doorbell);

    if (rc) {
        DP_ERR(edev, "Failed to start TXQ#%d\n", tx_queue_id);
        return rc;
    }

    DP_VERBOSE(edev, ECORE_MSG_SPQ,
               "Started TX-Q %d [rss %d] on V-PORT %d and SB %d\n",
               tx_queue_id, rss_id, vport_id, sb);

    return 0;
}
Example #4
static int qede_get_sset_count(struct net_device *dev, int stringset)
{
	struct qede_dev *edev = netdev_priv(dev);
	int num_stats = QEDE_NUM_STATS;

	switch (stringset) {
	case ETH_SS_STATS:
		if (IS_VF(edev)) {
			int i;

			for (i = 0; i < QEDE_NUM_STATS; i++)
				if (qede_stats_arr[i].pf_only)
					num_stats--;
		}
		return num_stats + QEDE_NUM_RQSTATS;
	case ETH_SS_PRIV_FLAGS:
		return QEDE_PRI_FLAG_LEN;
	case ETH_SS_TEST:
		return QEDE_ETHTOOL_TEST_MAX;
	default:
		DP_VERBOSE(edev, QED_MSG_DEBUG,
			   "Unsupported stringset 0x%08x\n", stringset);
		return -EINVAL;
	}
}
Example #5
static int
qed_start_txq(struct ecore_dev *edev,
	      uint8_t rss_num,
	      struct ecore_queue_start_common_params *p_params,
	      dma_addr_t pbl_addr,
	      uint16_t pbl_size, void OSAL_IOMEM * *pp_doorbell)
{
	struct ecore_hwfn *p_hwfn;
	int rc, hwfn_index;

	hwfn_index = rss_num % edev->num_hwfns;
	p_hwfn = &edev->hwfns[hwfn_index];

	p_params->queue_id = p_params->queue_id / edev->num_hwfns;
	p_params->qzone_id = p_params->queue_id;
	p_params->stats_id = p_params->vport_id;

	rc = ecore_sp_eth_tx_queue_start(p_hwfn,
					 p_hwfn->hw_info.opaque_fid,
					 p_params,
					 0 /* tc */,
					 pbl_addr, pbl_size, pp_doorbell);

	if (rc) {
		DP_ERR(edev, "Failed to start TXQ#%d\n", p_params->queue_id);
		return rc;
	}

	DP_VERBOSE(edev, ECORE_MSG_SPQ,
		   "Started TX-Q %d [rss_num %d] on V-PORT %d and SB %d\n",
		   p_params->queue_id, rss_num, p_params->vport_id,
		   p_params->sb);

	return 0;
}
Example #6
static void
ecore_dcbx_set_pfc_data(struct ecore_hwfn *p_hwfn,
			u32 *pfc, struct ecore_dcbx_params *p_params)
{
	u8 pfc_map = 0;
	int i;

	if (p_params->pfc.willing)
		*pfc |= DCBX_PFC_WILLING_MASK;
	else
		*pfc &= ~DCBX_PFC_WILLING_MASK;

	if (p_params->pfc.enabled)
		*pfc |= DCBX_PFC_ENABLED_MASK;
	else
		*pfc &= ~DCBX_PFC_ENABLED_MASK;

	*pfc &= ~DCBX_PFC_CAPS_MASK;
	*pfc |= (u32)p_params->pfc.max_tc << DCBX_PFC_CAPS_OFFSET;

	for (i = 0; i < ECORE_MAX_PFC_PRIORITIES; i++)
		if (p_params->pfc.prio[i])
			pfc_map |= (1 << i);
	*pfc &= ~DCBX_PFC_PRI_EN_BITMAP_MASK;
	*pfc |= (pfc_map << DCBX_PFC_PRI_EN_BITMAP_OFFSET);

	DP_VERBOSE(p_hwfn, ECORE_MSG_DCB, "pfc = 0x%x\n", *pfc);
}
Example #7
File: qed_mcp.c Project: panyfx/ath
int qed_mcp_reset(struct qed_hwfn *p_hwfn,
		  struct qed_ptt *p_ptt)
{
	u32 seq = ++p_hwfn->mcp_info->drv_mb_seq;
	u8 delay = CHIP_MCP_RESP_ITER_US;
	u32 org_mcp_reset_seq, cnt = 0;
	int rc = 0;

	/* Set drv command along with the updated sequence */
	org_mcp_reset_seq = qed_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0);
	DRV_MB_WR(p_hwfn, p_ptt, drv_mb_header,
		  (DRV_MSG_CODE_MCP_RESET | seq));

	do {
		/* Wait for MFW response */
		udelay(delay);
		/* Give the FW up to 500ms (50 * 1000 * 10 usec) */
	} while ((org_mcp_reset_seq == qed_rd(p_hwfn, p_ptt,
					      MISCS_REG_GENERIC_POR_0)) &&
		 (cnt++ < QED_MCP_RESET_RETRIES));

	if (org_mcp_reset_seq !=
	    qed_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0)) {
		DP_VERBOSE(p_hwfn, QED_MSG_SP,
			   "MCP was reset after %d usec\n", cnt * delay);
	} else {
		DP_ERR(p_hwfn, "Failed to reset MCP\n");
		rc = -EAGAIN;
	}

	return rc;
}
Example #8
bool qed_update_rss_parm_cmt(struct ecore_dev *edev, uint16_t *p_tbl)
{
	uint16_t max = 0, k;
	bool rss_mode = false; /* disable */
	int divisor;

	/* Find largest entry, since it's possible RSS needs to
	 * be disabled [in case only 1 queue per-hwfn]
	 */
	for (k = 0; k < ECORE_RSS_IND_TABLE_SIZE; k++)
		max = (max > p_tbl[k]) ?  max : p_tbl[k];

	/* Either fix RSS values or disable RSS */
	if (edev->num_hwfns < max + 1) {
		divisor = (max + edev->num_hwfns - 1) / edev->num_hwfns;
		DP_VERBOSE(edev, ECORE_MSG_SPQ,
			   "CMT - fixing RSS values (modulo %02x)\n",
			   divisor);
		for (k = 0; k < ECORE_RSS_IND_TABLE_SIZE; k++)
			p_tbl[k] = p_tbl[k] % divisor;

		rss_mode = true;
	}

	return rss_mode;
}
Example #9
struct ecore_ptt *ecore_ptt_acquire(struct ecore_hwfn *p_hwfn)
{
	struct ecore_ptt *p_ptt;
	unsigned int i;

	/* Take the free PTT from the list */
	for (i = 0; i < ECORE_BAR_ACQUIRE_TIMEOUT; i++) {
		OSAL_SPIN_LOCK(&p_hwfn->p_ptt_pool->lock);
		if (!OSAL_LIST_IS_EMPTY(&p_hwfn->p_ptt_pool->free_list)) {
			p_ptt = OSAL_LIST_FIRST_ENTRY(
						&p_hwfn->p_ptt_pool->free_list,
						struct ecore_ptt, list_entry);
			OSAL_LIST_REMOVE_ENTRY(&p_ptt->list_entry,
					       &p_hwfn->p_ptt_pool->free_list);

			OSAL_SPIN_UNLOCK(&p_hwfn->p_ptt_pool->lock);

			DP_VERBOSE(p_hwfn, ECORE_MSG_HW,
				   "allocated ptt %d\n", p_ptt->idx);

			return p_ptt;
		}

		OSAL_SPIN_UNLOCK(&p_hwfn->p_ptt_pool->lock);
		OSAL_MSLEEP(1);
	}

	/* Presumed tail of this truncated example: the acquire loop timed out,
	 * so log a notice and return no PTT entry.
	 */
	DP_NOTICE(p_hwfn, true,
		  "PTT acquire timeout - failed to allocate PTT\n");
	return OSAL_NULL;
}
Example #10
int qed_sp_pf_start(struct qed_hwfn *p_hwfn,
		    enum mf_mode mode)
{
	struct qed_sp_init_request_params params;
	struct pf_start_ramrod_data *p_ramrod = NULL;
	u16 sb = qed_int_get_sp_sb_id(p_hwfn);
	u8 sb_index = p_hwfn->p_eq->eq_sb_index;
	struct qed_spq_entry *p_ent = NULL;
	int rc = -EINVAL;

	/* update initial eq producer */
	qed_eq_prod_update(p_hwfn,
			   qed_chain_get_prod_idx(&p_hwfn->p_eq->chain));

	memset(&params, 0, sizeof(params));
	params.ramrod_data_size = sizeof(*p_ramrod);
	params.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn,
				 &p_ent,
				 qed_spq_get_cid(p_hwfn),
				 p_hwfn->hw_info.opaque_fid,
				 COMMON_RAMROD_PF_START,
				 PROTOCOLID_COMMON,
				 &params);
	if (rc)
		return rc;

	p_ramrod = &p_ent->ramrod.pf_start;

	p_ramrod->event_ring_sb_id	= cpu_to_le16(sb);
	p_ramrod->event_ring_sb_index	= sb_index;
	p_ramrod->path_id		= QED_PATH_ID(p_hwfn);
	p_ramrod->dont_log_ramrods	= 0;
	p_ramrod->log_type_mask		= cpu_to_le16(0xf);
	p_ramrod->mf_mode = mode;
	p_ramrod->outer_tag = p_hwfn->hw_info.ovlan;

	/* Place EQ address in RAMROD */
	p_ramrod->event_ring_pbl_addr.hi =
			DMA_HI_LE(p_hwfn->p_eq->chain.pbl.p_phys_table);
	p_ramrod->event_ring_pbl_addr.lo =
			DMA_LO_LE(p_hwfn->p_eq->chain.pbl.p_phys_table);
	p_ramrod->event_ring_num_pages = (u8)p_hwfn->p_eq->chain.page_cnt;

	p_ramrod->consolid_q_pbl_addr.hi =
			DMA_HI_LE(p_hwfn->p_consq->chain.pbl.p_phys_table);
	p_ramrod->consolid_q_pbl_addr.lo =
			DMA_LO_LE(p_hwfn->p_consq->chain.pbl.p_phys_table);

	p_hwfn->hw_info.personality = PERSONALITY_ETH;

	DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
		   "Setting event_ring_sb [id %04x index %02x], mf [%s] outer_tag [%d]\n",
		   sb, sb_index,
		   (p_ramrod->mf_mode == SF) ? "SF" : "Multi-Pf",
		   p_ramrod->outer_tag);

	return qed_spq_post(p_hwfn, p_ent, NULL);
}
Example #11
static void
ecore_dcbx_set_local_params(struct ecore_hwfn *p_hwfn,
			    struct dcbx_local_params *local_admin,
			    struct ecore_dcbx_set *params)
{
	bool ieee = false;

	local_admin->flags = 0;
	OSAL_MEMCPY(&local_admin->features,
		    &p_hwfn->p_dcbx_info->operational.features,
		    sizeof(local_admin->features));

	if (params->enabled) {
		local_admin->config = params->ver_num;
		ieee = !!(params->ver_num & DCBX_CONFIG_VERSION_IEEE);
	} else {
		local_admin->config = DCBX_CONFIG_VERSION_DISABLED;
	}

	DP_VERBOSE(p_hwfn, ECORE_MSG_DCB, "Dcbx version = %d\n",
		   local_admin->config);

	if (params->override_flags & ECORE_DCBX_OVERRIDE_PFC_CFG)
		ecore_dcbx_set_pfc_data(p_hwfn, &local_admin->features.pfc,
					&params->config.params);

	if (params->override_flags & ECORE_DCBX_OVERRIDE_ETS_CFG)
		ecore_dcbx_set_ets_data(p_hwfn, &local_admin->features.ets,
					&params->config.params);

	if (params->override_flags & ECORE_DCBX_OVERRIDE_APP_CFG)
		ecore_dcbx_set_app_data(p_hwfn, &local_admin->features.app,
					&params->config.params, ieee);
}
Example #12
enum _ecore_status_t ecore_sp_rl_update(struct ecore_hwfn *p_hwfn,
					struct ecore_rl_update_params *params)
{
	struct ecore_spq_entry *p_ent = OSAL_NULL;
	enum _ecore_status_t rc = ECORE_NOTIMPL;
	struct rl_update_ramrod_data *rl_update;
	struct ecore_sp_init_data init_data;

	/* Get SPQ entry */
	OSAL_MEMSET(&init_data, 0, sizeof(init_data));
	init_data.cid = ecore_spq_get_cid(p_hwfn);
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;

	rc = ecore_sp_init_request(p_hwfn, &p_ent,
				   COMMON_RAMROD_RL_UPDATE, PROTOCOLID_COMMON,
				   &init_data);
	if (rc != ECORE_SUCCESS)
		return rc;

	rl_update = &p_ent->ramrod.rl_update;

	rl_update->qcn_update_param_flg = params->qcn_update_param_flg;
	rl_update->dcqcn_update_param_flg = params->dcqcn_update_param_flg;
	rl_update->rl_init_flg = params->rl_init_flg;
	rl_update->rl_start_flg = params->rl_start_flg;
	rl_update->rl_stop_flg = params->rl_stop_flg;
	rl_update->rl_id_first = params->rl_id_first;
	rl_update->rl_id_last = params->rl_id_last;
	rl_update->rl_dc_qcn_flg = params->rl_dc_qcn_flg;
	rl_update->rl_bc_rate = OSAL_CPU_TO_LE32(params->rl_bc_rate);
	rl_update->rl_max_rate =
		OSAL_CPU_TO_LE16(ecore_sp_rl_mb_to_qm(params->rl_max_rate));
	rl_update->rl_r_ai =
		OSAL_CPU_TO_LE16(ecore_sp_rl_mb_to_qm(params->rl_r_ai));
	rl_update->rl_r_hai =
		OSAL_CPU_TO_LE16(ecore_sp_rl_mb_to_qm(params->rl_r_hai));
	rl_update->dcqcn_g =
		OSAL_CPU_TO_LE16(ecore_sp_rl_gd_denom(params->dcqcn_gd));
	rl_update->dcqcn_k_us = OSAL_CPU_TO_LE32(params->dcqcn_k_us);
	rl_update->dcqcn_timeuot_us =
		OSAL_CPU_TO_LE32(params->dcqcn_timeuot_us);
	rl_update->qcn_timeuot_us = OSAL_CPU_TO_LE32(params->qcn_timeuot_us);

	DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ, "rl_params: qcn_update_param_flg %x, dcqcn_update_param_flg %x, rl_init_flg %x, rl_start_flg %x, rl_stop_flg %x, rl_id_first %x, rl_id_last %x, rl_dc_qcn_flg %x, rl_bc_rate %x, rl_max_rate %x, rl_r_ai %x, rl_r_hai %x, dcqcn_g %x, dcqcn_k_us %x, dcqcn_timeuot_us %x, qcn_timeuot_us %x\n",
		   rl_update->qcn_update_param_flg,
		   rl_update->dcqcn_update_param_flg,
		   rl_update->rl_init_flg, rl_update->rl_start_flg,
		   rl_update->rl_stop_flg, rl_update->rl_id_first,
		   rl_update->rl_id_last, rl_update->rl_dc_qcn_flg,
		   rl_update->rl_bc_rate, rl_update->rl_max_rate,
		   rl_update->rl_r_ai, rl_update->rl_r_hai,
		   rl_update->dcqcn_g, rl_update->dcqcn_k_us,
		   rl_update->dcqcn_timeuot_us, rl_update->qcn_timeuot_us);

	return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
}
Example #13
File: qed_int.c Project: raoy1990/linux
static int qed_int_attentions(struct qed_hwfn *p_hwfn)
{
	struct qed_sb_attn_info *p_sb_attn_sw = p_hwfn->p_sb_attn;
	struct atten_status_block *p_sb_attn = p_sb_attn_sw->sb_attn;
	u32 attn_bits = 0, attn_acks = 0;
	u16 asserted_bits, deasserted_bits;
	__le16 index;
	int rc = 0;

	/* Read current attention bits/acks - safeguard against attentions
	 * by guaranteeing work on a synchronized timeframe
	 */
	do {
		index = p_sb_attn->sb_index;
		attn_bits = le32_to_cpu(p_sb_attn->atten_bits);
		attn_acks = le32_to_cpu(p_sb_attn->atten_ack);
	} while (index != p_sb_attn->sb_index);
	p_sb_attn->sb_index = index;

	/* Attention / Deassertion are meaningful (and in correct state)
	 * only when they differ and are consistent with known state - deassertion
	 * when previous attention & current ack, and assertion when current
	 * attention with no previous attention
	 */
	asserted_bits = (attn_bits & ~attn_acks & ATTN_STATE_BITS) &
		~p_sb_attn_sw->known_attn;
	deasserted_bits = (~attn_bits & attn_acks & ATTN_STATE_BITS) &
		p_sb_attn_sw->known_attn;

	if ((asserted_bits & ~0x100) || (deasserted_bits & ~0x100)) {
		DP_INFO(p_hwfn,
			"Attention: Index: 0x%04x, Bits: 0x%08x, Acks: 0x%08x, asserted: 0x%04x, De-asserted 0x%04x [Prev. known: 0x%04x]\n",
			index, attn_bits, attn_acks, asserted_bits,
			deasserted_bits, p_sb_attn_sw->known_attn);
	} else if (asserted_bits == 0x100) {
		DP_INFO(p_hwfn,
			"MFW indication via attention\n");
	} else {
		DP_VERBOSE(p_hwfn, NETIF_MSG_INTR,
			   "MFW indication [deassertion]\n");
	}

	if (asserted_bits) {
		rc = qed_int_assertion(p_hwfn, asserted_bits);
		if (rc)
			return rc;
	}

	if (deasserted_bits) {
		rc = qed_int_deassertion(p_hwfn, deasserted_bits);
		if (rc)
			return rc;
	}

	return rc;
}
Example #14
static void
qed_dcbx_dp_protocol(struct qed_hwfn *p_hwfn, struct qed_dcbx_results *p_data)
{
	enum dcbx_protocol_type id;
	int i;

	DP_VERBOSE(p_hwfn, QED_MSG_DCB, "DCBX negotiated: %d\n",
		   p_data->dcbx_enabled);

	for (i = 0; i < ARRAY_SIZE(qed_dcbx_app_update); i++) {
		id = qed_dcbx_app_update[i].id;

		DP_VERBOSE(p_hwfn, QED_MSG_DCB,
			   "%s info: update %d, enable %d, prio %d, tc %d, num_tc %d\n",
			   qed_dcbx_app_update[i].name, p_data->arr[id].update,
			   p_data->arr[id].enable, p_data->arr[id].priority,
			   p_data->arr[id].tc, p_hwfn->hw_info.num_tc);
	}
}
Example #15
static int qede_set_channels(struct net_device *dev,
			     struct ethtool_channels *channels)
{
	struct qede_dev *edev = netdev_priv(dev);

	DP_VERBOSE(edev, (NETIF_MSG_IFUP | NETIF_MSG_IFDOWN),
		   "set-channels command parameters: rx = %d, tx = %d, other = %d, combined = %d\n",
		   channels->rx_count, channels->tx_count,
		   channels->other_count, channels->combined_count);

	/* We don't support separate rx / tx, nor `other' channels. */
	if (channels->rx_count || channels->tx_count ||
	    channels->other_count || (channels->combined_count == 0) ||
	    (channels->combined_count > QEDE_MAX_RSS_CNT(edev))) {
		DP_VERBOSE(edev, (NETIF_MSG_IFUP | NETIF_MSG_IFDOWN),
			   "command parameters not supported\n");
		return -EINVAL;
	}

	/* Check if there was a change in the active parameters */
	if (channels->combined_count == QEDE_RSS_CNT(edev)) {
		DP_VERBOSE(edev, (NETIF_MSG_IFUP | NETIF_MSG_IFDOWN),
			   "No change in active parameters\n");
		return 0;
	}

	/* We need the number of queues to be divisible between the hwfns */
	if (channels->combined_count % edev->dev_info.common.num_hwfns) {
		DP_VERBOSE(edev, (NETIF_MSG_IFUP | NETIF_MSG_IFDOWN),
			   "Number of channels must be divisable by %04x\n",
			   edev->dev_info.common.num_hwfns);
		return -EINVAL;
	}

	/* Set number of queues and reload if necessary */
	edev->req_rss = channels->combined_count;
	if (netif_running(dev))
		qede_reload(edev, NULL, NULL);

	return 0;
}
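Examples #1, #4 and #15 are ethtool callbacks. The sketch below shows how such handlers are typically wired up through a struct ethtool_ops table and attached to the net_device during probe. The ethtool_ops member names are the standard Linux API for the kernel era these examples come from; the table and helper names, and the assumption that qede registers exactly these three callbacks this way, are illustrative.

/* Hedged sketch: registering the ethtool handlers shown in this listing. */
#include <linux/ethtool.h>
#include <linux/netdevice.h>

static const struct ethtool_ops qede_ethtool_ops_sketch = {
	.set_ringparam	= qede_set_ringparam,	/* Example #1 */
	.get_sset_count	= qede_get_sset_count,	/* Example #4 */
	.set_channels	= qede_set_channels,	/* Example #15 */
};

/* Called from the probe path, after the net_device is allocated. */
static void qede_set_ethtool_ops_sketch(struct net_device *dev)
{
	dev->ethtool_ops = &qede_ethtool_ops_sketch;
}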
Example #16
static enum _ecore_status_t ecore_mfw_update_tlvs(struct ecore_hwfn *p_hwfn,
						  u8 tlv_group, u8 *p_mfw_buf,
						  u32 size)
{
	union ecore_mfw_tlv_data *p_tlv_data;
	struct ecore_tlv_parsed_buf buffer;
	struct ecore_drv_tlv_hdr tlv;
	u32 offset;
	int len;
	u8 *p_tlv;

	p_tlv_data = OSAL_VZALLOC(p_hwfn->p_dev, sizeof(*p_tlv_data));
	if (!p_tlv_data)
		return ECORE_NOMEM;

	if (OSAL_MFW_FILL_TLV_DATA(p_hwfn, tlv_group, p_tlv_data)) {
		OSAL_VFREE(p_hwfn->p_dev, p_tlv_data);
		return ECORE_INVAL;
	}

	OSAL_MEMSET(&tlv, 0, sizeof(tlv));
	for (offset = 0; offset < size;
	     offset += sizeof(tlv) + sizeof(u32) * tlv.tlv_length) {
		p_tlv = &p_mfw_buf[offset];
		tlv.tlv_type = TLV_TYPE(p_tlv);
		tlv.tlv_length = TLV_LENGTH(p_tlv);
		tlv.tlv_flags = TLV_FLAGS(p_tlv);

		DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
			   "Type %d length = %d flags = 0x%x\n", tlv.tlv_type,
			   tlv.tlv_length, tlv.tlv_flags);

		if (tlv_group == ECORE_MFW_TLV_GENERIC)
			len = ecore_mfw_get_gen_tlv_value(&tlv, &p_tlv_data->generic, &buffer);
		else if (tlv_group == ECORE_MFW_TLV_ETH)
			len = ecore_mfw_get_eth_tlv_value(&tlv, &p_tlv_data->eth, &buffer);
		else if (tlv_group == ECORE_MFW_TLV_FCOE)
			len = ecore_mfw_get_fcoe_tlv_value(&tlv, &p_tlv_data->fcoe, &buffer);
		else
			len = ecore_mfw_get_iscsi_tlv_value(&tlv, &p_tlv_data->iscsi, &buffer);

		if (len > 0) {
			OSAL_WARN(len > 4 * tlv.tlv_length,
				  "Incorrect MFW TLV length %d, it shouldn't be greater than %d\n",
				  len, 4 * tlv.tlv_length);
			len = OSAL_MIN_T(int, len, 4 * tlv.tlv_length);
			tlv.tlv_flags |= ECORE_DRV_TLV_FLAGS_CHANGED;
			TLV_FLAGS(p_tlv) = tlv.tlv_flags;
			OSAL_MEMCPY(p_mfw_buf + offset + sizeof(tlv),
				    buffer.p_val, len);
		}
	}

	/* Presumed tail of this truncated example: release the scratch TLV
	 * buffer and report success.
	 */
	OSAL_VFREE(p_hwfn->p_dev, p_tlv_data);

	return ECORE_SUCCESS;
}
Example #17
File: qed_mcp.c Project: panyfx/ath
void qed_mcp_cmd_port_init(struct qed_hwfn *p_hwfn,
			   struct qed_ptt *p_ptt)
{
	u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
					PUBLIC_PORT);
	u32 mfw_mb_offsize = qed_rd(p_hwfn, p_ptt, addr);

	p_hwfn->mcp_info->port_addr = SECTION_ADDR(mfw_mb_offsize,
						   MFW_PORT(p_hwfn));
	DP_VERBOSE(p_hwfn, QED_MSG_SP,
		   "port_addr = 0x%x, port_id 0x%02x\n",
		   p_hwfn->mcp_info->port_addr, MFW_PORT(p_hwfn));
}
Example #18
File: qed_int.c Project: raoy1990/linux
/**
 *  @brief qed_int_assertion - handles asserted attention bits
 *
 *  @param p_hwfn
 *  @param asserted_bits newly asserted bits
 *  @return int
 */
static int qed_int_assertion(struct qed_hwfn *p_hwfn,
			     u16 asserted_bits)
{
	struct qed_sb_attn_info *sb_attn_sw = p_hwfn->p_sb_attn;
	u32 igu_mask;

	/* Mask the source of the attention in the IGU */
	igu_mask = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
			  IGU_REG_ATTENTION_ENABLE);
	DP_VERBOSE(p_hwfn, NETIF_MSG_INTR, "IGU mask: 0x%08x --> 0x%08x\n",
		   igu_mask, igu_mask & ~(asserted_bits & ATTN_BITS_MASKABLE));
	igu_mask &= ~(asserted_bits & ATTN_BITS_MASKABLE);
	qed_wr(p_hwfn, p_hwfn->p_dpc_ptt, IGU_REG_ATTENTION_ENABLE, igu_mask);

	DP_VERBOSE(p_hwfn, NETIF_MSG_INTR,
		   "inner known ATTN state: 0x%04x --> 0x%04x\n",
		   sb_attn_sw->known_attn,
		   sb_attn_sw->known_attn | asserted_bits);
	sb_attn_sw->known_attn |= asserted_bits;

	/* Handle MCP events */
	if (asserted_bits & 0x100) {
		qed_mcp_handle_events(p_hwfn, p_hwfn->p_dpc_ptt);
		/* Clean the MCP attention */
		qed_wr(p_hwfn, p_hwfn->p_dpc_ptt,
		       sb_attn_sw->mfw_attn_addr, 0);
	}

	DIRECT_REG_WR((u8 __iomem *)p_hwfn->regview +
		      GTT_BAR0_MAP_REG_IGU_CMD +
		      ((IGU_CMD_ATTN_BIT_SET_UPPER -
			IGU_CMD_INT_ACK_BASE) << 3),
		      (u32)asserted_bits);

	DP_VERBOSE(p_hwfn, NETIF_MSG_INTR, "set cmd IGU: 0x%04x\n",
		   asserted_bits);

	return 0;
}
Example #19
File: qed_mcp.c Project: panyfx/ath
int qed_mcp_load_req(struct qed_hwfn *p_hwfn,
		     struct qed_ptt *p_ptt,
		     u32 *p_load_code)
{
	struct qed_dev *cdev = p_hwfn->cdev;
	u32 param;
	int rc;

	if (!qed_mcp_is_init(p_hwfn)) {
		DP_NOTICE(p_hwfn, "MFW is not initialized !\n");
		return -EBUSY;
	}

	/* Save driver's version to shmem */
	qed_mcp_set_drv_ver(cdev, p_hwfn, p_ptt);

	DP_VERBOSE(p_hwfn, QED_MSG_SP, "fw_seq 0x%08x, drv_pulse 0x%x\n",
		   p_hwfn->mcp_info->drv_mb_seq,
		   p_hwfn->mcp_info->drv_pulse_seq);

	/* Load Request */
	rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_LOAD_REQ,
			 (PDA_COMP | DRV_ID_MCP_HSI_VER_CURRENT |
			  cdev->drv_type),
			 p_load_code, &param);

	/* if mcp fails to respond we must abort */
	if (rc) {
		DP_ERR(p_hwfn, "MCP response failure, aborting\n");
		return rc;
	}

	/* If MFW refused (e.g. other port is in diagnostic mode) we
	 * must abort. This can happen in the following cases:
	 * - Other port is in diagnostic mode
	 * - Previously loaded function on the engine is not compliant with
	 *   the requester.
	 * - MFW cannot cope with the requester's DRV_MFW_HSI_VERSION.
	 */
	if (!(*p_load_code) ||
	    ((*p_load_code) == FW_MSG_CODE_DRV_LOAD_REFUSED_HSI) ||
	    ((*p_load_code) == FW_MSG_CODE_DRV_LOAD_REFUSED_PDA) ||
	    ((*p_load_code) == FW_MSG_CODE_DRV_LOAD_REFUSED_DIAG)) {
		DP_ERR(p_hwfn, "MCP refused load request, aborting\n");
		return -EBUSY;
	}

	return 0;
}
Example #20
static enum _ecore_status_t
ecore_dcbx_copy_mib(struct ecore_hwfn *p_hwfn,
		    struct ecore_ptt *p_ptt,
		    struct ecore_dcbx_mib_meta_data *p_data,
		    enum ecore_mib_read_type type)
{
	u32 prefix_seq_num, suffix_seq_num;
	int read_count = 0;
	enum _ecore_status_t rc = ECORE_SUCCESS;

	/* The data is considered to be valid only if both sequence numbers are
	 * the same.
	 */
	do {
		if (type == ECORE_DCBX_REMOTE_LLDP_MIB) {
			ecore_memcpy_from(p_hwfn, p_ptt, p_data->lldp_remote,
					  p_data->addr, p_data->size);
			prefix_seq_num = p_data->lldp_remote->prefix_seq_num;
			suffix_seq_num = p_data->lldp_remote->suffix_seq_num;
		} else if (type == ECORE_DCBX_LLDP_TLVS) {
			ecore_memcpy_from(p_hwfn, p_ptt, p_data->lldp_tlvs,
					  p_data->addr, p_data->size);
			prefix_seq_num = p_data->lldp_tlvs->prefix_seq_num;
			suffix_seq_num = p_data->lldp_tlvs->suffix_seq_num;

		} else {
			ecore_memcpy_from(p_hwfn, p_ptt, p_data->mib,
					  p_data->addr, p_data->size);
			prefix_seq_num = p_data->mib->prefix_seq_num;
			suffix_seq_num = p_data->mib->suffix_seq_num;
		}
		read_count++;

		DP_VERBOSE(p_hwfn, ECORE_MSG_DCB,
			   "mib type = %d, try count = %d prefix seq num  ="
			   " %d suffix seq num = %d\n",
			   type, read_count, prefix_seq_num, suffix_seq_num);
	} while ((prefix_seq_num != suffix_seq_num) &&
		 (read_count < ECORE_DCBX_MAX_MIB_READ_TRY));

	if (read_count >= ECORE_DCBX_MAX_MIB_READ_TRY) {
		DP_ERR(p_hwfn,
		       "MIB read err, mib type = %d, try count ="
		       " %d prefix seq num = %d suffix seq num = %d\n",
		       type, read_count, prefix_seq_num, suffix_seq_num);
		rc = ECORE_IO;
	}

	return rc;
}
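The retry loop in Example #20 is a prefix/suffix sequence-number check: the reader keeps re-copying the shared buffer until the sequence number sampled before the copy matches the one sampled after it, which indicates the writer did not update the data mid-read. A minimal stand-alone sketch of the same idea follows; the names are invented here, and a production version would add the memory barriers the driver gets from its copy primitives.

/* Hedged sketch (not driver code): consistent snapshot of a buffer that a
 * writer updates by bumping prefix_seq_num, writing the payload, then
 * bumping suffix_seq_num to the same value.
 */
#include <string.h>

#define MAX_READ_TRIES	100

struct seq_buf {
	unsigned int prefix_seq_num;
	unsigned int payload[16];
	unsigned int suffix_seq_num;
};

/* Returns 0 when a consistent snapshot was copied, -1 when it gave up. */
static int read_consistent(const struct seq_buf *src, unsigned int *dst)
{
	unsigned int prefix, suffix;
	int tries = 0;

	do {
		prefix = src->prefix_seq_num;
		memcpy(dst, src->payload, sizeof(src->payload));
		suffix = src->suffix_seq_num;
		tries++;
	} while (prefix != suffix && tries < MAX_READ_TRIES);

	return (prefix == suffix) ? 0 : -1;
}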
Example #21
static void
ecore_dcbx_set_ets_data(struct ecore_hwfn *p_hwfn,
			struct dcbx_ets_feature *p_ets,
			struct ecore_dcbx_params *p_params)
{
	u8 *bw_map, *tsa_map;
	u32 val;
	int i;

	if (p_params->ets_willing)
		p_ets->flags |= DCBX_ETS_WILLING_MASK;
	else
		p_ets->flags &= ~DCBX_ETS_WILLING_MASK;

	if (p_params->ets_cbs)
		p_ets->flags |= DCBX_ETS_CBS_MASK;
	else
		p_ets->flags &= ~DCBX_ETS_CBS_MASK;

	if (p_params->ets_enabled)
		p_ets->flags |= DCBX_ETS_ENABLED_MASK;
	else
		p_ets->flags &= ~DCBX_ETS_ENABLED_MASK;

	p_ets->flags &= ~DCBX_ETS_MAX_TCS_MASK;
	p_ets->flags |= (u32)p_params->max_ets_tc << DCBX_ETS_MAX_TCS_OFFSET;

	bw_map = (u8 *)&p_ets->tc_bw_tbl[0];
	tsa_map = (u8 *)&p_ets->tc_tsa_tbl[0];
	p_ets->pri_tc_tbl[0] = 0;
	for (i = 0; i < ECORE_MAX_PFC_PRIORITIES; i++) {
		bw_map[i] = p_params->ets_tc_bw_tbl[i];
		tsa_map[i] = p_params->ets_tc_tsa_tbl[i];
		/* Copy the priority value to the corresponding 4 bits in the
		 * traffic class table.
		 */
		val = (((u32)p_params->ets_pri_tc_tbl[i]) << ((7 - i) * 4));
		p_ets->pri_tc_tbl[0] |= val;
	}
	for (i = 0; i < 2; i++) {
		p_ets->tc_bw_tbl[i] = OSAL_CPU_TO_BE32(p_ets->tc_bw_tbl[i]);
		p_ets->tc_tsa_tbl[i] = OSAL_CPU_TO_BE32(p_ets->tc_tsa_tbl[i]);
	}

	DP_VERBOSE(p_hwfn, ECORE_MSG_DCB,
		   "flags = 0x%x pri_tc = 0x%x tc_bwl[] = {0x%x, 0x%x} tc_tsa = {0x%x, 0x%x}\n",
		   p_ets->flags, p_ets->pri_tc_tbl[0], p_ets->tc_bw_tbl[0],
		   p_ets->tc_bw_tbl[1], p_ets->tc_tsa_tbl[0],
		   p_ets->tc_tsa_tbl[1]);
}
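Example #21 packs one 4-bit traffic-class value per priority into a 32-bit word, placing priority i in bit slot (7 - i) * 4, and Example #23 later unpacks the same table. The round trip below illustrates that 4-bits-per-entry encoding; the helper names are invented, and whether ECORE_DCBX_PRIO2TC uses exactly this slot ordering is not shown in this listing.

/* Hedged sketch: pack/unpack 8 priority->TC nibbles in one u32, using the
 * same (7 - i) * 4 slot ordering as the loop above.
 */
#include <stdint.h>

static uint32_t pack_pri_tc(const uint8_t tc[8])
{
	uint32_t word = 0;
	int i;

	for (i = 0; i < 8; i++)
		word |= ((uint32_t)(tc[i] & 0xf)) << ((7 - i) * 4);

	return word;
}

static uint8_t unpack_pri_tc(uint32_t word, int prio)
{
	return (word >> ((7 - prio) * 4)) & 0xf;
}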
Example #22
static void
ecore_dcbx_dp_protocol(struct ecore_hwfn *p_hwfn,
		       struct ecore_dcbx_results *p_data)
{
	enum dcbx_protocol_type id;
	int i;

	DP_VERBOSE(p_hwfn, ECORE_MSG_DCB, "DCBX negotiated: %d\n",
		   p_data->dcbx_enabled);

	for (i = 0; i < OSAL_ARRAY_SIZE(ecore_dcbx_app_update); i++) {
		id = ecore_dcbx_app_update[i].id;

		DP_VERBOSE(p_hwfn, ECORE_MSG_DCB,
			   "%s info: update %d, enable %d, prio %d, tc %d,"
			   " num_active_tc %d dscp_enable = %d dscp_val = %d\n",
			   ecore_dcbx_app_update[i].name,
			   p_data->arr[id].update,
			   p_data->arr[id].enable, p_data->arr[id].priority,
			   p_data->arr[id].tc, p_hwfn->hw_info.num_active_tc,
			   p_data->arr[id].dscp_enable,
			   p_data->arr[id].dscp_val);
	}
}
Example #23
static void
ecore_dcbx_get_ets_data(struct ecore_hwfn *p_hwfn,
			struct dcbx_ets_feature *p_ets,
			struct ecore_dcbx_params *p_params)
{
	u32 bw_map[2], tsa_map[2], pri_map;
	int i;

	p_params->ets_willing = GET_MFW_FIELD(p_ets->flags, DCBX_ETS_WILLING);
	p_params->ets_enabled = GET_MFW_FIELD(p_ets->flags, DCBX_ETS_ENABLED);
	p_params->ets_cbs = GET_MFW_FIELD(p_ets->flags, DCBX_ETS_CBS);
	p_params->max_ets_tc = GET_MFW_FIELD(p_ets->flags, DCBX_ETS_MAX_TCS);
	DP_VERBOSE(p_hwfn, ECORE_MSG_DCB,
		   "ETS params: willing %d, enabled = %d ets_cbs %d pri_tc_tbl_0 %x max_ets_tc %d\n",
		   p_params->ets_willing, p_params->ets_enabled,
		   p_params->ets_cbs, p_ets->pri_tc_tbl[0],
		   p_params->max_ets_tc);

	/* 8-bit TSA and BW data corresponding to each of the 8 TCs are
	 * encoded in a u32 array of size 2.
	 */
	bw_map[0] = OSAL_BE32_TO_CPU(p_ets->tc_bw_tbl[0]);
	bw_map[1] = OSAL_BE32_TO_CPU(p_ets->tc_bw_tbl[1]);
	tsa_map[0] = OSAL_BE32_TO_CPU(p_ets->tc_tsa_tbl[0]);
	tsa_map[1] = OSAL_BE32_TO_CPU(p_ets->tc_tsa_tbl[1]);
	pri_map = p_ets->pri_tc_tbl[0];
	for (i = 0; i < ECORE_MAX_PFC_PRIORITIES; i++) {
		p_params->ets_tc_bw_tbl[i] = ((u8 *)bw_map)[i];
		p_params->ets_tc_tsa_tbl[i] = ((u8 *)tsa_map)[i];
		p_params->ets_pri_tc_tbl[i] = ECORE_DCBX_PRIO2TC(pri_map, i);
		DP_VERBOSE(p_hwfn, ECORE_MSG_DCB,
			   "elem %d  bw_tbl %x tsa_tbl %x\n",
			   i, p_params->ets_tc_bw_tbl[i],
			   p_params->ets_tc_tsa_tbl[i]);
	}
}
Example #24
static void
ecore_dcbx_get_priority_info(struct ecore_hwfn *p_hwfn,
			     struct ecore_dcbx_app_prio *p_prio,
			     struct ecore_dcbx_results *p_results)
{
	if (p_results->arr[DCBX_PROTOCOL_ETH].update &&
	    p_results->arr[DCBX_PROTOCOL_ETH].enable)
		p_prio->eth = p_results->arr[DCBX_PROTOCOL_ETH].priority;

	DP_VERBOSE(p_hwfn, ECORE_MSG_DCB,
		   "Priorities: eth %d\n",
		   p_prio->eth);
}
Example #25
static enum _ecore_status_t
ecore_dcbx_set_dscp_params(struct ecore_hwfn *p_hwfn,
			   struct dcb_dscp_map *p_dscp_map,
			   struct ecore_dcbx_set *p_params)
{
	int entry, i, j;
	u32 val;

	OSAL_MEMCPY(p_dscp_map, &p_hwfn->p_dcbx_info->dscp_map,
		    sizeof(*p_dscp_map));

	p_dscp_map->flags &= ~DCB_DSCP_ENABLE_MASK;
	if (p_params->dscp.enabled)
		p_dscp_map->flags |= DCB_DSCP_ENABLE_MASK;

	for (i = 0, entry = 0; i < 8; i++) {
		val = 0;
		for (j = 0; j < 8; j++, entry++)
			val |= (((u32)p_params->dscp.dscp_pri_map[entry]) <<
				(j * 4));

		p_dscp_map->dscp_pri_map[i] = OSAL_CPU_TO_BE32(val);
	}

	p_hwfn->p_dcbx_info->dscp_nig_update = true;

	DP_VERBOSE(p_hwfn, ECORE_MSG_DCB, "flags = 0x%x\n", p_dscp_map->flags);
	DP_VERBOSE(p_hwfn, ECORE_MSG_DCB,
		   "pri_map[] = 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n",
		   p_dscp_map->dscp_pri_map[0], p_dscp_map->dscp_pri_map[1],
		   p_dscp_map->dscp_pri_map[2], p_dscp_map->dscp_pri_map[3],
		   p_dscp_map->dscp_pri_map[4], p_dscp_map->dscp_pri_map[5],
		   p_dscp_map->dscp_pri_map[6], p_dscp_map->dscp_pri_map[7]);

	return ECORE_SUCCESS;
}
Example #26
static void qelr_open_debug_file(struct qelr_devctx *ctx)
{
	char *env;

	env = getenv("QELR_DEBUG_FILE");
	if (!env) {
		ctx->dbg_fp = stderr;
		DP_VERBOSE(ctx->dbg_fp, QELR_MSG_INIT,
			   "Debug file opened: stderr\n");
		return;
	}

	ctx->dbg_fp = fopen(env, "a+");
	if (!ctx->dbg_fp) {
		fprintf(stderr, "Failed opening debug file %s, using stderr\n",
			env);
		ctx->dbg_fp = stderr;
		DP_VERBOSE(ctx->dbg_fp, QELR_MSG_INIT,
			   "Debug file opened: stderr\n");
		return;
	}

	DP_VERBOSE(ctx->dbg_fp, QELR_MSG_INIT, "Debug file opened: %s\n", env);
}
Example #27
File: qed_cxt.c Project: 513855417/linux
static int qed_ilt_shadow_alloc(struct qed_hwfn *p_hwfn)
{
	struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
	struct qed_ilt_client_cfg *clients = p_mngr->clients;
	struct qed_ilt_cli_blk *p_blk;
	u32 size, i, j, k;
	int rc;

	size = qed_cxt_ilt_shadow_size(clients);
	p_mngr->ilt_shadow = kcalloc(size, sizeof(struct qed_dma_mem),
				     GFP_KERNEL);
	if (!p_mngr->ilt_shadow) {
		DP_NOTICE(p_hwfn, "Failed to allocate ilt shadow table\n");
		rc = -ENOMEM;
		goto ilt_shadow_fail;
	}

	DP_VERBOSE(p_hwfn, QED_MSG_ILT,
		   "Allocated 0x%x bytes for ilt shadow\n",
		   (u32)(size * sizeof(struct qed_dma_mem)));

	for_each_ilt_valid_client(i, clients) {
		if (!clients[i].active)
			continue;
		for (j = 0; j < ILT_CLI_PF_BLOCKS; j++) {
			p_blk = &clients[i].pf_blks[j];
			rc = qed_ilt_blk_alloc(p_hwfn, p_blk, i, 0);
			if (rc != 0)
				goto ilt_shadow_fail;
		}
		for (k = 0; k < p_mngr->vf_count; k++) {
			for (j = 0; j < ILT_CLI_VF_BLOCKS; j++) {
				u32 lines = clients[i].vf_total_lines * k;

				p_blk = &clients[i].vf_blks[j];
				rc = qed_ilt_blk_alloc(p_hwfn, p_blk, i, lines);
				if (rc != 0)
					goto ilt_shadow_fail;
			}
		}
	}

	return 0;

ilt_shadow_fail:
	qed_ilt_shadow_free(p_hwfn);
	return rc;
}
Example #28
File: qed_cxt.c Project: 513855417/linux
static void qed_cxt_qm_iids(struct qed_hwfn *p_hwfn,
			    struct qed_qm_iids *iids)
{
	struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
	u32 vf_cids = 0, type;

	for (type = 0; type < MAX_CONN_TYPES; type++) {
		iids->cids += p_mngr->conn_cfg[type].cid_count;
		vf_cids += p_mngr->conn_cfg[type].cids_per_vf;
	}

	iids->vf_cids += vf_cids * p_mngr->vf_count;
	DP_VERBOSE(p_hwfn, QED_MSG_ILT,
		   "iids: CIDS %08x vf_cids %08x\n",
		   iids->cids, iids->vf_cids);
}
Example #29
static bool
ecore_dcbx_get_app_protocol_type(struct ecore_hwfn *p_hwfn,
				 u32 app_prio_bitmap, u16 id,
				 enum dcbx_protocol_type *type, bool ieee)
{
	if (ecore_dcbx_default_tlv(app_prio_bitmap, id, ieee)) {
		*type = DCBX_PROTOCOL_ETH;
	} else {
		*type = DCBX_MAX_PROTOCOL_TYPE;
		DP_VERBOSE(p_hwfn, ECORE_MSG_DCB,
			    "No action required, App TLV entry = 0x%x\n",
			   app_prio_bitmap);
		return false;
	}

	return true;
}
Example #30
static int
qed_update_vport(struct ecore_dev *edev, struct qed_update_vport_params *params)
{
	struct ecore_sp_vport_update_params sp_params;
	struct ecore_rss_params sp_rss_params;
	int rc, i;

	memset(&sp_params, 0, sizeof(sp_params));
	memset(&sp_rss_params, 0, sizeof(sp_rss_params));

	/* Translate protocol params into sp params */
	sp_params.vport_id = params->vport_id;
	sp_params.update_vport_active_rx_flg = params->update_vport_active_flg;
	sp_params.update_vport_active_tx_flg = params->update_vport_active_flg;
	sp_params.vport_active_rx_flg = params->vport_active_flg;
	sp_params.vport_active_tx_flg = params->vport_active_flg;
	sp_params.update_inner_vlan_removal_flg =
	    params->update_inner_vlan_removal_flg;
	sp_params.inner_vlan_removal_flg = params->inner_vlan_removal_flg;
	sp_params.update_tx_switching_flg = params->update_tx_switching_flg;
	sp_params.tx_switching_flg = params->tx_switching_flg;
	sp_params.accept_any_vlan = params->accept_any_vlan;
	sp_params.update_accept_any_vlan_flg =
	    params->update_accept_any_vlan_flg;
	sp_params.mtu = params->mtu;

	for_each_hwfn(edev, i) {
		struct ecore_hwfn *p_hwfn = &edev->hwfns[i];

		sp_params.opaque_fid = p_hwfn->hw_info.opaque_fid;
		rc = ecore_sp_vport_update(p_hwfn, &sp_params,
					   ECORE_SPQ_MODE_EBLOCK, NULL);
		if (rc) {
			DP_ERR(edev, "Failed to update VPORT\n");
			return rc;
		}

		DP_VERBOSE(edev, ECORE_MSG_SPQ,
			   "Updated V-PORT %d: active_flag %d [update %d]\n",
			   params->vport_id, params->vport_active_flg,
			   params->update_vport_active_flg);
	}

	return 0;
}