Example #1
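/* Write an init-tool data array to GRC: short or non-DMAE-capable sections
 * are written dword by dword, longer ones via a host-to-GRC DMAE transfer.
 */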
static enum _ecore_status_t ecore_init_array_dmae(struct ecore_hwfn *p_hwfn,
				  struct ecore_ptt *p_ptt,
				  u32 addr, u32 dmae_data_offset,
				  u32 size, const u32 *p_buf,
				  bool b_must_dmae, bool b_can_dmae)
{
	enum _ecore_status_t rc	= ECORE_SUCCESS;

	/* Perform DMAE only for lengthy enough sections or for wide-bus */
#ifndef ASIC_ONLY
	if ((CHIP_REV_IS_SLOW(p_hwfn->p_dev) && (size < 16)) ||
	    !b_can_dmae || (!b_must_dmae && (size < 16))) {
#else
	if (!b_can_dmae || (!b_must_dmae && (size < 16))) {
#endif
		const u32 *data = p_buf + dmae_data_offset;
		u32 i;

		for (i = 0; i < size; i++)
			ecore_wr(p_hwfn, p_ptt, addr + (i << 2), data[i]);
	} else {
		rc = ecore_dmae_host2grc(p_hwfn, p_ptt,
					 (osal_uintptr_t)(p_buf +
							  dmae_data_offset),
					 addr, size,
					 OSAL_NULL /* default parameters */);
	}

	return rc;
}

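/* Zero-fill a GRC region using DMAE with source replication */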
static enum _ecore_status_t ecore_init_fill_dmae(struct ecore_hwfn *p_hwfn,
						 struct ecore_ptt *p_ptt,
						 u32 addr, u32 fill_count)
{
	static u32 zero_buffer[DMAE_MAX_RW_SIZE];
	struct ecore_dmae_params params;

	OSAL_MEMSET(zero_buffer, 0, sizeof(u32) * DMAE_MAX_RW_SIZE);

	OSAL_MEMSET(&params, 0, sizeof(params));
	params.flags = ECORE_DMAE_FLAG_RW_REPL_SRC;
	return ecore_dmae_host2grc(p_hwfn, p_ptt,
				   (osal_uintptr_t)(&(zero_buffer[0])),
				   addr, fill_count, &params);
}

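/* Fill a GRC region with a repeated 32-bit value using direct register writes */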
static void ecore_init_fill(struct ecore_hwfn *p_hwfn,
			    struct ecore_ptt *p_ptt,
			    u32 addr, u32 fill, u32 fill_count)
{
	u32 i;

	for (i = 0; i < fill_count; i++, addr += sizeof(u32))
		ecore_wr(p_hwfn, p_ptt, addr, fill);
}
Example #2
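/* Post a PF-update ramrod refreshing the S-tag priority-change setting
 * according to the current UFP priority type and DCBX state.
 */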
enum _ecore_status_t ecore_sp_pf_update_ufp(struct ecore_hwfn *p_hwfn)
{
	struct ecore_spq_entry *p_ent = OSAL_NULL;
	struct ecore_sp_init_data init_data;
	enum _ecore_status_t rc = ECORE_NOTIMPL;

	/* Get SPQ entry */
	OSAL_MEMSET(&init_data, 0, sizeof(init_data));
	init_data.cid = ecore_spq_get_cid(p_hwfn);
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = ECORE_SPQ_MODE_CB;

	rc = ecore_sp_init_request(p_hwfn, &p_ent,
				   COMMON_RAMROD_PF_UPDATE, PROTOCOLID_COMMON,
				   &init_data);
	if (rc != ECORE_SUCCESS)
		return rc;

	p_ent->ramrod.pf_update.update_enable_stag_pri_change = true;
	if ((p_hwfn->ufp_info.pri_type == ECORE_UFP_PRI_OS) ||
	    (p_hwfn->p_dcbx_info->results.dcbx_enabled))
		p_ent->ramrod.pf_update.enable_stag_pri_change = 1;
	else
		p_ent->ramrod.pf_update.enable_stag_pri_change = 0;

	return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
}
Example #3
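/* Post a rate-limiter update ramrod built from the supplied QCN/DCQCN
 * parameters.
 */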
enum _ecore_status_t ecore_sp_rl_update(struct ecore_hwfn *p_hwfn,
					struct ecore_rl_update_params *params)
{
	struct ecore_spq_entry *p_ent = OSAL_NULL;
	enum _ecore_status_t rc = ECORE_NOTIMPL;
	struct rl_update_ramrod_data *rl_update;
	struct ecore_sp_init_data init_data;

	/* Get SPQ entry */
	OSAL_MEMSET(&init_data, 0, sizeof(init_data));
	init_data.cid = ecore_spq_get_cid(p_hwfn);
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;

	rc = ecore_sp_init_request(p_hwfn, &p_ent,
				   COMMON_RAMROD_RL_UPDATE, PROTOCOLID_COMMON,
				   &init_data);
	if (rc != ECORE_SUCCESS)
		return rc;

	rl_update = &p_ent->ramrod.rl_update;

	rl_update->qcn_update_param_flg = params->qcn_update_param_flg;
	rl_update->dcqcn_update_param_flg = params->dcqcn_update_param_flg;
	rl_update->rl_init_flg = params->rl_init_flg;
	rl_update->rl_start_flg = params->rl_start_flg;
	rl_update->rl_stop_flg = params->rl_stop_flg;
	rl_update->rl_id_first = params->rl_id_first;
	rl_update->rl_id_last = params->rl_id_last;
	rl_update->rl_dc_qcn_flg = params->rl_dc_qcn_flg;
	rl_update->rl_bc_rate = OSAL_CPU_TO_LE32(params->rl_bc_rate);
	rl_update->rl_max_rate =
		OSAL_CPU_TO_LE16(ecore_sp_rl_mb_to_qm(params->rl_max_rate));
	rl_update->rl_r_ai =
		OSAL_CPU_TO_LE16(ecore_sp_rl_mb_to_qm(params->rl_r_ai));
	rl_update->rl_r_hai =
		OSAL_CPU_TO_LE16(ecore_sp_rl_mb_to_qm(params->rl_r_hai));
	rl_update->dcqcn_g =
		OSAL_CPU_TO_LE16(ecore_sp_rl_gd_denom(params->dcqcn_gd));
	rl_update->dcqcn_k_us = OSAL_CPU_TO_LE32(params->dcqcn_k_us);
	rl_update->dcqcn_timeuot_us =
		OSAL_CPU_TO_LE32(params->dcqcn_timeuot_us);
	rl_update->qcn_timeuot_us = OSAL_CPU_TO_LE32(params->qcn_timeuot_us);

	DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ, "rl_params: qcn_update_param_flg %x, dcqcn_update_param_flg %x, rl_init_flg %x, rl_start_flg %x, rl_stop_flg %x, rl_id_first %x, rl_id_last %x, rl_dc_qcn_flg %x, rl_bc_rate %x, rl_max_rate %x, rl_r_ai %x, rl_r_hai %x, dcqcn_g %x, dcqcn_k_us %x, dcqcn_timeuot_us %x, qcn_timeuot_us %x\n",
		   rl_update->qcn_update_param_flg,
		   rl_update->dcqcn_update_param_flg,
		   rl_update->rl_init_flg, rl_update->rl_start_flg,
		   rl_update->rl_stop_flg, rl_update->rl_id_first,
		   rl_update->rl_id_last, rl_update->rl_dc_qcn_flg,
		   rl_update->rl_bc_rate, rl_update->rl_max_rate,
		   rl_update->rl_r_ai, rl_update->rl_r_hai,
		   rl_update->dcqcn_g, rl_update->dcqcn_k_us,
		   rl_update->dcqcn_timeuot_us, rl_update->qcn_timeuot_us);

	return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
}
Example #4
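/* Parse the MFW TLV request buffer and write back the value of each
 * requested TLV from the data collected for the given TLV group.
 */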
static enum _ecore_status_t ecore_mfw_update_tlvs(struct ecore_hwfn *p_hwfn,
						  u8 tlv_group, u8 *p_mfw_buf,
						  u32 size)
{
	union ecore_mfw_tlv_data *p_tlv_data;
	struct ecore_tlv_parsed_buf buffer;
	struct ecore_drv_tlv_hdr tlv;
	u32 offset;
	int len;
	u8 *p_tlv;

	p_tlv_data = OSAL_VZALLOC(p_hwfn->p_dev, sizeof(*p_tlv_data));
	if (!p_tlv_data)
		return ECORE_NOMEM;

	if (OSAL_MFW_FILL_TLV_DATA(p_hwfn, tlv_group, p_tlv_data)) {
		OSAL_VFREE(p_hwfn->p_dev, p_tlv_data);
		return ECORE_INVAL;
	}

	OSAL_MEMSET(&tlv, 0, sizeof(tlv));
	for (offset = 0; offset < size;
	     offset += sizeof(tlv) + sizeof(u32) * tlv.tlv_length) {
		p_tlv = &p_mfw_buf[offset];
		tlv.tlv_type = TLV_TYPE(p_tlv);
		tlv.tlv_length = TLV_LENGTH(p_tlv);
		tlv.tlv_flags = TLV_FLAGS(p_tlv);

		DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
			   "Type %d length = %d flags = 0x%x\n", tlv.tlv_type,
			   tlv.tlv_length, tlv.tlv_flags);

		if (tlv_group == ECORE_MFW_TLV_GENERIC)
			len = ecore_mfw_get_gen_tlv_value(&tlv, &p_tlv_data->generic, &buffer);
		else if (tlv_group == ECORE_MFW_TLV_ETH)
			len = ecore_mfw_get_eth_tlv_value(&tlv, &p_tlv_data->eth, &buffer);
		else if (tlv_group == ECORE_MFW_TLV_FCOE)
			len = ecore_mfw_get_fcoe_tlv_value(&tlv, &p_tlv_data->fcoe, &buffer);
		else
			len = ecore_mfw_get_iscsi_tlv_value(&tlv, &p_tlv_data->iscsi, &buffer);

		if (len > 0) {
			OSAL_WARN(len > 4 * tlv.tlv_length,
				  "Incorrect MFW TLV length %d, it shouldn't be greater than %d\n",
				  len, 4 * tlv.tlv_length);
			len = OSAL_MIN_T(int, len, 4 * tlv.tlv_length);
			tlv.tlv_flags |= ECORE_DRV_TLV_FLAGS_CHANGED;
			TLV_FLAGS(p_tlv) = tlv.tlv_flags;
			OSAL_MEMCPY(p_mfw_buf + offset + sizeof(tlv),
				    buffer.p_val, len);
		}
	}

	OSAL_VFREE(p_hwfn->p_dev, p_tlv_data);

	return ECORE_SUCCESS;
}
Example #5
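/* Cache the requested DCBX configuration and, when hw_commit is set, write
 * the local-admin MIB (and optional DSCP map) to the port's shared memory.
 */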
enum _ecore_status_t ecore_dcbx_config_params(struct ecore_hwfn *p_hwfn,
					      struct ecore_ptt *p_ptt,
					      struct ecore_dcbx_set *params,
					      bool hw_commit)
{
	struct dcbx_local_params local_admin;
	struct ecore_dcbx_mib_meta_data data;
	struct dcb_dscp_map dscp_map;
	u32 resp = 0, param = 0;
	enum _ecore_status_t rc = ECORE_SUCCESS;

	OSAL_MEMCPY(&p_hwfn->p_dcbx_info->set, params,
		    sizeof(p_hwfn->p_dcbx_info->set));
	if (!hw_commit)
		return ECORE_SUCCESS;

	OSAL_MEMSET(&local_admin, 0, sizeof(local_admin));
	ecore_dcbx_set_local_params(p_hwfn, &local_admin, params);

	data.addr = p_hwfn->mcp_info->port_addr +
			offsetof(struct public_port, local_admin_dcbx_mib);
	data.local_admin = &local_admin;
	data.size = sizeof(struct dcbx_local_params);
	ecore_memcpy_to(p_hwfn, p_ptt, data.addr, data.local_admin, data.size);

	if (params->override_flags & ECORE_DCBX_OVERRIDE_DSCP_CFG) {
		OSAL_MEMSET(&dscp_map, 0, sizeof(dscp_map));
		ecore_dcbx_set_dscp_params(p_hwfn, &dscp_map, params);

		data.addr = p_hwfn->mcp_info->port_addr +
				offsetof(struct public_port, dcb_dscp_map);
		data.dscp_map = &dscp_map;
		data.size = sizeof(struct dcb_dscp_map);
		ecore_memcpy_to(p_hwfn, p_ptt, data.addr, data.dscp_map,
				data.size);
	}
Example #6
/* Set pf update ramrod command params */
enum _ecore_status_t
ecore_sp_pf_update_tunn_cfg(struct ecore_hwfn *p_hwfn,
			    struct ecore_ptt *p_ptt,
			    struct ecore_tunnel_info *p_tunn,
			    enum spq_mode comp_mode,
			    struct ecore_spq_comp_cb *p_comp_data)
{
	struct ecore_spq_entry *p_ent = OSAL_NULL;
	struct ecore_sp_init_data init_data;
	enum _ecore_status_t rc = ECORE_NOTIMPL;

	if (IS_VF(p_hwfn->p_dev))
		return ecore_vf_pf_tunnel_param_update(p_hwfn, p_tunn);

	if (ECORE_IS_BB_A0(p_hwfn->p_dev)) {
		DP_NOTICE(p_hwfn, true,
			  "A0 chip: tunnel pf update config is not supported\n");
		return rc;
	}

	if (!p_tunn)
		return ECORE_INVAL;

	/* Get SPQ entry */
	OSAL_MEMSET(&init_data, 0, sizeof(init_data));
	init_data.cid = ecore_spq_get_cid(p_hwfn);
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = comp_mode;
	init_data.p_comp_data = p_comp_data;

	rc = ecore_sp_init_request(p_hwfn, &p_ent,
				   COMMON_RAMROD_PF_UPDATE, PROTOCOLID_COMMON,
				   &init_data);
	if (rc != ECORE_SUCCESS)
		return rc;

	ecore_tunn_set_pf_update_params(p_hwfn, p_tunn,
					&p_ent->ramrod.pf_update.tunnel_config);

	rc = ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
	if (rc != ECORE_SUCCESS)
		return rc;

	ecore_set_hw_tunn_mode_port(p_hwfn, p_ptt, &p_hwfn->p_dev->tunnel);

	return rc;
}
Example #7
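/* Post an empty ramrod, used as a slow-path queue heartbeat */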
enum _ecore_status_t ecore_sp_heartbeat_ramrod(struct ecore_hwfn *p_hwfn)
{
	struct ecore_spq_entry *p_ent = OSAL_NULL;
	struct ecore_sp_init_data init_data;
	enum _ecore_status_t rc;

	/* Get SPQ entry */
	OSAL_MEMSET(&init_data, 0, sizeof(init_data));
	init_data.cid = ecore_spq_get_cid(p_hwfn);
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;

	rc = ecore_sp_init_request(p_hwfn, &p_ent,
				   COMMON_RAMROD_EMPTY, PROTOCOLID_COMMON,
				   &init_data);
	if (rc != ECORE_SUCCESS)
		return rc;

	return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
}
Example #8
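/* Post a PF-update ramrod carrying the current DCBX results */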
enum _ecore_status_t ecore_sp_pf_update_dcbx(struct ecore_hwfn *p_hwfn)
{
	struct ecore_spq_entry *p_ent = OSAL_NULL;
	struct ecore_sp_init_data init_data;
	enum _ecore_status_t rc = ECORE_NOTIMPL;

	/* Get SPQ entry */
	OSAL_MEMSET(&init_data, 0, sizeof(init_data));
	init_data.cid = ecore_spq_get_cid(p_hwfn);
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = ECORE_SPQ_MODE_CB;

	rc = ecore_sp_init_request(p_hwfn, &p_ent,
				   COMMON_RAMROD_PF_UPDATE, PROTOCOLID_COMMON,
				   &init_data);
	if (rc != ECORE_SUCCESS)
		return rc;

	ecore_dcbx_set_pf_update_params(&p_hwfn->p_dcbx_info->results,
					&p_ent->ramrod.pf_update);

	return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
}
Example #9
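/* Post a PF-update ramrod programming the management-FW assigned outer VLAN */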
enum _ecore_status_t ecore_sp_pf_update_stag(struct ecore_hwfn *p_hwfn)
{
	struct ecore_spq_entry *p_ent = OSAL_NULL;
	struct ecore_sp_init_data init_data;
	enum _ecore_status_t rc = ECORE_NOTIMPL;

	/* Get SPQ entry */
	OSAL_MEMSET(&init_data, 0, sizeof(init_data));
	init_data.cid = ecore_spq_get_cid(p_hwfn);
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = ECORE_SPQ_MODE_CB;

	rc = ecore_sp_init_request(p_hwfn, &p_ent,
				   COMMON_RAMROD_PF_UPDATE, PROTOCOLID_COMMON,
				   &init_data);
	if (rc != ECORE_SUCCESS)
		return rc;

	p_ent->ramrod.pf_update.update_mf_vlan_flag = true;
	p_ent->ramrod.pf_update.mf_vlan =
				OSAL_CPU_TO_LE16(p_hwfn->hw_info.ovlan);

	return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
}
Example #10
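/* Post the PF-start ramrod: event/consolidation queue PBL addresses,
 * multi-function mode, outer-tag and tunnel configuration are taken from
 * the hwfn and device state.
 */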
enum _ecore_status_t ecore_sp_pf_start(struct ecore_hwfn *p_hwfn,
				       struct ecore_ptt *p_ptt,
				       struct ecore_tunnel_info *p_tunn,
				       bool allow_npar_tx_switch)
{
	struct pf_start_ramrod_data *p_ramrod = OSAL_NULL;
	u16 sb = ecore_int_get_sp_sb_id(p_hwfn);
	u8 sb_index = p_hwfn->p_eq->eq_sb_index;
	struct ecore_spq_entry *p_ent = OSAL_NULL;
	struct ecore_sp_init_data init_data;
	enum _ecore_status_t rc = ECORE_NOTIMPL;
	u8 page_cnt;
	u8 i;

	/* update initial eq producer */
	ecore_eq_prod_update(p_hwfn,
			     ecore_chain_get_prod_idx(&p_hwfn->p_eq->chain));

	/* Initialize the SPQ entry for the ramrod */
	OSAL_MEMSET(&init_data, 0, sizeof(init_data));
	init_data.cid = ecore_spq_get_cid(p_hwfn);
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;

	rc = ecore_sp_init_request(p_hwfn, &p_ent,
				   COMMON_RAMROD_PF_START,
				   PROTOCOLID_COMMON, &init_data);
	if (rc != ECORE_SUCCESS)
		return rc;

	/* Fill the ramrod data */
	p_ramrod = &p_ent->ramrod.pf_start;
	p_ramrod->event_ring_sb_id = OSAL_CPU_TO_LE16(sb);
	p_ramrod->event_ring_sb_index = sb_index;
	p_ramrod->path_id = ECORE_PATH_ID(p_hwfn);

	/* For easier debugging */
	p_ramrod->dont_log_ramrods = 0;
	p_ramrod->log_type_mask = OSAL_CPU_TO_LE16(0x8f);

	if (OSAL_TEST_BIT(ECORE_MF_OVLAN_CLSS, &p_hwfn->p_dev->mf_bits))
		p_ramrod->mf_mode = MF_OVLAN;
	else
		p_ramrod->mf_mode = MF_NPAR;

	p_ramrod->outer_tag_config.outer_tag.tci =
		OSAL_CPU_TO_LE16(p_hwfn->hw_info.ovlan);
	if (OSAL_TEST_BIT(ECORE_MF_8021Q_TAGGING, &p_hwfn->p_dev->mf_bits)) {
		p_ramrod->outer_tag_config.outer_tag.tpid = ETH_P_8021Q;
	} else if (OSAL_TEST_BIT(ECORE_MF_8021AD_TAGGING,
		 &p_hwfn->p_dev->mf_bits)) {
		p_ramrod->outer_tag_config.outer_tag.tpid = ETH_P_8021AD;
		p_ramrod->outer_tag_config.enable_stag_pri_change = 1;
	}

	p_ramrod->outer_tag_config.pri_map_valid = 1;
	for (i = 0; i < ECORE_MAX_PFC_PRIORITIES; i++)
		p_ramrod->outer_tag_config.inner_to_outer_pri_map[i] = i;

	/* enable_stag_pri_change should be set if port is in BD mode or,
	 * UFP with Host Control mode or, UFP with DCB over base interface.
	 */
	if (OSAL_TEST_BIT(ECORE_MF_UFP_SPECIFIC, &p_hwfn->p_dev->mf_bits)) {
		if ((p_hwfn->ufp_info.pri_type == ECORE_UFP_PRI_OS) ||
		    (p_hwfn->p_dcbx_info->results.dcbx_enabled))
			p_ramrod->outer_tag_config.enable_stag_pri_change = 1;
		else
			p_ramrod->outer_tag_config.enable_stag_pri_change = 0;
	}

	/* Place EQ address in RAMROD */
	DMA_REGPAIR_LE(p_ramrod->event_ring_pbl_addr,
		       p_hwfn->p_eq->chain.pbl_sp.p_phys_table);
	page_cnt = (u8)ecore_chain_get_page_cnt(&p_hwfn->p_eq->chain);
	p_ramrod->event_ring_num_pages = page_cnt;
	DMA_REGPAIR_LE(p_ramrod->consolid_q_pbl_addr,
		       p_hwfn->p_consq->chain.pbl_sp.p_phys_table);

	ecore_tunn_set_pf_start_params(p_hwfn, p_tunn,
				       &p_ramrod->tunnel_config);

	if (OSAL_TEST_BIT(ECORE_MF_INTER_PF_SWITCH,
			  &p_hwfn->p_dev->mf_bits))
		p_ramrod->allow_npar_tx_switching = allow_npar_tx_switch;

	switch (p_hwfn->hw_info.personality) {
	case ECORE_PCI_ETH:
		p_ramrod->personality = PERSONALITY_ETH;
		break;
	default:
		DP_NOTICE(p_hwfn, true, "Unknown personality %d\n",
			 p_hwfn->hw_info.personality);
		p_ramrod->personality = PERSONALITY_ETH;
	}

	if (p_hwfn->p_dev->p_iov_info) {
		struct ecore_hw_sriov_info *p_iov = p_hwfn->p_dev->p_iov_info;

		p_ramrod->base_vf_id = (u8)p_iov->first_vf_in_pf;
		p_ramrod->num_vfs = (u8)p_iov->total_vfs;
	}
	/* @@@TBD - update also the "ROCE_VER_KEY" entries when the FW RoCE HSI
	 * version is available.
	 */
	p_ramrod->hsi_fp_ver.major_ver_arr[ETH_VER_KEY] = ETH_HSI_VER_MAJOR;
	p_ramrod->hsi_fp_ver.minor_ver_arr[ETH_VER_KEY] = ETH_HSI_VER_MINOR;

	DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ,
		   "Setting event_ring_sb [id %04x index %02x], outer_tag.tpid [%d], outer_tag.tci [%d]\n",
		   sb, sb_index, p_ramrod->outer_tag_config.outer_tag.tpid,
		   p_ramrod->outer_tag_config.outer_tag.tci);

	rc = ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);

	if (p_tunn)
		ecore_set_hw_tunn_mode_port(p_hwfn, p_ptt,
					    &p_hwfn->p_dev->tunnel);

	return rc;
}
Example #11
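/* Acquire a slow-path queue entry and initialize its header and completion
 * mode for the requested ramrod.
 */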
enum _ecore_status_t ecore_sp_init_request(struct ecore_hwfn *p_hwfn,
					   struct ecore_spq_entry **pp_ent,
					   u8 cmd,
					   u8 protocol,
					   struct ecore_sp_init_data *p_data)
{
	u32 opaque_cid = p_data->opaque_fid << 16 | p_data->cid;
	struct ecore_spq_entry *p_ent = OSAL_NULL;
	enum _ecore_status_t rc;

	if (!pp_ent)
		return ECORE_INVAL;

	/* Get an SPQ entry */
	rc = ecore_spq_get_entry(p_hwfn, pp_ent);
	if (rc != ECORE_SUCCESS)
		return rc;

	/* Fill the SPQ entry */
	p_ent = *pp_ent;
	p_ent->elem.hdr.cid = OSAL_CPU_TO_LE32(opaque_cid);
	p_ent->elem.hdr.cmd_id = cmd;
	p_ent->elem.hdr.protocol_id = protocol;
	p_ent->priority = ECORE_SPQ_PRIORITY_NORMAL;
	p_ent->comp_mode = p_data->comp_mode;
	p_ent->comp_done.done = 0;

	switch (p_ent->comp_mode) {
	case ECORE_SPQ_MODE_EBLOCK:
		p_ent->comp_cb.cookie = &p_ent->comp_done;
		break;

	case ECORE_SPQ_MODE_BLOCK:
		if (!p_data->p_comp_data)
			return ECORE_INVAL;

		p_ent->comp_cb.cookie = p_data->p_comp_data->cookie;
		break;

	case ECORE_SPQ_MODE_CB:
		if (!p_data->p_comp_data)
			p_ent->comp_cb.function = OSAL_NULL;
		else
			p_ent->comp_cb = *p_data->p_comp_data;
		break;

	default:
		DP_NOTICE(p_hwfn, true, "Unknown SPQE completion mode %d\n",
			  p_ent->comp_mode);
		return ECORE_INVAL;
	}

	DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ,
		   "Initialized: CID %08x cmd %02x protocol %02x data_addr %lu comp_mode [%s]\n",
		   opaque_cid, cmd, protocol,
		   (unsigned long)&p_ent->ramrod,
		   D_TRINE(p_ent->comp_mode, ECORE_SPQ_MODE_EBLOCK,
			   ECORE_SPQ_MODE_BLOCK, "MODE_EBLOCK", "MODE_BLOCK",
			   "MODE_CB"));

	OSAL_MEMSET(&p_ent->ramrod, 0, sizeof(p_ent->ramrod));

	return ECORE_SUCCESS;
}
Example #12
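/* Execute an init-tool array write command; the referenced array may be
 * zipped, a repeated pattern, or a standard dword array.
 */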
static enum _ecore_status_t ecore_init_cmd_array(struct ecore_hwfn *p_hwfn,
						 struct ecore_ptt *p_ptt,
						 struct init_write_op *cmd,
						 bool b_must_dmae,
						 bool b_can_dmae)
{
	u32 dmae_array_offset = OSAL_LE32_TO_CPU(cmd->args.array_offset);
	u32 data = OSAL_LE32_TO_CPU(cmd->data);
	u32 addr = GET_FIELD(data, INIT_WRITE_OP_ADDRESS) << 2;
#ifdef CONFIG_ECORE_ZIPPED_FW
	u32 offset, output_len, input_len, max_size;
#endif
	struct ecore_dev *p_dev = p_hwfn->p_dev;
	union init_array_hdr *hdr;
	const u32 *array_data;
	enum _ecore_status_t rc = ECORE_SUCCESS;
	u32 size;

	array_data = p_dev->fw_data->arr_data;

	hdr = (union init_array_hdr *) (array_data +
					dmae_array_offset);
	data = OSAL_LE32_TO_CPU(hdr->raw.data);
	switch (GET_FIELD(data, INIT_ARRAY_RAW_HDR_TYPE)) {
	case INIT_ARR_ZIPPED:
#ifdef CONFIG_ECORE_ZIPPED_FW
		offset = dmae_array_offset + 1;
		input_len = GET_FIELD(data,
				      INIT_ARRAY_ZIPPED_HDR_ZIPPED_SIZE);
		max_size = MAX_ZIPPED_SIZE * 4;
		OSAL_MEMSET(p_hwfn->unzip_buf, 0, max_size);

		output_len = OSAL_UNZIP_DATA(p_hwfn, input_len,
					     (u8 *)&array_data[offset],
					     max_size, (u8 *)p_hwfn->unzip_buf);
		if (output_len) {
			rc = ecore_init_array_dmae(p_hwfn, p_ptt, addr, 0,
						   output_len,
						   p_hwfn->unzip_buf,
						   b_must_dmae, b_can_dmae);
		} else {
			DP_NOTICE(p_hwfn, true,
				  "Failed to unzip dmae data\n");
			rc = ECORE_INVAL;
		}
#else
		DP_NOTICE(p_hwfn, true,
			  "Using zipped firmware without config enabled\n");
		rc = ECORE_INVAL;
#endif
		break;
	case INIT_ARR_PATTERN:
	{
		u32 repeats = GET_FIELD(data,
					INIT_ARRAY_PATTERN_HDR_REPETITIONS);
		u32 i;

		size = GET_FIELD(data,
				 INIT_ARRAY_PATTERN_HDR_PATTERN_SIZE);

		for (i = 0; i < repeats; i++, addr += size << 2) {
			rc = ecore_init_array_dmae(p_hwfn, p_ptt, addr,
						   dmae_array_offset + 1,
						   size, array_data,
						   b_must_dmae, b_can_dmae);
			if (rc)
				break;
		}
		break;
	}
	case INIT_ARR_STANDARD:
		size = GET_FIELD(data,
				 INIT_ARRAY_STANDARD_HDR_SIZE);
		rc = ecore_init_array_dmae(p_hwfn, p_ptt, addr,
					   dmae_array_offset + 1,
					   size, array_data,
					   b_must_dmae, b_can_dmae);
		break;
	}

	return rc;
}