int qed_sp_pf_start(struct qed_hwfn *p_hwfn,
		    struct qed_ptt *p_ptt,
		    struct qed_tunnel_info *p_tunn,
		    bool allow_npar_tx_switch)
{
	struct pf_start_ramrod_data *p_ramrod = NULL;
	u16 sb = qed_int_get_sp_sb_id(p_hwfn);
	u8 sb_index = p_hwfn->p_eq->eq_sb_index;
	struct qed_spq_entry *p_ent = NULL;
	struct qed_sp_init_data init_data;
	int rc = -EINVAL;
	u8 page_cnt, i;

	/* update initial eq producer */
	qed_eq_prod_update(p_hwfn,
			   qed_chain_get_prod_idx(&p_hwfn->p_eq->chain));

	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = qed_spq_get_cid(p_hwfn);
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 COMMON_RAMROD_PF_START,
				 PROTOCOLID_COMMON, &init_data);
	if (rc)
		return rc;

	p_ramrod = &p_ent->ramrod.pf_start;

	p_ramrod->event_ring_sb_id = cpu_to_le16(sb);
	p_ramrod->event_ring_sb_index = sb_index;
	p_ramrod->path_id = QED_PATH_ID(p_hwfn);
	p_ramrod->dont_log_ramrods = 0;
	p_ramrod->log_type_mask = cpu_to_le16(0xf);

	if (test_bit(QED_MF_OVLAN_CLSS, &p_hwfn->cdev->mf_bits))
		p_ramrod->mf_mode = MF_OVLAN;
	else
		p_ramrod->mf_mode = MF_NPAR;

	p_ramrod->outer_tag_config.outer_tag.tci =
		cpu_to_le16(p_hwfn->hw_info.ovlan);
	if (test_bit(QED_MF_8021Q_TAGGING, &p_hwfn->cdev->mf_bits)) {
		p_ramrod->outer_tag_config.outer_tag.tpid = ETH_P_8021Q;
	} else if (test_bit(QED_MF_8021AD_TAGGING, &p_hwfn->cdev->mf_bits)) {
		p_ramrod->outer_tag_config.outer_tag.tpid = ETH_P_8021AD;
		p_ramrod->outer_tag_config.enable_stag_pri_change = 1;
	}

	p_ramrod->outer_tag_config.pri_map_valid = 1;
	for (i = 0; i < QED_MAX_PFC_PRIORITIES; i++)
		p_ramrod->outer_tag_config.inner_to_outer_pri_map[i] = i;

	/* enable_stag_pri_change should be set if port is in BD mode or,
	 * UFP with Host Control mode.
	 */
	if (test_bit(QED_MF_UFP_SPECIFIC, &p_hwfn->cdev->mf_bits)) {
		if (p_hwfn->ufp_info.pri_type == QED_UFP_PRI_OS)
			p_ramrod->outer_tag_config.enable_stag_pri_change = 1;
		else
			p_ramrod->outer_tag_config.enable_stag_pri_change = 0;

		p_ramrod->outer_tag_config.outer_tag.tci |=
			cpu_to_le16(((u16)p_hwfn->ufp_info.tc << 13));
	}

	/* Place EQ address in RAMROD */
	DMA_REGPAIR_LE(p_ramrod->event_ring_pbl_addr,
		       p_hwfn->p_eq->chain.pbl_sp.p_phys_table);
	page_cnt = (u8)qed_chain_get_page_cnt(&p_hwfn->p_eq->chain);
	p_ramrod->event_ring_num_pages = page_cnt;
	DMA_REGPAIR_LE(p_ramrod->consolid_q_pbl_addr,
		       p_hwfn->p_consq->chain.pbl_sp.p_phys_table);

	qed_tunn_set_pf_start_params(p_hwfn, p_tunn, &p_ramrod->tunnel_config);

	if (test_bit(QED_MF_INTER_PF_SWITCH, &p_hwfn->cdev->mf_bits))
		p_ramrod->allow_npar_tx_switching = allow_npar_tx_switch;

	switch (p_hwfn->hw_info.personality) {
	case QED_PCI_ETH:
		p_ramrod->personality = PERSONALITY_ETH;
		break;
	case QED_PCI_FCOE:
		p_ramrod->personality = PERSONALITY_FCOE;
		break;
	case QED_PCI_ISCSI:
		p_ramrod->personality = PERSONALITY_ISCSI;
		break;
	case QED_PCI_ETH_ROCE:
	case QED_PCI_ETH_IWARP:
		p_ramrod->personality = PERSONALITY_RDMA_AND_ETH;
		break;
	default:
		DP_NOTICE(p_hwfn, "Unknown personality %d\n",
			  p_hwfn->hw_info.personality);
		p_ramrod->personality = PERSONALITY_ETH;
	}

	if (p_hwfn->cdev->p_iov_info) {
		struct qed_hw_sriov_info *p_iov = p_hwfn->cdev->p_iov_info;

		p_ramrod->base_vf_id = (u8) p_iov->first_vf_in_pf;
		p_ramrod->num_vfs = (u8) p_iov->total_vfs;
	}
	p_ramrod->hsi_fp_ver.major_ver_arr[ETH_VER_KEY] = ETH_HSI_VER_MAJOR;
	p_ramrod->hsi_fp_ver.minor_ver_arr[ETH_VER_KEY] = ETH_HSI_VER_MINOR;

	DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
		   "Setting event_ring_sb [id %04x index %02x], outer_tag.tci [%d]\n",
		   sb, sb_index, p_ramrod->outer_tag_config.outer_tag.tci);

	rc = qed_spq_post(p_hwfn, p_ent, NULL);

	if (p_tunn)
		qed_set_hw_tunn_mode_port(p_hwfn, p_ptt,
					  &p_hwfn->cdev->tunnel);

	return rc;
}
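/*
 * Illustrative caller sketch (an assumption, not taken from this file): in
 * the qed driver the PF-start ramrod is posted once per hw-function during
 * PF hardware initialization, after the EQ/CONSQ chains have been allocated
 * and before any queue-start ramrods. The wrapper name
 * qed_hw_start_pf_sketch() below is hypothetical; qed_sp_pf_start() itself
 * matches the signature defined above.
 */
static int qed_hw_start_pf_sketch(struct qed_hwfn *p_hwfn,
				  struct qed_ptt *p_ptt,
				  struct qed_tunnel_info *p_tunn,
				  bool allow_npar_tx_switch)
{
	int rc;

	/* Post COMMON_RAMROD_PF_START; with QED_SPQ_MODE_EBLOCK the call
	 * blocks until firmware completes the slowpath queue element.
	 */
	rc = qed_sp_pf_start(p_hwfn, p_ptt, p_tunn, allow_npar_tx_switch);
	if (rc) {
		DP_NOTICE(p_hwfn, "Function start ramrod failed, rc = %d\n",
			  rc);
		return rc;
	}

	return 0;
}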
int qed_sp_pf_start(struct qed_hwfn *p_hwfn, enum qed_mf_mode mode)
{
	struct pf_start_ramrod_data *p_ramrod = NULL;
	u16 sb = qed_int_get_sp_sb_id(p_hwfn);
	u8 sb_index = p_hwfn->p_eq->eq_sb_index;
	struct qed_spq_entry *p_ent = NULL;
	struct qed_sp_init_data init_data;
	int rc = -EINVAL;

	/* update initial eq producer */
	qed_eq_prod_update(p_hwfn,
			   qed_chain_get_prod_idx(&p_hwfn->p_eq->chain));

	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = qed_spq_get_cid(p_hwfn);
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 COMMON_RAMROD_PF_START,
				 PROTOCOLID_COMMON, &init_data);
	if (rc)
		return rc;

	p_ramrod = &p_ent->ramrod.pf_start;

	p_ramrod->event_ring_sb_id = cpu_to_le16(sb);
	p_ramrod->event_ring_sb_index = sb_index;
	p_ramrod->path_id = QED_PATH_ID(p_hwfn);
	p_ramrod->dont_log_ramrods = 0;
	p_ramrod->log_type_mask = cpu_to_le16(0xf);
	p_ramrod->mf_mode = mode;

	switch (mode) {
	case QED_MF_DEFAULT:
	case QED_MF_NPAR:
		p_ramrod->mf_mode = MF_NPAR;
		break;
	case QED_MF_OVLAN:
		p_ramrod->mf_mode = MF_OVLAN;
		break;
	default:
		DP_NOTICE(p_hwfn, "Unsupported MF mode, init as DEFAULT\n");
		p_ramrod->mf_mode = MF_NPAR;
	}
	p_ramrod->outer_tag = p_hwfn->hw_info.ovlan;

	/* Place EQ address in RAMROD */
	DMA_REGPAIR_LE(p_ramrod->event_ring_pbl_addr,
		       p_hwfn->p_eq->chain.pbl.p_phys_table);
	p_ramrod->event_ring_num_pages = (u8)p_hwfn->p_eq->chain.page_cnt;

	DMA_REGPAIR_LE(p_ramrod->consolid_q_pbl_addr,
		       p_hwfn->p_consq->chain.pbl.p_phys_table);

	p_hwfn->hw_info.personality = PERSONALITY_ETH;

	DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
		   "Setting event_ring_sb [id %04x index %02x], outer_tag [%d]\n",
		   sb, sb_index, p_ramrod->outer_tag);

	return qed_spq_post(p_hwfn, p_ent, NULL);
}
enum _ecore_status_t ecore_sp_pf_start(struct ecore_hwfn *p_hwfn,
				       struct ecore_ptt *p_ptt,
				       struct ecore_tunnel_info *p_tunn,
				       bool allow_npar_tx_switch)
{
	struct pf_start_ramrod_data *p_ramrod = OSAL_NULL;
	u16 sb = ecore_int_get_sp_sb_id(p_hwfn);
	u8 sb_index = p_hwfn->p_eq->eq_sb_index;
	struct ecore_spq_entry *p_ent = OSAL_NULL;
	struct ecore_sp_init_data init_data;
	enum _ecore_status_t rc = ECORE_NOTIMPL;
	u8 page_cnt;
	u8 i;

	/* update initial eq producer */
	ecore_eq_prod_update(p_hwfn,
			     ecore_chain_get_prod_idx(&p_hwfn->p_eq->chain));

	/* Initialize the SPQ entry for the ramrod */
	OSAL_MEMSET(&init_data, 0, sizeof(init_data));
	init_data.cid = ecore_spq_get_cid(p_hwfn);
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;

	rc = ecore_sp_init_request(p_hwfn, &p_ent,
				   COMMON_RAMROD_PF_START,
				   PROTOCOLID_COMMON, &init_data);
	if (rc != ECORE_SUCCESS)
		return rc;

	/* Fill the ramrod data */
	p_ramrod = &p_ent->ramrod.pf_start;
	p_ramrod->event_ring_sb_id = OSAL_CPU_TO_LE16(sb);
	p_ramrod->event_ring_sb_index = sb_index;
	p_ramrod->path_id = ECORE_PATH_ID(p_hwfn);

	/* For easier debugging */
	p_ramrod->dont_log_ramrods = 0;
	p_ramrod->log_type_mask = OSAL_CPU_TO_LE16(0x8f);

	if (OSAL_TEST_BIT(ECORE_MF_OVLAN_CLSS, &p_hwfn->p_dev->mf_bits))
		p_ramrod->mf_mode = MF_OVLAN;
	else
		p_ramrod->mf_mode = MF_NPAR;

	p_ramrod->outer_tag_config.outer_tag.tci =
		OSAL_CPU_TO_LE16(p_hwfn->hw_info.ovlan);

	if (OSAL_TEST_BIT(ECORE_MF_8021Q_TAGGING, &p_hwfn->p_dev->mf_bits)) {
		p_ramrod->outer_tag_config.outer_tag.tpid = ETH_P_8021Q;
	} else if (OSAL_TEST_BIT(ECORE_MF_8021AD_TAGGING,
				 &p_hwfn->p_dev->mf_bits)) {
		p_ramrod->outer_tag_config.outer_tag.tpid = ETH_P_8021AD;
		p_ramrod->outer_tag_config.enable_stag_pri_change = 1;
	}

	p_ramrod->outer_tag_config.pri_map_valid = 1;
	for (i = 0; i < ECORE_MAX_PFC_PRIORITIES; i++)
		p_ramrod->outer_tag_config.inner_to_outer_pri_map[i] = i;

	/* enable_stag_pri_change should be set if port is in BD mode or,
	 * UFP with Host Control mode or, UFP with DCB over base interface.
	 */
	if (OSAL_TEST_BIT(ECORE_MF_UFP_SPECIFIC, &p_hwfn->p_dev->mf_bits)) {
		if ((p_hwfn->ufp_info.pri_type == ECORE_UFP_PRI_OS) ||
		    (p_hwfn->p_dcbx_info->results.dcbx_enabled))
			p_ramrod->outer_tag_config.enable_stag_pri_change = 1;
		else
			p_ramrod->outer_tag_config.enable_stag_pri_change = 0;
	}

	/* Place EQ address in RAMROD */
	DMA_REGPAIR_LE(p_ramrod->event_ring_pbl_addr,
		       p_hwfn->p_eq->chain.pbl_sp.p_phys_table);
	page_cnt = (u8)ecore_chain_get_page_cnt(&p_hwfn->p_eq->chain);
	p_ramrod->event_ring_num_pages = page_cnt;
	DMA_REGPAIR_LE(p_ramrod->consolid_q_pbl_addr,
		       p_hwfn->p_consq->chain.pbl_sp.p_phys_table);

	ecore_tunn_set_pf_start_params(p_hwfn, p_tunn,
				       &p_ramrod->tunnel_config);

	if (OSAL_TEST_BIT(ECORE_MF_INTER_PF_SWITCH, &p_hwfn->p_dev->mf_bits))
		p_ramrod->allow_npar_tx_switching = allow_npar_tx_switch;

	switch (p_hwfn->hw_info.personality) {
	case ECORE_PCI_ETH:
		p_ramrod->personality = PERSONALITY_ETH;
		break;
	default:
		DP_NOTICE(p_hwfn, true, "Unknown personality %d\n",
			  p_hwfn->hw_info.personality);
		p_ramrod->personality = PERSONALITY_ETH;
	}

	if (p_hwfn->p_dev->p_iov_info) {
		struct ecore_hw_sriov_info *p_iov = p_hwfn->p_dev->p_iov_info;

		p_ramrod->base_vf_id = (u8)p_iov->first_vf_in_pf;
		p_ramrod->num_vfs = (u8)p_iov->total_vfs;
	}

	/* @@@TBD - update also the "ROCE_VER_KEY" entries when the FW RoCE HSI
	 * version is available.
	 */
	p_ramrod->hsi_fp_ver.major_ver_arr[ETH_VER_KEY] = ETH_HSI_VER_MAJOR;
	p_ramrod->hsi_fp_ver.minor_ver_arr[ETH_VER_KEY] = ETH_HSI_VER_MINOR;

	DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ,
		   "Setting event_ring_sb [id %04x index %02x], outer_tag.tpid [%d], outer_tag.tci [%d]\n",
		   sb, sb_index,
		   p_ramrod->outer_tag_config.outer_tag.tpid,
		   p_ramrod->outer_tag_config.outer_tag.tci);

	rc = ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);

	if (p_tunn)
		ecore_set_hw_tunn_mode_port(p_hwfn, p_ptt,
					    &p_hwfn->p_dev->tunnel);

	return rc;
}
int qed_sp_pf_start(struct qed_hwfn *p_hwfn,
		    struct qed_tunn_start_params *p_tunn,
		    enum qed_mf_mode mode, bool allow_npar_tx_switch)
{
	struct pf_start_ramrod_data *p_ramrod = NULL;
	u16 sb = qed_int_get_sp_sb_id(p_hwfn);
	u8 sb_index = p_hwfn->p_eq->eq_sb_index;
	struct qed_spq_entry *p_ent = NULL;
	struct qed_sp_init_data init_data;
	int rc = -EINVAL;
	u8 page_cnt;

	/* update initial eq producer */
	qed_eq_prod_update(p_hwfn,
			   qed_chain_get_prod_idx(&p_hwfn->p_eq->chain));

	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = qed_spq_get_cid(p_hwfn);
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 COMMON_RAMROD_PF_START,
				 PROTOCOLID_COMMON, &init_data);
	if (rc)
		return rc;

	p_ramrod = &p_ent->ramrod.pf_start;

	p_ramrod->event_ring_sb_id = cpu_to_le16(sb);
	p_ramrod->event_ring_sb_index = sb_index;
	p_ramrod->path_id = QED_PATH_ID(p_hwfn);
	p_ramrod->dont_log_ramrods = 0;
	p_ramrod->log_type_mask = cpu_to_le16(0xf);

	switch (mode) {
	case QED_MF_DEFAULT:
	case QED_MF_NPAR:
		p_ramrod->mf_mode = MF_NPAR;
		break;
	case QED_MF_OVLAN:
		p_ramrod->mf_mode = MF_OVLAN;
		break;
	default:
		DP_NOTICE(p_hwfn, "Unsupported MF mode, init as DEFAULT\n");
		p_ramrod->mf_mode = MF_NPAR;
	}
	p_ramrod->outer_tag = p_hwfn->hw_info.ovlan;

	/* Place EQ address in RAMROD */
	DMA_REGPAIR_LE(p_ramrod->event_ring_pbl_addr,
		       p_hwfn->p_eq->chain.pbl_sp.p_phys_table);
	page_cnt = (u8)qed_chain_get_page_cnt(&p_hwfn->p_eq->chain);
	p_ramrod->event_ring_num_pages = page_cnt;
	DMA_REGPAIR_LE(p_ramrod->consolid_q_pbl_addr,
		       p_hwfn->p_consq->chain.pbl_sp.p_phys_table);

	qed_tunn_set_pf_start_params(p_hwfn, p_tunn, &p_ramrod->tunnel_config);

	if (IS_MF_SI(p_hwfn))
		p_ramrod->allow_npar_tx_switching = allow_npar_tx_switch;

	switch (p_hwfn->hw_info.personality) {
	case QED_PCI_ETH:
		p_ramrod->personality = PERSONALITY_ETH;
		break;
	case QED_PCI_ISCSI:
		p_ramrod->personality = PERSONALITY_ISCSI;
		break;
	case QED_PCI_ETH_ROCE:
		p_ramrod->personality = PERSONALITY_RDMA_AND_ETH;
		break;
	default:
		DP_NOTICE(p_hwfn, "Unknown personality %d\n",
			  p_hwfn->hw_info.personality);
		p_ramrod->personality = PERSONALITY_ETH;
	}

	if (p_hwfn->cdev->p_iov_info) {
		struct qed_hw_sriov_info *p_iov = p_hwfn->cdev->p_iov_info;

		p_ramrod->base_vf_id = (u8) p_iov->first_vf_in_pf;
		p_ramrod->num_vfs = (u8) p_iov->total_vfs;
	}

	p_ramrod->hsi_fp_ver.major_ver_arr[ETH_VER_KEY] = ETH_HSI_VER_MAJOR;
	p_ramrod->hsi_fp_ver.minor_ver_arr[ETH_VER_KEY] = ETH_HSI_VER_MINOR;

	DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
		   "Setting event_ring_sb [id %04x index %02x], outer_tag [%d]\n",
		   sb, sb_index, p_ramrod->outer_tag);

	rc = qed_spq_post(p_hwfn, p_ent, NULL);

	if (p_tunn) {
		qed_set_hw_tunn_mode(p_hwfn, p_hwfn->p_main_ptt,
				     p_tunn->tunn_mode);
		p_hwfn->cdev->tunn_mode = p_tunn->tunn_mode;
	}

	return rc;
}