static int qedr_init_hw(struct qedr_dev *dev)
{
	struct qed_rdma_add_user_out_params out_params;
	struct qed_rdma_start_in_params *in_params;
	struct qed_rdma_cnq_params *cur_pbl;
	struct qed_rdma_events events;
	dma_addr_t p_phys_table;
	u32 page_cnt;
	int rc = 0;
	int i;

	in_params = kzalloc(sizeof(*in_params), GFP_KERNEL);
	if (!in_params) {
		rc = -ENOMEM;
		goto out;
	}

	/* Describe each CNQ's PBL (page count and physical pointer) for rdma_init */
	in_params->desired_cnq = dev->num_cnq;
	for (i = 0; i < dev->num_cnq; i++) {
		cur_pbl = &in_params->cnq_pbl_list[i];

		page_cnt = qed_chain_get_page_cnt(&dev->cnq_array[i].pbl);
		cur_pbl->num_pbl_pages = page_cnt;

		p_phys_table = qed_chain_get_pbl_phys(&dev->cnq_array[i].pbl);
		cur_pbl->pbl_ptr = (u64)p_phys_table;
	}

	events.affiliated_event = qedr_affiliated_event;
	events.unaffiliated_event = qedr_unaffiliated_event;
	events.context = dev;

	in_params->events = &events;
	in_params->cq_mode = QED_RDMA_CQ_MODE_32_BITS;
	in_params->max_mtu = dev->ndev->mtu;
	dev->iwarp_max_mtu = dev->ndev->mtu;
	ether_addr_copy(&in_params->mac_addr[0], dev->ndev->dev_addr);

	rc = dev->ops->rdma_init(dev->cdev, in_params);
	if (rc)
		goto out;

	rc = dev->ops->rdma_add_user(dev->rdma_ctx, &out_params);
	if (rc)
		goto out;

	/* Doorbell (DPI) information returned by rdma_add_user */
	dev->db_addr = (void __iomem *)(uintptr_t)out_params.dpi_addr;
	dev->db_phys_addr = out_params.dpi_phys_addr;
	dev->db_size = out_params.dpi_size;
	dev->dpi = out_params.dpi;

	rc = qedr_set_device_attr(dev);
out:
	kfree(in_params);
	if (rc)
		DP_ERR(dev, "Init HW Failed rc = %d\n", rc);

	return rc;
}
int qed_sp_pf_start(struct qed_hwfn *p_hwfn,
		    struct qed_ptt *p_ptt,
		    struct qed_tunnel_info *p_tunn,
		    bool allow_npar_tx_switch)
{
	struct pf_start_ramrod_data *p_ramrod = NULL;
	u16 sb = qed_int_get_sp_sb_id(p_hwfn);
	u8 sb_index = p_hwfn->p_eq->eq_sb_index;
	struct qed_spq_entry *p_ent = NULL;
	struct qed_sp_init_data init_data;
	int rc = -EINVAL;
	u8 page_cnt, i;

	/* update initial eq producer */
	qed_eq_prod_update(p_hwfn,
			   qed_chain_get_prod_idx(&p_hwfn->p_eq->chain));

	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = qed_spq_get_cid(p_hwfn);
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 COMMON_RAMROD_PF_START,
				 PROTOCOLID_COMMON, &init_data);
	if (rc)
		return rc;

	p_ramrod = &p_ent->ramrod.pf_start;

	p_ramrod->event_ring_sb_id = cpu_to_le16(sb);
	p_ramrod->event_ring_sb_index = sb_index;
	p_ramrod->path_id = QED_PATH_ID(p_hwfn);
	p_ramrod->dont_log_ramrods = 0;
	p_ramrod->log_type_mask = cpu_to_le16(0xf);

	if (test_bit(QED_MF_OVLAN_CLSS, &p_hwfn->cdev->mf_bits))
		p_ramrod->mf_mode = MF_OVLAN;
	else
		p_ramrod->mf_mode = MF_NPAR;

	p_ramrod->outer_tag_config.outer_tag.tci =
				cpu_to_le16(p_hwfn->hw_info.ovlan);
	if (test_bit(QED_MF_8021Q_TAGGING, &p_hwfn->cdev->mf_bits)) {
		p_ramrod->outer_tag_config.outer_tag.tpid = ETH_P_8021Q;
	} else if (test_bit(QED_MF_8021AD_TAGGING, &p_hwfn->cdev->mf_bits)) {
		p_ramrod->outer_tag_config.outer_tag.tpid = ETH_P_8021AD;
		p_ramrod->outer_tag_config.enable_stag_pri_change = 1;
	}

	p_ramrod->outer_tag_config.pri_map_valid = 1;
	for (i = 0; i < QED_MAX_PFC_PRIORITIES; i++)
		p_ramrod->outer_tag_config.inner_to_outer_pri_map[i] = i;

	/* enable_stag_pri_change should be set if port is in BD mode or,
	 * UFP with Host Control mode.
	 */
	if (test_bit(QED_MF_UFP_SPECIFIC, &p_hwfn->cdev->mf_bits)) {
		if (p_hwfn->ufp_info.pri_type == QED_UFP_PRI_OS)
			p_ramrod->outer_tag_config.enable_stag_pri_change = 1;
		else
			p_ramrod->outer_tag_config.enable_stag_pri_change = 0;

		p_ramrod->outer_tag_config.outer_tag.tci |=
			cpu_to_le16(((u16)p_hwfn->ufp_info.tc << 13));
	}

	/* Place EQ address in RAMROD */
	DMA_REGPAIR_LE(p_ramrod->event_ring_pbl_addr,
		       p_hwfn->p_eq->chain.pbl_sp.p_phys_table);
	page_cnt = (u8)qed_chain_get_page_cnt(&p_hwfn->p_eq->chain);
	p_ramrod->event_ring_num_pages = page_cnt;
	DMA_REGPAIR_LE(p_ramrod->consolid_q_pbl_addr,
		       p_hwfn->p_consq->chain.pbl_sp.p_phys_table);

	qed_tunn_set_pf_start_params(p_hwfn, p_tunn, &p_ramrod->tunnel_config);

	if (test_bit(QED_MF_INTER_PF_SWITCH, &p_hwfn->cdev->mf_bits))
		p_ramrod->allow_npar_tx_switching = allow_npar_tx_switch;

	switch (p_hwfn->hw_info.personality) {
	case QED_PCI_ETH:
		p_ramrod->personality = PERSONALITY_ETH;
		break;
	case QED_PCI_FCOE:
		p_ramrod->personality = PERSONALITY_FCOE;
		break;
	case QED_PCI_ISCSI:
		p_ramrod->personality = PERSONALITY_ISCSI;
		break;
	case QED_PCI_ETH_ROCE:
	case QED_PCI_ETH_IWARP:
		p_ramrod->personality = PERSONALITY_RDMA_AND_ETH;
		break;
	default:
		DP_NOTICE(p_hwfn, "Unknown personality %d\n",
			  p_hwfn->hw_info.personality);
		p_ramrod->personality = PERSONALITY_ETH;
	}

	if (p_hwfn->cdev->p_iov_info) {
		struct qed_hw_sriov_info *p_iov = p_hwfn->cdev->p_iov_info;

		p_ramrod->base_vf_id = (u8) p_iov->first_vf_in_pf;
		p_ramrod->num_vfs = (u8) p_iov->total_vfs;
	}

	p_ramrod->hsi_fp_ver.major_ver_arr[ETH_VER_KEY] = ETH_HSI_VER_MAJOR;
	p_ramrod->hsi_fp_ver.minor_ver_arr[ETH_VER_KEY] = ETH_HSI_VER_MINOR;

	DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
		   "Setting event_ring_sb [id %04x index %02x], outer_tag.tci [%d]\n",
		   sb, sb_index, p_ramrod->outer_tag_config.outer_tag.tci);

	rc = qed_spq_post(p_hwfn, p_ent, NULL);

	if (p_tunn)
		qed_set_hw_tunn_mode_port(p_hwfn, p_ptt,
					  &p_hwfn->cdev->tunnel);

	return rc;
}
int qed_sp_pf_start(struct qed_hwfn *p_hwfn,
		    struct qed_tunn_start_params *p_tunn,
		    enum qed_mf_mode mode, bool allow_npar_tx_switch)
{
	struct pf_start_ramrod_data *p_ramrod = NULL;
	u16 sb = qed_int_get_sp_sb_id(p_hwfn);
	u8 sb_index = p_hwfn->p_eq->eq_sb_index;
	struct qed_spq_entry *p_ent = NULL;
	struct qed_sp_init_data init_data;
	int rc = -EINVAL;
	u8 page_cnt;

	/* update initial eq producer */
	qed_eq_prod_update(p_hwfn,
			   qed_chain_get_prod_idx(&p_hwfn->p_eq->chain));

	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = qed_spq_get_cid(p_hwfn);
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 COMMON_RAMROD_PF_START,
				 PROTOCOLID_COMMON, &init_data);
	if (rc)
		return rc;

	p_ramrod = &p_ent->ramrod.pf_start;

	p_ramrod->event_ring_sb_id = cpu_to_le16(sb);
	p_ramrod->event_ring_sb_index = sb_index;
	p_ramrod->path_id = QED_PATH_ID(p_hwfn);
	p_ramrod->dont_log_ramrods = 0;
	p_ramrod->log_type_mask = cpu_to_le16(0xf);

	switch (mode) {
	case QED_MF_DEFAULT:
	case QED_MF_NPAR:
		p_ramrod->mf_mode = MF_NPAR;
		break;
	case QED_MF_OVLAN:
		p_ramrod->mf_mode = MF_OVLAN;
		break;
	default:
		DP_NOTICE(p_hwfn, "Unsupported MF mode, init as DEFAULT\n");
		p_ramrod->mf_mode = MF_NPAR;
	}
	p_ramrod->outer_tag = p_hwfn->hw_info.ovlan;

	/* Place EQ address in RAMROD */
	DMA_REGPAIR_LE(p_ramrod->event_ring_pbl_addr,
		       p_hwfn->p_eq->chain.pbl_sp.p_phys_table);
	page_cnt = (u8)qed_chain_get_page_cnt(&p_hwfn->p_eq->chain);
	p_ramrod->event_ring_num_pages = page_cnt;
	DMA_REGPAIR_LE(p_ramrod->consolid_q_pbl_addr,
		       p_hwfn->p_consq->chain.pbl_sp.p_phys_table);

	qed_tunn_set_pf_start_params(p_hwfn, p_tunn, &p_ramrod->tunnel_config);

	if (IS_MF_SI(p_hwfn))
		p_ramrod->allow_npar_tx_switching = allow_npar_tx_switch;

	switch (p_hwfn->hw_info.personality) {
	case QED_PCI_ETH:
		p_ramrod->personality = PERSONALITY_ETH;
		break;
	case QED_PCI_ISCSI:
		p_ramrod->personality = PERSONALITY_ISCSI;
		break;
	case QED_PCI_ETH_ROCE:
		p_ramrod->personality = PERSONALITY_RDMA_AND_ETH;
		break;
	default:
		DP_NOTICE(p_hwfn, "Unknown personality %d\n",
			  p_hwfn->hw_info.personality);
		p_ramrod->personality = PERSONALITY_ETH;
	}

	if (p_hwfn->cdev->p_iov_info) {
		struct qed_hw_sriov_info *p_iov = p_hwfn->cdev->p_iov_info;

		p_ramrod->base_vf_id = (u8) p_iov->first_vf_in_pf;
		p_ramrod->num_vfs = (u8) p_iov->total_vfs;
	}

	p_ramrod->hsi_fp_ver.major_ver_arr[ETH_VER_KEY] = ETH_HSI_VER_MAJOR;
	p_ramrod->hsi_fp_ver.minor_ver_arr[ETH_VER_KEY] = ETH_HSI_VER_MINOR;

	DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
		   "Setting event_ring_sb [id %04x index %02x], outer_tag [%d]\n",
		   sb, sb_index, p_ramrod->outer_tag);

	rc = qed_spq_post(p_hwfn, p_ent, NULL);

	if (p_tunn) {
		qed_set_hw_tunn_mode(p_hwfn, p_hwfn->p_main_ptt,
				     p_tunn->tunn_mode);
		p_hwfn->cdev->tunn_mode = p_tunn->tunn_mode;
	}

	return rc;
}