static enum _ecore_status_t ecore_init_array_dmae(struct ecore_hwfn *p_hwfn,
						  struct ecore_ptt *p_ptt,
						  u32 addr,
						  u32 dmae_data_offset,
						  u32 size, const u32 *p_buf,
						  bool b_must_dmae,
						  bool b_can_dmae)
{
	enum _ecore_status_t rc = ECORE_SUCCESS;

	/* Perform DMAE only for lengthy enough sections or for wide-bus */
#ifndef ASIC_ONLY
	if ((CHIP_REV_IS_SLOW(p_hwfn->p_dev) && (size < 16)) ||
	    !b_can_dmae || (!b_must_dmae && (size < 16))) {
#else
	if (!b_can_dmae || (!b_must_dmae && (size < 16))) {
#endif
		const u32 *data = p_buf + dmae_data_offset;
		u32 i;

		for (i = 0; i < size; i++)
			ecore_wr(p_hwfn, p_ptt, addr + (i << 2), data[i]);
	} else {
		rc = ecore_dmae_host2grc(p_hwfn, p_ptt,
					 (osal_uintptr_t)(p_buf +
							  dmae_data_offset),
					 addr, size,
					 OSAL_NULL /* default parameters */);
	}

	return rc;
}

static enum _ecore_status_t ecore_init_fill_dmae(struct ecore_hwfn *p_hwfn,
						 struct ecore_ptt *p_ptt,
						 u32 addr, u32 fill_count)
{
	static u32 zero_buffer[DMAE_MAX_RW_SIZE];
	struct ecore_dmae_params params;

	OSAL_MEMSET(zero_buffer, 0, sizeof(u32) * DMAE_MAX_RW_SIZE);

	OSAL_MEMSET(&params, 0, sizeof(params));
	params.flags = ECORE_DMAE_FLAG_RW_REPL_SRC;

	return ecore_dmae_host2grc(p_hwfn, p_ptt,
				   (osal_uintptr_t)(&(zero_buffer[0])),
				   addr, fill_count, &params);
}

static void ecore_init_fill(struct ecore_hwfn *p_hwfn,
			    struct ecore_ptt *p_ptt,
			    u32 addr, u32 fill, u32 fill_count)
{
	u32 i;

	for (i = 0; i < fill_count; i++, addr += sizeof(u32))
		ecore_wr(p_hwfn, p_ptt, addr, fill);
}
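/* Illustrative only: a minimal sketch (kept out of the build with #if 0) of
 * how a caller might push a small init-values array through the helper
 * above. The 16-dword threshold and the wide-bus flag decide between
 * per-register ecore_wr() writes and a single DMAE transaction; the wrapper
 * name below is hypothetical.
 */
#if 0
static enum _ecore_status_t
example_flush_init_buf(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
		       u32 grc_addr, const u32 *p_buf, u32 num_dwords)
{
	/* Not wide-bus (b_must_dmae = false), DMAE allowed (b_can_dmae =
	 * true): arrays shorter than 16 dwords fall back to ecore_wr().
	 */
	return ecore_init_array_dmae(p_hwfn, p_ptt, grc_addr,
				     0 /* dmae_data_offset */, num_dwords,
				     p_buf, false, true);
}
#endif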
/* init_ops write command */
static enum _ecore_status_t ecore_init_cmd_wr(struct ecore_hwfn *p_hwfn,
					      struct ecore_ptt *p_ptt,
					      struct init_write_op *p_cmd,
					      bool b_can_dmae)
{
	enum _ecore_status_t rc = ECORE_SUCCESS;
	bool b_must_dmae;
	u32 addr, data;

	data = OSAL_LE32_TO_CPU(p_cmd->data);
	b_must_dmae = GET_FIELD(data, INIT_WRITE_OP_WIDE_BUS);
	addr = GET_FIELD(data, INIT_WRITE_OP_ADDRESS) << 2;

	/* Sanitize */
	if (b_must_dmae && !b_can_dmae) {
		DP_NOTICE(p_hwfn, true,
			  "Need to write to %08x for Wide-bus but DMAE isn't allowed\n",
			  addr);
		return ECORE_INVAL;
	}

	switch (GET_FIELD(data, INIT_WRITE_OP_SOURCE)) {
	case INIT_SRC_INLINE:
		data = OSAL_LE32_TO_CPU(p_cmd->args.inline_val);
		ecore_wr(p_hwfn, p_ptt, addr, data);
		break;
	case INIT_SRC_ZEROS:
		data = OSAL_LE32_TO_CPU(p_cmd->args.zeros_count);
		if (b_must_dmae || (b_can_dmae && (data >= 64)))
			rc = ecore_init_fill_dmae(p_hwfn, p_ptt, addr, data);
		else
			ecore_init_fill(p_hwfn, p_ptt, addr, 0, data);
		break;
	case INIT_SRC_ARRAY:
		rc = ecore_init_cmd_array(p_hwfn, p_ptt, p_cmd,
					  b_must_dmae, b_can_dmae);
		break;
	case INIT_SRC_RUNTIME:
		ecore_init_rt(p_hwfn, p_ptt, addr,
			      OSAL_LE16_TO_CPU(p_cmd->args.runtime.offset),
			      OSAL_LE16_TO_CPU(p_cmd->args.runtime.size),
			      b_must_dmae);
		break;
	}

	return rc;
}
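/* Illustrative only: a sketch (not compiled) of decoding an init_write_op
 * header, mirroring the GET_FIELD() usage above. The address field is in
 * dwords, hence the << 2 to form the byte-aligned GRC address; the helper
 * name is hypothetical.
 */
#if 0
static void example_dump_write_op(struct ecore_hwfn *p_hwfn,
				  struct init_write_op *p_cmd)
{
	u32 data = OSAL_LE32_TO_CPU(p_cmd->data);

	DP_NOTICE(p_hwfn, false,
		  "write op: addr 0x%08x, source %d, wide-bus %d\n",
		  GET_FIELD(data, INIT_WRITE_OP_ADDRESS) << 2,
		  (int)GET_FIELD(data, INIT_WRITE_OP_SOURCE),
		  (int)GET_FIELD(data, INIT_WRITE_OP_WIDE_BUS));
}
#endif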
static enum _ecore_status_t ecore_init_rt(struct ecore_hwfn *p_hwfn,
					  struct ecore_ptt *p_ptt,
					  u32 addr,
					  u16 rt_offset,
					  u16 size, bool b_must_dmae)
{
	u32 *p_init_val = &p_hwfn->rt_data.init_val[rt_offset];
	bool *p_valid = &p_hwfn->rt_data.b_valid[rt_offset];
	u16 i, segment;
	enum _ecore_status_t rc = ECORE_SUCCESS;

	/* Since not all RT entries are initialized, go over the RT and
	 * for each segment of initialized values use DMA.
	 */
	for (i = 0; i < size; i++) {
		if (!p_valid[i])
			continue;

		/* In case there isn't any wide-bus configuration here,
		 * simply write the data instead of using dmae.
		 */
		if (!b_must_dmae) {
			ecore_wr(p_hwfn, p_ptt, addr + (i << 2),
				 p_init_val[i]);
			continue;
		}

		/* Start of a new segment */
		for (segment = 1; i + segment < size; segment++)
			if (!p_valid[i + segment])
				break;

		rc = ecore_dmae_host2grc(p_hwfn, p_ptt,
					 (osal_uintptr_t)(p_init_val + i),
					 addr + (i << 2), segment,
					 OSAL_NULL /* default parameters */);
		if (rc != ECORE_SUCCESS)
			return rc;

		/* Jump over the entire segment, including invalid entry */
		i += segment;
	}

	return rc;
}
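/* Illustrative only: RT entries are staged into p_hwfn->rt_data elsewhere
 * in the driver before init runs; staging marks an entry valid so that
 * ecore_init_rt() above flushes it as part of a contiguous segment. A
 * minimal sketch (not compiled), with a hypothetical helper name:
 */
#if 0
static void example_stage_rt_entry(struct ecore_hwfn *p_hwfn,
				   u16 rt_offset, u32 val)
{
	p_hwfn->rt_data.init_val[rt_offset] = val;
	p_hwfn->rt_data.b_valid[rt_offset] = true;
}
#endif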
void ecore_gtt_init(struct ecore_hwfn *p_hwfn,
		    struct ecore_ptt *p_ptt)
{
	u32 gtt_base;
	u32 i;

#ifndef ASIC_ONLY
	if (CHIP_REV_IS_SLOW(p_hwfn->p_dev)) {
		/* This is done by MFW on ASIC; regardless, this should only
		 * be done once per chip [i.e., common]. Implementation is
		 * not too bright, but it should work on the simple FPGA/EMUL
		 * scenarios.
		 */
		static bool initialized = false;
		int poll_cnt = 500;
		u32 val;

		/* initialize PTT/GTT (poll for completion) */
		if (!initialized) {
			ecore_wr(p_hwfn, p_ptt,
				 PGLUE_B_REG_START_INIT_PTT_GTT, 1);
			initialized = true;
		}

		do {
			/* ptt might be overridden by HW until this is done */
			OSAL_UDELAY(10);
			ecore_ptt_invalidate(p_hwfn);
			val = ecore_rd(p_hwfn, p_ptt,
				       PGLUE_B_REG_INIT_DONE_PTT_GTT);
		} while ((val != 1) && --poll_cnt);

		if (!poll_cnt)
			DP_ERR(p_hwfn,
			       "PGLUE_B_REG_INIT_DONE didn't complete\n");
	}
#endif

	/* Set the global windows */
	gtt_base = PXP_PF_WINDOW_ADMIN_START +
		   PXP_PF_WINDOW_ADMIN_GLOBAL_START;

	for (i = 0; i < OSAL_ARRAY_SIZE(pxp_global_win); i++)
		if (pxp_global_win[i])
			REG_WR(p_hwfn, gtt_base + i * PXP_GLOBAL_ENTRY_SIZE,
			       pxp_global_win[i]);
}
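/* Illustrative only: the address arithmetic used by the loop above for one
 * global window entry, assuming the same PXP_* constants. Hypothetical
 * helper, not compiled.
 */
#if 0
static u32 example_global_win_addr(u32 win_idx)
{
	return PXP_PF_WINDOW_ADMIN_START +
	       PXP_PF_WINDOW_ADMIN_GLOBAL_START +
	       win_idx * PXP_GLOBAL_ENTRY_SIZE;
}
#endif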
/* Read updated MIB.
 * Reconfigure QM and invoke PF update ramrod command if operational MIB
 * change is detected.
 */
enum _ecore_status_t
ecore_dcbx_mib_update_event(struct ecore_hwfn *p_hwfn,
			    struct ecore_ptt *p_ptt,
			    enum ecore_mib_read_type type)
{
	enum _ecore_status_t rc = ECORE_SUCCESS;

	rc = ecore_dcbx_read_mib(p_hwfn, p_ptt, type);
	if (rc)
		return rc;

	if (type == ECORE_DCBX_OPERATIONAL_MIB) {
		ecore_dcbx_get_dscp_params(p_hwfn,
					   &p_hwfn->p_dcbx_info->get);

		rc = ecore_dcbx_process_mib_info(p_hwfn);
		if (!rc) {
			/* reconfigure tcs of QM queues according
			 * to negotiation results
			 */
			ecore_qm_reconf(p_hwfn, p_ptt);

			/* update storm FW with negotiation results */
			ecore_sp_pf_update_dcbx(p_hwfn);
		}
	}

	ecore_dcbx_get_params(p_hwfn, &p_hwfn->p_dcbx_info->get, type);

	/* Update the DSCP to TC mapping enable bit if required */
	if ((type == ECORE_DCBX_OPERATIONAL_MIB) &&
	    p_hwfn->p_dcbx_info->dscp_nig_update) {
		u8 val = !!p_hwfn->p_dcbx_info->get.dscp.enabled;

		ecore_wr(p_hwfn, p_ptt, NIG_REG_DSCP_TO_TC_MAP_ENABLE, val);
		p_hwfn->p_dcbx_info->dscp_nig_update = false;
	}

	OSAL_DCBX_AEN(p_hwfn, type);

	return rc;
}
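/* Illustrative only: a sketch (not compiled) of driving the MIB-update
 * event from an MFW notification path. Assumes ecore_ptt_acquire()/
 * ecore_ptt_release() as declared elsewhere in this driver; the wrapper
 * name is hypothetical.
 */
#if 0
static void example_handle_dcbx_notification(struct ecore_hwfn *p_hwfn)
{
	struct ecore_ptt *p_ptt = ecore_ptt_acquire(p_hwfn);

	if (!p_ptt)
		return;

	/* An operational-MIB change triggers QM reconfiguration and a
	 * PF-update ramrod inside the handler.
	 */
	ecore_dcbx_mib_update_event(p_hwfn, p_ptt,
				    ECORE_DCBX_OPERATIONAL_MIB);
	ecore_ptt_release(p_hwfn, p_ptt);
}
#endif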