/* Write an array of init values into the device, picking between direct
 * register writes and a DMAE transaction.
 *
 * @addr:		destination GRC address
 * @dmae_data_offset:	dword offset into @buf where the data starts
 * @size:		number of dwords to write
 * @b_must_dmae:	wide-bus destination; DMAE is mandatory
 * @b_can_dmae:		DMAE engine is available at this init stage
 *
 * Return: 0 on success, otherwise the DMAE engine's error code.
 */
static int qed_init_array_dmae(struct qed_hwfn *p_hwfn,
			       struct qed_ptt *p_ptt,
			       u32 addr,
			       u32 dmae_data_offset,
			       u32 size, const u32 *buf,
			       bool b_must_dmae, bool b_can_dmae)
{
	const u32 *p_src = buf + dmae_data_offset;
	u32 idx;

	/* DMAE is worth the setup cost only for long-enough sections,
	 * unless the destination is wide-bus and leaves no choice.
	 */
	if (b_can_dmae && (b_must_dmae || size >= 16))
		return qed_dmae_host2grc(p_hwfn, p_ptt, (uintptr_t)p_src,
					 addr, size, 0);

	/* Short / narrow-bus section: plain register writes */
	for (idx = 0; idx < size; idx++)
		qed_wr(p_hwfn, p_ptt, addr + (idx << 2), p_src[idx]);

	return 0;
}
/* Initialize the device from the runtime (RT) array starting at
 * @rt_offset. The RT array is sparsely populated, so only entries
 * flagged valid are pushed; consecutive valid entries are batched into
 * a single DMAE transaction when DMAE is mandated.
 *
 * Return: 0 on success, otherwise the DMAE engine's error code.
 */
static int qed_init_rt(struct qed_hwfn *p_hwfn,
		       struct qed_ptt *p_ptt,
		       u32 addr, u16 rt_offset, u16 size, bool b_must_dmae)
{
	u32 *p_init_val = &p_hwfn->rt_data.init_val[rt_offset];
	bool *p_valid = &p_hwfn->rt_data.b_valid[rt_offset];
	u16 idx = 0, run_len;
	int rc = 0;

	while (idx < size) {
		/* Uninitialized entries are simply skipped */
		if (!p_valid[idx]) {
			idx++;
			continue;
		}

		/* Without a wide-bus requirement a direct register
		 * write is cheaper than setting up DMAE.
		 */
		if (!b_must_dmae) {
			qed_wr(p_hwfn, p_ptt, addr + (idx << 2),
			       p_init_val[idx]);
			idx++;
			continue;
		}

		/* Measure the run of valid entries starting here */
		for (run_len = 1; idx + run_len < size; run_len++)
			if (!p_valid[idx + run_len])
				break;

		rc = qed_dmae_host2grc(p_hwfn, p_ptt,
				       (uintptr_t)(p_init_val + idx),
				       addr + (idx << 2), run_len, 0);
		if (rc != 0)
			return rc;

		/* Skip the whole run plus the invalid entry (or array
		 * end) that terminated it.
		 */
		idx += run_len + 1;
	}

	return rc;
}
/* Zero-fill @fill_count consecutive GRC dwords starting at @addr via a
 * single DMAE transaction.
 *
 * The QED_DMAE_FLAG_RW_REPL_SRC flag makes the engine replicate the
 * source across the whole transfer, so a zeroed source buffer of up to
 * DMAE_MAX_RW_SIZE dwords suffices for any @fill_count.
 *
 * NOTE(review): @fill is accepted but never used — with the replicate
 * flag the fill value is always the zero source. Confirm callers only
 * ever request a zero fill before reusing this for other patterns.
 *
 * Return: 0 on success, otherwise the DMAE engine's error code.
 */
static int qed_init_fill_dmae(struct qed_hwfn *p_hwfn,
			      struct qed_ptt *p_ptt,
			      u32 addr, u32 fill, u32 fill_count)
{
	/* Static storage duration guarantees zero-initialization, and
	 * the buffer is only ever read as a DMA source, so the per-call
	 * memset() the old code performed was redundant; const-qualify
	 * it so the compiler places it in read-only data.
	 */
	static const u32 zero_buffer[DMAE_MAX_RW_SIZE];

	return qed_dmae_host2grc(p_hwfn, p_ptt,
				 (uintptr_t)(&zero_buffer[0]),
				 addr, fill_count,
				 QED_DMAE_FLAG_RW_REPL_SRC);
}