/**
 * qed_int_cau_conf_sb() - Configure the CAU for a single status block.
 *
 * @p_hwfn: HW function context.
 * @p_ptt: PTT window for register access.
 * @sb_phys: DMA address of the status block.
 * @igu_sb_id: IGU status block index.
 * @vf_number: VF index (meaningful only when @vf_valid is set).
 * @vf_valid: Non-zero when the SB belongs to a VF.
 *
 * If HW init is already done, the CAU address/variable memories are
 * programmed directly; otherwise the values are staged into the runtime
 * array for the init tool to apply later.  Fix: the original text was
 * missing the function's closing brace (unbalanced '{').
 */
void qed_int_cau_conf_sb(struct qed_hwfn *p_hwfn,
			 struct qed_ptt *p_ptt,
			 dma_addr_t sb_phys,
			 u16 igu_sb_id, u16 vf_number, u8 vf_valid)
{
	struct cau_sb_entry sb_entry;
	u32 val;

	qed_init_cau_sb_entry(p_hwfn, &sb_entry, p_hwfn->rel_pf_id,
			      vf_number, vf_valid);

	if (p_hwfn->hw_init_done) {
		/* HW is up - write the CAU memories directly. Each SB
		 * occupies one u64 slot in both memories.
		 */
		val = CAU_REG_SB_ADDR_MEMORY + igu_sb_id * sizeof(u64);
		qed_wr(p_hwfn, p_ptt, val, lower_32_bits(sb_phys));
		qed_wr(p_hwfn, p_ptt, val + sizeof(u32),
		       upper_32_bits(sb_phys));

		val = CAU_REG_SB_VAR_MEMORY + igu_sb_id * sizeof(u64);
		qed_wr(p_hwfn, p_ptt, val, sb_entry.data);
		qed_wr(p_hwfn, p_ptt, val + sizeof(u32), sb_entry.params);
	} else {
		/* Initialize Status Block Address */
		STORE_RT_REG_AGG(p_hwfn,
				 CAU_REG_SB_ADDR_MEMORY_RT_OFFSET +
				 igu_sb_id * 2,
				 sb_phys);

		STORE_RT_REG_AGG(p_hwfn,
				 CAU_REG_SB_VAR_MEMORY_RT_OFFSET +
				 igu_sb_id * 2,
				 sb_entry);
	}

	/* Configure pi coalescing if set */
	if (p_hwfn->cdev->int_coalescing_mode == QED_COAL_MODE_ENABLE) {
		/* usecs are scaled down by the CAU timer resolution;
		 * the "+ 1" halves the value again - presumably to stay
		 * within the timeset field width (confirm vs. HW spec).
		 */
		u8 timeset = p_hwfn->cdev->rx_coalesce_usecs >>
			     (QED_CAU_DEF_RX_TIMER_RES + 1);
		u8 num_tc = 1, i;

		qed_int_cau_conf_pi(p_hwfn, p_ptt, igu_sb_id, RX_PI,
				    QED_COAL_RX_STATE_MACHINE, timeset);

		timeset = p_hwfn->cdev->tx_coalesce_usecs >>
			  (QED_CAU_DEF_TX_TIMER_RES + 1);

		/* One Tx PI per traffic class; only TC 0 is used here. */
		for (i = 0; i < num_tc; i++) {
			qed_int_cau_conf_pi(p_hwfn, p_ptt, igu_sb_id,
					    TX_PI(i),
					    QED_COAL_TX_STATE_MACHINE,
					    timeset);
		}
	}
}
/* Write an init-op array section to the chip.  Short sections on a
 * narrow bus are written register-by-register; everything else goes
 * through a single DMAE transaction.
 *
 * Returns 0 on success or the qed_dmae_host2grc() error code.
 */
static int qed_init_array_dmae(struct qed_hwfn *p_hwfn,
			       struct qed_ptt *p_ptt,
			       u32 addr,
			       u32 dmae_data_offset,
			       u32 size,
			       const u32 *buf,
			       bool b_must_dmae,
			       bool b_can_dmae)
{
	int rc = 0;

	/* Perform DMAE only for lengthy enough sections or for wide-bus */
	if (b_can_dmae && (b_must_dmae || size >= 16)) {
		rc = qed_dmae_host2grc(p_hwfn, p_ptt,
				       (uintptr_t)(buf + dmae_data_offset),
				       addr, size, 0);
	} else {
		const u32 *src = buf + dmae_data_offset;
		u32 idx;

		for (idx = 0; idx < size; idx++)
			qed_wr(p_hwfn, p_ptt, addr + (idx << 2), src[idx]);
	}

	return rc;
}
static void qed_int_sb_attn_setup(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) { struct qed_sb_attn_info *sb_info = p_hwfn->p_sb_attn; memset(sb_info->sb_attn, 0, sizeof(*sb_info->sb_attn)); sb_info->index = 0; sb_info->known_attn = 0; /* Configure Attention Status Block in IGU */ qed_wr(p_hwfn, p_ptt, IGU_REG_ATTN_MSG_ADDR_L, lower_32_bits(p_hwfn->p_sb_attn->sb_phys)); qed_wr(p_hwfn, p_ptt, IGU_REG_ATTN_MSG_ADDR_H, upper_32_bits(p_hwfn->p_sb_attn->sb_phys)); }
/**
 * qed_int_deassertion() - Handle deassertion of previously asserted
 * attention bits.
 *
 * @p_hwfn: HW function context.
 * @deasserted_bits: Newly deasserted bits.
 *
 * Acknowledges the deassertion to the IGU, re-enables (unmasks) the
 * maskable bits among them, and clears them from the driver's known
 * attention state.
 *
 * Return: 0 always.
 */
static int qed_int_deassertion(struct qed_hwfn *p_hwfn,
			       u16 deasserted_bits)
{
	struct qed_sb_attn_info *attn_info = p_hwfn->p_sb_attn;
	u32 en_mask;

	/* Only bit 0x100 (the MFW attention) is expected here. */
	if (deasserted_bits != 0x100)
		DP_ERR(p_hwfn, "Unexpected - non-link deassertion\n");

	/* Clear IGU indication for the deasserted bits */
	DIRECT_REG_WR((u8 __iomem *)p_hwfn->regview +
		      GTT_BAR0_MAP_REG_IGU_CMD +
		      ((IGU_CMD_ATTN_BIT_CLR_UPPER -
			IGU_CMD_INT_ACK_BASE) << 3),
		      ~((u32)deasserted_bits));

	/* Unmask deasserted attentions in IGU */
	en_mask = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
			 IGU_REG_ATTENTION_ENABLE);
	en_mask |= (deasserted_bits & ATTN_BITS_MASKABLE);
	qed_wr(p_hwfn, p_hwfn->p_dpc_ptt, IGU_REG_ATTENTION_ENABLE, en_mask);

	/* Clear deassertion from inner state */
	attn_info->known_attn &= ~deasserted_bits;

	return 0;
}
/* Issue a single QM SDM command: wait until the command interface is
 * ready, latch the address and 64 bits of data, pulse the GO bit, then
 * wait for completion.
 *
 * Returns true when the command interface accepted and completed the
 * command, false on either readiness poll failing.
 */
static bool qed_send_qm_cmd(struct qed_hwfn *p_hwfn,
			    struct qed_ptt *p_ptt,
			    u32 cmd_addr, u32 cmd_data_lsb, u32 cmd_data_msb)
{
	if (!qed_poll_on_qm_cmd_ready(p_hwfn, p_ptt))
		return false;

	qed_wr(p_hwfn, p_ptt, QM_REG_SDMCMDADDR, cmd_addr);
	qed_wr(p_hwfn, p_ptt, QM_REG_SDMCMDDATALSB, cmd_data_lsb);
	qed_wr(p_hwfn, p_ptt, QM_REG_SDMCMDDATAMSB, cmd_data_msb);

	/* Strobe GO to kick off the command. */
	qed_wr(p_hwfn, p_ptt, QM_REG_SDMCMDGO, 1);
	qed_wr(p_hwfn, p_ptt, QM_REG_SDMCMDGO, 0);

	return qed_poll_on_qm_cmd_ready(p_hwfn, p_ptt);
}
/* Fill @fill_count consecutive 32-bit registers, starting at @addr,
 * with the constant value @fill.
 */
static void qed_init_fill(struct qed_hwfn *p_hwfn,
			  struct qed_ptt *p_ptt,
			  u32 addr, u32 fill, u32 fill_count)
{
	u32 remaining;

	for (remaining = fill_count; remaining; remaining--) {
		qed_wr(p_hwfn, p_ptt, addr, fill);
		addr += sizeof(u32);
	}
}
/**
 * qed_init_vport_rl() - Configure a VPORT's global rate limiter.
 *
 * @p_hwfn: HW function context.
 * @p_ptt: PTT window for register access.
 * @vport_id: VPORT index whose rate limiter is programmed.
 * @vport_rl: Requested rate limit, in the units QM_RL_INC_VAL expects
 *            (presumably Mb/s - confirm against callers).
 *
 * Return: 0 on success, -1 if the requested rate exceeds the maximum
 * increment value the QM supports.
 */
int qed_init_vport_rl(struct qed_hwfn *p_hwfn,
		      struct qed_ptt *p_ptt, u8 vport_id, u32 vport_rl)
{
	u32 inc_val = QM_RL_INC_VAL(vport_rl);

	if (inc_val > QM_RL_MAX_INC_VAL) {
		/* Fix: message was missing its trailing newline, so it
		 * could run into the next kernel log line.
		 */
		DP_NOTICE(p_hwfn, "Invalid VPORT rate-limit configuration\n");
		return -1;
	}

	/* Reset the credit register (sign-bit value - per QM init
	 * convention; confirm against HW spec) before programming the
	 * new increment.
	 */
	qed_wr(p_hwfn, p_ptt,
	       QM_REG_RLGLBLCRD + vport_id * 4, QM_RL_CRD_REG_SIGN_BIT);
	qed_wr(p_hwfn, p_ptt, QM_REG_RLGLBLINCVAL + vport_id * 4, inc_val);

	return 0;
}
/**
 * qed_init_pf_rl() - Configure a PF's rate limiter.
 *
 * @p_hwfn: HW function context.
 * @p_ptt: PTT window for register access.
 * @pf_id: PF index whose rate limiter is programmed.
 * @pf_rl: Requested rate limit, in the units QM_RL_INC_VAL expects
 *         (presumably Mb/s - confirm against callers).
 *
 * Return: 0 on success, -1 if the requested rate exceeds the maximum
 * increment value the QM supports.
 */
int qed_init_pf_rl(struct qed_hwfn *p_hwfn,
		   struct qed_ptt *p_ptt, u8 pf_id, u32 pf_rl)
{
	u32 inc_val = QM_RL_INC_VAL(pf_rl);

	if (inc_val > QM_RL_MAX_INC_VAL) {
		/* Fix: message was missing its trailing newline, so it
		 * could run into the next kernel log line.
		 */
		DP_NOTICE(p_hwfn, "Invalid PF rate limit configuration\n");
		return -1;
	}

	/* Reset the credit register (sign-bit value - per QM init
	 * convention; confirm against HW spec) before programming the
	 * new increment.
	 */
	qed_wr(p_hwfn, p_ptt,
	       QM_REG_RLPFCRD + pf_id * 4, QM_RL_CRD_REG_SIGN_BIT);
	qed_wr(p_hwfn, p_ptt, QM_REG_RLPFINCVAL + pf_id * 4, inc_val);

	return 0;
}
/**
 * qed_int_assertion() - Handle newly asserted attention bits.
 *
 * @p_hwfn: HW function context.
 * @asserted_bits: Newly asserted bits.
 *
 * Masks the asserting sources in the IGU, records them in the driver's
 * known attention state, services MCP events when the MFW bit (0x100)
 * asserted, and finally acknowledges the assertion to the IGU.
 *
 * Return: 0 always.
 */
static int qed_int_assertion(struct qed_hwfn *p_hwfn, u16 asserted_bits)
{
	struct qed_sb_attn_info *attn = p_hwfn->p_sb_attn;
	u32 mask;

	/* Mask the source of the attention in the IGU */
	mask = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, IGU_REG_ATTENTION_ENABLE);
	DP_VERBOSE(p_hwfn, NETIF_MSG_INTR, "IGU mask: 0x%08x --> 0x%08x\n",
		   mask, mask & ~(asserted_bits & ATTN_BITS_MASKABLE));
	mask &= ~(asserted_bits & ATTN_BITS_MASKABLE);
	qed_wr(p_hwfn, p_hwfn->p_dpc_ptt, IGU_REG_ATTENTION_ENABLE, mask);

	DP_VERBOSE(p_hwfn, NETIF_MSG_INTR,
		   "inner known ATTN state: 0x%04x --> 0x%04x\n",
		   attn->known_attn, attn->known_attn | asserted_bits);
	attn->known_attn |= asserted_bits;

	/* Handle MCP events */
	if (asserted_bits & 0x100) {
		qed_mcp_handle_events(p_hwfn, p_hwfn->p_dpc_ptt);
		/* Clean the MCP attention */
		qed_wr(p_hwfn, p_hwfn->p_dpc_ptt, attn->mfw_attn_addr, 0);
	}

	/* Acknowledge the assertion to the IGU. */
	DIRECT_REG_WR((u8 __iomem *)p_hwfn->regview +
		      GTT_BAR0_MAP_REG_IGU_CMD +
		      ((IGU_CMD_ATTN_BIT_SET_UPPER -
			IGU_CMD_INT_ACK_BASE) << 3),
		      (u32)asserted_bits);

	DP_VERBOSE(p_hwfn, NETIF_MSG_INTR, "set cmd IGU: 0x%04x\n",
		   asserted_bits);

	return 0;
}
/* init_ops write command.
 *
 * Decodes one firmware init "write" op and dispatches on its source:
 * an inline value, a zero-fill, an array section, or runtime-array
 * data.  Wide-bus targets must go through DMAE.
 *
 * Returns 0 on success or a negative errno.
 */
static int qed_init_cmd_wr(struct qed_hwfn *p_hwfn,
			   struct qed_ptt *p_ptt,
			   struct init_write_op *cmd,
			   bool b_can_dmae)
{
	u32 data = le32_to_cpu(cmd->data);
	u32 addr = GET_FIELD(data, INIT_WRITE_OP_ADDRESS) << 2;
	bool b_must_dmae = GET_FIELD(data, INIT_WRITE_OP_WIDE_BUS);
	union init_write_args *arg = &cmd->args;
	int rc = 0;

	/* Sanitize */
	if (b_must_dmae && !b_can_dmae) {
		DP_NOTICE(p_hwfn,
			  "Need to write to %08x for Wide-bus but DMAE isn't allowed\n",
			  addr);
		return -EINVAL;
	}

	switch (GET_FIELD(data, INIT_WRITE_OP_SOURCE)) {
	case INIT_SRC_INLINE:
		qed_wr(p_hwfn, p_ptt, addr, le32_to_cpu(arg->inline_val));
		break;
	case INIT_SRC_ZEROS:
		/* Large fills go through DMAE when allowed. */
		if (b_must_dmae ||
		    (b_can_dmae && (le32_to_cpu(arg->zeros_count) >= 64)))
			rc = qed_init_fill_dmae(p_hwfn, p_ptt, addr, 0,
						le32_to_cpu(arg->zeros_count));
		else
			qed_init_fill(p_hwfn, p_ptt, addr, 0,
				      le32_to_cpu(arg->zeros_count));
		break;
	case INIT_SRC_ARRAY:
		rc = qed_init_cmd_array(p_hwfn, p_ptt, cmd,
					b_must_dmae, b_can_dmae);
		break;
	case INIT_SRC_RUNTIME:
		qed_init_rt(p_hwfn, p_ptt, addr,
			    le16_to_cpu(arg->runtime.offset),
			    le16_to_cpu(arg->runtime.size),
			    b_must_dmae);
		break;
	default:
		/* Fix: an unrecognized source was previously ignored
		 * silently and reported as success.
		 */
		DP_NOTICE(p_hwfn, "Unknown init op write source\n");
		rc = -EINVAL;
		break;
	}

	return rc;
}
/* Write the initialized entries of a runtime (RT) array window to the
 * chip.  Only entries flagged valid in rt_data.b_valid are written.
 * For wide-bus targets, consecutive runs of valid entries are coalesced
 * into single DMAE transactions; otherwise each entry is written with a
 * plain register write.
 *
 * Returns 0 on success or the first qed_dmae_host2grc() error.
 */
static int qed_init_rt(struct qed_hwfn *p_hwfn,
		       struct qed_ptt *p_ptt,
		       u32 addr, u16 rt_offset, u16 size, bool b_must_dmae)
{
	u32 *p_init_val = &p_hwfn->rt_data.init_val[rt_offset];
	bool *p_valid = &p_hwfn->rt_data.b_valid[rt_offset];
	u16 i, segment;
	int rc = 0;

	/* Since not all RT entries are initialized, go over the RT and
	 * for each segment of initialized values use DMA.
	 */
	for (i = 0; i < size; i++) {
		if (!p_valid[i])
			continue;

		/* In case there isn't any wide-bus configuration here,
		 * simply write the data instead of using dmae.
		 */
		if (!b_must_dmae) {
			qed_wr(p_hwfn, p_ptt, addr + (i << 2), p_init_val[i]);
			continue;
		}

		/* Start of a new segment: count how many consecutive
		 * entries (starting at i) are valid.
		 */
		for (segment = 1; i + segment < size; segment++)
			if (!p_valid[i + segment])
				break;

		rc = qed_dmae_host2grc(p_hwfn, p_ptt,
				       (uintptr_t)(p_init_val + i),
				       addr + (i << 2), segment, 0);
		if (rc != 0)
			return rc;

		/* Jump over the entire segment, including invalid entry:
		 * together with the loop's i++ this lands on the entry
		 * after the first invalid one.
		 */
		i += segment;
	}

	return rc;
}
/* Read Tx timestamp.
 *
 * Reads the latched Tx PTP timestamp from the NIG buffer into
 * *timestamp (MSB:LSB combined into a u64) and re-arms the buffer for
 * the next frame.  Returns -EINVAL when no valid timestamp is latched.
 */
static int qed_ptp_hw_read_tx_ts(struct qed_dev *cdev, u64 *timestamp)
{
	struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
	struct qed_ptt *p_ptt = p_hwfn->p_ptp_ptt;
	u32 seqid, lsb;

	*timestamp = 0;

	/* No bits set in the seqid register means nothing was latched. */
	seqid = qed_rd(p_hwfn, p_ptt, NIG_REG_TX_LLH_PTP_BUF_SEQID);
	if (!(seqid & QED_TIMESTAMP_MASK)) {
		DP_INFO(p_hwfn, "Invalid Tx timestamp, buf_seqid = %d\n",
			seqid);
		return -EINVAL;
	}

	lsb = qed_rd(p_hwfn, p_ptt, NIG_REG_TX_LLH_PTP_BUF_TS_LSB);
	*timestamp = qed_rd(p_hwfn, p_ptt, NIG_REG_TX_LLH_PTP_BUF_TS_MSB);
	*timestamp = (*timestamp << 32) | lsb;

	/* Reset timestamp register to allow new timestamp */
	qed_wr(p_hwfn, p_ptt, NIG_REG_TX_LLH_PTP_BUF_SEQID,
	       QED_TIMESTAMP_MASK);

	return 0;
}
/* Filter PTP protocol packets that need to be timestamped.
 *
 * Translates the requested Rx filter type into the NIG enable/rule-mask
 * register pair and programs both the Rx and Tx sides.  The enable_cfg
 * and rule_mask constants per filter type come from the HW rule table -
 * exact bit semantics are not derivable here; confirm against the NIG
 * register spec.  Returns -EINVAL for an unknown rx_type.
 */
static int qed_ptp_hw_cfg_filters(struct qed_dev *cdev,
				  enum qed_ptp_filter_type rx_type,
				  enum qed_ptp_hwtstamp_tx_type tx_type)
{
	struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
	struct qed_ptt *p_ptt = p_hwfn->p_ptp_ptt;
	u32 rule_mask, enable_cfg = 0x0;

	/* Pick the rule configuration for the requested Rx filter. */
	switch (rx_type) {
	case QED_PTP_FILTER_NONE:
		enable_cfg = 0x0;
		rule_mask = 0x3FFF;
		break;
	case QED_PTP_FILTER_ALL:
		enable_cfg = 0x7;
		rule_mask = 0x3CAA;
		break;
	case QED_PTP_FILTER_V1_L4_EVENT:
		enable_cfg = 0x3;
		rule_mask = 0x3FFA;
		break;
	case QED_PTP_FILTER_V1_L4_GEN:
		enable_cfg = 0x3;
		rule_mask = 0x3FFE;
		break;
	case QED_PTP_FILTER_V2_L4_EVENT:
		enable_cfg = 0x5;
		rule_mask = 0x3FAA;
		break;
	case QED_PTP_FILTER_V2_L4_GEN:
		enable_cfg = 0x5;
		rule_mask = 0x3FEE;
		break;
	case QED_PTP_FILTER_V2_L2_EVENT:
		enable_cfg = 0x5;
		rule_mask = 0x3CFF;
		break;
	case QED_PTP_FILTER_V2_L2_GEN:
		enable_cfg = 0x5;
		rule_mask = 0x3EFF;
		break;
	case QED_PTP_FILTER_V2_EVENT:
		enable_cfg = 0x5;
		rule_mask = 0x3CAA;
		break;
	case QED_PTP_FILTER_V2_GEN:
		enable_cfg = 0x5;
		rule_mask = 0x3EEE;
		break;
	default:
		DP_INFO(p_hwfn, "Invalid PTP filter type %d\n", rx_type);
		return -EINVAL;
	}

	/* Program the Rx side. */
	qed_wr(p_hwfn, p_ptt, NIG_REG_LLH_PTP_PARAM_MASK, 0);
	qed_wr(p_hwfn, p_ptt, NIG_REG_LLH_PTP_RULE_MASK, rule_mask);
	qed_wr(p_hwfn, p_ptt, NIG_REG_RX_PTP_EN, enable_cfg);

	if (tx_type == QED_PTP_HWTSTAMP_TX_OFF) {
		/* Tx timestamping disabled: mask out every Tx rule. */
		qed_wr(p_hwfn, p_ptt, NIG_REG_TX_PTP_EN, 0x0);
		qed_wr(p_hwfn, p_ptt, NIG_REG_TX_LLH_PTP_PARAM_MASK, 0x7FF);
		qed_wr(p_hwfn, p_ptt, NIG_REG_TX_LLH_PTP_RULE_MASK, 0x3FFF);
	} else {
		/* Tx mirrors the Rx rule configuration. */
		qed_wr(p_hwfn, p_ptt, NIG_REG_TX_PTP_EN, enable_cfg);
		qed_wr(p_hwfn, p_ptt, NIG_REG_TX_LLH_PTP_PARAM_MASK, 0);
		qed_wr(p_hwfn, p_ptt, NIG_REG_TX_LLH_PTP_RULE_MASK, rule_mask);
	}

	/* Reset possibly old timestamps */
	qed_wr(p_hwfn, p_ptt, NIG_REG_LLH_PTP_HOST_BUF_SEQID,
	       QED_TIMESTAMP_MASK);

	return 0;
}