/* Prepare runtime init values to allocate PBF command queue lines. */
static void qed_cmdq_lines_rt_init(
	struct qed_hwfn *p_hwfn,
	u8 max_ports_per_engine,
	u8 max_phys_tcs_per_port,
	struct init_qm_port_params port_params[MAX_NUM_PORTS])
{
	u8 tc, voq, port_id;

	/* clear PBF lines for all VOQs */
	for (voq = 0; voq < MAX_NUM_VOQS; voq++)
		STORE_RT_REG(p_hwfn, PBF_CMDQ_LINES_RT_OFFSET(voq), 0);

	for (port_id = 0; port_id < max_ports_per_engine; port_id++) {
		if (port_params[port_id].active) {
			u16 phys_lines, phys_lines_per_tc;
			u8 phys_tcs = port_params[port_id].num_active_phys_tcs;

			/* find #lines to divide between the active
			 * physical TCs.
			 */
			phys_lines = port_params[port_id].num_pbf_cmd_lines -
				     PBF_CMDQ_PURE_LB_LINES;
			/* find #lines per active physical TC */
			phys_lines_per_tc = phys_lines / phys_tcs;
			/* init registers per active TC */
			for (tc = 0; tc < phys_tcs; tc++) {
				voq = PHYS_VOQ(port_id, tc,
					       max_phys_tcs_per_port);
				qed_cmdq_lines_voq_rt_init(p_hwfn, voq,
							   phys_lines_per_tc);
			}
			/* init registers for pure LB TC */
			qed_cmdq_lines_voq_rt_init(p_hwfn, LB_VOQ(port_id),
						   PBF_CMDQ_PURE_LB_LINES);
		}
	}
}
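
/* For context: qed_cmdq_lines_voq_rt_init(), called above, is not shown in
 * this excerpt. Below is a minimal sketch of what such a helper plausibly
 * does, assuming a QM_VOQ_LINE_CRD() macro that converts command queue
 * lines to QM line credits and an OVERWRITE_RT_REG() variant of
 * STORE_RT_REG() that allows restaging an already-written offset (both
 * named here as assumptions, not confirmed APIs):
 */
static void qed_cmdq_lines_voq_rt_init(struct qed_hwfn *p_hwfn,
				       u8 voq, u16 cmdq_lines)
{
	/* convert #lines to QM line credits (assumed macro) */
	u32 qm_line_crd = QM_VOQ_LINE_CRD(cmdq_lines);

	/* stage the PBF command queue size for this VOQ */
	OVERWRITE_RT_REG(p_hwfn, PBF_CMDQ_LINES_RT_OFFSET(voq),
			 (u32)cmdq_lines);
	/* stage matching QM line credits (current and initial) */
	STORE_RT_REG(p_hwfn, QM_REG_VOQCRDLINE_RT_OFFSET + voq, qm_line_crd);
	STORE_RT_REG(p_hwfn, QM_REG_VOQINITCRDLINE_RT_OFFSET + voq,
		     qm_line_crd);
}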
/* Program the DORQ (doorbell queue) per-connection-type maximum ICID
 * runtime values for this PF and its VFs.
 */
static void qed_dq_init_pf(struct qed_hwfn *p_hwfn)
{
	struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
	u32 dq_pf_max_cid = 0, dq_vf_max_cid = 0;

	dq_pf_max_cid += (p_mngr->conn_cfg[0].cid_count >> DQ_RANGE_SHIFT);
	STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_0_RT_OFFSET, dq_pf_max_cid);

	dq_vf_max_cid += (p_mngr->conn_cfg[0].cids_per_vf >> DQ_RANGE_SHIFT);
	STORE_RT_REG(p_hwfn, DORQ_REG_VF_MAX_ICID_0_RT_OFFSET, dq_vf_max_cid);

	dq_pf_max_cid += (p_mngr->conn_cfg[1].cid_count >> DQ_RANGE_SHIFT);
	STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_1_RT_OFFSET, dq_pf_max_cid);

	dq_vf_max_cid += (p_mngr->conn_cfg[1].cids_per_vf >> DQ_RANGE_SHIFT);
	STORE_RT_REG(p_hwfn, DORQ_REG_VF_MAX_ICID_1_RT_OFFSET, dq_vf_max_cid);

	dq_pf_max_cid += (p_mngr->conn_cfg[2].cid_count >> DQ_RANGE_SHIFT);
	STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_2_RT_OFFSET, dq_pf_max_cid);

	dq_vf_max_cid += (p_mngr->conn_cfg[2].cids_per_vf >> DQ_RANGE_SHIFT);
	STORE_RT_REG(p_hwfn, DORQ_REG_VF_MAX_ICID_2_RT_OFFSET, dq_vf_max_cid);

	dq_pf_max_cid += (p_mngr->conn_cfg[3].cid_count >> DQ_RANGE_SHIFT);
	STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_3_RT_OFFSET, dq_pf_max_cid);

	dq_vf_max_cid += (p_mngr->conn_cfg[3].cids_per_vf >> DQ_RANGE_SHIFT);
	STORE_RT_REG(p_hwfn, DORQ_REG_VF_MAX_ICID_3_RT_OFFSET, dq_vf_max_cid);

	dq_pf_max_cid += (p_mngr->conn_cfg[4].cid_count >> DQ_RANGE_SHIFT);
	STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_4_RT_OFFSET, dq_pf_max_cid);

	dq_vf_max_cid += (p_mngr->conn_cfg[4].cids_per_vf >> DQ_RANGE_SHIFT);
	STORE_RT_REG(p_hwfn, DORQ_REG_VF_MAX_ICID_4_RT_OFFSET, dq_vf_max_cid);

	dq_pf_max_cid += (p_mngr->conn_cfg[5].cid_count >> DQ_RANGE_SHIFT);
	STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_5_RT_OFFSET, dq_pf_max_cid);

	dq_vf_max_cid += (p_mngr->conn_cfg[5].cids_per_vf >> DQ_RANGE_SHIFT);
	STORE_RT_REG(p_hwfn, DORQ_REG_VF_MAX_ICID_5_RT_OFFSET, dq_vf_max_cid);

	/* Connection types 6 & 7 are not in use, but they must still be
	 * configured with the highest possible ICID. Otherwise the hardware
	 * defaults are used, and with a large number of CIDs a bug may occur
	 * if those defaults are smaller than dq_pf_max_cid / dq_vf_max_cid.
	 */
	STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_6_RT_OFFSET, dq_pf_max_cid);
	STORE_RT_REG(p_hwfn, DORQ_REG_VF_MAX_ICID_6_RT_OFFSET, dq_vf_max_cid);

	STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_7_RT_OFFSET, dq_pf_max_cid);
	STORE_RT_REG(p_hwfn, DORQ_REG_VF_MAX_ICID_7_RT_OFFSET, dq_vf_max_cid);
}
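
/* The six unrolled blocks above differ only in the connection-type index
 * and the register offset. A minimal loop-based sketch of the same
 * cumulative-maximum logic follows; the offset tables simply enumerate
 * the macros already used above (the generated RT offsets are not assumed
 * to be contiguous, which is why they are listed rather than computed):
 */
static void qed_dq_init_pf_sketch(struct qed_hwfn *p_hwfn)
{
	static const u32 pf_offsets[] = {
		DORQ_REG_PF_MAX_ICID_0_RT_OFFSET,
		DORQ_REG_PF_MAX_ICID_1_RT_OFFSET,
		DORQ_REG_PF_MAX_ICID_2_RT_OFFSET,
		DORQ_REG_PF_MAX_ICID_3_RT_OFFSET,
		DORQ_REG_PF_MAX_ICID_4_RT_OFFSET,
		DORQ_REG_PF_MAX_ICID_5_RT_OFFSET,
	};
	static const u32 vf_offsets[] = {
		DORQ_REG_VF_MAX_ICID_0_RT_OFFSET,
		DORQ_REG_VF_MAX_ICID_1_RT_OFFSET,
		DORQ_REG_VF_MAX_ICID_2_RT_OFFSET,
		DORQ_REG_VF_MAX_ICID_3_RT_OFFSET,
		DORQ_REG_VF_MAX_ICID_4_RT_OFFSET,
		DORQ_REG_VF_MAX_ICID_5_RT_OFFSET,
	};
	struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
	u32 dq_pf_max_cid = 0, dq_vf_max_cid = 0;
	u32 i;

	/* accumulate the running max ICID for connection types 0..5 */
	for (i = 0; i < ARRAY_SIZE(pf_offsets); i++) {
		dq_pf_max_cid += p_mngr->conn_cfg[i].cid_count >>
				 DQ_RANGE_SHIFT;
		STORE_RT_REG(p_hwfn, pf_offsets[i], dq_pf_max_cid);
		dq_vf_max_cid += p_mngr->conn_cfg[i].cids_per_vf >>
				 DQ_RANGE_SHIFT;
		STORE_RT_REG(p_hwfn, vf_offsets[i], dq_vf_max_cid);
	}

	/* unused types 6 & 7 mirror the final maxima, as explained above */
	STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_6_RT_OFFSET, dq_pf_max_cid);
	STORE_RT_REG(p_hwfn, DORQ_REG_VF_MAX_ICID_6_RT_OFFSET, dq_vf_max_cid);
	STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_7_RT_OFFSET, dq_pf_max_cid);
	STORE_RT_REG(p_hwfn, DORQ_REG_VF_MAX_ICID_7_RT_OFFSET, dq_vf_max_cid);
}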
/* Prepare Tx PQ mapping runtime init values for the specified PF */
static void qed_tx_pq_map_rt_init(
	struct qed_hwfn *p_hwfn,
	struct qed_ptt *p_ptt,
	struct qed_qm_pf_rt_init_params *p_params,
	u32 base_mem_addr_4kb)
{
	struct init_qm_vport_params *vport_params = p_params->vport_params;
	u16 num_pqs = p_params->num_pf_pqs + p_params->num_vf_pqs;
	u16 first_pq_group = p_params->start_pq / QM_PF_QUEUE_GROUP_SIZE;
	u16 last_pq_group = (p_params->start_pq + num_pqs - 1) /
			    QM_PF_QUEUE_GROUP_SIZE;
	bool is_bb_a0 = QED_IS_BB_A0(p_hwfn->cdev);
	u16 i, pq_id, pq_group;

	/* a bit per Tx PQ indicating if the PQ is associated with a VF */
	u32 tx_pq_vf_mask[MAX_QM_TX_QUEUES / QM_PF_QUEUE_GROUP_SIZE] = { 0 };
	u32 tx_pq_vf_mask_width = is_bb_a0 ? 32 : QM_PF_QUEUE_GROUP_SIZE;
	u32 num_tx_pq_vf_masks = MAX_QM_TX_QUEUES / tx_pq_vf_mask_width;
	u32 pq_mem_4kb = QM_PQ_MEM_4KB(p_params->num_pf_cids);
	u32 vport_pq_mem_4kb = QM_PQ_MEM_4KB(p_params->num_vf_cids);
	u32 mem_addr_4kb = base_mem_addr_4kb;

	/* set mapping from PQ group to PF */
	for (pq_group = first_pq_group; pq_group <= last_pq_group; pq_group++)
		STORE_RT_REG(p_hwfn, QM_REG_PQTX2PF_0_RT_OFFSET + pq_group,
			     (u32)(p_params->pf_id));
	/* set PQ sizes */
	STORE_RT_REG(p_hwfn, QM_REG_MAXPQSIZE_0_RT_OFFSET,
		     QM_PQ_SIZE_256B(p_params->num_pf_cids));
	STORE_RT_REG(p_hwfn, QM_REG_MAXPQSIZE_1_RT_OFFSET,
		     QM_PQ_SIZE_256B(p_params->num_vf_cids));

	/* go over all Tx PQs */
	for (i = 0, pq_id = p_params->start_pq; i < num_pqs; i++, pq_id++) {
		u8 voq = VOQ(p_params->port_id, p_params->pq_params[i].tc_id,
			     p_params->max_phys_tcs_per_port);
		bool is_vf_pq = (i >= p_params->num_pf_pqs);
		struct qm_rf_pq_map tx_pq_map;

		/* update first Tx PQ of VPORT/TC */
		u8 vport_id_in_pf = p_params->pq_params[i].vport_id -
				    p_params->start_vport;
		u16 *pq_ids = &vport_params[vport_id_in_pf].first_tx_pq_id[0];
		u16 first_tx_pq_id = pq_ids[p_params->pq_params[i].tc_id];

		if (first_tx_pq_id == QM_INVALID_PQ_ID) {
			/* create new VP PQ */
			pq_ids[p_params->pq_params[i].tc_id] = pq_id;
			first_tx_pq_id = pq_id;
			/* map VP PQ to VOQ and PF */
			STORE_RT_REG(p_hwfn,
				     QM_REG_WFQVPMAP_RT_OFFSET +
				     first_tx_pq_id,
				     (voq << QM_WFQ_VP_PQ_VOQ_SHIFT) |
				     (p_params->pf_id <<
				      QM_WFQ_VP_PQ_PF_SHIFT));
		}
		/* fill PQ map entry */
		memset(&tx_pq_map, 0, sizeof(tx_pq_map));
		SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_PQ_VALID, 1);
		SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_RL_VALID,
			  is_vf_pq ? 1 : 0);
		SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_VP_PQ_ID, first_tx_pq_id);
		SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_RL_ID,
			  is_vf_pq ? p_params->pq_params[i].vport_id : 0);
		SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_VOQ, voq);
		SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_WRR_WEIGHT_GROUP,
			  p_params->pq_params[i].wrr_group);
		/* write PQ map entry to CAM */
		STORE_RT_REG(p_hwfn, QM_REG_TXPQMAP_RT_OFFSET + pq_id,
			     *((u32 *)&tx_pq_map));
		/* set base address */
		STORE_RT_REG(p_hwfn,
			     QM_REG_BASEADDRTXPQ_RT_OFFSET + pq_id,
			     mem_addr_4kb);
		/* check if VF PQ */
		if (is_vf_pq) {
			/* if PQ is associated with a VF, add indication
			 * to PQ VF mask
			 */
			tx_pq_vf_mask[pq_id / tx_pq_vf_mask_width] |=
				(1 << (pq_id % tx_pq_vf_mask_width));
			mem_addr_4kb += vport_pq_mem_4kb;
		} else {
			mem_addr_4kb += pq_mem_4kb;
		}
	}

	/* store Tx PQ VF mask to size select register */
	for (i = 0; i < num_tx_pq_vf_masks; i++) {
		if (tx_pq_vf_mask[i]) {
			if (is_bb_a0) {
				u32 curr_mask = 0, addr;

				addr = QM_REG_MAXPQSIZETXSEL_0 + (i * 4);
				if (!p_params->is_first_pf)
					curr_mask = qed_rd(p_hwfn, p_ptt,
							   addr);

				addr = QM_REG_MAXPQSIZETXSEL_0_RT_OFFSET + i;

				STORE_RT_REG(p_hwfn, addr,
					     curr_mask | tx_pq_vf_mask[i]);
			} else {
				u32 addr;

				addr = QM_REG_MAXPQSIZETXSEL_0_RT_OFFSET + i;
				STORE_RT_REG(p_hwfn, addr,
					     tx_pq_vf_mask[i]);
			}
		}
	}
}
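
/* For reference: SET_FIELD() as used in qed_tx_pq_map_rt_init() is the qed
 * mask-and-shift update pattern. A self-contained sketch of that pattern
 * follows; the EX_* field masks and shifts are illustrative placeholders,
 * not the real QM_RF_PQ_MAP firmware layout:
 */
#define EX_PQ_VALID_MASK	0x1
#define EX_PQ_VALID_SHIFT	0
#define EX_VOQ_MASK		0x1f
#define EX_VOQ_SHIFT		1

/* clear the field's bits, then OR in the new value; like the macro used
 * above, this trusts the caller to pass an in-range value
 */
#define EX_SET_FIELD(value, name, flag)					\
	do {								\
		(value) &= ~((name ## _MASK) << (name ## _SHIFT));	\
		(value) |= ((u32)(flag) << (name ## _SHIFT));		\
	} while (0)

/* usage: pack a valid bit and a 5-bit VOQ into one register word */
static u32 ex_pack_pq_map(u8 voq)
{
	u32 reg = 0;

	EX_SET_FIELD(reg, EX_PQ_VALID, 1);
	EX_SET_FIELD(reg, EX_VOQ, voq);
	return reg;
}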