static int init_cgr(struct device *qidev)
{
	int ret;
	struct qm_mcc_initcgr opts;
	/* Scale the congestion threshold with the number of affine CPUs */
	const u64 cpus = *(u64 *)qman_affine_cpus();
	const int num_cpus = hweight64(cpus);
	const u64 val = num_cpus * MAX_RSP_FQ_BACKLOG_PER_CPU;

	ret = qman_alloc_cgrid(&qipriv.cgr.cgrid);
	if (ret) {
		dev_err(qidev, "CGR alloc failed for rsp FQs: %d\n", ret);
		return ret;
	}

	qipriv.cgr.cb = cgr_cb;
	memset(&opts, 0, sizeof(opts));
	/* Enable congestion state change notifications, frame-count mode */
	opts.we_mask = cpu_to_be16(QM_CGR_WE_CSCN_EN | QM_CGR_WE_CS_THRES |
				   QM_CGR_WE_MODE);
	opts.cgr.cscn_en = QM_CGR_EN;
	opts.cgr.mode = QMAN_CGR_MODE_FRAME;
	qm_cgr_cs_thres_set64(&opts.cgr.cs_thres, val, 1);

	ret = qman_create_cgr(&qipriv.cgr, QMAN_CGR_FLAG_USE_INIT, &opts);
	if (ret) {
		dev_err(qidev, "Error %d creating CAAM CGRID: %u\n", ret,
			qipriv.cgr.cgrid);
		return ret;
	}

	dev_info(qidev, "Congestion threshold set to %llu\n", val);
	return 0;
}
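
The congestion callback installed above via qipriv.cgr.cb = cgr_cb is not part of this listing. A minimal sketch of such a state-change handler, following the qman_cb_cgr prototype (the caam_congested flag is an assumption used for illustration, not something shown above), could look like:

/*
 * Sketch of a CGR state-change callback matching the qman_cb_cgr
 * prototype. The caam_congested flag is assumed to be a driver-global
 * that enqueue paths check before submitting new requests; it is not
 * part of the listing above.
 */
static bool caam_congested __read_mostly;

static void cgr_cb(struct qman_portal *qm, struct qman_cgr *cgr,
		   int congested)
{
	caam_congested = congested;

	if (congested)
		pr_debug_ratelimited("CAAM entered congestion state\n");
	else
		pr_debug_ratelimited("CAAM exited congestion state\n");
}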
Example #2
static int alloc_cgrs(struct device *qidev)
{
	struct qm_mcc_initcgr opts;
	int ret;
	const u64 cpus = *(u64 *)qman_affine_cpus();
	const int num_cpus = hweight64(cpus);
	u64 val;

	/* Allocate the response CGR */
	ret = qman_alloc_cgrid(&qipriv.rsp_cgr.cgrid);
	if (ret) {
		dev_err(qidev, "CGR alloc failed for rsp FQs");
		return ret;
	}

	qipriv.rsp_cgr.cb = rsp_cgr_cb;
	memset(&opts, 0, sizeof(opts));
	opts.we_mask = QM_CGR_WE_CSCN_EN | QM_CGR_WE_CS_THRES |
			QM_CGR_WE_MODE;
	opts.cgr.cscn_en = QM_CGR_EN;
	opts.cgr.mode = QMAN_CGR_MODE_FRAME;
#ifdef CONFIG_FSL_DPAA_ETH
	/*
	 * This effectively sets the to-CPU congestion threshold to half of
	 * the number of buffers available to the dpa_eth driver. At most
	 * half of the buffers can then sit in the queues coming from SEC,
	 * waiting to be delivered to the cores (and then onto the Tx queues).
	 * NOTE: the divisor '2' below is arbitrary; it could just as well be
	 *       '3' or '4'. The right value also depends on how many devices
	 *       share the dpa_eth buffers (which can be more than one if,
	 *       e.g., PME/DCE are also used).
	 */
	val = num_cpus * CONFIG_FSL_DPAA_ETH_MAX_BUF_COUNT / 2;
#else
	val = num_cpus * MAX_RSP_FQ_BACKLOG_PER_CPU;
#endif
	qm_cgr_cs_thres_set64(&opts.cgr.cs_thres, val, 1);

	ret = qman_create_cgr(&qipriv.rsp_cgr,
				QMAN_CGR_FLAG_USE_INIT, &opts);
	if (ret) {
		dev_err(qidev, "Error %d creating CAAM rsp CGRID: %u\n",
			ret, qipriv.rsp_cgr.cgrid);
		return ret;
	}
#ifdef DEBUG
	dev_info(qidev, "CAAM to CPU threshold set to %llu\n", val);
#endif
	return 0;
}
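
Neither snippet shows the teardown path. A hedged sketch of how the response CGR allocated above could be released on driver shutdown (the function name free_rsp_cgr is illustrative, not taken from the original code):

/*
 * Illustrative teardown for the response CGR; the function name is
 * hypothetical. qman_delete_cgr_safe() deletes the CGR on the CPU that
 * created it, after which the CGR ID is handed back to the allocator.
 */
static void free_rsp_cgr(void)
{
	qman_delete_cgr_safe(&qipriv.rsp_cgr);
	qman_release_cgrid(qipriv.rsp_cgr.cgrid);
}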