Example #1
static struct qman_fq *create_caam_req_fq(struct device *qidev,
					  struct qman_fq *rsp_fq,
					  dma_addr_t hwdesc,
					  int fq_sched_flag)
{
	int ret, flags;
	struct qman_fq *req_fq;
	struct qm_mcc_initfq opts;

	req_fq = kzalloc(sizeof(*req_fq), GFP_ATOMIC);
	if (!req_fq) {
		dev_err(qidev, "Mem alloc for CAAM req FQ failed\n");
		return ERR_PTR(-ENOMEM);
	}

	req_fq->cb.ern = caam_fq_ern_cb;
	req_fq->cb.fqs = NULL;

	flags = QMAN_FQ_FLAG_DYNAMIC_FQID |
		QMAN_FQ_FLAG_TO_DCPORTAL |
		QMAN_FQ_FLAG_LOCKED;

	ret = qman_create_fq(0, flags, req_fq);
	if (ret) {
		dev_err(qidev, "Failed to create session REQ FQ\n");
		goto create_req_fq_fail;
	}

	flags = fq_sched_flag;
	opts.we_mask = QM_INITFQ_WE_FQCTRL | QM_INITFQ_WE_DESTWQ |
			QM_INITFQ_WE_CONTEXTB | QM_INITFQ_WE_CONTEXTA;

	opts.fqd.fq_ctrl = QM_FQCTRL_CPCSTASH;
	opts.fqd.dest.channel = qm_channel_caam;
	opts.fqd.dest.wq = 2;
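	/*
	 * context_b carries the FQID of the response FQ; context_a carries
	 * the DMA address of the CAAM hardware descriptor (hwdesc).
	 */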
	opts.fqd.context_b = qman_fq_fqid(rsp_fq);
	opts.fqd.context_a.hi = upper_32_bits(hwdesc);
	opts.fqd.context_a.lo = lower_32_bits(hwdesc);

	ret = qman_init_fq(req_fq, flags, &opts);
	if (ret) {
		dev_err(qidev, "Failed to init session req FQ\n");
		goto init_req_fq_fail;
	}
#ifdef DEBUG
	dev_info(qidev, "Allocated request FQ %u for CPU %u\n",
		 req_fq->fqid, smp_processor_id());
#endif
	return req_fq;

init_req_fq_fail:
	qman_destroy_fq(req_fq, 0);

create_req_fq_fail:
	kfree(req_fq);
	return ERR_PTR(ret);
}
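A caller is expected to handle the ERR_PTR-encoded return before using the queue. A minimal, hypothetical sketch (qidev, rsp_fq and hwdesc are assumed to come from the surrounding driver context, not from the example above):

	struct qman_fq *req_fq;

	req_fq = create_caam_req_fq(qidev, rsp_fq, hwdesc, QMAN_INITFQ_FLAG_SCHED);
	if (IS_ERR(req_fq))
		return PTR_ERR(req_fq);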
Example #2
static int alloc_rsp_fq_cpu(struct device *qidev, unsigned int cpu)
{
	struct qm_mcc_initfq opts;
	struct qman_fq *fq;
	int ret;
	u32 flags;

	fq = &per_cpu(pcpu_qipriv.rsp_fq, cpu);

	fq->cb.dqrr = caam_rsp_fq_dqrr_cb;

	flags = QMAN_FQ_FLAG_NO_ENQUEUE | QMAN_FQ_FLAG_DYNAMIC_FQID;

	ret = qman_create_fq(0, flags, fq);
	if (ret) {
		dev_err(qidev, "Rsp FQ create failed\n");
		return -ENODEV;
	}

	flags = QMAN_INITFQ_FLAG_SCHED;

	opts.we_mask = QM_INITFQ_WE_FQCTRL | QM_INITFQ_WE_DESTWQ |
		QM_INITFQ_WE_CONTEXTB | QM_INITFQ_WE_CONTEXTA |
		QM_INITFQ_WE_CGID;

	opts.fqd.fq_ctrl = QM_FQCTRL_CTXASTASHING |
			   QM_FQCTRL_CPCSTASH |
			   QM_FQCTRL_CGE;

	opts.fqd.dest.channel = qman_affine_channel(cpu);
	opts.fqd.cgid = qipriv.rsp_cgr.cgrid;
	opts.fqd.dest.wq = 2;
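	/* Stash dequeue context and frame data, one cache line of each */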
	opts.fqd.context_a.stashing.exclusive =
					QM_STASHING_EXCL_CTX |
					QM_STASHING_EXCL_DATA;

	opts.fqd.context_a.stashing.data_cl = 1;
	opts.fqd.context_a.stashing.context_cl = 1;

	ret = qman_init_fq(fq, flags, &opts);
	if (ret) {
		dev_err(qidev, "Rsp FQ init failed\n");
		return -ENODEV;
	}
#ifdef DEBUG
	dev_info(qidev, "Allocated response FQ %u for CPU %u",
		 fq->fqid, cpu);
#endif
	return 0;
}
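The per-CPU response FQs are typically set up by iterating over the online CPUs; a hedged sketch of such a wrapper (alloc_rsp_fqs is an assumed name, not taken from the examples above):

/* Assumed wrapper: allocate one response FQ per online CPU. */
static int alloc_rsp_fqs(struct device *qidev)
{
	unsigned int cpu;
	int ret;

	for_each_online_cpu(cpu) {
		ret = alloc_rsp_fq_cpu(qidev, cpu);
		if (ret) {
			dev_err(qidev, "Rsp FQ alloc failed for CPU %u\n", cpu);
			return ret;
		}
	}

	return 0;
}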
Example #3
void qman_test_high(void)
{
	u32 flags;
	int res;
	struct qman_fq *fq = &fq_base;

	pr_info("qman_test_high starting\n");
	fd_init(&fd);
	fd_init(&fd_dq);

	/* Initialise (parked) FQ */
	if (qman_create_fq(0, FQ_FLAGS, fq))
		panic("qman_create_fq() failed\n");
	if (qman_init_fq(fq, QMAN_INITFQ_FLAG_LOCAL, NULL))
		panic("qman_init_fq() failed\n");

	/* Do enqueues + VDQCR, twice. (Parked FQ) */
	do_enqueues(fq);
	pr_info("VDQCR (till-empty);\n");
	if (qman_volatile_dequeue(fq, VDQCR_FLAGS,
			QM_VDQCR_NUMFRAMES_TILLEMPTY))
		panic("qman_volatile_dequeue() failed\n");
	do_enqueues(fq);
	pr_info("VDQCR (%d of %d);\n", NUM_PARTIAL, NUM_ENQUEUES);
	if (qman_volatile_dequeue(fq, VDQCR_FLAGS,
			QM_VDQCR_NUMFRAMES_SET(NUM_PARTIAL)))
		panic("qman_volatile_dequeue() failed\n");
	pr_info("VDQCR (%d of %d);\n", NUM_ENQUEUES - NUM_PARTIAL,
					NUM_ENQUEUES);
	if (qman_volatile_dequeue(fq, VDQCR_FLAGS,
			QM_VDQCR_NUMFRAMES_SET(NUM_ENQUEUES - NUM_PARTIAL)))
		panic("qman_volatile_dequeue() failed\n");

	do_enqueues(fq);
	pr_info("scheduled dequeue (till-empty)\n");
	if (qman_schedule_fq(fq))
		panic("qman_schedule_fq() failed\n");
	wait_event(waitqueue, sdqcr_complete);

	/* Retire and OOS the FQ */
	res = qman_retire_fq(fq, &flags);
	if (res < 0)
		panic("qman_retire_fq() failed\n");
	wait_event(waitqueue, retire_complete);
	if (flags & QMAN_FQ_STATE_BLOCKOOS)
		panic("leaking frames\n");
	if (qman_oos_fq(fq))
		panic("qman_oos_fq() failed\n");
	qman_destroy_fq(fq, 0);
	pr_info("qman_test_high finished\n");
}
Example #4
static struct qman_fq *create_caam_req_fq(struct device *qidev,
					  struct qman_fq *rsp_fq,
					  dma_addr_t hwdesc,
					  int fq_sched_flag)
{
	int ret;
	struct qman_fq *req_fq;
	struct qm_mcc_initfq opts;

	req_fq = kzalloc(sizeof(*req_fq), GFP_ATOMIC);
	if (!req_fq)
		return ERR_PTR(-ENOMEM);

	req_fq->cb.ern = caam_fq_ern_cb;
	req_fq->cb.fqs = NULL;

	ret = qman_create_fq(0, QMAN_FQ_FLAG_DYNAMIC_FQID |
				QMAN_FQ_FLAG_TO_DCPORTAL, req_fq);
	if (ret) {
		dev_err(qidev, "Failed to create session req FQ\n");
		goto create_req_fq_fail;
	}

	memset(&opts, 0, sizeof(opts));
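	/*
	 * The FQD is big-endian from the hardware's point of view, hence the
	 * cpu_to_be*() conversions and the qm_fqd_*() helpers below.
	 */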
	opts.we_mask = cpu_to_be16(QM_INITFQ_WE_FQCTRL | QM_INITFQ_WE_DESTWQ |
				   QM_INITFQ_WE_CONTEXTB |
				   QM_INITFQ_WE_CONTEXTA | QM_INITFQ_WE_CGID);
	opts.fqd.fq_ctrl = cpu_to_be16(QM_FQCTRL_CPCSTASH | QM_FQCTRL_CGE);
	qm_fqd_set_destwq(&opts.fqd, qm_channel_caam, 2);
	opts.fqd.context_b = cpu_to_be32(qman_fq_fqid(rsp_fq));
	qm_fqd_context_a_set64(&opts.fqd, hwdesc);
	opts.fqd.cgid = qipriv.cgr.cgrid;

	ret = qman_init_fq(req_fq, fq_sched_flag, &opts);
	if (ret) {
		dev_err(qidev, "Failed to init session req FQ\n");
		goto init_req_fq_fail;
	}

	dev_info(qidev, "Allocated request FQ %u for CPU %u\n", req_fq->fqid,
		 smp_processor_id());
	return req_fq;

init_req_fq_fail:
	qman_destroy_fq(req_fq);
create_req_fq_fail:
	kfree(req_fq);
	return ERR_PTR(ret);
}
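Once the request FQ is initialised, work reaches CAAM by enqueueing a frame descriptor on it. A minimal sketch against the upstream QMan API; building the compound frame (formats, lengths, S/G table) is driver-specific and omitted here, and dma_addr stands in for an already DMA-mapped buffer:

	struct qm_fd fd;
	int ret;

	memset(&fd, 0, sizeof(fd));
	qm_fd_addr_set64(&fd, dma_addr);	/* assumed DMA-mapped address */
	/* frame format/length setup omitted for brevity */

	ret = qman_enqueue(req_fq, &fd);
	if (ret)
		dev_err(qidev, "qman_enqueue failed: %d\n", ret);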
Example #5
static int alloc_rsp_fq_cpu(struct device *qidev, unsigned int cpu)
{
	struct qm_mcc_initfq opts;
	struct qman_fq *fq;
	int ret;

	fq = kzalloc(sizeof(*fq), GFP_KERNEL | GFP_DMA);
	if (!fq)
		return -ENOMEM;

	fq->cb.dqrr = caam_rsp_fq_dqrr_cb;

	ret = qman_create_fq(0, QMAN_FQ_FLAG_NO_ENQUEUE |
			     QMAN_FQ_FLAG_DYNAMIC_FQID, fq);
	if (ret) {
		dev_err(qidev, "Rsp FQ create failed\n");
		kfree(fq);
		return -ENODEV;
	}

	memset(&opts, 0, sizeof(opts));
	opts.we_mask = cpu_to_be16(QM_INITFQ_WE_FQCTRL | QM_INITFQ_WE_DESTWQ |
				   QM_INITFQ_WE_CONTEXTB |
				   QM_INITFQ_WE_CONTEXTA | QM_INITFQ_WE_CGID);
	opts.fqd.fq_ctrl = cpu_to_be16(QM_FQCTRL_CTXASTASHING |
				       QM_FQCTRL_CPCSTASH | QM_FQCTRL_CGE);
	qm_fqd_set_destwq(&opts.fqd, qman_affine_channel(cpu), 3);
	opts.fqd.cgid = qipriv.cgr.cgrid;
	opts.fqd.context_a.stashing.exclusive =	QM_STASHING_EXCL_CTX |
						QM_STASHING_EXCL_DATA;
	qm_fqd_set_stashing(&opts.fqd, 0, 1, 1);

	ret = qman_init_fq(fq, QMAN_INITFQ_FLAG_SCHED, &opts);
	if (ret) {
		dev_err(qidev, "Rsp FQ init failed\n");
		kfree(fq);
		return -ENODEV;
	}

	per_cpu(pcpu_qipriv.rsp_fq, cpu) = fq;

	dev_info(qidev, "Allocated response FQ %u for CPU %u", fq->fqid, cpu);
	return 0;
}
Example #6
/* Creates Frame Queues */
static uint32_t oh_fq_create(struct qman_fq *fq,
	uint32_t fq_id, uint16_t channel,
	uint16_t wq_id)
{
	struct qm_mcc_initfq fq_opts;
	uint32_t create_flags, init_flags;
	uint32_t ret = 0;

	if (fq == NULL)
		return 1;

	/* Set flags for FQ create */
	create_flags = QMAN_FQ_FLAG_LOCKED | QMAN_FQ_FLAG_TO_DCPORTAL;

	/* Create frame queue */
	ret = qman_create_fq(fq_id, create_flags, fq);
	if (ret != 0)
		return 1;

	/* Set flags for FQ init */
	init_flags = QMAN_INITFQ_FLAG_SCHED;

	/* Set FQ init options. Specify destination WQ ID and channel */
	memset(&fq_opts, 0, sizeof(fq_opts));
	fq_opts.we_mask = QM_INITFQ_WE_DESTWQ;
	fq_opts.fqd.dest.wq = wq_id;
	fq_opts.fqd.dest.channel = channel;

	/* Initialize frame queue */
	ret = qman_init_fq(fq, init_flags, &fq_opts);
	if (ret != 0) {
		qman_destroy_fq(fq, 0);
		return 1;
	}

	return 0;
}
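The caller owns the qman_fq storage and chooses the FQID, channel and work queue; a hypothetical usage sketch (oh_tx_fq, oh_port_init_tx and the WQ value are illustrative, not part of the example above):

/* Hypothetical usage: bind a TX FQ to an O/H port channel, work queue 3. */
static struct qman_fq oh_tx_fq;

static int oh_port_init_tx(uint32_t fq_id, uint16_t channel)
{
	if (oh_fq_create(&oh_tx_fq, fq_id, channel, 3) != 0) {
		pr_err("oh_fq_create() failed\n");
		return -ENODEV;
	}

	return 0;
}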
Example #7
int qman_test_api(void)
{
	unsigned int flags, frmcnt;
	int err;
	struct qman_fq *fq = &fq_base;

	pr_info("%s(): Starting\n", __func__);
	fd_init(&fd);
	fd_init(&fd_dq);

	/* Initialise (parked) FQ */
	err = qman_create_fq(0, FQ_FLAGS, fq);
	if (err) {
		pr_crit("qman_create_fq() failed\n");
		goto failed;
	}
	err = qman_init_fq(fq, QMAN_INITFQ_FLAG_LOCAL, NULL);
	if (err) {
		pr_crit("qman_init_fq() failed\n");
		goto failed;
	}
	/* Do enqueues + VDQCR, twice. (Parked FQ) */
	err = do_enqueues(fq);
	if (err)
		goto failed;
	pr_info("VDQCR (till-empty);\n");
	frmcnt = QM_VDQCR_NUMFRAMES_TILLEMPTY;
	err = qman_volatile_dequeue(fq, VDQCR_FLAGS, frmcnt);
	if (err) {
		pr_crit("qman_volatile_dequeue() failed\n");
		goto failed;
	}
	err = do_enqueues(fq);
	if (err)
		goto failed;
	pr_info("VDQCR (%d of %d);\n", NUM_PARTIAL, NUM_ENQUEUES);
	frmcnt = QM_VDQCR_NUMFRAMES_SET(NUM_PARTIAL);
	err = qman_volatile_dequeue(fq, VDQCR_FLAGS, frmcnt);
	if (err) {
		pr_crit("qman_volatile_dequeue() failed\n");
		goto failed;
	}
	pr_info("VDQCR (%d of %d);\n", NUM_ENQUEUES - NUM_PARTIAL,
		NUM_ENQUEUES);
	frmcnt = QM_VDQCR_NUMFRAMES_SET(NUM_ENQUEUES - NUM_PARTIAL);
	err = qman_volatile_dequeue(fq, VDQCR_FLAGS, frmcnt);
	if (err) {
		pr_err("qman_volatile_dequeue() failed\n");
		goto failed;
	}

	err = do_enqueues(fq);
	if (err)
		goto failed;
	pr_info("scheduled dequeue (till-empty)\n");
	err = qman_schedule_fq(fq);
	if (err) {
		pr_crit("qman_schedule_fq() failed\n");
		goto failed;
	}
	wait_event(waitqueue, sdqcr_complete);

	/* Retire and OOS the FQ */
	err = qman_retire_fq(fq, &flags);
	if (err < 0) {
		pr_crit("qman_retire_fq() failed\n");
		goto failed;
	}
	wait_event(waitqueue, retire_complete);
	if (flags & QMAN_FQ_STATE_BLOCKOOS) {
		err = -EIO;
		pr_crit("leaking frames\n");
		goto failed;
	}
	err = qman_oos_fq(fq);
	if (err) {
		pr_crit("qman_oos_fq() failed\n");
		goto failed;
	}
	qman_destroy_fq(fq);
	pr_info("%s(): Finished\n", __func__);
	return 0;

failed:
	WARN_ON(1);
	return err;
}