/*
 * create_caam_req_fq - allocate and initialise a request FQ towards CAAM
 * @qidev:         device used only for diagnostic messages
 * @rsp_fq:        response FQ whose FQID is programmed into context_b
 * @hwdesc:        DMA address of the hw descriptor, split into context_a hi/lo
 * @fq_sched_flag: flags forwarded verbatim to qman_init_fq()
 *
 * Returns the newly created FQ on success or an ERR_PTR() on failure.
 * The caller owns the returned FQ and is responsible for retiring,
 * destroying and freeing it.
 */
static struct qman_fq *create_caam_req_fq(struct device *qidev,
					  struct qman_fq *rsp_fq,
					  dma_addr_t hwdesc,
					  int fq_sched_flag)
{
	int ret, flags;
	struct qman_fq *req_fq;
	struct qm_mcc_initfq opts;

	req_fq = kzalloc(sizeof(*req_fq), GFP_ATOMIC);
	if (!req_fq) {
		dev_err(qidev, "Mem alloc for CAAM req FQ failed\n");
		return ERR_PTR(-ENOMEM);
	}

	/* Only the enqueue-rejection callback is of interest here */
	req_fq->cb.ern = caam_fq_ern_cb;
	req_fq->cb.fqs = NULL;

	flags = QMAN_FQ_FLAG_DYNAMIC_FQID | QMAN_FQ_FLAG_TO_DCPORTAL |
		QMAN_FQ_FLAG_LOCKED;
	ret = qman_create_fq(0, flags, req_fq);
	if (ret) {
		dev_err(qidev, "Failed to create session REQ FQ\n");
		goto create_req_fq_fail;
	}

	flags = fq_sched_flag;
	/*
	 * Zero the whole init-FQ command first: opts is a stack variable and
	 * any field not explicitly assigned below would otherwise carry stack
	 * garbage into qman_init_fq().
	 */
	memset(&opts, 0, sizeof(opts));
	opts.we_mask = QM_INITFQ_WE_FQCTRL | QM_INITFQ_WE_DESTWQ |
		       QM_INITFQ_WE_CONTEXTB | QM_INITFQ_WE_CONTEXTA;
	opts.fqd.fq_ctrl = QM_FQCTRL_CPCSTASH;
	opts.fqd.dest.channel = qm_channel_caam;
	opts.fqd.dest.wq = 2;
	/* Tell CAAM where to enqueue responses and find the hw descriptor */
	opts.fqd.context_b = qman_fq_fqid(rsp_fq);
	opts.fqd.context_a.hi = upper_32_bits(hwdesc);
	opts.fqd.context_a.lo = lower_32_bits(hwdesc);

	ret = qman_init_fq(req_fq, flags, &opts);
	if (ret) {
		dev_err(qidev, "Failed to init session req FQ\n");
		goto init_req_fq_fail;
	}

#ifdef DEBUG
	dev_info(qidev, "Allocated request FQ %u for CPU %u\n",
		 req_fq->fqid, smp_processor_id());
#endif
	return req_fq;

init_req_fq_fail:
	qman_destroy_fq(req_fq, 0);
create_req_fq_fail:
	kfree(req_fq);
	return ERR_PTR(ret);
}
/*
 * oh_fq_destroy - take a frame queue out of service and release it
 * @fq: frame queue to tear down
 *
 * Retires the FQ, moves it out-of-service and finally destroys it.
 * Failures of the intermediate steps are logged but teardown proceeds.
 */
static void oh_fq_destroy(struct qman_fq *fq)
{
	int err;

	err = qman_retire_fq(fq, NULL);
	if (unlikely(err < 0))
		pr_err(KBUILD_MODNAME": %s:%hu:%s(): qman_retire_fq(%u)=%d\n",
		       KBUILD_BASENAME".c", __LINE__, __func__,
		       qman_fq_fqid(fq), err);

	err = qman_oos_fq(fq);
	if (unlikely(err < 0))
		pr_err(KBUILD_MODNAME": %s:%hu:%s(): qman_oos_fq(%u)=%d\n",
		       KBUILD_BASENAME".c", __LINE__, __func__,
		       qman_fq_fqid(fq), err);

	qman_destroy_fq(fq, 0);
}
/*
 * create_caam_req_fq - build a request FQ directed at the CAAM DCP portal
 * @qidev:         device used for diagnostic messages
 * @rsp_fq:        response FQ; its FQID is written into context_b
 * @hwdesc:        DMA address of the hw descriptor, stored via context_a
 * @fq_sched_flag: flags forwarded verbatim to qman_init_fq()
 *
 * The FQ receives a dynamically allocated FQID, enables context stashing
 * and congestion-group membership (qipriv.cgr).
 *
 * Returns the FQ on success or an ERR_PTR() on failure; the caller owns
 * the returned FQ.
 */
static struct qman_fq *create_caam_req_fq(struct device *qidev,
					  struct qman_fq *rsp_fq,
					  dma_addr_t hwdesc,
					  int fq_sched_flag)
{
	struct qm_mcc_initfq opts;
	struct qman_fq *req_fq;
	int err;

	req_fq = kzalloc(sizeof(*req_fq), GFP_ATOMIC);
	if (!req_fq)
		return ERR_PTR(-ENOMEM);

	/* Only enqueue rejections are of interest for a request FQ */
	req_fq->cb.ern = caam_fq_ern_cb;
	req_fq->cb.fqs = NULL;

	err = qman_create_fq(0, QMAN_FQ_FLAG_DYNAMIC_FQID |
			     QMAN_FQ_FLAG_TO_DCPORTAL, req_fq);
	if (err) {
		dev_err(qidev, "Failed to create session req FQ\n");
		goto create_req_fq_fail;
	}

	/* Start from an all-zero init command, then fill the masked fields */
	memset(&opts, 0, sizeof(opts));
	opts.we_mask = cpu_to_be16(QM_INITFQ_WE_FQCTRL | QM_INITFQ_WE_DESTWQ |
				   QM_INITFQ_WE_CONTEXTB |
				   QM_INITFQ_WE_CONTEXTA | QM_INITFQ_WE_CGID);
	opts.fqd.fq_ctrl = cpu_to_be16(QM_FQCTRL_CPCSTASH | QM_FQCTRL_CGE);
	opts.fqd.cgid = qipriv.cgr.cgrid;
	opts.fqd.context_b = cpu_to_be32(qman_fq_fqid(rsp_fq));
	qm_fqd_set_destwq(&opts.fqd, qm_channel_caam, 2);
	qm_fqd_context_a_set64(&opts.fqd, hwdesc);

	err = qman_init_fq(req_fq, fq_sched_flag, &opts);
	if (err) {
		dev_err(qidev, "Failed to init session req FQ\n");
		goto init_req_fq_fail;
	}

	dev_info(qidev, "Allocated request FQ %u for CPU %u\n", req_fq->fqid,
		 smp_processor_id());

	return req_fq;

init_req_fq_fail:
	qman_destroy_fq(req_fq);
create_req_fq_fail:
	kfree(req_fq);
	return ERR_PTR(err);
}