/*
 * create_caam_req_fq - allocate and initialise a request FQ towards CAAM.
 * @qidev:         device used for error logging
 * @rsp_fq:        response FQ whose FQID is programmed into context_b
 * @hwdesc:        DMA address of the HW descriptor, split into context_a hi/lo
 * @fq_sched_flag: flags forwarded to qman_init_fq() (e.g. schedule now/parked)
 *
 * Returns the new FQ on success, or an ERR_PTR() value on failure.
 * The caller owns the returned FQ and must eventually destroy and kfree it.
 */
static struct qman_fq *create_caam_req_fq(struct device *qidev,
					  struct qman_fq *rsp_fq,
					  dma_addr_t hwdesc,
					  int fq_sched_flag)
{
	int ret, flags;
	struct qman_fq *req_fq;
	struct qm_mcc_initfq opts;

	req_fq = kzalloc(sizeof(*req_fq), GFP_ATOMIC);
	if (!req_fq)
		/* allocation failures are already logged by the allocator */
		return ERR_PTR(-ENOMEM);

	req_fq->cb.ern = caam_fq_ern_cb;
	req_fq->cb.fqs = NULL;

	flags = QMAN_FQ_FLAG_DYNAMIC_FQID | QMAN_FQ_FLAG_TO_DCPORTAL |
		QMAN_FQ_FLAG_LOCKED;

	ret = qman_create_fq(0, flags, req_fq);
	if (ret) {
		dev_err(qidev, "Failed to create session REQ FQ\n");
		goto create_req_fq_fail;
	}

	/*
	 * Fix: opts was previously passed to qman_init_fq() with
	 * uninitialised stack contents; fields not selected by we_mask
	 * still carried garbage. Zero the whole structure first.
	 */
	memset(&opts, 0, sizeof(opts));

	flags = fq_sched_flag;
	opts.we_mask = QM_INITFQ_WE_FQCTRL | QM_INITFQ_WE_DESTWQ |
		       QM_INITFQ_WE_CONTEXTB | QM_INITFQ_WE_CONTEXTA;
	opts.fqd.fq_ctrl = QM_FQCTRL_CPCSTASH;
	opts.fqd.dest.channel = qm_channel_caam;
	opts.fqd.dest.wq = 2;
	/* Tell CAAM which FQ to reply on, and where the HW descriptor lives */
	opts.fqd.context_b = qman_fq_fqid(rsp_fq);
	opts.fqd.context_a.hi = upper_32_bits(hwdesc);
	opts.fqd.context_a.lo = lower_32_bits(hwdesc);

	ret = qman_init_fq(req_fq, flags, &opts);
	if (ret) {
		dev_err(qidev, "Failed to init session req FQ\n");
		goto init_req_fq_fail;
	}

#ifdef DEBUG
	dev_info(qidev, "Allocated request FQ %u for CPU %u\n",
		 req_fq->fqid, smp_processor_id());
#endif
	return req_fq;

init_req_fq_fail:
	qman_destroy_fq(req_fq, 0);
create_req_fq_fail:
	kfree(req_fq);
	return ERR_PTR(ret);
}
/*
 * qman_test_high - boot-time self-test of the QMan high-level API.
 *
 * Creates one FQ (dynamic FQID, initially parked), then runs three rounds of
 * enqueues drained by volatile dequeues (till-empty, then a NUM_PARTIAL /
 * remainder split), followed by one round drained via scheduled dequeue.
 * Finally retires, OOSes and destroys the FQ.  Any failure panics — this is
 * a self-test, not production error handling.
 *
 * NOTE(review): relies on file-scope state not visible here (fq_base, fd,
 * fd_dq, waitqueue, sdqcr_complete, retire_complete) — presumably set by the
 * dequeue/retirement callbacks; confirm against the rest of the file.
 */
void qman_test_high(void)
{
	int flags, res;
	struct qman_fq *fq = &fq_base;

	pr_info("qman_test_high starting\n");
	fd_init(&fd);
	fd_init(&fd_dq);

	/* Initialise (parked) FQ */
	if (qman_create_fq(0, FQ_FLAGS, fq))
		panic("qman_create_fq() failed\n");
	if (qman_init_fq(fq, QMAN_INITFQ_FLAG_LOCAL, NULL))
		panic("qman_init_fq() failed\n");

	/* Do enqueues + VDQCR, twice. (Parked FQ) */
	do_enqueues(fq);
	pr_info("VDQCR (till-empty);\n");
	if (qman_volatile_dequeue(fq, VDQCR_FLAGS,
				  QM_VDQCR_NUMFRAMES_TILLEMPTY))
		panic("qman_volatile_dequeue() failed\n");
	do_enqueues(fq);
	/* Second round: drain in two explicit chunks to exercise NUMFRAMES */
	pr_info("VDQCR (%d of %d);\n", NUM_PARTIAL, NUM_ENQUEUES);
	if (qman_volatile_dequeue(fq, VDQCR_FLAGS,
				  QM_VDQCR_NUMFRAMES_SET(NUM_PARTIAL)))
		panic("qman_volatile_dequeue() failed\n");
	pr_info("VDQCR (%d of %d);\n", NUM_ENQUEUES - NUM_PARTIAL,
		NUM_ENQUEUES);
	if (qman_volatile_dequeue(fq, VDQCR_FLAGS,
				  QM_VDQCR_NUMFRAMES_SET(NUM_ENQUEUES -
							 NUM_PARTIAL)))
		panic("qman_volatile_dequeue() failed\n");

	do_enqueues(fq);
	pr_info("scheduled dequeue (till-empty)\n");
	if (qman_schedule_fq(fq))
		panic("qman_schedule_fq() failed\n");
	/* sdqcr_complete is presumably flagged by the dequeue callback */
	wait_event(waitqueue, sdqcr_complete);

	/* Retire and OOS the FQ */
	res = qman_retire_fq(fq, &flags);
	if (res < 0)
		panic("qman_retire_fq() failed\n");
	wait_event(waitqueue, retire_complete);
	/* BLOCKOOS set means frames are still held somewhere — a leak */
	if (flags & QMAN_FQ_STATE_BLOCKOOS)
		panic("leaking frames\n");
	if (qman_oos_fq(fq))
		panic("qman_oos_fq() failed\n");
	qman_destroy_fq(fq, 0);
	pr_info("qman_test_high finished\n");
}
/*
 * create_caam_req_fq - allocate and initialise a request FQ towards CAAM.
 * @qidev:         device used for logging
 * @rsp_fq:        response FQ whose FQID is programmed into context_b
 * @hwdesc:        DMA address of the HW descriptor, packed into context_a
 * @fq_sched_flag: flags forwarded to qman_init_fq() (schedule vs. parked)
 *
 * Big-endian-aware variant: FQD fields are written with cpu_to_be*() /
 * qm_fqd_* accessors, and the FQ is attached to the driver's CGR
 * (qipriv.cgr) for congestion management (QM_FQCTRL_CGE).
 *
 * Returns the new FQ on success, or an ERR_PTR() value on failure.
 * The caller owns the returned FQ and must eventually destroy and kfree it.
 */
static struct qman_fq *create_caam_req_fq(struct device *qidev,
					  struct qman_fq *rsp_fq,
					  dma_addr_t hwdesc,
					  int fq_sched_flag)
{
	int ret;
	struct qman_fq *req_fq;
	struct qm_mcc_initfq opts;

	req_fq = kzalloc(sizeof(*req_fq), GFP_ATOMIC);
	if (!req_fq)
		return ERR_PTR(-ENOMEM);

	req_fq->cb.ern = caam_fq_ern_cb;
	req_fq->cb.fqs = NULL;

	ret = qman_create_fq(0, QMAN_FQ_FLAG_DYNAMIC_FQID |
			     QMAN_FQ_FLAG_TO_DCPORTAL, req_fq);
	if (ret) {
		dev_err(qidev, "Failed to create session req FQ\n");
		goto create_req_fq_fail;
	}

	/* Zero first so fields not covered by we_mask are defined */
	memset(&opts, 0, sizeof(opts));
	opts.we_mask = cpu_to_be16(QM_INITFQ_WE_FQCTRL | QM_INITFQ_WE_DESTWQ |
				   QM_INITFQ_WE_CONTEXTB |
				   QM_INITFQ_WE_CONTEXTA |
				   QM_INITFQ_WE_CGID);
	opts.fqd.fq_ctrl = cpu_to_be16(QM_FQCTRL_CPCSTASH | QM_FQCTRL_CGE);
	qm_fqd_set_destwq(&opts.fqd, qm_channel_caam, 2);
	/* Tell CAAM which FQ to reply on, and where the HW descriptor lives */
	opts.fqd.context_b = cpu_to_be32(qman_fq_fqid(rsp_fq));
	qm_fqd_context_a_set64(&opts.fqd, hwdesc);
	/* Join the driver-wide congestion group */
	opts.fqd.cgid = qipriv.cgr.cgrid;

	ret = qman_init_fq(req_fq, fq_sched_flag, &opts);
	if (ret) {
		dev_err(qidev, "Failed to init session req FQ\n");
		goto init_req_fq_fail;
	}

	dev_info(qidev, "Allocated request FQ %u for CPU %u\n",
		 req_fq->fqid, smp_processor_id());
	return req_fq;

init_req_fq_fail:
	qman_destroy_fq(req_fq);
create_req_fq_fail:
	kfree(req_fq);
	return ERR_PTR(ret);
}
static int kill_fq(struct device *qidev, struct qman_fq *fq) { enum qman_fq_state state; u32 flags; int ret; ret = qman_retire_fq(fq, &flags); if (ret < 0) { dev_err(qidev, "qman_retire_fq failed\n"); return ret; } if (!ret) goto empty_fq; /* Async FQ retirement condition */ if (1 == ret) { /* Retry till FQ gets in retired state */ do { msleep(20); qman_fq_state(fq, &state, &flags); } while (qman_fq_state_retired != state); WARN_ON(flags & QMAN_FQ_STATE_BLOCKOOS); WARN_ON(flags & QMAN_FQ_STATE_ORL); } empty_fq: if (flags & QMAN_FQ_STATE_NE) { ret = empty_retired_fq(qidev, fq); if (ret) { dev_err(qidev, "empty_retired_fq fail for FQ: %u\n", fq->fqid); return ret; } } ret = qman_oos_fq(fq); if (ret) dev_err(qidev, "OOS of FQID: %u failed\n", fq->fqid); qman_destroy_fq(fq, 0); return ret; }
/*
 * oh_fq_destroy - retire, OOS and destroy a frame queue, logging any
 * error from each step but always proceeding to full teardown.
 */
static void oh_fq_destroy(struct qman_fq *fq)
{
	int _errno;

	_errno = qman_retire_fq(fq, NULL);
	if (unlikely(_errno < 0)) {
		pr_err(KBUILD_MODNAME": %s:%hu:%s(): qman_retire_fq(%u)=%d\n",
		       KBUILD_BASENAME".c", __LINE__, __func__,
		       qman_fq_fqid(fq), _errno);
	}

	_errno = qman_oos_fq(fq);
	if (unlikely(_errno < 0)) {
		pr_err(KBUILD_MODNAME": %s:%hu:%s(): qman_oos_fq(%u)=%d\n",
		       KBUILD_BASENAME".c", __LINE__, __func__,
		       qman_fq_fqid(fq), _errno);
	}

	qman_destroy_fq(fq, 0);
}
/*
 * oh_fq_create - create and schedule a frame queue towards a DC portal.
 * @fq:      caller-provided FQ object to initialise
 * @fq_id:   FQID to use (0 would request a dynamic ID, but flags here
 *           assume a fixed ID supplied by the caller)
 * @channel: destination channel for dequeued frames
 * @wq_id:   destination work-queue priority within @channel
 *
 * Returns 0 on success, 1 on any failure (legacy convention kept for
 * existing callers).
 */
static uint32_t oh_fq_create(struct qman_fq *fq, uint32_t fq_id,
			     uint16_t channel, uint16_t wq_id)
{
	struct qm_mcc_initfq fq_opts;
	uint32_t create_flags, init_flags;
	uint32_t ret = 0;

	if (fq == NULL)
		return 1;

	/* Set flags for FQ create */
	create_flags = QMAN_FQ_FLAG_LOCKED | QMAN_FQ_FLAG_TO_DCPORTAL;

	/* Create frame queue */
	ret = qman_create_fq(fq_id, create_flags, fq);
	if (ret != 0)
		return 1;

	/* Set flags for FQ init */
	init_flags = QMAN_INITFQ_FLAG_SCHED;

	/*
	 * Fix: fq_opts was passed to qman_init_fq() with uninitialised
	 * stack contents; only dest.wq/dest.channel were assigned.  Zero
	 * the whole structure so every other field is well-defined.
	 */
	memset(&fq_opts, 0, sizeof(fq_opts));

	/* Set FQ init options. Specify destination WQ ID and channel */
	fq_opts.we_mask = QM_INITFQ_WE_DESTWQ;
	fq_opts.fqd.dest.wq = wq_id;
	fq_opts.fqd.dest.channel = channel;

	/* Initialize frame queue */
	ret = qman_init_fq(fq, init_flags, &fq_opts);
	if (ret != 0) {
		/* Undo the create so the caller can retry cleanly */
		qman_destroy_fq(fq, 0);
		return 1;
	}

	return 0;
}
/*
 * qman_test_api - self-test of the QMan high-level API.
 *
 * Refactored variant of the panic-based test: every failure is reported
 * with pr_crit/pr_err and propagated as a negative error code via the
 * 'failed' label (with a WARN_ON to capture a backtrace).
 *
 * Flow: create a parked FQ, run three rounds of enqueues drained by
 * volatile dequeues (till-empty, then NUM_PARTIAL / remainder), one round
 * drained by scheduled dequeue, then retire, OOS and destroy the FQ.
 *
 * NOTE(review): relies on file-scope state not visible here (fq_base, fd,
 * fd_dq, waitqueue, sdqcr_complete, retire_complete) — presumably set by
 * the dequeue/retirement callbacks; confirm against the rest of the file.
 *
 * Returns 0 on success or a negative error code.
 */
int qman_test_api(void)
{
	unsigned int flags, frmcnt;
	int err;
	struct qman_fq *fq = &fq_base;

	pr_info("%s(): Starting\n", __func__);
	fd_init(&fd);
	fd_init(&fd_dq);

	/* Initialise (parked) FQ */
	err = qman_create_fq(0, FQ_FLAGS, fq);
	if (err) {
		pr_crit("qman_create_fq() failed\n");
		goto failed;
	}
	err = qman_init_fq(fq, QMAN_INITFQ_FLAG_LOCAL, NULL);
	if (err) {
		pr_crit("qman_init_fq() failed\n");
		goto failed;
	}

	/* Do enqueues + VDQCR, twice. (Parked FQ) */
	err = do_enqueues(fq);
	if (err)
		goto failed;
	pr_info("VDQCR (till-empty);\n");
	frmcnt = QM_VDQCR_NUMFRAMES_TILLEMPTY;
	err = qman_volatile_dequeue(fq, VDQCR_FLAGS, frmcnt);
	if (err) {
		pr_crit("qman_volatile_dequeue() failed\n");
		goto failed;
	}
	err = do_enqueues(fq);
	if (err)
		goto failed;
	/* Second round: drain in two explicit chunks to exercise NUMFRAMES */
	pr_info("VDQCR (%d of %d);\n", NUM_PARTIAL, NUM_ENQUEUES);
	frmcnt = QM_VDQCR_NUMFRAMES_SET(NUM_PARTIAL);
	err = qman_volatile_dequeue(fq, VDQCR_FLAGS, frmcnt);
	if (err) {
		pr_crit("qman_volatile_dequeue() failed\n");
		goto failed;
	}
	pr_info("VDQCR (%d of %d);\n", NUM_ENQUEUES - NUM_PARTIAL,
		NUM_ENQUEUES);
	frmcnt = QM_VDQCR_NUMFRAMES_SET(NUM_ENQUEUES - NUM_PARTIAL);
	err = qman_volatile_dequeue(fq, VDQCR_FLAGS, frmcnt);
	if (err) {
		pr_err("qman_volatile_dequeue() failed\n");
		goto failed;
	}

	err = do_enqueues(fq);
	if (err)
		goto failed;
	pr_info("scheduled dequeue (till-empty)\n");
	err = qman_schedule_fq(fq);
	if (err) {
		pr_crit("qman_schedule_fq() failed\n");
		goto failed;
	}
	/* sdqcr_complete is presumably flagged by the dequeue callback */
	wait_event(waitqueue, sdqcr_complete);

	/* Retire and OOS the FQ */
	err = qman_retire_fq(fq, &flags);
	if (err < 0) {
		pr_crit("qman_retire_fq() failed\n");
		goto failed;
	}
	wait_event(waitqueue, retire_complete);
	/* BLOCKOOS set means frames are still held somewhere — a leak */
	if (flags & QMAN_FQ_STATE_BLOCKOOS) {
		err = -EIO;
		pr_crit("leaking frames\n");
		goto failed;
	}
	err = qman_oos_fq(fq);
	if (err) {
		pr_crit("qman_oos_fq() failed\n");
		goto failed;
	}
	qman_destroy_fq(fq);
	pr_info("%s(): Finished\n", __func__);
	return 0;

failed:
	WARN_ON(1);
	return err;
}