void qman_test_high(void) { int flags, res; struct qman_fq *fq = &fq_base; pr_info("qman_test_high starting\n"); fd_init(&fd); fd_init(&fd_dq); /* Initialise (parked) FQ */ if (qman_create_fq(0, FQ_FLAGS, fq)) panic("qman_create_fq() failed\n"); if (qman_init_fq(fq, QMAN_INITFQ_FLAG_LOCAL, NULL)) panic("qman_init_fq() failed\n"); /* Do enqueues + VDQCR, twice. (Parked FQ) */ do_enqueues(fq); pr_info("VDQCR (till-empty);\n"); if (qman_volatile_dequeue(fq, VDQCR_FLAGS, QM_VDQCR_NUMFRAMES_TILLEMPTY)) panic("qman_volatile_dequeue() failed\n"); do_enqueues(fq); pr_info("VDQCR (%d of %d);\n", NUM_PARTIAL, NUM_ENQUEUES); if (qman_volatile_dequeue(fq, VDQCR_FLAGS, QM_VDQCR_NUMFRAMES_SET(NUM_PARTIAL))) panic("qman_volatile_dequeue() failed\n"); pr_info("VDQCR (%d of %d);\n", NUM_ENQUEUES - NUM_PARTIAL, NUM_ENQUEUES); if (qman_volatile_dequeue(fq, VDQCR_FLAGS, QM_VDQCR_NUMFRAMES_SET(NUM_ENQUEUES - NUM_PARTIAL))) panic("qman_volatile_dequeue() failed\n"); do_enqueues(fq); pr_info("scheduled dequeue (till-empty)\n"); if (qman_schedule_fq(fq)) panic("qman_schedule_fq() failed\n"); wait_event(waitqueue, sdqcr_complete); /* Retire and OOS the FQ */ res = qman_retire_fq(fq, &flags); if (res < 0) panic("qman_retire_fq() failed\n"); wait_event(waitqueue, retire_complete); if (flags & QMAN_FQ_STATE_BLOCKOOS) panic("leaking frames\n"); if (qman_oos_fq(fq)) panic("qman_oos_fq() failed\n"); qman_destroy_fq(fq, 0); pr_info("qman_test_high finished\n"); }
/*
 * caam_drv_ctx_update - replace the shared descriptor of a driver context.
 * @drv_ctx: driver context whose descriptor is being updated
 * @sh_desc: new shared descriptor (at most MAX_SDLEN words)
 *
 * A new request FQ is created in parked state and hooked up to the context
 * so that new requests keep queuing while the old FQ is drained and killed.
 * Only then are the pre-header and shared descriptor rewritten and synced
 * to the device, and the new FQ scheduled.
 *
 * Returns 0 on success (including the case where the descriptor was updated
 * but the old/new FQ could not be killed — only a warning is logged then),
 * or a negative errno if validation, FQ allocation or draining fails; on
 * those failure paths the context is reverted to the old FQ.
 */
int caam_drv_ctx_update(struct caam_drv_ctx *drv_ctx, u32 *sh_desc)
{
	int ret;
	u32 num_words;
	struct qman_fq *new_fq, *old_fq;
	struct device *qidev = drv_ctx->qidev;

	num_words = desc_len(sh_desc);
	if (num_words > MAX_SDLEN) {
		dev_err(qidev, "Invalid descriptor len: %d words\n",
			num_words);
		return -EINVAL;
	}

	/* Note down older req FQ */
	old_fq = drv_ctx->req_fq;

	/* Create a new req FQ in parked state */
	new_fq = create_caam_req_fq(drv_ctx->qidev, drv_ctx->rsp_fq,
				    drv_ctx->context_a, 0);
	if (unlikely(IS_ERR_OR_NULL(new_fq))) {
		dev_err(qidev, "FQ allocation for shdesc update failed\n");
		/*
		 * PTR_ERR(NULL) is 0, which would falsely report success
		 * here; map a NULL return to -ENOMEM instead.
		 */
		return new_fq ? PTR_ERR(new_fq) : -ENOMEM;
	}

	/* Hook up new FQ to context so that new requests keep queuing */
	drv_ctx->req_fq = new_fq;

	/* Empty and remove the older FQ */
	ret = empty_caam_fq(old_fq);
	if (ret) {
		dev_err(qidev, "Old CAAM FQ empty failed: %d\n", ret);

		/* We can revert to older FQ */
		drv_ctx->req_fq = old_fq;

		if (kill_fq(qidev, new_fq))
			dev_warn(qidev, "New CAAM FQ: %u kill failed\n",
				 new_fq->fqid);

		return ret;
	}

	/*
	 * Re-initialise pre-header. Set RSLS and SDLEN.
	 * Update the shared descriptor for driver context.
	 */
	drv_ctx->prehdr[0] = cpu_to_caam32((1 << PREHDR_RSLS_SHIFT) |
					   num_words);
	memcpy(drv_ctx->sh_desc, sh_desc, desc_bytes(sh_desc));
	dma_sync_single_for_device(qidev, drv_ctx->context_a,
				   sizeof(drv_ctx->sh_desc) +
				   sizeof(drv_ctx->prehdr),
				   DMA_BIDIRECTIONAL);

	/* Put the new FQ in scheduled state */
	ret = qman_schedule_fq(new_fq);
	if (ret) {
		dev_err(qidev, "Fail to sched new CAAM FQ, ecode = %d\n", ret);

		/*
		 * We can kill new FQ and revert to old FQ.
		 * Since the desc is already modified, it is success case
		 */
		drv_ctx->req_fq = old_fq;

		if (kill_fq(qidev, new_fq))
			dev_warn(qidev, "New CAAM FQ: %u kill failed\n",
				 new_fq->fqid);
	} else if (kill_fq(qidev, old_fq)) {
		dev_warn(qidev, "Old CAAM FQ: %u kill failed\n", old_fq->fqid);
	}

	return 0;
}
int qman_test_api(void) { unsigned int flags, frmcnt; int err; struct qman_fq *fq = &fq_base; pr_info("%s(): Starting\n", __func__); fd_init(&fd); fd_init(&fd_dq); /* Initialise (parked) FQ */ err = qman_create_fq(0, FQ_FLAGS, fq); if (err) { pr_crit("qman_create_fq() failed\n"); goto failed; } err = qman_init_fq(fq, QMAN_INITFQ_FLAG_LOCAL, NULL); if (err) { pr_crit("qman_init_fq() failed\n"); goto failed; } /* Do enqueues + VDQCR, twice. (Parked FQ) */ err = do_enqueues(fq); if (err) goto failed; pr_info("VDQCR (till-empty);\n"); frmcnt = QM_VDQCR_NUMFRAMES_TILLEMPTY; err = qman_volatile_dequeue(fq, VDQCR_FLAGS, frmcnt); if (err) { pr_crit("qman_volatile_dequeue() failed\n"); goto failed; } err = do_enqueues(fq); if (err) goto failed; pr_info("VDQCR (%d of %d);\n", NUM_PARTIAL, NUM_ENQUEUES); frmcnt = QM_VDQCR_NUMFRAMES_SET(NUM_PARTIAL); err = qman_volatile_dequeue(fq, VDQCR_FLAGS, frmcnt); if (err) { pr_crit("qman_volatile_dequeue() failed\n"); goto failed; } pr_info("VDQCR (%d of %d);\n", NUM_ENQUEUES - NUM_PARTIAL, NUM_ENQUEUES); frmcnt = QM_VDQCR_NUMFRAMES_SET(NUM_ENQUEUES - NUM_PARTIAL); err = qman_volatile_dequeue(fq, VDQCR_FLAGS, frmcnt); if (err) { pr_err("qman_volatile_dequeue() failed\n"); goto failed; } err = do_enqueues(fq); if (err) goto failed; pr_info("scheduled dequeue (till-empty)\n"); err = qman_schedule_fq(fq); if (err) { pr_crit("qman_schedule_fq() failed\n"); goto failed; } wait_event(waitqueue, sdqcr_complete); /* Retire and OOS the FQ */ err = qman_retire_fq(fq, &flags); if (err < 0) { pr_crit("qman_retire_fq() failed\n"); goto failed; } wait_event(waitqueue, retire_complete); if (flags & QMAN_FQ_STATE_BLOCKOOS) { err = -EIO; pr_crit("leaking frames\n"); goto failed; } err = qman_oos_fq(fq); if (err) { pr_crit("qman_oos_fq() failed\n"); goto failed; } qman_destroy_fq(fq); pr_info("%s(): Finished\n", __func__); return 0; failed: WARN_ON(1); return err; }