void caam_drv_ctx_rel(struct caam_drv_ctx *drv_ctx)
{
	if (IS_ERR_OR_NULL(drv_ctx))
		return;

	/* Remove request FQ */
	if (kill_fq(drv_ctx->qidev, drv_ctx->req_fq))
		dev_err(drv_ctx->qidev, "Crypto session req FQ kill failed\n");

	dma_unmap_single(drv_ctx->qidev, drv_ctx->context_a,
			 sizeof(drv_ctx->sh_desc) + sizeof(drv_ctx->prehdr),
			 DMA_BIDIRECTIONAL);
	kfree(drv_ctx);
}
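Because caam_drv_ctx_rel() checks IS_ERR_OR_NULL() itself, callers can release every slot of a session unconditionally at teardown. A minimal sketch of such an exit path, loosely modeled on the caamalg_qi style; the caam_ctx layout and NUM_OP are illustrative assumptions, not this driver's actual definitions:

/*
 * Hypothetical teardown helper: releases every per-operation driver
 * context owned by a session. ctx->drv_ctx[] and NUM_OP are assumed
 * for illustration only.
 */
static void example_session_exit(struct caam_ctx *ctx)
{
	int i;

	for (i = 0; i < NUM_OP; i++)
		caam_drv_ctx_rel(ctx->drv_ctx[i]);	/* safe on ERR/NULL */
}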
int caam_qi_shutdown(struct device *qidev)
{
	struct caam_qi_priv *priv = dev_get_drvdata(qidev);
	int i, ret;
	const cpumask_t *cpus = qman_affine_cpus();
	struct cpumask old_cpumask = *tsk_cpus_allowed(current);

	for_each_cpu(i, cpus) {
		struct napi_struct *irqtask;

		irqtask = &per_cpu_ptr(&pcpu_qipriv.caam_napi, i)->irqtask;
		napi_disable(irqtask);
		netif_napi_del(irqtask);

		if (kill_fq(qidev, &per_cpu(pcpu_qipriv.rsp_fq, i)))
			dev_err(qidev, "Rsp FQ kill failed, cpu: %d\n", i);
	}

	/*
	 * The QMan driver requires CGRs to be deleted from the same CPU on
	 * which they were instantiated, so make module removal execute on
	 * the CPU on which the module was originally inserted.
	 */
	set_cpus_allowed_ptr(current, get_cpu_mask(mod_init_cpu));

	ret = qman_delete_cgr(&priv->rsp_cgr);
	if (ret)
		dev_err(qidev, "Delete response CGR failed: %d\n", ret);
	else
		qman_release_cgrid(priv->rsp_cgr.cgrid);

	if (qi_cache)
		kmem_cache_destroy(qi_cache);

	/* Now that we're done with the CGRs, restore the cpus allowed mask */
	set_cpus_allowed_ptr(current, &old_cpumask);

	platform_device_unregister(priv->qi_pdev);
	return ret;
}
void caam_qi_shutdown(struct device *qidev)
{
	int i;
	struct caam_qi_priv *priv = dev_get_drvdata(qidev);
	const cpumask_t *cpus = qman_affine_cpus();

	for_each_cpu(i, cpus) {
		struct napi_struct *irqtask;

		irqtask = &per_cpu_ptr(&pcpu_qipriv.caam_napi, i)->irqtask;
		napi_disable(irqtask);
		netif_napi_del(irqtask);

		if (kill_fq(qidev, per_cpu(pcpu_qipriv.rsp_fq, i)))
			dev_err(qidev, "Rsp FQ kill failed, cpu: %d\n", i);
	}

	qman_delete_cgr_safe(&priv->cgr);
	qman_release_cgrid(priv->cgr.cgrid);

	kmem_cache_destroy(qi_cache);

	platform_device_unregister(priv->qi_pdev);
}
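This newer variant can drop the cpumask save/restore dance of the earlier one because qman_delete_cgr_safe() satisfies the QMan same-CPU requirement internally, hopping to the CGR's creation CPU via an IPI. A sketch of that pattern, assuming a hypothetical delete_cgr_on_creation_cpu() helper and externally tracked create_cpu; this is not the QMan driver's actual code:

#include <linux/smp.h>

/* Illustrative callback: delete the CGR passed in via 'info' */
static void do_delete_cgr(void *info)
{
	qman_delete_cgr(info);
}

/*
 * Run the CGR deletion on the CPU that created it, mirroring the idea
 * behind qman_delete_cgr_safe(). create_cpu bookkeeping is assumed.
 */
static void delete_cgr_on_creation_cpu(struct qman_cgr *cgr, int create_cpu)
{
	preempt_disable();
	if (create_cpu != smp_processor_id()) {
		/* Execute synchronously on the creating CPU */
		smp_call_function_single(create_cpu, do_delete_cgr, cgr, 1);
		preempt_enable();
		return;
	}
	qman_delete_cgr(cgr);
	preempt_enable();
}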
int caam_drv_ctx_update(struct caam_drv_ctx *drv_ctx, u32 *sh_desc)
{
	int ret;
	u32 num_words;
	struct qman_fq *new_fq, *old_fq;
	struct device *qidev = drv_ctx->qidev;

	num_words = desc_len(sh_desc);
	if (num_words > MAX_SDLEN) {
		dev_err(qidev, "Invalid descriptor len: %d words\n",
			num_words);
		return -EINVAL;
	}

	/* Note down the older req FQ */
	old_fq = drv_ctx->req_fq;

	/* Create a new req FQ in parked state */
	new_fq = create_caam_req_fq(drv_ctx->qidev, drv_ctx->rsp_fq,
				    drv_ctx->context_a, 0);
	if (unlikely(IS_ERR(new_fq))) {
		dev_err(qidev, "FQ allocation for shdesc update failed\n");
		return PTR_ERR(new_fq);
	}

	/* Hook up the new FQ to the context so that new requests keep queuing */
	drv_ctx->req_fq = new_fq;

	/* Empty and remove the older FQ */
	ret = empty_caam_fq(old_fq);
	if (ret) {
		dev_err(qidev, "Old CAAM FQ empty failed: %d\n", ret);

		/* We can revert to the older FQ */
		drv_ctx->req_fq = old_fq;

		if (kill_fq(qidev, new_fq))
			dev_warn(qidev, "New CAAM FQ: %u kill failed\n",
				 new_fq->fqid);

		return ret;
	}

	/*
	 * Re-initialise the pre-header. Set RSLS and SDLEN.
	 * Update the shared descriptor for the driver context.
	 */
	drv_ctx->prehdr[0] = cpu_to_caam32((1 << PREHDR_RSLS_SHIFT) |
					   num_words);
	memcpy(drv_ctx->sh_desc, sh_desc, desc_bytes(sh_desc));
	dma_sync_single_for_device(qidev, drv_ctx->context_a,
				   sizeof(drv_ctx->sh_desc) +
				   sizeof(drv_ctx->prehdr),
				   DMA_BIDIRECTIONAL);

	/* Put the new FQ in scheduled state */
	ret = qman_schedule_fq(new_fq);
	if (ret) {
		dev_err(qidev, "Fail to sched new CAAM FQ, ecode = %d\n", ret);

		/*
		 * We can kill the new FQ and revert to the old one. Since the
		 * descriptor is already modified, treat this as a success case.
		 */
		drv_ctx->req_fq = old_fq;

		if (kill_fq(qidev, new_fq))
			dev_warn(qidev, "New CAAM FQ: %u kill failed\n",
				 new_fq->fqid);
	} else if (kill_fq(qidev, old_fq)) {
		dev_warn(qidev, "Old CAAM FQ: %u kill failed\n",
			 old_fq->fqid);
	}

	return 0;
}
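The typical trigger for caam_drv_ctx_update() is a key change: the caller rebuilds the shared descriptor and pushes it through the update so in-flight requests drain on the old FQ while new requests queue behind the updated descriptor. A minimal sketch of such a setkey fragment, loosely modeled on the caamalg_qi style; the caam_ctx layout, the ENCRYPT index and the sh_desc_enc field are assumptions for illustration:

/*
 * Illustrative setkey fragment: ctx->drv_ctx[], ENCRYPT and
 * ctx->sh_desc_enc are assumed names, not guaranteed to match this
 * driver's structures.
 */
static int example_setkey_update(struct caam_ctx *ctx, struct device *dev)
{
	int ret = 0;

	/* Only live sessions have a driver context to refresh */
	if (ctx->drv_ctx[ENCRYPT]) {
		ret = caam_drv_ctx_update(ctx->drv_ctx[ENCRYPT],
					  ctx->sh_desc_enc);
		if (ret)
			dev_err(dev, "driver enc context update failed\n");
	}

	return ret;
}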