Example #1
/* Descriptor for RSA Public operation */
void *caam_rsa_pub_desc(struct rsa_edesc *edesc)
{
	struct rsa_pub_edesc_s *pub_edesc = &edesc->dma_u.rsa_pub_edesc;
	u32 *desc = edesc->hw_desc;
#ifdef CAAM_DEBUG
	u32 i;
#endif

	init_job_desc_pdb(desc, 0, sizeof(struct rsa_pub_desc_s) -
			  2 * CAAM_CMD_SZ);
	append_cmd(desc, (pub_edesc->sg_flgs.e_len << RSA_PDB_E_SHIFT) |
		   pub_edesc->sg_flgs.n_len);
	append_ptr(desc, pub_edesc->f_dma);
	append_ptr(desc, pub_edesc->g_dma);
	append_ptr(desc, pub_edesc->n_dma);
	append_ptr(desc, pub_edesc->e_dma);
	append_cmd(desc, pub_edesc->f_len);
	append_operation(desc, OP_TYPE_UNI_PROTOCOL | OP_PCLID_RSAENC_PUBKEY);

#ifdef CAAM_DEBUG
	for (i = 0; i < desc_len(desc); i++)
		pr_debug("[%d] %x\n", i, desc[i]);
#endif
	return desc;
}
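A minimal sketch of the protocol data block (PDB) that the appends above lay out, inferred purely from the append order. The driver's real definition lives elsewhere (e.g. its pdb.h); the struct and field names here are illustrative only.

/* Inferred mirror of the RSA-encrypt PDB; illustrative only. */
struct rsa_pub_pdb_sketch {
	u32 sgf;		/* (e_len << RSA_PDB_E_SHIFT) | n_len */
	dma_addr_t f_dma;	/* input message */
	dma_addr_t g_dma;	/* output ciphertext */
	dma_addr_t n_dma;	/* RSA modulus n */
	dma_addr_t e_dma;	/* public exponent e */
	u32 f_len;		/* input message length */
} __packed;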
Example #2
/* Descriptor for RSA Private operation Form3 */
void *caam_rsa_priv_f3_desc(struct rsa_edesc *edesc)
{
	u32 *desc = edesc->hw_desc;
	struct rsa_priv_frm3_edesc_s *priv_edesc =
			&edesc->dma_u.rsa_priv_f3_edesc;
#ifdef CAAM_DEBUG
	u32 i;
#endif

	init_job_desc_pdb(desc, 0, sizeof(struct rsa_priv_frm3_desc_s) -
			  2 * CAAM_CMD_SZ);
	append_cmd(desc, priv_edesc->sg_flgs.n_len);
	append_ptr(desc, priv_edesc->g_dma);
	append_ptr(desc, priv_edesc->f_dma);
	append_ptr(desc, priv_edesc->c_dma);
	append_ptr(desc, priv_edesc->p_dma);
	append_ptr(desc, priv_edesc->q_dma);
	append_ptr(desc, priv_edesc->dp_dma);
	append_ptr(desc, priv_edesc->dq_dma);
	append_ptr(desc, priv_edesc->tmp1_dma);
	append_ptr(desc, priv_edesc->tmp2_dma);
	append_cmd(desc, (priv_edesc->q_len << RSA_PDB_Q_SHIFT) |
		   priv_edesc->p_len);
	append_operation(desc, OP_TYPE_UNI_PROTOCOL | OP_PCLID_RSADEC_PRVKEY |
			 RSA_PRIV_KEY_FRM_3);

#ifdef CAAM_DEBUG
	for (i = 0; i < desc_len(desc); i++)
		pr_debug("[%d] %x\n", i, desc[i]);
#endif
	return desc;
}
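Form 3 is the CRT representation of the private key: p, q, dp = d mod (p-1), dq = d mod (q-1), and the coefficient c = q^-1 mod p. A minimal sketch of the PDB implied by the appends above, with inferred, illustrative names:

/* Inferred mirror of the form-3 RSA-decrypt PDB; illustrative only. */
struct rsa_priv_f3_pdb_sketch {
	u32 sgf;			/* n_len */
	dma_addr_t g_dma;		/* input ciphertext */
	dma_addr_t f_dma;		/* output message */
	dma_addr_t c_dma;		/* CRT coefficient c = q^-1 mod p */
	dma_addr_t p_dma;		/* prime factor p of n */
	dma_addr_t q_dma;		/* prime factor q of n */
	dma_addr_t dp_dma;		/* d mod (p - 1) */
	dma_addr_t dq_dma;		/* d mod (q - 1) */
	dma_addr_t tmp1_dma;		/* scratch, sized like p */
	dma_addr_t tmp2_dma;		/* scratch, sized like q */
	u32 p_q_len;			/* (q_len << RSA_PDB_Q_SHIFT) | p_len */
} __packed;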
Example #3
/* DH/ECDH key-agreement CAAM descriptor */
void *caam_dh_key_desc(struct dh_edesc_s *edesc)
{
	u32 *desc = edesc->hw_desc;
	u32 op = OP_TYPE_UNI_PROTOCOL | OP_PCLID_DH;
#ifdef CAAM_DEBUG
	u32 i;
#endif

	init_job_desc_pdb(desc, 0, sizeof(struct dh_key_desc_s) -
			  2 * CAAM_CMD_SZ);
	append_cmd(desc, (edesc->l_len << DH_PDB_L_SHIFT) |
			 (edesc->n_len & DH_PDB_N_MASK));
	append_ptr(desc, edesc->q_dma);
	/* pointer to r (unused) */
	append_ptr(desc, 0);
	append_ptr(desc, edesc->w_dma);
	append_ptr(desc, edesc->s_dma);
	append_ptr(desc, edesc->z_dma);
	if (edesc->req_type == ECDH_COMPUTE_KEY) {
		append_ptr(desc, edesc->ab_dma);
		op |= OP_PCL_PKPROT_ECC;
		if (edesc->curve_type == ECC_BINARY)
			op |= OP_PCL_PKPROT_F2M;
	}
	append_operation(desc, op);

#ifdef CAAM_DEBUG
	pr_debug("DH Descriptor:\n");
	for (i = 0; i < desc_len(desc); i++)
		pr_debug("[%d] %x\n", i, desc[i]);
#endif
	return desc;
}
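Plain DH and ECDH share this descriptor; they differ only in the protocol-info bits of the OPERATION command and the extra ab pointer carrying the curve parameters. The selection logic, pulled into a hypothetical helper for clarity (the logic is copied verbatim from the body above):

static u32 dh_op_word(const struct dh_edesc_s *edesc)
{
	u32 op = OP_TYPE_UNI_PROTOCOL | OP_PCLID_DH;

	if (edesc->req_type == ECDH_COMPUTE_KEY) {
		op |= OP_PCL_PKPROT_ECC;		/* ECC arithmetic */
		if (edesc->curve_type == ECC_BINARY)
			op |= OP_PCL_PKPROT_F2M;	/* GF(2^m) curve */
	}
	return op;
}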
Example #4
/* DSA verify CAAM descriptor */
void *caam_dsa_verify_desc(struct dsa_edesc_s *edesc)
{
	u32 *desc = edesc->hw_desc;
	u32 op = OP_TYPE_UNI_PROTOCOL | OP_PCLID_DSAVERIFY;
#ifdef CAAM_DEBUG
	u32 i;
#endif

	if (edesc->req_type == ECDSA_VERIFY) {
		op |= OP_PCL_PKPROT_ECC;
		if (edesc->curve_type == ECC_BINARY)
			op |= OP_PCL_PKPROT_F2M;

		init_job_desc_pdb(desc, 0, sizeof(struct ecdsa_verify_desc_s) -
				  2 * CAAM_CMD_SZ);
		append_cmd(desc, (edesc->l_len << DSA_PDB_L_SHIFT) |
				 (edesc->n_len & DSA_PDB_N_MASK));
		append_ptr(desc, edesc->q_dma);
		append_ptr(desc, edesc->r_dma);
		append_ptr(desc, edesc->g_dma);
		append_ptr(desc, edesc->key_dma);
		append_ptr(desc, edesc->f_dma);
		append_ptr(desc, edesc->c_dma);
		append_ptr(desc, edesc->d_dma);
		append_ptr(desc, edesc->tmp_dma);
		append_ptr(desc, edesc->ab_dma);
		append_operation(desc, op);
	} else {
		init_job_desc_pdb(desc, 0, sizeof(struct dsa_verify_desc_s) -
				  2 * CAAM_CMD_SZ);
		append_cmd(desc, (edesc->l_len << DSA_PDB_L_SHIFT) |
				 (edesc->n_len & DSA_PDB_N_MASK));
		append_ptr(desc, edesc->q_dma);
		append_ptr(desc, edesc->r_dma);
		append_ptr(desc, edesc->g_dma);
		append_ptr(desc, edesc->key_dma);
		append_ptr(desc, edesc->f_dma);
		append_ptr(desc, edesc->c_dma);
		append_ptr(desc, edesc->d_dma);
		append_ptr(desc, edesc->tmp_dma);
		append_operation(desc, op);
	}

#ifdef CAAM_DEBUG
	pr_debug("DSA Descriptor:\n");
	for (i = 0; i < desc_len(desc); i++)
		pr_debug("[%d] %x\n", i, desc[i]);
#endif
	return desc;
}
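The two branches above differ only in the PDB size and the trailing ab pointer (the curve parameters a and b that ECDSA needs). A hypothetical, behaviour-equivalent refactor that folds them together:

void *caam_dsa_verify_desc_folded(struct dsa_edesc_s *edesc)
{
	u32 *desc = edesc->hw_desc;
	u32 op = OP_TYPE_UNI_PROTOCOL | OP_PCLID_DSAVERIFY;
	bool ecdsa = edesc->req_type == ECDSA_VERIFY;
	size_t pdb_size = ecdsa ? sizeof(struct ecdsa_verify_desc_s) :
				  sizeof(struct dsa_verify_desc_s);

	if (ecdsa) {
		op |= OP_PCL_PKPROT_ECC;
		if (edesc->curve_type == ECC_BINARY)
			op |= OP_PCL_PKPROT_F2M;
	}

	init_job_desc_pdb(desc, 0, pdb_size - 2 * CAAM_CMD_SZ);
	append_cmd(desc, (edesc->l_len << DSA_PDB_L_SHIFT) |
			 (edesc->n_len & DSA_PDB_N_MASK));
	append_ptr(desc, edesc->q_dma);
	append_ptr(desc, edesc->r_dma);
	append_ptr(desc, edesc->g_dma);
	append_ptr(desc, edesc->key_dma);
	append_ptr(desc, edesc->f_dma);
	append_ptr(desc, edesc->c_dma);
	append_ptr(desc, edesc->d_dma);
	append_ptr(desc, edesc->tmp_dma);
	if (ecdsa)
		append_ptr(desc, edesc->ab_dma);	/* curve a and b */
	append_operation(desc, op);
	return desc;
}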
Example #5
/* DSA/ECDSA/DH/ECDH keygen CAAM descriptor */
void *caam_keygen_desc(struct dsa_edesc_s *edesc)
{
	u32 *desc = edesc->hw_desc;
	u32 sgf_len = (edesc->l_len << DSA_PDB_L_SHIFT) |
		      (edesc->n_len & DSA_PDB_N_MASK);
	u32 op = OP_TYPE_UNI_PROTOCOL | OP_PCLID_PUBLICKEYPAIR;
	dma_addr_t g_dma = edesc->g_dma;
#ifdef CAAM_DEBUG
	u32 i;
#endif

	if (edesc->req_type == ECC_KEYGEN) {
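		/*
		 * Erratum A-006899 workaround: reference g through a
		 * scatter/gather entry (SGF_G) rather than a flat buffer.
		 */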
		if (edesc->erratum_A_006899) {
			sgf_len |= DSA_PDB_SGF_G;
			g_dma = edesc->g_sg_dma;
		}

		op |= OP_PCL_PKPROT_ECC;
		if (edesc->curve_type == ECC_BINARY)
			op |= OP_PCL_PKPROT_F2M;

		init_job_desc_pdb(desc, 0, sizeof(struct ecc_keygen_desc_s) -
				  2 * CAAM_CMD_SZ);
		append_cmd(desc, sgf_len);
		append_ptr(desc, edesc->q_dma);
		append_ptr(desc, edesc->r_dma);
		append_ptr(desc, g_dma);
		append_ptr(desc, edesc->s_dma);
		append_ptr(desc, edesc->key_dma);
		append_ptr(desc, edesc->ab_dma);
		append_operation(desc, op);
	} else {
		init_job_desc_pdb(desc, 0, sizeof(struct dlc_keygen_desc_s) -
				  2 * CAAM_CMD_SZ);
		append_cmd(desc, sgf_len);
		append_ptr(desc, edesc->q_dma);
		append_ptr(desc, edesc->r_dma);
		append_ptr(desc, g_dma);
		append_ptr(desc, edesc->s_dma);
		append_ptr(desc, edesc->key_dma);
		append_operation(desc, op);
	}

#ifdef CAAM_DEBUG
	pr_debug("DSA Keygen Descriptor:\n");
	for (i = 0; i < desc_len(desc); i++)
		pr_debug("[%d] %x\n", i, desc[i]);
#endif
	return desc;
}
Example #6
struct caam_drv_ctx *caam_drv_ctx_init(struct device *qidev,
				       int *cpu,
				       u32 *sh_desc)
{
	size_t size;
	u32 num_words;
	dma_addr_t hwdesc;
	struct caam_drv_ctx *drv_ctx;
	const cpumask_t *cpus = qman_affine_cpus();
	static DEFINE_PER_CPU(int, last_cpu);

	num_words = desc_len(sh_desc);
	if (num_words > MAX_SDLEN) {
		dev_err(qidev, "Invalid descriptor len: %d words\n",
			num_words);
		return ERR_PTR(-EINVAL);
	}

	drv_ctx = kzalloc(sizeof(*drv_ctx), GFP_ATOMIC);
	if (!drv_ctx)
		return ERR_PTR(-ENOMEM);

	/*
	 * Initialise the pre-header (set RSLS and SDLEN) and the shared
	 * descriptor, then DMA-map them together.
	 */
	drv_ctx->prehdr[0] = cpu_to_caam32((1 << PREHDR_RSLS_SHIFT) |
					   num_words);
	memcpy(drv_ctx->sh_desc, sh_desc, desc_bytes(sh_desc));
	size = sizeof(drv_ctx->prehdr) + sizeof(drv_ctx->sh_desc);
	hwdesc = dma_map_single(qidev, drv_ctx->prehdr, size,
				DMA_BIDIRECTIONAL);
	if (dma_mapping_error(qidev, hwdesc)) {
		dev_err(qidev, "DMA map error for preheader + shdesc\n");
		kfree(drv_ctx);
		return ERR_PTR(-ENOMEM);
	}
	drv_ctx->context_a = hwdesc;

	/* If the given CPU does not own the portal, choose another one that does */
	if (!cpumask_test_cpu(*cpu, cpus)) {
		int *pcpu = &get_cpu_var(last_cpu);

		*pcpu = cpumask_next(*pcpu, cpus);
		if (*pcpu >= nr_cpu_ids)
			*pcpu = cpumask_first(cpus);
		*cpu = *pcpu;

		put_cpu_var(last_cpu);
	}
	drv_ctx->cpu = *cpu;

	/* Find response FQ hooked with this CPU */
	drv_ctx->rsp_fq = per_cpu(pcpu_qipriv.rsp_fq, drv_ctx->cpu);

	/* Attach request FQ */
	drv_ctx->req_fq = create_caam_req_fq(qidev, drv_ctx->rsp_fq, hwdesc,
					     QMAN_INITFQ_FLAG_SCHED);
	if (unlikely(IS_ERR_OR_NULL(drv_ctx->req_fq))) {
		dev_err(qidev, "create_caam_req_fq failed\n");
		dma_unmap_single(qidev, hwdesc, size, DMA_BIDIRECTIONAL);
		kfree(drv_ctx);
		return ERR_PTR(-ENOMEM);
	}

	drv_ctx->qidev = qidev;
	return drv_ctx;
}
Example #7
int caam_drv_ctx_update(struct caam_drv_ctx *drv_ctx, u32 *sh_desc)
{
	int ret;
	u32 num_words;
	struct qman_fq *new_fq, *old_fq;
	struct device *qidev = drv_ctx->qidev;

	num_words = desc_len(sh_desc);
	if (num_words > MAX_SDLEN) {
		dev_err(qidev, "Invalid descriptor len: %d words\n", num_words);
		return -EINVAL;
	}

	/* Note down older req FQ */
	old_fq = drv_ctx->req_fq;

	/* Create a new req FQ in parked state */
	new_fq = create_caam_req_fq(drv_ctx->qidev, drv_ctx->rsp_fq,
				    drv_ctx->context_a, 0);
	if (unlikely(IS_ERR_OR_NULL(new_fq))) {
		dev_err(qidev, "FQ allocation for shdesc update failed\n");
		return new_fq ? PTR_ERR(new_fq) : -ENOMEM;
	}

	/* Hook up new FQ to context so that new requests keep queuing */
	drv_ctx->req_fq = new_fq;

	/* Empty and remove the older FQ */
	ret = empty_caam_fq(old_fq);
	if (ret) {
		dev_err(qidev, "Old CAAM FQ empty failed: %d\n", ret);

		/* Revert to the old FQ */
		drv_ctx->req_fq = old_fq;

		if (kill_fq(qidev, new_fq))
			dev_warn(qidev, "New CAAM FQ: %u kill failed\n",
				 new_fq->fqid);

		return ret;
	}

	/*
	 * Re-initialise pre-header. Set RSLS and SDLEN.
	 * Update the shared descriptor for driver context.
	 */
	drv_ctx->prehdr[0] = cpu_to_caam32((1 << PREHDR_RSLS_SHIFT) |
					   num_words);
	memcpy(drv_ctx->sh_desc, sh_desc, desc_bytes(sh_desc));
	dma_sync_single_for_device(qidev, drv_ctx->context_a,
				   sizeof(drv_ctx->sh_desc) +
				   sizeof(drv_ctx->prehdr),
				   DMA_BIDIRECTIONAL);

	/* Put the new FQ in scheduled state */
	ret = qman_schedule_fq(new_fq);
	if (ret) {
		dev_err(qidev, "Fail to sched new CAAM FQ, ecode = %d\n", ret);

		/*
		 * Kill the new FQ and revert to the old one. Since the
		 * descriptor is already updated, treat this as success.
		 */

		drv_ctx->req_fq = old_fq;

		if (kill_fq(qidev, new_fq))
			dev_warn(qidev, "New CAAM FQ: %u kill failed\n",
				 new_fq->fqid);
	} else if (kill_fq(qidev, old_fq)) {
		dev_warn(qidev, "Old CAAM FQ: %u kill failed\n", old_fq->fqid);
	}

	return 0;
}
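A condensed sketch of how a caller might drive this pair of functions; setup_ctx and its error handling are hypothetical, and the rekey path is compressed into a single call:

static struct caam_drv_ctx *setup_ctx(struct device *qidev, u32 *sh_desc)
{
	int cpu = smp_processor_id();
	struct caam_drv_ctx *drv_ctx;

	drv_ctx = caam_drv_ctx_init(qidev, &cpu, sh_desc);
	if (IS_ERR(drv_ctx))
		return drv_ctx;	/* never NULL; always ERR_PTR on failure */

	/* Later, e.g. after a rekey: rebuild sh_desc, then push it down. */
	if (caam_drv_ctx_update(drv_ctx, sh_desc))
		dev_warn(qidev, "shared descriptor update failed\n");

	return drv_ctx;
}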
Example #8
/* Returns -1 on error: cannot enqueue because no space is available */
static int jr_enqueue(uint32_t *desc_addr,
	       void (*callback)(uint32_t status, void *arg),
	       void *arg)
{
	struct jr_regs *regs = (struct jr_regs *)CONFIG_SYS_FSL_JR0_ADDR;
	int head = jr.head;
	uint32_t desc_word;
	int length = desc_len(desc_addr);
	int i;
#ifdef CONFIG_PHYS_64BIT
	uint32_t *addr_hi, *addr_lo;
#endif

	/* The descriptor must be submitted to the SEC block in the SEC
	 * block's endianness, so if the core and the SEC block differ in
	 * endianness, each word of the descriptor is byte-swapped here.
	 */
	for (i = 0; i < length; i++) {
		desc_word = desc_addr[i];
		sec_out32((uint32_t *)&desc_addr[i], desc_word);
	}

	phys_addr_t desc_phys_addr = virt_to_phys(desc_addr);

	if (sec_in32(&regs->irsa) == 0 ||
	    CIRC_SPACE(jr.head, jr.tail, jr.size) <= 0)
		return -1;

	jr.info[head].desc_phys_addr = desc_phys_addr;
	jr.info[head].callback = (void *)callback;
	jr.info[head].arg = arg;
	jr.info[head].op_done = 0;

	unsigned long start = (unsigned long)&jr.info[head] &
					~(ARCH_DMA_MINALIGN - 1);
	unsigned long end = ALIGN((unsigned long)&jr.info[head] +
				  sizeof(struct jr_info), ARCH_DMA_MINALIGN);
	flush_dcache_range(start, end);

#ifdef CONFIG_PHYS_64BIT
	/* Write the 64-bit descriptor address to the input ring.
	 * Which 32-bit half comes first depends on the endianness
	 * of the SEC block.
	 */
#ifdef CONFIG_SYS_FSL_SEC_LE
	addr_lo = (uint32_t *)(&jr.input_ring[head]);
	addr_hi = (uint32_t *)(&jr.input_ring[head]) + 1;
#elif defined(CONFIG_SYS_FSL_SEC_BE)
	addr_hi = (uint32_t *)(&jr.input_ring[head]);
	addr_lo = (uint32_t *)(&jr.input_ring[head]) + 1;
#endif /* ifdef CONFIG_SYS_FSL_SEC_LE */

	sec_out32(addr_hi, (uint32_t)(desc_phys_addr >> 32));
	sec_out32(addr_lo, (uint32_t)(desc_phys_addr));

#else
	/* Write the 32-bit descriptor address to the input ring. */
	sec_out32(&jr.input_ring[head], desc_phys_addr);
#endif /* ifdef CONFIG_PHYS_64BIT */

	start = (unsigned long)&jr.input_ring[head] & ~(ARCH_DMA_MINALIGN - 1);
	end = ALIGN((unsigned long)&jr.input_ring[head] +
		     sizeof(dma_addr_t), ARCH_DMA_MINALIGN);
	flush_dcache_range(start, end);

	jr.head = (head + 1) & (jr.size - 1);

	/* Invalidate output ring */
	start = (unsigned long)jr.output_ring &
					~(ARCH_DMA_MINALIGN - 1);
	end = ALIGN((unsigned long)jr.output_ring + jr.op_size,
		     ARCH_DMA_MINALIGN);
	invalidate_dcache_range(start, end);

	sec_out32(&regs->irja, 1);

	return 0;
}
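A condensed sketch of a caller, modelled on U-Boot's run_descriptor_jr(): the callback records the SEC status word, and the busy-wait below stands in for the real loop, which also drains the output ring (jr_dequeue). The names are illustrative.

struct desc_result {
	uint32_t status;	/* SEC job status word */
	int done;
};

static void desc_done(uint32_t status, void *arg)
{
	struct desc_result *res = arg;

	res->status = status;
	res->done = 1;
}

static int run_job(uint32_t *desc)
{
	struct desc_result res = {0};

	if (jr_enqueue(desc, desc_done, &res) < 0)
		return -1;	/* ring full or JR disabled */

	while (!res.done)
		;	/* real code services the output ring here */

	return res.status ? -1 : 0;
}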
Example #9
/*
 * run_descriptor_deco0 - runs a descriptor on DECO0, under direct control of
 *			  the software (no JR/QI used).
 * @ctrldev - pointer to device
 * @desc - descriptor to be executed
 * @status - descriptor status, after being run
 *
 * Return: - 0 if no error occurred
 *	   - -ENODEV if the DECO couldn't be acquired
 *	   - -EAGAIN if an error occurred while executing the descriptor
 */
static inline int run_descriptor_deco0(struct device *ctrldev, u32 *desc,
					u32 *status)
{
	struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctrldev);
	struct caam_ctrl __iomem *ctrl = ctrlpriv->ctrl;
	struct caam_deco __iomem *deco = ctrlpriv->deco;
	unsigned int timeout = 100000;
	u32 deco_dbg_reg, flags;
	int i;

	if (ctrlpriv->virt_en == 1) {
		clrsetbits_32(&ctrl->deco_rsr, 0, DECORSR_JR0);

		while (!(rd_reg32(&ctrl->deco_rsr) & DECORSR_VALID) &&
		       --timeout)
			cpu_relax();

		timeout = 100000;
	}

	clrsetbits_32(&ctrl->deco_rq, 0, DECORR_RQD0ENABLE);

	while (!(rd_reg32(&ctrl->deco_rq) & DECORR_DEN0) &&
	       --timeout)
		cpu_relax();

	if (!timeout) {
		dev_err(ctrldev, "failed to acquire DECO 0\n");
		clrsetbits_32(&ctrl->deco_rq, DECORR_RQD0ENABLE, 0);
		return -ENODEV;
	}

	for (i = 0; i < desc_len(desc); i++)
		wr_reg32(&deco->descbuf[i], caam32_to_cpu(*(desc + i)));

	flags = DECO_JQCR_WHL;
	/*
	 * If the descriptor is at least 4 words long, the FOUR bit
	 * in the JR control register must be set.
	 */
	if (desc_len(desc) >= 4)
		flags |= DECO_JQCR_FOUR;

	/* Instruct the DECO to execute it */
	clrsetbits_32(&deco->jr_ctl_hi, 0, flags);

	timeout = 10000000;
	do {
		deco_dbg_reg = rd_reg32(&deco->desc_dbg);
		/*
		 * If an error occurred in the descriptor, then
		 * the DECO status field will be set to 0x0D
		 */
		if ((deco_dbg_reg & DESC_DBG_DECO_STAT_MASK) ==
		    DESC_DBG_DECO_STAT_HOST_ERR)
			break;
		cpu_relax();
	} while ((deco_dbg_reg & DESC_DBG_DECO_STAT_VALID) && --timeout);

	*status = rd_reg32(&deco->op_status_hi) &
		  DECO_OP_STATUS_HI_ERR_MASK;

	if (ctrlpriv->virt_en == 1)
		clrsetbits_32(&ctrl->deco_rsr, DECORSR_JR0, 0);

	/* Mark the DECO as free */
	clrsetbits_32(&ctrl->deco_rq, DECORR_RQD0ENABLE, 0);

	if (!timeout)
		return -EAGAIN;

	return 0;
}
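A minimal sketch of a caller, loosely following the kernel's RNG-instantiation path in ctrl.c, which runs its descriptors this way; run_simple_job is hypothetical:

static int run_simple_job(struct device *ctrldev, u32 *desc)
{
	u32 status = 0;
	int ret;

	ret = run_descriptor_deco0(ctrldev, desc, &status);
	if (ret)
		return ret;	/* -ENODEV or -EAGAIN from above */

	/* A non-zero status word means the job completed with an error. */
	return status ? -EIO : 0;
}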