Example #1
int
aesni_cipher_process(struct aesni_session *ses, struct cryptodesc *enccrd,
    struct cryptop *crp)
{
	struct thread *td;
	uint8_t *buf;
	int error, allocated;

	buf = aesni_cipher_alloc(enccrd, crp, &allocated);
	if (buf == NULL) {
		error = ENOMEM;
		goto out;
	}

	td = curthread;
	error = fpu_kern_enter(td, &ses->fpu_ctx, FPU_KERN_NORMAL);
	if (error != 0)
		goto out1;

	if ((enccrd->crd_flags & CRD_F_ENCRYPT) != 0) {
		if ((enccrd->crd_flags & CRD_F_IV_EXPLICIT) != 0)
			bcopy(enccrd->crd_iv, ses->iv, AES_BLOCK_LEN);

		if ((enccrd->crd_flags & CRD_F_IV_PRESENT) == 0)
			crypto_copyback(crp->crp_flags, crp->crp_buf,
			    enccrd->crd_inject, AES_BLOCK_LEN, ses->iv);

		aesni_encrypt_cbc(ses->rounds, ses->enc_schedule,
		    enccrd->crd_len, buf, buf, ses->iv);
	} else {
		if ((enccrd->crd_flags & CRD_F_IV_EXPLICIT) != 0)
			bcopy(enccrd->crd_iv, ses->iv, AES_BLOCK_LEN);
		else
			crypto_copydata(crp->crp_flags, crp->crp_buf,
			    enccrd->crd_inject, AES_BLOCK_LEN, ses->iv);
		aesni_decrypt_cbc(ses->rounds, ses->dec_schedule,
		    enccrd->crd_len, buf, ses->iv);
	}
	fpu_kern_leave(td, &ses->fpu_ctx);
	if (allocated)
		crypto_copyback(crp->crp_flags, crp->crp_buf, enccrd->crd_skip,
		    enccrd->crd_len, buf);
	if ((enccrd->crd_flags & CRD_F_ENCRYPT) != 0)
		crypto_copydata(crp->crp_flags, crp->crp_buf,
		    enccrd->crd_skip + enccrd->crd_len - AES_BLOCK_LEN,
		    AES_BLOCK_LEN, ses->iv);
 out1:
	if (allocated) {
		bzero(buf, enccrd->crd_len);
		free(buf, M_AESNI);
	}
 out:
	return (error);
}
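
The IV handling above follows the usual OCF convention: on encryption, CRD_F_IV_EXPLICIT means the caller supplied the IV in crd_iv, and unless CRD_F_IV_PRESENT is set the driver must write the IV into the request buffer at crd_inject; on decryption, the IV comes either from crd_iv or from the buffer itself. A minimal sketch of that convention, factored into a hypothetical helper (ocf_setup_iv is illustrative, not part of the OCF API; drivers without a per-session IV generate a random one on encryption instead, as Example #4 shows):

/*
 * Hypothetical helper illustrating the OCF IV convention used by the
 * drivers in these examples.  `iv' must hold at least `ivlen' bytes.
 */
static void
ocf_setup_iv(struct cryptodesc *crd, struct cryptop *crp,
    uint8_t *iv, int ivlen)
{
	if (crd->crd_flags & CRD_F_ENCRYPT) {
		/* The caller may supply the IV explicitly... */
		if (crd->crd_flags & CRD_F_IV_EXPLICIT)
			bcopy(crd->crd_iv, iv, ivlen);
		/* ...and unless it is already in the buffer, write it there. */
		if ((crd->crd_flags & CRD_F_IV_PRESENT) == 0)
			crypto_copyback(crp->crp_flags, crp->crp_buf,
			    crd->crd_inject, ivlen, iv);
	} else {
		/* Decrypt: the IV is either explicit or read from the buffer. */
		if (crd->crd_flags & CRD_F_IV_EXPLICIT)
			bcopy(crd->crd_iv, iv, ivlen);
		else
			crypto_copydata(crp->crp_flags, crp->crp_buf,
			    crd->crd_inject, ivlen, iv);
	}
}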
Example #2
/*
 * Compute keyed-hash authenticator.
 */
static int
padlock_authcompute(struct padlock_session *ses, struct cryptodesc *crd,
    caddr_t buf, int flags)
{
	u_char hash[HASH_MAX_LEN];
	struct auth_hash *axf;
	union authctx ctx;
	int error;

	axf = ses->ses_axf;

	padlock_copy_ctx(axf, ses->ses_ictx, &ctx);
	error = crypto_apply(flags, buf, crd->crd_skip, crd->crd_len,
	    (int (*)(void *, void *, unsigned int))axf->Update, (caddr_t)&ctx);
	if (error != 0) {
		padlock_free_ctx(axf, &ctx);
		return (error);
	}
	axf->Final(hash, &ctx);

	padlock_copy_ctx(axf, ses->ses_octx, &ctx);
	axf->Update(&ctx, hash, axf->hashsize);
	axf->Final(hash, &ctx);

	/* Inject the authentication data */
	crypto_copyback(flags, buf, crd->crd_inject,
	    ses->ses_mlen == 0 ? axf->hashsize : ses->ses_mlen, hash);
	return (0);
}
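
The function above evaluates HMAC from precomputed contexts: ses_ictx already has key^ipad absorbed and ses_octx has key^opad, so the MAC is H(key^opad || H(key^ipad || data)). A minimal sketch of that scheme under the same assumption (the hash_ops names are hypothetical stand-ins, not the OCF auth_hash interface):

/*
 * HMAC from precomputed inner/outer contexts.  `scratch' is a
 * caller-provided context-sized work area.
 */
struct hash_ops {
	void (*copy)(void *dst, const void *src);	/* clone a context */
	void (*update)(void *ctx, const unsigned char *p, unsigned int len);
	void (*final)(unsigned char *digest, void *ctx);
	unsigned int hashsize;
};

static void
hmac_from_precomputed(const struct hash_ops *ops, const void *ictx,
    const void *octx, void *scratch, const unsigned char *data,
    unsigned int len, unsigned char *mac)
{
	unsigned char inner[HASH_MAX_LEN];

	ops->copy(scratch, ictx);		/* start from H(key ^ ipad) */
	ops->update(scratch, data, len);
	ops->final(inner, scratch);		/* inner = H(key^ipad || data) */

	ops->copy(scratch, octx);		/* start from H(key ^ opad) */
	ops->update(scratch, inner, ops->hashsize);
	ops->final(mac, scratch);		/* mac = H(key^opad || inner) */
}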
Example #3
static int
xlp_copyiv(struct xlp_sec_softc *sc, struct xlp_sec_command *cmd,
    struct cryptodesc *enccrd)
{
	unsigned int ivlen = 0;
	int session;
	struct cryptop *crp = NULL;

	crp = cmd->crp;
	session = cmd->session_num;

	if (enccrd->crd_alg != CRYPTO_ARC4) {
		ivlen = ((enccrd->crd_alg == CRYPTO_AES_CBC) ?
		    XLP_SEC_AES_IV_LENGTH : XLP_SEC_DES_IV_LENGTH);
		if (enccrd->crd_flags & CRD_F_ENCRYPT) {
			if (enccrd->crd_flags & CRD_F_IV_EXPLICIT) {
				bcopy(enccrd->crd_iv, cmd->iv, ivlen);
			} else {
				bcopy(sc->sc_sessions[session].ses_iv, cmd->iv,
				    ivlen);
			}
			if ((enccrd->crd_flags & CRD_F_IV_PRESENT) == 0) {
				crypto_copyback(crp->crp_flags,
				    crp->crp_buf, enccrd->crd_inject,
				    ivlen, cmd->iv);
			}
		} else {
			if (enccrd->crd_flags & CRD_F_IV_EXPLICIT) {
				bcopy(enccrd->crd_iv, cmd->iv, ivlen);
			} else {
				crypto_copydata(crp->crp_flags, crp->crp_buf,
				    enccrd->crd_inject, ivlen, cmd->iv);
			}
		}
	}
	return (0);
}
Example #4
/*
 * Apply a symmetric encryption/decryption algorithm.
 */
static int
swcr_encdec(struct cryptodesc *crd, struct swcr_data *sw, caddr_t buf,
    int flags)
{
	unsigned char iv[EALG_MAX_BLOCK_LEN], blk[EALG_MAX_BLOCK_LEN], *idat;
	unsigned char *ivp, piv[EALG_MAX_BLOCK_LEN];
	struct enc_xform *exf;
	int i, k, j, blks;

	exf = sw->sw_exf;
	blks = exf->blocksize;

	/* Check for non-padded data */
	if (crd->crd_len % blks)
		return EINVAL;

	/* Initialize the IV */
	if (crd->crd_flags & CRD_F_ENCRYPT) {
		/* IV explicitly provided ? */
		if (crd->crd_flags & CRD_F_IV_EXPLICIT)
			bcopy(crd->crd_iv, iv, blks);
		else
			arc4rand(iv, blks, 0);

		/* Do we need to write the IV? */
		if (!(crd->crd_flags & CRD_F_IV_PRESENT))
			crypto_copyback(flags, buf, crd->crd_inject, blks, iv);

	} else {	/* Decryption */
			/* IV explicitly provided ? */
		if (crd->crd_flags & CRD_F_IV_EXPLICIT)
			bcopy(crd->crd_iv, iv, blks);
		else {
			/* Get IV off buf */
			crypto_copydata(flags, buf, crd->crd_inject, blks, iv);
		}
	}

	if (crd->crd_flags & CRD_F_KEY_EXPLICIT) {
		int error; 

		if (sw->sw_kschedule)
			exf->zerokey(&(sw->sw_kschedule));
		error = exf->setkey(&sw->sw_kschedule,
				crd->crd_key, crd->crd_klen / 8);
		if (error)
			return (error);
	}

	ivp = iv;

	/*
	 * xforms that provide a reinit method perform all IV
	 * handling themselves.
	 */
	if (exf->reinit)
		exf->reinit(sw->sw_kschedule, iv);

	if (flags & CRYPTO_F_IMBUF) {
		struct mbuf *m = (struct mbuf *) buf;

		/* Find beginning of data */
		m = m_getptr(m, crd->crd_skip, &k);
		if (m == NULL)
			return EINVAL;

		i = crd->crd_len;

		while (i > 0) {
			/*
			 * If there's insufficient data at the end of
			 * an mbuf, we have to do some copying.
			 */
			if (m->m_len < k + blks && m->m_len != k) {
				m_copydata(m, k, blks, blk);

				/* Actual encryption/decryption */
				if (exf->reinit) {
					if (crd->crd_flags & CRD_F_ENCRYPT) {
						exf->encrypt(sw->sw_kschedule,
						    blk);
					} else {
						exf->decrypt(sw->sw_kschedule,
						    blk);
					}
				} else if (crd->crd_flags & CRD_F_ENCRYPT) {
					/* XOR with previous block */
					for (j = 0; j < blks; j++)
						blk[j] ^= ivp[j];

					exf->encrypt(sw->sw_kschedule, blk);

					/*
					 * Keep encrypted block for XOR'ing
					 * with next block
					 */
					bcopy(blk, iv, blks);
					ivp = iv;
				} else {	/* decrypt */
					/*
					 * Keep encrypted block for XOR'ing
					 * with next block
					 */
					if (ivp == iv)
						bcopy(blk, piv, blks);
					else
						bcopy(blk, iv, blks);

					exf->decrypt(sw->sw_kschedule, blk);

					/* XOR with previous block */
					for (j = 0; j < blks; j++)
						blk[j] ^= ivp[j];

					if (ivp == iv)
						bcopy(piv, iv, blks);
					else
						ivp = iv;
				}

				/* Copy back decrypted block */
				m_copyback(m, k, blks, blk);

				/* Advance pointer */
				m = m_getptr(m, k + blks, &k);
				if (m == NULL)
					return EINVAL;

				i -= blks;

				/* Could be done... */
				if (i == 0)
					break;
			}

			/* Skip possibly empty mbufs */
			if (k == m->m_len) {
				for (m = m->m_next; m && m->m_len == 0;
				    m = m->m_next)
					;
				k = 0;
			}

			/* Sanity check */
			if (m == NULL)
				return EINVAL;

			/*
			 * Warning: idat may point to garbage here, but
			 * we only use it in the while() loop below, and
			 * only if there is indeed enough data.
			 */
			idat = mtod(m, unsigned char *) + k;

			while (m->m_len >= k + blks && i > 0) {
				if (exf->reinit) {
					if (crd->crd_flags & CRD_F_ENCRYPT) {
						exf->encrypt(sw->sw_kschedule,
						    idat);
					} else {
						exf->decrypt(sw->sw_kschedule,
						    idat);
					}
				} else if (crd->crd_flags & CRD_F_ENCRYPT) {
					/* XOR with previous block/IV */
					for (j = 0; j < blks; j++)
						idat[j] ^= ivp[j];

					exf->encrypt(sw->sw_kschedule, idat);
					ivp = idat;
				} else {	/* decrypt */
					/*
					 * Keep encrypted block to be used
					 * in next block's processing.
					 */
					if (ivp == iv)
						bcopy(idat, piv, blks);
					else
						bcopy(idat, iv, blks);

					exf->decrypt(sw->sw_kschedule, idat);

					/* XOR with previous block/IV */
					for (j = 0; j < blks; j++)
						idat[j] ^= ivp[j];

					if (ivp == iv)
						bcopy(piv, iv, blks);
					else
						ivp = iv;
				}

				idat += blks;
				k += blks;
				i -= blks;
			}
		}

		return 0; /* Done with mbuf encryption/decryption */
	} else if (flags & CRYPTO_F_IOV) {
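
The XOR/bcopy dance above implements CBC in place; the piv buffer exists because, when decrypting in place, the current ciphertext block must survive until after the XOR so it can chain into the next block. A simplified sketch of that chaining over a contiguous buffer (blkfn is an assumption standing in for exf->encrypt or exf->decrypt, already bound to the key schedule; not the OCF interface):

/*
 * In-place CBC over a contiguous buffer.  `len' must be a multiple
 * of the block size `blks'.
 */
static void
cbc_inplace(unsigned char *buf, int len, int blks, unsigned char *iv,
    int encrypt, void (*blkfn)(unsigned char *))
{
	unsigned char prev[EALG_MAX_BLOCK_LEN], tmp[EALG_MAX_BLOCK_LEN];
	int i, j;

	bcopy(iv, prev, blks);
	for (i = 0; i < len; i += blks) {
		if (encrypt) {
			for (j = 0; j < blks; j++)
				buf[i + j] ^= prev[j];	/* XOR with previous block */
			blkfn(buf + i);
			bcopy(buf + i, prev, blks);	/* chain the new ciphertext */
		} else {
			bcopy(buf + i, tmp, blks);	/* save ciphertext first */
			blkfn(buf + i);
			for (j = 0; j < blks; j++)
				buf[i + j] ^= prev[j];	/* XOR with previous ciphertext */
			bcopy(tmp, prev, blks);		/* chain the saved ciphertext */
		}
	}
}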
Example #5
static int
pasemi_process(device_t dev, struct cryptop *crp, int hint)
{

	int err = 0, ivsize, srclen = 0, reinit = 0, reinit_size = 0, chsel;
	struct pasemi_softc *sc = device_get_softc(dev);
	struct cryptodesc *crd1, *crd2, *maccrd, *enccrd;
	caddr_t ivp;
	struct pasemi_desc init_desc, work_desc;
	struct pasemi_session *ses;
	struct sk_buff *skb;
	struct uio *uiop;
	unsigned long flags;
	struct pasemi_fnu_txring *txring;

	DPRINTF("%s()\n", __FUNCTION__);

	if (crp == NULL || crp->crp_callback == NULL || sc == NULL)
		return -EINVAL;

	crp->crp_etype = 0;
	if (PASEMI_SESSION(crp->crp_sid) >= sc->sc_nsessions)
		return -EINVAL;

	ses = sc->sc_sessions[PASEMI_SESSION(crp->crp_sid)];

	crd1 = crp->crp_desc;
	if (crd1 == NULL) {
		err = -EINVAL;
		goto errout;
	}
	crd2 = crd1->crd_next;

	if (ALG_IS_SIG(crd1->crd_alg)) {
		maccrd = crd1;
		if (crd2 == NULL)
			enccrd = NULL;
		else if (ALG_IS_CIPHER(crd2->crd_alg) &&
			 (crd2->crd_flags & CRD_F_ENCRYPT) == 0)
			enccrd = crd2;
		else
			goto erralg;
	} else if (ALG_IS_CIPHER(crd1->crd_alg)) {
		enccrd = crd1;
		if (crd2 == NULL)
			maccrd = NULL;
		else if (ALG_IS_SIG(crd2->crd_alg) &&
			 (crd1->crd_flags & CRD_F_ENCRYPT))
			maccrd = crd2;
		else
			goto erralg;
	} else
		goto erralg;

	chsel = ses->chan;

	txring = &sc->tx[chsel];

	if (enccrd && !maccrd) {
		if (enccrd->crd_alg == CRYPTO_ARC4)
			reinit = 1;
		reinit_size = 0x40;
		srclen = crp->crp_ilen;

		pasemi_desc_start(&work_desc, XCT_FUN_O | XCT_FUN_I
				  | XCT_FUN_FUN(chsel));
		if (enccrd->crd_flags & CRD_F_ENCRYPT)
			pasemi_desc_hdr(&work_desc, XCT_FUN_CRM_ENC);
		else
			pasemi_desc_hdr(&work_desc, XCT_FUN_CRM_DEC);
	} else if (enccrd && maccrd) {
		if (enccrd->crd_alg == CRYPTO_ARC4)
			reinit = 1;
		reinit_size = 0x68;

		if (enccrd->crd_flags & CRD_F_ENCRYPT) {
			/* Encrypt -> Authenticate */
			pasemi_desc_start(&work_desc, XCT_FUN_O | XCT_FUN_I | XCT_FUN_CRM_ENC_SIG
					  | XCT_FUN_A | XCT_FUN_FUN(chsel));
			srclen = maccrd->crd_skip + maccrd->crd_len;
		} else {
			/* Authenticate -> Decrypt */
			pasemi_desc_start(&work_desc, XCT_FUN_O | XCT_FUN_I | XCT_FUN_CRM_SIG_DEC
					  | XCT_FUN_24BRES | XCT_FUN_FUN(chsel));
			pasemi_desc_build(&work_desc, 0);
			pasemi_desc_build(&work_desc, 0);
			pasemi_desc_build(&work_desc, 0);
			work_desc.postop = PASEMI_CHECK_SIG;
			srclen = crp->crp_ilen;
		}

		pasemi_desc_hdr(&work_desc, XCT_FUN_SHL(maccrd->crd_skip / 4));
		pasemi_desc_hdr(&work_desc, XCT_FUN_CHL(enccrd->crd_skip - maccrd->crd_skip));
	} else if (!enccrd && maccrd) {
		srclen = maccrd->crd_len;

		pasemi_desc_start(&init_desc,
				  XCT_CTRL_HDR(chsel, 0x58, DMA_FN_HKEY0));
		pasemi_desc_build(&init_desc,
				  XCT_FUN_SRC_PTR(0x58, ((struct pasemi_session *)ses->dma_addr)->hkey));

		pasemi_desc_start(&work_desc, XCT_FUN_O | XCT_FUN_I | XCT_FUN_CRM_SIG
				  | XCT_FUN_A | XCT_FUN_FUN(chsel));
	}

	if (enccrd) {
		switch (enccrd->crd_alg) {
		case CRYPTO_3DES_CBC:
			pasemi_desc_hdr(&work_desc, XCT_FUN_ALG_3DES |
					XCT_FUN_BCM_CBC);
			ivsize = sizeof(u64);
			break;
		case CRYPTO_DES_CBC:
			pasemi_desc_hdr(&work_desc, XCT_FUN_ALG_DES |
					XCT_FUN_BCM_CBC);
			ivsize = sizeof(u64);
			break;
		case CRYPTO_AES_CBC:
			pasemi_desc_hdr(&work_desc, XCT_FUN_ALG_AES |
					XCT_FUN_BCM_CBC);
			ivsize = 2 * sizeof(u64);
			break;
		case CRYPTO_ARC4:
			pasemi_desc_hdr(&work_desc, XCT_FUN_ALG_ARC);
			ivsize = 0;
			break;
		default:
			printk(DRV_NAME ": unimplemented enccrd->crd_alg %d\n",
			       enccrd->crd_alg);
			err = -EINVAL;
			goto errout;
		}

		ivp = (ivsize == sizeof(u64)) ? (caddr_t) &ses->civ[1] : (caddr_t) &ses->civ[0];
		if (enccrd->crd_flags & CRD_F_ENCRYPT) {
			if (enccrd->crd_flags & CRD_F_IV_EXPLICIT)
				memcpy(ivp, enccrd->crd_iv, ivsize);
			/* If IV is not present in the buffer already, it has to be copied there */
			if ((enccrd->crd_flags & CRD_F_IV_PRESENT) == 0)
				crypto_copyback(crp->crp_flags, crp->crp_buf,
						enccrd->crd_inject, ivsize, ivp);
		} else {
			if (enccrd->crd_flags & CRD_F_IV_EXPLICIT)
				/* IV is provided explicitly in the descriptor */
				memcpy(ivp, enccrd->crd_iv, ivsize);
			else
				/* IV is provided in the packet */
				crypto_copydata(crp->crp_flags, crp->crp_buf,
						enccrd->crd_inject, ivsize,
						ivp);
		}
	}

	if (maccrd) {
		switch (maccrd->crd_alg) {
		case CRYPTO_MD5:
			pasemi_desc_hdr(&work_desc, XCT_FUN_SIG_MD5 |
					XCT_FUN_HSZ((crp->crp_ilen - maccrd->crd_inject) / 4));
			break;
		case CRYPTO_SHA1:
			pasemi_desc_hdr(&work_desc, XCT_FUN_SIG_SHA1 |
					XCT_FUN_HSZ((crp->crp_ilen - maccrd->crd_inject) / 4));
			break;
		case CRYPTO_MD5_HMAC:
			pasemi_desc_hdr(&work_desc, XCT_FUN_SIG_HMAC_MD5 |
					XCT_FUN_HSZ((crp->crp_ilen - maccrd->crd_inject) / 4));
			break;
		case CRYPTO_SHA1_HMAC:
			pasemi_desc_hdr(&work_desc, XCT_FUN_SIG_HMAC_SHA1 |
					XCT_FUN_HSZ((crp->crp_ilen - maccrd->crd_inject) / 4));
			break;
		default:
			printk(DRV_NAME ": unimplemented maccrd->crd_alg %d\n",
			       maccrd->crd_alg);
			err = -EINVAL;
			goto errout;
		}
	}

	if (crp->crp_flags & CRYPTO_F_SKBUF) {
		/* using SKB buffers */
		skb = (struct sk_buff *)crp->crp_buf;
		if (skb_shinfo(skb)->nr_frags) {
			printk(DRV_NAME ": skb frags unimplemented\n");
			err = -EINVAL;
			goto errout;
		}
		pasemi_desc_build(
			&work_desc,
			XCT_FUN_DST_PTR(skb->len, pci_map_single(
						sc->dma_pdev, skb->data,
						skb->len, DMA_TO_DEVICE)));
		pasemi_desc_build(
			&work_desc,
			XCT_FUN_SRC_PTR(
				srclen, pci_map_single(
					sc->dma_pdev, skb->data,
					srclen, DMA_TO_DEVICE)));
		pasemi_desc_hdr(&work_desc, XCT_FUN_LLEN(srclen));
	} else if (crp->crp_flags & CRYPTO_F_IOV) {
		/* using IOV buffers */
		uiop = (struct uio *)crp->crp_buf;
		if (uiop->uio_iovcnt > 1) {
			printk(DRV_NAME ": iov frags unimplemented\n");
			err = -EINVAL;
			goto errout;
		}

		/* crp_olen is never set; always use crp_ilen */
		pasemi_desc_build(
			&work_desc,
			XCT_FUN_DST_PTR(crp->crp_ilen, pci_map_single(
						sc->dma_pdev,
						uiop->uio_iov->iov_base,
						crp->crp_ilen, DMA_TO_DEVICE)));
		pasemi_desc_hdr(&work_desc, XCT_FUN_LLEN(srclen));

		pasemi_desc_build(
			&work_desc,
			XCT_FUN_SRC_PTR(srclen, pci_map_single(
						sc->dma_pdev,
						uiop->uio_iov->iov_base,
						srclen, DMA_TO_DEVICE)));
	} else {
		/* using contig buffers */
		pasemi_desc_build(
			&work_desc,
			XCT_FUN_DST_PTR(crp->crp_ilen, pci_map_single(
						sc->dma_pdev,
						crp->crp_buf,
						crp->crp_ilen, DMA_TO_DEVICE)));
		pasemi_desc_build(
			&work_desc,
			XCT_FUN_SRC_PTR(srclen, pci_map_single(
						sc->dma_pdev,
						crp->crp_buf, srclen,
						DMA_TO_DEVICE)));
		pasemi_desc_hdr(&work_desc, XCT_FUN_LLEN(srclen));
	}

	spin_lock_irqsave(&txring->fill_lock, flags);

	if (txring->sesn != PASEMI_SESSION(crp->crp_sid)) {
		txring->sesn = PASEMI_SESSION(crp->crp_sid);
		reinit = 1;
	}

	if (enccrd) {
		pasemi_desc_start(&init_desc,
				  XCT_CTRL_HDR(chsel, reinit ? reinit_size : 0x10, DMA_FN_CIV0));
		pasemi_desc_build(&init_desc,
				  XCT_FUN_SRC_PTR(reinit ? reinit_size : 0x10, ses->dma_addr));
	}

	if (((txring->next_to_fill + pasemi_desc_size(&init_desc) +
	      pasemi_desc_size(&work_desc)) -
	     txring->next_to_clean) > TX_RING_SIZE) {
		spin_unlock_irqrestore(&txring->fill_lock, flags);
		err = ERESTART;
		goto errout;
	}

	pasemi_ring_add_desc(txring, &init_desc, NULL);
	pasemi_ring_add_desc(txring, &work_desc, crp);

	pasemi_ring_incr(sc, chsel,
			 pasemi_desc_size(&init_desc) +
			 pasemi_desc_size(&work_desc));

	spin_unlock_irqrestore(&txring->fill_lock, flags);

	mod_timer(&txring->crypto_timer, jiffies + TIMER_INTERVAL);

	return 0;

erralg:
	printk(DRV_NAME ": unsupported algorithm or algorithm order alg1 %d alg2 %d\n",
	       crd1->crd_alg, crd2->crd_alg);
	err = -EINVAL;

errout:
	if (err != ERESTART) {
		crp->crp_etype = err;
		crypto_done(crp);
	}
	return err;
}
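
The descriptor classification at the top of pasemi_process encodes a fixed ordering: encrypt-then-authenticate outbound, authenticate-then-decrypt inbound; every other (algorithm, direction) pairing falls through to erralg. That check in isolation, as a sketch reusing the ALG_IS_SIG/ALG_IS_CIPHER predicates from the example:

/*
 * Split a two-descriptor chain into (enccrd, maccrd), enforcing the
 * only orderings the engine supports.  Sketch of the logic above.
 */
static int
classify_descs(struct cryptodesc *crd1, struct cryptodesc *crd2,
    struct cryptodesc **enccrd, struct cryptodesc **maccrd)
{
	if (ALG_IS_SIG(crd1->crd_alg)) {
		*maccrd = crd1;
		if (crd2 == NULL)
			*enccrd = NULL;
		else if (ALG_IS_CIPHER(crd2->crd_alg) &&
		    (crd2->crd_flags & CRD_F_ENCRYPT) == 0)
			*enccrd = crd2;		/* authenticate -> decrypt */
		else
			return (EINVAL);
	} else if (ALG_IS_CIPHER(crd1->crd_alg)) {
		*enccrd = crd1;
		if (crd2 == NULL)
			*maccrd = NULL;
		else if (ALG_IS_SIG(crd2->crd_alg) &&
		    (crd1->crd_flags & CRD_F_ENCRYPT))
			*maccrd = crd2;		/* encrypt -> authenticate */
		else
			return (EINVAL);
	} else
		return (EINVAL);
	return (0);
}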
Example #6
static int
talitos_process(device_t dev, struct cryptop *crp, int hint)
{
	int i, err = 0, ivsize;
	struct talitos_softc *sc = device_get_softc(dev);
	struct cryptodesc *crd1, *crd2, *maccrd, *enccrd;
	caddr_t iv;
	struct talitos_session *ses;
	struct talitos_desc *td;
	unsigned long flags;
	/* descriptor mappings */
	int hmac_key, hmac_data, cipher_iv, cipher_key,
		in_fifo, out_fifo, cipher_iv_out;
	static int chsel = -1;
	u_int32_t rand_iv[4];

	DPRINTF("%s()\n", __FUNCTION__);

	if (crp == NULL || crp->crp_callback == NULL || sc == NULL) {
		return EINVAL;
	}
	crp->crp_etype = 0;
	if (TALITOS_SESSION(crp->crp_sid) >= sc->sc_nsessions) {
		return EINVAL;
	}

	ses = &sc->sc_sessions[TALITOS_SESSION(crp->crp_sid)];

        /* enter the channel scheduler */
	spin_lock_irqsave(&sc->sc_chnfifolock[sc->sc_num_channels], flags);

	/* reuse channel that already had/has requests for the required EU */
	for (i = 0; i < sc->sc_num_channels; i++) {
		if (sc->sc_chnlastalg[i] == crp->crp_desc->crd_alg)
			break;
	}
	if (i == sc->sc_num_channels) {
		/*
		 * haven't seen this algorithm in the last sc_num_channels
		 * requests or more; use round robin in this case.
		 * nb: sc->sc_num_channels must be a power of 2
		 */
		chsel = (chsel + 1) & (sc->sc_num_channels - 1);
	} else {
		/*
		 * matches channel with same target execution unit;
		 * use same channel in this case
		 */
		chsel = i;
	}
	sc->sc_chnlastalg[chsel] = crp->crp_desc->crd_alg;

        /* release the channel scheduler lock */
	spin_unlock_irqrestore(&sc->sc_chnfifolock[sc->sc_num_channels], flags);

	/* acquire the selected channel fifo lock */
	spin_lock_irqsave(&sc->sc_chnfifolock[chsel], flags);

	/* find and reserve next available descriptor-cryptop pair */
	for (i = 0; i < sc->sc_chfifo_len; i++) {
		if (sc->sc_chnfifo[chsel][i].cf_desc.hdr == 0) {
			/*
			 * ensure correct descriptor formation by
			 * avoiding inadvertently setting "optional" entries
			 * e.g. not using "optional" dptr2 for MD/HMAC descs
			 */
			memset(&sc->sc_chnfifo[chsel][i].cf_desc,
				0, sizeof(*td));
			/* reserve it with done notification request bit */
			sc->sc_chnfifo[chsel][i].cf_desc.hdr |=
				TALITOS_DONE_NOTIFY;
			break;
		}
	}
	spin_unlock_irqrestore(&sc->sc_chnfifolock[chsel], flags);

	if (i == sc->sc_chfifo_len) {
		/* fifo full */
		err = ERESTART;
		goto errout;
	}

	td = &sc->sc_chnfifo[chsel][i].cf_desc;
	sc->sc_chnfifo[chsel][i].cf_crp = crp;

	crd1 = crp->crp_desc;
	if (crd1 == NULL) {
		err = EINVAL;
		goto errout;
	}
	crd2 = crd1->crd_next;
	/* prevent compiler warning */
	hmac_key = 0;
	hmac_data = 0;
	if (crd2 == NULL) {
		td->hdr |= TD_TYPE_COMMON_NONSNOOP_NO_AFEU;
		/* assign descriptor dword ptr mappings for this desc. type */
		cipher_iv = 1;
		cipher_key = 2;
		in_fifo = 3;
		cipher_iv_out = 5;
		if (crd1->crd_alg == CRYPTO_MD5_HMAC ||
		    crd1->crd_alg == CRYPTO_SHA1_HMAC ||
		    crd1->crd_alg == CRYPTO_SHA1 ||
		    crd1->crd_alg == CRYPTO_MD5) {
			out_fifo = 5;
			maccrd = crd1;
			enccrd = NULL;
		} else if (crd1->crd_alg == CRYPTO_DES_CBC ||
		    crd1->crd_alg == CRYPTO_3DES_CBC ||
		    crd1->crd_alg == CRYPTO_AES_CBC ||
		    crd1->crd_alg == CRYPTO_ARC4) {
			out_fifo = 4;
			maccrd = NULL;
			enccrd = crd1;
		} else {
			DPRINTF("UNKNOWN crd1->crd_alg %d\n", crd1->crd_alg);
			err = EINVAL;
			goto errout;
		}
	} else {
		if (sc->sc_desc_types & TALITOS_HAS_DT_IPSEC_ESP) {
			td->hdr |= TD_TYPE_IPSEC_ESP;
		} else {
			DPRINTF("unimplemented: multiple descriptor ipsec\n");
			err = EINVAL;
			goto errout;
		}
		/* assign descriptor dword ptr mappings for this desc. type */
		hmac_key = 0;
		hmac_data = 1;
		cipher_iv = 2;
		cipher_key = 3;
		in_fifo = 4;
		out_fifo = 5;
		cipher_iv_out = 6;
		if ((crd1->crd_alg == CRYPTO_MD5_HMAC ||
                     crd1->crd_alg == CRYPTO_SHA1_HMAC ||
                     crd1->crd_alg == CRYPTO_MD5 ||
                     crd1->crd_alg == CRYPTO_SHA1) &&
		    (crd2->crd_alg == CRYPTO_DES_CBC ||
		     crd2->crd_alg == CRYPTO_3DES_CBC ||
		     crd2->crd_alg == CRYPTO_AES_CBC ||
		     crd2->crd_alg == CRYPTO_ARC4) &&
		    ((crd2->crd_flags & CRD_F_ENCRYPT) == 0)) {
			maccrd = crd1;
			enccrd = crd2;
		} else if ((crd1->crd_alg == CRYPTO_DES_CBC ||
		     crd1->crd_alg == CRYPTO_ARC4 ||
		     crd1->crd_alg == CRYPTO_3DES_CBC ||
		     crd1->crd_alg == CRYPTO_AES_CBC) &&
		    (crd2->crd_alg == CRYPTO_MD5_HMAC ||
                     crd2->crd_alg == CRYPTO_SHA1_HMAC ||
                     crd2->crd_alg == CRYPTO_MD5 ||
                     crd2->crd_alg == CRYPTO_SHA1) &&
		    (crd1->crd_flags & CRD_F_ENCRYPT)) {
			enccrd = crd1;
			maccrd = crd2;
		} else {
			/* We cannot order the SEC as requested */
			printk("%s: cannot do the order\n",
					device_get_nameunit(sc->sc_cdev));
			err = EINVAL;
			goto errout;
		}
	}
	/* assign in_fifo and out_fifo based on input/output struct type */
	if (crp->crp_flags & CRYPTO_F_SKBUF) {
		/* using SKB buffers */
		struct sk_buff *skb = (struct sk_buff *)crp->crp_buf;
		if (skb_shinfo(skb)->nr_frags) {
			printk("%s: skb frags unimplemented\n",
					device_get_nameunit(sc->sc_cdev));
			err = EINVAL;
			goto errout;
		}
		td->ptr[in_fifo].ptr = dma_map_single(NULL, skb->data,
			skb->len, DMA_TO_DEVICE);
		td->ptr[in_fifo].len = skb->len;
		td->ptr[out_fifo].ptr = dma_map_single(NULL, skb->data,
			skb->len, DMA_TO_DEVICE);
		td->ptr[out_fifo].len = skb->len;
		td->ptr[hmac_data].ptr = dma_map_single(NULL, skb->data,
			skb->len, DMA_TO_DEVICE);
	} else if (crp->crp_flags & CRYPTO_F_IOV) {
		/* using IOV buffers */
		struct uio *uiop = (struct uio *)crp->crp_buf;
		if (uiop->uio_iovcnt > 1) {
			printk("%s: iov frags unimplemented\n",
					device_get_nameunit(sc->sc_cdev));
			err = EINVAL;
			goto errout;
		}
		td->ptr[in_fifo].ptr = dma_map_single(NULL,
			uiop->uio_iov->iov_base, crp->crp_ilen, DMA_TO_DEVICE);
		td->ptr[in_fifo].len = crp->crp_ilen;
		/* crp_olen is never set; always use crp_ilen */
		td->ptr[out_fifo].ptr = dma_map_single(NULL,
			uiop->uio_iov->iov_base,
			crp->crp_ilen, DMA_TO_DEVICE);
		td->ptr[out_fifo].len = crp->crp_ilen;
	} else {
		/* using contig buffers */
		td->ptr[in_fifo].ptr = dma_map_single(NULL,
			crp->crp_buf, crp->crp_ilen, DMA_TO_DEVICE);
		td->ptr[in_fifo].len = crp->crp_ilen;
		td->ptr[out_fifo].ptr = dma_map_single(NULL,
			crp->crp_buf, crp->crp_ilen, DMA_TO_DEVICE);
		td->ptr[out_fifo].len = crp->crp_ilen;
	}
	if (enccrd) {
		switch (enccrd->crd_alg) {
		case CRYPTO_3DES_CBC:
			td->hdr |= TALITOS_MODE0_DEU_3DES;
			/* FALLTHROUGH */
		case CRYPTO_DES_CBC:
			td->hdr |= TALITOS_SEL0_DEU
				|  TALITOS_MODE0_DEU_CBC;
			if (enccrd->crd_flags & CRD_F_ENCRYPT)
				td->hdr |= TALITOS_MODE0_DEU_ENC;
			ivsize = 2*sizeof(u_int32_t);
			DPRINTF("%cDES ses %d ch %d len %d\n",
				(td->hdr & TALITOS_MODE0_DEU_3DES)?'3':'1',
				(u32)TALITOS_SESSION(crp->crp_sid),
				chsel, td->ptr[in_fifo].len);
			break;
		case CRYPTO_AES_CBC:
			td->hdr |= TALITOS_SEL0_AESU
				|  TALITOS_MODE0_AESU_CBC;
			if (enccrd->crd_flags & CRD_F_ENCRYPT)
				td->hdr |= TALITOS_MODE0_AESU_ENC;
			ivsize = 4*sizeof(u_int32_t);
			DPRINTF("AES  ses %d ch %d len %d\n",
				(u32)TALITOS_SESSION(crp->crp_sid),
				chsel, td->ptr[in_fifo].len);
			break;
		default:
			printk("%s: unimplemented enccrd->crd_alg %d\n",
					device_get_nameunit(sc->sc_cdev), enccrd->crd_alg);
			err = EINVAL;
			goto errout;
		}
		/*
		 * Setup encrypt/decrypt state.  When using basic ops
		 * we can't use an inline IV because hash/crypt offset
		 * must be from the end of the IV to the start of the
		 * crypt data and this leaves out the preceding header
		 * from the hash calculation.  Instead we place the IV
		 * in the state record and set the hash/crypt offset to
		 * copy both the header+IV.
		 */
		if (enccrd->crd_flags & CRD_F_ENCRYPT) {
			td->hdr |= TALITOS_DIR_OUTBOUND;
			if (enccrd->crd_flags & CRD_F_IV_EXPLICIT)
				iv = enccrd->crd_iv;
			else
				read_random((iv = (caddr_t) rand_iv), sizeof(rand_iv));
			if ((enccrd->crd_flags & CRD_F_IV_PRESENT) == 0) {
				crypto_copyback(crp->crp_flags, crp->crp_buf,
				    enccrd->crd_inject, ivsize, iv);
			}
		} else {
			td->hdr |= TALITOS_DIR_INBOUND;
			if (enccrd->crd_flags & CRD_F_IV_EXPLICIT) {
				iv = enccrd->crd_iv;
			} else {
				iv = (caddr_t) rand_iv;
				crypto_copydata(crp->crp_flags, crp->crp_buf,
				    enccrd->crd_inject, ivsize, iv);
			}
		}
		td->ptr[cipher_iv].ptr = dma_map_single(NULL, iv, ivsize,
			DMA_TO_DEVICE);
		td->ptr[cipher_iv].len = ivsize;
		/*
		 * We don't need the cipher iv out length/pointer field to do
		 * ESP IPsec, so we set the len field to 0, which tells the
		 * SEC not to do anything with this len/ptr field.  Previously,
		 * when the length/pointer pointed at the iv, it gave us
		 * packet corruption.
		 */
		td->ptr[cipher_iv_out].len = 0;
	}
	if (enccrd && maccrd) {
		/* this is ipsec only for now */
		td->hdr |= TALITOS_SEL1_MDEU
			|  TALITOS_MODE1_MDEU_INIT
			|  TALITOS_MODE1_MDEU_PAD;
		switch (maccrd->crd_alg) {
			case	CRYPTO_MD5:
				td->hdr |= TALITOS_MODE1_MDEU_MD5;
				break;
			case	CRYPTO_MD5_HMAC:
				td->hdr |= TALITOS_MODE1_MDEU_MD5_HMAC;
				break;
			case	CRYPTO_SHA1:
				td->hdr |= TALITOS_MODE1_MDEU_SHA1;
				break;
			case	CRYPTO_SHA1_HMAC:
				td->hdr |= TALITOS_MODE1_MDEU_SHA1_HMAC;
				break;
			default:
				/* We cannot order the SEC as requested */
				printk("%s: cannot do the order\n",
						device_get_nameunit(sc->sc_cdev));
				err = EINVAL;
				goto errout;
		}
		if ((maccrd->crd_alg == CRYPTO_MD5_HMAC) ||
		   (maccrd->crd_alg == CRYPTO_SHA1_HMAC)) {
			/*
			 * The offset from hash data to the start of
			 * crypt data is the difference in the skips.
			 */
			/* ipsec only for now */
			td->ptr[hmac_key].ptr = dma_map_single(NULL,
				ses->ses_hmac, ses->ses_hmac_len, DMA_TO_DEVICE);
			td->ptr[hmac_key].len = ses->ses_hmac_len;
			td->ptr[in_fifo].ptr  += enccrd->crd_skip;
			td->ptr[in_fifo].len  =  enccrd->crd_len;
			td->ptr[out_fifo].ptr += enccrd->crd_skip;
			td->ptr[out_fifo].len =  enccrd->crd_len;
			/* bytes of HMAC to append after the ciphertext */
			td->ptr[out_fifo].extent =  ses->ses_mlen;
			td->ptr[hmac_data].ptr += maccrd->crd_skip;
			td->ptr[hmac_data].len = enccrd->crd_skip - maccrd->crd_skip;
		}
		if (enccrd->crd_flags & CRD_F_KEY_EXPLICIT) {
			printk("%s: CRD_F_KEY_EXPLICIT unimplemented\n",
					device_get_nameunit(sc->sc_cdev));
		}
	}
	if (!enccrd && maccrd) {
		/* single MD5 or SHA */
		td->hdr |= TALITOS_SEL0_MDEU
				|  TALITOS_MODE0_MDEU_INIT
				|  TALITOS_MODE0_MDEU_PAD;
		switch (maccrd->crd_alg) {
			case	CRYPTO_MD5:
				td->hdr |= TALITOS_MODE0_MDEU_MD5;
				DPRINTF("MD5  ses %d ch %d len %d\n",
					(u32)TALITOS_SESSION(crp->crp_sid),
					chsel, td->ptr[in_fifo].len);
				break;
			case	CRYPTO_MD5_HMAC:
				td->hdr |= TALITOS_MODE0_MDEU_MD5_HMAC;
				break;
			case	CRYPTO_SHA1:
				td->hdr |= TALITOS_MODE0_MDEU_SHA1;
				DPRINTF("SHA1 ses %d ch %d len %d\n",
					(u32)TALITOS_SESSION(crp->crp_sid),
					chsel, td->ptr[in_fifo].len);
				break;
			case	CRYPTO_SHA1_HMAC:
				td->hdr |= TALITOS_MODE0_MDEU_SHA1_HMAC;
				break;
			default:
				/* We cannot order the SEC as requested */
				DPRINTF("cannot do the order\n");
				err = EINVAL;
				goto errout;
		}

		if (crp->crp_flags & CRYPTO_F_IOV)
			td->ptr[out_fifo].ptr += maccrd->crd_inject;

		if ((maccrd->crd_alg == CRYPTO_MD5_HMAC) ||
		   (maccrd->crd_alg == CRYPTO_SHA1_HMAC)) {
			td->ptr[hmac_key].ptr = dma_map_single(NULL,
				ses->ses_hmac, ses->ses_hmac_len,
				DMA_TO_DEVICE);
			td->ptr[hmac_key].len = ses->ses_hmac_len;
		}
	} else {
		/* using process key (session data has duplicate) */
		td->ptr[cipher_key].ptr = dma_map_single(NULL,
			enccrd->crd_key, (enccrd->crd_klen + 7) / 8,
			DMA_TO_DEVICE);
		td->ptr[cipher_key].len = (enccrd->crd_klen + 7) / 8;
	}
	/* descriptor complete - GO! */
	return talitos_submit(sc, td, chsel);

errout:
	if (err != ERESTART) {
		crp->crp_etype = err;
		crypto_done(crp);
	}
	return err;
}
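
The channel scheduler above prefers a channel whose last request used the same algorithm (and hence the same execution unit), and otherwise advances a round-robin counter, relying on sc_num_channels being a power of two so that the bitwise AND wraps it. The selection logic in isolation, as a sketch:

/*
 * Pick a channel: reuse one already bound to this algorithm's EU,
 * otherwise round-robin.  `*rr' is the persistent round-robin cursor;
 * num_channels must be a power of 2 for the mask to wrap correctly.
 */
static int
pick_channel(int lastalg[], int num_channels, int alg, int *rr)
{
	int i, ch;

	for (i = 0; i < num_channels; i++)
		if (lastalg[i] == alg)
			break;
	if (i == num_channels)
		ch = *rr = (*rr + 1) & (num_channels - 1);
	else
		ch = i;			/* reuse channel with the same EU */
	lastalg[ch] = alg;		/* remember for the next request */
	return (ch);
}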
Example #7
int
aesni_cipher_process(struct aesni_session *ses, struct cryptodesc *enccrd,
    struct cryptop *crp)
{
	struct thread *td;
	uint8_t *buf;
	int error, allocated, saved_ctx;

	buf = aesni_cipher_alloc(enccrd, crp, &allocated);
	if (buf == NULL)
		return (ENOMEM);

	td = curthread;
	if (!is_fpu_kern_thread(0)) {
		error = fpu_kern_enter(td, ses->fpu_ctx, FPU_KERN_NORMAL);
		if (error != 0)
			goto out;
		saved_ctx = 1;
	} else {
		saved_ctx = 0;
		error = 0;
	}

	if ((enccrd->crd_flags & CRD_F_KEY_EXPLICIT) != 0) {
		error = aesni_cipher_setup_common(ses, enccrd->crd_key,
		    enccrd->crd_klen);
		if (error != 0)
			goto out;
	}

	if ((enccrd->crd_flags & CRD_F_ENCRYPT) != 0) {
		if ((enccrd->crd_flags & CRD_F_IV_EXPLICIT) != 0)
			bcopy(enccrd->crd_iv, ses->iv, AES_BLOCK_LEN);
		if ((enccrd->crd_flags & CRD_F_IV_PRESENT) == 0)
			crypto_copyback(crp->crp_flags, crp->crp_buf,
			    enccrd->crd_inject, AES_BLOCK_LEN, ses->iv);
		if (ses->algo == CRYPTO_AES_CBC) {
			aesni_encrypt_cbc(ses->rounds, ses->enc_schedule,
			    enccrd->crd_len, buf, buf, ses->iv);
		} else /* if (ses->algo == CRYPTO_AES_XTS) */ {
			aesni_encrypt_xts(ses->rounds, ses->enc_schedule,
			    ses->xts_schedule, enccrd->crd_len, buf, buf,
			    ses->iv);
		}
	} else {
		if ((enccrd->crd_flags & CRD_F_IV_EXPLICIT) != 0)
			bcopy(enccrd->crd_iv, ses->iv, AES_BLOCK_LEN);
		else
			crypto_copydata(crp->crp_flags, crp->crp_buf,
			    enccrd->crd_inject, AES_BLOCK_LEN, ses->iv);
		if (ses->algo == CRYPTO_AES_CBC) {
			aesni_decrypt_cbc(ses->rounds, ses->dec_schedule,
			    enccrd->crd_len, buf, ses->iv);
		} else /* if (ses->algo == CRYPTO_AES_XTS) */ {
			aesni_decrypt_xts(ses->rounds, ses->dec_schedule,
			    ses->xts_schedule, enccrd->crd_len, buf, buf,
			    ses->iv);
		}
	}
	if (saved_ctx)
		fpu_kern_leave(td, ses->fpu_ctx);
	if (allocated)
		crypto_copyback(crp->crp_flags, crp->crp_buf, enccrd->crd_skip,
		    enccrd->crd_len, buf);
	if ((enccrd->crd_flags & CRD_F_ENCRYPT) != 0)
		crypto_copydata(crp->crp_flags, crp->crp_buf,
		    enccrd->crd_skip + enccrd->crd_len - AES_BLOCK_LEN,
		    AES_BLOCK_LEN, ses->iv);
 out:
	if (allocated) {
		bzero(buf, enccrd->crd_len);
		free(buf, M_AESNI);
	}
	return (error);
}
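
Compared with Example #1, this version skips fpu_kern_enter() when the code is already running on an FPU-enabled kernel thread, and saved_ctx records whether a context must later be released with fpu_kern_leave(). That guard as a standalone sketch (fpu_guard_enter is a hypothetical name; the fpu_kern calls are the ones used above):

/*
 * Enter an FPU kernel context only when needed.  On return, `*saved'
 * tells the caller whether fpu_kern_leave() must be called later.
 */
static int
fpu_guard_enter(struct thread *td, struct fpu_kern_ctx *ctx, int *saved)
{
	int error;

	if (is_fpu_kern_thread(0)) {
		*saved = 0;	/* FPU already usable; nothing to save */
		return (0);
	}
	error = fpu_kern_enter(td, ctx, FPU_KERN_NORMAL);
	*saved = (error == 0);
	return (error);
}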
Example #8
/*
 * Process a request.
 */
static int 
cesa_ocf_process(device_t dev, struct cryptop *crp, int hint)
{
	struct cesa_ocf_process *cesa_ocf_cmd = NULL;
	struct cesa_ocf_process *cesa_ocf_cmd_wa = NULL;
	MV_CESA_COMMAND	*cesa_cmd;
	struct cryptodesc *crd;
	struct cesa_ocf_data *cesa_ocf_cur_ses;
	int sid = 0, temp_len = 0, i;
	int encrypt = 0, decrypt = 0, auth = 0;
	int  status;
	struct sk_buff *skb = NULL;
	struct uio *uiop = NULL;
	unsigned char *ivp;
	MV_BUF_INFO *p_buf_info;	
	MV_CESA_MBUF *p_mbuf_info;
	unsigned long flags;

        dprintk("%s()\n", __FUNCTION__);

	if( cesaReqResources <= 1 ) {
                dprintk("%s,%d: ERESTART\n", __FILE__, __LINE__);
                return ERESTART;
	}

#ifdef RT_DEBUG
        /* Sanity check */
        if (crp == NULL) {
                printk("%s,%d: EINVAL\n", __FILE__, __LINE__);
                return EINVAL;
        }

        if (crp->crp_desc == NULL || crp->crp_buf == NULL ) {
                printk("%s,%d: EINVAL\n", __FILE__, __LINE__);
                crp->crp_etype = EINVAL;
                return EINVAL;
        }

        sid = crp->crp_sid & 0xffffffff;
        if ((sid >= CESA_OCF_MAX_SES) || (cesa_ocf_sessions[sid] == NULL)) {
                crp->crp_etype = ENOENT;
                printk("%s,%d: ENOENT session %d \n", __FILE__, __LINE__, sid);
                return EINVAL;
        }
#endif

	sid = crp->crp_sid & 0xffffffff;
	crp->crp_etype = 0;
	cesa_ocf_cur_ses = cesa_ocf_sessions[sid];

#ifdef RT_DEBUG
	if(ocf_check_action(crp, cesa_ocf_cur_ses)){
		goto p_error;
	}
#endif

	/* allocate a new cesa process */
	cesa_ocf_cmd = kmalloc(sizeof(struct cesa_ocf_process), GFP_ATOMIC);
	
        if (cesa_ocf_cmd == NULL) {
            	printk("%s,%d: ENOBUFS \n", __FILE__, __LINE__);
            	goto p_error;
      	}
	memset(cesa_ocf_cmd, 0, sizeof(struct cesa_ocf_process));

	/* init cesa_process */
	cesa_ocf_cmd->crp = crp;
	/* always call callback */
	cesa_ocf_cmd->need_cb = 1;

	/* init cesa_cmd for usage of the HALs */
	cesa_cmd = &cesa_ocf_cmd->cesa_cmd;
	cesa_cmd->pReqPrv = (void *)cesa_ocf_cmd;
	cesa_cmd->sessionId = cesa_ocf_cur_ses->sid_encrypt; /* default: use encrypt session */

	/* prepare src buffer */
	/* we send the entire buffer to the HAL, even if only part of it should be encrypted/authenticated. */
	/* if not using sessions for both encrypt and auth, it would be wiser to copy only */
	/* from skip to crd_len. */
	p_buf_info = cesa_ocf_cmd->cesa_bufs;	
	p_mbuf_info = &cesa_ocf_cmd->cesa_mbuf;

	p_buf_info += 2; /* reserve the first 2 buffers for the IV and digest -
			    we won't append them to the end since they
			    might be placed at unaligned addresses. */
	
	p_mbuf_info->pFrags = p_buf_info;
	temp_len = 0;

	/* handle SKB */
	if (crp->crp_flags & CRYPTO_F_SKBUF) {
		
		dprintk("%s,%d: handle SKB.\n", __FILE__, __LINE__);
		skb = (struct sk_buff *) crp->crp_buf;

                if (skb_shinfo(skb)->nr_frags >= (MV_CESA_MAX_MBUF_FRAGS - 1)) {
                        printk("%s,%d: %d nr_frags > MV_CESA_MAX_MBUF_FRAGS", __FILE__, __LINE__, skb_shinfo(skb)->nr_frags);
                        goto p_error;
                }

		p_mbuf_info->mbufSize = skb->len;
		temp_len = skb->len;
        	/* first skb fragment */
        	p_buf_info->bufSize = skb_headlen(skb);
        	p_buf_info->bufVirtPtr = skb->data;
		p_buf_info++;

        	/* now handle all other skb fragments */
        	for ( i = 0; i < skb_shinfo(skb)->nr_frags; i++ ) {
            		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
            		p_buf_info->bufSize = frag->size;
            		p_buf_info->bufVirtPtr = page_address(FRAG_PAGE(frag->page)) + frag->page_offset;
            		p_buf_info++;
        	}
        	p_mbuf_info->numFrags = skb_shinfo(skb)->nr_frags + 1;
	}
	/* handle UIO */
	else if(crp->crp_flags & CRYPTO_F_IOV) {
	
		dprintk("%s,%d: handle UIO.\n", __FILE__, __LINE__);
		uiop = (struct uio *) crp->crp_buf;

                if (uiop->uio_iovcnt > (MV_CESA_MAX_MBUF_FRAGS - 1)) {
                        printk("%s,%d: %d uio_iovcnt > MV_CESA_MAX_MBUF_FRAGS \n", __FILE__, __LINE__, uiop->uio_iovcnt);
                        goto p_error;
                }

		p_mbuf_info->mbufSize = crp->crp_ilen;
		p_mbuf_info->numFrags = uiop->uio_iovcnt;
		for(i = 0; i < uiop->uio_iovcnt; i++) {
			p_buf_info->bufVirtPtr = uiop->uio_iov[i].iov_base;
			p_buf_info->bufSize = uiop->uio_iov[i].iov_len;
			temp_len += p_buf_info->bufSize;
			dprintk("%s,%d: buf %x-> addr %x, size %x \n"
				, __FILE__, __LINE__, i, (unsigned int)p_buf_info->bufVirtPtr, p_buf_info->bufSize);
			p_buf_info++;			
		}

	}
	/* handle CONTIG */
	else {
		dprintk("%s,%d: handle CONTIG.\n", __FILE__, __LINE__); 
		p_mbuf_info->numFrags = 1;
		p_mbuf_info->mbufSize = crp->crp_ilen;
		p_buf_info->bufVirtPtr = crp->crp_buf;
		p_buf_info->bufSize = crp->crp_ilen;
		temp_len = crp->crp_ilen;
		p_buf_info++;
	}
	
	/* Support buffers of up to 64K only. */
	if(crp->crp_ilen > 64*1024) {
		printk("%s,%d: buf too big %x \n", __FILE__, __LINE__, crp->crp_ilen);
		goto p_error;
	}

	if( temp_len != crp->crp_ilen ) {
		printk("%s,%d: warning size don't match.(%x %x) \n", __FILE__, __LINE__, temp_len, crp->crp_ilen);
	}	

	cesa_cmd->pSrc = p_mbuf_info;
	cesa_cmd->pDst = p_mbuf_info;
	
	/* restore p_buf_info to point to first available buf */
	p_buf_info = cesa_ocf_cmd->cesa_bufs;	
	p_buf_info += 1; 


        /* Go through crypto descriptors, processing as we go */
        for (crd = crp->crp_desc; crd; crd = crd->crd_next) {
		
		/* Encryption /Decryption */
		if(crd->crd_alg == cesa_ocf_cur_ses->cipher_alg) {

			dprintk("%s,%d: cipher", __FILE__, __LINE__);

			cesa_cmd->cryptoOffset = crd->crd_skip;
    	              	cesa_cmd->cryptoLength = crd->crd_len;

			if(crd->crd_flags & CRD_F_ENCRYPT) { /* encrypt */
				dprintk(" encrypt \n");
				encrypt++;

				/* handle IV */
				if (crd->crd_flags & CRD_F_IV_EXPLICIT) {  /* IV from USER */
					dprintk("%s,%d: IV from USER (offset %x) \n", __FILE__, __LINE__, crd->crd_inject);
					cesa_cmd->ivFromUser = 1;
					ivp = crd->crd_iv;

                                	/*
					 * do we have to copy the IV back to the buffer?
                                 	 */
                                	if ((crd->crd_flags & CRD_F_IV_PRESENT) == 0) {
						dprintk("%s,%d: copy the IV back to the buffer\n", __FILE__, __LINE__);
						cesa_cmd->ivOffset = crd->crd_inject;
						crypto_copyback(crp->crp_flags, crp->crp_buf, crd->crd_inject, cesa_ocf_cur_ses->ivlen, ivp);
                                	}
					else {
						dprintk("%s,%d: don't copy the IV back to the buffer \n", __FILE__, __LINE__);
						p_mbuf_info->numFrags++;
						p_mbuf_info->mbufSize += cesa_ocf_cur_ses->ivlen; 
						p_mbuf_info->pFrags = p_buf_info;

						p_buf_info->bufVirtPtr = ivp;
						p_buf_info->bufSize = cesa_ocf_cur_ses->ivlen; 
						p_buf_info--;

						/* offsets */
						cesa_cmd->ivOffset = 0;
						cesa_cmd->cryptoOffset += cesa_ocf_cur_ses->ivlen;
						if(auth) {
							cesa_cmd->macOffset += cesa_ocf_cur_ses->ivlen;
							cesa_cmd->digestOffset += cesa_ocf_cur_ses->ivlen; 
						}	
					}
                                }
				else {					/* random IV */
					dprintk("%s,%d: random IV \n", __FILE__, __LINE__);
					cesa_cmd->ivFromUser = 0;

                                	/*
					 * do we have to copy the IV back to the buffer?
                                 	 */
					/* in this mode the HAL will always copy the IV */
					/* given by the session to the ivOffset  	*/
					if ((crd->crd_flags & CRD_F_IV_PRESENT) == 0) {
						cesa_cmd->ivOffset = crd->crd_inject;
					} 
					else {
						/* if the IV isn't copied back, how will the user know which IV we used? */
						printk("%s,%d: EINVAL\n", __FILE__, __LINE__);
						goto p_error; 
					}
				}
			}
			else { 					/* decrypt */
				dprintk(" decrypt \n");
				decrypt++;
				cesa_cmd->sessionId = cesa_ocf_cur_ses->sid_decrypt;

				/* handle IV */
				if (crd->crd_flags & CRD_F_IV_EXPLICIT) {
					dprintk("%s,%d: IV from USER \n", __FILE__, __LINE__);
					/* append the IV buf to the mbuf */
					cesa_cmd->ivFromUser = 1;	
					p_mbuf_info->numFrags++;
					p_mbuf_info->mbufSize += cesa_ocf_cur_ses->ivlen; 
					p_mbuf_info->pFrags = p_buf_info;

					p_buf_info->bufVirtPtr = crd->crd_iv;
					p_buf_info->bufSize = cesa_ocf_cur_ses->ivlen; 
					p_buf_info--;

					/* offsets */
					cesa_cmd->ivOffset = 0;
					cesa_cmd->cryptoOffset += cesa_ocf_cur_ses->ivlen;
					if(auth) {
						cesa_cmd->macOffset += cesa_ocf_cur_ses->ivlen;
						cesa_cmd->digestOffset += cesa_ocf_cur_ses->ivlen; 
					}
                                }
				else {
					dprintk("%s,%d: IV inside the buffer \n", __FILE__, __LINE__);
					cesa_cmd->ivFromUser = 0;
					cesa_cmd->ivOffset = crd->crd_inject;
				}
			}

		}
		/* Authentication */
		else if(crd->crd_alg == cesa_ocf_cur_ses->auth_alg) {
			dprintk("%s,%d:  Authentication \n", __FILE__, __LINE__);
			auth++;
			cesa_cmd->macOffset = crd->crd_skip;
			cesa_cmd->macLength = crd->crd_len;

			/* digest + mac */
			cesa_cmd->digestOffset = crd->crd_inject;
		} 
		else {
			printk("%s,%d: Alg isn't supported by this session.\n", __FILE__, __LINE__);
			goto p_error;
		}
	}

	dprintk("\n");
	dprintk("%s,%d: Sending Action: \n", __FILE__, __LINE__);
	dprintk("%s,%d: IV from user: %d. IV offset %x \n",  __FILE__, __LINE__, cesa_cmd->ivFromUser, cesa_cmd->ivOffset);
	dprintk("%s,%d: crypt offset %x len %x \n", __FILE__, __LINE__, cesa_cmd->cryptoOffset, cesa_cmd->cryptoLength);
	dprintk("%s,%d: Auth offset %x len %x \n", __FILE__, __LINE__, cesa_cmd->macOffset, cesa_cmd->macLength);
	dprintk("%s,%d: set digest in offset %x . \n", __FILE__, __LINE__, cesa_cmd->digestOffset);
	if(debug) {
		mvCesaDebugMbuf("SRC BUFFER", cesa_cmd->pSrc, 0, cesa_cmd->pSrc->mbufSize);
	}


	/* send action to HAL */
	spin_lock_irqsave(&cesa_lock, flags);
	status = mvCesaAction(cesa_cmd);
	spin_unlock_irqrestore(&cesa_lock, flags);

	/* action not allowed */
	if(status == MV_NOT_ALLOWED) {
#ifdef CESA_OCF_SPLIT
		/* if both encrypt and auth try to split */
		if(auth && (encrypt || decrypt)) {
			MV_CESA_COMMAND	*cesa_cmd_wa;

			/* malloc a new cesa process and init it */	
			cesa_ocf_cmd_wa = kmalloc(sizeof(struct cesa_ocf_process), GFP_ATOMIC);
	
        		if (cesa_ocf_cmd_wa == NULL) {
            			printk("%s,%d: ENOBUFS \n", __FILE__, __LINE__);
            			goto p_error;
      			}
			memcpy(cesa_ocf_cmd_wa, cesa_ocf_cmd, sizeof(struct cesa_ocf_process));
			cesa_cmd_wa = &cesa_ocf_cmd_wa->cesa_cmd;
			cesa_cmd_wa->pReqPrv = (void *)cesa_ocf_cmd_wa;
			cesa_ocf_cmd_wa->need_cb = 0;

			/* break the request into two operations; the first operation's completion won't call the callback */
			if((decrypt) && (cesa_ocf_cur_ses->auth_tn_decrypt)) {
				cesa_cmd_wa->sessionId = cesa_ocf_cur_ses->frag_wa_auth;
				cesa_cmd->sessionId = cesa_ocf_cur_ses->frag_wa_decrypt;
			}
			else if((decrypt) && !(cesa_ocf_cur_ses->auth_tn_decrypt)) {
				cesa_cmd_wa->sessionId = cesa_ocf_cur_ses->frag_wa_decrypt;
				cesa_cmd->sessionId = cesa_ocf_cur_ses->frag_wa_auth;
			}
			else if((encrypt) && (cesa_ocf_cur_ses->encrypt_tn_auth)) {
				cesa_cmd_wa->sessionId = cesa_ocf_cur_ses->frag_wa_encrypt;
				cesa_cmd->sessionId = cesa_ocf_cur_ses->frag_wa_auth;
			}
			else if((encrypt) && !(cesa_ocf_cur_ses->encrypt_tn_auth)){
				cesa_cmd_wa->sessionId = cesa_ocf_cur_ses->frag_wa_auth;
				cesa_cmd->sessionId = cesa_ocf_cur_ses->frag_wa_encrypt;
			}
			else {
				printk("%s,%d: Unsupporterd fragment wa mode \n", __FILE__, __LINE__);
            			goto p_error;
			}

			/* send the 2 actions to the HAL */
			spin_lock_irqsave(&cesa_lock, flags);
			status = mvCesaAction(cesa_cmd_wa);
			spin_unlock_irqrestore(&cesa_lock, flags);

			if((status != MV_NO_MORE) && (status != MV_OK)) {
				printk("%s,%d: cesa action failed, status = 0x%x\n", __FILE__, __LINE__, status);
				goto p_error;
			}
			spin_lock_irqsave(&cesa_lock, flags);
			status = mvCesaAction(cesa_cmd);
			spin_unlock_irqrestore(&cesa_lock, flags);

		}
		/* action not allowed and can't split */
		else 
#endif
		{
			goto p_error;
		}
	}

	/* HAL queue is full, send again. This should never happen */
	if(status == MV_NO_RESOURCE) {
		printk("%s,%d: cesa no more resources \n", __FILE__, __LINE__);
		if(cesa_ocf_cmd)
			kfree(cesa_ocf_cmd);
		if(cesa_ocf_cmd_wa)
			kfree(cesa_ocf_cmd_wa);
		return ERESTART;
	} 
	else if((status != MV_NO_MORE) && (status != MV_OK)) {
                printk("%s,%d: cesa action failed, status = 0x%x\n", __FILE__, __LINE__, status);
		goto p_error;
        }


#ifdef CESA_OCF_POLLING
	cesa_interrupt_polling();
#endif
	cesaTestTraceAdd(5);

	return 0;
p_error:
	crp->crp_etype = EINVAL;
	if(cesa_ocf_cmd)
		kfree(cesa_ocf_cmd);
	if(cesa_ocf_cmd_wa)
		kfree(cesa_ocf_cmd_wa);
       	return EINVAL;
}
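
When the IV must not be written into the data buffer (explicit IV on decrypt, or CRD_F_IV_PRESENT on encrypt), the driver above prepends the IV as an extra leading fragment and shifts every offset by ivlen. That trick in isolation, as a sketch using the MV_CESA structures from the example (prepend_iv_frag is a hypothetical name, not part of the Marvell HAL):

/*
 * Prepend the IV as a leading fragment so the HAL sees one logical
 * buffer, and shift the crypto/MAC offsets past it.  `frag' is the
 * reserved slot immediately before the data fragments.
 */
static void
prepend_iv_frag(MV_CESA_MBUF *mbuf, MV_BUF_INFO *frag,
    MV_CESA_COMMAND *cmd, unsigned char *iv, int ivlen, int has_auth)
{
	mbuf->numFrags++;
	mbuf->mbufSize += ivlen;
	mbuf->pFrags = frag;		/* fragment list now starts at the IV */

	frag->bufVirtPtr = iv;
	frag->bufSize = ivlen;

	cmd->ivOffset = 0;		/* IV sits at the front */
	cmd->cryptoOffset += ivlen;	/* payload shifted by the new fragment */
	if (has_auth) {
		cmd->macOffset += ivlen;
		cmd->digestOffset += ivlen;
	}
}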
Example #9
/* Name        : icp_ocfDrvProcessDataSetup
 *
 * Description : This function will setup all the cryptographic operation data
 *               that is required by LAC to execute the operation.
 */
static int icp_ocfDrvProcessDataSetup(struct icp_drvOpData *drvOpData,
				      struct cryptodesc *crp_desc)
{
	CpaCyRandGenOpData randGenOpData;
	CpaFlatBuffer randData;

	drvOpData->lacOpData.packetType = CPA_CY_SYM_PACKET_TYPE_FULL;

	/* Convert from the cryptop to the ICP LAC crypto parameters */
	switch (crp_desc->crd_alg) {
	case CRYPTO_NULL_CBC:
		drvOpData->lacOpData.
		    cryptoStartSrcOffsetInBytes = crp_desc->crd_skip;
		drvOpData->lacOpData.
		    messageLenToCipherInBytes = crp_desc->crd_len;
		drvOpData->verifyResult = CPA_FALSE;
		drvOpData->lacOpData.ivLenInBytes = NULL_BLOCK_LEN;
		break;
	case CRYPTO_DES_CBC:
		drvOpData->lacOpData.
		    cryptoStartSrcOffsetInBytes = crp_desc->crd_skip;
		drvOpData->lacOpData.
		    messageLenToCipherInBytes = crp_desc->crd_len;
		drvOpData->verifyResult = CPA_FALSE;
		drvOpData->lacOpData.ivLenInBytes = DES_BLOCK_LEN;
		break;
	case CRYPTO_3DES_CBC:
		drvOpData->lacOpData.
		    cryptoStartSrcOffsetInBytes = crp_desc->crd_skip;
		drvOpData->lacOpData.
		    messageLenToCipherInBytes = crp_desc->crd_len;
		drvOpData->verifyResult = CPA_FALSE;
		drvOpData->lacOpData.ivLenInBytes = DES3_BLOCK_LEN;
		break;
	case CRYPTO_ARC4:
		drvOpData->lacOpData.
		    cryptoStartSrcOffsetInBytes = crp_desc->crd_skip;
		drvOpData->lacOpData.
		    messageLenToCipherInBytes = crp_desc->crd_len;
		drvOpData->verifyResult = CPA_FALSE;
		drvOpData->lacOpData.ivLenInBytes = ARC4_COUNTER_LEN;
		break;
	case CRYPTO_AES_CBC:
		drvOpData->lacOpData.
		    cryptoStartSrcOffsetInBytes = crp_desc->crd_skip;
		drvOpData->lacOpData.
		    messageLenToCipherInBytes = crp_desc->crd_len;
		drvOpData->verifyResult = CPA_FALSE;
		drvOpData->lacOpData.ivLenInBytes = RIJNDAEL128_BLOCK_LEN;
		break;
	case CRYPTO_SHA1:
	case CRYPTO_SHA1_HMAC:
	case CRYPTO_SHA2_256:
	case CRYPTO_SHA2_256_HMAC:
	case CRYPTO_SHA2_384:
	case CRYPTO_SHA2_384_HMAC:
	case CRYPTO_SHA2_512:
	case CRYPTO_SHA2_512_HMAC:
	case CRYPTO_MD5:
	case CRYPTO_MD5_HMAC:
		drvOpData->lacOpData.
		    hashStartSrcOffsetInBytes = crp_desc->crd_skip;
		drvOpData->lacOpData.
		    messageLenToHashInBytes = crp_desc->crd_len;
		drvOpData->lacOpData.
		    pDigestResult =
		    icp_ocfDrvDigestPointerFind(drvOpData, crp_desc);

		if (NULL == drvOpData->lacOpData.pDigestResult) {
			DPRINTK("%s(): ERROR - could not calculate "
				"Digest Result memory address\n", __FUNCTION__);
			return ICP_OCF_DRV_STATUS_FAIL;
		}

		drvOpData->lacOpData.digestVerify = CPA_FALSE;
		break;
	default:
		DPRINTK("%s(): Crypto process error - algorithm not "
			"found \n", __FUNCTION__);
		return ICP_OCF_DRV_STATUS_FAIL;
	}

	/* Figure out what the IV is supposed to be */
	if ((crp_desc->crd_alg == CRYPTO_DES_CBC) ||
	    (crp_desc->crd_alg == CRYPTO_3DES_CBC) ||
	    (crp_desc->crd_alg == CRYPTO_AES_CBC)) {
		/* ARC4 doesn't use an IV */
		if (crp_desc->crd_flags & CRD_F_IV_EXPLICIT) {
			/* Explicit IV provided to OCF */
			drvOpData->lacOpData.pIv = crp_desc->crd_iv;
		} else {
			/* IV is not explicitly provided to OCF */

			/* Point the LAC OP Data IV pointer to our allocated
			   storage location for this session. */
			drvOpData->lacOpData.pIv = drvOpData->ivData;

			if ((crp_desc->crd_flags & CRD_F_ENCRYPT) &&
			    ((crp_desc->crd_flags & CRD_F_IV_PRESENT) == 0)) {

				/* Encrypting - need to create IV */
				randGenOpData.generateBits = CPA_TRUE;
				randGenOpData.lenInBytes = MAX_IV_LEN_IN_BYTES;

				icp_ocfDrvPtrAndLenToFlatBuffer((Cpa8U *)
								drvOpData->
								ivData,
								MAX_IV_LEN_IN_BYTES,
								&randData);

				if (CPA_STATUS_SUCCESS !=
				    cpaCyRandGen(CPA_INSTANCE_HANDLE_SINGLE,
						 NULL, NULL,
						 &randGenOpData, &randData)) {
					DPRINTK("%s(): ERROR - Failed to"
						" generate"
						" Initialisation Vector\n",
						__FUNCTION__);
					return ICP_OCF_DRV_STATUS_FAIL;
				}

				crypto_copyback(drvOpData->crp->
						crp_flags,
						drvOpData->crp->crp_buf,
						crp_desc->crd_inject,
						drvOpData->lacOpData.
						ivLenInBytes,
						(caddr_t) (drvOpData->lacOpData.
							   pIv));
			} else {
				/* Reading IV from buffer */
				crypto_copydata(drvOpData->crp->
						crp_flags,
						drvOpData->crp->crp_buf,
						crp_desc->crd_inject,
						drvOpData->lacOpData.
						ivLenInBytes,
						(caddr_t) (drvOpData->lacOpData.
							   pIv));
			}

		}

	}

	return ICP_OCF_DRV_STATUS_SUCCESS;
}
Example #10
/* This function is called from an interrupt handler */
void
nlm_xlpsec_msgring_handler(int vc, int size, int code, int src_id,
    struct nlm_fmn_msg *msg, void *data)
{
	struct xlp_sec_command *cmd = NULL;
	struct xlp_sec_softc *sc = NULL;
	struct cryptodesc *crd = NULL;
	unsigned int ivlen = 0;

	KASSERT(code == FMN_SWCODE_CRYPTO,
	    ("%s: bad code = %d, expected code = %d\n", __FUNCTION__,
	    code, FMN_SWCODE_CRYPTO));

	sc = (struct xlp_sec_softc *)data;
	KASSERT(src_id >= sc->sec_vc_start && src_id <= sc->sec_vc_end,
	    ("%s: bad src_id = %d, expect %d - %d\n", __FUNCTION__,
	    src_id, sc->sec_vc_start, sc->sec_vc_end));

	cmd = (struct xlp_sec_command *)(uintptr_t)msg->msg[0];
	KASSERT(cmd != NULL && cmd->crp != NULL,
		("%s :cmd not received properly\n",__FUNCTION__));

	KASSERT(CRYPTO_ERROR(msg->msg[1]) == 0,
	    ("%s: Message rcv msg0 %llx msg1 %llx err %x \n", __FUNCTION__,
	    (unsigned long long)msg->msg[0], (unsigned long long)msg->msg[1],
	    (int)CRYPTO_ERROR(msg->msg[1])));

	crd = cmd->enccrd;
	/* Copy the last 8 or 16 bytes to the session iv, so that in some
	 * cases it can be used as the IV for the next request.
	 */
	if (crd != NULL) {
		if ((crd->crd_alg == CRYPTO_DES_CBC ||
		    crd->crd_alg == CRYPTO_3DES_CBC ||
		    crd->crd_alg == CRYPTO_AES_CBC) &&
		    (crd->crd_flags & CRD_F_ENCRYPT)) {
			ivlen = ((crd->crd_alg == CRYPTO_AES_CBC) ?
			    XLP_SEC_AES_IV_LENGTH : XLP_SEC_DES_IV_LENGTH);
			crypto_copydata(cmd->crp->crp_flags, cmd->crp->crp_buf,
			    crd->crd_skip + crd->crd_len - ivlen, ivlen,
			    sc->sc_sessions[cmd->session_num].ses_iv);
		}
	}

	/* If there are not enough credits to send, the send request
	 * will fail with ERESTART and the driver will stay blocked until it
	 * is unblocked here, once it is known that there are sufficient
	 * credits to send the request again.
	 */
	if (sc->sc_needwakeup) {
		atomic_add_int(&creditleft, sc->sec_msgsz);
		if (creditleft >= (NLM_CRYPTO_LEFT_REQS)) {
			crypto_unblock(sc->sc_cid, sc->sc_needwakeup);
			sc->sc_needwakeup &= (~(CRYPTO_SYMQ | CRYPTO_ASYMQ));
		}
	}
	if(cmd->maccrd) {
		crypto_copyback(cmd->crp->crp_flags,
		    cmd->crp->crp_buf, cmd->maccrd->crd_inject,
		    cmd->hash_dst_len, cmd->hashdest);
	}

	/* This indicates completion of the crypto operation */
	crypto_done(cmd->crp);

	xlp_free_cmd_params(cmd);

	return;
}
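
The IV carry-over in the handler above saves the final ciphertext block of a CBC encryption as the session IV, so a later request on the same session can chain from it (xlp_copyiv in Example #3 reads ses_iv back for exactly that case). A minimal sketch of that step:

/*
 * After a CBC encryption, the last `ivlen' bytes of the ciphertext
 * region become the IV for the next request on this session.
 */
static void
save_next_iv(struct cryptop *crp, struct cryptodesc *crd,
    unsigned char *ses_iv, unsigned int ivlen)
{
	crypto_copydata(crp->crp_flags, crp->crp_buf,
	    crd->crd_skip + crd->crd_len - ivlen, ivlen, ses_iv);
}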