Example #1
int
aesni_cipher_process(struct aesni_session *ses, struct cryptodesc *enccrd,
    struct cryptop *crp)
{
	struct thread *td;
	uint8_t *buf;
	int error, allocated;

	buf = aesni_cipher_alloc(enccrd, crp, &allocated);
	if (buf == NULL) {
		error = ENOMEM;
		goto out;
	}

	td = curthread;
	error = fpu_kern_enter(td, &ses->fpu_ctx, FPU_KERN_NORMAL);
	if (error != 0)
		goto out1;

	if ((enccrd->crd_flags & CRD_F_ENCRYPT) != 0) {
		if ((enccrd->crd_flags & CRD_F_IV_EXPLICIT) != 0)
			bcopy(enccrd->crd_iv, ses->iv, AES_BLOCK_LEN);

		if ((enccrd->crd_flags & CRD_F_IV_PRESENT) == 0)
			crypto_copyback(crp->crp_flags, crp->crp_buf,
			    enccrd->crd_inject, AES_BLOCK_LEN, ses->iv);

		aesni_encrypt_cbc(ses->rounds, ses->enc_schedule,
		    enccrd->crd_len, buf, buf, ses->iv);
	} else {
		if ((enccrd->crd_flags & CRD_F_IV_EXPLICIT) != 0)
			bcopy(enccrd->crd_iv, ses->iv, AES_BLOCK_LEN);
		else
			crypto_copydata(crp->crp_flags, crp->crp_buf,
			    enccrd->crd_inject, AES_BLOCK_LEN, ses->iv);
		aesni_decrypt_cbc(ses->rounds, ses->dec_schedule,
		    enccrd->crd_len, buf, ses->iv);
	}
	fpu_kern_leave(td, &ses->fpu_ctx);
	if (allocated)
		crypto_copyback(crp->crp_flags, crp->crp_buf, enccrd->crd_skip,
		    enccrd->crd_len, buf);
	if ((enccrd->crd_flags & CRD_F_ENCRYPT) != 0)
		crypto_copydata(crp->crp_flags, crp->crp_buf,
		    enccrd->crd_skip + enccrd->crd_len - AES_BLOCK_LEN,
		    AES_BLOCK_LEN, ses->iv);
 out1:
	if (allocated) {
		bzero(buf, enccrd->crd_len);
		free(buf, M_AESNI);
	}
 out:
	return (error);
}
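
All of these examples follow the same OCF IV convention, so it is worth stating once. On encrypt, the IV comes from crd_iv when CRD_F_IV_EXPLICIT is set (otherwise from driver or session state) and is written into the buffer at crd_inject unless CRD_F_IV_PRESENT says it is already there; on decrypt, the IV again comes from crd_iv when explicit, otherwise it is read from the buffer at crd_inject. A minimal sketch of that logic, assuming the standard OCF flag semantics (the helper name and the caller-supplied ses_iv buffer are hypothetical):

static void
ocf_setup_iv(struct cryptodesc *crd, struct cryptop *crp,
    uint8_t *ses_iv, int ivlen)
{
	if (crd->crd_flags & CRD_F_ENCRYPT) {
		/* Encrypting: take the caller's IV, else keep the session IV. */
		if (crd->crd_flags & CRD_F_IV_EXPLICIT)
			bcopy(crd->crd_iv, ses_iv, ivlen);
		/* Publish the IV into the output unless it is already there. */
		if ((crd->crd_flags & CRD_F_IV_PRESENT) == 0)
			crypto_copyback(crp->crp_flags, crp->crp_buf,
			    crd->crd_inject, ivlen, ses_iv);
	} else {
		/* Decrypting: take the caller's IV, else read it from the buffer. */
		if (crd->crd_flags & CRD_F_IV_EXPLICIT)
			bcopy(crd->crd_iv, ses_iv, ivlen);
		else
			crypto_copydata(crp->crp_flags, crp->crp_buf,
			    crd->crd_inject, ivlen, ses_iv);
	}
}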
Example #2
File: nlmsec.c Project: coyizumi/cs111
static int
xlp_copyiv(struct xlp_sec_softc *sc, struct xlp_sec_command *cmd,
    struct cryptodesc *enccrd)
{
	unsigned int ivlen = 0;
	int session;
	struct cryptop *crp = NULL;

	crp = cmd->crp;
	session = cmd->session_num;

	if (enccrd->crd_alg != CRYPTO_ARC4) {
		ivlen = ((enccrd->crd_alg == CRYPTO_AES_CBC) ?
		    XLP_SEC_AES_IV_LENGTH : XLP_SEC_DES_IV_LENGTH);
		if (enccrd->crd_flags & CRD_F_ENCRYPT) {
			if (enccrd->crd_flags & CRD_F_IV_EXPLICIT) {
				bcopy(enccrd->crd_iv, cmd->iv, ivlen);
			} else {
				bcopy(sc->sc_sessions[session].ses_iv, cmd->iv,
				    ivlen);
			}
			if ((enccrd->crd_flags & CRD_F_IV_PRESENT) == 0) {
				crypto_copyback(crp->crp_flags,
				    crp->crp_buf, enccrd->crd_inject,
				    ivlen, cmd->iv);
			}
		} else {
			if (enccrd->crd_flags & CRD_F_IV_EXPLICIT) {
				bcopy(enccrd->crd_iv, cmd->iv, ivlen);
			} else {
				crypto_copydata(crp->crp_flags, crp->crp_buf,
				    enccrd->crd_inject, ivlen, cmd->iv);
			}
		}
	}
	return (0);
}
Example #3
/*
 * Apply a symmetric encryption/decryption algorithm.
 */
static int
swcr_encdec(struct cryptodesc *crd, struct swcr_data *sw, caddr_t buf,
    int flags)
{
	unsigned char iv[EALG_MAX_BLOCK_LEN], blk[EALG_MAX_BLOCK_LEN], *idat;
	unsigned char *ivp, piv[EALG_MAX_BLOCK_LEN];
	struct enc_xform *exf;
	int i, k, j, blks;

	exf = sw->sw_exf;
	blks = exf->blocksize;

	/* Check for non-padded data */
	if (crd->crd_len % blks)
		return EINVAL;

	/* Initialize the IV */
	if (crd->crd_flags & CRD_F_ENCRYPT) {
		/* IV explicitly provided ? */
		if (crd->crd_flags & CRD_F_IV_EXPLICIT)
			bcopy(crd->crd_iv, iv, blks);
		else
			arc4rand(iv, blks, 0);

		/* Do we need to write the IV */
		if (!(crd->crd_flags & CRD_F_IV_PRESENT))
			crypto_copyback(flags, buf, crd->crd_inject, blks, iv);

	} else {	/* Decryption */
			/* IV explicitly provided ? */
		if (crd->crd_flags & CRD_F_IV_EXPLICIT)
			bcopy(crd->crd_iv, iv, blks);
		else {
			/* Get IV off buf */
			crypto_copydata(flags, buf, crd->crd_inject, blks, iv);
		}
	}

	if (crd->crd_flags & CRD_F_KEY_EXPLICIT) {
		int error; 

		if (sw->sw_kschedule)
			exf->zerokey(&(sw->sw_kschedule));
		error = exf->setkey(&sw->sw_kschedule,
				crd->crd_key, crd->crd_klen / 8);
		if (error)
			return (error);
	}

	ivp = iv;

	/*
	 * xforms that provide a reinit method perform all IV
	 * handling themselves.
	 */
	if (exf->reinit)
		exf->reinit(sw->sw_kschedule, iv);

	if (flags & CRYPTO_F_IMBUF) {
		struct mbuf *m = (struct mbuf *) buf;

		/* Find beginning of data */
		m = m_getptr(m, crd->crd_skip, &k);
		if (m == NULL)
			return EINVAL;

		i = crd->crd_len;

		while (i > 0) {
			/*
			 * If there's insufficient data at the end of
			 * an mbuf, we have to do some copying.
			 */
			if (m->m_len < k + blks && m->m_len != k) {
				m_copydata(m, k, blks, blk);

				/* Actual encryption/decryption */
				if (exf->reinit) {
					if (crd->crd_flags & CRD_F_ENCRYPT) {
						exf->encrypt(sw->sw_kschedule,
						    blk);
					} else {
						exf->decrypt(sw->sw_kschedule,
						    blk);
					}
				} else if (crd->crd_flags & CRD_F_ENCRYPT) {
					/* XOR with previous block */
					for (j = 0; j < blks; j++)
						blk[j] ^= ivp[j];

					exf->encrypt(sw->sw_kschedule, blk);

					/*
					 * Keep encrypted block for XOR'ing
					 * with next block
					 */
					bcopy(blk, iv, blks);
					ivp = iv;
				} else {	/* decrypt */
					/*	
					 * Keep encrypted block for XOR'ing
					 * with next block
					 */
					if (ivp == iv)
						bcopy(blk, piv, blks);
					else
						bcopy(blk, iv, blks);

					exf->decrypt(sw->sw_kschedule, blk);

					/* XOR with previous block */
					for (j = 0; j < blks; j++)
						blk[j] ^= ivp[j];

					if (ivp == iv)
						bcopy(piv, iv, blks);
					else
						ivp = iv;
				}

				/* Copy back decrypted block */
				m_copyback(m, k, blks, blk);

				/* Advance pointer */
				m = m_getptr(m, k + blks, &k);
				if (m == NULL)
					return EINVAL;

				i -= blks;

				/* Could be done... */
				if (i == 0)
					break;
			}

			/* Skip possibly empty mbufs */
			if (k == m->m_len) {
				for (m = m->m_next; m && m->m_len == 0;
				    m = m->m_next)
					;
				k = 0;
			}

			/* Sanity check */
			if (m == NULL)
				return EINVAL;

			/*
			 * Warning: idat may point to garbage here, but
			 * we only use it in the while() loop below, and
			 * only when there is indeed enough data.
			 */
			idat = mtod(m, unsigned char *) + k;

			while (m->m_len >= k + blks && i > 0) {
				if (exf->reinit) {
					if (crd->crd_flags & CRD_F_ENCRYPT) {
						exf->encrypt(sw->sw_kschedule,
						    idat);
					} else {
						exf->decrypt(sw->sw_kschedule,
						    idat);
					}
				} else if (crd->crd_flags & CRD_F_ENCRYPT) {
					/* XOR with previous block/IV */
					for (j = 0; j < blks; j++)
						idat[j] ^= ivp[j];

					exf->encrypt(sw->sw_kschedule, idat);
					ivp = idat;
				} else {	/* decrypt */
					/*
					 * Keep encrypted block to be used
					 * in next block's processing.
					 */
					if (ivp == iv)
						bcopy(idat, piv, blks);
					else
						bcopy(idat, iv, blks);

					exf->decrypt(sw->sw_kschedule, idat);

					/* XOR with previous block/IV */
					for (j = 0; j < blks; j++)
						idat[j] ^= ivp[j];

					if (ivp == iv)
						bcopy(piv, iv, blks);
					else
						ivp = iv;
				}

				idat += blks;
				k += blks;
				i -= blks;
			}
		}

		return 0; /* Done with mbuf encryption/decryption */
	} else if (flags & CRYPTO_F_IOV) {
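		/* ... excerpt ends here; the CRYPTO_F_IOV path parallels the mbuf walk above ... */

The piv/ivp bookkeeping above exists only because decryption runs in place: the ciphertext block that must feed the next XOR is destroyed by the decrypt. On a flat buffer the same CBC chaining is easier to see. A self-contained sketch, assuming an in-place single-block cipher callback for the chosen direction (all names here are hypothetical):

#include <stddef.h>
#include <stdint.h>
#include <string.h>

#define MAX_BLK 16	/* plays the role of EALG_MAX_BLOCK_LEN */

static void
cbc_inplace(void (*cipher)(void *key, uint8_t *blk), void *key,
    uint8_t *buf, size_t len, const uint8_t *iv, int blks, int encrypt)
{
	uint8_t prev[MAX_BLK], tmp[MAX_BLK];
	size_t off;
	int j;

	memcpy(prev, iv, blks);
	for (off = 0; off < len; off += blks) {
		if (encrypt) {
			/* XOR plaintext with previous ciphertext, then encrypt. */
			for (j = 0; j < blks; j++)
				buf[off + j] ^= prev[j];
			cipher(key, buf + off);
			memcpy(prev, buf + off, blks);
		} else {
			/* Save the ciphertext before it is decrypted in place. */
			memcpy(tmp, buf + off, blks);
			cipher(key, buf + off);
			for (j = 0; j < blks; j++)
				buf[off + j] ^= prev[j];
			memcpy(prev, tmp, blks);
		}
	}
}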
Example #4
static int
pasemi_process(device_t dev, struct cryptop *crp, int hint)
{

	int err = 0, ivsize, srclen = 0, reinit = 0, reinit_size = 0, chsel;
	struct pasemi_softc *sc = device_get_softc(dev);
	struct cryptodesc *crd1, *crd2, *maccrd, *enccrd;
	caddr_t ivp;
	struct pasemi_desc init_desc, work_desc;
	struct pasemi_session *ses;
	struct sk_buff *skb;
	struct uio *uiop;
	unsigned long flags;
	struct pasemi_fnu_txring *txring;

	DPRINTF("%s()\n", __FUNCTION__);

	if (crp == NULL || crp->crp_callback == NULL || sc == NULL)
		return -EINVAL;

	crp->crp_etype = 0;
	if (PASEMI_SESSION(crp->crp_sid) >= sc->sc_nsessions)
		return -EINVAL;

	ses = sc->sc_sessions[PASEMI_SESSION(crp->crp_sid)];

	crd1 = crp->crp_desc;
	if (crd1 == NULL) {
		err = -EINVAL;
		goto errout;
	}
	crd2 = crd1->crd_next;

	if (ALG_IS_SIG(crd1->crd_alg)) {
		maccrd = crd1;
		if (crd2 == NULL)
			enccrd = NULL;
		else if (ALG_IS_CIPHER(crd2->crd_alg) &&
			 (crd2->crd_flags & CRD_F_ENCRYPT) == 0)
			enccrd = crd2;
		else
			goto erralg;
	} else if (ALG_IS_CIPHER(crd1->crd_alg)) {
		enccrd = crd1;
		if (crd2 == NULL)
			maccrd = NULL;
		else if (ALG_IS_SIG(crd2->crd_alg) &&
			 (crd1->crd_flags & CRD_F_ENCRYPT))
			maccrd = crd2;
		else
			goto erralg;
	} else
		goto erralg;

	chsel = ses->chan;

	txring = &sc->tx[chsel];

	if (enccrd && !maccrd) {
		if (enccrd->crd_alg == CRYPTO_ARC4)
			reinit = 1;
		reinit_size = 0x40;
		srclen = crp->crp_ilen;

		pasemi_desc_start(&work_desc, XCT_FUN_O | XCT_FUN_I
				  | XCT_FUN_FUN(chsel));
		if (enccrd->crd_flags & CRD_F_ENCRYPT)
			pasemi_desc_hdr(&work_desc, XCT_FUN_CRM_ENC);
		else
			pasemi_desc_hdr(&work_desc, XCT_FUN_CRM_DEC);
	} else if (enccrd && maccrd) {
		if (enccrd->crd_alg == CRYPTO_ARC4)
			reinit = 1;
		reinit_size = 0x68;

		if (enccrd->crd_flags & CRD_F_ENCRYPT) {
			/* Encrypt -> Authenticate */
			pasemi_desc_start(&work_desc, XCT_FUN_O | XCT_FUN_I | XCT_FUN_CRM_ENC_SIG
					  | XCT_FUN_A | XCT_FUN_FUN(chsel));
			srclen = maccrd->crd_skip + maccrd->crd_len;
		} else {
			/* Authenticate -> Decrypt */
			pasemi_desc_start(&work_desc, XCT_FUN_O | XCT_FUN_I | XCT_FUN_CRM_SIG_DEC
					  | XCT_FUN_24BRES | XCT_FUN_FUN(chsel));
			pasemi_desc_build(&work_desc, 0);
			pasemi_desc_build(&work_desc, 0);
			pasemi_desc_build(&work_desc, 0);
			work_desc.postop = PASEMI_CHECK_SIG;
			srclen = crp->crp_ilen;
		}

		pasemi_desc_hdr(&work_desc, XCT_FUN_SHL(maccrd->crd_skip / 4));
		pasemi_desc_hdr(&work_desc, XCT_FUN_CHL(enccrd->crd_skip - maccrd->crd_skip));
	} else if (!enccrd && maccrd) {
		srclen = maccrd->crd_len;

		pasemi_desc_start(&init_desc,
				  XCT_CTRL_HDR(chsel, 0x58, DMA_FN_HKEY0));
		pasemi_desc_build(&init_desc,
				  XCT_FUN_SRC_PTR(0x58, ((struct pasemi_session *)ses->dma_addr)->hkey));

		pasemi_desc_start(&work_desc, XCT_FUN_O | XCT_FUN_I | XCT_FUN_CRM_SIG
				  | XCT_FUN_A | XCT_FUN_FUN(chsel));
	}

	if (enccrd) {
		switch (enccrd->crd_alg) {
		case CRYPTO_3DES_CBC:
			pasemi_desc_hdr(&work_desc, XCT_FUN_ALG_3DES |
					XCT_FUN_BCM_CBC);
			ivsize = sizeof(u64);
			break;
		case CRYPTO_DES_CBC:
			pasemi_desc_hdr(&work_desc, XCT_FUN_ALG_DES |
					XCT_FUN_BCM_CBC);
			ivsize = sizeof(u64);
			break;
		case CRYPTO_AES_CBC:
			pasemi_desc_hdr(&work_desc, XCT_FUN_ALG_AES |
					XCT_FUN_BCM_CBC);
			ivsize = 2 * sizeof(u64);
			break;
		case CRYPTO_ARC4:
			pasemi_desc_hdr(&work_desc, XCT_FUN_ALG_ARC);
			ivsize = 0;
			break;
		default:
			printk(DRV_NAME ": unimplemented enccrd->crd_alg %d\n",
			       enccrd->crd_alg);
			err = -EINVAL;
			goto errout;
		}

		ivp = (ivsize == sizeof(u64)) ? (caddr_t) &ses->civ[1] : (caddr_t) &ses->civ[0];
		if (enccrd->crd_flags & CRD_F_ENCRYPT) {
			if (enccrd->crd_flags & CRD_F_IV_EXPLICIT)
				memcpy(ivp, enccrd->crd_iv, ivsize);
			/* If IV is not present in the buffer already, it has to be copied there */
			if ((enccrd->crd_flags & CRD_F_IV_PRESENT) == 0)
				crypto_copyback(crp->crp_flags, crp->crp_buf,
						enccrd->crd_inject, ivsize, ivp);
		} else {
			if (enccrd->crd_flags & CRD_F_IV_EXPLICIT)
				/* IV is provided explicitly in the descriptor */
				memcpy(ivp, enccrd->crd_iv, ivsize);
			else
				/* IV is provided in the packet */
				crypto_copydata(crp->crp_flags, crp->crp_buf,
						enccrd->crd_inject, ivsize,
						ivp);
		}
	}

	if (maccrd) {
		switch (maccrd->crd_alg) {
		case CRYPTO_MD5:
			pasemi_desc_hdr(&work_desc, XCT_FUN_SIG_MD5 |
					XCT_FUN_HSZ((crp->crp_ilen - maccrd->crd_inject) / 4));
			break;
		case CRYPTO_SHA1:
			pasemi_desc_hdr(&work_desc, XCT_FUN_SIG_SHA1 |
					XCT_FUN_HSZ((crp->crp_ilen - maccrd->crd_inject) / 4));
			break;
		case CRYPTO_MD5_HMAC:
			pasemi_desc_hdr(&work_desc, XCT_FUN_SIG_HMAC_MD5 |
					XCT_FUN_HSZ((crp->crp_ilen - maccrd->crd_inject) / 4));
			break;
		case CRYPTO_SHA1_HMAC:
			pasemi_desc_hdr(&work_desc, XCT_FUN_SIG_HMAC_SHA1 |
					XCT_FUN_HSZ((crp->crp_ilen - maccrd->crd_inject) / 4));
			break;
		default:
			printk(DRV_NAME ": unimplemented maccrd->crd_alg %d\n",
			       maccrd->crd_alg);
			err = -EINVAL;
			goto errout;
		}
	}

	if (crp->crp_flags & CRYPTO_F_SKBUF) {
		/* using SKB buffers */
		skb = (struct sk_buff *)crp->crp_buf;
		if (skb_shinfo(skb)->nr_frags) {
			printk(DRV_NAME ": skb frags unimplemented\n");
			err = -EINVAL;
			goto errout;
		}
		pasemi_desc_build(
			&work_desc,
			XCT_FUN_DST_PTR(skb->len, pci_map_single(
						sc->dma_pdev, skb->data,
						skb->len, DMA_TO_DEVICE)));
		pasemi_desc_build(
			&work_desc,
			XCT_FUN_SRC_PTR(
				srclen, pci_map_single(
					sc->dma_pdev, skb->data,
					srclen, DMA_TO_DEVICE)));
		pasemi_desc_hdr(&work_desc, XCT_FUN_LLEN(srclen));
	} else if (crp->crp_flags & CRYPTO_F_IOV) {
		/* using IOV buffers */
		uiop = (struct uio *)crp->crp_buf;
		if (uiop->uio_iovcnt > 1) {
			printk(DRV_NAME ": iov frags unimplemented\n");
			err = -EINVAL;
			goto errout;
		}

		/* crp_olen is never set; always use crp_ilen */
		pasemi_desc_build(
			&work_desc,
			XCT_FUN_DST_PTR(crp->crp_ilen, pci_map_single(
						sc->dma_pdev,
						uiop->uio_iov->iov_base,
						crp->crp_ilen, DMA_TO_DEVICE)));
		pasemi_desc_hdr(&work_desc, XCT_FUN_LLEN(srclen));

		pasemi_desc_build(
			&work_desc,
			XCT_FUN_SRC_PTR(srclen, pci_map_single(
						sc->dma_pdev,
						uiop->uio_iov->iov_base,
						srclen, DMA_TO_DEVICE)));
	} else {
		/* using contig buffers */
		pasemi_desc_build(
			&work_desc,
			XCT_FUN_DST_PTR(crp->crp_ilen, pci_map_single(
						sc->dma_pdev,
						crp->crp_buf,
						crp->crp_ilen, DMA_TO_DEVICE)));
		pasemi_desc_build(
			&work_desc,
			XCT_FUN_SRC_PTR(srclen, pci_map_single(
						sc->dma_pdev,
						crp->crp_buf, srclen,
						DMA_TO_DEVICE)));
		pasemi_desc_hdr(&work_desc, XCT_FUN_LLEN(srclen));
	}

	spin_lock_irqsave(&txring->fill_lock, flags);

	if (txring->sesn != PASEMI_SESSION(crp->crp_sid)) {
		txring->sesn = PASEMI_SESSION(crp->crp_sid);
		reinit = 1;
	}

	if (enccrd) {
		pasemi_desc_start(&init_desc,
				  XCT_CTRL_HDR(chsel, reinit ? reinit_size : 0x10, DMA_FN_CIV0));
		pasemi_desc_build(&init_desc,
				  XCT_FUN_SRC_PTR(reinit ? reinit_size : 0x10, ses->dma_addr));
	}

	if (((txring->next_to_fill + pasemi_desc_size(&init_desc) +
	      pasemi_desc_size(&work_desc)) -
	     txring->next_to_clean) > TX_RING_SIZE) {
		spin_unlock_irqrestore(&txring->fill_lock, flags);
		err = ERESTART;
		goto errout;
	}

	pasemi_ring_add_desc(txring, &init_desc, NULL);
	pasemi_ring_add_desc(txring, &work_desc, crp);

	pasemi_ring_incr(sc, chsel,
			 pasemi_desc_size(&init_desc) +
			 pasemi_desc_size(&work_desc));

	spin_unlock_irqrestore(&txring->fill_lock, flags);

	mod_timer(&txring->crypto_timer, jiffies + TIMER_INTERVAL);

	return 0;

erralg:
	printk(DRV_NAME ": unsupported algorithm or algorithm order alg1 %d alg2 %d\n",
	       crd1->crd_alg, crd2->crd_alg);
	err = -EINVAL;

errout:
	if (err != ERESTART) {
		crp->crp_etype = err;
		crypto_done(crp);
	}
	return err;
}
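
The descriptor-pairing rules at the top of pasemi_process enforce encrypt-then-authenticate ordering: a MAC-first chain is accepted only when the cipher decrypts, and a cipher-first chain only when it encrypts. The same admission logic, distilled into a standalone check (ALG_IS_SIG and ALG_IS_CIPHER are the predicates the example already uses; the helper itself is hypothetical):

static int
pair_descriptors(struct cryptodesc *crd1, struct cryptodesc *crd2,
    struct cryptodesc **enccrd, struct cryptodesc **maccrd)
{
	*enccrd = *maccrd = NULL;
	if (ALG_IS_SIG(crd1->crd_alg)) {
		*maccrd = crd1;
		if (crd2 == NULL)
			return (0);
		/* MAC first is only legal when the cipher decrypts. */
		if (!ALG_IS_CIPHER(crd2->crd_alg) ||
		    (crd2->crd_flags & CRD_F_ENCRYPT) != 0)
			return (EINVAL);
		*enccrd = crd2;
	} else if (ALG_IS_CIPHER(crd1->crd_alg)) {
		*enccrd = crd1;
		if (crd2 == NULL)
			return (0);
		/* Cipher first is only legal when it encrypts. */
		if (!ALG_IS_SIG(crd2->crd_alg) ||
		    (crd1->crd_flags & CRD_F_ENCRYPT) == 0)
			return (EINVAL);
		*maccrd = crd2;
	} else
		return (EINVAL);
	return (0);
}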
Example #5
static int
talitos_process(device_t dev, struct cryptop *crp, int hint)
{
	int i, err = 0, ivsize;
	struct talitos_softc *sc = device_get_softc(dev);
	struct cryptodesc *crd1, *crd2, *maccrd, *enccrd;
	caddr_t iv;
	struct talitos_session *ses;
	struct talitos_desc *td;
	unsigned long flags;
	/* descriptor mappings */
	int hmac_key, hmac_data, cipher_iv, cipher_key,
		in_fifo, out_fifo, cipher_iv_out;
	static int chsel = -1;
	u_int32_t rand_iv[4];

	DPRINTF("%s()\n", __FUNCTION__);

	if (crp == NULL || crp->crp_callback == NULL || sc == NULL) {
		return EINVAL;
	}
	crp->crp_etype = 0;
	if (TALITOS_SESSION(crp->crp_sid) >= sc->sc_nsessions) {
		return EINVAL;
	}

	ses = &sc->sc_sessions[TALITOS_SESSION(crp->crp_sid)];

	/* enter the channel scheduler */
	spin_lock_irqsave(&sc->sc_chnfifolock[sc->sc_num_channels], flags);

	/* reuse channel that already had/has requests for the required EU */
	for (i = 0; i < sc->sc_num_channels; i++) {
		if (sc->sc_chnlastalg[i] == crp->crp_desc->crd_alg)
			break;
	}
	if (i == sc->sc_num_channels) {
		/*
		 * We haven't seen this algorithm in the last sc_num_channels
		 * (or more) requests, so fall back to round robin.
		 * NB: sc->sc_num_channels must be a power of 2.
		 */
		chsel = (chsel + 1) & (sc->sc_num_channels - 1);
	} else {
		/*
		 * A channel recently targeted the same execution unit;
		 * reuse that channel.
		 */
		chsel = i;
	}
	sc->sc_chnlastalg[chsel] = crp->crp_desc->crd_alg;

	/* release the channel scheduler lock */
	spin_unlock_irqrestore(&sc->sc_chnfifolock[sc->sc_num_channels], flags);

	/* acquire the selected channel fifo lock */
	spin_lock_irqsave(&sc->sc_chnfifolock[chsel], flags);

	/* find and reserve next available descriptor-cryptop pair */
	for (i = 0; i < sc->sc_chfifo_len; i++) {
		if (sc->sc_chnfifo[chsel][i].cf_desc.hdr == 0) {
			/*
			 * ensure correct descriptor formation by
			 * avoiding inadvertently setting "optional" entries
			 * e.g. not using "optional" dptr2 for MD/HMAC descs
			 */
			memset(&sc->sc_chnfifo[chsel][i].cf_desc,
				0, sizeof(*td));
			/* reserve it with done notification request bit */
			sc->sc_chnfifo[chsel][i].cf_desc.hdr |=
				TALITOS_DONE_NOTIFY;
			break;
		}
	}
	spin_unlock_irqrestore(&sc->sc_chnfifolock[chsel], flags);

	if (i == sc->sc_chfifo_len) {
		/* fifo full */
		err = ERESTART;
		goto errout;
	}

	td = &sc->sc_chnfifo[chsel][i].cf_desc;
	sc->sc_chnfifo[chsel][i].cf_crp = crp;

	crd1 = crp->crp_desc;
	if (crd1 == NULL) {
		err = EINVAL;
		goto errout;
	}
	crd2 = crd1->crd_next;
	/* prevent compiler warning */
	hmac_key = 0;
	hmac_data = 0;
	if (crd2 == NULL) {
		td->hdr |= TD_TYPE_COMMON_NONSNOOP_NO_AFEU;
		/* assign descriptor dword ptr mappings for this desc. type */
		cipher_iv = 1;
		cipher_key = 2;
		in_fifo = 3;
		cipher_iv_out = 5;
		if (crd1->crd_alg == CRYPTO_MD5_HMAC ||
		    crd1->crd_alg == CRYPTO_SHA1_HMAC ||
		    crd1->crd_alg == CRYPTO_SHA1 ||
		    crd1->crd_alg == CRYPTO_MD5) {
			out_fifo = 5;
			maccrd = crd1;
			enccrd = NULL;
		} else if (crd1->crd_alg == CRYPTO_DES_CBC ||
		    crd1->crd_alg == CRYPTO_3DES_CBC ||
		    crd1->crd_alg == CRYPTO_AES_CBC ||
		    crd1->crd_alg == CRYPTO_ARC4) {
			out_fifo = 4;
			maccrd = NULL;
			enccrd = crd1;
		} else {
			DPRINTF("UNKNOWN crd1->crd_alg %d\n", crd1->crd_alg);
			err = EINVAL;
			goto errout;
		}
	} else {
		if (sc->sc_desc_types & TALITOS_HAS_DT_IPSEC_ESP) {
			td->hdr |= TD_TYPE_IPSEC_ESP;
		} else {
			DPRINTF("unimplemented: multiple descriptor ipsec\n");
			err = EINVAL;
			goto errout;
		}
		/* assign descriptor dword ptr mappings for this desc. type */
		hmac_key = 0;
		hmac_data = 1;
		cipher_iv = 2;
		cipher_key = 3;
		in_fifo = 4;
		out_fifo = 5;
		cipher_iv_out = 6;
		if ((crd1->crd_alg == CRYPTO_MD5_HMAC ||
                     crd1->crd_alg == CRYPTO_SHA1_HMAC ||
                     crd1->crd_alg == CRYPTO_MD5 ||
                     crd1->crd_alg == CRYPTO_SHA1) &&
		    (crd2->crd_alg == CRYPTO_DES_CBC ||
		     crd2->crd_alg == CRYPTO_3DES_CBC ||
		     crd2->crd_alg == CRYPTO_AES_CBC ||
		     crd2->crd_alg == CRYPTO_ARC4) &&
		    ((crd2->crd_flags & CRD_F_ENCRYPT) == 0)) {
			maccrd = crd1;
			enccrd = crd2;
		} else if ((crd1->crd_alg == CRYPTO_DES_CBC ||
		     crd1->crd_alg == CRYPTO_ARC4 ||
		     crd1->crd_alg == CRYPTO_3DES_CBC ||
		     crd1->crd_alg == CRYPTO_AES_CBC) &&
		    (crd2->crd_alg == CRYPTO_MD5_HMAC ||
                     crd2->crd_alg == CRYPTO_SHA1_HMAC ||
                     crd2->crd_alg == CRYPTO_MD5 ||
                     crd2->crd_alg == CRYPTO_SHA1) &&
		    (crd1->crd_flags & CRD_F_ENCRYPT)) {
			enccrd = crd1;
			maccrd = crd2;
		} else {
			/* We cannot order the SEC as requested */
			printk("%s: cannot do the order\n",
					device_get_nameunit(sc->sc_cdev));
			err = EINVAL;
			goto errout;
		}
	}
	/* assign in_fifo and out_fifo based on input/output struct type */
	if (crp->crp_flags & CRYPTO_F_SKBUF) {
		/* using SKB buffers */
		struct sk_buff *skb = (struct sk_buff *)crp->crp_buf;
		if (skb_shinfo(skb)->nr_frags) {
			printk("%s: skb frags unimplemented\n",
					device_get_nameunit(sc->sc_cdev));
			err = EINVAL;
			goto errout;
		}
		td->ptr[in_fifo].ptr = dma_map_single(NULL, skb->data,
			skb->len, DMA_TO_DEVICE);
		td->ptr[in_fifo].len = skb->len;
		td->ptr[out_fifo].ptr = dma_map_single(NULL, skb->data,
			skb->len, DMA_TO_DEVICE);
		td->ptr[out_fifo].len = skb->len;
		td->ptr[hmac_data].ptr = dma_map_single(NULL, skb->data,
			skb->len, DMA_TO_DEVICE);
	} else if (crp->crp_flags & CRYPTO_F_IOV) {
		/* using IOV buffers */
		struct uio *uiop = (struct uio *)crp->crp_buf;
		if (uiop->uio_iovcnt > 1) {
			printk("%s: iov frags unimplemented\n",
					device_get_nameunit(sc->sc_cdev));
			err = EINVAL;
			goto errout;
		}
		td->ptr[in_fifo].ptr = dma_map_single(NULL,
			uiop->uio_iov->iov_base, crp->crp_ilen, DMA_TO_DEVICE);
		td->ptr[in_fifo].len = crp->crp_ilen;
		/* crp_olen is never set; always use crp_ilen */
		td->ptr[out_fifo].ptr = dma_map_single(NULL,
			uiop->uio_iov->iov_base,
			crp->crp_ilen, DMA_TO_DEVICE);
		td->ptr[out_fifo].len = crp->crp_ilen;
	} else {
		/* using contig buffers */
		td->ptr[in_fifo].ptr = dma_map_single(NULL,
			crp->crp_buf, crp->crp_ilen, DMA_TO_DEVICE);
		td->ptr[in_fifo].len = crp->crp_ilen;
		td->ptr[out_fifo].ptr = dma_map_single(NULL,
			crp->crp_buf, crp->crp_ilen, DMA_TO_DEVICE);
		td->ptr[out_fifo].len = crp->crp_ilen;
	}
	if (enccrd) {
		switch (enccrd->crd_alg) {
		case CRYPTO_3DES_CBC:
			td->hdr |= TALITOS_MODE0_DEU_3DES;
			/* FALLTHROUGH */
		case CRYPTO_DES_CBC:
			td->hdr |= TALITOS_SEL0_DEU
				|  TALITOS_MODE0_DEU_CBC;
			if (enccrd->crd_flags & CRD_F_ENCRYPT)
				td->hdr |= TALITOS_MODE0_DEU_ENC;
			ivsize = 2*sizeof(u_int32_t);
			DPRINTF("%cDES ses %d ch %d len %d\n",
				(td->hdr & TALITOS_MODE0_DEU_3DES)?'3':'1',
				(u32)TALITOS_SESSION(crp->crp_sid),
				chsel, td->ptr[in_fifo].len);
			break;
		case CRYPTO_AES_CBC:
			td->hdr |= TALITOS_SEL0_AESU
				|  TALITOS_MODE0_AESU_CBC;
			if (enccrd->crd_flags & CRD_F_ENCRYPT)
				td->hdr |= TALITOS_MODE0_AESU_ENC;
			ivsize = 4*sizeof(u_int32_t);
			DPRINTF("AES  ses %d ch %d len %d\n",
				(u32)TALITOS_SESSION(crp->crp_sid),
				chsel, td->ptr[in_fifo].len);
			break;
		default:
			printk("%s: unimplemented enccrd->crd_alg %d\n",
					device_get_nameunit(sc->sc_cdev), enccrd->crd_alg);
			err = EINVAL;
			goto errout;
		}
		/*
		 * Setup encrypt/decrypt state.  When using basic ops
		 * we can't use an inline IV because hash/crypt offset
		 * must be from the end of the IV to the start of the
		 * crypt data and this leaves out the preceding header
		 * from the hash calculation.  Instead we place the IV
		 * in the state record and set the hash/crypt offset to
		 * copy both the header+IV.
		 */
		if (enccrd->crd_flags & CRD_F_ENCRYPT) {
			td->hdr |= TALITOS_DIR_OUTBOUND;
			if (enccrd->crd_flags & CRD_F_IV_EXPLICIT)
				iv = enccrd->crd_iv;
			else
				read_random((iv = (caddr_t) rand_iv), sizeof(rand_iv));
			if ((enccrd->crd_flags & CRD_F_IV_PRESENT) == 0) {
				crypto_copyback(crp->crp_flags, crp->crp_buf,
				    enccrd->crd_inject, ivsize, iv);
			}
		} else {
			td->hdr |= TALITOS_DIR_INBOUND;
			if (enccrd->crd_flags & CRD_F_IV_EXPLICIT) {
				iv = enccrd->crd_iv;
			} else {
				iv = (caddr_t) rand_iv;
				crypto_copydata(crp->crp_flags, crp->crp_buf,
				    enccrd->crd_inject, ivsize, iv);
			}
		}
		td->ptr[cipher_iv].ptr = dma_map_single(NULL, iv, ivsize,
			DMA_TO_DEVICE);
		td->ptr[cipher_iv].len = ivsize;
		/*
		 * We don't need the cipher IV out length/pointer field to do
		 * ESP IPsec, so we set the len field to 0, which tells the
		 * SEC not to touch this len/ptr field.  Previously, when the
		 * length/pointer pointed at the IV, it gave us packet
		 * corruption.
		 */
		td->ptr[cipher_iv_out].len = 0;
	}
	if (enccrd && maccrd) {
		/* this is ipsec only for now */
		td->hdr |= TALITOS_SEL1_MDEU
			|  TALITOS_MODE1_MDEU_INIT
			|  TALITOS_MODE1_MDEU_PAD;
		switch (maccrd->crd_alg) {
			case	CRYPTO_MD5:
				td->hdr |= TALITOS_MODE1_MDEU_MD5;
				break;
			case	CRYPTO_MD5_HMAC:
				td->hdr |= TALITOS_MODE1_MDEU_MD5_HMAC;
				break;
			case	CRYPTO_SHA1:
				td->hdr |= TALITOS_MODE1_MDEU_SHA1;
				break;
			case	CRYPTO_SHA1_HMAC:
				td->hdr |= TALITOS_MODE1_MDEU_SHA1_HMAC;
				break;
			default:
				/* We cannot order the SEC as requested */
				printk("%s: cannot do the order\n",
						device_get_nameunit(sc->sc_cdev));
				err = EINVAL;
				goto errout;
		}
		if ((maccrd->crd_alg == CRYPTO_MD5_HMAC) ||
		   (maccrd->crd_alg == CRYPTO_SHA1_HMAC)) {
			/*
			 * The offset from hash data to the start of
			 * crypt data is the difference in the skips.
			 */
			/* ipsec only for now */
			td->ptr[hmac_key].ptr = dma_map_single(NULL,
				ses->ses_hmac, ses->ses_hmac_len, DMA_TO_DEVICE);
			td->ptr[hmac_key].len = ses->ses_hmac_len;
			td->ptr[in_fifo].ptr  += enccrd->crd_skip;
			td->ptr[in_fifo].len  =  enccrd->crd_len;
			td->ptr[out_fifo].ptr += enccrd->crd_skip;
			td->ptr[out_fifo].len =  enccrd->crd_len;
			/* bytes of HMAC to append to the ciphertext */
			td->ptr[out_fifo].extent =  ses->ses_mlen;
			td->ptr[hmac_data].ptr += maccrd->crd_skip;
			td->ptr[hmac_data].len = enccrd->crd_skip - maccrd->crd_skip;
		}
		if (enccrd->crd_flags & CRD_F_KEY_EXPLICIT) {
			printk("%s: CRD_F_KEY_EXPLICIT unimplemented\n",
					device_get_nameunit(sc->sc_cdev));
		}
	}
	if (!enccrd && maccrd) {
		/* single MD5 or SHA */
		td->hdr |= TALITOS_SEL0_MDEU
				|  TALITOS_MODE0_MDEU_INIT
				|  TALITOS_MODE0_MDEU_PAD;
		switch (maccrd->crd_alg) {
			case	CRYPTO_MD5:
				td->hdr |= TALITOS_MODE0_MDEU_MD5;
				DPRINTF("MD5  ses %d ch %d len %d\n",
					(u32)TALITOS_SESSION(crp->crp_sid),
					chsel, td->ptr[in_fifo].len);
				break;
			case	CRYPTO_MD5_HMAC:
				td->hdr |= TALITOS_MODE0_MDEU_MD5_HMAC;
				break;
			case	CRYPTO_SHA1:
				td->hdr |= TALITOS_MODE0_MDEU_SHA1;
				DPRINTF("SHA1 ses %d ch %d len %d\n",
					(u32)TALITOS_SESSION(crp->crp_sid),
					chsel, td->ptr[in_fifo].len);
				break;
			case	CRYPTO_SHA1_HMAC:
				td->hdr |= TALITOS_MODE0_MDEU_SHA1_HMAC;
				break;
			default:
				/* We cannot order the SEC as requested */
				DPRINTF("cannot do the order\n");
				err = EINVAL;
				goto errout;
		}

		if (crp->crp_flags & CRYPTO_F_IOV)
			td->ptr[out_fifo].ptr += maccrd->crd_inject;

		if ((maccrd->crd_alg == CRYPTO_MD5_HMAC) ||
		   (maccrd->crd_alg == CRYPTO_SHA1_HMAC)) {
			td->ptr[hmac_key].ptr = dma_map_single(NULL,
				ses->ses_hmac, ses->ses_hmac_len,
				DMA_TO_DEVICE);
			td->ptr[hmac_key].len = ses->ses_hmac_len;
		}
	} else {
		/* using process key (session data has duplicate) */
		td->ptr[cipher_key].ptr = dma_map_single(NULL,
			enccrd->crd_key, (enccrd->crd_klen + 7) / 8,
			DMA_TO_DEVICE);
		td->ptr[cipher_key].len = (enccrd->crd_klen + 7) / 8;
	}
	/* descriptor complete - GO! */
	return talitos_submit(sc, td, chsel);

errout:
	if (err != ERESTART) {
		crp->crp_etype = err;
		crypto_done(crp);
	}
	return err;
}
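
Two details of talitos_process are easy to miss. The round-robin fallback in the channel scheduler relies on sc_num_channels being a power of two, so the wrap is a mask rather than a modulo:

	/* equivalent to (chsel + 1) % nchan when nchan is a power of two */
	chsel = (chsel + 1) & (sc->sc_num_channels - 1);

And on decrypt without CRD_F_IV_EXPLICIT, the rand_iv scratch array doubles as the staging buffer into which crypto_copydata reads the packet's IV before it is DMA-mapped, so the same array serves both the random-IV and copied-IV paths.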
Example #6
int
aesni_cipher_process(struct aesni_session *ses, struct cryptodesc *enccrd,
    struct cryptop *crp)
{
	struct thread *td;
	uint8_t *buf;
	int error, allocated, saved_ctx;

	buf = aesni_cipher_alloc(enccrd, crp, &allocated);
	if (buf == NULL)
		return (ENOMEM);

	td = curthread;
	if (!is_fpu_kern_thread(0)) {
		error = fpu_kern_enter(td, ses->fpu_ctx, FPU_KERN_NORMAL);
		if (error != 0)
			goto out;
		saved_ctx = 1;
	} else {
		saved_ctx = 0;
		error = 0;
	}

	if ((enccrd->crd_flags & CRD_F_KEY_EXPLICIT) != 0) {
		error = aesni_cipher_setup_common(ses, enccrd->crd_key,
		    enccrd->crd_klen);
		if (error != 0)
			goto out;
	}

	if ((enccrd->crd_flags & CRD_F_ENCRYPT) != 0) {
		if ((enccrd->crd_flags & CRD_F_IV_EXPLICIT) != 0)
			bcopy(enccrd->crd_iv, ses->iv, AES_BLOCK_LEN);
		if ((enccrd->crd_flags & CRD_F_IV_PRESENT) == 0)
			crypto_copyback(crp->crp_flags, crp->crp_buf,
			    enccrd->crd_inject, AES_BLOCK_LEN, ses->iv);
		if (ses->algo == CRYPTO_AES_CBC) {
			aesni_encrypt_cbc(ses->rounds, ses->enc_schedule,
			    enccrd->crd_len, buf, buf, ses->iv);
		} else /* if (ses->algo == CRYPTO_AES_XTS) */ {
			aesni_encrypt_xts(ses->rounds, ses->enc_schedule,
			    ses->xts_schedule, enccrd->crd_len, buf, buf,
			    ses->iv);
		}
	} else {
		if ((enccrd->crd_flags & CRD_F_IV_EXPLICIT) != 0)
			bcopy(enccrd->crd_iv, ses->iv, AES_BLOCK_LEN);
		else
			crypto_copydata(crp->crp_flags, crp->crp_buf,
			    enccrd->crd_inject, AES_BLOCK_LEN, ses->iv);
		if (ses->algo == CRYPTO_AES_CBC) {
			aesni_decrypt_cbc(ses->rounds, ses->dec_schedule,
			    enccrd->crd_len, buf, ses->iv);
		} else /* if (ses->algo == CRYPTO_AES_XTS) */ {
			aesni_decrypt_xts(ses->rounds, ses->dec_schedule,
			    ses->xts_schedule, enccrd->crd_len, buf, buf,
			    ses->iv);
		}
	}
	if (saved_ctx)
		fpu_kern_leave(td, ses->fpu_ctx);
	if (allocated)
		crypto_copyback(crp->crp_flags, crp->crp_buf, enccrd->crd_skip,
		    enccrd->crd_len, buf);
	if ((enccrd->crd_flags & CRD_F_ENCRYPT) != 0)
		crypto_copydata(crp->crp_flags, crp->crp_buf,
		    enccrd->crd_skip + enccrd->crd_len - AES_BLOCK_LEN,
		    AES_BLOCK_LEN, ses->iv);
 out:
	if (allocated) {
		bzero(buf, enccrd->crd_len);
		free(buf, M_AESNI);
	}
	return (error);
}
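
Compared with Example #1, this version adds a CRD_F_KEY_EXPLICIT path that reschedules the AES key per request, and it guards the FPU context switch: a thread that is already an FPU kernel thread skips the fpu_kern_enter/fpu_kern_leave pair. The guard reduces to this pattern (a restatement of the control flow above, not a new API):

	saved_ctx = 0;
	if (!is_fpu_kern_thread(0)) {
		/* Borrow the FPU for this thread; this can fail under pressure. */
		error = fpu_kern_enter(td, ses->fpu_ctx, FPU_KERN_NORMAL);
		if (error != 0)
			goto out;
		saved_ctx = 1;
	}
	/* ... AES-NI work ... */
	if (saved_ctx)
		fpu_kern_leave(td, ses->fpu_ctx);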
Example #7
/*
 * Process a request.
 */
static int
cryptocteon_process(device_t dev, struct cryptop *crp, int hint)
{
	struct cryptodesc *crd;
	struct octo_sess *od;
	u_int32_t lid;
	size_t iovcnt, iovlen;
	struct mbuf *m = NULL;
	struct uio *uiop = NULL;
	struct cryptodesc *enccrd = NULL, *maccrd = NULL;
	unsigned char *ivp = NULL;
	unsigned char iv_data[HASH_MAX_LEN];
	int auth_off = 0, auth_len = 0, crypt_off = 0, crypt_len = 0, icv_off = 0;
	struct cryptocteon_softc *sc;

	sc = device_get_softc(dev);

	if (sc == NULL || crp == NULL)
		return EINVAL;

	crp->crp_etype = 0;

	if (crp->crp_desc == NULL || crp->crp_buf == NULL) {
		dprintf("%s,%d: EINVAL\n", __FILE__, __LINE__);
		crp->crp_etype = EINVAL;
		goto done;
	}

	lid = crp->crp_sid & 0xffffffff;
	if (lid >= sc->sc_sesnum || lid == 0 || sc->sc_sessions == NULL ||
	    sc->sc_sessions[lid] == NULL) {
		crp->crp_etype = ENOENT;
		dprintf("%s,%d: ENOENT\n", __FILE__, __LINE__);
		goto done;
	}
	od = sc->sc_sessions[lid];

	/*
	 * Do some error checking outside of the loop for mbuf and IOV
	 * processing; this leaves us with valid m or uiop pointers for later.
	 */
	if (crp->crp_flags & CRYPTO_F_IMBUF) {
		unsigned frags;

		m = (struct mbuf *) crp->crp_buf;
		for (frags = 0; m != NULL; frags++)
			m = m->m_next;

		if (frags >= UIO_MAXIOV) {
			printf("%s,%d: %d frags > UIO_MAXIOV\n", __FILE__,
			       __LINE__, frags);
			crp->crp_etype = EINVAL;
			goto done;
		}

		m = (struct mbuf *) crp->crp_buf;
	} else if (crp->crp_flags & CRYPTO_F_IOV) {
		uiop = (struct uio *) crp->crp_buf;
		if (uiop->uio_iovcnt > UIO_MAXIOV) {
			printf("%s,%d: %d uio_iovcnt > UIO_MAXIOV\n", __FILE__,
			       __LINE__, uiop->uio_iovcnt);
			crp->crp_etype = EINVAL;
			goto done;
		}
	}

	/* point our enccrd and maccrd appropriately */
	crd = crp->crp_desc;
	if (crd->crd_alg == od->octo_encalg) enccrd = crd;
	if (crd->crd_alg == od->octo_macalg) maccrd = crd;
	crd = crd->crd_next;
	if (crd) {
		if (crd->crd_alg == od->octo_encalg) enccrd = crd;
		if (crd->crd_alg == od->octo_macalg) maccrd = crd;
		crd = crd->crd_next;
	}
	if (crd) {
		crp->crp_etype = EINVAL;
		dprintf("%s,%d: EINVAL - descriptors do not match session\n",
				__FILE__, __LINE__);
		goto done;
	}

	if (enccrd) {
		if (enccrd->crd_flags & CRD_F_IV_EXPLICIT) {
			ivp = enccrd->crd_iv;
		} else {
			ivp = iv_data;
			crypto_copydata(crp->crp_flags, crp->crp_buf,
					enccrd->crd_inject, od->octo_ivsize, (caddr_t) ivp);
		}

		if (maccrd) {
			auth_off = maccrd->crd_skip;
			auth_len = maccrd->crd_len;
			icv_off  = maccrd->crd_inject;
		}

		crypt_off = enccrd->crd_skip;
		crypt_len = enccrd->crd_len;
	} else { /* if (maccrd) */
		auth_off = maccrd->crd_skip;
		auth_len = maccrd->crd_len;
		icv_off  = maccrd->crd_inject;
	}

	/*
	 * setup the I/O vector to cover the buffer
	 */
	if (crp->crp_flags & CRYPTO_F_IMBUF) {
		iovcnt = 0;
		iovlen = 0;

		while (m != NULL) {
			od->octo_iov[iovcnt].iov_base = mtod(m, void *);
			od->octo_iov[iovcnt].iov_len = m->m_len;

			m = m->m_next;
			iovlen += od->octo_iov[iovcnt++].iov_len;
		}
	} else if (crp->crp_flags & CRYPTO_F_IOV) {
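		/* ... excerpt ends here ... */

The mbuf walk just above is the standard chain-to-iovec flattening, bounded earlier by the UIO_MAXIOV frag check. As a standalone sketch (the helper is hypothetical; the example stores into od->octo_iov instead of a caller-supplied array):

static size_t
mbuf_to_iov(struct mbuf *m, struct iovec *iov, size_t maxcnt, size_t *totlen)
{
	size_t cnt;

	*totlen = 0;
	for (cnt = 0; m != NULL && cnt < maxcnt; m = m->m_next) {
		iov[cnt].iov_base = mtod(m, void *);
		iov[cnt].iov_len = m->m_len;
		*totlen += m->m_len;
		cnt++;
	}
	return (cnt);
}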
Example #8
/* Name        : icp_ocfDrvProcessDataSetup
 *
 * Description : This function will setup all the cryptographic operation data
 *               that is required by LAC to execute the operation.
 */
static int icp_ocfDrvProcessDataSetup(struct icp_drvOpData *drvOpData,
				      struct cryptodesc *crp_desc)
{
	CpaCyRandGenOpData randGenOpData;
	CpaFlatBuffer randData;

	drvOpData->lacOpData.packetType = CPA_CY_SYM_PACKET_TYPE_FULL;

	/* Convert from the cryptop to the ICP LAC crypto parameters */
	switch (crp_desc->crd_alg) {
	case CRYPTO_NULL_CBC:
		drvOpData->lacOpData.
		    cryptoStartSrcOffsetInBytes = crp_desc->crd_skip;
		drvOpData->lacOpData.
		    messageLenToCipherInBytes = crp_desc->crd_len;
		drvOpData->verifyResult = CPA_FALSE;
		drvOpData->lacOpData.ivLenInBytes = NULL_BLOCK_LEN;
		break;
	case CRYPTO_DES_CBC:
		drvOpData->lacOpData.
		    cryptoStartSrcOffsetInBytes = crp_desc->crd_skip;
		drvOpData->lacOpData.
		    messageLenToCipherInBytes = crp_desc->crd_len;
		drvOpData->verifyResult = CPA_FALSE;
		drvOpData->lacOpData.ivLenInBytes = DES_BLOCK_LEN;
		break;
	case CRYPTO_3DES_CBC:
		drvOpData->lacOpData.
		    cryptoStartSrcOffsetInBytes = crp_desc->crd_skip;
		drvOpData->lacOpData.
		    messageLenToCipherInBytes = crp_desc->crd_len;
		drvOpData->verifyResult = CPA_FALSE;
		drvOpData->lacOpData.ivLenInBytes = DES3_BLOCK_LEN;
		break;
	case CRYPTO_ARC4:
		drvOpData->lacOpData.
		    cryptoStartSrcOffsetInBytes = crp_desc->crd_skip;
		drvOpData->lacOpData.
		    messageLenToCipherInBytes = crp_desc->crd_len;
		drvOpData->verifyResult = CPA_FALSE;
		drvOpData->lacOpData.ivLenInBytes = ARC4_COUNTER_LEN;
		break;
	case CRYPTO_AES_CBC:
		drvOpData->lacOpData.
		    cryptoStartSrcOffsetInBytes = crp_desc->crd_skip;
		drvOpData->lacOpData.
		    messageLenToCipherInBytes = crp_desc->crd_len;
		drvOpData->verifyResult = CPA_FALSE;
		drvOpData->lacOpData.ivLenInBytes = RIJNDAEL128_BLOCK_LEN;
		break;
	case CRYPTO_SHA1:
	case CRYPTO_SHA1_HMAC:
	case CRYPTO_SHA2_256:
	case CRYPTO_SHA2_256_HMAC:
	case CRYPTO_SHA2_384:
	case CRYPTO_SHA2_384_HMAC:
	case CRYPTO_SHA2_512:
	case CRYPTO_SHA2_512_HMAC:
	case CRYPTO_MD5:
	case CRYPTO_MD5_HMAC:
		drvOpData->lacOpData.
		    hashStartSrcOffsetInBytes = crp_desc->crd_skip;
		drvOpData->lacOpData.
		    messageLenToHashInBytes = crp_desc->crd_len;
		drvOpData->lacOpData.
		    pDigestResult =
		    icp_ocfDrvDigestPointerFind(drvOpData, crp_desc);

		if (NULL == drvOpData->lacOpData.pDigestResult) {
			DPRINTK("%s(): ERROR - could not calculate "
				"Digest Result memory address\n", __FUNCTION__);
			return ICP_OCF_DRV_STATUS_FAIL;
		}

		drvOpData->lacOpData.digestVerify = CPA_FALSE;
		break;
	default:
		DPRINTK("%s(): Crypto process error - algorithm not "
			"found \n", __FUNCTION__);
		return ICP_OCF_DRV_STATUS_FAIL;
	}

	/* Figure out what the IV is supposed to be */
	if ((crp_desc->crd_alg == CRYPTO_DES_CBC) ||
	    (crp_desc->crd_alg == CRYPTO_3DES_CBC) ||
	    (crp_desc->crd_alg == CRYPTO_AES_CBC)) {
		/* ARC4 doesn't use an IV */
		if (crp_desc->crd_flags & CRD_F_IV_EXPLICIT) {
			/* Explicit IV provided to OCF */
			drvOpData->lacOpData.pIv = crp_desc->crd_iv;
		} else {
			/* IV is not explicitly provided to OCF */

			/* Point the LAC OP Data IV pointer to our allocated
			   storage location for this session. */
			drvOpData->lacOpData.pIv = drvOpData->ivData;

			if ((crp_desc->crd_flags & CRD_F_ENCRYPT) &&
			    ((crp_desc->crd_flags & CRD_F_IV_PRESENT) == 0)) {

				/* Encrypting - need to create IV */
				randGenOpData.generateBits = CPA_TRUE;
				randGenOpData.lenInBytes = MAX_IV_LEN_IN_BYTES;

				icp_ocfDrvPtrAndLenToFlatBuffer((Cpa8U *)
								drvOpData->
								ivData,
								MAX_IV_LEN_IN_BYTES,
								&randData);

				if (CPA_STATUS_SUCCESS !=
				    cpaCyRandGen(CPA_INSTANCE_HANDLE_SINGLE,
						 NULL, NULL,
						 &randGenOpData, &randData)) {
					DPRINTK("%s(): ERROR - Failed to"
						" generate"
						" Initialisation Vector\n",
						__FUNCTION__);
					return ICP_OCF_DRV_STATUS_FAIL;
				}

				crypto_copyback(drvOpData->crp->
						crp_flags,
						drvOpData->crp->crp_buf,
						crp_desc->crd_inject,
						drvOpData->lacOpData.
						ivLenInBytes,
						(caddr_t) (drvOpData->lacOpData.
							   pIv));
			} else {
				/* Reading IV from buffer */
				crypto_copydata(drvOpData->crp->
						crp_flags,
						drvOpData->crp->crp_buf,
						crp_desc->crd_inject,
						drvOpData->lacOpData.
						ivLenInBytes,
						(caddr_t) (drvOpData->lacOpData.
							   pIv));
			}

		}

	}

	return ICP_OCF_DRV_STATUS_SUCCESS;
}
Example #9
File: nlmsec.c Project: coyizumi/cs111
/* This function is called from an interrupt handler */
void
nlm_xlpsec_msgring_handler(int vc, int size, int code, int src_id,
    struct nlm_fmn_msg *msg, void *data)
{
	struct xlp_sec_command *cmd = NULL;
	struct xlp_sec_softc *sc = NULL;
	struct cryptodesc *crd = NULL;
	unsigned int ivlen = 0;

	KASSERT(code == FMN_SWCODE_CRYPTO,
	    ("%s: bad code = %d, expected code = %d\n", __FUNCTION__,
	    code, FMN_SWCODE_CRYPTO));

	sc = (struct xlp_sec_softc *)data;
	KASSERT(src_id >= sc->sec_vc_start && src_id <= sc->sec_vc_end,
	    ("%s: bad src_id = %d, expect %d - %d\n", __FUNCTION__,
	    src_id, sc->sec_vc_start, sc->sec_vc_end));

	cmd = (struct xlp_sec_command *)(uintptr_t)msg->msg[0];
	KASSERT(cmd != NULL && cmd->crp != NULL,
	    ("%s: cmd not received properly\n", __FUNCTION__));

	KASSERT(CRYPTO_ERROR(msg->msg[1]) == 0,
	    ("%s: Message rcv msg0 %llx msg1 %llx err %x \n", __FUNCTION__,
	    (unsigned long long)msg->msg[0], (unsigned long long)msg->msg[1],
	    (int)CRYPTO_ERROR(msg->msg[1])));

	crd = cmd->enccrd;
	/*
	 * Copy the last 8 or 16 bytes into the session IV so that, when the
	 * next request supplies no explicit IV, it can chain from this one.
	 */
	if (crd != NULL) {
		if ((crd->crd_alg == CRYPTO_DES_CBC ||
		    crd->crd_alg == CRYPTO_3DES_CBC ||
		    crd->crd_alg == CRYPTO_AES_CBC) &&
		    (crd->crd_flags & CRD_F_ENCRYPT)) {
			ivlen = ((crd->crd_alg == CRYPTO_AES_CBC) ?
			    XLP_SEC_AES_IV_LENGTH : XLP_SEC_DES_IV_LENGTH);
			crypto_copydata(cmd->crp->crp_flags, cmd->crp->crp_buf,
			    crd->crd_skip + crd->crd_len - ivlen, ivlen,
			    sc->sc_sessions[cmd->session_num].ses_iv);
		}
	}

	/*
	 * If there are not enough credits, the send request fails with
	 * ERESTART and the driver stays blocked; unblock it here once
	 * enough credits have been returned to send the request again.
	 */
	if (sc->sc_needwakeup) {
		atomic_add_int(&creditleft, sc->sec_msgsz);
		if (creditleft >= (NLM_CRYPTO_LEFT_REQS)) {
			crypto_unblock(sc->sc_cid, sc->sc_needwakeup);
			sc->sc_needwakeup &= (~(CRYPTO_SYMQ | CRYPTO_ASYMQ));
		}
	}
	if (cmd->maccrd) {
		crypto_copyback(cmd->crp->crp_flags,
		    cmd->crp->crp_buf, cmd->maccrd->crd_inject,
		    cmd->hash_dst_len, cmd->hashdest);
	}

	/* This indicates completion of the crypto operation */
	crypto_done(cmd->crp);

	xlp_free_cmd_params(cmd);

	return;
}
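
Example #9 is the completion side of the ERESTART flow control that Examples #4 and #5 use on submit: a submit path that finds its ring full returns ERESTART, and the interrupt handler later returns credits and calls crypto_unblock so the framework requeues the deferred requests. The contract, reduced to its two halves (a sketch; ring_full and the submit-side bookkeeping are assumed, only the completion side appears above):

	/* Submit side: no room in the ring, ask OCF to hold the queue. */
	if (ring_full(sc)) {
		sc->sc_needwakeup |= CRYPTO_SYMQ;
		return (ERESTART);
	}

	/* Completion side: credits are back, let the queue drain. */
	if (sc->sc_needwakeup) {
		crypto_unblock(sc->sc_cid, sc->sc_needwakeup);
		sc->sc_needwakeup &= ~(CRYPTO_SYMQ | CRYPTO_ASYMQ);
	}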