/* go through all channels descriptors, notifying OCF what's been done */ static void talitos_doneprocessing(struct talitos_softc *sc) { unsigned long flags; int i, j; /* go through descriptors looking for done bits */ for (i = 0; i < sc->sc_num_channels; i++) { spin_lock_irqsave(&sc->sc_chnfifolock[i], flags); for (j = 0; j < sc->sc_chfifo_len; j++) { /* descriptor has done bits set? */ if ((sc->sc_chnfifo[i][j].cf_desc.hdr & TALITOS_HDR_DONE_BITS) == TALITOS_HDR_DONE_BITS) { /* notify ocf */ crypto_done(sc->sc_chnfifo[i][j].cf_crp); /* and tag it available again * * memset to ensure correct descriptor formation by * avoiding inadvertently setting "optional" entries * e.g. not using "optional" dptr2 MD/HMAC processing */ memset(&sc->sc_chnfifo[i][j].cf_desc, 0, sizeof(struct talitos_desc)); } } spin_unlock_irqrestore(&sc->sc_chnfifolock[i], flags); } return; }
/* go through all channels descriptors, notifying OCF what has * _and_hasn't_ successfully completed and reset the device * (otherwise it's up to decoding desc hdrs!) */ static void talitos_errorprocessing(struct talitos_softc *sc) { unsigned long flags; int i, j; /* disable further scheduling until under control */ spin_lock_irqsave(&sc->sc_chnfifolock[sc->sc_num_channels], flags); if (debug) dump_talitos_status(sc); /* go through descriptors, try and salvage those successfully done, * and EIO those that weren't */ for (i = 0; i < sc->sc_num_channels; i++) { spin_lock_irqsave(&sc->sc_chnfifolock[i], flags); for (j = 0; j < sc->sc_chfifo_len; j++) { if (sc->sc_chnfifo[i][j].cf_desc.hdr) { if ((sc->sc_chnfifo[i][j].cf_desc.hdr & TALITOS_HDR_DONE_BITS) != TALITOS_HDR_DONE_BITS) { /* this one didn't finish */ /* signify in crp->etype */ sc->sc_chnfifo[i][j].cf_crp->crp_etype = EIO; } } else continue; /* free entry */ /* either way, notify ocf */ crypto_done(sc->sc_chnfifo[i][j].cf_crp); /* and tag it available again * * memset to ensure correct descriptor formation by * avoiding inadvertently setting "optional" entries * e.g. not using "optional" dptr2 MD/HMAC processing */ memset(&sc->sc_chnfifo[i][j].cf_desc, 0, sizeof(struct talitos_desc)); } spin_unlock_irqrestore(&sc->sc_chnfifolock[i], flags); } /* reset and initialize the SEC h/w device */ talitos_reset_device(sc); talitos_init_device(sc); #ifdef CONFIG_OCF_RANDOMHARVEST if (sc->sc_exec_units & TALITOS_HAS_EU_RNG) talitos_rng_init(sc); #endif /* Okay. Stand by. */ spin_unlock_irqrestore(&sc->sc_chnfifolock[sc->sc_num_channels], flags); return; }
int via_padlock_crypto_process(void *arg, struct cryptop *crp, int hint) { struct via_padlock_softc *sc = arg; struct via_padlock_session *ses; struct cryptodesc *crd; int sesn, err = 0; KASSERT(sc != NULL /*, ("via_padlock_crypto_process: null softc")*/); if (crp == NULL || crp->crp_callback == NULL) { err = EINVAL; goto out; } sesn = VIAC3_SESSION(crp->crp_sid); if (sesn >= sc->sc_nsessions) { err = EINVAL; goto out; } ses = &sc->sc_sessions[sesn]; for (crd = crp->crp_desc; crd; crd = crd->crd_next) { switch (crd->crd_alg) { case CRYPTO_AES_CBC: if ((err = via_padlock_crypto_encdec(crp, crd, ses, sc, crp->crp_buf)) != 0) goto out; break; case CRYPTO_MD5_HMAC: case CRYPTO_SHA1_HMAC: case CRYPTO_RIPEMD160_HMAC: case CRYPTO_SHA2_HMAC: if ((err = via_padlock_crypto_swauth(crp, crd, ses->swd, crp->crp_buf)) != 0) goto out; break; default: err = EINVAL; goto out; } } out: crp->crp_etype = err; crypto_done(crp); return (err); }
//
// Main daemon run loop: start the worker threads, drive the event
// timer until shutdown is signalled, then tear everything down.
// NOTE(review): the startup and teardown ordering here (network ->
// pfkey -> client/server threads; sockets -> ikes -> log -> crypto)
// appears deliberate — confirm before reordering anything.
//
void _IKED::loop()
{
	//
	// start our ike network thread
	//

	ith_nwork.exec( this );

	//
	// start our ike pfkey thread
	//

	ith_pfkey.exec( this );

	//
	// start our ike client / server thread
	//

	ith_ikes.exec( this );

	//
	// enter event timer loop (blocks until the daemon is stopping)
	//

	ith_timer.run();

	//
	// wait for all threads to exit (-1: no timeout)
	//

	cond_run.wait( -1 );

	//
	// cleanup
	//

	socket_done();
	ikes.done();
	log.close();

	//
	// cleanup openssl libcrypto
	//

	crypto_done();
}
/*
 * Process a request on the null driver: validate it, then complete
 * it immediately via crypto_done() — no transform is performed.
 */
static int null_process(device_t arg, struct cryptop *crp, int hint)
{
	unsigned int sid;

	dprintk("%s()\n", __FUNCTION__);

	/* without a cryptop there is nothing we can even complete */
	if (crp == NULL) {
		dprintk("%s,%d: EINVAL\n", __FILE__, __LINE__);
		return EINVAL;
	}

	crp->crp_etype = 0;

	/* a usable request needs both a descriptor chain and a buffer */
	if (crp->crp_desc == NULL || crp->crp_buf == NULL) {
		dprintk("%s,%d: EINVAL\n", __FILE__, __LINE__);
		crp->crp_etype = EINVAL;
	} else {
		/* the low 32 bits of the sid select the session */
		sid = crp->crp_sid & 0xffffffff;
		if (sid == 0 || sid >= null_sesnum) {
			crp->crp_etype = ENOENT;
			dprintk("%s,%d: ENOENT\n", __FILE__, __LINE__);
		}
	}

	/* null driver: complete the request right away */
	crypto_done(crp);
	return 0;
}
static int pasemi_clean_tx(struct pasemi_softc *sc, int chan) { int i, j, ring_idx; struct pasemi_fnu_txring *ring = &sc->tx[chan]; u16 delta_cnt; int flags, loops = 10; int desc_size; struct cryptop *crp; spin_lock_irqsave(&ring->clean_lock, flags); while ((delta_cnt = (dma_status->tx_sta[sc->base_chan + chan] & PAS_STATUS_PCNT_M) - ring->total_pktcnt) && loops--) { for (i = 0; i < delta_cnt; i++) { desc_size = TX_DESC_INFO(ring, ring->next_to_clean).desc_size; crp = TX_DESC_INFO(ring, ring->next_to_clean).cf_crp; if (crp) { ring_idx = 2 * (ring->next_to_clean & (TX_RING_SIZE-1)); if (TX_DESC_INFO(ring, ring->next_to_clean).desc_postop & PASEMI_CHECK_SIG) { /* Need to make sure signature matched, * if not - return error */ if (!(ring->desc[ring_idx + 1] & (1ULL << 63))) crp->crp_etype = -EINVAL; } crypto_done(TX_DESC_INFO(ring, ring->next_to_clean).cf_crp); TX_DESC_INFO(ring, ring->next_to_clean).cf_crp = NULL; pci_unmap_single( sc->dma_pdev, XCT_PTR_ADDR_LEN(ring->desc[ring_idx + 1]), PCI_DMA_TODEVICE); ring->desc[ring_idx] = ring->desc[ring_idx + 1] = 0; ring->next_to_clean++; for (j = 1; j < desc_size; j++) { ring_idx = 2 * (ring->next_to_clean & (TX_RING_SIZE-1)); pci_unmap_single( sc->dma_pdev, XCT_PTR_ADDR_LEN(ring->desc[ring_idx]), PCI_DMA_TODEVICE); if (ring->desc[ring_idx + 1]) pci_unmap_single( sc->dma_pdev, XCT_PTR_ADDR_LEN( ring->desc[ ring_idx + 1]), PCI_DMA_TODEVICE); ring->desc[ring_idx] = ring->desc[ring_idx + 1] = 0; ring->next_to_clean++; } } else { for (j = 0; j < desc_size; j++) { ring_idx = 2 * (ring->next_to_clean & (TX_RING_SIZE-1)); ring->desc[ring_idx] = ring->desc[ring_idx + 1] = 0; ring->next_to_clean++; } } } ring->total_pktcnt += delta_cnt; } spin_unlock_irqrestore(&ring->clean_lock, flags); return 0; }
/*
 * Queue one OCF request to the PA Semi DMA function unit.
 *
 * Classifies the descriptor chain into cipher and/or MAC work, builds
 * an init descriptor (key/IV state load) and a work descriptor, maps
 * the request buffer for DMA and posts both to the session's TX ring.
 * On validation failure the request is completed immediately with a
 * negative crp_etype; on a full ring ERESTART is returned without
 * completing the request so OCF can requeue it.
 */
static int pasemi_process(device_t dev, struct cryptop *crp, int hint)
{
	int err = 0, ivsize, srclen = 0, reinit = 0, reinit_size = 0, chsel;
	struct pasemi_softc *sc = device_get_softc(dev);
	struct cryptodesc *crd1, *crd2, *maccrd, *enccrd;
	caddr_t ivp;
	struct pasemi_desc init_desc, work_desc;
	struct pasemi_session *ses;
	struct sk_buff *skb;
	struct uio *uiop;
	unsigned long flags;
	struct pasemi_fnu_txring *txring;

	DPRINTF("%s()\n", __FUNCTION__);

	/* basic request sanity */
	if (crp == NULL || crp->crp_callback == NULL || sc == NULL)
		return -EINVAL;

	crp->crp_etype = 0;
	if (PASEMI_SESSION(crp->crp_sid) >= sc->sc_nsessions)
		return -EINVAL;

	ses = sc->sc_sessions[PASEMI_SESSION(crp->crp_sid)];

	crd1 = crp->crp_desc;
	if (crd1 == NULL) {
		err = -EINVAL;
		goto errout;
	}
	crd2 = crd1->crd_next;

	/*
	 * Classify the chain: sig-first implies authenticate->decrypt,
	 * cipher-first with ENCRYPT implies encrypt->authenticate.
	 * Anything else is an ordering the engine cannot do.
	 */
	if (ALG_IS_SIG(crd1->crd_alg)) {
		maccrd = crd1;
		if (crd2 == NULL)
			enccrd = NULL;
		else if (ALG_IS_CIPHER(crd2->crd_alg) &&
			 (crd2->crd_flags & CRD_F_ENCRYPT) == 0)
			enccrd = crd2;
		else
			goto erralg;
	} else if (ALG_IS_CIPHER(crd1->crd_alg)) {
		enccrd = crd1;
		if (crd2 == NULL)
			maccrd = NULL;
		else if (ALG_IS_SIG(crd2->crd_alg) &&
			 (crd1->crd_flags & CRD_F_ENCRYPT))
			maccrd = crd2;
		else
			goto erralg;
	} else
		goto erralg;

	/* the session is pinned to one channel */
	chsel = ses->chan;

	txring = &sc->tx[chsel];

	if (enccrd && !maccrd) {
		/* cipher only; ARC4 needs its full state reloaded */
		if (enccrd->crd_alg == CRYPTO_ARC4)
			reinit = 1;
		reinit_size = 0x40;
		srclen = crp->crp_ilen;

		pasemi_desc_start(&work_desc, XCT_FUN_O | XCT_FUN_I
				  | XCT_FUN_FUN(chsel));
		if (enccrd->crd_flags & CRD_F_ENCRYPT)
			pasemi_desc_hdr(&work_desc, XCT_FUN_CRM_ENC);
		else
			pasemi_desc_hdr(&work_desc, XCT_FUN_CRM_DEC);
	} else if (enccrd && maccrd) {
		/* combined cipher + MAC */
		if (enccrd->crd_alg == CRYPTO_ARC4)
			reinit = 1;
		reinit_size = 0x68;

		if (enccrd->crd_flags & CRD_F_ENCRYPT) {
			/* Encrypt -> Authenticate */
			pasemi_desc_start(&work_desc, XCT_FUN_O | XCT_FUN_I
					  | XCT_FUN_CRM_ENC_SIG
					  | XCT_FUN_A | XCT_FUN_FUN(chsel));
			srclen = maccrd->crd_skip + maccrd->crd_len;
		} else {
			/* Authenticate -> Decrypt */
			pasemi_desc_start(&work_desc, XCT_FUN_O | XCT_FUN_I
					  | XCT_FUN_CRM_SIG_DEC
					  | XCT_FUN_24BRES
					  | XCT_FUN_FUN(chsel));
			/* three result dwords for the 24-byte signature */
			pasemi_desc_build(&work_desc, 0);
			pasemi_desc_build(&work_desc, 0);
			pasemi_desc_build(&work_desc, 0);
			/* tell the cleaner to verify the signature bit */
			work_desc.postop = PASEMI_CHECK_SIG;
			srclen = crp->crp_ilen;
		}

		pasemi_desc_hdr(&work_desc, XCT_FUN_SHL(maccrd->crd_skip / 4));
		pasemi_desc_hdr(&work_desc,
				XCT_FUN_CHL(enccrd->crd_skip -
					    maccrd->crd_skip));
	} else if (!enccrd && maccrd) {
		/* MAC only: preload the hash key from session DMA memory */
		srclen = maccrd->crd_len;

		pasemi_desc_start(&init_desc,
				  XCT_CTRL_HDR(chsel, 0x58, DMA_FN_HKEY0));
		pasemi_desc_build(&init_desc,
				  XCT_FUN_SRC_PTR(0x58,
					((struct pasemi_session *)
					 ses->dma_addr)->hkey));

		pasemi_desc_start(&work_desc, XCT_FUN_O | XCT_FUN_I
				  | XCT_FUN_CRM_SIG
				  | XCT_FUN_A | XCT_FUN_FUN(chsel));
	}

	if (enccrd) {
		/* select cipher algorithm and its IV size */
		switch (enccrd->crd_alg) {
		case CRYPTO_3DES_CBC:
			pasemi_desc_hdr(&work_desc, XCT_FUN_ALG_3DES |
					XCT_FUN_BCM_CBC);
			ivsize = sizeof(u64);
			break;
		case CRYPTO_DES_CBC:
			pasemi_desc_hdr(&work_desc, XCT_FUN_ALG_DES |
					XCT_FUN_BCM_CBC);
			ivsize = sizeof(u64);
			break;
		case CRYPTO_AES_CBC:
			pasemi_desc_hdr(&work_desc, XCT_FUN_ALG_AES |
					XCT_FUN_BCM_CBC);
			ivsize = 2 * sizeof(u64);
			break;
		case CRYPTO_ARC4:
			pasemi_desc_hdr(&work_desc, XCT_FUN_ALG_ARC);
			ivsize = 0;
			break;
		default:
			printk(DRV_NAME ": unimplemented enccrd->crd_alg %d\n",
			       enccrd->crd_alg);
			err = -EINVAL;
			goto errout;
		}

		/* 8-byte IVs live in the second half of the civ block */
		ivp = (ivsize == sizeof(u64)) ?
			(caddr_t) &ses->civ[1] : (caddr_t) &ses->civ[0];
		if (enccrd->crd_flags & CRD_F_ENCRYPT) {
			if (enccrd->crd_flags & CRD_F_IV_EXPLICIT)
				memcpy(ivp, enccrd->crd_iv, ivsize);
			/* If IV is not present in the buffer already,
			 * it has to be copied there */
			if ((enccrd->crd_flags & CRD_F_IV_PRESENT) == 0)
				crypto_copyback(crp->crp_flags, crp->crp_buf,
						enccrd->crd_inject, ivsize,
						ivp);
		} else {
			if (enccrd->crd_flags & CRD_F_IV_EXPLICIT)
				/* IV is provided expicitly in descriptor */
				memcpy(ivp, enccrd->crd_iv, ivsize);
			else
				/* IV is provided in the packet */
				crypto_copydata(crp->crp_flags, crp->crp_buf,
						enccrd->crd_inject, ivsize,
						ivp);
		}
	}

	if (maccrd) {
		/* select hash algorithm; HSZ is the hashed length in words */
		switch (maccrd->crd_alg) {
		case CRYPTO_MD5:
			pasemi_desc_hdr(&work_desc, XCT_FUN_SIG_MD5 |
					XCT_FUN_HSZ((crp->crp_ilen -
						     maccrd->crd_inject) / 4));
			break;
		case CRYPTO_SHA1:
			pasemi_desc_hdr(&work_desc, XCT_FUN_SIG_SHA1 |
					XCT_FUN_HSZ((crp->crp_ilen -
						     maccrd->crd_inject) / 4));
			break;
		case CRYPTO_MD5_HMAC:
			pasemi_desc_hdr(&work_desc, XCT_FUN_SIG_HMAC_MD5 |
					XCT_FUN_HSZ((crp->crp_ilen -
						     maccrd->crd_inject) / 4));
			break;
		case CRYPTO_SHA1_HMAC:
			pasemi_desc_hdr(&work_desc, XCT_FUN_SIG_HMAC_SHA1 |
					XCT_FUN_HSZ((crp->crp_ilen -
						     maccrd->crd_inject) / 4));
			break;
		default:
			printk(DRV_NAME ": unimplemented maccrd->crd_alg %d\n",
			       maccrd->crd_alg);
			err = -EINVAL;
			goto errout;
		}
	}

	/* map the request buffer for DMA; in-place (src == dst) */
	if (crp->crp_flags & CRYPTO_F_SKBUF) {
		/* using SKB buffers */
		skb = (struct sk_buff *)crp->crp_buf;
		if (skb_shinfo(skb)->nr_frags) {
			printk(DRV_NAME ": skb frags unimplemented\n");
			err = -EINVAL;
			goto errout;
		}
		pasemi_desc_build(
			&work_desc,
			XCT_FUN_DST_PTR(skb->len, pci_map_single(
				sc->dma_pdev, skb->data, skb->len,
				DMA_TO_DEVICE)));
		pasemi_desc_build(
			&work_desc,
			XCT_FUN_SRC_PTR(
				srclen, pci_map_single(
					sc->dma_pdev, skb->data, srclen,
					DMA_TO_DEVICE)));
		pasemi_desc_hdr(&work_desc, XCT_FUN_LLEN(srclen));
	} else if (crp->crp_flags & CRYPTO_F_IOV) {
		/* using IOV buffers */
		uiop = (struct uio *)crp->crp_buf;
		if (uiop->uio_iovcnt > 1) {
			printk(DRV_NAME ": iov frags unimplemented\n");
			err = -EINVAL;
			goto errout;
		}

		/* crp_olen is never set; always use crp_ilen */
		pasemi_desc_build(
			&work_desc,
			XCT_FUN_DST_PTR(crp->crp_ilen, pci_map_single(
				sc->dma_pdev,
				uiop->uio_iov->iov_base,
				crp->crp_ilen, DMA_TO_DEVICE)));
		pasemi_desc_hdr(&work_desc, XCT_FUN_LLEN(srclen));

		pasemi_desc_build(
			&work_desc,
			XCT_FUN_SRC_PTR(srclen, pci_map_single(
				sc->dma_pdev,
				uiop->uio_iov->iov_base,
				srclen, DMA_TO_DEVICE)));
	} else {
		/* using contig buffers */
		pasemi_desc_build(
			&work_desc,
			XCT_FUN_DST_PTR(crp->crp_ilen, pci_map_single(
				sc->dma_pdev,
				crp->crp_buf,
				crp->crp_ilen, DMA_TO_DEVICE)));
		pasemi_desc_build(
			&work_desc,
			XCT_FUN_SRC_PTR(srclen, pci_map_single(
				sc->dma_pdev,
				crp->crp_buf,
				srclen, DMA_TO_DEVICE)));
		pasemi_desc_hdr(&work_desc, XCT_FUN_LLEN(srclen));
	}

	spin_lock_irqsave(&txring->fill_lock, flags);

	/* session changed on this ring: force a full state reload */
	if (txring->sesn != PASEMI_SESSION(crp->crp_sid)) {
		txring->sesn = PASEMI_SESSION(crp->crp_sid);
		reinit = 1;
	}

	if (enccrd) {
		/* load IV (0x10 bytes) or the full key+IV state on reinit */
		pasemi_desc_start(&init_desc,
				  XCT_CTRL_HDR(chsel,
					       reinit ? reinit_size : 0x10,
					       DMA_FN_CIV0));
		pasemi_desc_build(&init_desc,
				  XCT_FUN_SRC_PTR(reinit ? reinit_size : 0x10,
						  ses->dma_addr));
	}

	/* not enough ring space: ask OCF to retry later */
	if (((txring->next_to_fill + pasemi_desc_size(&init_desc) +
	      pasemi_desc_size(&work_desc)) -
	     txring->next_to_clean) > TX_RING_SIZE) {
		spin_unlock_irqrestore(&txring->fill_lock, flags);
		err = ERESTART;
		goto errout;
	}

	pasemi_ring_add_desc(txring, &init_desc, NULL);
	pasemi_ring_add_desc(txring, &work_desc, crp);

	pasemi_ring_incr(sc, chsel,
			 pasemi_desc_size(&init_desc) +
			 pasemi_desc_size(&work_desc));

	spin_unlock_irqrestore(&txring->fill_lock, flags);

	/* (re)arm the cleanup timer for this ring */
	mod_timer(&txring->crypto_timer, jiffies + TIMER_INTERVAL);

	return 0;

erralg:
	printk(DRV_NAME ": unsupported algorithm or algorithm order "
	       "alg1 %d alg2 %d\n", crd1->crd_alg, crd2->crd_alg);
	err = -EINVAL;

errout:
	/* ERESTART means "requeue" — don't complete the request */
	if (err != ERESTART) {
		crp->crp_etype = err;
		crypto_done(crp);
	}
	return err;
}
/*
 * Queue one OCF request to the SEC (talitos).
 *
 * Picks a channel (reusing one recently used for the same algorithm),
 * reserves a free descriptor slot in that channel's FIFO, classifies
 * the descriptor chain (single-EU vs. IPSEC_ESP cipher+MAC), fills in
 * the descriptor's dword pointer fields, and submits.  ERESTART is
 * returned (without completing the request) when the FIFO is full.
 */
static int
talitos_process(device_t dev, struct cryptop *crp, int hint)
{
	int i, err = 0, ivsize;
	struct talitos_softc *sc = device_get_softc(dev);
	struct cryptodesc *crd1, *crd2, *maccrd, *enccrd;
	caddr_t iv;
	struct talitos_session *ses;
	struct talitos_desc *td;
	unsigned long flags;
	/* descriptor mappings */
	int hmac_key, hmac_data, cipher_iv, cipher_key,
		in_fifo, out_fifo, cipher_iv_out;
	/*
	 * NOTE(review): static round-robin state shared across calls.
	 * It is updated under the scheduler lock but read again after
	 * the lock is dropped; looks racy with concurrent callers —
	 * confirm whether the framework serializes process() calls.
	 */
	static int chsel = -1;
	u_int32_t rand_iv[4];

	DPRINTF("%s()\n", __FUNCTION__);

	if (crp == NULL || crp->crp_callback == NULL || sc == NULL) {
		return EINVAL;
	}
	crp->crp_etype = 0;
	if (TALITOS_SESSION(crp->crp_sid) >= sc->sc_nsessions) {
		return EINVAL;
	}

	ses = &sc->sc_sessions[TALITOS_SESSION(crp->crp_sid)];

	/* enter the channel scheduler */
	spin_lock_irqsave(&sc->sc_chnfifolock[sc->sc_num_channels], flags);

	/* reuse channel that already had/has requests for the required EU */
	for (i = 0; i < sc->sc_num_channels; i++) {
		if (sc->sc_chnlastalg[i] == crp->crp_desc->crd_alg)
			break;
	}
	if (i == sc->sc_num_channels) {
		/*
		 * haven't seen this algo the last sc_num_channels or more
		 * use round robin in this case
		 * nb: sc->sc_num_channels must be power of 2
		 */
		chsel = (chsel + 1) & (sc->sc_num_channels - 1);
	} else {
		/*
		 * matches channel with same target execution unit;
		 * use same channel in this case
		 */
		chsel = i;
	}
	sc->sc_chnlastalg[chsel] = crp->crp_desc->crd_alg;

	/* release the channel scheduler lock */
	spin_unlock_irqrestore(&sc->sc_chnfifolock[sc->sc_num_channels],
		flags);

	/* acquire the selected channel fifo lock */
	spin_lock_irqsave(&sc->sc_chnfifolock[chsel], flags);

	/* find and reserve next available descriptor-cryptop pair */
	for (i = 0; i < sc->sc_chfifo_len; i++) {
		/* hdr == 0 marks a free slot (see doneprocessing memset) */
		if (sc->sc_chnfifo[chsel][i].cf_desc.hdr == 0) {
			/*
			 * ensure correct descriptor formation by
			 * avoiding inadvertently setting "optional" entries
			 * e.g. not using "optional" dptr2 for MD/HMAC descs
			 */
			memset(&sc->sc_chnfifo[chsel][i].cf_desc,
				0, sizeof(*td));
			/* reserve it with done notification request bit */
			sc->sc_chnfifo[chsel][i].cf_desc.hdr |=
				TALITOS_DONE_NOTIFY;
			break;
		}
	}
	spin_unlock_irqrestore(&sc->sc_chnfifolock[chsel], flags);

	if (i == sc->sc_chfifo_len) {
		/* fifo full */
		err = ERESTART;
		goto errout;
	}

	td = &sc->sc_chnfifo[chsel][i].cf_desc;
	sc->sc_chnfifo[chsel][i].cf_crp = crp;

	crd1 = crp->crp_desc;
	if (crd1 == NULL) {
		err = EINVAL;
		goto errout;
	}
	crd2 = crd1->crd_next;
	/* prevent compiler warning */
	hmac_key = 0;
	hmac_data = 0;
	if (crd2 == NULL) {
		/* single EU: plain hash or plain cipher */
		td->hdr |= TD_TYPE_COMMON_NONSNOOP_NO_AFEU;
		/* assign descriptor dword ptr mappings for this desc. type */
		cipher_iv = 1;
		cipher_key = 2;
		in_fifo = 3;
		cipher_iv_out = 5;
		if (crd1->crd_alg == CRYPTO_MD5_HMAC ||
			crd1->crd_alg == CRYPTO_SHA1_HMAC ||
			crd1->crd_alg == CRYPTO_SHA1 ||
			crd1->crd_alg == CRYPTO_MD5) {
			out_fifo = 5;
			maccrd = crd1;
			enccrd = NULL;
		} else if (crd1->crd_alg == CRYPTO_DES_CBC ||
			crd1->crd_alg == CRYPTO_3DES_CBC ||
			crd1->crd_alg == CRYPTO_AES_CBC ||
			crd1->crd_alg == CRYPTO_ARC4) {
			out_fifo = 4;
			maccrd = NULL;
			enccrd = crd1;
		} else {
			DPRINTF("UNKNOWN crd1->crd_alg %d\n", crd1->crd_alg);
			err = EINVAL;
			goto errout;
		}
	} else {
		/* two descriptors: requires the IPSEC_ESP descriptor type */
		if (sc->sc_desc_types & TALITOS_HAS_DT_IPSEC_ESP) {
			td->hdr |= TD_TYPE_IPSEC_ESP;
		} else {
			DPRINTF("unimplemented: multiple descriptor ipsec\n");
			err = EINVAL;
			goto errout;
		}
		/* assign descriptor dword ptr mappings for this desc. type */
		hmac_key = 0;
		hmac_data = 1;
		cipher_iv = 2;
		cipher_key = 3;
		in_fifo = 4;
		out_fifo = 5;
		cipher_iv_out = 6;
		/* hash-then-decrypt, or encrypt-then-hash; nothing else */
		if ((crd1->crd_alg == CRYPTO_MD5_HMAC ||
			crd1->crd_alg == CRYPTO_SHA1_HMAC ||
			crd1->crd_alg == CRYPTO_MD5 ||
			crd1->crd_alg == CRYPTO_SHA1) &&
			(crd2->crd_alg == CRYPTO_DES_CBC ||
			crd2->crd_alg == CRYPTO_3DES_CBC ||
			crd2->crd_alg == CRYPTO_AES_CBC ||
			crd2->crd_alg == CRYPTO_ARC4) &&
			((crd2->crd_flags & CRD_F_ENCRYPT) == 0)) {
			maccrd = crd1;
			enccrd = crd2;
		} else if ((crd1->crd_alg == CRYPTO_DES_CBC ||
			crd1->crd_alg == CRYPTO_ARC4 ||
			crd1->crd_alg == CRYPTO_3DES_CBC ||
			crd1->crd_alg == CRYPTO_AES_CBC) &&
			(crd2->crd_alg == CRYPTO_MD5_HMAC ||
			crd2->crd_alg == CRYPTO_SHA1_HMAC ||
			crd2->crd_alg == CRYPTO_MD5 ||
			crd2->crd_alg == CRYPTO_SHA1) &&
			(crd1->crd_flags & CRD_F_ENCRYPT)) {
			enccrd = crd1;
			maccrd = crd2;
		} else {
			/* We cannot order the SEC as requested */
			printk("%s: cannot do the order\n",
				device_get_nameunit(sc->sc_cdev));
			err = EINVAL;
			goto errout;
		}
	}

	/* assign in_fifo and out_fifo based on input/output struct type */
	if (crp->crp_flags & CRYPTO_F_SKBUF) {
		/* using SKB buffers */
		struct sk_buff *skb = (struct sk_buff *)crp->crp_buf;
		if (skb_shinfo(skb)->nr_frags) {
			printk("%s: skb frags unimplemented\n",
				device_get_nameunit(sc->sc_cdev));
			err = EINVAL;
			goto errout;
		}
		/* in-place operation: in and out map the same data */
		td->ptr[in_fifo].ptr = dma_map_single(NULL, skb->data,
			skb->len, DMA_TO_DEVICE);
		td->ptr[in_fifo].len = skb->len;
		td->ptr[out_fifo].ptr = dma_map_single(NULL, skb->data,
			skb->len, DMA_TO_DEVICE);
		td->ptr[out_fifo].len = skb->len;
		td->ptr[hmac_data].ptr = dma_map_single(NULL, skb->data,
			skb->len, DMA_TO_DEVICE);
	} else if (crp->crp_flags & CRYPTO_F_IOV) {
		/* using IOV buffers */
		struct uio *uiop = (struct uio *)crp->crp_buf;
		if (uiop->uio_iovcnt > 1) {
			printk("%s: iov frags unimplemented\n",
				device_get_nameunit(sc->sc_cdev));
			err = EINVAL;
			goto errout;
		}
		td->ptr[in_fifo].ptr = dma_map_single(NULL,
			uiop->uio_iov->iov_base, crp->crp_ilen,
			DMA_TO_DEVICE);
		td->ptr[in_fifo].len = crp->crp_ilen;
		/* crp_olen is never set; always use crp_ilen */
		td->ptr[out_fifo].ptr = dma_map_single(NULL,
			uiop->uio_iov->iov_base, crp->crp_ilen,
			DMA_TO_DEVICE);
		td->ptr[out_fifo].len = crp->crp_ilen;
	} else {
		/* using contig buffers */
		td->ptr[in_fifo].ptr = dma_map_single(NULL,
			crp->crp_buf, crp->crp_ilen, DMA_TO_DEVICE);
		td->ptr[in_fifo].len = crp->crp_ilen;
		td->ptr[out_fifo].ptr = dma_map_single(NULL,
			crp->crp_buf, crp->crp_ilen, DMA_TO_DEVICE);
		td->ptr[out_fifo].len = crp->crp_ilen;
	}

	if (enccrd) {
		/* select execution unit, mode bits and IV size */
		switch (enccrd->crd_alg) {
		case CRYPTO_3DES_CBC:
			td->hdr |= TALITOS_MODE0_DEU_3DES;
			/* FALLTHROUGH */
		case CRYPTO_DES_CBC:
			td->hdr |= TALITOS_SEL0_DEU
				|  TALITOS_MODE0_DEU_CBC;
			if (enccrd->crd_flags & CRD_F_ENCRYPT)
				td->hdr |= TALITOS_MODE0_DEU_ENC;
			ivsize = 2*sizeof(u_int32_t);
			DPRINTF("%cDES ses %d ch %d len %d\n",
				(td->hdr & TALITOS_MODE0_DEU_3DES)?'3':'1',
				(u32)TALITOS_SESSION(crp->crp_sid),
				chsel, td->ptr[in_fifo].len);
			break;
		case CRYPTO_AES_CBC:
			td->hdr |= TALITOS_SEL0_AESU
				|  TALITOS_MODE0_AESU_CBC;
			if (enccrd->crd_flags & CRD_F_ENCRYPT)
				td->hdr |= TALITOS_MODE0_AESU_ENC;
			ivsize = 4*sizeof(u_int32_t);
			DPRINTF("AES ses %d ch %d len %d\n",
				(u32)TALITOS_SESSION(crp->crp_sid),
				chsel, td->ptr[in_fifo].len);
			break;
		default:
			printk("%s: unimplemented enccrd->crd_alg %d\n",
				device_get_nameunit(sc->sc_cdev),
				enccrd->crd_alg);
			err = EINVAL;
			goto errout;
		}
		/*
		 * Setup encrypt/decrypt state. When using basic ops
		 * we can't use an inline IV because hash/crypt offset
		 * must be from the end of the IV to the start of the
		 * crypt data and this leaves out the preceding header
		 * from the hash calculation. Instead we place the IV
		 * in the state record and set the hash/crypt offset to
		 * copy both the header+IV.
		 */
		if (enccrd->crd_flags & CRD_F_ENCRYPT) {
			td->hdr |= TALITOS_DIR_OUTBOUND;
			if (enccrd->crd_flags & CRD_F_IV_EXPLICIT)
				iv = enccrd->crd_iv;
			else
				/* no explicit IV: generate a random one */
				read_random((iv = (caddr_t) rand_iv),
					sizeof(rand_iv));
			if ((enccrd->crd_flags & CRD_F_IV_PRESENT) == 0) {
				crypto_copyback(crp->crp_flags, crp->crp_buf,
					enccrd->crd_inject, ivsize, iv);
			}
		} else {
			td->hdr |= TALITOS_DIR_INBOUND;
			if (enccrd->crd_flags & CRD_F_IV_EXPLICIT) {
				iv = enccrd->crd_iv;
			} else {
				/* IV comes from the packet itself */
				iv = (caddr_t) rand_iv;
				crypto_copydata(crp->crp_flags, crp->crp_buf,
					enccrd->crd_inject, ivsize, iv);
			}
		}
		td->ptr[cipher_iv].ptr = dma_map_single(NULL, iv, ivsize,
			DMA_TO_DEVICE);
		td->ptr[cipher_iv].len = ivsize;
		/*
		 * we don't need the cipher iv out length/pointer
		 * field to do ESP IPsec. Therefore we set the len field as 0,
		 * which tells the SEC not to do anything with this len/ptr
		 * field. Previously, when length/pointer as pointing to iv,
		 * it gave us corruption of packets.
		 */
		td->ptr[cipher_iv_out].len = 0;
	}
	if (enccrd && maccrd) {
		/* this is ipsec only for now */
		td->hdr |= TALITOS_SEL1_MDEU
			|  TALITOS_MODE1_MDEU_INIT
			|  TALITOS_MODE1_MDEU_PAD;
		switch (maccrd->crd_alg) {
			case CRYPTO_MD5:
				td->hdr |= TALITOS_MODE1_MDEU_MD5;
				break;
			case CRYPTO_MD5_HMAC:
				td->hdr |= TALITOS_MODE1_MDEU_MD5_HMAC;
				break;
			case CRYPTO_SHA1:
				td->hdr |= TALITOS_MODE1_MDEU_SHA1;
				break;
			case CRYPTO_SHA1_HMAC:
				td->hdr |= TALITOS_MODE1_MDEU_SHA1_HMAC;
				break;
			default:
				/* We cannot order the SEC as requested */
				printk("%s: cannot do the order\n",
					device_get_nameunit(sc->sc_cdev));
				err = EINVAL;
				goto errout;
		}
		if ((maccrd->crd_alg == CRYPTO_MD5_HMAC) ||
		   (maccrd->crd_alg == CRYPTO_SHA1_HMAC)) {
			/*
			 * The offset from hash data to the start of
			 * crypt data is the difference in the skips.
			 */
			/* ipsec only for now */
			td->ptr[hmac_key].ptr = dma_map_single(NULL,
				ses->ses_hmac, ses->ses_hmac_len,
				DMA_TO_DEVICE);
			td->ptr[hmac_key].len = ses->ses_hmac_len;
			td->ptr[in_fifo].ptr  += enccrd->crd_skip;
			td->ptr[in_fifo].len  =  enccrd->crd_len;
			td->ptr[out_fifo].ptr += enccrd->crd_skip;
			td->ptr[out_fifo].len =  enccrd->crd_len;
			/* bytes of HMAC to postpend to ciphertext */
			td->ptr[out_fifo].extent =  ses->ses_mlen;
			td->ptr[hmac_data].ptr += maccrd->crd_skip;
			td->ptr[hmac_data].len = enccrd->crd_skip -
				maccrd->crd_skip;
		}
		if (enccrd->crd_flags & CRD_F_KEY_EXPLICIT) {
			printk("%s: CRD_F_KEY_EXPLICIT unimplemented\n",
				device_get_nameunit(sc->sc_cdev));
		}
	}
	if (!enccrd && maccrd) {
		/* single MD5 or SHA */
		td->hdr |= TALITOS_SEL0_MDEU
				|  TALITOS_MODE0_MDEU_INIT
				|  TALITOS_MODE0_MDEU_PAD;
		switch (maccrd->crd_alg) {
			case CRYPTO_MD5:
				td->hdr |= TALITOS_MODE0_MDEU_MD5;
				DPRINTF("MD5 ses %d ch %d len %d\n",
					(u32)TALITOS_SESSION(crp->crp_sid),
					chsel, td->ptr[in_fifo].len);
				break;
			case CRYPTO_MD5_HMAC:
				td->hdr |= TALITOS_MODE0_MDEU_MD5_HMAC;
				break;
			case CRYPTO_SHA1:
				td->hdr |= TALITOS_MODE0_MDEU_SHA1;
				DPRINTF("SHA1 ses %d ch %d len %d\n",
					(u32)TALITOS_SESSION(crp->crp_sid),
					chsel, td->ptr[in_fifo].len);
				break;
			case CRYPTO_SHA1_HMAC:
				td->hdr |= TALITOS_MODE0_MDEU_SHA1_HMAC;
				break;
			default:
				/* We cannot order the SEC as requested */
				DPRINTF("cannot do the order\n");
				err = EINVAL;
				goto errout;
		}

		/* point the output at the digest injection offset */
		if (crp->crp_flags & CRYPTO_F_IOV)
			td->ptr[out_fifo].ptr += maccrd->crd_inject;

		if ((maccrd->crd_alg == CRYPTO_MD5_HMAC) ||
		   (maccrd->crd_alg == CRYPTO_SHA1_HMAC)) {
			td->ptr[hmac_key].ptr = dma_map_single(NULL,
				ses->ses_hmac, ses->ses_hmac_len,
				DMA_TO_DEVICE);
			td->ptr[hmac_key].len = ses->ses_hmac_len;
		}
	} else {
		/* using process key (session data has duplicate) */
		td->ptr[cipher_key].ptr = dma_map_single(NULL,
			enccrd->crd_key, (enccrd->crd_klen + 7) / 8,
			DMA_TO_DEVICE);
		td->ptr[cipher_key].len = (enccrd->crd_klen + 7) / 8;
	}
	/* descriptor complete - GO! */
	return talitos_submit(sc, td, chsel);

errout:
	/* ERESTART means "requeue later" — don't complete the request */
	if (err != ERESTART) {
		crp->crp_etype = err;
		crypto_done(crp);
	}
	return err;
}
/*
 * Queue one OCF request to the XLR security engine.
 *
 * Classifies the request's descriptor chain into cipher and/or digest
 * work, fills in the per-session command block (ses->cmd) and hands
 * it to xlr_sec_setup().  On any validation failure the request is
 * completed immediately with crp_etype set.
 *
 * Fixes vs. previous revision:
 *  - added the missing "break" in the CRYPTO_MD5 digest case, which
 *    previously fell through into the default "currently not handled"
 *    path on every MD5 request;
 *  - removed the bogus free(cmd, M_DEVBUF) on the error path: cmd is
 *    &ses->cmd, embedded in the session and never malloc'd, so
 *    freeing it corrupted the heap;
 *  - dropped the dead "cmd == NULL" / ENOMEM check (the address of a
 *    struct member can never be NULL) and the commented-out key/IV
 *    conditionals.
 */
static int
xlr_sec_process(device_t dev, struct cryptop *crp, int hint)
{
	struct xlr_sec_softc *sc = device_get_softc(dev);
	struct xlr_sec_command *cmd = NULL;
	int session, err;
	struct cryptodesc *crd1, *crd2, *maccrd, *enccrd;
	struct xlr_sec_session *ses;

	if (crp == NULL || crp->crp_callback == NULL)
		return (EINVAL);

	session = XLR_SEC_SESSION(crp->crp_sid);
	if (sc == NULL || session >= sc->sc_nsessions) {
		err = EINVAL;
		goto errout;
	}
	ses = &sc->sc_sessions[session];

	/* command block is embedded in the session — never freed here */
	cmd = &ses->cmd;

	crd1 = crp->crp_desc;
	if (crd1 == NULL) {
		err = EINVAL;
		goto errout;
	}
	crd2 = crd1->crd_next;

	if (crd2 == NULL) {
		/* single descriptor: either digest-only or cipher-only */
		if (crd1->crd_alg == CRYPTO_MD5_HMAC ||
		    crd1->crd_alg == CRYPTO_SHA1_HMAC ||
		    crd1->crd_alg == CRYPTO_SHA1 ||
		    crd1->crd_alg == CRYPTO_MD5) {
			maccrd = crd1;
			enccrd = NULL;
		} else if (crd1->crd_alg == CRYPTO_DES_CBC ||
		    crd1->crd_alg == CRYPTO_3DES_CBC ||
		    crd1->crd_alg == CRYPTO_AES_CBC ||
		    crd1->crd_alg == CRYPTO_ARC4) {
			maccrd = NULL;
			enccrd = crd1;
		} else {
			err = EINVAL;
			goto errout;
		}
	} else {
		/* two descriptors: hash+cipher in either order */
		if ((crd1->crd_alg == CRYPTO_MD5_HMAC ||
		    crd1->crd_alg == CRYPTO_SHA1_HMAC ||
		    crd1->crd_alg == CRYPTO_MD5 ||
		    crd1->crd_alg == CRYPTO_SHA1) &&
		    (crd2->crd_alg == CRYPTO_DES_CBC ||
		    crd2->crd_alg == CRYPTO_3DES_CBC ||
		    crd2->crd_alg == CRYPTO_AES_CBC ||
		    crd2->crd_alg == CRYPTO_ARC4)) {
			maccrd = crd1;
			enccrd = crd2;
		} else if ((crd1->crd_alg == CRYPTO_DES_CBC ||
		    crd1->crd_alg == CRYPTO_ARC4 ||
		    crd1->crd_alg == CRYPTO_3DES_CBC ||
		    crd1->crd_alg == CRYPTO_AES_CBC) &&
		    (crd2->crd_alg == CRYPTO_MD5_HMAC ||
		    crd2->crd_alg == CRYPTO_SHA1_HMAC ||
		    crd2->crd_alg == CRYPTO_MD5 ||
		    crd2->crd_alg == CRYPTO_SHA1) &&
		    (crd1->crd_flags & CRD_F_ENCRYPT)) {
			enccrd = crd1;
			maccrd = crd2;
		} else {
			err = EINVAL;
			goto errout;
		}
	}

	/* in-place operation: source and destination are crp_buf */
	bzero(&cmd->op, sizeof(xlr_sec_io_t));
	cmd->op.source_buf = (uint64_t) (unsigned long)crp->crp_buf;
	cmd->op.source_buf_size = crp->crp_ilen;
	cmd->op.dest_buf = (uint64_t) (unsigned long)crp->crp_buf;
	cmd->op.dest_buf_size = crp->crp_ilen;
	cmd->op.num_packets = 1;
	cmd->op.num_fragments = 1;

	if (cmd->op.source_buf_size > SEC_MAX_FRAG_LEN)
		ses->multi_frag_flag = 1;
	else
		ses->multi_frag_flag = 0;

	if (maccrd) {
		cmd->maccrd = maccrd;
		/* digest path: cipher unit passes data through untouched */
		cmd->op.cipher_op = XLR_SEC_CIPHER_MODE_PASS;
		cmd->op.cipher_mode = XLR_SEC_CIPHER_MODE_NONE;
		cmd->op.cipher_type = XLR_SEC_CIPHER_TYPE_NONE;
		cmd->op.cipher_init = 0;
		cmd->op.cipher_offset = 0;

		switch (maccrd->crd_alg) {
		case CRYPTO_MD5:
			cmd->op.digest_type = XLR_SEC_DIGEST_TYPE_MD5;
			cmd->op.digest_init = XLR_SEC_DIGEST_INIT_NEWKEY;
			cmd->op.digest_src = XLR_SEC_DIGEST_SRC_DMA;
			cmd->op.digest_offset = 0;

			cmd->op.cksum_type = XLR_SEC_CKSUM_TYPE_NOP;
			cmd->op.cksum_src = XLR_SEC_CKSUM_SRC_CIPHER;
			cmd->op.cksum_offset = 0;

			cmd->op.pkt_hmac = XLR_SEC_LOADHMACKEY_MODE_OLD;
			cmd->op.pkt_hash = XLR_SEC_PADHASH_PAD;
			cmd->op.pkt_hashbytes = XLR_SEC_HASHBYTES_ALL8;
			cmd->op.pkt_next = XLR_SEC_NEXT_FINISH;
			cmd->op.pkt_iv = XLR_SEC_PKT_IV_OLD;
			cmd->op.pkt_lastword = XLR_SEC_LASTWORD_128;
			break;	/* FIX: previously fell through to default */
		default:
			printf("currently not handled\n");
		}
	}
	if (enccrd) {
		cmd->enccrd = enccrd;
#ifdef RMI_SEC_DEBUG
		xlr_sec_print_data(crp);
#endif
		if (enccrd->crd_flags & CRD_F_ENCRYPT)
			cmd->op.cipher_op = XLR_SEC_CIPHER_OP_ENCRYPT;
		else
			cmd->op.cipher_op = XLR_SEC_CIPHER_OP_DECRYPT;

		switch (enccrd->crd_alg) {
		case CRYPTO_DES_CBC:
		case CRYPTO_3DES_CBC:
			if (enccrd->crd_alg == CRYPTO_DES_CBC) {
				cmd->op.cipher_type = XLR_SEC_CIPHER_TYPE_DES;
				memcpy(&cmd->op.crypt_key[0], enccrd->crd_key,
				    XLR_SEC_DES_KEY_LENGTH);
			} else {
				cmd->op.cipher_type = XLR_SEC_CIPHER_TYPE_3DES;
				memcpy(&cmd->op.crypt_key[0], enccrd->crd_key,
				    XLR_SEC_3DES_KEY_LENGTH);
			}
			cmd->op.cipher_mode = XLR_SEC_CIPHER_MODE_CBC;
			cmd->op.cipher_init = XLR_SEC_CIPHER_INIT_NK;
			cmd->op.cipher_offset = XLR_SEC_DES_IV_LENGTH;

			cmd->op.digest_type = XLR_SEC_DIGEST_TYPE_NONE;
			cmd->op.digest_init = XLR_SEC_DIGEST_INIT_OLDKEY;
			cmd->op.digest_src = XLR_SEC_DIGEST_SRC_DMA;
			cmd->op.digest_offset = 0;
			cmd->op.cksum_type = XLR_SEC_CKSUM_TYPE_NOP;
			cmd->op.cksum_src = XLR_SEC_CKSUM_SRC_CIPHER;
			cmd->op.cksum_offset = 0;
			cmd->op.pkt_hmac = XLR_SEC_LOADHMACKEY_MODE_OLD;
			cmd->op.pkt_hash = XLR_SEC_PADHASH_PAD;
			cmd->op.pkt_hashbytes = XLR_SEC_HASHBYTES_ALL8;
			cmd->op.pkt_next = XLR_SEC_NEXT_FINISH;
			cmd->op.pkt_iv = XLR_SEC_PKT_IV_NEW;
			cmd->op.pkt_lastword = XLR_SEC_LASTWORD_128;

			/* an explicit IV overrides whatever the packet has */
			if ((enccrd->crd_flags & CRD_F_IV_EXPLICIT)) {
				memcpy(&cmd->op.initial_vector[0],
				    enccrd->crd_iv, XLR_SEC_DES_IV_LENGTH);
			}
			break;

		case CRYPTO_AES_CBC:
			cmd->op.cipher_type = XLR_SEC_CIPHER_TYPE_AES128;
			memcpy(&cmd->op.crypt_key[0], enccrd->crd_key,
			    XLR_SEC_AES128_KEY_LENGTH);
			cmd->op.cipher_mode = XLR_SEC_CIPHER_MODE_CBC;
			cmd->op.cipher_init = XLR_SEC_CIPHER_INIT_NK;
			cmd->op.cipher_offset = XLR_SEC_AES_BLOCK_SIZE;

			cmd->op.digest_type = XLR_SEC_DIGEST_TYPE_NONE;
			cmd->op.digest_init = XLR_SEC_DIGEST_INIT_OLDKEY;
			cmd->op.digest_src = XLR_SEC_DIGEST_SRC_DMA;
			cmd->op.digest_offset = 0;
			cmd->op.cksum_type = XLR_SEC_CKSUM_TYPE_NOP;
			cmd->op.cksum_src = XLR_SEC_CKSUM_SRC_CIPHER;
			cmd->op.cksum_offset = 0;
			cmd->op.pkt_hmac = XLR_SEC_LOADHMACKEY_MODE_OLD;
			cmd->op.pkt_hash = XLR_SEC_PADHASH_PAD;
			cmd->op.pkt_hashbytes = XLR_SEC_HASHBYTES_ALL8;
			cmd->op.pkt_next = XLR_SEC_NEXT_FINISH;
			cmd->op.pkt_iv = XLR_SEC_PKT_IV_NEW;
			cmd->op.pkt_lastword = XLR_SEC_LASTWORD_128;

			if ((enccrd->crd_flags & CRD_F_IV_EXPLICIT)) {
				memcpy(&cmd->op.initial_vector[0],
				    enccrd->crd_iv, XLR_SEC_AES_BLOCK_SIZE);
			}
			break;
		}
	}
	cmd->crp = crp;
	cmd->session_num = session;
	xlr_sec_setup(ses, cmd, (symkey_desc_pt) ses->desc_ptr);

	return (0);

errout:
	/*
	 * cmd (when set) points into the session, not heap memory —
	 * there is nothing to free here.
	 */
	crp->crp_etype = err;
	crypto_done(crp);
	return (err);
}
/*
 * cesa callback.
 *
 * Completion handler (tasklet or polled): drains at most one ready
 * result per invocation from the CESA engine (note the unconditional
 * break after res_idx++ below) and, if the request wants a callback,
 * hands it back to OCF via crypto_done().
 */
static void cesa_callback(unsigned long dummy)
{
	struct cesa_ocf_process *cesa_ocf_cmd = NULL;
	struct cryptop *crp = NULL;
	MV_CESA_RESULT result[MV_CESA_MAX_CHAN];
	int res_idx = 0, i;
	MV_STATUS status;

	dprintk("%s()\n", __FUNCTION__);

#ifdef CESA_OCF_TASKLET
	disable_irq(cesa_device.irq);
#endif
	while (MV_TRUE) {
		/* Get Ready requests */
		spin_lock(&cesa_lock);
		status = mvCesaReadyGet(&result[res_idx]);
		spin_unlock(&cesa_lock);

		cesaTestTraceAdd(2);

		if (status != MV_OK) {
#ifdef CESA_OCF_POLLING
			if (status == MV_BUSY) { /* Fragment */
				cesa_interrupt_polling();
				return;
			}
#endif
			break;
		}
		res_idx++;
		/* only one result is collected per call */
		break;
	}

	for (i = 0; i < res_idx; i++) {

		if (!result[i].pReqPrv) {
			printk("%s,%d: warning private is NULL\n",
			    __FILE__, __LINE__);
			break;
		}

		cesa_ocf_cmd = result[i].pReqPrv;
		crp = cesa_ocf_cmd->crp;

		// ignore HMAC error.
		//if(result->retCode)
		//	crp->crp_etype = EIO;

#if defined(CESA_OCF_POLLING)
		/* fragments without a callback keep the polling going */
		if (!cesa_ocf_cmd->need_cb) {
			cesa_interrupt_polling();
		}
#endif
		if (cesa_ocf_cmd->need_cb) {
			if (debug) {
				mvCesaDebugMbuf("DST BUFFER",
				    cesa_ocf_cmd->cesa_cmd.pDst, 0,
				    cesa_ocf_cmd->cesa_cmd.pDst->mbufSize);
			}
			crypto_done(crp);
		}
		/* per-request context was allocated at submit time */
		kfree(cesa_ocf_cmd);
	}
#ifdef CESA_OCF_TASKLET
	enable_irq(cesa_device.irq);
#endif
	cesaTestTraceAdd(3);

	return;
}
/* Name        : icp_ocfDrvSymCallBack
 *
 * Description : When this function returns it signifies that the LAC
 * component has completed the relevant symmetric operation.
 *
 * Notes : The callbackTag is a pointer to an icp_drvOpData. This memory
 * object was passed to LAC for the cryptographic processing and contains all
 * the relevant information for cleaning up buffer handles etc. so that the
 * OCF EP80579 Driver portion of this crypto operation can be fully completed.
 *
 * The cryptop is always completed via crypto_done() unless callbackTag
 * itself is NULL (in which case there is no crp to complete); crp_etype
 * reports the outcome.  Cleanup order matters: the buffer-list array and
 * metadata are released before the op data, and crypto_done() is called
 * last, after all driver-owned memory has been freed.
 */
static void
icp_ocfDrvSymCallBack(void *callbackTag,
		      CpaStatus status,
		      const CpaCySymOp operationType,
		      void *pOpData,
		      CpaBufferList * pDstBuffer, CpaBoolean verifyResult)
{
	struct cryptop *crp = NULL;
	struct icp_drvOpData *temp_drvOpData =
	    (struct icp_drvOpData *)callbackTag;
	uint64_t *tempBasePtr = NULL;
	uint32_t tempLen = 0;

	/* without the opaque data we cannot even locate the cryptop */
	if (NULL == temp_drvOpData) {
		DPRINTK("%s(): The callback from the LAC component"
			" has failed due to Null userOpaque data"
			"(status == %d).\n", __FUNCTION__, status);
		DPRINTK("%s(): Unable to call OCF back! \n", __FUNCTION__);
		return;
	}

	crp = temp_drvOpData->crp;
	crp->crp_etype = ICP_OCF_DRV_NO_CRYPTO_PROCESS_ERROR;

	/* missing op data: cancel the request (no buffers to clean up) */
	if (NULL == pOpData) {
		DPRINTK("%s(): The callback from the LAC component"
			" has failed due to Null Symmetric Op data"
			"(status == %d).\n", __FUNCTION__, status);
		crp->crp_etype = ECANCELED;
		crypto_done(crp);
		return;
	}

	/* missing destination buffer list: likewise cancel */
	if (NULL == pDstBuffer) {
		DPRINTK("%s(): The callback from the LAC component"
			" has failed due to Null Dst Bufferlist data"
			"(status == %d).\n", __FUNCTION__, status);
		crp->crp_etype = ECANCELED;
		crypto_done(crp);
		return;
	}

	if (CPA_STATUS_SUCCESS == status) {
		if (temp_drvOpData->bufferType == ICP_CRYPTO_F_PACKET_BUF) {
			/* convert the result back into the packet buffer */
			if (ICP_OCF_DRV_STATUS_SUCCESS !=
			    icp_ocfDrvBufferListToPacketBuff(pDstBuffer,
							     (icp_packet_buffer_t **)
							     &(crp->crp_buf))) {
				EPRINTK("%s(): BufferList to SkBuff "
					"conversion error.\n", __FUNCTION__);
				crp->crp_etype = EPERM;
			}
		} else {
			/* flat buffer: report the produced length to OCF */
			icp_ocfDrvBufferListToPtrAndLen(pDstBuffer,
							(void **)&tempBasePtr,
							&tempLen);
			crp->crp_olen = (int)tempLen;
		}
	} else {
		DPRINTK("%s(): The callback from the LAC component has failed"
			"(status == %d).\n", __FUNCTION__, status);
		crp->crp_etype = ECANCELED;
	}

	/* oversized buffer-list arrays were heap-allocated separately */
	if (temp_drvOpData->numBufferListArray >
	    ICP_OCF_DRV_DEFAULT_BUFFLIST_ARRAYS) {
		icp_kfree(pDstBuffer->pBuffers);
	}
	icp_ocfDrvFreeMetaData(pDstBuffer);
	ICP_CACHE_FREE(drvOpData_zone, temp_drvOpData);

	/* Invoke the OCF callback function */
	crypto_done(crp);

	return;
}
/* OCF "process" entry point for the XLP security engine.
 *
 * Validates the request, allocates and fills an xlp_sec_command, then
 * dispatches to the cipher-only, digest-only, or combined
 * cipher+digest hardware paths.  On any failure the command is freed
 * via the errout label; ERESTART additionally arms sc_needwakeup so
 * the msgring handler can crypto_unblock() the queue later.
 *
 * Returns 0 on successful submission, otherwise an errno (also stored
 * in crp->crp_etype before crypto_done(), except for ERESTART).
 */
static int
xlp_sec_process(device_t dev, struct cryptop *crp, int hint)
{
	struct xlp_sec_softc *sc = device_get_softc(dev);
	struct xlp_sec_command *cmd = NULL;
	int session, err = -1, ret = 0;
	struct cryptodesc *crd1, *crd2;
	struct xlp_sec_session *ses;
	unsigned int nsegs = 0;

	if (crp == NULL || crp->crp_callback == NULL) {
		return (EINVAL);
	}
	/* Session index is encoded in the OCF session id. */
	session = XLP_SEC_SESSION(crp->crp_sid);
	if (sc == NULL || session >= sc->sc_nsessions) {
		err = EINVAL;
		goto errout;
	}
	ses = &sc->sc_sessions[session];

	/* Per-request command structure; M_ZERO so unused fields are 0. */
	if ((cmd = malloc(sizeof(struct xlp_sec_command), M_DEVBUF,
	    M_NOWAIT | M_ZERO)) == NULL) {
		err = ENOMEM;
		goto errout;
	}

	cmd->crp = crp;
	cmd->session_num = session;
	cmd->hash_dst_len = ses->hs_mlen;

	if ((crd1 = crp->crp_desc) == NULL) {
		err = EINVAL;
		goto errout;
	}
	crd2 = crd1->crd_next;

	/* Count scatter/gather segments of the request buffer. */
	if ((ret = xlp_get_nsegs(crp, &nsegs)) != 0) {
		err = EINVAL;
		goto errout;
	}
	if (((crd1 != NULL) && (crd1->crd_flags & CRD_F_IV_EXPLICIT)) ||
	    ((crd2 != NULL) && (crd2->crd_flags & CRD_F_IV_EXPLICIT))) {
		/* Since IV is given as separate segment to avoid copy */
		nsegs += 1;
	}
	cmd->nsegs = nsegs;

	/* DMA-able parameter blocks sized for nsegs segments. */
	if ((err = xlp_alloc_cmd_params(cmd, nsegs)) != 0)
		goto errout;

	if ((crd1 != NULL) && (crd2 == NULL)) {
		/* Single descriptor: either cipher-only or digest-only. */
		if (crd1->crd_alg == CRYPTO_DES_CBC ||
		    crd1->crd_alg == CRYPTO_3DES_CBC ||
		    crd1->crd_alg == CRYPTO_AES_CBC ||
		    crd1->crd_alg == CRYPTO_ARC4) {
			cmd->enccrd = crd1;
			cmd->maccrd = NULL;
			if ((ret = nlm_get_cipher_param(cmd)) != 0) {
				err = EINVAL;
				goto errout;
			}
			/* Explicit IV travels as its own leading segment, so
			 * ciphertext starts after it; otherwise at crd_skip. */
			if (crd1->crd_flags & CRD_F_IV_EXPLICIT)
				cmd->cipheroff = cmd->ivlen;
			else
				cmd->cipheroff = cmd->enccrd->crd_skip;
			cmd->cipherlen = cmd->enccrd->crd_len;
			/* CRD_F_IV_PRESENT: IV already sits at buffer start. */
			if (crd1->crd_flags & CRD_F_IV_PRESENT)
				cmd->ivoff = 0;
			else
				cmd->ivoff = cmd->enccrd->crd_inject;
			if ((err = xlp_copyiv(sc, cmd, cmd->enccrd)) != 0)
				goto errout;
			if ((err = nlm_crypto_do_cipher(sc, cmd)) != 0)
				goto errout;
		} else if (crd1->crd_alg == CRYPTO_MD5_HMAC ||
		    crd1->crd_alg == CRYPTO_SHA1_HMAC ||
		    crd1->crd_alg == CRYPTO_SHA1 ||
		    crd1->crd_alg == CRYPTO_MD5) {
			cmd->enccrd = NULL;
			cmd->maccrd = crd1;
			if ((ret = nlm_get_digest_param(cmd)) != 0) {
				err = EINVAL;
				goto errout;
			}
			cmd->hashoff = cmd->maccrd->crd_skip;
			cmd->hashlen = cmd->maccrd->crd_len;
			cmd->hmacpad = 0;
			cmd->hashsrc = 0;
			if ((err = nlm_crypto_do_digest(sc, cmd)) != 0)
				goto errout;
		} else {
			err = EINVAL;
			goto errout;
		}
	} else if ((crd1 != NULL) && (crd2 != NULL)) {
		/* Two descriptors: combined hash+cipher, in either order. */
		if ((crd1->crd_alg == CRYPTO_MD5_HMAC ||
		    crd1->crd_alg == CRYPTO_SHA1_HMAC ||
		    crd1->crd_alg == CRYPTO_MD5 ||
		    crd1->crd_alg == CRYPTO_SHA1) &&
		    (crd2->crd_alg == CRYPTO_DES_CBC ||
		    crd2->crd_alg == CRYPTO_3DES_CBC ||
		    crd2->crd_alg == CRYPTO_AES_CBC ||
		    crd2->crd_alg == CRYPTO_ARC4)) {
			cmd->maccrd = crd1;
			cmd->enccrd = crd2;
		} else if ((crd1->crd_alg == CRYPTO_DES_CBC ||
		    crd1->crd_alg == CRYPTO_ARC4 ||
		    crd1->crd_alg == CRYPTO_3DES_CBC ||
		    crd1->crd_alg == CRYPTO_AES_CBC) &&
		    (crd2->crd_alg == CRYPTO_MD5_HMAC ||
		    crd2->crd_alg == CRYPTO_SHA1_HMAC ||
		    crd2->crd_alg == CRYPTO_MD5 ||
		    crd2->crd_alg == CRYPTO_SHA1)) {
			cmd->enccrd = crd1;
			cmd->maccrd = crd2;
		} else {
			err = EINVAL;
			goto errout;
		}
		if ((ret = nlm_get_cipher_param(cmd)) != 0) {
			err = EINVAL;
			goto errout;
		}
		if ((ret = nlm_get_digest_param(cmd)) != 0) {
			err = EINVAL;
			goto errout;
		}
		cmd->ivoff = cmd->enccrd->crd_inject;
		cmd->hashoff = cmd->maccrd->crd_skip;
		cmd->hashlen = cmd->maccrd->crd_len;
		cmd->hmacpad = 0;
		/* hashsrc selects whether the engine hashes the ciphertext
		 * (encrypt-then-MAC direction) or the plaintext. */
		if (cmd->enccrd->crd_flags & CRD_F_ENCRYPT)
			cmd->hashsrc = 1;
		else
			cmd->hashsrc = 0;
		cmd->cipheroff = cmd->enccrd->crd_skip;
		cmd->cipherlen = cmd->enccrd->crd_len;
		if ((err = xlp_copyiv(sc, cmd, cmd->enccrd)) != 0)
			goto errout;
		if ((err = nlm_crypto_do_cipher_digest(sc, cmd)) != 0)
			goto errout;
	} else {
		err = EINVAL;
		goto errout;
	}
	return (0);
errout:
	xlp_free_cmd_params(cmd);
	if (err == ERESTART) {
		/* Out of message-ring credits: ask OCF to queue symmetric
		 * requests; the msgring handler unblocks once credits return.
		 * NOTE(review): creditleft is reset non-atomically here while
		 * the handler uses atomic_add_int — confirm intended. */
		sc->sc_needwakeup |= CRYPTO_SYMQ;
		creditleft = 0;
		return (err);
	}
	crp->crp_etype = err;
	crypto_done(crp);
	return (err);
}
/* This function is called from an interrupt handler */
/* Completion handler for the XLP security engine's FMN message ring.
 *
 * Runs in interrupt context.  msg->msg[0] carries the pointer to the
 * originating xlp_sec_command; msg->msg[1] carries the hardware status
 * (asserted clean via CRYPTO_ERROR).  Saves the trailing cipher block
 * as the next session IV, replenishes send credits (possibly
 * unblocking the OCF queue), copies the digest back into the request
 * buffer, and finally completes the crp and frees the command.
 */
void
nlm_xlpsec_msgring_handler(int vc, int size, int code, int src_id,
    struct nlm_fmn_msg *msg, void *data)
{
	struct xlp_sec_command *cmd = NULL;
	struct xlp_sec_softc *sc = NULL;
	struct cryptodesc *crd = NULL;
	unsigned int ivlen = 0;

	KASSERT(code == FMN_SWCODE_CRYPTO,
	    ("%s: bad code = %d, expected code = %d\n", __FUNCTION__,
	    code, FMN_SWCODE_CRYPTO));

	sc = (struct xlp_sec_softc *)data;
	KASSERT(src_id >= sc->sec_vc_start && src_id <= sc->sec_vc_end,
	    ("%s: bad src_id = %d, expect %d - %d\n", __FUNCTION__,
	    src_id, sc->sec_vc_start, sc->sec_vc_end));

	/* The command pointer was round-tripped through the hardware
	 * message verbatim. */
	cmd = (struct xlp_sec_command *)(uintptr_t)msg->msg[0];
	KASSERT(cmd != NULL && cmd->crp != NULL,
	    ("%s :cmd not received properly\n",__FUNCTION__));

	KASSERT(CRYPTO_ERROR(msg->msg[1]) == 0,
	    ("%s: Message rcv msg0 %llx msg1 %llx err %x \n", __FUNCTION__,
	    (unsigned long long)msg->msg[0],
	    (unsigned long long)msg->msg[1],
	    (int)CRYPTO_ERROR(msg->msg[1])));

	crd = cmd->enccrd;
	/* Copy the last 8 or 16 bytes to the session iv, so that in few
	 * cases this will be used as IV for the next request */
	if (crd != NULL) {
		if ((crd->crd_alg == CRYPTO_DES_CBC ||
		    crd->crd_alg == CRYPTO_3DES_CBC ||
		    crd->crd_alg == CRYPTO_AES_CBC) &&
		    (crd->crd_flags & CRD_F_ENCRYPT)) {
			ivlen = ((crd->crd_alg == CRYPTO_AES_CBC) ?
			    XLP_SEC_AES_IV_LENGTH : XLP_SEC_DES_IV_LENGTH);
			crypto_copydata(cmd->crp->crp_flags, cmd->crp->crp_buf,
			    crd->crd_skip + crd->crd_len - ivlen, ivlen,
			    sc->sc_sessions[cmd->session_num].ses_iv);
		}
	}

	/* If there are not enough credits to send, then send request
	 * will fail with ERESTART and the driver will be blocked until it is
	 * unblocked here after knowing that there are sufficient credits to
	 * send the request again. */
	if (sc->sc_needwakeup) {
		atomic_add_int(&creditleft, sc->sec_msgsz);
		if (creditleft >= (NLM_CRYPTO_LEFT_REQS)) {
			/* Enough credits accumulated: let OCF resubmit. */
			crypto_unblock(sc->sc_cid, sc->sc_needwakeup);
			sc->sc_needwakeup &= (~(CRYPTO_SYMQ | CRYPTO_ASYMQ));
		}
	}
	/* Deliver the computed digest to its injection point. */
	if(cmd->maccrd) {
		crypto_copyback(cmd->crp->crp_flags,
		    cmd->crp->crp_buf, cmd->maccrd->crd_inject,
		    cmd->hash_dst_len, cmd->hashdest);
	}

	/* This indicates completion of the crypto operation */
	crypto_done(cmd->crp);

	xlp_free_cmd_params(cmd);
	return;
}