/* * Apply a symmetric encryption/decryption algorithm. */ static int swcr_encdec(struct cryptodesc *crd, struct swcr_data *sw, caddr_t buf, int flags) { unsigned char iv[EALG_MAX_BLOCK_LEN], blk[EALG_MAX_BLOCK_LEN], *idat; unsigned char *ivp, piv[EALG_MAX_BLOCK_LEN]; struct enc_xform *exf; int i, k, j, blks; exf = sw->sw_exf; blks = exf->blocksize; /* Check for non-padded data */ if (crd->crd_len % blks) return EINVAL; /* Initialize the IV */ if (crd->crd_flags & CRD_F_ENCRYPT) { /* IV explicitly provided ? */ if (crd->crd_flags & CRD_F_IV_EXPLICIT) bcopy(crd->crd_iv, iv, blks); else arc4rand(iv, blks, 0); /* Do we need to write the IV */ if (!(crd->crd_flags & CRD_F_IV_PRESENT)) crypto_copyback(flags, buf, crd->crd_inject, blks, iv); } else { /* Decryption */ /* IV explicitly provided ? */ if (crd->crd_flags & CRD_F_IV_EXPLICIT) bcopy(crd->crd_iv, iv, blks); else { /* Get IV off buf */ crypto_copydata(flags, buf, crd->crd_inject, blks, iv); } } if (crd->crd_flags & CRD_F_KEY_EXPLICIT) { int error; if (sw->sw_kschedule) exf->zerokey(&(sw->sw_kschedule)); error = exf->setkey(&sw->sw_kschedule, crd->crd_key, crd->crd_klen / 8); if (error) return (error); } ivp = iv; /* * xforms that provide a reinit method perform all IV * handling themselves. */ if (exf->reinit) exf->reinit(sw->sw_kschedule, iv); if (flags & CRYPTO_F_IMBUF) { struct mbuf *m = (struct mbuf *) buf; /* Find beginning of data */ m = m_getptr(m, crd->crd_skip, &k); if (m == NULL) return EINVAL; i = crd->crd_len; while (i > 0) { /* * If there's insufficient data at the end of * an mbuf, we have to do some copying. */ if (m->m_len < k + blks && m->m_len != k) { m_copydata(m, k, blks, blk); /* Actual encryption/decryption */ if (exf->reinit) { if (crd->crd_flags & CRD_F_ENCRYPT) { exf->encrypt(sw->sw_kschedule, blk); } else { exf->decrypt(sw->sw_kschedule, blk); } } else if (crd->crd_flags & CRD_F_ENCRYPT) { /* XOR with previous block */ for (j = 0; j < blks; j++) blk[j] ^= ivp[j]; exf->encrypt(sw->sw_kschedule, blk); /* * Keep encrypted block for XOR'ing * with next block */ bcopy(blk, iv, blks); ivp = iv; } else { /* decrypt */ /* * Keep encrypted block for XOR'ing * with next block */ if (ivp == iv) bcopy(blk, piv, blks); else bcopy(blk, iv, blks); exf->decrypt(sw->sw_kschedule, blk); /* XOR with previous block */ for (j = 0; j < blks; j++) blk[j] ^= ivp[j]; if (ivp == iv) bcopy(piv, iv, blks); else ivp = iv; } /* Copy back decrypted block */ m_copyback(m, k, blks, blk); /* Advance pointer */ m = m_getptr(m, k + blks, &k); if (m == NULL) return EINVAL; i -= blks; /* Could be done... */ if (i == 0) break; } /* Skip possibly empty mbufs */ if (k == m->m_len) { for (m = m->m_next; m && m->m_len == 0; m = m->m_next) ; k = 0; } /* Sanity check */ if (m == NULL) return EINVAL; /* * Warning: idat may point to garbage here, but * we only use it in the while() loop, only if * there are indeed enough data. */ idat = mtod(m, unsigned char *) + k; while (m->m_len >= k + blks && i > 0) { if (exf->reinit) { if (crd->crd_flags & CRD_F_ENCRYPT) { exf->encrypt(sw->sw_kschedule, idat); } else { exf->decrypt(sw->sw_kschedule, idat); } } else if (crd->crd_flags & CRD_F_ENCRYPT) { /* XOR with previous block/IV */ for (j = 0; j < blks; j++) idat[j] ^= ivp[j]; exf->encrypt(sw->sw_kschedule, idat); ivp = idat; } else { /* decrypt */ /* * Keep encrypted block to be used * in next block's processing. 
*/ if (ivp == iv) bcopy(idat, piv, blks); else bcopy(idat, iv, blks); exf->decrypt(sw->sw_kschedule, idat); /* XOR with previous block/IV */ for (j = 0; j < blks; j++) idat[j] ^= ivp[j]; if (ivp == iv) bcopy(piv, iv, blks); else ivp = iv; } idat += blks; k += blks; i -= blks; } } return 0; /* Done with mbuf encryption/decryption */ } else if (flags & CRYPTO_F_IOV) {
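/*
 * A minimal userland sketch of the CBC chaining that swcr_encdec() does by
 * hand above: on encryption each plaintext block is XORed with the previous
 * cyphertext block (or the IV) before the block cipher runs; on decryption
 * the cipher is inverted first and the previous cyphertext block is XORed in
 * afterwards, which is why the code keeps a copy of the cyphertext (piv)
 * before overwriting it.  toy_encrypt()/toy_decrypt() below only stand in
 * for a real block cipher; all names and values are illustrative.
 */
#include <stdio.h>
#include <string.h>
#include <stdint.h>

#define BLKSZ 8

static void
toy_encrypt(uint8_t *blk, const uint8_t *key)
{
    for (int j = 0; j < BLKSZ; j++)
        blk[j] = (uint8_t)((blk[j] + key[j]) ^ 0x5a);
}

static void
toy_decrypt(uint8_t *blk, const uint8_t *key)
{
    for (int j = 0; j < BLKSZ; j++)
        blk[j] = (uint8_t)((blk[j] ^ 0x5a) - key[j]);
}

static void
cbc_encrypt(uint8_t *buf, size_t len, const uint8_t *key, const uint8_t *iv)
{
    uint8_t prev[BLKSZ];

    memcpy(prev, iv, BLKSZ);
    for (size_t i = 0; i < len; i += BLKSZ) {
        for (int j = 0; j < BLKSZ; j++)     /* XOR with previous block/IV */
            buf[i + j] ^= prev[j];
        toy_encrypt(buf + i, key);
        memcpy(prev, buf + i, BLKSZ);       /* cyphertext chains forward */
    }
}

static void
cbc_decrypt(uint8_t *buf, size_t len, const uint8_t *key, const uint8_t *iv)
{
    uint8_t prev[BLKSZ], save[BLKSZ];

    memcpy(prev, iv, BLKSZ);
    for (size_t i = 0; i < len; i += BLKSZ) {
        memcpy(save, buf + i, BLKSZ);       /* keep cyphertext for next XOR */
        toy_decrypt(buf + i, key);
        for (int j = 0; j < BLKSZ; j++)
            buf[i + j] ^= prev[j];
        memcpy(prev, save, BLKSZ);
    }
}

int
main(void)
{
    uint8_t key[BLKSZ] = "k0k1k2k";
    uint8_t iv[BLKSZ] = { 1, 2, 3, 4, 5, 6, 7, 8 };
    uint8_t buf[16] = "0123456789abcde";    /* two blocks, NUL padded */

    cbc_encrypt(buf, sizeof(buf), key, iv);
    cbc_decrypt(buf, sizeof(buf), key, iv);
    printf("%s\n", (char *)buf);            /* prints the original text */
    return 0;
}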
static void aes_encrypt(const struct krb5_key_state *ks, struct mbuf *inout, size_t skip, size_t len, void *ivec, size_t ivlen) { size_t blocklen = 16, plen; struct { uint8_t cn_1[16], cn[16]; } last2; int i, off; /* * AES encryption with cyphertext stealing: * * CTSencrypt(P[0], ..., P[n], IV, K): * len = length(P[n]) * (C[0], ..., C[n-2], E[n-1]) = * CBCencrypt(P[0], ..., P[n-1], IV, K) * P = pad(P[n], 0, blocksize) * E[n] = CBCencrypt(P, E[n-1], K); * C[n-1] = E[n] * C[n] = E[n-1]{0..len-1} */ plen = len % blocklen; if (len == blocklen) { /* * Note: caller will ensure len >= blocklen. */ aes_encrypt_1(ks, CRYPTO_F_IMBUF, inout, skip, len, ivec, CRD_F_ENCRYPT); } else if (plen == 0) { /* * This is equivalent to CBC mode followed by swapping * the last two blocks. We assume that neither of the * last two blocks cross iov boundaries. */ aes_encrypt_1(ks, CRYPTO_F_IMBUF, inout, skip, len, ivec, CRD_F_ENCRYPT); off = skip + len - 2 * blocklen; m_copydata(inout, off, 2 * blocklen, (void*) &last2); m_copyback(inout, off, blocklen, last2.cn); m_copyback(inout, off + blocklen, blocklen, last2.cn_1); } else { /* * This is the difficult case. We encrypt all but the * last partial block first. We then create a padded * copy of the last block and encrypt that using the * second to last encrypted block as IV. Once we have * the encrypted versions of the last two blocks, we * reshuffle to create the final result. */ aes_encrypt_1(ks, CRYPTO_F_IMBUF, inout, skip, len - plen, ivec, CRD_F_ENCRYPT); /* * Copy out the last two blocks, pad the last block * and encrypt it. Rearrange to get the final * result. The cyphertext for cn_1 is in cn. The * cyphertext for cn is the first plen bytes of what * is in cn_1 now. */ off = skip + len - blocklen - plen; m_copydata(inout, off, blocklen + plen, (void*) &last2); for (i = plen; i < blocklen; i++) last2.cn[i] = 0; aes_encrypt_1(ks, 0, last2.cn, 0, blocklen, last2.cn_1, CRD_F_ENCRYPT); m_copyback(inout, off, blocklen, last2.cn); m_copyback(inout, off + blocklen, plen, last2.cn_1); } }
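/*
 * Minimal userland sketch of the cyphertext-stealing layout that
 * aes_encrypt() builds above: CBC over the full blocks, zero-pad and encrypt
 * the final partial block with E[n-1] as the IV, then place E[n] in the
 * next-to-last slot and the first plen bytes of E[n-1] in the last slot.
 * toy_block() stands in for AES so the sketch runs standalone, and it
 * assumes more than one block of input (at least two full blocks in the
 * block-aligned case).  All names and values are illustrative, not kernel
 * APIs.
 */
#include <stdio.h>
#include <string.h>
#include <stdint.h>

#define BLK 16

static void
toy_block(uint8_t *b, const uint8_t *key)
{
    for (int j = 0; j < BLK; j++)
        b[j] = (uint8_t)((b[j] + key[j]) ^ 0xa5);
}

/* CBC-encrypt nblk full blocks in place; iv returns the last block. */
static void
cbc_blocks(uint8_t *p, size_t nblk, const uint8_t *key, uint8_t *iv)
{
    for (size_t b = 0; b < nblk; b++, p += BLK) {
        for (int j = 0; j < BLK; j++)
            p[j] ^= iv[j];
        toy_block(p, key);
        memcpy(iv, p, BLK);
    }
}

static void
cts_encrypt(uint8_t *buf, size_t len, const uint8_t *key, const uint8_t *iv0)
{
    uint8_t iv[BLK], last[BLK];
    size_t plen = len % BLK;
    size_t full = len - plen;

    memcpy(iv, iv0, BLK);
    cbc_blocks(buf, full / BLK, key, iv);   /* C[0]..C[n-2], E[n-1] */
    if (plen == 0) {
        /* Block-aligned: just swap the last two cyphertext blocks. */
        memcpy(last, buf + len - BLK, BLK);
        memcpy(buf + len - BLK, buf + len - 2 * BLK, BLK);
        memcpy(buf + len - 2 * BLK, last, BLK);
        return;
    }
    /* Zero-pad the partial block and encrypt it with E[n-1] as the IV. */
    memset(last, 0, BLK);
    memcpy(last, buf + full, plen);
    for (int j = 0; j < BLK; j++)
        last[j] ^= iv[j];                   /* iv now holds E[n-1] */
    toy_block(last, key);
    /* C[n] = E[n-1]{0..plen-1}, C[n-1] = E[n]. */
    memcpy(buf + full, buf + full - BLK, plen);
    memcpy(buf + full - BLK, last, BLK);
}

int
main(void)
{
    uint8_t key[BLK] = { 0 }, iv[BLK] = { 0 };
    uint8_t msg[23] = "ciphertext stealing...";

    cts_encrypt(msg, sizeof(msg), key, iv);
    for (size_t i = 0; i < sizeof(msg); i++)
        printf("%02x", msg[i]);
    printf("\n");
    return 0;
}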
/* * Massage IPv4/IPv6 headers for AH processing. */ static int ah_massage_headers(struct mbuf **m0, int proto, int skip, int alg, int out) { struct mbuf *m = *m0; unsigned char *ptr; int off, count; #ifdef INET struct ip *ip; #endif /* INET */ #ifdef INET6 struct ip6_ext *ip6e; struct ip6_hdr ip6; int alloc, len, ad; #endif /* INET6 */ switch (proto) { #ifdef INET case AF_INET: /* * This is the least painful way of dealing with IPv4 header * and option processing -- just make sure they're in * contiguous memory. */ *m0 = m = m_pullup(m, skip); if (m == NULL) { DPRINTF(("%s: m_pullup failed\n", __func__)); return ENOBUFS; } /* Fix the IP header */ ip = mtod(m, struct ip *); if (V_ah_cleartos) ip->ip_tos = 0; ip->ip_ttl = 0; ip->ip_sum = 0; /* * On input, fix ip_len which has been byte-swapped * at ip_input(). */ if (!out) { ip->ip_len = htons(ip->ip_len + skip); if (alg == CRYPTO_MD5_KPDK || alg == CRYPTO_SHA1_KPDK) ip->ip_off = htons(ip->ip_off & IP_DF); else ip->ip_off = 0; } else { if (alg == CRYPTO_MD5_KPDK || alg == CRYPTO_SHA1_KPDK) ip->ip_off = htons(ntohs(ip->ip_off) & IP_DF); else ip->ip_off = 0; } ptr = mtod(m, unsigned char *) + sizeof(struct ip); /* IPv4 option processing */ for (off = sizeof(struct ip); off < skip;) { if (ptr[off] == IPOPT_EOL || ptr[off] == IPOPT_NOP || off + 1 < skip) ; else { DPRINTF(("%s: illegal IPv4 option length for " "option %d\n", __func__, ptr[off])); m_freem(m); return EINVAL; } switch (ptr[off]) { case IPOPT_EOL: off = skip; /* End the loop. */ break; case IPOPT_NOP: off++; break; case IPOPT_SECURITY: /* 0x82 */ case 0x85: /* Extended security. */ case 0x86: /* Commercial security. */ case 0x94: /* Router alert */ case 0x95: /* RFC1770 */ /* Sanity check for option length. */ if (ptr[off + 1] < 2) { DPRINTF(("%s: illegal IPv4 option " "length for option %d\n", __func__, ptr[off])); m_freem(m); return EINVAL; } off += ptr[off + 1]; break; case IPOPT_LSRR: case IPOPT_SSRR: /* Sanity check for option length. */ if (ptr[off + 1] < 2) { DPRINTF(("%s: illegal IPv4 option " "length for option %d\n", __func__, ptr[off])); m_freem(m); return EINVAL; } /* * On output, if we have either of the * source routing options, we should * swap the destination address of the * IP header with the last address * specified in the option, as that is * what the destination's IP header * will look like. */ if (out) bcopy(ptr + off + ptr[off + 1] - sizeof(struct in_addr), &(ip->ip_dst), sizeof(struct in_addr)); /* Fall through */ default: /* Sanity check for option length. */ if (ptr[off + 1] < 2) { DPRINTF(("%s: illegal IPv4 option " "length for option %d\n", __func__, ptr[off])); m_freem(m); return EINVAL; } /* Zeroize all other options. */ count = ptr[off + 1]; bcopy(ipseczeroes, ptr, count); off += count; break; } /* Sanity check. */ if (off > skip) { DPRINTF(("%s: malformed IPv4 options header\n", __func__)); m_freem(m); return EINVAL; } } break; #endif /* INET */ #ifdef INET6 case AF_INET6: /* Ugly... */ /* Copy and "cook" the IPv6 header. */ m_copydata(m, 0, sizeof(ip6), (caddr_t) &ip6); /* We don't do IPv6 Jumbograms. */ if (ip6.ip6_plen == 0) { DPRINTF(("%s: unsupported IPv6 jumbogram\n", __func__)); m_freem(m); return EMSGSIZE; } ip6.ip6_flow = 0; ip6.ip6_hlim = 0; ip6.ip6_vfc &= ~IPV6_VERSION_MASK; ip6.ip6_vfc |= IPV6_VERSION; /* Scoped address handling. */ if (IN6_IS_SCOPE_LINKLOCAL(&ip6.ip6_src)) ip6.ip6_src.s6_addr16[1] = 0; if (IN6_IS_SCOPE_LINKLOCAL(&ip6.ip6_dst)) ip6.ip6_dst.s6_addr16[1] = 0; /* Done with IPv6 header. 
*/ m_copyback(m, 0, sizeof(struct ip6_hdr), (caddr_t) &ip6); /* Let's deal with the remaining headers (if any). */ if (skip - sizeof(struct ip6_hdr) > 0) { if (m->m_len <= skip) { ptr = (unsigned char *) malloc( skip - sizeof(struct ip6_hdr), M_XDATA, M_NOWAIT); if (ptr == NULL) { DPRINTF(("%s: failed to allocate memory" "for IPv6 headers\n",__func__)); m_freem(m); return ENOBUFS; } /* * Copy all the protocol headers after * the IPv6 header. */ m_copydata(m, sizeof(struct ip6_hdr), skip - sizeof(struct ip6_hdr), ptr); alloc = 1; } else { /* No need to allocate memory. */ ptr = mtod(m, unsigned char *) + sizeof(struct ip6_hdr); alloc = 0; } } else break; off = ip6.ip6_nxt & 0xff; /* Next header type. */ for (len = 0; len < skip - sizeof(struct ip6_hdr);) switch (off) { case IPPROTO_HOPOPTS: case IPPROTO_DSTOPTS: ip6e = (struct ip6_ext *) (ptr + len); /* * Process the mutable/immutable * options -- borrows heavily from the * KAME code. */ for (count = len + sizeof(struct ip6_ext); count < len + ((ip6e->ip6e_len + 1) << 3);) { if (ptr[count] == IP6OPT_PAD1) { count++; continue; /* Skip padding. */ } /* Sanity check. */ if (count > len + ((ip6e->ip6e_len + 1) << 3)) { m_freem(m); /* Free, if we allocated. */ if (alloc) free(ptr, M_XDATA); return EINVAL; } ad = ptr[count + 1]; /* If mutable option, zeroize. */ if (ptr[count] & IP6OPT_MUTABLE) bcopy(ipseczeroes, ptr + count, ptr[count + 1]); count += ad; /* Sanity check. */ if (count > skip - sizeof(struct ip6_hdr)) { m_freem(m); /* Free, if we allocated. */ if (alloc) free(ptr, M_XDATA); return EINVAL; } } /* Advance. */ len += ((ip6e->ip6e_len + 1) << 3); off = ip6e->ip6e_nxt; break; case IPPROTO_ROUTING: /* * Always include routing headers in * computation. */ ip6e = (struct ip6_ext *) (ptr + len); len += ((ip6e->ip6e_len + 1) << 3); off = ip6e->ip6e_nxt; break; default: DPRINTF(("%s: unexpected IPv6 header type %d", __func__, off)); if (alloc) free(ptr, M_XDATA); m_freem(m); return EINVAL; } /* Copyback and free, if we allocated. */ if (alloc) { m_copyback(m, sizeof(struct ip6_hdr), skip - sizeof(struct ip6_hdr), ptr); free(ptr, M_XDATA); } break; #endif /* INET6 */ }
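/*
 * Userland sketch of the IPv4 "massaging" above: before the AH ICV is
 * computed, header fields that routers may legitimately rewrite in transit
 * (TOS, TTL, checksum, fragment offset) are zeroed so that sender and
 * receiver hash identical bytes; mutable options get the same treatment.
 * Only the fixed header is shown here, and the helper name is illustrative,
 * not a kernel function.
 */
#include <stdio.h>
#include <string.h>
#include <sys/types.h>
#include <netinet/in.h>
#include <netinet/ip.h>

static void
ah_zero_mutable_fields(struct ip *ip)
{
    ip->ip_tos = 0;     /* may be rewritten by DiffServ-marking routers */
    ip->ip_ttl = 0;     /* decremented at every hop */
    ip->ip_sum = 0;     /* recomputed whenever the header changes */
    ip->ip_off = 0;     /* cleared (the KPDK transforms keep DF instead) */
}

int
main(void)
{
    struct ip ip;

    memset(&ip, 0, sizeof(ip));
    ip.ip_v = 4;
    ip.ip_hl = 5;
    ip.ip_tos = 0x10;
    ip.ip_ttl = 64;
    ah_zero_mutable_fields(&ip);
    printf("tos=%d ttl=%d sum=%d off=%d\n",
        ip.ip_tos, ip.ip_ttl, ip.ip_sum, ip.ip_off);
    return 0;
}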
int via_padlock_crypto_encdec(struct cryptop *crp, struct cryptodesc *crd, struct via_padlock_session *ses, struct via_padlock_softc *sc, void *buf) { uint32_t *key; int err = 0; if ((crd->crd_len % 16) != 0) { err = EINVAL; return (err); } sc->op_buf = malloc(crd->crd_len, M_DEVBUF, M_NOWAIT); if (sc->op_buf == NULL) { err = ENOMEM; return (err); } if (crd->crd_flags & CRD_F_ENCRYPT) { sc->op_cw[0] = ses->ses_cw0 | C3_CRYPT_CWLO_ENCRYPT; key = ses->ses_ekey; if (crd->crd_flags & CRD_F_IV_EXPLICIT) memcpy(sc->op_iv, crd->crd_iv, 16); else memcpy(sc->op_iv, ses->ses_iv, 16); if ((crd->crd_flags & CRD_F_IV_PRESENT) == 0) { if (crp->crp_flags & CRYPTO_F_IMBUF) m_copyback((struct mbuf *)crp->crp_buf, crd->crd_inject, 16, sc->op_iv); else if (crp->crp_flags & CRYPTO_F_IOV) cuio_copyback((struct uio *)crp->crp_buf, crd->crd_inject, 16, sc->op_iv); else memcpy((char *)crp->crp_buf + crd->crd_inject, sc->op_iv, 16); } } else { sc->op_cw[0] = ses->ses_cw0 | C3_CRYPT_CWLO_DECRYPT; key = ses->ses_dkey; if (crd->crd_flags & CRD_F_IV_EXPLICIT) memcpy(sc->op_iv, crd->crd_iv, 16); else { if (crp->crp_flags & CRYPTO_F_IMBUF) m_copydata((struct mbuf *)crp->crp_buf, crd->crd_inject, 16, sc->op_iv); else if (crp->crp_flags & CRYPTO_F_IOV) cuio_copydata((struct uio *)crp->crp_buf, crd->crd_inject, 16, sc->op_iv); else memcpy(sc->op_iv, (char *)crp->crp_buf + crd->crd_inject, 16); } } if (crp->crp_flags & CRYPTO_F_IMBUF) m_copydata((struct mbuf *)crp->crp_buf, crd->crd_skip, crd->crd_len, sc->op_buf); else if (crp->crp_flags & CRYPTO_F_IOV) cuio_copydata((struct uio *)crp->crp_buf, crd->crd_skip, crd->crd_len, sc->op_buf); else memcpy(sc->op_buf, (char *)crp->crp_buf + crd->crd_skip, crd->crd_len); sc->op_cw[1] = sc->op_cw[2] = sc->op_cw[3] = 0; via_padlock_cbc(&sc->op_cw, sc->op_buf, sc->op_buf, key, crd->crd_len / 16, sc->op_iv); if (crp->crp_flags & CRYPTO_F_IMBUF) m_copyback((struct mbuf *)crp->crp_buf, crd->crd_skip, crd->crd_len, sc->op_buf); else if (crp->crp_flags & CRYPTO_F_IOV) cuio_copyback((struct uio *)crp->crp_buf, crd->crd_skip, crd->crd_len, sc->op_buf); else memcpy((char *)crp->crp_buf + crd->crd_skip, sc->op_buf, crd->crd_len); /* copy out last block for use as next session IV */ if (crd->crd_flags & CRD_F_ENCRYPT) { if (crp->crp_flags & CRYPTO_F_IMBUF) m_copydata((struct mbuf *)crp->crp_buf, crd->crd_skip + crd->crd_len - 16, 16, ses->ses_iv); else if (crp->crp_flags & CRYPTO_F_IOV) cuio_copydata((struct uio *)crp->crp_buf, crd->crd_skip + crd->crd_len - 16, 16, ses->ses_iv); else memcpy(ses->ses_iv, (char *)crp->crp_buf + crd->crd_skip + crd->crd_len - 16, 16); } if (sc->op_buf != NULL) { memset(sc->op_buf, 0, crd->crd_len); free(sc->op_buf, M_DEVBUF); sc->op_buf = NULL; } return (err); }
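/*
 * Userland sketch (a toy block transform stands in for the PadLock AES unit)
 * of why the driver above saves the final cyphertext block as ses_iv: CBC
 * applied to a message in two requests, with the last cyphertext block of
 * the first request used as the IV of the second, produces the same
 * cyphertext as a single pass over the whole message.  All names are
 * illustrative.
 */
#include <stdio.h>
#include <string.h>
#include <stdint.h>

#define BLK 16

static void
toy_block(uint8_t *b)
{
    for (int j = 0; j < BLK; j++)
        b[j] = (uint8_t)(b[j] ^ (0x33 + j));
}

static void
cbc(uint8_t *buf, size_t len, uint8_t *iv)  /* iv is updated in place */
{
    for (size_t i = 0; i < len; i += BLK) {
        for (int j = 0; j < BLK; j++)
            buf[i + j] ^= iv[j];
        toy_block(buf + i);
        memcpy(iv, buf + i, BLK);           /* last block becomes next IV */
    }
}

int
main(void)
{
    uint8_t one[48] = "0123456789abcdef0123456789abcdef0123456789abcde";
    uint8_t two[48], iv[BLK] = { 0 }, iv2[BLK] = { 0 };

    memcpy(two, one, sizeof(one));
    cbc(one, sizeof(one), iv);              /* whole message in one request */
    cbc(two, 32, iv2);                      /* first request... */
    cbc(two + 32, 16, iv2);                 /* ...second, with carried IV */
    printf("%s\n", memcmp(one, two, sizeof(one)) == 0 ? "match" : "mismatch");
    return 0;
}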
/* * Common function for DMA map synchronization. May be called * by chipset-specific DMA map synchronization functions. * * This version works with the virtually-indexed, write-back cache * found in the MIPS-3/MIPS-4 CPUs available for the Algorithmics. */ void _bus_dmamap_sync(bus_dma_tag_t t, bus_dmamap_t map, bus_addr_t offset, bus_size_t len, int ops) { bus_size_t minlen; #ifdef DIAGNOSTIC /* * Mixing PRE and POST operations is not allowed. */ if ((ops & (BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE)) != 0 && (ops & (BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE)) != 0) panic("_bus_dmamap_sync: mix PRE and POST"); if (offset >= map->dm_mapsize) panic("_bus_dmamap_sync: bad offset %"PRIxPADDR " (map size is %"PRIxPSIZE")", offset, map->dm_mapsize); if (len == 0 || (offset + len) > map->dm_mapsize) panic("_bus_dmamap_sync: bad length"); #endif /* * Since we're dealing with a virtually-indexed, write-back * cache, we need to do the following things: * * PREREAD -- Invalidate D-cache. Note we might have * to also write-back here if we have to use an Index * op, or if the buffer start/end is not cache-line aligned. * * PREWRITE -- Write-back the D-cache. If we have to use * an Index op, we also have to invalidate. Note that if * we are doing PREREAD|PREWRITE, we can collapse everything * into a single op. * * POSTREAD -- Nothing. * * POSTWRITE -- Nothing. */ #ifdef _MIPS_NEED_BUS_DMA_BOUNCE struct mips_bus_dma_cookie * const cookie = map->_dm_cookie; if (cookie != NULL && (cookie->id_flags & _BUS_DMA_IS_BOUNCING) && (ops & BUS_DMASYNC_PREWRITE)) { STAT_INCR(write_bounces); /* * Copy the caller's buffer to the bounce buffer. */ switch (cookie->id_buftype) { case _BUS_DMA_BUFTYPE_LINEAR: memcpy((char *)cookie->id_bouncebuf + offset, cookie->id_origlinearbuf + offset, len); break; case _BUS_DMA_BUFTYPE_MBUF: m_copydata(cookie->id_origmbuf, offset, len, (char *)cookie->id_bouncebuf + offset); break; case _BUS_DMA_BUFTYPE_UIO: _bus_dma_uiomove((char *)cookie->id_bouncebuf + offset, cookie->id_origuio, len, UIO_WRITE); break; #ifdef DIAGNOSTIC case _BUS_DMA_BUFTYPE_RAW: panic("_bus_dmamap_sync: _BUS_DMA_BUFTYPE_RAW"); break; case _BUS_DMA_BUFTYPE_INVALID: panic("_bus_dmamap_sync: _BUS_DMA_BUFTYPE_INVALID"); break; default: panic("_bus_dmamap_sync: unknown buffer type %d\n", cookie->id_buftype); break; #endif /* DIAGNOSTIC */ } } #endif /* _MIPS_NEED_BUS_DMA_BOUNCE */ /* * Flush the write buffer. * XXX Is this always necessary? */ wbflush(); /* * If the mapping is of COHERENT DMA-safe memory or this isn't a * PREREAD or PREWRITE, no cache flush is necessary. Check to see * if we need to bounce it. */ if ((map->_dm_flags & _BUS_DMAMAP_COHERENT) || (ops & (BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE)) == 0) goto bounce_it; /* * If the mapping belongs to the kernel, or it belongs * to the currently-running process (XXX actually, vmspace), * then we can use Hit ops. Otherwise, Index ops. * * This should be true the vast majority of the time. */ const bool useindex = (!VMSPACE_IS_KERNEL_P(map->_dm_vmspace) && map->_dm_vmspace != curproc->p_vmspace); bus_dma_segment_t *seg = map->dm_segs; bus_dma_segment_t * const lastseg = seg + map->dm_nsegs; /* * Skip segments until offset are withing a segment. */ for (; offset >= seg->ds_len; seg++) { offset -= seg->ds_len; } for (; seg < lastseg && len != 0; seg++, offset = 0, len -= minlen) { /* * Now at the first segment to sync; nail each segment until we * have exhausted the length. 
*/ vaddr_t vaddr = seg->_ds_vaddr + offset; minlen = ulmin(len, seg->ds_len - offset); #ifdef BUS_DMA_DEBUG printf("bus_dmamap_sync: flushing segment %p " "(0x%"PRIxBUSADDR"+%"PRIxBUSADDR ", 0x%"PRIxBUSADDR"+0x%"PRIxBUSADDR ") (olen = %"PRIxBUSADDR")...", seg, vaddr - offset, offset, vaddr - offset, offset + minlen - 1, len); #endif /* * If we are forced to use Index ops, it's always a * Write-back,Invalidate, so just do one test. */ if (__predict_false(useindex)) { mips_dcache_wbinv_range_index(vaddr, minlen); #ifdef BUS_DMA_DEBUG printf("\n"); #endif continue; } switch (ops) { case BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE: mips_dcache_wbinv_range(vaddr, minlen); break; case BUS_DMASYNC_PREREAD: #if 1 mips_dcache_wbinv_range(vaddr, minlen); #else mips_dcache_inv_range(vaddr, minlen); #endif break; case BUS_DMASYNC_PREWRITE: mips_dcache_wb_range(vaddr, minlen); break; } #ifdef BUS_DMA_DEBUG printf("\n"); #endif } bounce_it: #ifdef _MIPS_NEED_BUS_DMA_BOUNCE if ((ops & BUS_DMASYNC_POSTREAD) == 0 || cookie == NULL || (cookie->id_flags & _BUS_DMA_IS_BOUNCING) == 0) return; STAT_INCR(read_bounces); /* * Copy the bounce buffer to the caller's buffer. */ switch (cookie->id_buftype) { case _BUS_DMA_BUFTYPE_LINEAR: memcpy(cookie->id_origlinearbuf + offset, (char *)cookie->id_bouncebuf + offset, len); break; case _BUS_DMA_BUFTYPE_MBUF: m_copyback(cookie->id_origmbuf, offset, len, (char *)cookie->id_bouncebuf + offset); break; case _BUS_DMA_BUFTYPE_UIO: _bus_dma_uiomove((char *)cookie->id_bouncebuf + offset, cookie->id_origuio, len, UIO_READ); break; #ifdef DIAGNOSTIC case _BUS_DMA_BUFTYPE_RAW: panic("_bus_dmamap_sync: _BUS_DMA_BUFTYPE_RAW"); break; case _BUS_DMA_BUFTYPE_INVALID: panic("_bus_dmamap_sync: _BUS_DMA_BUFTYPE_INVALID"); break; default: panic("_bus_dmamap_sync: unknown buffer type %d\n", cookie->id_buftype); break; #endif } #endif /* _MIPS_NEED_BUS_DMA_BOUNCE */ ; }
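/*
 * Userland model of the bounce-buffer discipline above: on a PREWRITE sync
 * the caller's data is copied into the DMA-safe bounce buffer before the
 * device reads it, and on a POSTREAD sync the device's data is copied back
 * to the caller afterwards.  The flag values and struct below are
 * illustrative, not the real bus_dma(9) interface.
 */
#include <stdio.h>
#include <string.h>
#include <stddef.h>

#define PREWRITE 0x1    /* CPU -> device: sync before the transfer */
#define POSTREAD 0x2    /* device -> CPU: sync after the transfer */

struct bounce {
    char dev[64];       /* stands in for the DMA-safe bounce buffer */
};

static void
sync_bounce(struct bounce *b, char *caller, size_t off, size_t len, int ops)
{
    if (ops & PREWRITE)
        memcpy(b->dev + off, caller + off, len);
    if (ops & POSTREAD)
        memcpy(caller + off, b->dev + off, len);
}

int
main(void)
{
    struct bounce b;
    char buf[64] = "filled in by the driver";

    sync_bounce(&b, buf, 0, sizeof(buf), PREWRITE);  /* hand to the device */
    strcpy(b.dev, "filled in by the device");        /* pretend DMA write */
    sync_bounce(&b, buf, 0, sizeof(buf), POSTREAD);  /* read back results */
    printf("%s\n", buf);
    return 0;
}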
/* * Apply a symmetric encryption/decryption algorithm. */ static int swcr_encdec(struct cryptodesc *crd, struct swcr_data *sw, caddr_t buf, int outtype) { unsigned char iv[EALG_MAX_BLOCK_LEN], blk[EALG_MAX_BLOCK_LEN], *idat; unsigned char *ivp, piv[EALG_MAX_BLOCK_LEN]; struct enc_xform *exf; int i, k, j, blks; exf = sw->sw_exf; blks = exf->blocksize; /* Check for non-padded data */ if (crd->crd_len % blks) return EINVAL; /* Initialize the IV */ if (crd->crd_flags & CRD_F_ENCRYPT) { /* IV explicitly provided ? */ if (crd->crd_flags & CRD_F_IV_EXPLICIT) bcopy(crd->crd_iv, iv, blks); else { /* Get random IV */ for (i = 0; i + sizeof (u_int32_t) < EALG_MAX_BLOCK_LEN; i += sizeof (u_int32_t)) { u_int32_t temp = arc4random(); bcopy(&temp, iv + i, sizeof(u_int32_t)); } /* * What if the block size is not a multiple * of sizeof (u_int32_t), which is the size of * what arc4random() returns ? */ if (EALG_MAX_BLOCK_LEN % sizeof (u_int32_t) != 0) { u_int32_t temp = arc4random(); bcopy (&temp, iv + i, EALG_MAX_BLOCK_LEN - i); } } /* Do we need to write the IV */ if (!(crd->crd_flags & CRD_F_IV_PRESENT)) { COPYBACK(outtype, buf, crd->crd_inject, blks, iv); } } else { /* Decryption */ /* IV explicitly provided ? */ if (crd->crd_flags & CRD_F_IV_EXPLICIT) bcopy(crd->crd_iv, iv, blks); else { /* Get IV off buf */ COPYDATA(outtype, buf, crd->crd_inject, blks, iv); } } ivp = iv; if (outtype == CRYPTO_BUF_CONTIG) { if (crd->crd_flags & CRD_F_ENCRYPT) { for (i = crd->crd_skip; i < crd->crd_skip + crd->crd_len; i += blks) { /* XOR with the IV/previous block, as appropriate. */ if (i == crd->crd_skip) for (k = 0; k < blks; k++) buf[i + k] ^= ivp[k]; else for (k = 0; k < blks; k++) buf[i + k] ^= buf[i + k - blks]; exf->encrypt(sw->sw_kschedule, buf + i); } } else { /* Decrypt */ /* * Start at the end, so we don't need to keep the encrypted * block as the IV for the next block. */ for (i = crd->crd_skip + crd->crd_len - blks; i >= crd->crd_skip; i -= blks) { exf->decrypt(sw->sw_kschedule, buf + i); /* XOR with the IV/previous block, as appropriate */ if (i == crd->crd_skip) for (k = 0; k < blks; k++) buf[i + k] ^= ivp[k]; else for (k = 0; k < blks; k++) buf[i + k] ^= buf[i + k - blks]; } } return 0; } else if (outtype == CRYPTO_BUF_MBUF) { struct mbuf *m = (struct mbuf *) buf; /* Find beginning of data */ m = m_getptr(m, crd->crd_skip, &k); if (m == NULL) return EINVAL; i = crd->crd_len; while (i > 0) { /* * If there's insufficient data at the end of * an mbuf, we have to do some copying. */ if (m->m_len < k + blks && m->m_len != k) { m_copydata(m, k, blks, blk); /* Actual encryption/decryption */ if (crd->crd_flags & CRD_F_ENCRYPT) { /* XOR with previous block */ for (j = 0; j < blks; j++) blk[j] ^= ivp[j]; exf->encrypt(sw->sw_kschedule, blk); /* * Keep encrypted block for XOR'ing * with next block */ bcopy(blk, iv, blks); ivp = iv; } else { /* decrypt */ /* * Keep encrypted block for XOR'ing * with next block */ if (ivp == iv) bcopy(blk, piv, blks); else bcopy(blk, iv, blks); exf->decrypt(sw->sw_kschedule, blk); /* XOR with previous block */ for (j = 0; j < blks; j++) blk[j] ^= ivp[j]; if (ivp == iv) bcopy(piv, iv, blks); else ivp = iv; } /* Copy back decrypted block */ m_copyback(m, k, blks, blk); /* Advance pointer */ m = m_getptr(m, k + blks, &k); if (m == NULL) return EINVAL; i -= blks; /* Could be done... 
*/ if (i == 0) break; } /* Skip possibly empty mbufs */ if (k == m->m_len) { for (m = m->m_next; m && m->m_len == 0; m = m->m_next) ; k = 0; } /* Sanity check */ if (m == NULL) return EINVAL; /* * Warning: idat may point to garbage here, but * we only use it in the while() loop, only if * there are indeed enough data. */ idat = mtod(m, unsigned char *) + k; while (m->m_len >= k + blks && i > 0) { if (crd->crd_flags & CRD_F_ENCRYPT) { /* XOR with previous block/IV */ for (j = 0; j < blks; j++) idat[j] ^= ivp[j]; exf->encrypt(sw->sw_kschedule, idat); ivp = idat; } else { /* decrypt */ /* * Keep encrypted block to be used * in next block's processing. */ if (ivp == iv) bcopy(idat, piv, blks); else bcopy(idat, iv, blks); exf->decrypt(sw->sw_kschedule, idat); /* XOR with previous block/IV */ for (j = 0; j < blks; j++) idat[j] ^= ivp[j]; if (ivp == iv) bcopy(piv, iv, blks); else ivp = iv; } idat += blks; k += blks; i -= blks; } } return 0; /* Done with mbuf encryption/decryption */ } else if (outtype == CRYPTO_BUF_IOV) {
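/*
 * Userland sketch of the contiguous-buffer shortcut used above: by
 * decrypting from the last block backwards, the previous cyphertext block is
 * still intact in the buffer when it is needed for the XOR, so no saved copy
 * (the piv/iv dance of the mbuf path) is required.  toy_block()/toy_unblock()
 * stand in for the real cipher; names and values are illustrative.
 */
#include <stdio.h>
#include <string.h>
#include <stdint.h>

#define BLK 8

static void
toy_block(uint8_t *b)
{
    for (int j = 0; j < BLK; j++)
        b[j] = (uint8_t)((b[j] + 7 * j + 1) & 0xff);
}

static void
toy_unblock(uint8_t *b)
{
    for (int j = 0; j < BLK; j++)
        b[j] = (uint8_t)((b[j] - 7 * j - 1) & 0xff);
}

static void
cbc_encrypt(uint8_t *buf, size_t len, const uint8_t *iv)
{
    for (size_t i = 0; i < len; i += BLK) {
        for (int j = 0; j < BLK; j++)   /* previous block is already cyphertext */
            buf[i + j] ^= (i == 0) ? iv[j] : buf[i + j - BLK];
        toy_block(buf + i);
    }
}

static void
cbc_decrypt_backwards(uint8_t *buf, size_t len, const uint8_t *iv)
{
    for (size_t i = len - BLK; ; i -= BLK) {
        toy_unblock(buf + i);
        for (int j = 0; j < BLK; j++)   /* previous block is still cyphertext */
            buf[i + j] ^= (i == 0) ? iv[j] : buf[i + j - BLK];
        if (i == 0)
            break;
    }
}

int
main(void)
{
    uint8_t iv[BLK] = { 9, 8, 7, 6, 5, 4, 3, 2 };
    uint8_t buf[24] = "backwards decryptio";   /* three blocks, NUL padded */

    cbc_encrypt(buf, sizeof(buf), iv);
    cbc_decrypt_backwards(buf, sizeof(buf), iv);
    printf("%s\n", (char *)buf);               /* prints the original text */
    return 0;
}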
/* * ESP output routine, called by ipsp_process_packet(). */ int esp_output(struct mbuf *m, struct tdb *tdb, struct mbuf **mp, int skip, int protoff) { struct enc_xform *espx = (struct enc_xform *) tdb->tdb_encalgxform; struct auth_hash *esph = (struct auth_hash *) tdb->tdb_authalgxform; int ilen, hlen, rlen, padding, blks, alen; struct mbuf *mi, *mo = (struct mbuf *) NULL; struct tdb_crypto *tc; unsigned char *pad; u_int8_t prot; struct cryptodesc *crde = NULL, *crda = NULL; struct cryptop *crp; #if NBPFILTER > 0 struct ifnet *ifn = &(encif[0].sc_if); ifn->if_opackets++; ifn->if_obytes += m->m_pkthdr.len; if (ifn->if_bpf) { struct enchdr hdr; bzero (&hdr, sizeof(hdr)); hdr.af = tdb->tdb_dst.sa.sa_family; hdr.spi = tdb->tdb_spi; if (espx) hdr.flags |= M_CONF; if (esph) hdr.flags |= M_AUTH; bpf_mtap_hdr(ifn->if_bpf, (char *)&hdr, ENC_HDRLEN, m, BPF_DIRECTION_OUT); } #endif if (tdb->tdb_flags & TDBF_NOREPLAY) hlen = sizeof(u_int32_t) + tdb->tdb_ivlen; else hlen = 2 * sizeof(u_int32_t) + tdb->tdb_ivlen; rlen = m->m_pkthdr.len - skip; /* Raw payload length. */ if (espx) blks = espx->blocksize; else blks = 4; /* If no encryption, we have to be 4-byte aligned. */ padding = ((blks - ((rlen + 2) % blks)) % blks) + 2; if (esph) alen = AH_HMAC_HASHLEN; else alen = 0; espstat.esps_output++; switch (tdb->tdb_dst.sa.sa_family) { #ifdef INET case AF_INET: /* Check for IP maximum packet size violations. */ if (skip + hlen + rlen + padding + alen > IP_MAXPACKET) { DPRINTF(("esp_output(): packet in SA %s/%08x got " "too big\n", ipsp_address(tdb->tdb_dst), ntohl(tdb->tdb_spi))); m_freem(m); espstat.esps_toobig++; return EMSGSIZE; } break; #endif /* INET */ #ifdef INET6 case AF_INET6: /* Check for IPv6 maximum packet size violations. */ if (skip + hlen + rlen + padding + alen > IPV6_MAXPACKET) { DPRINTF(("esp_output(): packet in SA %s/%08x got too " "big\n", ipsp_address(tdb->tdb_dst), ntohl(tdb->tdb_spi))); m_freem(m); espstat.esps_toobig++; return EMSGSIZE; } break; #endif /* INET6 */ default: DPRINTF(("esp_output(): unknown/unsupported protocol " "family %d, SA %s/%08x\n", tdb->tdb_dst.sa.sa_family , ipsp_address(tdb->tdb_dst), ntohl(tdb->tdb_spi))); m_freem(m); espstat.esps_nopf++; return EPFNOSUPPORT; } /* Update the counters. */ tdb->tdb_cur_bytes += m->m_pkthdr.len - skip; espstat.esps_obytes += m->m_pkthdr.len - skip; /* Hard byte expiration. */ if (tdb->tdb_flags & TDBF_BYTES && tdb->tdb_cur_bytes >= tdb->tdb_exp_bytes) { pfkeyv2_expire(tdb, SADB_EXT_LIFETIME_HARD); tdb_delete(tdb); m_freem(m); return EINVAL; } /* Soft byte expiration. */ if (tdb->tdb_flags & TDBF_SOFT_BYTES && tdb->tdb_cur_bytes >= tdb->tdb_soft_bytes) { pfkeyv2_expire(tdb, SADB_EXT_LIFETIME_SOFT); tdb->tdb_flags &= ~TDBF_SOFT_BYTES; /* Turn off checking. */ } /* * Loop through mbuf chain; if we find a readonly mbuf, * replace the rest of the chain. */ mo = NULL; mi = m; while (mi != NULL && !M_READONLY(mi)) { mo = mi; mi = mi->m_next; } if (mi != NULL) { /* Replace the rest of the mbuf chain. */ struct mbuf *n = m_copym2(mi, 0, M_COPYALL, M_DONTWAIT); if (n == NULL) { DPRINTF(("esp_output(): bad mbuf chain, SA %s/%08x\n", ipsp_address(tdb->tdb_dst), ntohl(tdb->tdb_spi))); espstat.esps_hdrops++; m_freem(m); return ENOBUFS; } if (mo != NULL) mo->m_next = n; else m = n; m_freem(mi); } /* Inject ESP header. 
*/ mo = m_inject(m, skip, hlen, M_DONTWAIT); if (mo == NULL) { DPRINTF(("esp_output(): failed to inject ESP header for " "SA %s/%08x\n", ipsp_address(tdb->tdb_dst), ntohl(tdb->tdb_spi))); m_freem(m); espstat.esps_hdrops++; return ENOBUFS; } /* Initialize ESP header. */ bcopy((caddr_t) &tdb->tdb_spi, mtod(mo, caddr_t), sizeof(u_int32_t)); if (!(tdb->tdb_flags & TDBF_NOREPLAY)) { u_int32_t replay = htonl(tdb->tdb_rpl++); bcopy((caddr_t) &replay, mtod(mo, caddr_t) + sizeof(u_int32_t), sizeof(u_int32_t)); #if NPFSYNC > 0 pfsync_update_tdb(tdb,1); #endif } /* * Add padding -- better to do it ourselves than use the crypto engine, * although if/when we support compression, we'd have to do that. */ mo = m_inject(m, m->m_pkthdr.len, padding + alen, M_DONTWAIT); if (mo == NULL) { DPRINTF(("esp_output(): m_inject failed for SA %s/%08x\n", ipsp_address(tdb->tdb_dst), ntohl(tdb->tdb_spi))); m_freem(m); return ENOBUFS; } pad = mtod(mo, u_char *); /* Self-describing or random padding ? */ if (!(tdb->tdb_flags & TDBF_RANDOMPADDING)) for (ilen = 0; ilen < padding - 2; ilen++) pad[ilen] = ilen + 1; else arc4random_buf((void *) pad, padding - 2); /* Fix padding length and Next Protocol in padding itself. */ pad[padding - 2] = padding - 2; m_copydata(m, protoff, sizeof(u_int8_t), pad + padding - 1); /* Fix Next Protocol in IPv4/IPv6 header. */ prot = IPPROTO_ESP; m_copyback(m, protoff, sizeof(u_int8_t), &prot); /* Get crypto descriptors. */ crp = crypto_getreq(esph && espx ? 2 : 1); if (crp == NULL) { m_freem(m); DPRINTF(("esp_output(): failed to acquire crypto " "descriptors\n")); espstat.esps_crypto++; return ENOBUFS; } if (espx) { crde = crp->crp_desc; crda = crde->crd_next; /* Encryption descriptor. */ crde->crd_skip = skip + hlen; crde->crd_len = m->m_pkthdr.len - (skip + hlen + alen); crde->crd_flags = CRD_F_ENCRYPT; crde->crd_inject = skip + hlen - tdb->tdb_ivlen; if (tdb->tdb_flags & TDBF_HALFIV) { /* Copy half-iv in the packet. */ m_copyback(m, crde->crd_inject, tdb->tdb_ivlen, tdb->tdb_iv); /* Cook half-iv. */ bcopy(tdb->tdb_iv, crde->crd_iv, tdb->tdb_ivlen); for (ilen = 0; ilen < tdb->tdb_ivlen; ilen++) crde->crd_iv[tdb->tdb_ivlen + ilen] = ~crde->crd_iv[ilen]; crde->crd_flags |= CRD_F_IV_PRESENT | CRD_F_IV_EXPLICIT; } /* Encryption operation. */ crde->crd_alg = espx->type; crde->crd_key = tdb->tdb_emxkey; crde->crd_klen = tdb->tdb_emxkeylen * 8; /* XXX Rounds ? */ } else crda = crp->crp_desc; /* IPsec-specific opaque crypto info. */ tc = malloc(sizeof(*tc), M_XDATA, M_NOWAIT | M_ZERO); if (tc == NULL) { m_freem(m); crypto_freereq(crp); DPRINTF(("esp_output(): failed to allocate tdb_crypto\n")); espstat.esps_crypto++; return ENOBUFS; } tc->tc_spi = tdb->tdb_spi; tc->tc_proto = tdb->tdb_sproto; bcopy(&tdb->tdb_dst, &tc->tc_dst, sizeof(union sockaddr_union)); /* Crypto operation descriptor. */ crp->crp_ilen = m->m_pkthdr.len; /* Total input length. */ crp->crp_flags = CRYPTO_F_IMBUF; crp->crp_buf = (caddr_t) m; crp->crp_callback = (int (*) (struct cryptop *)) esp_output_cb; crp->crp_opaque = (caddr_t) tc; crp->crp_sid = tdb->tdb_cryptoid; if (esph) { /* Authentication descriptor. */ crda->crd_skip = skip; crda->crd_len = m->m_pkthdr.len - (skip + alen); crda->crd_inject = m->m_pkthdr.len - alen; /* Authentication operation. */ crda->crd_alg = esph->type; crda->crd_key = tdb->tdb_amxkey; crda->crd_klen = tdb->tdb_amxkeylen * 8; } if ((tdb->tdb_flags & TDBF_SKIPCRYPTO) == 0) return crypto_dispatch(crp); else return esp_output_cb(crp); }
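/*
 * Worked userland example of the ESP trailer arithmetic used above: the
 * payload is padded so that payload plus pad-length byte plus next-header
 * byte is a multiple of the cipher block size, the pad bytes are
 * self-describing (1, 2, 3, ...), and the last two trailer bytes carry the
 * pad length and the next-protocol number.  All values are made up for
 * illustration.
 */
#include <stdio.h>
#include <stdint.h>

int
main(void)
{
    int blks = 16;          /* cipher block size (4 when there is no cipher) */
    int rlen = 35;          /* raw payload length */
    uint8_t next = 6;       /* next protocol, e.g. TCP */
    uint8_t trailer[32];
    int padding, ilen;

    /* Same formula as esp_output(): at least 2 bytes, and enough that
     * (rlen + padding) is a multiple of the block size. */
    padding = ((blks - ((rlen + 2) % blks)) % blks) + 2;

    for (ilen = 0; ilen < padding - 2; ilen++)
        trailer[ilen] = ilen + 1;           /* self-describing padding */
    trailer[padding - 2] = padding - 2;     /* pad length */
    trailer[padding - 1] = next;            /* next protocol */

    printf("rlen %d + padding %d = %d, a multiple of %d\n",
        rlen, padding, rlen + padding, blks);
    printf("trailer:");
    for (ilen = 0; ilen < padding; ilen++)
        printf(" %02x", trailer[ilen]);
    printf("\n");
    return 0;
}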
int ubsec_process(struct cryptop *crp) { struct ubsec_q *q = NULL; int card, err = 0, i, j, s, nicealign; struct ubsec_softc *sc; struct cryptodesc *crd1, *crd2, *maccrd, *enccrd; int encoffset = 0, macoffset = 0, cpskip, cpoffset; int sskip, dskip, stheend, dtheend; int16_t coffset; struct ubsec_session *ses, key; struct ubsec_dma *dmap = NULL; u_int16_t flags = 0; int ivlen = 0, keylen = 0; if (crp == NULL || crp->crp_callback == NULL) { ubsecstats.hst_invalid++; return (EINVAL); } card = UBSEC_CARD(crp->crp_sid); if (card >= ubsec_cd.cd_ndevs || ubsec_cd.cd_devs[card] == NULL) { ubsecstats.hst_invalid++; return (EINVAL); } sc = ubsec_cd.cd_devs[card]; s = splnet(); if (SIMPLEQ_EMPTY(&sc->sc_freequeue)) { ubsecstats.hst_queuefull++; splx(s); err = ENOMEM; goto errout2; } q = SIMPLEQ_FIRST(&sc->sc_freequeue); SIMPLEQ_REMOVE_HEAD(&sc->sc_freequeue, q_next); splx(s); dmap = q->q_dma; /* Save dma pointer */ bzero(q, sizeof(struct ubsec_q)); bzero(&key, sizeof(key)); q->q_sesn = UBSEC_SESSION(crp->crp_sid); q->q_dma = dmap; ses = &sc->sc_sessions[q->q_sesn]; if (crp->crp_flags & CRYPTO_F_IMBUF) { q->q_src_m = (struct mbuf *)crp->crp_buf; q->q_dst_m = (struct mbuf *)crp->crp_buf; } else if (crp->crp_flags & CRYPTO_F_IOV) { q->q_src_io = (struct uio *)crp->crp_buf; q->q_dst_io = (struct uio *)crp->crp_buf; } else { err = EINVAL; goto errout; /* XXX we don't handle contiguous blocks! */ } bzero(&dmap->d_dma->d_mcr, sizeof(struct ubsec_mcr)); dmap->d_dma->d_mcr.mcr_pkts = htole16(1); dmap->d_dma->d_mcr.mcr_flags = 0; q->q_crp = crp; crd1 = crp->crp_desc; if (crd1 == NULL) { err = EINVAL; goto errout; } crd2 = crd1->crd_next; if (crd2 == NULL) { if (crd1->crd_alg == CRYPTO_MD5_HMAC || crd1->crd_alg == CRYPTO_SHA1_HMAC) { maccrd = crd1; enccrd = NULL; } else if (crd1->crd_alg == CRYPTO_3DES_CBC || crd1->crd_alg == CRYPTO_AES_CBC) { maccrd = NULL; enccrd = crd1; } else { err = EINVAL; goto errout; } } else { if ((crd1->crd_alg == CRYPTO_MD5_HMAC || crd1->crd_alg == CRYPTO_SHA1_HMAC) && (crd2->crd_alg == CRYPTO_3DES_CBC || crd2->crd_alg == CRYPTO_AES_CBC) && ((crd2->crd_flags & CRD_F_ENCRYPT) == 0)) { maccrd = crd1; enccrd = crd2; } else if ((crd1->crd_alg == CRYPTO_3DES_CBC || crd1->crd_alg == CRYPTO_AES_CBC) && (crd2->crd_alg == CRYPTO_MD5_HMAC || crd2->crd_alg == CRYPTO_SHA1_HMAC) && (crd1->crd_flags & CRD_F_ENCRYPT)) { enccrd = crd1; maccrd = crd2; } else { /* * We cannot order the ubsec as requested */ err = EINVAL; goto errout; } } if (enccrd) { if (enccrd->crd_alg == CRYPTO_AES_CBC) { if ((sc->sc_flags & UBS_FLAGS_AES) == 0) { err = EINVAL; goto errout; } flags |= htole16(UBS_PKTCTX_ENC_AES); switch (enccrd->crd_klen) { case 128: case 192: case 256: keylen = enccrd->crd_klen / 8; break; default: err = EINVAL; goto errout; } ivlen = 16; } else { flags |= htole16(UBS_PKTCTX_ENC_3DES); ivlen = 8; keylen = 24; } encoffset = enccrd->crd_skip; if (enccrd->crd_flags & CRD_F_ENCRYPT) { if (enccrd->crd_flags & CRD_F_IV_EXPLICIT) bcopy(enccrd->crd_iv, key.ses_iv, ivlen); else arc4random_buf(key.ses_iv, ivlen); if ((enccrd->crd_flags & CRD_F_IV_PRESENT) == 0) { if (crp->crp_flags & CRYPTO_F_IMBUF) err = m_copyback(q->q_src_m, enccrd->crd_inject, ivlen, key.ses_iv, M_NOWAIT); else if (crp->crp_flags & CRYPTO_F_IOV) cuio_copyback(q->q_src_io, enccrd->crd_inject, ivlen, key.ses_iv); if (err) goto errout; } } else { flags |= htole16(UBS_PKTCTX_INBOUND); if (enccrd->crd_flags & CRD_F_IV_EXPLICIT) bcopy(enccrd->crd_iv, key.ses_iv, ivlen); else if (crp->crp_flags & CRYPTO_F_IMBUF) m_copydata(q->q_src_m, 
enccrd->crd_inject, ivlen, (caddr_t)key.ses_iv); else if (crp->crp_flags & CRYPTO_F_IOV) cuio_copydata(q->q_src_io, enccrd->crd_inject, ivlen, (caddr_t)key.ses_iv); } for (i = 0; i < (keylen / 4); i++) key.ses_key[i] = ses->ses_key[i]; for (i = 0; i < (ivlen / 4); i++) SWAP32(key.ses_iv[i]); } if (maccrd) { macoffset = maccrd->crd_skip; if (maccrd->crd_alg == CRYPTO_MD5_HMAC) flags |= htole16(UBS_PKTCTX_AUTH_MD5); else flags |= htole16(UBS_PKTCTX_AUTH_SHA1); for (i = 0; i < 5; i++) { key.ses_hminner[i] = ses->ses_hminner[i]; key.ses_hmouter[i] = ses->ses_hmouter[i]; HTOLE32(key.ses_hminner[i]); HTOLE32(key.ses_hmouter[i]); } } if (enccrd && maccrd) { /* * ubsec cannot handle packets where the end of encryption * and authentication are not the same, or where the * encrypted part begins before the authenticated part. */ if (((encoffset + enccrd->crd_len) != (macoffset + maccrd->crd_len)) || (enccrd->crd_skip < maccrd->crd_skip)) { err = EINVAL; goto errout; } sskip = maccrd->crd_skip; cpskip = dskip = enccrd->crd_skip; stheend = maccrd->crd_len; dtheend = enccrd->crd_len; coffset = enccrd->crd_skip - maccrd->crd_skip; cpoffset = cpskip + dtheend; #ifdef UBSEC_DEBUG printf("mac: skip %d, len %d, inject %d\n", maccrd->crd_skip, maccrd->crd_len, maccrd->crd_inject); printf("enc: skip %d, len %d, inject %d\n", enccrd->crd_skip, enccrd->crd_len, enccrd->crd_inject); printf("src: skip %d, len %d\n", sskip, stheend); printf("dst: skip %d, len %d\n", dskip, dtheend); printf("ubs: coffset %d, pktlen %d, cpskip %d, cpoffset %d\n", coffset, stheend, cpskip, cpoffset); #endif } else { cpskip = dskip = sskip = macoffset + encoffset; dtheend = stheend = (enccrd)?enccrd->crd_len:maccrd->crd_len; cpoffset = cpskip + dtheend; coffset = 0; } if (bus_dmamap_create(sc->sc_dmat, 0xfff0, UBS_MAX_SCATTER, 0xfff0, 0, BUS_DMA_NOWAIT, &q->q_src_map) != 0) { err = ENOMEM; goto errout; } if (crp->crp_flags & CRYPTO_F_IMBUF) { if (bus_dmamap_load_mbuf(sc->sc_dmat, q->q_src_map, q->q_src_m, BUS_DMA_NOWAIT) != 0) { bus_dmamap_destroy(sc->sc_dmat, q->q_src_map); q->q_src_map = NULL; err = ENOMEM; goto errout; } } else if (crp->crp_flags & CRYPTO_F_IOV) { if (bus_dmamap_load_uio(sc->sc_dmat, q->q_src_map, q->q_src_io, BUS_DMA_NOWAIT) != 0) { bus_dmamap_destroy(sc->sc_dmat, q->q_src_map); q->q_src_map = NULL; err = ENOMEM; goto errout; } } nicealign = ubsec_dmamap_aligned(q->q_src_map); dmap->d_dma->d_mcr.mcr_pktlen = htole16(stheend); #ifdef UBSEC_DEBUG printf("src skip: %d\n", sskip); #endif for (i = j = 0; i < q->q_src_map->dm_nsegs; i++) { struct ubsec_pktbuf *pb; bus_size_t packl = q->q_src_map->dm_segs[i].ds_len; bus_addr_t packp = q->q_src_map->dm_segs[i].ds_addr; if (sskip >= packl) { sskip -= packl; continue; } packl -= sskip; packp += sskip; sskip = 0; if (packl > 0xfffc) { err = EIO; goto errout; } if (j == 0) pb = &dmap->d_dma->d_mcr.mcr_ipktbuf; else pb = &dmap->d_dma->d_sbuf[j - 1]; pb->pb_addr = htole32(packp); if (stheend) { if (packl > stheend) { pb->pb_len = htole32(stheend); stheend = 0; } else { pb->pb_len = htole32(packl); stheend -= packl; } } else pb->pb_len = htole32(packl); if ((i + 1) == q->q_src_map->dm_nsegs) pb->pb_next = 0; else pb->pb_next = htole32(dmap->d_alloc.dma_paddr + offsetof(struct ubsec_dmachunk, d_sbuf[j])); j++; } if (enccrd == NULL && maccrd != NULL) { dmap->d_dma->d_mcr.mcr_opktbuf.pb_addr = 0; dmap->d_dma->d_mcr.mcr_opktbuf.pb_len = 0; dmap->d_dma->d_mcr.mcr_opktbuf.pb_next = htole32(dmap->d_alloc.dma_paddr + offsetof(struct ubsec_dmachunk, d_macbuf[0])); #ifdef UBSEC_DEBUG 
printf("opkt: %x %x %x\n", dmap->d_dma->d_mcr.mcr_opktbuf.pb_addr, dmap->d_dma->d_mcr.mcr_opktbuf.pb_len, dmap->d_dma->d_mcr.mcr_opktbuf.pb_next); #endif } else {
/* * ESP output routine, called by ipsec[46]_process_packet(). */ static int esp_output( struct mbuf *m, struct ipsecrequest *isr, struct mbuf **mp, int skip, int protoff ) { struct enc_xform *espx; struct auth_hash *esph; int hlen, rlen, plen, padding, blks, alen, i, roff; struct mbuf *mo = (struct mbuf *) NULL; struct tdb_crypto *tc; struct secasvar *sav; struct secasindex *saidx; unsigned char *pad; u_int8_t prot; int error, maxpacketsize; struct cryptodesc *crde = NULL, *crda = NULL; struct cryptop *crp; SPLASSERT(net, "esp_output"); sav = isr->sav; KASSERT(sav != NULL, ("esp_output: null SA")); esph = sav->tdb_authalgxform; espx = sav->tdb_encalgxform; KASSERT(espx != NULL, ("esp_output: null encoding xform")); if (sav->flags & SADB_X_EXT_OLD) hlen = sizeof (struct esp) + sav->ivlen; else hlen = sizeof (struct newesp) + sav->ivlen; rlen = m->m_pkthdr.len - skip; /* Raw payload length. */ /* * NB: The null encoding transform has a blocksize of 4 * so that headers are properly aligned. */ blks = espx->blocksize; /* IV blocksize */ /* XXX clamp padding length a la KAME??? */ padding = ((blks - ((rlen + 2) % blks)) % blks) + 2; plen = rlen + padding; /* Padded payload length. */ if (esph) alen = AH_HMAC_HASHLEN; else alen = 0; espstat.esps_output++; saidx = &sav->sah->saidx; /* Check for maximum packet size violations. */ switch (saidx->dst.sa.sa_family) { #ifdef INET case AF_INET: maxpacketsize = IP_MAXPACKET; break; #endif /* INET */ #ifdef INET6 case AF_INET6: maxpacketsize = IPV6_MAXPACKET; break; #endif /* INET6 */ default: DPRINTF(("esp_output: unknown/unsupported protocol " "family %d, SA %s/%08lx\n", saidx->dst.sa.sa_family, ipsec_address(&saidx->dst), (u_long) ntohl(sav->spi))); espstat.esps_nopf++; error = EPFNOSUPPORT; goto bad; } if (skip + hlen + rlen + padding + alen > maxpacketsize) { DPRINTF(("esp_output: packet in SA %s/%08lx got too big " "(len %u, max len %u)\n", ipsec_address(&saidx->dst), (u_long) ntohl(sav->spi), skip + hlen + rlen + padding + alen, maxpacketsize)); espstat.esps_toobig++; error = EMSGSIZE; goto bad; } /* Update the counters. */ espstat.esps_obytes += m->m_pkthdr.len - skip; m = m_clone(m); if (m == NULL) { DPRINTF(("esp_output: cannot clone mbuf chain, SA %s/%08lx\n", ipsec_address(&saidx->dst), (u_long) ntohl(sav->spi))); espstat.esps_hdrops++; error = ENOBUFS; goto bad; } /* Inject ESP header. */ mo = m_makespace(m, skip, hlen, &roff); if (mo == NULL) { DPRINTF(("esp_output: failed to inject %u byte ESP hdr for SA " "%s/%08lx\n", hlen, ipsec_address(&saidx->dst), (u_long) ntohl(sav->spi))); espstat.esps_hdrops++; /* XXX diffs from openbsd */ error = ENOBUFS; goto bad; } /* Initialize ESP header. */ bcopy((caddr_t) &sav->spi, mtod(mo, caddr_t) + roff, sizeof(u_int32_t)); if (sav->replay) { u_int32_t replay = htonl(++(sav->replay->count)); bcopy((caddr_t) &replay, mtod(mo, caddr_t) + roff + sizeof(u_int32_t), sizeof(u_int32_t)); } /* * Add padding -- better to do it ourselves than use the crypto engine, * although if/when we support compression, we'd have to do that. */ pad = (u_char *) m_pad(m, padding + alen); if (pad == NULL) { DPRINTF(("esp_output: m_pad failed for SA %s/%08lx\n", ipsec_address(&saidx->dst), (u_long) ntohl(sav->spi))); m = NULL; /* NB: free'd by m_pad */ error = ENOBUFS; goto bad; } /* * Add padding: random, zero, or self-describing. 
* XXX catch unexpected setting */ switch (sav->flags & SADB_X_EXT_PMASK) { case SADB_X_EXT_PRAND: (void) read_random(pad, padding - 2); break; case SADB_X_EXT_PZERO: bzero(pad, padding - 2); break; case SADB_X_EXT_PSEQ: for (i = 0; i < padding - 2; i++) pad[i] = i+1; break; } /* Fix padding length and Next Protocol in padding itself. */ pad[padding - 2] = padding - 2; m_copydata(m, protoff, sizeof(u_int8_t), pad + padding - 1); /* Fix Next Protocol in IPv4/IPv6 header. */ prot = IPPROTO_ESP; m_copyback(m, protoff, sizeof(u_int8_t), (u_char *) &prot); /* Get crypto descriptors. */ crp = crypto_getreq(esph && espx ? 2 : 1); if (crp == NULL) { DPRINTF(("esp_output: failed to acquire crypto descriptors\n")); espstat.esps_crypto++; error = ENOBUFS; goto bad; } if (espx) { crde = crp->crp_desc; crda = crde->crd_next; /* Encryption descriptor. */ crde->crd_skip = skip + hlen; crde->crd_len = m->m_pkthdr.len - (skip + hlen + alen); crde->crd_flags = CRD_F_ENCRYPT; crde->crd_inject = skip + hlen - sav->ivlen; /* Encryption operation. */ crde->crd_alg = espx->type; crde->crd_key = _KEYBUF(sav->key_enc); crde->crd_klen = _KEYBITS(sav->key_enc); /* XXX Rounds ? */ } else crda = crp->crp_desc; /* IPsec-specific opaque crypto info. */ tc = (struct tdb_crypto *) malloc(sizeof(struct tdb_crypto), M_XDATA, M_NOWAIT|M_ZERO); if (tc == NULL) { crypto_freereq(crp); DPRINTF(("esp_output: failed to allocate tdb_crypto\n")); espstat.esps_crypto++; error = ENOBUFS; goto bad; } /* Callback parameters */ tc->tc_isr = isr; tc->tc_spi = sav->spi; tc->tc_dst = saidx->dst; tc->tc_proto = saidx->proto; /* Crypto operation descriptor. */ crp->crp_ilen = m->m_pkthdr.len; /* Total input length. */ crp->crp_flags = CRYPTO_F_IMBUF; crp->crp_buf = (caddr_t) m; crp->crp_callback = esp_output_cb; crp->crp_opaque = (caddr_t) tc; crp->crp_sid = sav->tdb_cryptoid; if (esph) { /* Authentication descriptor. */ crda->crd_skip = skip; crda->crd_len = m->m_pkthdr.len - (skip + alen); crda->crd_inject = m->m_pkthdr.len - alen; /* Authentication operation. */ crda->crd_alg = esph->type; crda->crd_key = _KEYBUF(sav->key_auth); crda->crd_klen = _KEYBITS(sav->key_auth); } return crypto_dispatch(crp); bad: if (m) m_freem(m); return (error); }
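/*
 * Worked example (userland, made-up lengths) of the crypto descriptor
 * offsets set up above: encryption starts after the ESP header and IV and
 * stops before the authenticator, the IV is injected just before the
 * payload, and authentication covers the ESP header plus the encrypted
 * payload but not the ICV itself.
 */
#include <stdio.h>

int
main(void)
{
    int skip = 20;                      /* outer IPv4 header */
    int ivlen = 16;                     /* cipher IV */
    int hlen = 8 + ivlen;               /* struct newesp + IV */
    int alen = 12;                      /* authenticator (AH_HMAC_HASHLEN) */
    int pktlen = skip + hlen + 48 + alen;   /* total packet length */

    printf("crde: skip %d len %d inject %d\n",
        skip + hlen,                    /* start of the padded payload */
        pktlen - (skip + hlen + alen),  /* padded payload only */
        skip + hlen - ivlen);           /* where the IV is written */
    printf("crda: skip %d len %d inject %d\n",
        skip,                           /* ESP header is authenticated too */
        pktlen - (skip + alen),         /* everything except the ICV */
        pktlen - alen);                 /* the ICV goes at the very end */
    return 0;
}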
/* * ESP input callback, called directly by the crypto driver. */ int esp_input_cb(void *op) { u_int8_t lastthree[3], aalg[AH_HMAC_HASHLEN]; int s, hlen, roff, skip, protoff, error; struct mbuf *m1, *mo, *m; struct auth_hash *esph; struct tdb_crypto *tc; struct cryptop *crp; struct m_tag *mtag; struct tdb *tdb; u_int32_t btsx; caddr_t ptr; crp = (struct cryptop *) op; tc = (struct tdb_crypto *) crp->crp_opaque; skip = tc->tc_skip; protoff = tc->tc_protoff; mtag = (struct m_tag *) tc->tc_ptr; m = (struct mbuf *) crp->crp_buf; if (m == NULL) { /* Shouldn't happen... */ free(tc, M_XDATA); crypto_freereq(crp); espstat.esps_crypto++; DPRINTF(("esp_input_cb(): bogus returned buffer from crypto\n")); return (EINVAL); } s = spltdb(); tdb = gettdb(tc->tc_spi, &tc->tc_dst, tc->tc_proto); if (tdb == NULL) { free(tc, M_XDATA); espstat.esps_notdb++; DPRINTF(("esp_input_cb(): TDB is expired while in crypto")); error = EPERM; goto baddone; } esph = (struct auth_hash *) tdb->tdb_authalgxform; /* Check for crypto errors */ if (crp->crp_etype) { if (crp->crp_etype == EAGAIN) { /* Reset the session ID */ if (tdb->tdb_cryptoid != 0) tdb->tdb_cryptoid = crp->crp_sid; splx(s); return crypto_dispatch(crp); } free(tc, M_XDATA); espstat.esps_noxform++; DPRINTF(("esp_input_cb(): crypto error %d\n", crp->crp_etype)); error = crp->crp_etype; goto baddone; } /* If authentication was performed, check now. */ if (esph != NULL) { /* * If we have a tag, it means an IPsec-aware NIC did the verification * for us. */ if (mtag == NULL) { /* Copy the authenticator from the packet */ m_copydata(m, m->m_pkthdr.len - esph->authsize, esph->authsize, aalg); ptr = (caddr_t) (tc + 1); /* Verify authenticator */ if (bcmp(ptr, aalg, esph->authsize)) { free(tc, M_XDATA); DPRINTF(("esp_input_cb(): authentication failed for packet in SA %s/%08x\n", ipsp_address(tdb->tdb_dst), ntohl(tdb->tdb_spi))); espstat.esps_badauth++; error = EACCES; goto baddone; } } /* Remove trailing authenticator */ m_adj(m, -(esph->authsize)); } free(tc, M_XDATA); /* Replay window checking, if appropriate */ if ((tdb->tdb_wnd > 0) && (!(tdb->tdb_flags & TDBF_NOREPLAY))) { m_copydata(m, skip + sizeof(u_int32_t), sizeof(u_int32_t), (unsigned char *) &btsx); btsx = ntohl(btsx); switch (checkreplaywindow32(btsx, 0, &(tdb->tdb_rpl), tdb->tdb_wnd, &(tdb->tdb_bitmap), 1)) { case 0: /* All's well */ #if NPFSYNC > 0 pfsync_update_tdb(tdb,0); #endif break; case 1: DPRINTF(("esp_input_cb(): replay counter wrapped for SA %s/%08x\n", ipsp_address(tdb->tdb_dst), ntohl(tdb->tdb_spi))); espstat.esps_wrap++; error = EACCES; goto baddone; case 2: case 3: DPRINTF(("esp_input_cb(): duplicate packet received in SA %s/%08x\n", ipsp_address(tdb->tdb_dst), ntohl(tdb->tdb_spi))); error = EACCES; goto baddone; default: DPRINTF(("esp_input_cb(): bogus value from checkreplaywindow32() in SA %s/%08x\n", ipsp_address(tdb->tdb_dst), ntohl(tdb->tdb_spi))); espstat.esps_replay++; error = EACCES; goto baddone; } } /* Release the crypto descriptors */ crypto_freereq(crp); /* Determine the ESP header length */ if (tdb->tdb_flags & TDBF_NOREPLAY) hlen = sizeof(u_int32_t) + tdb->tdb_ivlen; /* "old" ESP */ else hlen = 2 * sizeof(u_int32_t) + tdb->tdb_ivlen; /* "new" ESP */ /* Find beginning of ESP header */ m1 = m_getptr(m, skip, &roff); if (m1 == NULL) { espstat.esps_hdrops++; splx(s); DPRINTF(("esp_input_cb(): bad mbuf chain, SA %s/%08x\n", ipsp_address(tdb->tdb_dst), ntohl(tdb->tdb_spi))); m_freem(m); return EINVAL; } /* Remove the ESP header and IV from the mbuf. 
*/ if (roff == 0) { /* The ESP header was conveniently at the beginning of the mbuf */ m_adj(m1, hlen); if (!(m1->m_flags & M_PKTHDR)) m->m_pkthdr.len -= hlen; } else if (roff + hlen >= m1->m_len) { /* * Part or all of the ESP header is at the end of this mbuf, so * first let's remove the remainder of the ESP header from the * beginning of the remainder of the mbuf chain, if any. */ if (roff + hlen > m1->m_len) { /* Adjust the next mbuf by the remainder */ m_adj(m1->m_next, roff + hlen - m1->m_len); /* The second mbuf is guaranteed not to have a pkthdr... */ m->m_pkthdr.len -= (roff + hlen - m1->m_len); } /* Now, let's unlink the mbuf chain for a second...*/ mo = m1->m_next; m1->m_next = NULL; /* ...and trim the end of the first part of the chain...sick */ m_adj(m1, -(m1->m_len - roff)); if (!(m1->m_flags & M_PKTHDR)) m->m_pkthdr.len -= (m1->m_len - roff); /* Finally, let's relink */ m1->m_next = mo; } else { /* * The ESP header lies in the "middle" of the mbuf...do an * overlapping copy of the remainder of the mbuf over the ESP * header. */ bcopy(mtod(m1, u_char *) + roff + hlen, mtod(m1, u_char *) + roff, m1->m_len - (roff + hlen)); m1->m_len -= hlen; m->m_pkthdr.len -= hlen; } /* Save the last three bytes of decrypted data */ m_copydata(m, m->m_pkthdr.len - 3, 3, lastthree); /* Verify pad length */ if (lastthree[1] + 2 > m->m_pkthdr.len - skip) { espstat.esps_badilen++; splx(s); DPRINTF(("esp_input_cb(): invalid padding length %d for packet in SA %s/%08x\n", lastthree[1], ipsp_address(tdb->tdb_dst), ntohl(tdb->tdb_spi))); m_freem(m); return EINVAL; } /* Verify correct decryption by checking the last padding bytes */ if (!(tdb->tdb_flags & TDBF_RANDOMPADDING)) { if ((lastthree[1] != lastthree[0]) && (lastthree[1] != 0)) { espstat.esps_badenc++; splx(s); DPRINTF(("esp_input(): decryption failed for packet in SA %s/%08x\n", ipsp_address(tdb->tdb_dst), ntohl(tdb->tdb_spi))); m_freem(m); return EINVAL; } } /* Trim the mbuf chain to remove the trailing authenticator and padding */ m_adj(m, -(lastthree[1] + 2)); /* Restore the Next Protocol field */ m_copyback(m, protoff, sizeof(u_int8_t), lastthree + 2); /* Back to generic IPsec input processing */ error = ipsec_common_input_cb(m, tdb, skip, protoff, mtag); splx(s); return (error); baddone: splx(s); if (m != NULL) m_freem(m); crypto_freereq(crp); return (error); }
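/*
 * Userland sketch of the trailer checks performed above once the payload is
 * decrypted: the last three plaintext bytes are (last pad byte, pad length,
 * next protocol); the pad length must fit inside the payload, and with
 * self-describing padding the last pad byte must equal the pad length unless
 * the pad length is zero.  The packet bytes and helper name below are made
 * up for illustration.
 */
#include <stdio.h>
#include <string.h>
#include <stdint.h>

static int
esp_check_trailer(const uint8_t *pkt, size_t pktlen, size_t skip)
{
    uint8_t lastthree[3];

    memcpy(lastthree, pkt + pktlen - 3, 3);
    if ((size_t)lastthree[1] + 2 > pktlen - skip)
        return -1;                      /* invalid padding length */
    if (lastthree[1] != lastthree[0] && lastthree[1] != 0)
        return -1;                      /* self-describing pad mismatch */
    return lastthree[2];                /* next-protocol number */
}

int
main(void)
{
    /* 8 header bytes, "data", pad 01 02, pad length 02, protocol 6. */
    uint8_t pkt[] = { 0, 0, 0, 0, 0, 0, 0, 0,
        'd', 'a', 't', 'a', 1, 2, 2, 6 };

    printf("next protocol: %d\n", esp_check_trailer(pkt, sizeof(pkt), 8));
    return 0;
}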
/* * ESP input callback from the crypto driver. */ static int esp_input_cb(struct cryptop *crp) { u_int8_t lastthree[3], aalg[AH_HMAC_HASHLEN]; int s, hlen, skip, protoff, error; struct mbuf *m; struct cryptodesc *crd; struct auth_hash *esph; struct enc_xform *espx; struct tdb_crypto *tc; struct m_tag *mtag; struct secasvar *sav; struct secasindex *saidx; caddr_t ptr; crd = crp->crp_desc; KASSERT(crd != NULL, ("esp_input_cb: null crypto descriptor!")); tc = (struct tdb_crypto *) crp->crp_opaque; KASSERT(tc != NULL, ("esp_input_cb: null opaque crypto data area!")); skip = tc->tc_skip; protoff = tc->tc_protoff; mtag = (struct m_tag *) tc->tc_ptr; m = (struct mbuf *) crp->crp_buf; s = splnet(); sav = KEY_ALLOCSA(&tc->tc_dst, tc->tc_proto, tc->tc_spi); if (sav == NULL) { espstat.esps_notdb++; DPRINTF(("esp_input_cb: SA expired while in crypto " "(SA %s/%08lx proto %u)\n", ipsec_address(&tc->tc_dst), (u_long) ntohl(tc->tc_spi), tc->tc_proto)); error = ENOBUFS; /*XXX*/ goto bad; } saidx = &sav->sah->saidx; KASSERT(saidx->dst.sa.sa_family == AF_INET || saidx->dst.sa.sa_family == AF_INET6, ("ah_input_cb: unexpected protocol family %u", saidx->dst.sa.sa_family)); esph = sav->tdb_authalgxform; espx = sav->tdb_encalgxform; /* Check for crypto errors */ if (crp->crp_etype) { /* Reset the session ID */ if (sav->tdb_cryptoid != 0) sav->tdb_cryptoid = crp->crp_sid; if (crp->crp_etype == EAGAIN) { KEY_FREESAV(&sav); splx(s); return crypto_dispatch(crp); } espstat.esps_noxform++; DPRINTF(("esp_input_cb: crypto error %d\n", crp->crp_etype)); error = crp->crp_etype; goto bad; } /* Shouldn't happen... */ if (m == NULL) { espstat.esps_crypto++; DPRINTF(("esp_input_cb: bogus returned buffer from crypto\n")); error = EINVAL; goto bad; } espstat.esps_hist[sav->alg_enc]++; /* If authentication was performed, check now. */ if (esph != NULL) { /* * If we have a tag, it means an IPsec-aware NIC did * the verification for us. Otherwise we need to * check the authentication calculation. */ ahstat.ahs_hist[sav->alg_auth]++; if (mtag == NULL) { /* Copy the authenticator from the packet */ m_copydata(m, m->m_pkthdr.len - esph->authsize, esph->authsize, aalg); ptr = (caddr_t) (tc + 1); /* Verify authenticator */ if (bcmp(ptr, aalg, esph->authsize) != 0) { DPRINTF(("esp_input_cb: " "authentication hash mismatch for packet in SA %s/%08lx\n", ipsec_address(&saidx->dst), (u_long) ntohl(sav->spi))); espstat.esps_badauth++; error = EACCES; goto bad; } } /* Remove trailing authenticator */ m_adj(m, -(esph->authsize)); } /* Release the crypto descriptors */ free(tc, M_XDATA), tc = NULL; crypto_freereq(crp), crp = NULL; /* * Packet is now decrypted. */ m->m_flags |= M_DECRYPTED; /* Determine the ESP header length */ if (sav->flags & SADB_X_EXT_OLD) hlen = sizeof (struct esp) + sav->ivlen; else hlen = sizeof (struct newesp) + sav->ivlen; /* Remove the ESP header and IV from the mbuf. 
*/ error = m_striphdr(m, skip, hlen); if (error) { espstat.esps_hdrops++; DPRINTF(("esp_input_cb: bad mbuf chain, SA %s/%08lx\n", ipsec_address(&sav->sah->saidx.dst), (u_long) ntohl(sav->spi))); goto bad; } /* Save the last three bytes of decrypted data */ m_copydata(m, m->m_pkthdr.len - 3, 3, lastthree); /* Verify pad length */ if (lastthree[1] + 2 > m->m_pkthdr.len - skip) { espstat.esps_badilen++; DPRINTF(("esp_input_cb: invalid padding length %d " "for %u byte packet in SA %s/%08lx\n", lastthree[1], m->m_pkthdr.len - skip, ipsec_address(&sav->sah->saidx.dst), (u_long) ntohl(sav->spi))); error = EINVAL; goto bad; } /* Verify correct decryption by checking the last padding bytes */ if ((sav->flags & SADB_X_EXT_PMASK) != SADB_X_EXT_PRAND) { if (lastthree[1] != lastthree[0] && lastthree[1] != 0) { espstat.esps_badenc++; DPRINTF(("esp_input_cb: decryption failed " "for packet in SA %s/%08lx\n", ipsec_address(&sav->sah->saidx.dst), (u_long) ntohl(sav->spi))); DPRINTF(("esp_input_cb: %x %x\n", lastthree[0], lastthree[1])); error = EINVAL; goto bad; } } /* Trim the mbuf chain to remove trailing authenticator and padding */ m_adj(m, -(lastthree[1] + 2)); /* Restore the Next Protocol field */ m_copyback(m, protoff, sizeof (u_int8_t), lastthree + 2); IPSEC_COMMON_INPUT_CB(m, sav, skip, protoff, mtag); KEY_FREESAV(&sav); splx(s); return error; bad: if (sav) KEY_FREESAV(&sav); splx(s); if (m != NULL) m_freem(m); if (tc != NULL) free(tc, M_XDATA); if (crp != NULL) crypto_freereq(crp); return error; }
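/*
 * Userland equivalent of the m_striphdr() step above, on a flat buffer
 * instead of an mbuf chain: the ESP header and IV (hlen bytes at offset
 * skip) are cut out of the packet with an overlapping copy, leaving the
 * outer header directly followed by the payload.  Names and contents are
 * illustrative.
 */
#include <stdio.h>
#include <string.h>

static size_t
strip_header(char *pkt, size_t pktlen, size_t skip, size_t hlen)
{
    /* Slide everything after the ESP header down over it. */
    memmove(pkt + skip, pkt + skip + hlen, pktlen - skip - hlen);
    return pktlen - hlen;
}

int
main(void)
{
    char pkt[] = "IPIPIPIP" "ESPHDRIV" "payload";
    size_t n = strip_header(pkt, strlen(pkt), 8, 8);

    printf("%.*s\n", (int)n, pkt);      /* IPIPIPIPpayload */
    return 0;
}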
/* ARGSUSED4 */ int if_route(const struct interface *iface, const struct in_addr *dest, const struct in_addr *net, const struct in_addr *gate, int metric, int action) { int error; union sockunion { struct sockaddr sa; struct sockaddr_in sin; #ifdef INET6 struct sockaddr_in6 sin6; #endif struct sockaddr_dl sdl; struct sockaddr_storage ss; } su; struct rtm { struct rt_msghdr hdr; char buffer[sizeof(su) * 4]; } rtm; char *bp = rtm.buffer, *p; size_t l; struct mbuf *m; #define ADDSU(_su) { \ l = RT_ROUNDUP(_su.sa.sa_len); \ memcpy(bp, &(_su), l); \ bp += l; \ } #define ADDADDR(_a) { \ memset (&su, 0, sizeof(su)); \ su.sin.sin_family = AF_INET; \ su.sin.sin_len = sizeof(su.sin); \ memcpy (&su.sin.sin_addr, _a, sizeof(su.sin.sin_addr)); \ ADDSU(su); \ } memset(&rtm, 0, sizeof(rtm)); rtm.hdr.rtm_version = RTM_VERSION; rtm.hdr.rtm_seq = 1; if (action == 0) rtm.hdr.rtm_type = RTM_CHANGE; else if (action > 0) rtm.hdr.rtm_type = RTM_ADD; else { rtm.hdr.rtm_type = RTM_DELETE; /* shortcircuit a bit for now */ return 0; } rtm.hdr.rtm_flags = RTF_UP; /* None interface subnet routes are static. */ if (gate->s_addr != INADDR_ANY || net->s_addr != iface->net.s_addr || dest->s_addr != (iface->addr.s_addr & iface->net.s_addr)) rtm.hdr.rtm_flags |= RTF_STATIC; rtm.hdr.rtm_addrs = RTA_DST | RTA_GATEWAY; if (dest->s_addr == gate->s_addr && net->s_addr == INADDR_BROADCAST) rtm.hdr.rtm_flags |= RTF_HOST; else { rtm.hdr.rtm_addrs |= RTA_NETMASK; if (rtm.hdr.rtm_flags & RTF_STATIC) rtm.hdr.rtm_flags |= RTF_GATEWAY; if (action >= 0) rtm.hdr.rtm_addrs |= RTA_IFA; } ADDADDR(dest); if (rtm.hdr.rtm_flags & RTF_HOST || !(rtm.hdr.rtm_flags & RTF_STATIC)) { /* Make us a link layer socket for the host gateway */ memset(&su, 0, sizeof(su)); memcpy(&su.sdl, iface->ifp->if_sadl, sizeof(su.sdl)); ADDSU(su); } else { ADDADDR(gate); } if (rtm.hdr.rtm_addrs & RTA_NETMASK) { /* Ensure that netmask is set correctly */ memset(&su, 0, sizeof(su)); su.sin.sin_family = AF_INET; su.sin.sin_len = sizeof(su.sin); memcpy(&su.sin.sin_addr, &net->s_addr, sizeof(su.sin.sin_addr)); p = su.sa.sa_len + (char *)&su; for (su.sa.sa_len = 0; p > (char *)&su;) if (*--p != 0) { su.sa.sa_len = 1 + p - (char *)&su; break; } ADDSU(su); } if (rtm.hdr.rtm_addrs & RTA_IFA) ADDADDR(&iface->addr); m = m_gethdr(M_WAIT, MT_DATA); m->m_pkthdr.len = rtm.hdr.rtm_msglen = l = bp - (char *)&rtm; m_copyback(m, 0, l, &rtm); /* XXX: no check */ solock(routeso); error = routeso->so_proto->pr_output(m, routeso); sounlock(routeso); return error; }
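/*
 * Userland sketch (BSD libc, since it relies on sa_len) of the netmask
 * trimming done above when the RTM message is assembled: the netmask
 * sockaddr's sa_len is cut back to just past its last non-zero byte, and
 * each sockaddr then occupies its length rounded up to the message
 * alignment.  ROUNDUP mirrors the kernel's RT_ROUNDUP on a typical LP64
 * system; the address is made up.
 */
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <netinet/in.h>

#define ROUNDUP(n) ((n) > 0 ? \
    (((n) + sizeof(long) - 1) & ~(sizeof(long) - 1)) : sizeof(long))

int
main(void)
{
    struct sockaddr_in mask;
    char *p;

    memset(&mask, 0, sizeof(mask));
    mask.sin_family = AF_INET;
    mask.sin_len = sizeof(mask);
    mask.sin_addr.s_addr = htonl(0xffffff00);   /* a /24 netmask */

    /* Trim sa_len back to the last non-zero byte, as if_route() does. */
    p = (char *)&mask + mask.sin_len;
    for (mask.sin_len = 0; p > (char *)&mask;)
        if (*--p != 0) {
            mask.sin_len = 1 + p - (char *)&mask;
            break;
        }
    printf("trimmed sa_len %d, occupies %zu bytes in the message\n",
        mask.sin_len, (size_t)ROUNDUP(mask.sin_len));
    return 0;
}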
void pflog_bpfcopy(const void *src_arg, void *dst_arg, size_t len) { const struct mbuf *m; struct pfloghdr *pfloghdr; u_int count; u_char *dst; u_short action, reason; int off = 0, hdrlen = 0; union { struct tcphdr tcp; struct udphdr udp; struct icmp icmp; #ifdef INET6 struct icmp6_hdr icmp6; #endif /* INET6 */ } pf_hdrs; struct pf_pdesc pd; struct pf_addr osaddr, odaddr; u_int16_t osport = 0, odport = 0; m = src_arg; dst = dst_arg; if (m == NULL) panic("pflog_bpfcopy got no mbuf"); /* first mbuf holds struct pfloghdr */ pfloghdr = mtod(m, struct pfloghdr *); count = min(m->m_len, len); bcopy(pfloghdr, dst, count); pfloghdr = (struct pfloghdr *)dst; dst += count; len -= count; m = m->m_next; /* second mbuf is pkthdr */ if (len > 0) { if (m == NULL) panic("no second mbuf"); bcopy(m, mfake, sizeof(*mfake)); mfake->m_flags &= ~(M_EXT|M_CLUSTER); mfake->m_next = NULL; mfake->m_nextpkt = NULL; mfake->m_data = dst; mfake->m_len = len; } else return; while (len > 0) { if (m == 0) panic("bpf_mcopy"); count = min(m->m_len, len); bcopy(mtod(m, caddr_t), (caddr_t)dst, count); m = m->m_next; dst += count; len -= count; } if (mfake->m_flags & M_PKTHDR) mfake->m_pkthdr.len = min(mfake->m_pkthdr.len, mfake->m_len); /* rewrite addresses if needed */ memset(&pd, 0, sizeof(pd)); pd.hdr.any = &pf_hdrs; if (pf_setup_pdesc(pfloghdr->af, pfloghdr->dir, &pd, mfake, &action, &reason, NULL, NULL, NULL, NULL, &off, &hdrlen) == -1) return; PF_ACPY(&osaddr, pd.src, pd.af); PF_ACPY(&odaddr, pd.dst, pd.af); if (pd.sport) osport = *pd.sport; if (pd.dport) odport = *pd.dport; if ((pfloghdr->rewritten = pf_translate(&pd, &pfloghdr->saddr, pfloghdr->sport, &pfloghdr->daddr, pfloghdr->dport, 0, pfloghdr->dir))) { m_copyback(mfake, off, min(mfake->m_len - off, hdrlen), pd.hdr.any, M_NOWAIT); PF_ACPY(&pfloghdr->saddr, &osaddr, pd.af); PF_ACPY(&pfloghdr->daddr, &odaddr, pd.af); pfloghdr->sport = osport; pfloghdr->dport = odport; } }
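/*
 * Userland model of the copy loop above: an mbuf-style chain of buffers is
 * flattened into the caller's destination, taking at most
 * min(chunk length, space left) from each link; the kernel version panics if
 * the chain runs out early, while the sketch simply stops.  The tiny chunk
 * struct stands in for struct mbuf; names are illustrative.
 */
#include <stdio.h>
#include <string.h>
#include <stddef.h>

struct chunk {
    const char *data;
    size_t len;
    struct chunk *next;
};

static void
chain_copy(const struct chunk *c, char *dst, size_t len)
{
    size_t count;

    while (len > 0 && c != NULL) {
        count = c->len < len ? c->len : len;    /* min(m_len, len) */
        memcpy(dst, c->data, count);
        dst += count;
        len -= count;
        c = c->next;
    }
}

int
main(void)
{
    struct chunk c3 = { "header", 6, NULL };
    struct chunk c2 = { "packet ", 7, &c3 };
    struct chunk c1 = { "pflog ", 6, &c2 };
    char out[32] = { 0 };

    chain_copy(&c1, out, sizeof(out) - 1);
    printf("%s\n", out);                /* pflog packet header */
    return 0;
}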
static void
aes_decrypt(const struct krb5_key_state *ks, struct mbuf *inout,
    size_t skip, size_t len, void *ivec, size_t ivlen)
{
	size_t blocklen = 16, plen;
	struct {
		uint8_t cn_1[16], cn[16];
	} last2;
	int i, off, t;

	/*
	 * AES decryption with ciphertext stealing:
	 *
	 * CTSdecrypt(C[0], ..., C[n], IV, K):
	 *	len = length(C[n])
	 *	E[n] = C[n-1]
	 *	X = decrypt(E[n], K)
	 *	P[n] = (X ^ C[n]){0..len-1}
	 *	E[n-1] = {C[n,0],...,C[n,len-1],X[len],...,X[blocksize-1]}
	 *	(P[0],...,P[n-1]) = CBCdecrypt(C[0],...,C[n-2],E[n-1], IV, K)
	 */
	plen = len % blocklen;
	if (len == blocklen) {
		/*
		 * Note: caller will ensure len >= blocklen.
		 */
		aes_encrypt_1(ks, CRYPTO_F_IMBUF, inout, skip, len, ivec, 0);
	} else if (plen == 0) {
		/*
		 * This is equivalent to CBC mode followed by swapping
		 * the last two blocks.
		 */
		off = skip + len - 2 * blocklen;
		m_copydata(inout, off, 2 * blocklen, (void *)&last2);
		m_copyback(inout, off, blocklen, last2.cn);
		m_copyback(inout, off + blocklen, blocklen, last2.cn_1);
		aes_encrypt_1(ks, CRYPTO_F_IMBUF, inout, skip, len, ivec, 0);
	} else {
		/*
		 * This is the difficult case. We first decrypt the
		 * second to last block with a zero IV to make X. The
		 * plaintext for the last block is the XOR of X and
		 * the last ciphertext block.
		 *
		 * We derive a new ciphertext for the second to last
		 * block by mixing the unused bytes of X with the last
		 * ciphertext block. The result of that can be
		 * decrypted with the rest in CBC mode.
		 */
		off = skip + len - plen - blocklen;
		aes_encrypt_1(ks, CRYPTO_F_IMBUF, inout, off, blocklen,
		    NULL, 0);
		m_copydata(inout, off, blocklen + plen, (void *)&last2);

		for (i = 0; i < plen; i++) {
			t = last2.cn[i];
			last2.cn[i] ^= last2.cn_1[i];
			last2.cn_1[i] = t;
		}

		m_copyback(inout, off, blocklen + plen, (void *)&last2);
		aes_encrypt_1(ks, CRYPTO_F_IMBUF, inout, skip, len - plen,
		    ivec, 0);
	}
}
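/*
 * The ciphertext-stealing arithmetic in the comment above is easier to
 * check outside the mbuf machinery.  The following is a minimal,
 * self-contained sketch of the final-block handling for the plen != 0
 * case, operating on a flat buffer and using a hypothetical one-block
 * decryption callback; block_decrypt_fn, cts_decrypt_tail and their
 * signatures are illustrative assumptions, not part of the code above.
 */
#include <stddef.h>
#include <stdint.h>
#include <string.h>

/* Hypothetical single-block decryption primitive (e.g. raw AES-128). */
typedef void (*block_decrypt_fn)(const uint8_t in[16], uint8_t out[16],
    const void *key);

/*
 * Decrypt, in place, the last (16 + plen) bytes of a CBC ciphertext-stealing
 * stream, 0 < plen < 16.  buf points at C[n-1]; the short C[n] (plen bytes)
 * follows it.  prev_ct is the ciphertext block preceding C[n-1], or the IV
 * when C[n-1] is the first block of the message.
 */
static void
cts_decrypt_tail(uint8_t *buf, size_t plen, const uint8_t prev_ct[16],
    block_decrypt_fn decrypt, const void *key)
{
	uint8_t x[16], en_1[16], pn_1[16];
	size_t i;

	/* X = decrypt(C[n-1]) with no chaining (zero IV). */
	decrypt(buf, x, key);

	/* E[n-1] = C[n,0..plen-1] || X[plen..15]. */
	memcpy(en_1, buf + 16, plen);
	memcpy(en_1 + plen, x + plen, 16 - plen);

	/* P[n] = (X ^ C[n]) truncated to plen bytes. */
	for (i = 0; i < plen; i++)
		buf[16 + i] = x[i] ^ buf[16 + i];

	/* P[n-1] = decrypt(E[n-1]) ^ previous ciphertext block (CBC step). */
	decrypt(en_1, pn_1, key);
	for (i = 0; i < 16; i++)
		buf[i] = pn_1[i] ^ prev_ct[i];
}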
void
pflog_bpfcopy(const void *src_arg, void *dst_arg, size_t len)
{
	struct mbuf *m, *mp, *mhdr, *mptr;
	struct pfloghdr *pfloghdr;
	u_int count;
	u_char *dst, *mdst;
	int afto, hlen, mlen, off;
	union pf_headers {
		struct tcphdr tcp;
		struct udphdr udp;
		struct icmp icmp;
#ifdef INET6
		struct icmp6_hdr icmp6;
		struct mld_hdr mld;
		struct nd_neighbor_solicit nd_ns;
#endif /* INET6 */
	} pdhdrs;

	struct pf_pdesc pd;
	struct pf_addr osaddr, odaddr;
	u_int16_t osport = 0, odport = 0;
	u_int8_t proto = 0;

	m = (struct mbuf *)src_arg;
	dst = dst_arg;
	mhdr = pflog_mhdr;
	mptr = pflog_mptr;

	if (m == NULL)
		panic("pflog_bpfcopy got no mbuf");

	/* first mbuf holds struct pfloghdr */
	pfloghdr = mtod(m, struct pfloghdr *);
	afto = pfloghdr->af != pfloghdr->naf;
	count = min(m->m_len, len);
	bcopy(pfloghdr, dst, count);
	pfloghdr = (struct pfloghdr *)dst;
	dst += count;
	len -= count;
	m = m->m_next;

	if (len <= 0)
		return;

	/* second mbuf is pkthdr */
	if (m == NULL)
		panic("no second mbuf");

	/*
	 * temporary mbuf will hold an ip/ip6 header and 8 bytes
	 * of the protocol header
	 */
	m_inithdr(mhdr);
	mhdr->m_len = 0;	/* XXX not done in m_inithdr() */

#if INET && INET6
	/* offset for a new header */
	if (afto && pfloghdr->af == AF_INET)
		mhdr->m_data += sizeof(struct ip6_hdr) - sizeof(struct ip);
#endif /* INET && INET6 */

	mdst = mtod(mhdr, char *);
	switch (pfloghdr->af) {
	case AF_INET: {
		struct ip *h;

		if (m->m_pkthdr.len < sizeof(*h))
			goto copy;
		m_copydata(m, 0, sizeof(*h), mdst);
		h = (struct ip *)mdst;
		hlen = h->ip_hl << 2;
		if (hlen > sizeof(*h) && (m->m_pkthdr.len >= hlen))
			m_copydata(m, sizeof(*h), hlen - sizeof(*h),
			    mdst + sizeof(*h));
		break;
	    }
#ifdef INET6
	case AF_INET6: {
		struct ip6_hdr *h;

		if (m->m_pkthdr.len < sizeof(*h))
			goto copy;
		hlen = sizeof(struct ip6_hdr);
		m_copydata(m, 0, hlen, mdst);
		h = (struct ip6_hdr *)mdst;
		proto = h->ip6_nxt;
		break;
	    }
#endif /* INET6 */
	default:
		/* shouldn't happen ever :-) */
		goto copy;
	}

	if (m->m_pkthdr.len < hlen + 8 && proto != IPPROTO_NONE)
		goto copy;
	else if (proto != IPPROTO_NONE) {
		/* copy 8 bytes of the protocol header */
		m_copydata(m, hlen, 8, mdst + hlen);
		hlen += 8;
	}

	mhdr->m_len += hlen;
	mhdr->m_pkthdr.len = mhdr->m_len;

	/* create a chain mhdr -> mptr, mptr->m_data = (m->m_data+hlen) */
	mp = m_getptr(m, hlen, &off);
	if (mp != NULL) {
		bcopy(mp, mptr, sizeof(*mptr));
		mptr->m_data += off;
		mptr->m_len -= off;
		mptr->m_flags &= ~M_PKTHDR;
		mhdr->m_next = mptr;
		mhdr->m_pkthdr.len += m->m_pkthdr.len - hlen;
	}

	/*
	 * Rewrite addresses if needed. Reason pointer must be NULL to avoid
	 * counting the packet here again.
	 */
	if (pf_setup_pdesc(&pd, &pdhdrs, pfloghdr->af, pfloghdr->dir, NULL,
	    mhdr, NULL) != PF_PASS)
		goto copy;
	pd.naf = pfloghdr->naf;

	PF_ACPY(&osaddr, pd.src, pd.af);
	PF_ACPY(&odaddr, pd.dst, pd.af);
	if (pd.sport)
		osport = *pd.sport;
	if (pd.dport)
		odport = *pd.dport;

	if (pd.virtual_proto != PF_VPROTO_FRAGMENT &&
	    (pfloghdr->rewritten = pf_translate(&pd, &pfloghdr->saddr,
	    pfloghdr->sport, &pfloghdr->daddr, pfloghdr->dport, 0,
	    pfloghdr->dir))) {
		m_copyback(pd.m, pd.off, min(pd.m->m_len - pd.off, pd.hdrlen),
		    pd.hdr.any, M_NOWAIT);
#if INET && INET6
		if (afto) {
			PF_ACPY(&pd.nsaddr, &pfloghdr->saddr, pd.naf);
			PF_ACPY(&pd.ndaddr, &pfloghdr->daddr, pd.naf);
		}
#endif /* INET && INET6 */
		PF_ACPY(&pfloghdr->saddr, &osaddr, pd.af);
		PF_ACPY(&pfloghdr->daddr, &odaddr, pd.af);
		pfloghdr->sport = osport;
		pfloghdr->dport = odport;
	}

	pd.tot_len = min(pd.tot_len, len);
	pd.tot_len -= pd.m->m_data - pd.m->m_pktdat;

#if INET && INET6
	if (afto && pfloghdr->rewritten)
		pf_translate_af(&pd);
#endif /* INET && INET6 */

	m = pd.m;

 copy:
	mlen = min(m->m_pkthdr.len, len);
	m_copydata(m, 0, mlen, dst);
	len -= mlen;
	if (len > 0)
		bzero(dst + mlen, len);
}
/*
 * Massage IPv4/IPv6 headers for AH processing.
 */
int
ah_massage_headers(struct mbuf **m0, int proto, int skip, int alg, int out)
{
	struct mbuf *m = *m0;
	unsigned char *ptr;
	int off, count;

#ifdef INET
	struct ip *ip;
#endif /* INET */

#ifdef INET6
	struct ip6_ext *ip6e;
	struct ip6_hdr ip6;
	int ad, alloc, nxt;
#endif /* INET6 */

	switch (proto) {
#ifdef INET
	case AF_INET:
		/*
		 * This is the least painful way of dealing with IPv4 header
		 * and option processing -- just make sure they're in
		 * contiguous memory.
		 */
		*m0 = m = m_pullup(m, skip);
		if (m == NULL) {
			DPRINTF(("ah_massage_headers(): m_pullup() failed\n"));
			ahstat.ahs_hdrops++;
			return ENOBUFS;
		}

		/* Fix the IP header */
		ip = mtod(m, struct ip *);
		ip->ip_tos = 0;
		ip->ip_ttl = 0;
		ip->ip_sum = 0;

		/*
		 * On input, fix ip_len which has been byte-swapped
		 * at ip_input().
		 */
		if (alg == CRYPTO_MD5_KPDK || alg == CRYPTO_SHA1_KPDK)
			ip->ip_off &= htons(IP_DF);
		else
			ip->ip_off = 0;

		ptr = mtod(m, unsigned char *);

		/* IPv4 option processing */
		for (off = sizeof(struct ip); off < skip;) {
			if (ptr[off] == IPOPT_EOL || ptr[off] == IPOPT_NOP ||
			    off + 1 < skip)
				;
			else {
				DPRINTF(("ah_massage_headers(): illegal IPv4 "
				    "option length for option %d\n",
				    ptr[off]));
				ahstat.ahs_hdrops++;
				m_freem(m);
				return EINVAL;
			}

			switch (ptr[off]) {
			case IPOPT_EOL:
				off = skip;  /* End the loop. */
				break;

			case IPOPT_NOP:
				off++;
				break;

			case IPOPT_SECURITY:	/* 0x82 */
			case 0x85:	/* Extended security. */
			case 0x86:	/* Commercial security. */
			case 0x94:	/* Router alert */
			case 0x95:	/* RFC1770 */
				/* Sanity check for option length. */
				if (ptr[off + 1] < 2) {
					DPRINTF(("ah_massage_headers(): "
					    "illegal IPv4 option length for "
					    "option %d\n", ptr[off]));
					ahstat.ahs_hdrops++;
					m_freem(m);
					return EINVAL;
				}

				off += ptr[off + 1];
				break;

			case IPOPT_LSRR:
			case IPOPT_SSRR:
				/* Sanity check for option length. */
				if (ptr[off + 1] < 2) {
					DPRINTF(("ah_massage_headers(): "
					    "illegal IPv4 option length for "
					    "option %d\n", ptr[off]));
					ahstat.ahs_hdrops++;
					m_freem(m);
					return EINVAL;
				}

				/*
				 * On output, if we have either of the
				 * source routing options, we should
				 * swap the destination address of the
				 * IP header with the last address
				 * specified in the option, as that is
				 * what the destination's IP header
				 * will look like.
				 */
				if (out)
					bcopy(ptr + off + ptr[off + 1] -
					    sizeof(struct in_addr),
					    &(ip->ip_dst),
					    sizeof(struct in_addr));

				/* FALLTHROUGH */
			default:
				/* Sanity check for option length. */
				if (ptr[off + 1] < 2) {
					DPRINTF(("ah_massage_headers(): "
					    "illegal IPv4 option length for "
					    "option %d\n", ptr[off]));
					ahstat.ahs_hdrops++;
					m_freem(m);
					return EINVAL;
				}

				/* Zeroize all other options. */
				count = ptr[off + 1];
				bcopy(ipseczeroes, ptr + off, count);
				off += count;
				break;
			}

			/* Sanity check. */
			if (off > skip) {
				DPRINTF(("ah_massage_headers(): malformed "
				    "IPv4 options header\n"));
				ahstat.ahs_hdrops++;
				m_freem(m);
				return EINVAL;
			}
		}

		break;
#endif /* INET */

#ifdef INET6
	case AF_INET6:  /* Ugly... */
		/* Copy and "cook" the IPv6 header. */
		m_copydata(m, 0, sizeof(ip6), (caddr_t) &ip6);

		/* We don't do IPv6 Jumbograms. */
		if (ip6.ip6_plen == 0) {
			DPRINTF(("ah_massage_headers(): unsupported IPv6 "
			    "jumbogram"));
			ahstat.ahs_hdrops++;
			m_freem(m);
			return EMSGSIZE;
		}

		ip6.ip6_flow = 0;
		ip6.ip6_hlim = 0;
		ip6.ip6_vfc &= ~IPV6_VERSION_MASK;
		ip6.ip6_vfc |= IPV6_VERSION;

		/* Scoped address handling. */
		if (IN6_IS_SCOPE_EMBED(&ip6.ip6_src))
			ip6.ip6_src.s6_addr16[1] = 0;
		if (IN6_IS_SCOPE_EMBED(&ip6.ip6_dst))
			ip6.ip6_dst.s6_addr16[1] = 0;

		/* Done with IPv6 header. */
		m_copyback(m, 0, sizeof(struct ip6_hdr), &ip6, M_NOWAIT);

		/* Let's deal with the remaining headers (if any). */
		if (skip - sizeof(struct ip6_hdr) > 0) {
			if (m->m_len <= skip) {
				ptr = malloc(skip - sizeof(struct ip6_hdr),
				    M_XDATA, M_NOWAIT);
				if (ptr == NULL) {
					DPRINTF(("ah_massage_headers(): "
					    "failed to allocate memory for "
					    "IPv6 headers\n"));
					ahstat.ahs_hdrops++;
					m_freem(m);
					return ENOBUFS;
				}

				/*
				 * Copy all the protocol headers after
				 * the IPv6 header.
				 */
				m_copydata(m, sizeof(struct ip6_hdr),
				    skip - sizeof(struct ip6_hdr), ptr);
				alloc = 1;
			} else {
				/* No need to allocate memory. */
				ptr = mtod(m, unsigned char *) +
				    sizeof(struct ip6_hdr);
				alloc = 0;
			}
		} else
			break;

		nxt = ip6.ip6_nxt & 0xff;  /* Next header type. */

		for (off = 0; off < skip - sizeof(struct ip6_hdr);) {
			switch (nxt) {
			case IPPROTO_HOPOPTS:
			case IPPROTO_DSTOPTS:
				ip6e = (struct ip6_ext *)(ptr + off);

				/*
				 * Process the mutable/immutable
				 * options -- borrows heavily from the
				 * KAME code.
				 */
				for (count = off + sizeof(struct ip6_ext);
				    count < off +
				    ((ip6e->ip6e_len + 1) << 3);) {
					if (ptr[count] == IP6OPT_PAD1) {
						count++;
						continue;  /* Skip padding. */
					}

					/* Sanity check. */
					if (count > off +
					    ((ip6e->ip6e_len + 1) << 3)) {
						ahstat.ahs_hdrops++;
						m_freem(m);

						/* Free, if we allocated. */
						if (alloc)
							free(ptr, M_XDATA);
						return EINVAL;
					}

					ad = ptr[count + 1];

					/* If mutable option, zeroize. */
					if (ptr[count] & IP6OPT_MUTABLE)
						bcopy(ipseczeroes, ptr + count,
						    ptr[count + 1]);

					count += ad;

					/* Sanity check. */
					if (count >
					    skip - sizeof(struct ip6_hdr)) {
						ahstat.ahs_hdrops++;
						m_freem(m);

						/* Free, if we allocated. */
						if (alloc)
							free(ptr, M_XDATA);
						return EINVAL;
					}
				}

				/* Advance. */
				off += ((ip6e->ip6e_len + 1) << 3);
				nxt = ip6e->ip6e_nxt;
				break;

			case IPPROTO_ROUTING:
				/*
				 * Always include routing headers in
				 * computation.
				 */
			    {
				struct ip6_rthdr *rh;

				ip6e = (struct ip6_ext *)(ptr + off);
				rh = (struct ip6_rthdr *)(ptr + off);

				/*
				 * must adjust content to make it look like
				 * its final form (as seen at the final
				 * destination).
				 * we only know how to massage type 0 routing
				 * header.
				 */
				if (out &&
				    rh->ip6r_type == IPV6_RTHDR_TYPE_0) {
					struct ip6_rthdr0 *rh0;
					struct in6_addr *addr, finaldst;
					int i;

					rh0 = (struct ip6_rthdr0 *)rh;
					addr = (struct in6_addr *)(rh0 + 1);

					for (i = 0; i < rh0->ip6r0_segleft; i++)
						if (IN6_IS_SCOPE_EMBED(&addr[i]))
							addr[i].s6_addr16[1] = 0;

					finaldst = addr[rh0->ip6r0_segleft - 1];
					memmove(&addr[1], &addr[0],
					    sizeof(struct in6_addr) *
					    (rh0->ip6r0_segleft - 1));

					m_copydata(m, 0, sizeof(ip6),
					    (caddr_t)&ip6);
					addr[0] = ip6.ip6_dst;
					ip6.ip6_dst = finaldst;
					m_copyback(m, 0, sizeof(ip6), &ip6,
					    M_NOWAIT);

					rh0->ip6r0_segleft = 0;
				}

				/* advance */
				off += ((ip6e->ip6e_len + 1) << 3);
				nxt = ip6e->ip6e_nxt;
				break;
			    }

			default:
				DPRINTF(("ah_massage_headers(): unexpected "
				    "IPv6 header type %d\n", off));
				if (alloc)
					free(ptr, M_XDATA);
				ahstat.ahs_hdrops++;
				m_freem(m);
				return EINVAL;
			}
		}

		/* Copyback and free, if we allocated. */
		if (alloc) {
			m_copyback(m, sizeof(struct ip6_hdr),
			    skip - sizeof(struct ip6_hdr), ptr, M_NOWAIT);
			free(ptr, M_XDATA);
		}

		break;
#endif /* INET6 */
	}
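/*
 * The IPv4 option loop above is the standard type/length walk over the
 * option area between the fixed header and the payload.  The following
 * is a compact, self-contained sketch of the same traversal that zeroes
 * every option not on a caller-supplied keep list; the function name and
 * the keep-list interface are illustrative assumptions (the AH code above
 * instead enumerates the immutable options explicitly in its switch).
 */
#include <stddef.h>
#include <stdint.h>
#include <string.h>

#define OPT_EOL	0	/* end of option list */
#define OPT_NOP	1	/* no-operation */

/*
 * Walk an IPv4 option area (opts/optlen) and zero every option except
 * EOL, NOP and the types listed in keep[].  Returns 0 on success, -1 on
 * a malformed option.
 */
static int
zero_mutable_ipv4_options(uint8_t *opts, size_t optlen,
    const uint8_t *keep, size_t nkeep)
{
	size_t off = 0, i;
	uint8_t type, len;
	int keepme;

	while (off < optlen) {
		type = opts[off];
		if (type == OPT_EOL)
			break;			/* rest is padding */
		if (type == OPT_NOP) {
			off++;
			continue;
		}
		if (off + 1 >= optlen)
			return -1;		/* length byte missing */
		len = opts[off + 1];
		if (len < 2 || off + len > optlen)
			return -1;		/* malformed length */

		keepme = 0;
		for (i = 0; i < nkeep; i++)
			if (keep[i] == type)
				keepme = 1;
		if (!keepme)
			memset(opts + off, 0, len);
		off += len;
	}
	return 0;
}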
/*
 * Massage IPv4/IPv6 headers for AH processing.
 */
int
ah_massage_headers(struct mbuf **m0, int proto, int skip, int alg, int out)
{
	struct mbuf *m = *m0;
	unsigned char *ptr;
	int off, count;

#ifdef INET
	struct ip *ip;
#endif /* INET */

#ifdef INET6
	struct ip6_ext *ip6e;
	struct ip6_hdr ip6;
	int alloc, len, ad;
#endif /* INET6 */

	switch (proto) {
#ifdef INET
	case AF_INET:
		/*
		 * This is the least painful way of dealing with IPv4 header
		 * and option processing -- just make sure they're in
		 * contiguous memory.
		 */
		*m0 = m = m_pullup(m, skip);
		if (m == NULL) {
			DPRINTF(("ah_massage_headers(): m_pullup() failed\n"));
			ahstat.ahs_hdrops++;
			return ENOBUFS;
		}

		/* Fix the IP header */
		ip = mtod(m, struct ip *);
		ip->ip_tos = 0;
		ip->ip_ttl = 0;
		ip->ip_sum = 0;

		/*
		 * On input, fix ip_len which has been byte-swapped
		 * at ip_input().
		 */
		if (alg == CRYPTO_MD5_KPDK || alg == CRYPTO_SHA1_KPDK)
			ip->ip_off &= htons(IP_DF);
		else
			ip->ip_off = 0;

		ptr = mtod(m, unsigned char *);

		/* IPv4 option processing */
		for (off = sizeof(struct ip); off < skip;) {
			if (ptr[off] == IPOPT_EOL || ptr[off] == IPOPT_NOP ||
			    off + 1 < skip)
				;
			else {
				DPRINTF(("ah_massage_headers(): illegal IPv4 "
				    "option length for option %d\n",
				    ptr[off]));
				ahstat.ahs_hdrops++;
				m_freem(m);
				return EINVAL;
			}

			switch (ptr[off]) {
			case IPOPT_EOL:
				off = skip;  /* End the loop. */
				break;

			case IPOPT_NOP:
				off++;
				break;

			case IPOPT_SECURITY:	/* 0x82 */
			case 0x85:	/* Extended security. */
			case 0x86:	/* Commercial security. */
			case 0x94:	/* Router alert */
			case 0x95:	/* RFC1770 */
				/* Sanity check for option length. */
				if (ptr[off + 1] < 2) {
					DPRINTF(("ah_massage_headers(): "
					    "illegal IPv4 option length for "
					    "option %d\n", ptr[off]));
					ahstat.ahs_hdrops++;
					m_freem(m);
					return EINVAL;
				}

				off += ptr[off + 1];
				break;

			case IPOPT_LSRR:
			case IPOPT_SSRR:
				/* Sanity check for option length. */
				if (ptr[off + 1] < 2) {
					DPRINTF(("ah_massage_headers(): "
					    "illegal IPv4 option length for "
					    "option %d\n", ptr[off]));
					ahstat.ahs_hdrops++;
					m_freem(m);
					return EINVAL;
				}

				/*
				 * On output, if we have either of the
				 * source routing options, we should
				 * swap the destination address of the
				 * IP header with the last address
				 * specified in the option, as that is
				 * what the destination's IP header
				 * will look like.
				 */
				if (out)
					bcopy(ptr + off + ptr[off + 1] -
					    sizeof(struct in_addr),
					    &(ip->ip_dst),
					    sizeof(struct in_addr));

				/* Fall through */
			default:
				/* Sanity check for option length. */
				if (ptr[off + 1] < 2) {
					DPRINTF(("ah_massage_headers(): "
					    "illegal IPv4 option length for "
					    "option %d\n", ptr[off]));
					ahstat.ahs_hdrops++;
					m_freem(m);
					return EINVAL;
				}

				/* Zeroize all other options. */
				count = ptr[off + 1];
				bcopy(ipseczeroes, ptr + off, count);
				off += count;
				break;
			}

			/* Sanity check. */
			if (off > skip) {
				DPRINTF(("ah_massage_headers(): malformed "
				    "IPv4 options header\n"));
				ahstat.ahs_hdrops++;
				m_freem(m);
				return EINVAL;
			}
		}

		break;
#endif /* INET */

#ifdef INET6
	case AF_INET6:  /* Ugly... */
		/* Copy and "cook" the IPv6 header. */
		m_copydata(m, 0, sizeof(ip6), (caddr_t) &ip6);

		/* We don't do IPv6 Jumbograms. */
		if (ip6.ip6_plen == 0) {
			DPRINTF(("ah_massage_headers(): unsupported IPv6 "
			    "jumbogram"));
			ahstat.ahs_hdrops++;
			m_freem(m);
			return EMSGSIZE;
		}

		ip6.ip6_flow = 0;
		ip6.ip6_hlim = 0;
		ip6.ip6_vfc &= ~IPV6_VERSION_MASK;
		ip6.ip6_vfc |= IPV6_VERSION;

		/* Scoped address handling. */
		if (IN6_IS_SCOPE_LINKLOCAL(&ip6.ip6_src))
			ip6.ip6_src.s6_addr16[1] = 0;
		if (IN6_IS_SCOPE_LINKLOCAL(&ip6.ip6_dst))
			ip6.ip6_dst.s6_addr16[1] = 0;

		/* Done with IPv6 header. */
		m_copyback(m, 0, sizeof(struct ip6_hdr), &ip6);

		/* Let's deal with the remaining headers (if any). */
		if (skip - sizeof(struct ip6_hdr) > 0) {
			if (m->m_len <= skip) {
				MALLOC(ptr, unsigned char *,
				    skip - sizeof(struct ip6_hdr),
				    M_XDATA, M_NOWAIT);
				if (ptr == NULL) {
					DPRINTF(("ah_massage_headers(): "
					    "failed to allocate memory for "
					    "IPv6 headers\n"));
					ahstat.ahs_hdrops++;
					m_freem(m);
					return ENOBUFS;
				}

				/*
				 * Copy all the protocol headers after
				 * the IPv6 header.
				 */
				m_copydata(m, sizeof(struct ip6_hdr),
				    skip - sizeof(struct ip6_hdr), ptr);
				alloc = 1;
			} else {
				/* No need to allocate memory. */
				ptr = mtod(m, unsigned char *) +
				    sizeof(struct ip6_hdr);
				alloc = 0;
			}
		} else