Example #1
static int
esp_gcm_mature(struct secasvar *sav)
{
	int keylen;
	const struct esp_algorithm *algo;

	if (sav->flags & SADB_X_EXT_OLD) {
		ipseclog((LOG_ERR,
		    "esp_gcm_mature: algorithm incompatible with esp-old\n"));
		return 1;
	}
	if (sav->flags & SADB_X_EXT_DERIV) {
		ipseclog((LOG_ERR,
		    "esp_gcm_mature: algorithm incompatible with derived\n"));
		return 1;
	}
	if (sav->flags & SADB_X_EXT_IIV) {
		ipseclog((LOG_ERR,
		    "esp_gcm_mature: implicit IV not currently implemented\n"));
		return 1;
	}

	if (!sav->key_enc) {
		ipseclog((LOG_ERR, "esp_gcm_mature: no key is given.\n"));
		return 1;
	}

	algo = esp_algorithm_lookup(sav->alg_enc);
	if (!algo) {
		ipseclog((LOG_ERR,
		    "esp_gcm_mature: unsupported algorithm.\n"));
		return 1;
	}

	keylen = sav->key_enc->sadb_key_bits;
	if (keylen < algo->keymin || algo->keymax < keylen) {
		ipseclog((LOG_ERR,
		    "esp_gcm_mature %s: invalid key length %d.\n",
		    algo->name, sav->key_enc->sadb_key_bits));
		return 1;
	}
	switch (sav->alg_enc) {
	case SADB_X_EALG_AES_GCM:
		/* allows specific key sizes only */
		if (!(keylen == ESP_AESGCM_KEYLEN128 ||
		    keylen == ESP_AESGCM_KEYLEN192 ||
		    keylen == ESP_AESGCM_KEYLEN256)) {
			ipseclog((LOG_ERR,
			    "esp_gcm_mature %s: invalid key length %d.\n",
			    algo->name, keylen));
			return 1;
		}
		break;
	default:
		ipseclog((LOG_ERR,
		    "esp_gcm_mature %s: invalid algo %d.\n",
		    algo->name, sav->alg_enc));
		return 1;
	}

	return 0;
}
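For context, a *_mature routine like the one above is normally reached through the per-algorithm descriptor returned by esp_algorithm_lookup(). The sketch below shows such a call site; the field name mature and the wrapper esp_mature() are illustrative assumptions, not code taken from the examples.

static int
esp_mature(struct secasvar *sav)
{
	const struct esp_algorithm *algo;

	/* refuse SAs whose encryption algorithm is unknown */
	algo = esp_algorithm_lookup(sav->alg_enc);
	if (algo == NULL || algo->mature == NULL)
		return 1;

	/* per-algorithm check: 0 means the SA is usable, nonzero rejects it */
	return (*algo->mature)(sav);
}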
Example #2
static int
esp_descbc_mature(struct secasvar *sav)
{
	const struct esp_algorithm *algo;

	if (!(sav->flags & SADB_X_EXT_OLD) && (sav->flags & SADB_X_EXT_IV4B)) {
		ipseclog((LOG_ERR, "esp_cbc_mature: "
		    "algorithm incompatible with 4 octets IV length\n"));
		return 1;
	}

	if (!sav->key_enc) {
		ipseclog((LOG_ERR, "esp_descbc_mature: no key is given.\n"));
		return 1;
	}

	algo = esp_algorithm_lookup(sav->alg_enc);
	if (!algo) {
		ipseclog((LOG_ERR,
		    "esp_descbc_mature: unsupported algorithm.\n"));
		return 1;
	}

	if (_KEYBITS(sav->key_enc) < algo->keymin ||
	    _KEYBITS(sav->key_enc) > algo->keymax) {
		ipseclog((LOG_ERR,
		    "esp_descbc_mature: invalid key length %d.\n",
		    _KEYBITS(sav->key_enc)));
		return 1;
	}

	/* weak key check */
	if (des_is_weak_key((des_cblock *)_KEYBUF(sav->key_enc))) {
		ipseclog((LOG_ERR,
		    "esp_descbc_mature: weak key was passed.\n"));
		return 1;
	}

	return 0;
}
Example #3
/*
 * esp_init() is called when an SPI is being set up.
 */
static int
esp_init(struct secasvar *sav, struct xformsw *xsp)
{
	struct enc_xform *txform;
	struct cryptoini cria, crie;
	int keylen;
	int error;

	txform = esp_algorithm_lookup(sav->alg_enc);
	if (txform == NULL) {
		DPRINTF(("%s: unsupported encryption algorithm %d\n",
			__func__, sav->alg_enc));
		return EINVAL;
	}
	if (sav->key_enc == NULL) {
		DPRINTF(("%s: no encoding key for %s algorithm\n",
			 __func__, txform->name));
		return EINVAL;
	}
	if ((sav->flags & (SADB_X_EXT_OLD | SADB_X_EXT_IV4B)) ==
	    SADB_X_EXT_IV4B) {
		DPRINTF(("%s: 4-byte IV not supported with protocol\n",
			__func__));
		return EINVAL;
	}
	/* subtract off the salt, RFC4106, 8.1 and RFC3686, 5.1 */
	keylen = _KEYLEN(sav->key_enc) - SAV_ISCTRORGCM(sav) * 4;
	if (txform->minkey > keylen || keylen > txform->maxkey) {
		DPRINTF(("%s: invalid key length %u, must be in the range "
			"[%u..%u] for algorithm %s\n", __func__,
			keylen, txform->minkey, txform->maxkey,
			txform->name));
		return EINVAL;
	}

	/*
	 * NB: The null xform needs a non-zero blocksize to keep the
	 *      crypto code happy but if we use it to set ivlen then
	 *      the ESP header will be processed incorrectly.  The
	 *      compromise is to force it to zero here.
	 */
	if (SAV_ISCTRORGCM(sav))
		sav->ivlen = 8;	/* RFC4106 3.1 and RFC3686 3.1 */
	else
		sav->ivlen = (txform == &enc_xform_null ? 0 : txform->ivsize);

	/*
	 * Setup AH-related state.
	 */
	if (sav->alg_auth != 0) {
		error = ah_init0(sav, xsp, &cria);
		if (error)
			return error;
	}

	/* NB: override anything set in ah_init0 */
	sav->tdb_xform = xsp;
	sav->tdb_encalgxform = txform;

	/*
	 * Whenever AES-GCM is used for encryption, one
	 * of the AES authentication algorithms is chosen
	 * as well, based on the key size.
	 */
	if (sav->alg_enc == SADB_X_EALG_AESGCM16) {
		switch (keylen) {
		case AES_128_GMAC_KEY_LEN:
			sav->alg_auth = SADB_X_AALG_AES128GMAC;
			sav->tdb_authalgxform = &auth_hash_nist_gmac_aes_128;
			break;
		case AES_192_GMAC_KEY_LEN:
			sav->alg_auth = SADB_X_AALG_AES192GMAC;
			sav->tdb_authalgxform = &auth_hash_nist_gmac_aes_192;
			break;
		case AES_256_GMAC_KEY_LEN:
			sav->alg_auth = SADB_X_AALG_AES256GMAC;
			sav->tdb_authalgxform = &auth_hash_nist_gmac_aes_256;
			break;
		default:
			DPRINTF(("%s: invalid key length %u"
				 "for algorithm %s\n", __func__,
				 keylen, txform->name));
			return EINVAL;
		}
		bzero(&cria, sizeof(cria));
		cria.cri_alg = sav->tdb_authalgxform->type;
		cria.cri_key = sav->key_enc->key_data;
		cria.cri_klen = _KEYBITS(sav->key_enc) - SAV_ISGCM(sav) * 32;
	}

	/* Initialize crypto session. */
	bzero(&crie, sizeof(crie));
	crie.cri_alg = sav->tdb_encalgxform->type;
	crie.cri_key = sav->key_enc->key_data;
	crie.cri_klen = _KEYBITS(sav->key_enc) - SAV_ISCTRORGCM(sav) * 32;

	if (sav->tdb_authalgxform && sav->tdb_encalgxform) {
		/* init both auth & enc */
		crie.cri_next = &cria;
		error = crypto_newsession(&sav->tdb_cryptoid,
					  &crie, V_crypto_support);
	} else if (sav->tdb_encalgxform) {
		error = crypto_newsession(&sav->tdb_cryptoid,
					  &crie, V_crypto_support);
	} else if (sav->tdb_authalgxform) {
		error = crypto_newsession(&sav->tdb_cryptoid,
					  &cria, V_crypto_support);
	} else {
		/* XXX cannot happen? */
		DPRINTF(("%s: no encoding OR authentication xform!\n",
			__func__));
		error = EINVAL;
	}
	return error;
}
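The subtractions above follow RFC 4106 section 8.1 and RFC 3686 section 5.1: the key material delivered via PF_KEY is the cipher key followed by a 4-octet salt (or nonce), so the cipher key is 4 bytes (32 bits) shorter than the SADB key blob. A small illustrative helper under that assumption (the function name is made up, not part of this source):

static int
esp_cipher_keybits(int sadb_key_bits, int is_ctr_or_gcm)
{
	/*
	 * e.g. AES-128-GCM: the SADB blob carries 160 bits
	 * (128-bit AES key + 32-bit salt), so the cipher key is 128 bits.
	 */
	return sadb_key_bits - (is_ctr_or_gcm ? 32 : 0);
}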
Example #4
/*
 * esp_init() is called when an SPI is being set up.
 */
static int
esp_init(struct secasvar *sav, struct xformsw *xsp)
{
	struct enc_xform *txform;
	struct cryptoini cria, crie;
	int keylen;
	int error;

	txform = esp_algorithm_lookup(sav->alg_enc);
	if (txform == NULL) {
		DPRINTF(("%s: unsupported encryption algorithm %d\n",
			__func__, sav->alg_enc));
		return EINVAL;
	}
	if (sav->key_enc == NULL) {
		DPRINTF(("%s: no encoding key for %s algorithm\n",
			 __func__, txform->name));
		return EINVAL;
	}
	if ((sav->flags&(SADB_X_EXT_OLD|SADB_X_EXT_IV4B)) == SADB_X_EXT_IV4B) {
		DPRINTF(("%s: 4-byte IV not supported with protocol\n",
			__func__));
		return EINVAL;
	}
	keylen = _KEYLEN(sav->key_enc);
	if (txform->minkey > keylen || keylen > txform->maxkey) {
		DPRINTF(("%s: invalid key length %u, must be in the range "
			"[%u..%u] for algorithm %s\n", __func__,
			keylen, txform->minkey, txform->maxkey,
			txform->name));
		return EINVAL;
	}

	/*
	 * NB: The null xform needs a non-zero blocksize to keep the
	 *      crypto code happy but if we use it to set ivlen then
	 *      the ESP header will be processed incorrectly.  The
	 *      compromise is to force it to zero here.
	 */
	sav->ivlen = (txform == &enc_xform_null ? 0 : txform->blocksize);
	sav->iv = (caddr_t) malloc(sav->ivlen, M_XDATA, M_WAITOK);
	if (sav->iv == NULL) {
		DPRINTF(("%s: no memory for IV\n", __func__));
		return EINVAL;
	}
	key_randomfill(sav->iv, sav->ivlen);	/*XXX*/

	/*
	 * Setup AH-related state.
	 */
	if (sav->alg_auth != 0) {
		error = ah_init0(sav, xsp, &cria);
		if (error)
			return error;
	}

	/* NB: override anything set in ah_init0 */
	sav->tdb_xform = xsp;
	sav->tdb_encalgxform = txform;

	/* Initialize crypto session. */
	bzero(&crie, sizeof (crie));
	crie.cri_alg = sav->tdb_encalgxform->type;
	crie.cri_klen = _KEYBITS(sav->key_enc);
	crie.cri_key = sav->key_enc->key_data;
	/* XXX Rounds ? */

	if (sav->tdb_authalgxform && sav->tdb_encalgxform) {
		/* init both auth & enc */
		crie.cri_next = &cria;
		error = crypto_newsession(&sav->tdb_cryptoid,
					  &crie, V_crypto_support);
	} else if (sav->tdb_encalgxform) {
		error = crypto_newsession(&sav->tdb_cryptoid,
					  &crie, V_crypto_support);
	} else if (sav->tdb_authalgxform) {
		error = crypto_newsession(&sav->tdb_cryptoid,
					  &cria, V_crypto_support);
	} else {
		/* XXX cannot happen? */
		DPRINTF(("%s: no encoding OR authentication xform!\n",
			__func__));
		error = EINVAL;
	}
	return error;
}
Example #5
int
esp4_input(struct mbuf **mp, int *offp, int proto)
{
	int off;
	struct ip *ip;
	struct esp *esp;
	struct esptail esptail;
	struct mbuf *m;
	u_int32_t spi;
	struct secasvar *sav = NULL;
	size_t taillen;
	u_int16_t nxt;
	const struct esp_algorithm *algo;
	int ivlen;
	size_t hlen;
	size_t esplen;

	off = *offp;
	m = *mp;
	*mp = NULL;

	/* sanity check for alignment. */
	if (off % 4 != 0 || m->m_pkthdr.len % 4 != 0) {
		ipseclog((LOG_ERR, "IPv4 ESP input: packet alignment problem "
			"(off=%d, pktlen=%d)\n", off, m->m_pkthdr.len));
		ipsecstat.in_inval++;
		goto bad;
	}

	if (m->m_len < off + ESPMAXLEN) {
		m = m_pullup(m, off + ESPMAXLEN);
		if (!m) {
			ipseclog((LOG_DEBUG,
			    "IPv4 ESP input: can't pullup in esp4_input\n"));
			ipsecstat.in_inval++;
			goto bad;
		}
	}

	ip = mtod(m, struct ip *);
	esp = (struct esp *)(((u_int8_t *)ip) + off);
#ifdef _IP_VHL
	hlen = IP_VHL_HL(ip->ip_vhl) << 2;
#else
	hlen = ip->ip_hl << 2;
#endif

	/* find the sassoc. */
	spi = esp->esp_spi;

	if ((sav = key_allocsa(AF_INET,
	                      (caddr_t)&ip->ip_src, (caddr_t)&ip->ip_dst,
	                      IPPROTO_ESP, spi)) == 0) {
		ipseclog((LOG_WARNING,
		    "IPv4 ESP input: no key association found for spi %u\n",
		    (u_int32_t)ntohl(spi)));
		ipsecstat.in_nosa++;
		goto bad;
	}
	KEYDEBUG(KEYDEBUG_IPSEC_STAMP,
		kprintf("DP esp4_input called to allocate SA:%p\n", sav));
	if (sav->state != SADB_SASTATE_MATURE
	 && sav->state != SADB_SASTATE_DYING) {
		ipseclog((LOG_DEBUG,
		    "IPv4 ESP input: non-mature/dying SA found for spi %u\n",
		    (u_int32_t)ntohl(spi)));
		ipsecstat.in_badspi++;
		goto bad;
	}
	algo = esp_algorithm_lookup(sav->alg_enc);
	if (!algo) {
		ipseclog((LOG_DEBUG, "IPv4 ESP input: "
		    "unsupported encryption algorithm for spi %u\n",
		    (u_int32_t)ntohl(spi)));
		ipsecstat.in_badspi++;
		goto bad;
	}

	/* check if we have proper ivlen information */
	ivlen = sav->ivlen;
	if (ivlen < 0) {
		ipseclog((LOG_ERR, "improper ivlen in IPv4 ESP input: %s %s\n",
		    ipsec4_logpacketstr(ip, spi), ipsec_logsastr(sav)));
		ipsecstat.in_inval++;
		goto bad;
	}

	if (!((sav->flags & SADB_X_EXT_OLD) == 0 && sav->replay
	 && (sav->alg_auth && sav->key_auth)))
		goto noreplaycheck;

	if (sav->alg_auth == SADB_X_AALG_NULL ||
	    sav->alg_auth == SADB_AALG_NONE)
		goto noreplaycheck;

	/*
	 * check for sequence number.
	 */
	if (ipsec_chkreplay(ntohl(((struct newesp *)esp)->esp_seq), sav))
		; /* okay */
	else {
		ipsecstat.in_espreplay++;
		ipseclog((LOG_WARNING,
		    "replay packet in IPv4 ESP input: %s %s\n",
		    ipsec4_logpacketstr(ip, spi), ipsec_logsastr(sav)));
		goto bad;
	}

	/* check ICV */
    {
	u_char sum0[AH_MAXSUMSIZE];
	u_char sum[AH_MAXSUMSIZE];
	const struct ah_algorithm *sumalgo;
	size_t siz;

	sumalgo = ah_algorithm_lookup(sav->alg_auth);
	if (!sumalgo)
		goto noreplaycheck;
	siz = (((*sumalgo->sumsiz)(sav) + 3) & ~(4 - 1));
	if (m->m_pkthdr.len < off + ESPMAXLEN + siz) {
		ipsecstat.in_inval++;
		goto bad;
	}
	if (AH_MAXSUMSIZE < siz) {
		ipseclog((LOG_DEBUG,
		    "internal error: AH_MAXSUMSIZE must be larger than %lu\n",
		    (u_long)siz));
		ipsecstat.in_inval++;
		goto bad;
	}

	m_copydata(m, m->m_pkthdr.len - siz, siz, &sum0[0]);

	if (esp_auth(m, off, m->m_pkthdr.len - off - siz, sav, sum)) {
		ipseclog((LOG_WARNING, "auth fail in IPv4 ESP input: %s %s\n",
		    ipsec4_logpacketstr(ip, spi), ipsec_logsastr(sav)));
		ipsecstat.in_espauthfail++;
		goto bad;
	}

	if (bcmp(sum0, sum, siz) != 0) {
		ipseclog((LOG_WARNING, "auth fail in IPv4 ESP input: %s %s\n",
		    ipsec4_logpacketstr(ip, spi), ipsec_logsastr(sav)));
		ipsecstat.in_espauthfail++;
		goto bad;
	}

	/* strip off the authentication data */
	m_adj(m, -siz);
	ip = mtod(m, struct ip *);
#ifdef IPLEN_FLIPPED
	ip->ip_len = ip->ip_len - siz;
#else
	ip->ip_len = htons(ntohs(ip->ip_len) - siz);
#endif
	m->m_flags |= M_AUTHIPDGM;
	ipsecstat.in_espauthsucc++;
    }

	/*
	 * update sequence number.
	 */
	if ((sav->flags & SADB_X_EXT_OLD) == 0 && sav->replay) {
		if (ipsec_updatereplay(ntohl(((struct newesp *)esp)->esp_seq), sav)) {
			ipsecstat.in_espreplay++;
			goto bad;
		}
	}

noreplaycheck:

	/* process main esp header. */
	if (sav->flags & SADB_X_EXT_OLD) {
		/* RFC 1827 */
		esplen = sizeof(struct esp);
	} else {
		/* RFC 2406 */
		if (sav->flags & SADB_X_EXT_DERIV)
			esplen = sizeof(struct esp);
		else
			esplen = sizeof(struct newesp);
	}

	if (m->m_pkthdr.len < off + esplen + ivlen + sizeof(esptail)) {
		ipseclog((LOG_WARNING,
		    "IPv4 ESP input: packet too short\n"));
		ipsecstat.in_inval++;
		goto bad;
	}

	if (m->m_len < off + esplen + ivlen) {
		m = m_pullup(m, off + esplen + ivlen);
		if (!m) {
			ipseclog((LOG_DEBUG,
			    "IPv4 ESP input: can't pullup in esp4_input\n"));
			ipsecstat.in_inval++;
			goto bad;
		}
	}

	/*
	 * pre-compute and cache intermediate key
	 */
	if (esp_schedule(algo, sav) != 0) {
		ipsecstat.in_inval++;
		goto bad;
	}

	/*
	 * decrypt the packet.
	 */
	if (!algo->decrypt)
		panic("internal error: no decrypt function");
	if ((*algo->decrypt)(m, off, sav, algo, ivlen)) {
		/* m is already freed */
		m = NULL;
		ipseclog((LOG_ERR, "decrypt fail in IPv4 ESP input: %s\n",
		    ipsec_logsastr(sav)));
		ipsecstat.in_inval++;
		goto bad;
	}
	ipsecstat.in_esphist[sav->alg_enc]++;

	m->m_flags |= M_DECRYPTED;

	/*
	 * find the trailer of the ESP.
	 */
	m_copydata(m, m->m_pkthdr.len - sizeof(esptail), sizeof(esptail),
	     (caddr_t)&esptail);
	nxt = esptail.esp_nxt;
	taillen = esptail.esp_padlen + sizeof(esptail);

	if (m->m_pkthdr.len < taillen ||
	    m->m_pkthdr.len - taillen < off + esplen + ivlen + sizeof(esptail)) {
		ipseclog((LOG_WARNING,
		    "bad pad length in IPv4 ESP input: %s %s\n",
		    ipsec4_logpacketstr(ip, spi), ipsec_logsastr(sav)));
		ipsecstat.in_inval++;
		goto bad;
	}

	/* strip off the trailing pad area. */
	m_adj(m, -taillen);

#ifdef IPLEN_FLIPPED
	ip->ip_len = ip->ip_len - taillen;
#else
	ip->ip_len = htons(ntohs(ip->ip_len) - taillen);
#endif

	/* was it transmitted over the IPsec tunnel SA? */
	if (ipsec4_tunnel_validate(m, off + esplen + ivlen, nxt, sav)) {
		/*
		 * strip off all the headers that precede the ESP header.
		 *	IP4 xx ESP IP4' payload -> IP4' payload
		 *
		 * XXX more sanity checks
		 * XXX relationship with gif?
		 */
		u_int8_t tos;

		tos = ip->ip_tos;
		m_adj(m, off + esplen + ivlen);
		if (m->m_len < sizeof(*ip)) {
			m = m_pullup(m, sizeof(*ip));
			if (!m) {
				ipsecstat.in_inval++;
				goto bad;
			}
		}
		ip = mtod(m, struct ip *);
		/* ECN consideration. */
		ip_ecn_egress(ip4_ipsec_ecn, &tos, &ip->ip_tos);
		if (!key_checktunnelsanity(sav, AF_INET,
			    (caddr_t)&ip->ip_src, (caddr_t)&ip->ip_dst)) {
			ipseclog((LOG_ERR, "ipsec tunnel address mismatch "
			    "in IPv4 ESP input: %s %s\n",
			    ipsec4_logpacketstr(ip, spi), ipsec_logsastr(sav)));
			ipsecstat.in_inval++;
			goto bad;
		}

		key_sa_recordxfer(sav, m);
		if (ipsec_addhist(m, IPPROTO_ESP, spi) != 0 ||
		    ipsec_addhist(m, IPPROTO_IPV4, 0) != 0) {
			ipsecstat.in_nomem++;
			goto bad;
		}

		if (netisr_queue(NETISR_IP, m)) {
			ipsecstat.in_inval++;
			m = NULL;
			goto bad;
		}

		nxt = IPPROTO_DONE;
	} else {
Example #6
/*
 * esp_init() is called when an SPI is being set up.
 */
static int
esp_init(struct secasvar *sav, const struct xformsw *xsp)
{
    const struct enc_xform *txform;
    struct cryptoini cria, crie;
    int keylen;
    int error;

    txform = esp_algorithm_lookup(sav->alg_enc);
    if (txform == NULL) {
        DPRINTF(("esp_init: unsupported encryption algorithm %d\n",
                 sav->alg_enc));
        return EINVAL;
    }
    if (sav->key_enc == NULL) {
        DPRINTF(("esp_init: no encoding key for %s algorithm\n",
                 txform->name));
        return EINVAL;
    }
    if ((sav->flags&(SADB_X_EXT_OLD|SADB_X_EXT_IV4B)) == SADB_X_EXT_IV4B) {
        DPRINTF(("esp_init: 4-byte IV not supported with protocol\n"));
        return EINVAL;
    }
    keylen = _KEYLEN(sav->key_enc);
    if (txform->minkey > keylen || keylen > txform->maxkey) {
        DPRINTF(("esp_init: invalid key length %u, must be in "
                 "the range [%u..%u] for algorithm %s\n",
                 keylen, txform->minkey, txform->maxkey,
                 txform->name));
        return EINVAL;
    }

    sav->ivlen = txform->ivsize;

    /*
     * Setup AH-related state.
     */
    if (sav->alg_auth != 0) {
        error = ah_init0(sav, xsp, &cria);
        if (error)
            return error;
    }

    /* NB: override anything set in ah_init0 */
    sav->tdb_xform = xsp;
    sav->tdb_encalgxform = txform;

    if (sav->alg_enc == SADB_X_EALG_AESGCM16 ||
            sav->alg_enc == SADB_X_EALG_AESGMAC) {
        switch (keylen) {
        case 20:
            sav->alg_auth = SADB_X_AALG_AES128GMAC;
            sav->tdb_authalgxform = &auth_hash_gmac_aes_128;
            break;
        case 28:
            sav->alg_auth = SADB_X_AALG_AES192GMAC;
            sav->tdb_authalgxform = &auth_hash_gmac_aes_192;
            break;
        case 36:
            sav->alg_auth = SADB_X_AALG_AES256GMAC;
            sav->tdb_authalgxform = &auth_hash_gmac_aes_256;
            break;
        }
        memset(&cria, 0, sizeof(cria));
        cria.cri_alg = sav->tdb_authalgxform->type;
        cria.cri_klen = _KEYBITS(sav->key_enc);
        cria.cri_key = _KEYBUF(sav->key_enc);
    }

    /* Initialize crypto session. */
    memset(&crie, 0, sizeof (crie));
    crie.cri_alg = sav->tdb_encalgxform->type;
    crie.cri_klen = _KEYBITS(sav->key_enc);
    crie.cri_key = _KEYBUF(sav->key_enc);
    /* XXX Rounds ? */

    if (sav->tdb_authalgxform && sav->tdb_encalgxform) {
        /* init both auth & enc */
        crie.cri_next = &cria;
        error = crypto_newsession(&sav->tdb_cryptoid,
                                  &crie, crypto_support);
    } else if (sav->tdb_encalgxform) {
        error = crypto_newsession(&sav->tdb_cryptoid,
                                  &crie, crypto_support);
    } else if (sav->tdb_authalgxform) {
        error = crypto_newsession(&sav->tdb_cryptoid,
                                  &cria, crypto_support);
    } else {
        /* XXX cannot happen? */
        DPRINTF(("esp_init: no encoding OR authentication xform!\n"));
        error = EINVAL;
    }
    return error;
}
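The keylen cases in the switch above are the RFC 4106 key-material sizes in bytes: an AES key of 16, 24 or 32 bytes followed by a 4-byte salt. A hypothetical helper (the name is invented here) spelling out that mapping:

static int
gcm_keymat_to_aes_bits(int keylen)
{
	switch (keylen) {
	case 20:	/* 16-byte AES-128 key + 4-byte salt */
		return 128;
	case 28:	/* 24-byte AES-192 key + 4-byte salt */
		return 192;
	case 36:	/* 32-byte AES-256 key + 4-byte salt */
		return 256;
	default:	/* anything else is not valid GCM key material */
		return 0;
	}
}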
Example #7
static int
esp_cbc_mature(struct secasvar *sav)
{
	int keylen;
	const struct esp_algorithm *algo;

	if (sav->flags & SADB_X_EXT_OLD) {
		ipseclog((LOG_ERR,
		    "esp_cbc_mature: algorithm incompatible with esp-old\n"));
		return 1;
	}
	if (sav->flags & SADB_X_EXT_DERIV) {
		ipseclog((LOG_ERR,
		    "esp_cbc_mature: algorithm incompatible with derived\n"));
		return 1;
	}

	if (!sav->key_enc) {
		ipseclog((LOG_ERR, "esp_cbc_mature: no key is given.\n"));
		return 1;
	}

	algo = esp_algorithm_lookup(sav->alg_enc);
	if (!algo) {
		ipseclog((LOG_ERR,
		    "esp_cbc_mature: unsupported algorithm.\n"));
		return 1;
	}

	keylen = sav->key_enc->sadb_key_bits;
	if (keylen < algo->keymin || algo->keymax < keylen) {
		ipseclog((LOG_ERR,
		    "esp_cbc_mature %s: invalid key length %d.\n",
		    algo->name, sav->key_enc->sadb_key_bits));
		return 1;
	}
	switch (sav->alg_enc) {
	case SADB_EALG_3DESCBC:
		/* weak key check */
		if (des_is_weak_key((des_cblock *)_KEYBUF(sav->key_enc)) ||
		    des_is_weak_key((des_cblock *)(_KEYBUF(sav->key_enc) + 8)) ||
		    des_is_weak_key((des_cblock *)(_KEYBUF(sav->key_enc) + 16))) {
			ipseclog((LOG_ERR,
			    "esp_cbc_mature %s: weak key was passed.\n",
			    algo->name));
			return 1;
		}
		break;
	case SADB_X_EALG_RIJNDAELCBC:
		/* allows specific key sizes only */
		if (!(keylen == 128 || keylen == 192 || keylen == 256)) {
			ipseclog((LOG_ERR,
			    "esp_cbc_mature %s: invalid key length %d.\n",
			    algo->name, keylen));
			return 1;
		}
		break;
	}

	return 0;
}
Example #8
/*
 * compute ESP header size.
 */
size_t
esp_hdrsiz(struct ipsecrequest *isr)
{
	struct secasvar *sav;
	const struct esp_algorithm *algo;
	const struct ah_algorithm *aalgo;
	size_t ivlen;
	size_t authlen;
	size_t hdrsiz;

	/* sanity check */
	if (isr == NULL)
		panic("esp_hdrsiz: NULL was passed.");

	sav = isr->sav;

	if (isr->saidx.proto != IPPROTO_ESP)
		panic("unsupported mode passed to esp_hdrsiz");

	if (sav == NULL)
		goto estimate;
	if (sav->state != SADB_SASTATE_MATURE &&
	    sav->state != SADB_SASTATE_DYING)
		goto estimate;

	/* we need transport mode ESP. */
	algo = esp_algorithm_lookup(sav->alg_enc);
	if (!algo)
		goto estimate;
	ivlen = sav->ivlen;
	if (ivlen < 0)
		goto estimate;

	/*
	 * XXX
	 * right now we don't calculate the padding size.  simply
	 * treat the padding size as constant, for simplicity.
	 *
	 * XXX variable size padding support
	 */
	if (sav->flags & SADB_X_EXT_OLD) {
		/* RFC 1827 */
		hdrsiz = sizeof(struct esp) + ivlen + 9;
	} else {
		/* RFC 2406 */
		aalgo = ah_algorithm_lookup(sav->alg_auth);
		if (aalgo && sav->replay && sav->key_auth)
			authlen = (aalgo->sumsiz)(sav);
		else
			authlen = 0;
		hdrsiz = sizeof(struct newesp) + ivlen + 9 + authlen;
	}

	return hdrsiz;

estimate:
	/*
	 * ASSUMING:
	 *	sizeof(struct newesp) > sizeof(struct esp).
	 *	esp_max_ivlen() = max ivlen for CBC mode
	 *	9 = (maximum padding length without random padding length)
	 *	   + (Pad Length field) + (Next Header field).
	 *	16 = maximum ICV we support.
	 */
	return sizeof(struct newesp) + esp_max_ivlen() + 9 + 16;
}
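To make the fallback estimate concrete: struct newesp is 8 bytes (SPI plus sequence number), so assuming the largest CBC IV is 16 bytes, the worst case comes to 8 + 16 + 9 + 16 = 49 bytes of ESP overhead per packet. The 16-byte IV is an assumption about esp_max_ivlen(), not a value taken from this source.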
Example #9
/*
 * Modify the packet so that the payload is encrypted.
 * The mbuf (m) must start with IPv4 or IPv6 header.
 * On failure, free the given mbuf and return an error.
 *
 * on invocation:
 *	m   nexthdrp md
 *	v   v        v
 *	IP ......... payload
 * during the encryption:
 *	m   nexthdrp mprev md
 *	v   v        v     v
 *	IP ............... esp iv payload pad padlen nxthdr
 *	                   <--><-><------><--------------->
 *	                   esplen plen    extendsiz
 *	                       ivlen
 *	                   <-----> esphlen
 *	<-> hlen
 *	<-----------------> espoff
 */
static int
esp_output(struct mbuf *m, u_char *nexthdrp, struct mbuf *md,
	   struct ipsecrequest *isr, int af)
{
	struct mbuf *n;
	struct mbuf *mprev;
	struct esp *esp;
	struct esptail *esptail;
	struct secasvar *sav = isr->sav;
	const struct esp_algorithm *algo;
	u_int32_t spi;
	u_int8_t nxt = 0;
	size_t plen;	/* payload length to be encrypted */
	size_t espoff;
	int ivlen;
	int afnumber;
	size_t extendsiz;
	int error = 0;
	struct ipsecstat *stat;

	switch (af) {
#ifdef INET
	case AF_INET:
		afnumber = 4;
		stat = &ipsecstat;
		break;
#endif
#ifdef INET6
	case AF_INET6:
		afnumber = 6;
		stat = &ipsec6stat;
		break;
#endif
	default:
		ipseclog((LOG_ERR, "esp_output: unsupported af %d\n", af));
		return 0;	/* no change at all */
	}

	/* some sanity check */
	if ((sav->flags & SADB_X_EXT_OLD) == 0 && !sav->replay) {
		switch (af) {
#ifdef INET
		case AF_INET:
		    {
			struct ip *ip;

			ip = mtod(m, struct ip *);
			ipseclog((LOG_DEBUG, "esp4_output: internal error: "
				"sav->replay is null: %x->%x, SPI=%u\n",
				(u_int32_t)ntohl(ip->ip_src.s_addr),
				(u_int32_t)ntohl(ip->ip_dst.s_addr),
				(u_int32_t)ntohl(sav->spi)));
			ipsecstat.out_inval++;
			break;
		    }
#endif /* INET */
#ifdef INET6
		case AF_INET6:
			ipseclog((LOG_DEBUG, "esp6_output: internal error: "
				"sav->replay is null: SPI=%u\n",
				(u_int32_t)ntohl(sav->spi)));
			ipsec6stat.out_inval++;
			break;
#endif /* INET6 */
		default:
			panic("esp_output: should not reach here");
		}
		m_freem(m);
		return EINVAL;
	}

	algo = esp_algorithm_lookup(sav->alg_enc);
	if (!algo) {
		ipseclog((LOG_ERR, "esp_output: unsupported algorithm: "
		    "SPI=%u\n", (u_int32_t)ntohl(sav->spi)));
		m_freem(m);
		return EINVAL;
	}
	spi = sav->spi;
	ivlen = sav->ivlen;
	/* should be okay */
	if (ivlen < 0) {
		panic("invalid ivlen");
	}

    {
	/*
	 * insert ESP header.
	 * XXX inserts ESP header right after IPv4 header.  should
	 * chase the header chain.
	 * XXX sequential number
	 */
#ifdef INET
	struct ip *ip = NULL;
#endif
	size_t esplen;	/* sizeof(struct esp/newesp) */
	size_t esphlen;	/* sizeof(struct esp/newesp) + ivlen */

	if (sav->flags & SADB_X_EXT_OLD) {
		/* RFC 1827 */
		esplen = sizeof(struct esp);
	} else {
		/* RFC 2406 */
		if (sav->flags & SADB_X_EXT_DERIV)
			esplen = sizeof(struct esp);
		else
			esplen = sizeof(struct newesp);
	}
	esphlen = esplen + ivlen;

	for (mprev = m; mprev && mprev->m_next != md; mprev = mprev->m_next)
		;
	if (mprev == NULL || mprev->m_next != md) {
		ipseclog((LOG_DEBUG, "esp%d_output: md is not in chain\n",
		    afnumber));
		m_freem(m);
		return EINVAL;
	}

	plen = 0;
	for (n = md; n; n = n->m_next)
		plen += n->m_len;

	switch (af) {
#ifdef INET
	case AF_INET:
		ip = mtod(m, struct ip *);
		break;
#endif
#ifdef INET6
	case AF_INET6:
		break;
#endif
	}

	/* make the packet over-writable */
	mprev->m_next = NULL;
	if ((md = ipsec_copypkt(md)) == NULL) {
		m_freem(m);
		error = ENOBUFS;
		goto fail;
	}
	mprev->m_next = md;

	espoff = m->m_pkthdr.len - plen;

	/*
	 * grow the mbuf to accommodate the ESP header.
	 * before: IP ... payload
	 * after:  IP ... ESP IV payload
	 */
	if (M_LEADINGSPACE(md) < esphlen || (md->m_flags & M_EXT)) {
		MGET(n, M_NOWAIT, MT_DATA);
		if (!n) {
			m_freem(m);
			error = ENOBUFS;
			goto fail;
		}
		n->m_len = esphlen;
		mprev->m_next = n;
		n->m_next = md;
		m->m_pkthdr.len += esphlen;
		esp = mtod(n, struct esp *);
	} else {
Example #10
/*
 * Modify the packet so that the payload is encrypted.
 * The mbuf (m) must start with IPv4 or IPv6 header.
 * On failure, free the given mbuf and return an error.
 *
 * on invocation:
 *	m   nexthdrp md
 *	v   v        v
 *	IP ......... payload
 * during the encryption:
 *	m   nexthdrp mprev md
 *	v   v        v     v
 *	IP ............... esp iv payload pad padlen nxthdr
 *	                   <--><-><------><--------------->
 *	                   esplen plen    extendsiz
 *	                       ivlen
 *	                   <-----> esphlen
 *	<-> hlen
 *	<-----------------> espoff
 */
static int
esp_output(
	struct mbuf *m,
	u_char *nexthdrp,
	struct mbuf *md,
	int af,
	struct secasvar *sav)
{
	struct mbuf *n;
	struct mbuf *mprev;
	struct esp *esp;
	struct esptail *esptail;
	const struct esp_algorithm *algo;
	u_int32_t spi;
	u_int8_t nxt = 0;
	size_t plen;	/*payload length to be encrypted*/
	size_t espoff;
	size_t esphlen;	/* sizeof(struct esp/newesp) + ivlen */
	int ivlen;
	int afnumber;
	size_t extendsiz;
	int error = 0;
	struct ipsecstat *stat;
	struct udphdr *udp = NULL;
	int	udp_encapsulate = (sav->flags & SADB_X_EXT_NATT && (af == AF_INET || af == AF_INET6) &&
			(esp_udp_encap_port & 0xFFFF) != 0);

	KERNEL_DEBUG(DBG_FNC_ESPOUT | DBG_FUNC_START, sav->ivlen,0,0,0,0);
	switch (af) {
#if INET
	case AF_INET:
		afnumber = 4;
		stat = &ipsecstat;
		break;
#endif
#if INET6
	case AF_INET6:
		afnumber = 6;
		stat = &ipsec6stat;
		break;
#endif
	default:
		ipseclog((LOG_ERR, "esp_output: unsupported af %d\n", af));
		KERNEL_DEBUG(DBG_FNC_ESPOUT | DBG_FUNC_END, 1,0,0,0,0);
		return 0;	/* no change at all */
	}

	/* some sanity check */
	if ((sav->flags & SADB_X_EXT_OLD) == 0 && !sav->replay) {
		switch (af) {
#if INET
		case AF_INET:
		    {
			struct ip *ip;

			ip = mtod(m, struct ip *);
			ipseclog((LOG_DEBUG, "esp4_output: internal error: "
				"sav->replay is null: %x->%x, SPI=%u\n",
				(u_int32_t)ntohl(ip->ip_src.s_addr),
				(u_int32_t)ntohl(ip->ip_dst.s_addr),
				(u_int32_t)ntohl(sav->spi)));
			IPSEC_STAT_INCREMENT(ipsecstat.out_inval);
			break;
		    }
#endif /*INET*/
#if INET6
		case AF_INET6:
			ipseclog((LOG_DEBUG, "esp6_output: internal error: "
				"sav->replay is null: SPI=%u\n",
				(u_int32_t)ntohl(sav->spi)));
			IPSEC_STAT_INCREMENT(ipsec6stat.out_inval);
			break;
#endif /*INET6*/
		default:
			panic("esp_output: should not reach here");
		}
		m_freem(m);
		KERNEL_DEBUG(DBG_FNC_ESPOUT | DBG_FUNC_END, 2,0,0,0,0);
		return EINVAL;
	}

	algo = esp_algorithm_lookup(sav->alg_enc);
	if (!algo) {
		ipseclog((LOG_ERR, "esp_output: unsupported algorithm: "
		    "SPI=%u\n", (u_int32_t)ntohl(sav->spi)));
		m_freem(m);
		KERNEL_DEBUG(DBG_FNC_ESPOUT | DBG_FUNC_END, 3,0,0,0,0);
		return EINVAL;
	}
	spi = sav->spi;
	ivlen = sav->ivlen;
	/* should be okay */
	if (ivlen < 0) {
		panic("invalid ivlen");
	}

    {
	/*
	 * insert ESP header.
	 * XXX inserts ESP header right after IPv4 header.  should
	 * chase the header chain.
	 * XXX sequential number
	 */
#if INET
	struct ip *ip = NULL;
#endif
#if INET6
	struct ip6_hdr *ip6 = NULL;
#endif
	size_t esplen;	/* sizeof(struct esp/newesp) */
	size_t hlen = 0;	/* ip header len */

	if (sav->flags & SADB_X_EXT_OLD) {
		/* RFC 1827 */
		esplen = sizeof(struct esp);
	} else {
		/* RFC 2406 */
		if (sav->flags & SADB_X_EXT_DERIV)
			esplen = sizeof(struct esp);
		else
			esplen = sizeof(struct newesp);
	}
	esphlen = esplen + ivlen;

	for (mprev = m; mprev && mprev->m_next != md; mprev = mprev->m_next)
		;
	if (mprev == NULL || mprev->m_next != md) {
		ipseclog((LOG_DEBUG, "esp%d_output: md is not in chain\n",
		    afnumber));
		m_freem(m);
		KERNEL_DEBUG(DBG_FNC_ESPOUT | DBG_FUNC_END, 4,0,0,0,0);
		return EINVAL;
	}

	plen = 0;
	for (n = md; n; n = n->m_next)
		plen += n->m_len;

	switch (af) {
#if INET
	case AF_INET:
		ip = mtod(m, struct ip *);
#ifdef _IP_VHL
		hlen = IP_VHL_HL(ip->ip_vhl) << 2;
#else
		hlen = ip->ip_hl << 2;
#endif
		break;
#endif
#if INET6
	case AF_INET6:
		ip6 = mtod(m, struct ip6_hdr *);
		hlen = sizeof(*ip6);
		break;
#endif
	}

	/* make the packet over-writable */
	mprev->m_next = NULL;
	if ((md = ipsec_copypkt(md)) == NULL) {
		m_freem(m);
		error = ENOBUFS;
		goto fail;
	}
	mprev->m_next = md;
	
	/* 
	 * Translate UDP source port back to its original value.
	 * SADB_X_EXT_NATT_MULTIPLEUSERS is only set for transport mode.
	 */
	if ((sav->flags & SADB_X_EXT_NATT_MULTIPLEUSERS) != 0) {
		/* if not UDP - drop it */
		if (ip->ip_p != IPPROTO_UDP)	{
			IPSEC_STAT_INCREMENT(ipsecstat.out_inval);
			m_freem(m);
			error = EINVAL;
			goto fail;
		}			
		
		udp = mtod(md, struct udphdr *);

		/* if src port not set in sav - find it */
		if (sav->natt_encapsulated_src_port == 0)
			if (key_natt_get_translated_port(sav) == 0) {
				m_freem(m);
				error = EINVAL;
				goto fail;
			}
		if (sav->remote_ike_port == htons(udp->uh_dport)) {
			/* translate UDP port */
			udp->uh_dport = sav->natt_encapsulated_src_port;
			udp->uh_sum = 0;	/* don't need checksum with ESP auth */
		} else {
			/* drop the packet - can't translate the port */
			IPSEC_STAT_INCREMENT(ipsecstat.out_inval);
			m_freem(m);
			error = EINVAL;
			goto fail;
		}
	}
		

	espoff = m->m_pkthdr.len - plen;
	
	if (udp_encapsulate) {
		esphlen += sizeof(struct udphdr);
		espoff += sizeof(struct udphdr);
	}

	/*
	 * grow the mbuf to accommodate the ESP header.
	 * before: IP ... payload
	 * after:  IP ... [UDP] ESP IV payload
	 */
	if (M_LEADINGSPACE(md) < esphlen || (md->m_flags & M_EXT) != 0) {
		MGET(n, M_DONTWAIT, MT_DATA);
		if (!n) {
			m_freem(m);
			error = ENOBUFS;
			goto fail;
		}
		n->m_len = esphlen;
		mprev->m_next = n;
		n->m_next = md;
		m->m_pkthdr.len += esphlen;
		if (udp_encapsulate) {
			udp = mtod(n, struct udphdr *);
			esp = (struct esp *)(void *)((caddr_t)udp + sizeof(struct udphdr));
		} else {
			esp = mtod(n, struct esp *);
		}
	} else {
Example #11
/*
 * compute ESP header size.
 */
size_t
esp_hdrsiz(__unused struct ipsecrequest *isr)
{

#if 0
	/* sanity check */
	if (isr == NULL)
		panic("esp_hdrsiz: NULL was passed.\n");


	lck_mtx_lock(sadb_mutex);
	{
		struct secasvar *sav;
		const struct esp_algorithm *algo;
		const struct ah_algorithm *aalgo;
		size_t ivlen;
		size_t authlen;
		size_t hdrsiz;
		size_t maxpad;
	
		/*%%%% this needs to change - no sav in ipsecrequest any more */
		sav = isr->sav;
	
		if (isr->saidx.proto != IPPROTO_ESP)
			panic("unsupported mode passed to esp_hdrsiz");
	
		if (sav == NULL)
			goto estimate;
		if (sav->state != SADB_SASTATE_MATURE
		 && sav->state != SADB_SASTATE_DYING)
			goto estimate;
	
		/* we need transport mode ESP. */
		algo = esp_algorithm_lookup(sav->alg_enc);
		if (!algo)
			goto estimate;
		ivlen = sav->ivlen;
		if (ivlen < 0)
			goto estimate;
	
		if (algo->padbound)
			maxpad = algo->padbound;
		else
			maxpad = 4;
		maxpad += 1; /* maximum 'extendsiz' is padbound + 1, see esp_output */
		
		if (sav->flags & SADB_X_EXT_OLD) {
			/* RFC 1827 */
			hdrsiz = sizeof(struct esp) + ivlen + maxpad;
		} else {
			/* RFC 2406 */
			aalgo = ah_algorithm_lookup(sav->alg_auth);
			if (aalgo && sav->replay && sav->key_auth)
				authlen = (aalgo->sumsiz)(sav);
			else
				authlen = 0;
			hdrsiz = sizeof(struct newesp) + ivlen + maxpad + authlen;
		}
		
		/*
		 * If the security association indicates that NATT is required,
		 * add the size of the NATT encapsulation header:
		 */
		if ((sav->flags & SADB_X_EXT_NATT) != 0)
			hdrsiz += sizeof(struct udphdr) + 4;
	
		lck_mtx_unlock(sadb_mutex);
		return hdrsiz;
	}
estimate:
   lck_mtx_unlock(sadb_mutex);
#endif
	/*
	 * ASSUMING:
	 *	sizeof(struct newesp) > sizeof(struct esp). (8)
	 *	esp_max_ivlen() = max ivlen for CBC mode
	 *	17 = (maximum padding length without random padding length)
	 *	   + (Pad Length field) + (Next Header field).
	 *	64 = maximum ICV we support.
	 *  sizeof(struct udphdr) in case NAT traversal is used
	 */
	return sizeof(struct newesp) + esp_max_ivlen() + 17 + AH_MAXSUMSIZE + sizeof(struct udphdr);
}
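By the same reasoning, and taking the in-code comment's values at face value (AH_MAXSUMSIZE of 64, a worst-case IV of 16 bytes, both assumptions rather than values read from headers), this variant's estimate works out to 8 + 16 + 17 + 64 + 8 = 113 bytes, the final 8 being the UDP header reserved for NAT traversal.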