/*
 * Generate a cookie (aka SPI).
 *
 * initiator: true to create an Initiator cookie (pure random bytes),
 *            false for a Responder cookie (hash-derived).
 * cookie:    output buffer of COOKIE_SIZE bytes.
 * addr:      peer address, mixed into responder cookies.
 *
 * Length SHOULD be a multiple of sizeof(u_int32_t).
 *
 * As responder, we use a hashing method to get a pseudo random value
 * instead of using our own random pool.  It will prevent an attacker
 * from gaining raw data from our random pool and it will prevent an
 * attacker from depleting our random pool or entropy.
 */
void get_cookie(bool initiator, u_int8_t cookie[COOKIE_SIZE],
		const ip_address *addr)
{
	for (;;) {
		if (initiator) {
			/* initiator cookies are straight random bytes */
			get_rnd_bytes(cookie, COOKIE_SIZE);
		} else {
			/* per-process counter makes each responder cookie unique */
			static u_int32_t responder_count = 0;	/* STATIC */
			/* sized for the larger of an IPv4 or IPv6 address */
			unsigned char raw_addr[sizeof(union {
				struct in_addr A;
				struct in6_addr B;
			})];
			u_char digest[SHA2_256_DIGEST_SIZE];
			sha256_context hash_ctx;
			size_t raw_addr_len =
				addrbytesof(addr, raw_addr, sizeof(raw_addr));

			sha256_init(&hash_ctx);
			sha256_write(&hash_ctx, raw_addr, raw_addr_len);
			sha256_write(&hash_ctx, secret_of_the_day,
				     sizeof(secret_of_the_day));
			responder_count++;
			sha256_write(&hash_ctx, (const void *) &responder_count,
				     sizeof(responder_count));
			sha256_final(digest, &hash_ctx);

			/* cookie size is smaller than any hash output sizes */
			memcpy(cookie, digest, COOKIE_SIZE);
		}
		/* an all-zero cookie is reserved; retry (probably never loops) */
		if (!is_zero_cookie(cookie))
			break;
	}
}
/* Generate a cookie.
 * First argument is true if we're to create an Initiator cookie.
 * Length SHOULD be a multiple of sizeof(u_int32_t), and in the
 * responder case must not exceed SHA1_DIGEST_SIZE since the cookie
 * is copied out of a single SHA-1 digest.
 */
void get_cookie(bool initiator, u_int8_t *cookie, int length,
		const ip_address *addr)
{
	u_char buffer[SHA1_DIGEST_SIZE];
	SHA1_CTX ctx;

	do {
		if (initiator) {
			get_rnd_bytes(cookie, length);
		} else { /* Responder cookie */
			/* This looks as good as any way */
			size_t addr_length;
			static u_int32_t counter = 0;
			/*
			 * Buffer sized for the larger of an IPv4 or IPv6
			 * address.  The union members must be NAMED: the
			 * previous bare "struct in_addr;" declarations
			 * declared no members at all ("declaration does not
			 * declare anything"), leaving the union empty and
			 * addr_buff effectively zero-sized, so the peer
			 * address could never be stored here.
			 */
			unsigned char addr_buff[
				sizeof(union {
					struct in_addr A;
					struct in6_addr B;
				})];

			addr_length = addrbytesof(addr, addr_buff,
						  sizeof(addr_buff));

			SHA1Init(&ctx);
			SHA1Update(&ctx, addr_buff, addr_length);
			SHA1Update(&ctx, secret_of_the_day,
				   sizeof(secret_of_the_day));
			counter++;
			SHA1Update(&ctx, (const void *) &counter,
				   sizeof(counter));
			SHA1Final(buffer, &ctx);
			memcpy(cookie, buffer, length);
		}
	} while (is_zero_cookie(cookie)); /* probably never loops */
}
/*
 * Return a freshly allocated buffer holding nbits of random data,
 * rounded up to whole bytes.  The caller owns the result and must
 * free it.  level and secure are accepted for interface compatibility
 * but unused.
 */
u_char *get_random_bits(size_t nbits, int level UNUSED, int secure UNUSED)
{
	/* round the bit count up to a whole number of bytes */
	size_t nbytes = nbits / 8 + (nbits % 8 == 0 ? 0 : 1);
	u_char *buf = alloc_bytes(nbytes, "random bytes");

	get_rnd_bytes(buf, nbytes);
	return buf;
}
/*
 * Initialize (or re-initialize) the secret used to derive responder
 * cookies, and schedule its periodic refresh.
 */
void init_secret(void)
{
	/*
	 * Generate the secret value for responder cookies, and
	 * schedule an event for refresh.  Regular refresh limits how
	 * long any captured cookie stream stays predictable.
	 */
	get_rnd_bytes(secret_of_the_day, sizeof(secret_of_the_day));
	event_schedule(EVENT_REINIT_SECRET, EVENT_REINIT_SECRET_DELAY, NULL);
}
/*
 * Initialize the random pool.  In this build there is no pool to fill;
 * all that is needed is to seed rand(3) from the strong random source.
 */
void init_rnd_pool(void)
{
	unsigned int rand_seed;

	/* start of rand(3) on the right foot */
	get_rnd_bytes((void *) &rand_seed, sizeof(rand_seed));
	srand(rand_seed);
}
/* * Initialize the random pool. */ void init_rnd_pool(void) { unsigned int i; unsigned int max_rnd_devices = elemsof(random_devices)+1; const char *rnd_dev; if(random_fd != -1) close(random_fd); random_fd = -1; for(i=0; random_fd == -1 && i<max_rnd_devices; i++) { DBG(DBG_CONTROL, DBG_log("opening %s", random_devices[i])); random_fd = open(random_devices[i], O_RDONLY); rnd_dev = random_devices[i]; if (random_fd == -1) { openswan_log("WARNING: open of %s failed: %s", random_devices[i] , strerror(errno)); } } if(random_fd == -1 || i == max_rnd_devices) { openswan_log("Failed to open any source of random. Unable to start any connections."); return; } openswan_log("using %s as source of random entropy", rnd_dev); fcntl(random_fd, F_SETFD, FD_CLOEXEC); get_rnd_bytes(random_pool, RANDOM_POOL_SIZE); mix_pool(); /* start of rand(3) on the right foot */ { unsigned int seed; get_rnd_bytes((void *)&seed, sizeof(seed)); srand(seed); } }
/*
 * Fill the nonce member of a kenonce crypto request with
 * DEFAULT_NONCE_SIZE bytes of fresh random data.
 */
void calc_nonce(struct pluto_crypto_req *r)
{
	struct pcr_kenonce *kenonce = &r->pcr_d.kn;
	void *nonce_ptr;

	/* reserve room for the nonce inside the wire structure */
	pluto_crypto_allocchunk(&kenonce->thespace, &kenonce->n,
				DEFAULT_NONCE_SIZE);

	nonce_ptr = wire_chunk_ptr(kenonce, &(kenonce->n));
	get_rnd_bytes(nonce_ptr, DEFAULT_NONCE_SIZE);

	DBG(DBG_CRYPT,
	    DBG_dump("Generated nonce:\n", nonce_ptr, DEFAULT_NONCE_SIZE));
}
/*
 * Initialize the random pool.
 *
 * With USE_ARC4RANDOM the platform supplies entropy directly and no
 * pool or device setup is done here.  Otherwise the pool is filled
 * from the random source — optionally a device opened from
 * RANDOM_PATH when USE_DEV_RANDOM is set — and then stirred.
 * In every build, rand(3) is seeded from the strong source.
 */
void init_rnd_pool(void)
{
#ifndef USE_ARC4RANDOM
# ifdef USE_DEV_RANDOM
	DBG(DBG_KLIPS, DBG_log("opening %s", RANDOM_PATH));
	random_fd = open(RANDOM_PATH, O_RDONLY);
	if (random_fd == -1)
		/* fatal: without the device we have no entropy source */
		exit_log_errno((e, "open of %s failed in init_rnd_pool()",
				RANDOM_PATH));
	/* don't leak the descriptor into exec'd children */
	fcntl(random_fd, F_SETFD, FD_CLOEXEC);
# endif
	get_rnd_bytes(random_pool, RANDOM_POOL_SIZE);
	mix_pool();
#endif /* !USE_ARC4RANDOM */

	/* start of rand(3) on the right foot */
	{
		unsigned int seed;

		get_rnd_bytes((void *) &seed, sizeof(seed));
		srand(seed);
	}
}
/* * builds a senderNonce attribute */ chunk_t scep_senderNonce_attribute(void) { const size_t nonce_len = 16; u_char nonce_buf[nonce_len]; chunk_t senderNonce = { nonce_buf, nonce_len }; get_rnd_bytes(nonce_buf, nonce_len); return asn1_wrap(ASN1_SEQUENCE, "cm" , ASN1_senderNonce_oid , asn1_wrap(ASN1_SET, "m" , asn1_simple_object(ASN1_OCTET_STRING, senderNonce) ) ); }
/*******************************************************************************
 Function:    calc_ke
 Description: when the modulus length is 1024 bits, hardware can be
              used to accelerate the modular exponentiation
 Inputs:
 Outputs:     none
 Returns:     none
 -------------------------------------------------------------------------------
 Last modification:
   Author:  Li Zhi
   Purpose: performance optimization
   Date:    2011-1-15
*******************************************************************************/
void calc_ke(struct pluto_crypto_req *r)
{
	s32 ret = -1;
	const struct oakley_group_desc *group;
	chunk_t gi;
	struct pcr_kenonce *kn = &r->pcr_d.kn;

	group = lookup_group(kn->oakley_group);

	/* draw the local DH secret (exponent) from the random source */
	pluto_crypto_allocchunk(&kn->thespace, &kn->secret, LOCALSECRETSIZE);
	get_rnd_bytes(wire_chunk_ptr(kn, &(kn->secret)), LOCALSECRETSIZE);

	if (OAKLEY_GROUP_MODP1024 == kn->oakley_group &&
	    (!g_ipsec_device_is_dpx)) {
		/*
		 * The hardware only supports 1024- and 512-bit modular
		 * arithmetic, while IPsec uses at least 768 bits, so the
		 * hardware path is only usable for the 1024-bit group.
		 */
		chunk_t secret_chunk;

		IPSEC_DEBUG(DBG_CRYPT,
			    IPSEC_dbg("hardware calc_ke mod exp, group:%d\n",
				      kn->oakley_group););
void calc_ke(struct pluto_crypto_req *r) { MP_INT mp_g; MP_INT secret; chunk_t gi; struct pcr_kenonce *kn = &r->pcr_d.kn; const struct oakley_group_desc *group; group = lookup_group(kn->oakley_group); pluto_crypto_allocchunk(&kn->thespace , &kn->secret , LOCALSECRETSIZE); get_rnd_bytes(wire_chunk_ptr(kn, &(kn->secret)), LOCALSECRETSIZE); n_to_mpz(&secret, wire_chunk_ptr(kn, &(kn->secret)), LOCALSECRETSIZE); mpz_init(&mp_g); oswcrypto.mod_exp(&mp_g, group->generator, &secret, group->modulus); gi = mpz_to_n(&mp_g, group->bytes); pluto_crypto_allocchunk(&kn->thespace, &kn->gi, gi.len); { char *gip = wire_chunk_ptr(kn, &(kn->gi)); memcpy(gip, gi.ptr, gi.len); } DBG(DBG_CRYPT, DBG_dump("Local DH secret:\n" , wire_chunk_ptr(kn, &(kn->secret)) , LOCALSECRETSIZE); DBG_dump_chunk("Public DH value sent:\n", gi)); /* clean up after ourselves */ mpz_clear(&mp_g); mpz_clear(&secret); freeanychunk(gi); }
/*
 * Pick a random, non-zero message ID that is not already in use by
 * the given ISAKMP SA.  After 100 unsuccessful draws we give up, log
 * the fact, and use the last value drawn anyway (it may be zero or a
 * duplicate, which the log message records).
 */
msgid_t generate_msgid(struct state *isakmp_sa)
{
	int attempts_left = 100;	/* only try so hard for a unique msgid */
	msgid_t msgid;

	passert(IS_ISAKMP_ENCRYPTED(isakmp_sa->st_state));

	do {
		get_rnd_bytes((void *) &msgid, sizeof(msgid));
		if (msgid != 0 && unique_msgid(isakmp_sa, msgid))
			return msgid;	/* usable on the first try, usually */
	} while (--attempts_left != 0);

	libreswan_log(
		"gave up looking for unique msgid; using 0x%08lx",
		(unsigned long) msgid);
	return msgid;
}
/**
 * DPD Out Initiator
 *
 * Decide whether a Dead Peer Detection R_U_THERE probe is due for the
 * SA pair (p1st carries the DPD sequence state; st may be a phase 2
 * state or p1st itself), reschedule the next DPD event, and prepare
 * the probe's sequence number and timeout.
 *
 * @param p2st A state struct that is already in phase2
 * @return void
 */
static void dpd_outI(struct state *p1st, struct state *st, bool eroute_care,
		     deltatime_t delay, deltatime_t timeout)
{
	monotime_t nw;
	monotime_t last;
	deltatime_t nextdelay;
	u_int32_t seqno;

	DBG(DBG_DPD,
	    DBG_log("DPD: processing for state #%lu (\"%s\")",
		    st->st_serialno,
		    st->st_connection->name));

	/* If no DPD, then get out of here */
	if (!st->hidden_variables.st_peer_supports_dpd) {
		DBG(DBG_DPD,
		    DBG_log("DPD: peer does not support dpd"));
		return;
	}

	/* If there is no state, there can be no DPD */
	if (!IS_ISAKMP_SA_ESTABLISHED(p1st->st_state)) {
		DBG(DBG_DPD,
		    DBG_log("DPD: no phase1 state, so no DPD"));
		return;
	}

	/* find out when now is */
	nw = mononow();

	/*
	 * pick least recent activity value, since with multiple phase 2s,
	 * it may well be that one phase 2 is very active, while the other
	 * for some reason, gets stomped upon by some network screw up.
	 *
	 * (this would only happen if the network was sensitive to different
	 * SPI#, since for NAT-T, all traffic should be on the same UDP port.
	 * At worst, this means that we send a bit more traffic then we need
	 * to when there are multiple SAs and one is much less active.
	 *
	 * ??? the code actually picks the most recent. So much for comments.
	 */
	last = !monobefore(p1st->st_last_dpd, st->st_last_dpd) ?
		p1st->st_last_dpd : st->st_last_dpd;

	nextdelay = monotimediff(monotimesum(last, delay), nw);

	/* has there been enough activity of late? */
	if (deltasecs(nextdelay) > 0) {
		/* Yes, just reschedule "phase 2" */
		DBG(DBG_DPD,
		    DBG_log("DPD: not yet time for dpd event: %ld < %ld",
			    (long)nw.mono_secs,
			    (long)(last.mono_secs + deltasecs(delay))));
		event_schedule(EVENT_DPD, deltasecs(nextdelay), st);
		return;
	}

	/* now plan next check time */
	/* ??? this test is nuts: it will always succeed!
	 * (deltasecs(nextdelay) <= 0 at this point, so < 1 always holds)
	 */
	if (deltasecs(nextdelay) < 1)
		nextdelay = delay;

	/*
	 * check the phase 2, if we are supposed to,
	 * and return if it is active recently
	 */
	if (eroute_care && st->hidden_variables.st_nat_traversal == LEMPTY &&
	    !was_eroute_idle(st, delay)) {
		DBG(DBG_DPD,
		    DBG_log("DPD: out event not sent, phase 2 active"));

		/* update phase 2 time stamp only */
		st->st_last_dpd = nw;

		/*
		 * Since there was activity, kill any EVENT_DPD_TIMEOUT that might
		 * be waiting. This can happen when a R_U_THERE_ACK is lost, and
		 * subsequently traffic started flowing over the SA again, and no
		 * more DPD packets are sent to cancel the outstanding DPD timer.
		 */
		if (p1st->st_dpd_event != NULL &&
		    p1st->st_dpd_event->ev_type == EVENT_DPD_TIMEOUT) {
			DBG(DBG_DPD,
			    DBG_log("DPD: deleting p1st DPD event"));
			delete_dpd_event(p1st);
		}

		event_schedule(EVENT_DPD, deltasecs(nextdelay), st);
		return;
	}

	if (st != p1st) {
		/*
		 * reschedule next event, since we cannot do it from the activity
		 * routine.
		 */
		event_schedule(EVENT_DPD, deltasecs(nextdelay), st);
	}

	if (p1st->st_dpd_seqno == 0) {
		/* Get a non-zero random value that has room to grow */
		get_rnd_bytes((u_char *)&p1st->st_dpd_seqno,
			      sizeof(p1st->st_dpd_seqno));
		p1st->st_dpd_seqno &= 0x7fff;
		p1st->st_dpd_seqno++;
	}
	seqno = htonl(p1st->st_dpd_seqno);

	/* make sure that the timeout occurs. We do this before the send,
	 * because the send may fail due to network issues, etc, and
	 * the timeout has to occur anyway
	 */
	dpd_sched_timeout(p1st, nw, timeout);

	DBG(DBG_DPD, {
		ipstr_buf b;
		DBG_log("DPD: sending R_U_THERE %u to %s:%d (state #%lu)",
			p1st->st_dpd_seqno,
			ipstr(&p1st->st_remoteaddr, &b),
			p1st->st_remoteport,
			p1st->st_serialno);
	});
/**
 * DPD Out Initiator
 *
 * Decide whether a Dead Peer Detection R_U_THERE probe is due for the
 * SA pair (p1st carries the DPD sequence state; st may be a phase 2
 * state or p1st itself), reschedule the next DPD event, and send the
 * probe when it is time.
 *
 * @param p2st A state struct that is already in phase2
 * @return void
 */
static void dpd_outI(struct state *p1st, struct state *st, bool eroute_care,
		     time_t delay, time_t timeout)
{
	time_t tm;
	time_t last;
	u_int32_t seqno;
	bool eroute_idle;
	time_t nextdelay;

	DBG(DBG_DPD,
	    DBG_log("processing dpd for state #%lu (\"%s\")",
		    st->st_serialno,
		    st->st_connection->name));

	/* If no DPD, then get out of here */
	if (!st->hidden_variables.st_dpd)
		return;

	/* If there is no state, there can be no DPD */
	if (!IS_ISAKMP_SA_ESTABLISHED(p1st->st_state))
		return;

	/* find out when now is */
	tm = now();

	/*
	 * pick least recent activity value, since with multiple phase 2s,
	 * it may well be that one phase 2 is very active, while the other
	 * for some reason, gets stomped upon by some network screw up.
	 *
	 * (this would only happen if the network was sensitive to different
	 * SPI#, since for NAT-T, all traffic should be on the same UDP port.
	 * At worst, this means that we send a bit more traffic then we need
	 * to when there are multiple SAs and one is much less active.
	 *
	 * NOTE(review): `last` is computed but never used below;
	 * nextdelay is based on p1st->st_last_dpd only.
	 */
	last = (p1st->st_last_dpd > st->st_last_dpd ?
		st->st_last_dpd : p1st->st_last_dpd);

	nextdelay = p1st->st_last_dpd + delay - tm;

	/* has there been enough activity of late? */
	if (nextdelay > 0) {
		/* Yes, just reschedule "phase 2" */
		DBG(DBG_DPD,
		    DBG_log("not yet time for dpd event: %lu < %lu",
			    (unsigned long)tm,
			    (unsigned long)(p1st->st_last_dpd + delay)));
		event_schedule(EVENT_DPD, nextdelay, st);
		return;
	}

	/* now plan next check time
	 * NOTE(review): nextdelay <= 0 here, so this test always fires.
	 */
	if (nextdelay < 1) {
		nextdelay = delay;
	}

	/*
	 * check the phase 2, if we are supposed to,
	 * and return if it is active recently
	 */
	if (eroute_care && !st->hidden_variables.st_nat_traversal) {
		eroute_idle = was_eroute_idle(st, delay);
		if (!eroute_idle) {
			DBG(DBG_DPD,
			    DBG_log("dpd out event not sent, phase 2 active"));

			/* update phase 2 time stamp only */
			st->st_last_dpd = tm;
			event_schedule(EVENT_DPD, nextdelay, st);
			return;
		}
	}

	if (st != p1st) {
		/*
		 * reschedule next event, since we can not do it from the activity
		 * routine.
		 */
		event_schedule(EVENT_DPD, nextdelay, st);
	}

	if (!p1st->st_dpd_seqno) {
		/* Get a non-zero random value that has room to grow */
		get_rnd_bytes((u_char *)&p1st->st_dpd_seqno,
			      sizeof(p1st->st_dpd_seqno));
		p1st->st_dpd_seqno &= 0x7fff;
		p1st->st_dpd_seqno++;
	}
	seqno = htonl(p1st->st_dpd_seqno);

	/* make sure that the timeout occurs. We do this before the send,
	 * because the send may fail due to network issues, etc, and
	 * the timeout has to occur anyway
	 */
	dpd_sched_timeout(p1st, tm, timeout);

	DBG(DBG_DPD,
	    DBG_log("sending R_U_THERE %u to %s:%d (state #%lu)",
		    seqno,
		    ip_str(&p1st->st_remoteaddr),
		    p1st->st_remoteport,
		    p1st->st_serialno));

	if (send_isakmp_notification(p1st, R_U_THERE,
				     &seqno, sizeof(seqno)) != STF_IGNORE) {
		loglog(RC_LOG_SERIOUS, "DPD Error: could not send R_U_THERE");
		return;
	}

	/* record the probe time and the sequence number we expect acked */
	st->st_last_dpd = tm;
	p1st->st_last_dpd = tm;
	p1st->st_dpd_expectseqno = p1st->st_dpd_seqno++;
}
/*
 * Compute the local Diffie-Hellman secret and public value for the
 * group named in the kenonce request.
 *
 * Without HAVE_LIBNSS: draw a random exponent, compute g^x mod p with
 * GMP via oswcrypto.mod_exp, and store secret and public value into
 * the request's wire chunks.
 *
 * With HAVE_LIBNSS: let NSS generate the DH key pair; the wire chunks
 * then carry POINTERS to the NSS private/public key objects (the
 * public value bytes go into kn->gi).  The NSS key objects are
 * intentionally NOT destroyed here — ownership passes to the consumer
 * of the request (see the commented-out Destroy calls at the end).
 */
void calc_ke(struct pluto_crypto_req *r)
{
#ifndef HAVE_LIBNSS
	MP_INT mp_g;
	MP_INT secret;
	chunk_t gi;
#else
	chunk_t prime;
	chunk_t base;
	SECKEYDHParams dhp;
	PK11SlotInfo *slot = NULL;
	SECKEYPrivateKey *privk;
	SECKEYPublicKey *pubk;
#endif
	struct pcr_kenonce *kn = &r->pcr_d.kn;
	const struct oakley_group_desc *group;

	group = lookup_group(kn->oakley_group);

#ifndef HAVE_LIBNSS
	/* draw LOCALSECRETSIZE random bytes for the DH exponent */
	pluto_crypto_allocchunk(&kn->thespace, &kn->secret, LOCALSECRETSIZE);
	get_rnd_bytes(wire_chunk_ptr(kn, &(kn->secret)), LOCALSECRETSIZE);

	n_to_mpz(&secret, wire_chunk_ptr(kn, &(kn->secret)), LOCALSECRETSIZE);
	mpz_init(&mp_g);

	/* g^x mod p; the generator source depends on the MODP table in use */
#ifdef USE_MODP_RFC5114
	oswcrypto.mod_exp(&mp_g, group->generator, &secret, group->modulus);
#else
	oswcrypto.mod_exp(&mp_g, &groupgenerator, &secret, group->modulus);
#endif

	gi = mpz_to_n(&mp_g, group->bytes);

	/* copy the public value into the wire structure */
	pluto_crypto_allocchunk(&kn->thespace, &kn->gi, gi.len);
	{
		char *gip = wire_chunk_ptr(kn, &(kn->gi));
		memcpy(gip, gi.ptr, gi.len);
	}

	DBG(DBG_CRYPT,
	    DBG_dump("Local DH secret:\n",
		     wire_chunk_ptr(kn, &(kn->secret)), LOCALSECRETSIZE);
	    DBG_dump_chunk("Public DH value sent:\n", gi));

	/* clean up after ourselves */
	mpz_clear(&mp_g);
	mpz_clear(&secret);
	freeanychunk(gi);
#else
	/* serialize generator and modulus for NSS's DH parameters */
#ifdef USE_MODP_RFC5114
	base = mpz_to_n2(group->generator);
#else
	base = mpz_to_n2(&groupgenerator);
#endif
	prime = mpz_to_n2(group->modulus);

	DBG(DBG_CRYPT, DBG_dump_chunk("NSS: Value of Prime:\n", prime));
	DBG(DBG_CRYPT, DBG_dump_chunk("NSS: Value of base:\n", base));

	dhp.prime.data = prime.ptr;
	dhp.prime.len = prime.len;
	dhp.base.data = base.ptr;
	dhp.base.len = base.len;

	slot = PK11_GetBestSlot(CKM_DH_PKCS_KEY_PAIR_GEN,
				osw_return_nss_password_file_info());
	if (!slot) {
		loglog(RC_LOG_SERIOUS, "NSS: slot for DH key gen is NULL");
	}
	PR_ASSERT(slot != NULL);

	/* retry until NSS produces a public value of the full group size */
	while (1) {
		privk = PK11_GenerateKeyPair(slot, CKM_DH_PKCS_KEY_PAIR_GEN,
					     &dhp, &pubk, PR_FALSE, PR_TRUE,
					     osw_return_nss_password_file_info());
		if (!privk) {
			loglog(RC_LOG_SERIOUS,
			       "NSS: DH private key creation failed (err %d)",
			       PR_GetError());
		}
		PR_ASSERT(privk != NULL);

		if (group->bytes == pubk->u.dh.publicValue.len) {
			DBG(DBG_CRYPT,
			    DBG_log("NSS: generated dh priv and pub keys: %d\n",
				    pubk->u.dh.publicValue.len));
			break;
		} else {
			/* wrong-length public value: discard pair and retry */
			DBG(DBG_CRYPT,
			    DBG_log("NSS: generating dh priv and pub keys"));
			if (privk)
				SECKEY_DestroyPrivateKey(privk);
			if (pubk)
				SECKEY_DestroyPublicKey(pubk);
		}
	}

	/* store the POINTER to the NSS private key in the wire chunk */
	pluto_crypto_allocchunk(&kn->thespace, &kn->secret,
				sizeof(SECKEYPrivateKey *));
	{
		char *gip = wire_chunk_ptr(kn, &(kn->secret));
		memcpy(gip, &privk, sizeof(SECKEYPrivateKey *));
	}

	/* store the public value bytes */
	pluto_crypto_allocchunk(&kn->thespace, &kn->gi,
				pubk->u.dh.publicValue.len);
	{
		char *gip = wire_chunk_ptr(kn, &(kn->gi));
		memcpy(gip, pubk->u.dh.publicValue.data,
		       pubk->u.dh.publicValue.len);
	}

	/* store the POINTER to the NSS public key */
	pluto_crypto_allocchunk(&kn->thespace, &kn->pubk,
				sizeof(SECKEYPublicKey *));
	{
		char *gip = wire_chunk_ptr(kn, &(kn->pubk));
		memcpy(gip, &pubk, sizeof(SECKEYPublicKey *));
	}

	DBG(DBG_CRYPT,
	    DBG_dump("NSS: Local DH secret:\n",
		     wire_chunk_ptr(kn, &(kn->secret)),
		     sizeof(SECKEYPrivateKey *));
	    DBG_dump("NSS: Public DH value sent(computed in NSS):\n",
		     wire_chunk_ptr(kn, &(kn->gi)),
		     pubk->u.dh.publicValue.len));
	DBG(DBG_CRYPT,
	    DBG_dump("NSS: Local DH public value (pointer):\n",
		     wire_chunk_ptr(kn, &(kn->pubk)),
		     sizeof(SECKEYPublicKey *)));

	/* clean up after ourselves */
	if (slot) {
		PK11_FreeSlot(slot);
	}
	/* key objects deliberately kept alive; consumer owns them now */
	/* if (privk){SECKEY_DestroyPrivateKey(privk);} */
	/* if (pubk){SECKEY_DestroyPublicKey(pubk);} */
	freeanychunk(prime);
	freeanychunk(base);
#endif
}