/*
 * Stir our S-box.
 */
static void
arc4_randomstir(void)
{
	u_int8_t key[ARC4_KEYBYTES];
	int n;
	struct timeval tv_now;

	/*
	 * XXX: FIX!! This isn't brilliant. Need more confidence.
	 * This returns zero entropy before random(4) is seeded.
	 */
	(void)read_random(key, ARC4_KEYBYTES);
	getmicrouptime(&tv_now);
	mtx_lock(&arc4_mtx);
	for (n = 0; n < 256; n++) {
		/* Cycle through the key: it is only ARC4_KEYBYTES long. */
		arc4_j = (arc4_j + arc4_sbox[n] + key[n % ARC4_KEYBYTES]) % 256;
		arc4_swap(&arc4_sbox[n], &arc4_sbox[arc4_j]);
	}
	arc4_i = arc4_j = 0;
	/* Reset for next reseed cycle. */
	arc4_t_reseed = tv_now.tv_sec + ARC4_RESEED_SECONDS;
	arc4_numruns = 0;
	/*
	 * Throw away the first N words of output, as suggested in the
	 * paper "Weaknesses in the Key Scheduling Algorithm of RC4"
	 * by Fluhrer, Mantin, and Shamir.  (N = 1024 in our case.)
	 *
	 * http://dl.acm.org/citation.cfm?id=646557.694759
	 */
	for (n = 0; n < 256 * 4; n++)
		arc4_randbyte();
	mtx_unlock(&arc4_mtx);
}
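The stir routine above calls two helpers, arc4_swap() and arc4_randbyte(), and manipulates module-level state that this excerpt does not show. As a point of reference, here is a minimal sketch of those pieces, reconstructed from the calls above; the exact declarations vary by revision, so treat the types and initializers as assumptions.

/* Module state assumed by the routines in this file (sketch). */
static u_int8_t		arc4_i, arc4_j;
static u_int8_t		arc4_sbox[256];
static time_t		arc4_t_reseed;
static int		arc4_numruns = 0;
static int		arc4_initialized = 0;
static struct mtx	arc4_mtx;

/* Exchange two S-box entries. */
static __inline void
arc4_swap(u_int8_t *a, u_int8_t *b)
{
	u_int8_t c;

	c = *a;
	*a = *b;
	*b = c;
}

/*
 * Generate one output byte: the standard RC4 PRGA step over the
 * shared S-box.  Callers are expected to hold arc4_mtx.
 */
static u_int8_t
arc4_randbyte(void)
{
	u_int8_t arc4_t;

	arc4_i = (arc4_i + 1) % 256;
	arc4_j = (arc4_j + arc4_sbox[arc4_i]) % 256;
	arc4_swap(&arc4_sbox[arc4_i], &arc4_sbox[arc4_j]);
	arc4_t = arc4_sbox[(arc4_sbox[arc4_i] + arc4_sbox[arc4_j]) % 256];
	return arc4_t;
}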
u_int32_t
arc4random(void)
{
	u_int32_t ret;
	struct timeval tv_now;

	/* Initialize array if needed. */
	if (!arc4_initialized)
		arc4_init();

	microtime(&tv_now);
	if ((++arc4_numruns > ARC4_MAXRUNS) ||
	    (tv_now.tv_sec > arc4_tv_nextreseed.tv_sec)) {
		arc4_randomstir();
	}

	/*
	 * Assemble four output bytes, least significant first.  Cast
	 * before shifting so the high byte is not shifted into the
	 * sign bit of a promoted int.
	 */
	ret = arc4_randbyte();
	ret |= (u_int32_t)arc4_randbyte() << 8;
	ret |= (u_int32_t)arc4_randbyte() << 16;
	ret |= (u_int32_t)arc4_randbyte() << 24;

	return ret;
}
/*
 * MPSAFE
 */
void
arc4rand(void *ptr, u_int len, int reseed)
{
	u_char *p;
	struct timeval tv;

	getmicrotime(&tv);
	if (reseed || (arc4_numruns > ARC4_RESEED_BYTES) ||
	    (tv.tv_sec > arc4_t_reseed))
		arc4_randomstir();

	mtx_lock(&arc4_mtx);
	arc4_numruns += len;
	p = ptr;
	while (len--)
		*p++ = arc4_randbyte();
	mtx_unlock(&arc4_mtx);
}
/*
 * MPSAFE
 */
void
arc4rand(void *ptr, u_int len, int reseed)
{
	u_int8_t *p;
	struct timeval tv;

	GETKTIME(&tv);
	if (reseed || (arc4_numruns > ARC4_RESEED_BYTES) ||
	    (tv.tv_sec > arc4_t_reseed))
		arc4_randomstir();

	MUTEX_ENTER(&arc4_mtx);
	arc4_numruns += len;
	p = ptr;
	while (len--)
		*p++ = arc4_randbyte();
	MUTEX_EXIT(&arc4_mtx);
}
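GETKTIME, MUTEX_ENTER, and MUTEX_EXIT in this variant come from a portability layer rather than from the kernel proper; their real definitions live in the port's compatibility headers. Purely for illustration, on a FreeBSD target they could plausibly be shimmed as follows (hypothetical mappings, not the actual headers):

/* Hypothetical expansions for a FreeBSD target; the actual
 * definitions are platform-specific and supplied by the port. */
#define	GETKTIME(tv)		getmicrouptime(tv)
#define	MUTEX_ENTER(mtx)	mtx_lock(mtx)
#define	MUTEX_EXIT(mtx)		mtx_unlock(mtx)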
/*
 * MPSAFE
 */
void
arc4rand(void *ptr, u_int len, int reseed)
{
	u_char *p;
	struct timeval tv;

	getmicrouptime(&tv);
	if (atomic_cmpset_int(&arc4rand_iniseed_state, ARC4_ENTR_HAVE,
	    ARC4_ENTR_SEED) || reseed ||
	    (arc4_numruns > ARC4_RESEED_BYTES) ||
	    (tv.tv_sec > arc4_t_reseed))
		arc4_randomstir();

	mtx_lock(&arc4_mtx);
	arc4_numruns += len;
	p = ptr;
	while (len--)
		*p++ = arc4_randbyte();
	mtx_unlock(&arc4_mtx);
}
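With an MPSAFE arc4rand() available, the byte-assembly loop in the earlier arc4random() becomes redundant: the word generator can be a thin wrapper that asks arc4rand() for four bytes, which is how later FreeBSD revisions express it. A sketch:

u_int32_t
arc4random(void)
{
	u_int32_t ret;

	/* Let arc4rand() handle init, reseed checks, and locking. */
	arc4rand(&ret, sizeof(ret), 0);
	return (ret);
}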
/*
 * Initialize our S-box to its beginning defaults.
 */
static void
arc4_init(void)
{
	int n;

	arc4_i = arc4_j = 0;
	for (n = 0; n < 256; n++)
		arc4_sbox[n] = (u_int8_t) n;

	arc4_randomstir();
	arc4_initialized = 1;

	/*
	 * Throw away the first N words of output, as suggested in the
	 * paper "Weaknesses in the Key Scheduling Algorithm of RC4"
	 * by Fluhrer, Mantin, and Shamir.  (N = 1024 in our case.)
	 */
	for (n = 0; n < 256 * 4; n++)
		arc4_randbyte();
}
/*
 * Stir our S-box.
 */
static void
arc4_randomstir(void)
{
	u_int8_t key[256];
	int r, n;
	struct timeval tv_now;

	/*
	 * XXX read_random() returns unsafe numbers if the entropy
	 * device is not loaded -- MarkM.
	 */
	r = read_random(key, ARC4_KEYBYTES);
	getmicrouptime(&tv_now);
	mtx_lock(&arc4_mtx);
	/* If r == 0 || -1, just use what was on the stack. */
	if (r > 0) {
		/* Tile the r key bytes we got across the full buffer. */
		for (n = r; n < (int)sizeof(key); n++)
			key[n] = key[n % r];
	}
	for (n = 0; n < 256; n++) {
		arc4_j = (arc4_j + arc4_sbox[n] + key[n]) % 256;
		arc4_swap(&arc4_sbox[n], &arc4_sbox[arc4_j]);
	}
	/* Reset for next reseed cycle. */
	arc4_t_reseed = tv_now.tv_sec + ARC4_RESEED_SECONDS;
	arc4_numruns = 0;
	/*
	 * Throw away the first N words of output, as suggested in the
	 * paper "Weaknesses in the Key Scheduling Algorithm of RC4"
	 * by Fluhrer, Mantin, and Shamir.  (N = 1024 in our case.)
	 */
	for (n = 0; n < 256 * 4; n++)
		arc4_randbyte();
	mtx_unlock(&arc4_mtx);
}
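The tiling fallback in this version guarantees that the key-scheduling loop always sees 256 key bytes even when read_random() comes up short: byte n of the effective key is key[n % r]. A tiny standalone demonstration of the same tiling (hypothetical user-space code, not part of the kernel source):

#include <stdio.h>

int
main(void)
{
	unsigned char key[256] = { 0xde, 0xad, 0xbe, 0xef };
	int r = 4, n;	/* pretend read_random() returned 4 bytes */

	/* Same tiling as the kernel fallback above. */
	for (n = r; n < (int)sizeof(key); n++)
		key[n] = key[n % r];

	printf("key[250] = 0x%02x\n", key[250]);	/* 250 % 4 == 2 -> 0xbe */
	return 0;
}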