/*
 * Stir our S-box.
 *
 * Re-keys the ARC4 state in place: runs the RC4 key-scheduling pass over
 * arc4_sbox using a 256-byte scratch key, then schedules the next reseed.
 * NOTE(review): read_random() is compiled out below, so the key is whatever
 * happens to be on the stack — deliberate here, but reading uninitialized
 * memory is formally undefined behavior; confirm this is still intended.
 */
static void
arc4_randomstir (void)
{
	u_int8_t key[256];	/* intentionally NOT initialized; see below */
	int r, n;

	/*
	 * XXX read_random() returns unsafe numbers if the entropy
	 * device is not loaded -- MarkM.
	 */
#if 0
	r = read_random(key, ARC4_KEYBYTES);
#else
	r = 0; /*XXX read_random() disabled; stack garbage is the only "key" */
#endif
	/* If r == 0 || -1, just use what was on the stack. */
	if (r > 0) {
		/* Got r good bytes; tile them to fill the rest of the key. */
		for (n = r; n < sizeof(key); n++)
			key[n] = key[n % r];
	}

	/* RC4 KSA: one pass over the S-box, mixing in the key. */
	for (n = 0; n < 256; n++) {
		arc4_j = (arc4_j + arc4_sbox[n] + key[n]) % 256;
		arc4_swap(&arc4_sbox[n], &arc4_sbox[arc4_j]);
	}

	/* Reset for next reseed cycle. */
	microtime(&arc4_tv_nextreseed);
	arc4_tv_nextreseed.tv_sec += ARC4_RESEED_SECONDS;
	arc4_numruns = 0;
}
/* * Stir our S-box. */ static void arc4_randomstir(void) { u_int8_t key[ARC4_KEYBYTES]; int n; struct timeval tv_now; /* * XXX: FIX!! This isn't brilliant. Need more confidence. * This returns zero entropy before random(4) is seeded. */ (void)read_random(key, ARC4_KEYBYTES); getmicrouptime(&tv_now); mtx_lock(&arc4_mtx); for (n = 0; n < 256; n++) { arc4_j = (arc4_j + arc4_sbox[n] + key[n]) % 256; arc4_swap(&arc4_sbox[n], &arc4_sbox[arc4_j]); } arc4_i = arc4_j = 0; /* Reset for next reseed cycle. */ arc4_t_reseed = tv_now.tv_sec + ARC4_RESEED_SECONDS; arc4_numruns = 0; /* * Throw away the first N words of output, as suggested in the * paper "Weaknesses in the Key Scheduling Algorithm of RC4" * by Fluher, Mantin, and Shamir. (N = 256 in our case.) * * http://dl.acm.org/citation.cfm?id=646557.694759 */ for (n = 0; n < 256*4; n++) arc4_randbyte(); mtx_unlock(&arc4_mtx); }
/* * Generate a random byte. */ static u_int8_t arc4_randbyte(void) { u_int8_t arc4_t; arc4_i = (arc4_i + 1) % 256; arc4_j = (arc4_j + arc4_sbox[arc4_i]) % 256; arc4_swap(&arc4_sbox[arc4_i], &arc4_sbox[arc4_j]); arc4_t = (arc4_sbox[arc4_i] + arc4_sbox[arc4_j]) % 256; return arc4_sbox[arc4_t]; }
/*
 * Stir our S-box.
 *
 * Re-keys the ARC4 state under arc4_mtx: reads up to ARC4_KEYBYTES of
 * entropy, tiles (or leaves as stack garbage) the rest of the 256-byte
 * key, runs the RC4 key-scheduling pass over arc4_sbox, schedules the
 * next reseed, and discards the first 1024 keystream bytes.
 * NOTE(review): when r <= 0 the key is uninitialized stack memory —
 * deliberate per the comment below, but formally undefined behavior.
 */
static void
arc4_randomstir (void)
{
	u_int8_t key[256];	/* intentionally not pre-initialized */
	int r, n;
	struct timeval tv_now;

	/*
	 * XXX read_random() returns unsafe numbers if the entropy
	 * device is not loaded -- MarkM.
	 */
	r = read_random(key, ARC4_KEYBYTES);
	getmicrouptime(&tv_now);

	mtx_lock(&arc4_mtx);
	/* If r == 0 || -1, just use what was on the stack. */
	if (r > 0) {
		/* Got r good bytes; tile them to fill the rest of the key. */
		for (n = r; n < sizeof(key); n++)
			key[n] = key[n % r];
	}

	/* RC4 KSA: one pass over the S-box, mixing in the key. */
	for (n = 0; n < 256; n++) {
		arc4_j = (arc4_j + arc4_sbox[n] + key[n]) % 256;
		arc4_swap(&arc4_sbox[n], &arc4_sbox[arc4_j]);
	}

	/* Reset for next reseed cycle. */
	arc4_t_reseed = tv_now.tv_sec + ARC4_RESEED_SECONDS;
	arc4_numruns = 0;

	/*
	 * Throw away the first N bytes of output, as suggested in the
	 * paper "Weaknesses in the Key Scheduling Algorithm of RC4"
	 * by Fluhrer, Mantin, and Shamir.  (N = 1024 in our case.)
	 */
	for (n = 0; n < 256 * 4; n++)
		arc4_randbyte();
	mtx_unlock(&arc4_mtx);
}