/*
 * MPSAFE
 */
void
arc4rand(void *ptr, u_int len, int reseed)
{
	u_char *p;
	struct timeval tv;

	getmicrotime(&tv);
	if (reseed ||
	    (arc4_numruns > ARC4_RESEED_BYTES) ||
	    (tv.tv_sec > arc4_t_reseed))
		arc4_randomstir();

	mtx_lock(&arc4_mtx);
	arc4_numruns += len;
	p = ptr;
	while (len--)
		*p++ = arc4_randbyte();
	mtx_unlock(&arc4_mtx);
}
/*
 * MPSAFE
 */
void
arc4rand(void *ptr, u_int len, int reseed)
{
	u_int8_t *p;
	struct timeval tv;

	GETKTIME(&tv);
	if (reseed ||
	    (arc4_numruns > ARC4_RESEED_BYTES) ||
	    (tv.tv_sec > arc4_t_reseed))
		arc4_randomstir();

	MUTEX_ENTER(&arc4_mtx);
	arc4_numruns += len;
	p = ptr;
	while (len--)
		*p++ = arc4_randbyte();
	MUTEX_EXIT(&arc4_mtx);
}
/*
 * MPSAFE
 */
void
arc4rand(void *ptr, u_int len, int reseed)
{
	u_char *p;
	struct timeval tv;

	getmicrouptime(&tv);
	if (atomic_cmpset_int(&arc4rand_iniseed_state,
	    ARC4_ENTR_HAVE, ARC4_ENTR_SEED) ||
	    reseed ||
	    (arc4_numruns > ARC4_RESEED_BYTES) ||
	    (tv.tv_sec > arc4_t_reseed))
		arc4_randomstir();

	mtx_lock(&arc4_mtx);
	arc4_numruns += len;
	p = ptr;
	while (len--)
		*p++ = arc4_randbyte();
	mtx_unlock(&arc4_mtx);
}
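/*
 * Illustrative only, not part of the excerpted sources: a hypothetical
 * caller that draws key material through the arc4rand() shown above.
 * "example_keybuf" and "example_newkey" are made-up names for this sketch.
 */
static u_char example_keybuf[32];

static void
example_newkey(void)
{
	/* reseed = 0: let arc4rand() apply its own reseeding policy. */
	arc4rand(example_keybuf, sizeof(example_keybuf), 0);
}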
/*
 * Initialize our S-box to its beginning defaults.
 */
static void
arc4_init(void)
{
	int n;

	arc4_i = arc4_j = 0;
	for (n = 0; n < 256; n++)
		arc4_sbox[n] = (u_int8_t) n;

	arc4_randomstir();
	arc4_initialized = 1;

	/*
	 * Throw away the first N words of output, as suggested in the
	 * paper "Weaknesses in the Key Scheduling Algorithm of RC4"
	 * by Fluhrer, Mantin, and Shamir.  (N = 256 in our case.)
	 */
	for (n = 0; n < 256*4; n++)
		arc4_randbyte();
}
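/*
 * arc4_randbyte() itself is not shown in this excerpt.  The sketch below
 * assumes the standard RC4 output (PRGA) step over the arc4_i, arc4_j and
 * arc4_sbox state referenced above; the exact code in each source tree
 * may differ in minor details.
 */
static u_int8_t
arc4_randbyte(void)
{
	u_int8_t arc4_t;

	/* Advance the two RC4 indices. */
	arc4_i = (arc4_i + 1) % 256;
	arc4_j = (arc4_j + arc4_sbox[arc4_i]) % 256;

	/* Swap the two selected S-box entries. */
	arc4_t = arc4_sbox[arc4_i];
	arc4_sbox[arc4_i] = arc4_sbox[arc4_j];
	arc4_sbox[arc4_j] = arc4_t;

	/* The output byte is taken at the sum of the swapped entries. */
	arc4_t = (arc4_sbox[arc4_i] + arc4_sbox[arc4_j]) % 256;
	return (arc4_sbox[arc4_t]);
}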
u_int32_t
arc4random(void)
{
	u_int32_t ret;
	struct timeval tv_now;

	/* Initialize array if needed. */
	if (!arc4_initialized)
		arc4_init();

	microtime(&tv_now);
	if ((++arc4_numruns > ARC4_MAXRUNS) ||
	    (tv_now.tv_sec > arc4_tv_nextreseed.tv_sec)) {
		arc4_randomstir();
	}

	ret = arc4_randbyte();
	ret |= arc4_randbyte() << 8;
	ret |= arc4_randbyte() << 16;
	ret |= arc4_randbyte() << 24;
	return ret;
}
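/*
 * Illustrative only: a bounded draw built on the arc4random() above.  The
 * plain modulo used here is slightly biased whenever "bound" does not
 * divide 2^32 evenly; it is kept short for the sketch and is not taken
 * from the sources above.  "bound" must be nonzero.
 */
static u_int32_t
example_random_below(u_int32_t bound)
{
	return (arc4random() % bound);
}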