static int procfs_open(struct inode *inode, struct file *file)
{
    int ret;
    int i;

    mutex_lock(&kinterval_lock);
    for (i = 0; i < 1000; i++) {
        int start, end, type;

        start = get_random_int() % 10000;
        end = get_random_int() % 10000;
        type = get_random_int() % 2;

        if (i & 1)
            kinterval_add(&kinterval_tree, min(start, end),
                          max(start, end), type, GFP_KERNEL);
        else
            kinterval_del(&kinterval_tree, min(start, end),
                          max(start, end), GFP_KERNEL);
    }
    ret = single_open(file, procfs_read, NULL);
    if (ret < 0)
        kinterval_clear(&kinterval_tree);
    mutex_unlock(&kinterval_lock);

    return ret;
}

static int __init test_hexdump_init(void)
{
    unsigned int i;
    int rowsize;

    rowsize = (get_random_int() % 2 + 1) * 16;
    for (i = 0; i < 16; i++)
        test_hexdump_set(rowsize, false);

    rowsize = (get_random_int() % 2 + 1) * 16;
    for (i = 0; i < 16; i++)
        test_hexdump_set(rowsize, true);

    for (i = 0; i <= TEST_HEXDUMP_BUF_SIZE; i++)
        test_hexdump_overflow_set(i, false);

    for (i = 0; i <= TEST_HEXDUMP_BUF_SIZE; i++)
        test_hexdump_overflow_set(i, true);

    if (failed_tests == 0)
        pr_info("all %u tests passed\n", total_tests);
    else
        pr_err("failed %u out of %u tests\n", failed_tests, total_tests);

    return failed_tests ? -EINVAL : 0;
}

/*******************************************************************************
 * FUNCTION : particle_update
 * PURPOSE  : update the particles' properties to be rendered in the next frame
 * INPUT    : struct particle **head. Head of the particle list
 * OUTPUT   : returns -1 on error, 0 on success
 * NOTES    :
 ******************************************************************************/
int particle_update(struct particle **head)
{
    struct particle *temp = *head;

    while (temp != NULL) {
        /* Save the next node first: particle_remove() may free this one,
         * so reading temp->next after removal would be a use-after-free. */
        struct particle *next = temp->next;

        temp->col.r = get_random_int(2);
        temp->col.g = get_random_int(2);
        temp->col.b = get_random_int(2);

        temp->pos.x += (temp->dir.x * temp->spd.x);
        temp->pos.y += (temp->dir.y * temp->spd.y);

        temp->lifespan -= DELTA_LIFE_SPAN;
        if (temp->lifespan < 0) {
            temp->col.a = 0;
            particle_remove(temp);
        }
        temp = next;
    }
    return 0;
}

void lowest_random(int *move, int dist_from_border, double threshold)
{
    int i, j, random;
    int k = 0;                  /* was uninitialized: index of the next free
                                 * slot in possible[], must start at 0 */
    int possible[20000] = {-1};
    int found = 0;

    for (i = dist_from_border; i < BOARD_SIZE - dist_from_border; i++) {
        found = 0;
        for (j = dist_from_border; j < BOARD_SIZE - dist_from_border; j++) {
            if (abs(board[i][j]) < threshold) {
                possible[k] = i;
                possible[k + 1] = j;
                k += 2;
                j += 24;        /* space out candidates within a row */
                found = 1;
            }
        }
        if (found)
            i += 24;            /* space out candidate rows */
    }

    random = get_random_int();
    random = random % k;
    while (possible[random] == -1) {
        random = get_random_int();
        random = random % k;
    }
    /* Entries are stored as (row, col) pairs; snap to the pair's start. */
    if (random % 2 == 1)
        random = random - 1;

    move[0] = possible[random];
    move[1] = possible[random + 1];
}

void initHQPlanets(game* theGame)
{
    // Make sure that the initial planets are at least half a galaxy apart
    galaxy* theGalaxy = theGame->getGalaxy();
    std::pair<double, double> Xbounds = theGalaxy->getXBounds();
    std::pair<double, double> Ybounds = theGalaxy->getYBounds();
    double xdist = Xbounds.second - Xbounds.first;
    double ydist = Ybounds.second - Ybounds.first;
    double diagonal = sqrt(pow(xdist, 2.0) + pow(ydist, 2.0));
    uint32_t numplanets = theGalaxy->getNumPlanets();
    std::vector<std::vector<double> > dist_matrix = theGalaxy->getDistMatrix();

    while (true) {
        uint32_t test1 = get_random_int(0, numplanets);
        uint32_t test2 = get_random_int(0, numplanets);
        double loc_dist = dist_matrix[test1][test2];
        // The equality test is probably redundant
        if ((loc_dist > (diagonal / 2.0)) && (test1 != test2)) {
            // The planets are far enough apart; assign them to the HQs
            player* player1 = theGame->getPlayer(1);
            player* player2 = theGame->getPlayer(2);
            player1->addToPlanetsOwned(test1);
            player2->addToPlanetsOwned(test2);
            break;
        }
    }
}

/*******************************************************************************
 * FUNCTION : particle_init
 * PURPOSE  : initialize the properties of a single particle
 * INPUT    : pointer to the particle structure to be initialized
 * OUTPUT   : returns -1 on error, 0 on success
 * NOTES    :
 ******************************************************************************/
int particle_init(struct particle *p)
{
    int direction = get_random_int(8);

    /* set the particle's properties */
    p->col.r = 0;
    p->col.g = 0;
    p->col.b = 0;
    p->col.a = 1.0;
    p->pos.x = 0;
    p->pos.y = 0;
    p->spd.x = 0.1 * (get_random_int(8) + 1);
    p->spd.y = 0.1 * (get_random_int(8) + 1);

    /* one of the eight compass directions */
    switch (direction) {
    case 0: p->dir.x =  0.5; p->dir.y =  0;   break;
    case 1: p->dir.x = -0.5; p->dir.y =  0;   break;
    case 2: p->dir.x =  0;   p->dir.y =  0.5; break;
    case 3: p->dir.x =  0;   p->dir.y = -0.5; break;
    case 4: p->dir.x =  0.5; p->dir.y = -0.5; break;
    case 5: p->dir.x = -0.5; p->dir.y =  0.5; break;
    case 6: p->dir.x =  0.5; p->dir.y =  0.5; break;
    case 7: p->dir.x = -0.5; p->dir.y = -0.5; break;
    }

    p->lifespan = get_random_int(100) + 100;
    p->size = 10;

    return 0;
}

// Pick a random legal move on a square we don't own
void decent_random(int *move)
{
    move[0] = get_random_int();
    move[1] = get_random_int();
    while (do_move(move) < 0 || board[move[0]][move[1]] > 0) {
        move[0] = get_random_int();
        move[1] = get_random_int();
    }
    undo_move(move);
}

unsigned long arch_mmap_rnd(void)
{
    unsigned long rnd;

    /* 8MB for 32bit, 1GB for 64bit */
    if (is_32bit_task())
        rnd = (unsigned long)get_random_int() % (1<<(23-PAGE_SHIFT));
    else
        rnd = (unsigned long)get_random_int() % (1<<(30-PAGE_SHIFT));

    return rnd << PAGE_SHIFT;
}

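A quick check of the arithmetic behind the "8MB for 32bit, 1GB for 64bit" comment: rnd is a page count, so shifting it by PAGE_SHIFT turns a bound of 1 << (23 - PAGE_SHIFT) pages into a byte span of 1 << 23 = 8 MiB (and 1 << 30 = 1 GiB in the 64-bit case). A minimal standalone sketch, assuming the common PAGE_SHIFT of 12 (4 KiB pages):

#include <assert.h>

#define PAGE_SHIFT 12   /* assumption: 4 KiB pages */

int main(void)
{
    /* Maximum byte span produced by rnd << PAGE_SHIFT for each case. */
    unsigned long span32 = (1UL << (23 - PAGE_SHIFT)) << PAGE_SHIFT;
    unsigned long span64 = (1UL << (30 - PAGE_SHIFT)) << PAGE_SHIFT;

    assert(span32 == 8UL << 20);  /* 8 MiB */
    assert(span64 == 1UL << 30);  /* 1 GiB */
    return 0;
}
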
static void __init test_hexdump_overflow_set(size_t buflen, bool ascii)
{
    unsigned int i = 0;
    int rs = (get_random_int() % 2 + 1) * 16;

    do {
        int gs = 1 << i;
        size_t len = get_random_int() % rs + gs;

        test_hexdump_overflow(buflen, rounddown(len, gs), rs, gs, ascii);
    } while (i++ < 3);
}

static unsigned long mmap_rnd(void)
{
    unsigned long rnd = 0;

    if (current->flags & PF_RANDOMIZE) {
        if (mmap_is_ia32())
            rnd = get_random_int() % (1<<8);
        else
            rnd = get_random_int() % (1<<28);
    }
    return rnd << PAGE_SHIFT;
}

int do_move_random(int *move)
{
    int score;

    move[0] = get_random_int();
    move[1] = get_random_int();
    while ((score = do_move(move)) < 0) {
        move[0] = get_random_int();
        move[1] = get_random_int();
    }
    return score;
}

/*
 * Since get_random_int() returns the same value within a 1 jiffy window,
 * we will almost always get the same randomisation for the stack and mmap
 * region. This will mean the relative distance between stack and mmap will
 * be the same.
 *
 * To avoid this we can shift the randomness by 1 bit.
 */
static unsigned long mmap_rnd(void)
{
    unsigned long rnd = 0;

    if (current->flags & PF_RANDOMIZE) {
        /* 8MB for 32bit, 1GB for 64bit */
        if (is_32bit_task())
            rnd = (long)(get_random_int() % (1<<(22-PAGE_SHIFT)));
        else
            rnd = (long)(get_random_int() % (1<<(29-PAGE_SHIFT)));
    }
    return (rnd << PAGE_SHIFT) * 2;
}

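The shift trick in the comment is easiest to see with concrete numbers: if the stack and mmap randomisation draw the same raw value within one jiffy, the stack offset would be rnd << PAGE_SHIFT while this function returns (rnd << PAGE_SHIFT) * 2, so the two offsets, and hence the stack-to-mmap distance, still vary with rnd (note the bound is also one bit smaller, 22/29 instead of 23/30, so the doubled span stays at 8MB/1GB). A minimal sketch, with a hypothetical draw() standing in for get_random_int() and PAGE_SHIFT assumed to be 12:

#include <stdio.h>

#define PAGE_SHIFT 12   /* assumption: 4 KiB pages */

/* Hypothetical stand-in: both callers see the same value in one jiffy. */
static unsigned long draw(void) { return 0x3f; }

int main(void)
{
    unsigned long stack_off = draw() << PAGE_SHIFT;        /* stack ASLR */
    unsigned long mmap_off = (draw() << PAGE_SHIFT) * 2;   /* mmap ASLR  */

    /* Even with identical draws, the offsets differ, so the relative
     * stack-to-mmap distance still varies with the random value. */
    printf("stack=%#lx mmap=%#lx delta=%#lx\n",
           stack_off, mmap_off, mmap_off - stack_off);
    return 0;
}
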
unsigned long arch_mmap_rnd(void)
{
    unsigned long rnd;

    /*
     * 8 bits of randomness in 32bit mmaps, 20 address space bits
     * 28 bits of randomness in 64bit mmaps, 40 address space bits
     */
    if (mmap_is_ia32())
        rnd = (unsigned long)get_random_int() % (1<<8);
    else
        rnd = (unsigned long)get_random_int() % (1<<28);

    return rnd << PAGE_SHIFT;
}

/* Clause 4.6.2.3 */
void get_n_string(char *n_string, int x, int y)
{
    int length;
    int i;

    /* Pick a length uniformly from [x + 1, y + 1]; placing the terminator
     * at length - 1 makes the resulting strlen uniform over [x, y]. */
    length = x + get_random_int(y - x + 1) + 1;
    n_string[length - 1] = '\0';
    for (i = 0; i < length - 1; i++)
        n_string[i] = n_string_char[get_random_int(N_STRING_CHAR_LEN)];

    return;
}

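A usage sketch under stated assumptions: get_random_int(n) is taken to return a value in [0, n), and n_string_char / N_STRING_CHAR_LEN come from the surrounding code, so hypothetical stand-ins are defined here. Since the strlen of the result is uniform over [x, y], the caller's buffer needs y + 1 bytes:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Hypothetical stand-ins for the surrounding definitions. */
#define N_STRING_CHAR_LEN 26
static const char n_string_char[] = "abcdefghijklmnopqrstuvwxyz";
static int get_random_int(int n) { return rand() % n; }

static void get_n_string(char *n_string, int x, int y)
{
    int length = x + get_random_int(y - x + 1) + 1;
    int i;

    n_string[length - 1] = '\0';
    for (i = 0; i < length - 1; i++)
        n_string[i] = n_string_char[get_random_int(N_STRING_CHAR_LEN)];
}

int main(void)
{
    char buf[9];                                 /* max strlen 8 plus '\0' */

    get_n_string(buf, 3, 8);
    printf("len=%zu s=%s\n", strlen(buf), buf);  /* strlen in [3, 8] */
    return 0;
}
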
static unsigned long mmap_rnd(void)
{
    unsigned long rnd = 0;

    /*
     * 8 bits of randomness in 32bit mmaps, 20 address space bits
     * 28 bits of randomness in 64bit mmaps, 40 address space bits
     */
    if (current->flags & PF_RANDOMIZE) {
        if (mmap_is_ia32())
            rnd = get_random_int() % (1<<8);
        else
            rnd = get_random_int() % (1<<28);
    }
    return rnd << PAGE_SHIFT;
}

static struct task_struct *dup_task_struct(struct task_struct *orig)
{
    struct task_struct *tsk;
    struct thread_info *ti;

    prepare_to_copy(orig);

    tsk = alloc_task_struct();
    if (!tsk)
        return NULL;

    ti = alloc_thread_info(tsk);
    if (!ti) {
        free_task_struct(tsk);
        return NULL;
    }

    *tsk = *orig;
    tsk->stack = ti;
    setup_thread_stack(tsk, orig);

#ifdef CONFIG_CC_STACKPROTECTOR
    tsk->stack_canary = get_random_int();
#endif

    /* One for us, one for whoever does the "release_task()" (usually parent) */
    atomic_set(&tsk->usage, 2);
    atomic_set(&tsk->fs_excl, 0);
#ifdef CONFIG_BLK_DEV_IO_TRACE
    tsk->btrace_seq = 0;
#endif
    tsk->splice_pipe = NULL;
    return tsk;
}

/*
 * Don't forget that the stack pointer must be aligned on an 8-byte
 * boundary for the 32-bit ABI and on a 16-byte boundary for the 64-bit ABI.
 */
unsigned long arch_align_stack(unsigned long sp)
{
    if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
        sp -= get_random_int() & ~PAGE_MASK;

    return sp & ALMASK;
}

static unsigned long mmap_rnd(void)
{
    if (!(current->flags & PF_RANDOMIZE))
        return 0;

    /* 8MB randomization for mmap_base */
    return (get_random_int() & 0x7ffUL) << PAGE_SHIFT;
}

static struct task_struct *dup_task_struct(struct task_struct *orig)
{
    struct task_struct *tsk;
    struct thread_info *ti;
    int node = tsk_fork_get_node(orig);
    int err;

    tsk = alloc_task_struct_node(node);
    if (!tsk)
        return NULL;

    ti = alloc_thread_info_node(tsk, node);
    if (!ti)
        goto free_tsk;

    err = arch_dup_task_struct(tsk, orig);
    if (err)
        goto free_ti;

    tsk->stack = ti;
#ifdef CONFIG_SECCOMP
    /*
     * We must handle setting up seccomp filters once we're under
     * the sighand lock in case orig has changed between now and
     * then. Until then, filter must be NULL to avoid messing up
     * the usage counts on the error path calling free_task.
     */
    tsk->seccomp.filter = NULL;
#endif

    setup_thread_stack(tsk, orig);
    clear_user_return_notifier(tsk);
    clear_tsk_need_resched(tsk);
    set_task_stack_end_magic(tsk);

#ifdef CONFIG_CC_STACKPROTECTOR
    tsk->stack_canary = get_random_int();
#endif

    /*
     * One for us, one for whoever does the "release_task()" (usually
     * parent)
     */
    atomic_set(&tsk->usage, 2);
#ifdef CONFIG_BLK_DEV_IO_TRACE
    tsk->btrace_seq = 0;
#endif
    tsk->splice_pipe = NULL;
    tsk->task_frag.page = NULL;

    account_kernel_stack(ti, 1);

    return tsk;

free_ti:
    free_thread_info(ti);
free_tsk:
    free_task_struct(tsk);
    return NULL;
}

struct page *get_signal_page(void)
{
    unsigned long ptr;
    unsigned offset;
    struct page *page;
    void *addr;

    page = alloc_pages(GFP_KERNEL, 0);
    if (!page)
        return NULL;

    addr = page_address(page);

    /* Give the signal return code some randomness */
    offset = 0x200 + (get_random_int() & 0x7fc);

    signal_return_offset = offset;

    /*
     * Copy signal return handlers into the vector page, and
     * set sigreturn to be a pointer to these.
     */
    memcpy(addr + offset, sigreturn_codes, sizeof(sigreturn_codes));
    ptr = (unsigned long)addr + offset;
    flush_icache_range(ptr, ptr + sizeof(sigreturn_codes));

    return page;
}

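The mask arithmetic deserves a note: 0x7fc keeps bits 2 through 10 of the random value, so the randomized part is always a multiple of 4 (keeping the copied code word-aligned) and at most 0x7fc, placing the handlers somewhere in [0x200, 0x9fc] within the page. A minimal standalone check of that claim:

#include <assert.h>

int main(void)
{
    unsigned r;

    /* Exhaustively confirm the alignment and range of the offset. */
    for (r = 0; r <= 0xffff; r++) {
        unsigned offset = 0x200 + (r & 0x7fc);

        assert(offset % 4 == 0);                    /* word-aligned    */
        assert(offset >= 0x200 && offset <= 0x9fc); /* within the page */
    }
    return 0;
}
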
void GA::crossover(Individual *p1, Individual *p2, pop_vector &v)
{
    // one point crossover
    if (get_random_prob() < m_prob_crossover) {
        unsigned int gi = get_random_int(0, m_nvars-1);
        Individual *c1 = new Individual();
        Individual *c2 = new Individual();

        // genes before the crossover point come from the same parent
        for (unsigned int i = 0; i < gi; i++) {
            c1->add_cut(p1->get_cut(i));
            c2->add_cut(p2->get_cut(i));
        }
        // genes from the crossover point on are swapped between parents
        for (unsigned int i = gi; i < m_nvars; i++) {
            c1->add_cut(p2->get_cut(i));
            c2->add_cut(p1->get_cut(i));
        }
        v.push_back(c1);
        v.push_back(c2);
    } else {
        v.push_back(p1->copy());
        v.push_back(p2->copy());
    }
}

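To make the gene flow concrete, here is a minimal standalone illustration of the same one-point scheme in plain C, with int arrays standing in for the Individual objects and their cut API (which belong to the surrounding GA code):

#include <stdio.h>

#define NVARS 6

/* Child 1 takes parent 1's genes before point gi and parent 2's genes
 * from gi on; child 2 is the mirror image. */
static void one_point_crossover(const int *p1, const int *p2,
                                int *c1, int *c2, int gi)
{
    for (int i = 0; i < NVARS; i++) {
        c1[i] = (i < gi) ? p1[i] : p2[i];
        c2[i] = (i < gi) ? p2[i] : p1[i];
    }
}

int main(void)
{
    int p1[NVARS] = {1, 1, 1, 1, 1, 1};
    int p2[NVARS] = {2, 2, 2, 2, 2, 2};
    int c1[NVARS], c2[NVARS];

    one_point_crossover(p1, p2, c1, c2, 3);
    for (int i = 0; i < NVARS; i++)
        printf("%d%d ", c1[i], c2[i]);   /* prints: 12 12 12 21 21 21 */
    printf("\n");
    return 0;
}
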
static struct task_struct *dup_task_struct(struct task_struct *orig)
{
    struct task_struct *tsk;
    struct thread_info *ti;
    unsigned long *stackend;
    int node = tsk_fork_get_node(orig);
    int err;

    prepare_to_copy(orig);

    tsk = alloc_task_struct_node(node);
    if (!tsk)
        return NULL;

    ti = alloc_thread_info_node(tsk, node);
    if (!ti) {
        free_task_struct(tsk);
        return NULL;
    }

    err = arch_dup_task_struct(tsk, orig);
    if (err)
        goto out;

    tsk->stack = ti;

    err = prop_local_init_single(&tsk->dirties);
    if (err)
        goto out;

    setup_thread_stack(tsk, orig);
    clear_user_return_notifier(tsk);
    clear_tsk_need_resched(tsk);
    stackend = end_of_stack(tsk);
    *stackend = STACK_END_MAGIC;    /* for overflow detection */

#ifdef CONFIG_CC_STACKPROTECTOR
    tsk->stack_canary = get_random_int();
#endif

    /*
     * One for us, one for whoever does the "release_task()" (usually
     * parent)
     */
    atomic_set(&tsk->usage, 2);
#ifdef CONFIG_BLK_DEV_IO_TRACE
    tsk->btrace_seq = 0;
#endif
    tsk->splice_pipe = NULL;

    account_kernel_stack(ti, 1);

    return tsk;

out:
    free_thread_info(ti);
    free_task_struct(tsk);
    return NULL;
}

/* Shuffle the vdso up a bit, randomly. */
static unsigned long vdso_addr(unsigned long start, unsigned int len)
{
    unsigned int offset;

    /* This loses some more bits than a modulo, but is cheaper */
    offset = get_random_int() & (PTRS_PER_PTE - 1);
    return start + (offset << PAGE_SHIFT);
}

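The "cheaper" remark relies on PTRS_PER_PTE being a power of two: for a power-of-two bound N, x & (N - 1) equals x % N with no division, though masking to PTRS_PER_PTE pages may keep fewer bits of the random value than a modulo over the whole available range would. A minimal check, assuming the common x86-64 value PTRS_PER_PTE == 512:

#include <assert.h>

#define PTRS_PER_PTE 512   /* assumption: x86-64 with 4 KiB pages */

int main(void)
{
    unsigned int x;

    /* For a power-of-two bound, mask and modulo agree exactly. */
    for (x = 0; x < 100000; x++)
        assert((x & (PTRS_PER_PTE - 1)) == (x % PTRS_PER_PTE));
    return 0;
}
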
static struct task_struct *dup_task_struct(struct task_struct *orig)
{
    struct task_struct *tsk;
    struct thread_info *ti;
    unsigned long *stackend;
    int node = tsk_fork_get_node(orig);
    int err;

    prepare_to_copy(orig);

    tsk = alloc_task_struct_node(node);
    if (!tsk) {
        printk("[%d:%s] fork fail at alloc_tsk_node, please check kmem_cache_alloc_node()\n",
               current->pid, current->comm);
        return NULL;
    }

    ti = alloc_thread_info_node(tsk, node);
    if (!ti) {
        printk("[%d:%s] fork fail at alloc_t_info_node, please check alloc_pages_node()\n",
               current->pid, current->comm);
        free_task_struct(tsk);
        return NULL;
    }

    err = arch_dup_task_struct(tsk, orig);
    if (err) {
        printk("[%d:%s] fork fail at arch_dup_task_struct, err:%d\n",
               current->pid, current->comm, err);
        goto out;
    }

    tsk->stack = ti;
    setup_thread_stack(tsk, orig);
    clear_user_return_notifier(tsk);
    clear_tsk_need_resched(tsk);
    stackend = end_of_stack(tsk);
    *stackend = STACK_END_MAGIC;    /* for overflow detection */

#ifdef CONFIG_CC_STACKPROTECTOR
    tsk->stack_canary = get_random_int();
#endif

    /*
     * One for us, one for whoever does the "release_task()" (usually
     * parent)
     */
    atomic_set(&tsk->usage, 2);
#ifdef CONFIG_BLK_DEV_IO_TRACE
    tsk->btrace_seq = 0;
#endif
    tsk->splice_pipe = NULL;

    account_kernel_stack(ti, 1);

    return tsk;

out:
    free_thread_info(ti);
    free_task_struct(tsk);
    return NULL;
}

static unsigned long mmap_rnd(void)
{
    unsigned long rnd = 0;

    if (current->flags & PF_RANDOMIZE)
        rnd = get_random_int() & MMAP_RND_MASK;

    return rnd << PAGE_SHIFT;
}

void GA::mutate(pop_vector &v)
{
    for (auto &indv : v) {
        if (get_random_prob() < m_prob_mutation) {
            // replace one randomly chosen gene with a fresh random cut
            int idx = get_random_int(0, m_nvars-1);
            indv->set_cut(idx, get_random_cut(m_variables[idx]));
        }
    }
}

static void __init test_hexdump_set(int rowsize, bool ascii)
{
    size_t d = min_t(size_t, sizeof(data_b), rowsize);
    size_t len = get_random_int() % d + 1;

    test_hexdump(len, rowsize, 4, ascii);
    test_hexdump(len, rowsize, 2, ascii);
    test_hexdump(len, rowsize, 8, ascii);
    test_hexdump(len, rowsize, 1, ascii);
}

unsigned long arch_mmap_rnd(void)
{
    unsigned long rnd = 0;

    /* 8MB for 32bit, 1GB for 64bit */
    if (current->flags & PF_RANDOMIZE) {
        if (is_32bit_task())
            rnd = get_random_long() % (1<<(23-PAGE_SHIFT));
        else
            rnd = get_random_long() % (1UL<<(30-PAGE_SHIFT));
    }
    return rnd << PAGE_SHIFT;
}

#include <stdio.h>

int main(void)
{
    start_random_seed();

    int total = 0;
    for (int i = 0; i < 10000; i++) {
        int temp = two_steps_distribution(get_random_int());
        total = total + temp;
    }
    /* integer division: prints the truncated mean of the samples */
    printf("%d\n", total / 10000);
    return 0;
}

passwd_cache::passwd_cache()
{
    uid_table = new UidHashTable(10, compute_user_hash, updateDuplicateKeys);
    group_table = new GroupHashTable(10, compute_user_hash, updateDuplicateKeys);

    /* set the number of seconds until a cache entry expires */
    // Randomize this timer a bit to decrease the chances of lots of
    // processes all pounding on NIS at the same time.
    int default_lifetime = 300 + get_random_int() % 60;
    Entry_lifetime = param_integer("PASSWD_CACHE_REFRESH", default_lifetime);

    loadConfig();
}