/*
 * brodge() - run one randomized "brodge" pass over @pict and export it.
 *
 * Picks random positions and modes, runs brodge1(), logs how long the
 * pass took, then pushes the picture out through a051_data_write().
 */
void brodge(struct c642_pict *pict) {
	int o;
	int at[2], m[2];
	long d;
	/* Timestamp before the pass so the duration can be reported below. */
	struct timespec ls = current_kernel_time();
	c642_esqu(pict);
	/* 0: osc1 , 1: osc2 , 2: osc3 */
	/* NOTE(review): the comment above lists three oscillators but the
	 * loop and the at[]/m[] arrays only cover two - confirm intent. */
	/* 32.32 fixed point: (num_at * rand32) >> 32 gives a uniform
	 * index in [0, num_at). */
	for(o = 0; o < 2; o++) { d = 1L * pict->num_at * random32(); at[o] = d >> 32; }
	/* Same trick with range 4: random mode in [0, 4). */
	d = 4L * random32(); m[0] = d >> 32;
	d = 4L * random32(); m[1] = d >> 32;
	brodge1(pict, at, m);
	pict->sig = current_kernel_time();
	/* Elapsed time; the d < 0 branch handles nanosecond wrap-around
	 * (borrow one second, print the complement in milliseconds). */
	d = (pict->sig.tv_nsec - ls.tv_nsec);
	if ( d < 0 ) { printk("Brodge ' %lX ' taked %ld.%lds.\n", pict->sig.tv_sec, pict->sig.tv_sec - ls.tv_sec - 1, (1000000000 - d)/1000000); } else { printk("Brodge ' %lX ' taked %ld.%lds.\n", pict->sig.tv_sec, pict->sig.tv_sec - ls.tv_sec, d/1000000); }
	// Sig
	// Export
	d = a051_data_write(pict->env, pict->picture, pict->size);
	/* Log a millisecond-resolution timestamp (masked to 39 bits) and
	 * the number of bytes the export reported. */
	printk("Brodge ' %lX ' send %ld bytes\n", 0x7F1FFFFFFF & (1L * pict->sig.tv_sec * 1000L + pict->sig.tv_nsec / 1000000L), d);
}
// Compose a 64-bit pseudo-random value from three 32-bit draws.
// The words are XOR-folded at bit offsets 0, 31 and 62 so every bit of
// the result is covered by at least one draw.
quint64 Zobrist::random64()
{
	const quint64 lo  = static_cast<quint64>(random32());
	const quint64 mid = static_cast<quint64>(random32());
	const quint64 hi  = static_cast<quint64>(random32());
	return lo ^ (mid << 31) ^ (hi << 62);
}
/* Fill the Zobrist hash tables with pseudo-random numbers. */
void hashRndInit() {
	int color, piece, sq;

	srand(0);
	/* One random key per (color, piece, square) triple. */
	for (color = 0; color < 2; ++color)
		for (piece = 0; piece < 6; ++piece)
			for (sq = 0; sq < 64; ++sq)
				hash.piece[color][piece][sq] = random32();

	/* Side-to-move key. */
	hash.side = random32();

	/* One key per possible en-passant square. */
	for (sq = 0; sq < 64; ++sq)
		hash.ep[sq] = random32();
}
// Resolve which replica a read should be sent to.
// Last-update reads always target the primary; snapshot/outdated reads
// pick a random replica among the secondaries plus (if valid) the primary.
end_point replication_app_client_base::get_read_address(read_semantic_t semantic, const partition_configuration& config)
{
	if (semantic == read_semantic_t::ReadLastUpdate)
		return config.primary;

	const bool primary_counted = config.primary != dsn::end_point::INVALID;
	int candidates = static_cast<int>(config.secondaries.size());
	if (primary_counted)
		candidates++;

	// No replica at all: fall back to (the invalid) primary.
	if (candidates == 0)
		return config.primary;

	const int pick = random32(0, 1000) % candidates;
	if (primary_counted && pick == candidates - 1)
		return config.primary;
	return config.secondaries[pick];
}
/*
 * nasty_write() - accept a random-sized chunk from userspace.
 *
 * Consumes at least 1 and at most MAX_CHUNK_SIZE bytes per call,
 * clamped to the caller's count and to FILE_MAX_SIZE.
 *
 * Returns the number of bytes consumed, 0 for a zero-length write,
 * -ENOSPC when the file is full, or -EFAULT on a bad user buffer.
 */
static ssize_t nasty_write(struct file *file, const char __user *buf,
			   size_t count, loff_t *ppos)
{
	int rcount, ret;	/* random number of bytes to accept */

	/* 0 in -> 0 out, no strings attached */
	if (count == 0)
		return 0;

	/* end of story */
	if (*ppos >= FILE_MAX_SIZE)
		return -ENOSPC;

	/* make sure that we write at least 1 character */
	rcount = 1 + random32() % MAX_CHUNK_SIZE;

	/* ... and don't cross the borders */
	if (rcount > count)
		rcount = count;
	if (*ppos + rcount > FILE_MAX_SIZE)
		rcount = FILE_MAX_SIZE - *ppos;

	ret = copy_from_user(&content[*ppos], buf, rcount);
	if (ret != 0) {
		/* Fixed message: this is copy FROM user (was "copy to
		 * user failead" - wrong direction plus a typo). */
		pr_warning("@write: copy from user failed\n");
		return -EFAULT;
	}

	/* %zu: count is a size_t (was %d, a format-specifier mismatch). */
	pr_debug("@write: requested %zu, written %d, current pos %lld\n",
		 count, rcount, *ppos);

	*ppos += rcount;
	crt_size = max(crt_size, (int)*ppos);

	return rcount;
}
/*
 * nasty_read() - return a random-sized chunk of the file to userspace.
 *
 * Hands back at least 1 and at most MAX_CHUNK_SIZE bytes per call,
 * clamped to the caller's count and to the current file size.
 *
 * Returns bytes delivered, 0 at end of file, or -EFAULT on a bad
 * user buffer.
 */
static ssize_t nasty_read(struct file *file, char __user *buf,
			  size_t count, loff_t *ppos)
{
	int rcount, ret;	/* random number of bytes to return */

	/* end of file */
	if (*ppos >= crt_size)
		return 0;

	/* make sure that we read at least 1 character */
	rcount = 1 + random32() % MAX_CHUNK_SIZE;

	/* ... and don't cross the borders */
	if (rcount > count)
		rcount = count;
	if (*ppos + rcount > crt_size)
		rcount = crt_size - *ppos;

	ret = copy_to_user(buf, &content[*ppos], rcount);
	if (ret != 0) {
		pr_warning("@read: copy to user failed\n");
		return -EFAULT;
	}

	/* %zu: count is a size_t (was %d, a format-specifier mismatch). */
	pr_debug("@read: requested %zu, received %d, current pos %lld\n",
		 count, rcount, *ppos);

	*ppos += rcount;

	return rcount;
}
/*
 * generateKeyHandle() - build a U2F key handle for @app_id.
 *
 * The handle is KEY_PATH_LEN bytes of freshly-drawn hardened BIP-32
 * derivation path followed by an HMAC-SHA256 over (app_id || path)
 * keyed with the derived node's private key, so the device can later
 * recognize handles it issued itself.
 *
 * Returns the derived HDNode, or NULL if derivation fails.
 */
static const HDNode *generateKeyHandle(const uint8_t app_id[], uint8_t key_handle[]) {
	uint8_t keybase[U2F_APPID_SIZE + KEY_PATH_LEN];

	// Derivation path is m/U2F'/r'/r'/r'/r'/r'/r'/r'/r'
	uint32_t key_path[KEY_PATH_ENTRIES];
	for (uint32_t i = 0; i < KEY_PATH_ENTRIES; i++) {
		// high bit for hardened keys
		key_path[i]= 0x80000000 | random32();
	}

	// First half of keyhandle is key_path
	memcpy(key_handle, key_path, KEY_PATH_LEN);

	// prepare keypair from the random path
	const HDNode *node = getDerivedNode(key_path, KEY_PATH_ENTRIES);
	if (!node)
		return NULL;

	// For second half of keyhandle:
	// signature (MAC) over app_id and the random path, keyed with
	// the derived private key.
	memcpy(&keybase[0], app_id, U2F_APPID_SIZE);
	memcpy(&keybase[U2F_APPID_SIZE], key_handle, KEY_PATH_LEN);
	hmac_sha256(node->private_key, sizeof(node->private_key), keybase, sizeof(keybase), &key_handle[KEY_PATH_LEN]);

	// Done!
	return node;
}
/*
 * yam_arbitrate() - channel arbitration before keying the transmitter.
 *
 * Called periodically; starts a transmission only when the port is idle,
 * the slot countdown has expired and the persistence dice roll succeeds
 * (p-persistent CSMA, as used on packet radio).
 */
static void yam_arbitrate(struct net_device *dev)
{
	struct yam_port *yp = netdev_priv(dev);

	/* Bail out unless this really is an idle yam port with work queued. */
	if (yp->magic != YAM_MAGIC || yp->tx_state != TX_OFF ||
	    skb_queue_empty(&yp->send_queue))
		return;

	/* tx_state is TX_OFF and there is data to send */

	if (yp->dupmode) {
		/* Full duplex mode, don't wait */
		yam_start_tx(dev, yp);
		return;
	}
	if (yp->dcd) {
		/* DCD on (carrier detected): restart the slot countdown. */
		yp->slotcnt = yp->slot / 10;
		return;
	}

	/* Is slottime passed ? */
	if ((--yp->slotcnt) > 0)
		return;
	yp->slotcnt = yp->slot / 10;

	/* Persistence: transmit only if a random byte is <= yp->pers. */
	if ((random32() % 256) > yp->pers)
		return;

	yam_start_tx(dev, yp);
}
// Chat command: flip a coin and tell the player the outcome.
void IChatHandler::_sCmd_Coin(IWorldPlayer *_player, const char *_message)
{
	FDASSERT(_player);
	FDASSERT(_message);

	// Decide on the low bit of a 32-bit random draw.
	const bool heads = (random32() & 1) != 0;
	_player->greenText(heads ? "heads" : "tails");
}
/*search in cache*/
// Resolve the replica address for an operation.  Writes and last-update
// reads must go to the primary; snapshot/outdated reads pick a random
// replica among the secondaries plus (when valid) the primary.
dsn::rpc_address replication_app_client_base::get_address(bool is_write, read_semantic_t semantic, const partition_configuration& config)
{
	if (is_write || semantic == read_semantic_t::ReadLastUpdate)
		return config.primary;

	const bool primary_counted = !config.primary.is_invalid();
	int candidates = static_cast<int>(config.secondaries.size());
	if (primary_counted)
		candidates++;

	// No replica at all: fall back to (the invalid) primary.
	if (candidates == 0)
		return config.primary;

	const int pick = random32(0, 1000) % candidates;
	if (primary_counted && pick == candidates - 1)
		return config.primary;
	return config.secondaries[pick];
}
/* Randomly dispatch to a read or a write operation (50/50 split). */
static int do_operation(void)
{
	return (random32() & 1) ? do_read() : do_write();
}
/*
 * Add a plain test rectangle to the scene (debug helper).
 * Holds the source lock while mutating the shared drawnObjects list.
 */
void gravManager::addTestObject()
{
    lockSources();

    RectangleBase* obj = new RectangleBase( 0.0f, 0.0f );
    drawnObjects->push_back( obj );

    // Deliberately-dead debug branch: flip to true to name the object
    // with random printable ASCII instead of the fixed names below.
    bool useRandName = false;
    if ( useRandName )
    {
        int nameLength = 10;
        std::string randName;
        for( int i = 0; i < nameLength; i++ )
        {
            // Scale a random draw into the printable range [32, 126].
            int rand = ((float)random32() / (float)random32_max() * 95) + 32;
            randName += (char)rand;
        }
        obj->setName( randName );
    }
    else if ( drawnObjects->size() % 2 == 0 )
    {
        // Alternate between a blank name and "TEST" per object added.
        obj->setName( " " );
    }
    else
    {
        obj->setName( "TEST" );
    }

    Texture t = GLUtil::getInstance()->getTexture( "border" );
    obj->setTexture( t.ID, t.width, t.height );
    obj->setUserDeletable( true );

    unlockSources();
}
/*
 * c4iw_id_table_alloc() - initialize a bitmap-based id allocator.
 * @alloc:    table to initialize
 * @start:    offset added to every id handed out
 * @num:      number of ids in the table
 * @reserved: ids [0, reserved) are pre-marked in-use unless F_EMPTY is set
 * @flags:    C4IW_ID_TABLE_F_* behaviour flags
 *
 * With C4IW_ID_TABLE_F_RANDOM the initial search position is randomized
 * so allocated ids are less predictable.
 *
 * Returns 0 on success or -ENOMEM if the bitmap cannot be allocated.
 */
int c4iw_id_table_alloc(struct c4iw_id_table *alloc, u32 start, u32 num,
			u32 reserved, u32 flags)
{
	int i;

	alloc->start = start;
	alloc->flags = flags;
	if (flags & C4IW_ID_TABLE_F_RANDOM)
		alloc->last = random32() % RANDOM_SKIP;
	else
		alloc->last = 0;
	alloc->max = num;
	spin_lock_init(&alloc->lock);
	/* One bit per id, rounded up to whole longs. */
	alloc->table = kmalloc(BITS_TO_LONGS(num) * sizeof(long), GFP_KERNEL);
	if (!alloc->table)
		return -ENOMEM;

	bitmap_zero(alloc->table, num);
	if (!(alloc->flags & C4IW_ID_TABLE_F_EMPTY))
		/* Reserve the low ids so they are never handed out. */
		for (i = 0; i < reserved; ++i)
			set_bit(i, alloc->table);

	return 0;
}
DECLARE_TEST( ringbuffer, io )
{
	ringbuffer_t* ring;
	char src[256];
	char dst[256];
	unsigned int len, check, iter, total_iters;
	unsigned int expected_size = 0;

	/* Fill the source pattern with random bytes. */
	for( len = 0; len < 256; ++len )
		src[len] = (char)( random32() & 0xFF );

	ring = ringbuffer_allocate( 512 );

	/* Round-trip every chunk size 0..255, several times over, and
	   verify the data comes back unchanged each time. */
	total_iters = 32;
	for( iter = 0; iter < total_iters; ++iter )
	{
		for( len = 0; len < 256; ++len )
		{
			ringbuffer_write( ring, src, len );
			ringbuffer_read( ring, dst, len );
			for( check = 0; check < len; ++check )
				EXPECT_EQ( dst[check], src[check] );
			expected_size += len;
		}
	}

	/* The buffer's byte accounting must match what we moved. */
	EXPECT_EQ( ringbuffer_total_read( ring ), expected_size );
	EXPECT_EQ( ringbuffer_total_written( ring ), expected_size );

	ringbuffer_deallocate( ring );

	return 0;
}
/** * Allocate a new binding. Try using the same port as on the IPv6 side. * If it's already in use, allocate one randomly. The binding is inserted into * the Binding Information Base (BIB). * * \param bkey Initializer for the created binding. * * \return A pointer to the created binding if successful, NULL otherwise. */ static struct nat64_binding * nat64_binding_create(const struct nat64_binding *bkey) { struct nat64_binding *b; int min; int max; int first; b = malloc(sizeof(*b)); if(!b) { if(printk_ratelimit()) printk(KERN_DEBUG "nat64_binding_create: kmalloc failed"); return NULL; } *b = *bkey; b->b_saddr4 = *(nat64_config_nat_addr()); b->b_sport4 = b->b_sport6; if (!nat64_bib_insert(b)) return b; min = b->b_sport6 < 1024 ? 0 : 1024; max = b->b_sport6 < 1024 ? 1024 : 65536; first = min + ((random32() % ((max - min) / 2) * 2) | (b->b_sport6 & 1)); if (nat64_binding_alloc_port(b, first, max) || nat64_binding_alloc_port(b, min, first)) return b; kfree(b); return NULL; }
/*
 * Trivial bitmap-based allocator. If the random flag is set, the
 * allocator is designed to:
 * - pseudo-randomize the id returned such that it is not trivially predictable.
 * - avoid reuse of recently used id (at the expense of predictability)
 *
 * Returns the allocated id (offset by alloc->start), or (u32)-1 when
 * the table is full.
 */
u32 c4iw_id_alloc(struct c4iw_id_table *alloc)
{
	unsigned long flags;
	u32 obj;

	spin_lock_irqsave(&alloc->lock, flags);

	/* Search from the remembered position, wrapping once to the start. */
	obj = find_next_zero_bit(alloc->table, alloc->max, alloc->last);
	if (obj >= alloc->max)
		obj = find_first_zero_bit(alloc->table, alloc->max);

	if (obj < alloc->max) {
		if (alloc->flags & C4IW_ID_TABLE_F_RANDOM)
			/* Jump ahead a random amount so the next id is
			 * hard to predict and freed ids rest a while. */
			alloc->last += random32() % RANDOM_SKIP;
		else
			alloc->last = obj + 1;
		if (alloc->last >= alloc->max)
			alloc->last = 0;
		set_bit(obj, alloc->table);
		/* Translate bitmap position to the external id space. */
		obj += alloc->start;
	} else
		obj = -1;	/* table full */

	spin_unlock_irqrestore(&alloc->lock, flags);
	return obj;
}
/*
 * should_fail() - central decision point for fault injection.
 * @attr: fault attributes (filters, probability, interval, budgets)
 * @size: size of the operation being considered for failure
 *
 * Returns true when every configured gate (task filter, remaining
 * failure budget, space budget, interval, probability, stacktrace
 * filter) agrees this call should be forced to fail.
 */
bool should_fail(struct fault_attr *attr, ssize_t size)
{
	/* Optional per-task opt-in filter. */
	if (attr->task_filter && !fail_task(attr, current))
		return false;

	/* times == 0: the failure budget is exhausted. */
	if (atomic_read(&attr->times) == 0)
		return false;

	/* Consume the configured free-space budget before failing. */
	if (atomic_read(&attr->space) > size) {
		atomic_sub(size, &attr->space);
		return false;
	}

	/* Only every interval-th call is eligible. */
	if (attr->interval > 1) {
		attr->count++;
		if (attr->count % attr->interval)
			return false;
	}

	/* Percentage dice roll. */
	if (attr->probability <= random32() % 100)
		return false;

	if (!fail_stacktrace(attr))
		return false;

	fail_dump(attr);

	/* times == -1 means unlimited; otherwise decrement the budget. */
	if (atomic_read(&attr->times) != -1)
		atomic_dec_not_zero(&attr->times);

	return true;
}
/*
 * Bootloader entry point: set up the stack canary and hardware, then
 * either boot the application or drop into the bootloader loop.
 */
int main(void)
{
	/* Random canary for -fstack-protector before anything else runs. */
	__stack_chk_guard = random32();
	setup();
	memory_protect();
	oledInit();

	// at least one button is unpressed
	uint16_t state = gpio_port_read(BTN_PORT);
	if ((state & BTN_PIN_YES) == BTN_PIN_YES || (state & BTN_PIN_NO) == BTN_PIN_NO) {
		check_firmware_sanity();
		oledClear();
		oledDrawBitmap(40, 0, &bmp_logo64_empty);
		oledRefresh();
		uint8_t hash[32];
		/* Warn the user before booting firmware whose signature
		 * check fails. */
		if (!signatures_ok(hash)) {
			show_unofficial_warning(hash);
		}
		load_app();
	}

	/* Both buttons held: stay in the bootloader. */
	bootloader_loop();

	return 0;
}
uint32_t next_cid(void) { // extremely unlikely but hey do { cid = random32(); } while (cid == 0 || cid == CID_BROADCAST); return cid; }
/* Random data length that still fits in the buffer after 'offs'. */
static int rand_len(int offs)
{
	unsigned int len = random32();

	return len % (bufsize - offs);
}
/* Random offset somewhere inside the buffer. */
static int rand_offs(void)
{
	unsigned int offs = random32();

	return offs % bufsize;
}
/*
 * Firmware entry point: hook user-mode ISRs, drop privileges, bring up
 * the board and subsystems, then run the idle/screensaver main loop.
 */
int main(void)
{
	/* Route hardware interrupts to the user-mode handlers. */
	_buttonusr_isr = (void *)&buttonisr_usr;
	_timerusr_isr = (void *)&timerisr_usr;
	_mmhusr_isr = (void *)&mmhisr;

	/* Drop privileges */
	drop_privs();

	/* Init board */
	kk_board_init();

	/* Program the model into OTP, if we're not in screen-test mode, and it's
	 * not already there */
	(void)flash_programModel();

	/* Init for safeguard against stack overflow (-fstack-protector-all) */
	__stack_chk_guard = (uintptr_t)random32();

	/* Bootloader Verification */
	check_bootloader();

	led_func(SET_RED_LED);
	dbg_print("Application Version %d.%d.%d\n\r", MAJOR_VERSION, MINOR_VERSION, PATCH_VERSION);

	/* Init storage */
	storage_init();

	/* Init protcol buffer message map and usb msg callback */
	fsm_init();

	led_func(SET_GREEN_LED);
	usbInit();
	u2fInit();
	led_func(CLR_RED_LED);
	reset_idle_time();

	/* Pick the initial screen based on device state. */
	if (is_mfg_mode())
		layout_screen_test();
	else if (!storage_isInitialized())
		layout_standard_notification("Welcome", "keepkey.com/get-started", NOTIFICATION_LOGO);
	else
		layoutHomeForced();

	/* Main loop: service messages once per second and manage the
	 * idle-time screensaver. */
	while (1) {
		delay_ms_with_callback(ONE_SEC, &exec, 1);
		increment_idle_time(ONE_SEC);
		toggle_screensaver();
	}

	return 0;
}
/*
 * rtw_random32() - 32-bit PRNG wrapper.
 *
 * prandom_u32() replaced random32() in Linux 3.8, so pick whichever
 * the target kernel provides.
 *
 * NOTE(review): when PLATFORM_LINUX is not defined the body is empty
 * and control falls off the end of a value-returning function
 * (undefined behaviour if ever compiled that way) - confirm non-Linux
 * builds never include this function.
 */
inline u32 rtw_random32(void)
{
#ifdef PLATFORM_LINUX
	#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,8,0))
	return prandom_u32();
	#else
	return random32();
	#endif
#endif
}
/*
 * knh_rand() - platform-selected pseudo-random word.
 *
 *  - in-kernel (LKM) builds use the kernel's random32()
 *  - 32-bit builds use Mersenne Twister genrand_int31()
 *  - otherwise the 64-bit Mersenne Twister genrand64_int63()
 */
knh_uint_t knh_rand(void)
{
#if defined(KONOHA_ON_LKM)
	return (knh_uint_t)random32();
#elif defined(K_USING_INT32)
	return (knh_uint_t)genrand_int31();
#else
	return (knh_uint_t)genrand64_int63();
#endif
}
/*
 * init_test_probes() - kprobes smoke test driver.
 *
 * Draws a random operand (kept strictly above div_factor so the probed
 * target's division is meaningful), runs each probe flavour in turn and
 * reports an aggregate pass/fail via printk.  Always returns 0.
 */
int init_test_probes(void)
{
	int ret;

	target = kprobe_target;
	target2 = kprobe_target2;

	/* Re-draw until rand1 > div_factor. */
	do {
		rand1 = random32();
	} while (rand1 <= div_factor);

	printk(KERN_INFO "Kprobe smoke test started\n");

	num_tests++;
	ret = test_kprobe();
	if (ret < 0)
		errors++;

	num_tests++;
	ret = test_kprobes();
	if (ret < 0)
		errors++;

	num_tests++;
	ret = test_jprobe();
	if (ret < 0)
		errors++;

	num_tests++;
	ret = test_jprobes();
	if (ret < 0)
		errors++;

#ifdef CONFIG_KRETPROBES
	/* kretprobe tests only exist when the kernel supports them. */
	num_tests++;
	ret = test_kretprobe();
	if (ret < 0)
		errors++;

	num_tests++;
	ret = test_kretprobes();
	if (ret < 0)
		errors++;
#endif /* */

	/* Summarize: hard failures first, then handler-internal errors. */
	if (errors)
		printk(KERN_ERR "BUG: Kprobe smoke test: %d out of "
				"%d tests failed\n", errors, num_tests);
	else if (handler_errors)
		printk(KERN_ERR "BUG: Kprobe smoke test: %d error(s) "
				"running handlers\n", handler_errors);
	else
		printk(KERN_INFO "Kprobe smoke test passed successfully\n");

	return 0;
}
/*H:435
 * And this is us, creating the new page directory. If we really do
 * allocate a new one (and so the kernel parts are not there), we set
 * blank_pgdir.
 *
 * Returns the index of the (possibly reused) shadow pgdir slot; on
 * allocation failure it falls back to the CPU's current slot.
 */
static unsigned int new_pgdir(struct lg_cpu *cpu,
			      unsigned long gpgdir,
			      int *blank_pgdir)
{
	unsigned int next;
#ifdef CONFIG_X86_PAE
	pmd_t *pmd_table;
#endif

	/*
	 * We pick one entry at random to throw out. Choosing the Least
	 * Recently Used might be better, but this is easy.
	 */
	next = random32() % ARRAY_SIZE(cpu->lg->pgdirs);
	/* If it's never been allocated at all before, try now. */
	if (!cpu->lg->pgdirs[next].pgdir) {
		cpu->lg->pgdirs[next].pgdir =
					(pgd_t *)get_zeroed_page(GFP_KERNEL);
		/* If the allocation fails, just keep using the one we have */
		if (!cpu->lg->pgdirs[next].pgdir)
			next = cpu->cpu_pgd;
		else {
#ifdef CONFIG_X86_PAE
			/*
			 * In PAE mode, allocate a pmd page and populate the
			 * last pgd entry.
			 */
			pmd_table = (pmd_t *)get_zeroed_page(GFP_KERNEL);
			if (!pmd_table) {
				/* pmd allocation failed: undo the pgd
				 * allocation and fall back too. */
				free_page((long)cpu->lg->pgdirs[next].pgdir);
				set_pgd(cpu->lg->pgdirs[next].pgdir, __pgd(0));
				next = cpu->cpu_pgd;
			} else {
				set_pgd(cpu->lg->pgdirs[next].pgdir +
					SWITCHER_PGD_INDEX,
					__pgd(__pa(pmd_table) | _PAGE_PRESENT));
				/*
				 * This is a blank page, so there are no kernel
				 * mappings: caller must map the stack!
				 */
				*blank_pgdir = 1;
			}
#else
			*blank_pgdir = 1;
#endif
		}
	}
	/* Record which Guest toplevel this shadows. */
	cpu->lg->pgdirs[next].gpgdir = gpgdir;
	/* Release all the non-kernel mappings. */
	flush_user_mappings(cpu->lg, next);

	return next;
}
/* Small manual test: dump structure sizes, build a packet and show two
 * raw PRNG draws. */
int main()
{
	struct rtp_packet *pkt = rtp_packet_new();

	if (!pkt) {
		fprintf(stderr, "malloc error\n");
	}

	/* On-wire structure sizes. */
	fprintf(stderr, "rtp header size %ld\n", sizeof(struct rtp_hdr_t));
	fprintf(stderr, "rtp packet size %ld\n", sizeof(struct rtp_packet));
	fprintf(stderr, "rtcp packet size %ld\n", sizeof(struct rtcp_packet));

	rtp_packet_build(pkt, NULL, 0);

	/* Two raw draws from the PRNG. */
	fprintf(stderr, "random = 0x%x\n", random32());
	fprintf(stderr, "random = 0x%8x\n", random32());

	rtp_packet_free(pkt);
	return 0;
}
/*
 * device_read() - fill the caller's buffer with random uppercase letters.
 *
 * Returns 'length' on success or -EFAULT if the user buffer is bad.
 */
static ssize_t device_read(struct file *filp, char *buffer, size_t length, loff_t *offset)
{
	size_t i;

	cantidadLecturas++;	/* bump the read-count statistic */

	/*
	 * Fix: 'buffer' is a userspace pointer and must not be written
	 * through directly.  Push each generated byte with put_user(),
	 * which performs the access check and faults gracefully.
	 */
	for (i = 0; i < length; i++) {
		unsigned int r = random32();
		char c = 'A' + r % 26;	/* uniform-ish letter A..Z */

		if (put_user(c, buffer + i))
			return -EFAULT;
	}

	return length;
}
/* Pick a random non-bad eraseblock.  The range is ebcnt - 1 because
 * callers may touch up to two consecutive eraseblocks at a time. */
static int rand_eb(void)
{
	unsigned int eb;

	do {
		eb = random32() % (ebcnt - 1);
	} while (bbt[eb]);	/* re-draw while the block is marked bad */

	return eb;
}
/* Initialize an RTP header: fixed version-2 fields plus randomized
 * sequence number, timestamp and SSRC, then dump every field to stderr.
 * Always returns 0. */
int rtp_make_header(struct rtp_hdr_t *hdr)
{
	/* Fixed header fields. */
	hdr->cc = 0;
	hdr->x = 0;
	hdr->p = 0;
	hdr->version = 2;

	/* Randomized per-session fields, in network byte order. */
	hdr->seq = htons(random32() & 0xFFFF);
	hdr->ts = htonl(random32());
	hdr->ssrc = htonl(random32());

	/* Debug dump of the assembled header. */
	fprintf(stderr, "hdr->cc = 0x%x\n", hdr->cc);
	fprintf(stderr, "hdr->x = 0x%x\n", hdr->x);
	fprintf(stderr, "hdr->p = 0x%x\n", hdr->p);
	fprintf(stderr, "hdr->version = 0x%x\n", hdr->version);
	fprintf(stderr, "hdr->seq = 0x%x\n", hdr->seq);
	fprintf(stderr, "hdr->ts = 0x%x\n", hdr->ts);
	fprintf(stderr, "hdr->ssrc = 0x%x\n", hdr->ssrc);

	return (0);
}