// This internal function calculates a random preferred mapping address. // It is used when the client of allocPages() passes null as the address. // In calculating an address, we balance good ASLR against not fragmenting the // address space too badly. static void* getRandomPageBase() { uintptr_t random; random = static_cast<uintptr_t>(ranval(&s_ranctx)); #if CPU(X86_64) random <<= 32UL; random |= static_cast<uintptr_t>(ranval(&s_ranctx)); // This address mask gives a low liklihood of address space collisions. // We handle the situation gracefully if there is a collision. #if OS(WIN) // 64-bit Windows has a bizarrely small 8TB user address space. // Allocates in the 1-5TB region. random &= 0x3ffffffffffUL; random += 0x10000000000UL; #else // Linux and OS X support the full 47-bit user space of x64 processors. random &= 0x3fffffffffffUL; #endif #else // !CPU(X86_64) // This is a good range on Windows, Linux and Mac. // Allocates in the 0.5-1.5GB region. random &= 0x3fffffff; random += 0x20000000; #endif // CPU(X86_64) random &= kPageAllocationGranularityBaseMask; return reinterpret_cast<void*>(random); }
// Exercises aa_map_alter with int64 keys and arbitrary-length string
// values: inserts 50 random pairs via the alter callback, prints the map,
// filters it with __kv_5_filt_even, then prints the survivors.
void test_aa_map_arbitrary_kv_5(void) {

    char mem[aa_map_size];

    struct aa_map *m = aa_map_create( sizeof(mem)
                                    , mem
                                    , sizeof(int64_t)
                                    , ARBITRARY_LEN
                                    , __i64_cmp
                                    , __i64_cpy
                                    , __s32_cpy_never_ever
                                    , 0
                                    , __alloc
                                    , __dealloc );

    ranctx rctx;
    raninit(&rctx, 0xDEADBEEF);

    const char dict[] = "qwertyuiopasdfghjklzxcvbnm"
                        "QWERTYUIOPASDFGHJKLZXCVBNM1234567890";

    // Deterministic sequence: each iteration draws the key first, then the
    // value length, so the PRNG call order must not change.
    const size_t N = 50;
    size_t added = 0;
    while( added < N ) {
        int64_t key  = ranval(&rctx) % 10000;
        size_t  vlen = 1 + ranval(&rctx) % (ARBITRARY_LEN-2);
        char    val[ARBITRARY_LEN];
        randstr(&rctx, val, vlen, dict);
        aa_map_alter(m, true, &key, val, __kv_5_alter);
        added++;
    }

    aa_map_enum(m, 0, __kv_5_print);

    aa_map_filter(m, 0, __kv_5_filt_even);

    fprintf(stdout, "filtered\n");

    aa_map_enum(m, 0, __kv_5_print);

    aa_map_destroy(m);
}
// Seed the small noncryptographic PRNG state. The 'a' slot gets the fixed
// constant 0xf1ea5eed while b, c and d all take the caller's seed; twenty
// throwaway draws then mix the state before it is handed to callers.
void raninit(ranctx *x, u4 seed) {
    u4 round;
    x->a = 0xf1ea5eed;
    x->b = seed;
    x->c = seed;
    x->d = seed;
    // Warm-up: discard the first 20 outputs so the state decorrelates
    // from the raw seed value.
    for (round = 0; round < 20; ++round) {
        (void)ranval(x);
    }
}
// Calculates a random preferred mapping address. In calculating an // address, we balance good ASLR against not fragmenting the address // space too badly. void* getRandomPageBase() { uintptr_t random; random = static_cast<uintptr_t>(ranval(&s_ranctx)); #if CPU(X86_64) random <<= 32UL; random |= static_cast<uintptr_t>(ranval(&s_ranctx)); // This address mask gives a low liklihood of address space collisions. // We handle the situation gracefully if there is a collision. #if OS(WIN) // 64-bit Windows has a bizarrely small 8TB user address space. // Allocates in the 1-5TB region. // TODO(cevans): I think Win 8.1 has 47-bits like Linux. random &= 0x3ffffffffffUL; random += 0x10000000000UL; #elif defined(MEMORY_TOOL_REPLACES_ALLOCATOR) // This range is copied from the TSan source, but works for all tools. random &= 0x007fffffffffUL; random += 0x7e8000000000UL; #else // Linux and OS X support the full 47-bit user space of x64 processors. random &= 0x3fffffffffffUL; #endif #elif CPU(ARM64) // ARM64 on Linux has 39-bit user space. random &= 0x3fffffffffUL; random += 0x1000000000UL; #else // !CPU(X86_64) && !CPU(ARM64) #if OS(WIN) // On win32 host systems the randomization plus huge alignment causes // excessive fragmentation. Plus most of these systems lack ASLR, so the // randomization isn't buying anything. In that case we just skip it. // TODO(jschuh): Just dump the randomization when HE-ASLR is present. static BOOL isWow64 = -1; if (isWow64 == -1 && !IsWow64Process(GetCurrentProcess(), &isWow64)) isWow64 = FALSE; if (!isWow64) return nullptr; #endif // OS(WIN) // This is a good range on Windows, Linux and Mac. // Allocates in the 0.5-1.5GB region. random &= 0x3fffffff; random += 0x20000000; #endif // CPU(X86_64) random &= kPageAllocationGranularityBaseMask; return reinterpret_cast<void*>(random); }
// Called at the beginning of each listening window for transmitting to
// the tag.
static void ranging_listening_window_task () {
	// Check if we are done transmitting to the tag.
	// Ideally we never get here, as an ack from the tag will cause us to stop
	// cycling through listening windows and put us back into a ready state.
	if (_ranging_listening_window_num == NUM_RANGING_CHANNELS) {
		// Go back to IDLE
		_state = ASTATE_IDLE;
		// Stop the timer for the window
		timer_stop(_anchor_timer);
		// Restart being an anchor
		oneway_anchor_start();
	} else {
		// Setup the channel and antenna settings
		oneway_set_ranging_listening_window_settings(ANCHOR, _ranging_listening_window_num, pp_anc_final_pkt.final_antenna);

		// Prepare the outgoing packet to send back to the
		// tag with our TOAs.
		pp_anc_final_pkt.ieee154_header_unicast.seqNum++;
		const uint16_t frame_len = sizeof(struct pp_anc_final);
		// const uint16_t frame_len = sizeof(struct pp_anc_final) - (sizeof(uint64_t)*NUM_RANGING_BROADCASTS);
		dwt_writetxfctrl(frame_len, 0);

		// Pick a slot to respond in. Generate a random number and mod it
		// by the number of slots
		uint8_t slot_num = ranval(&_prng_state) % (_ranging_operation_config.anchor_reply_window_in_us /
		                                           _ranging_operation_config.anchor_reply_slot_time_in_us);

		// Come up with the time to send this packet back to the
		// tag based on the slot we picked.
		uint32_t delay_time = dwt_readsystimestamphi32() +
			DW_DELAY_FROM_US(ANC_FINAL_INITIAL_DELAY_HACK_VALUE +
				(slot_num*_ranging_operation_config.anchor_reply_slot_time_in_us));

		// Clear the low bit; presumably the radio ignores it for delayed
		// TX times — TODO confirm against the DW1000 manual.
		delay_time &= 0xFFFFFFFE;

		// Record the outgoing time in the packet. Do not take calibration into
		// account here, as that is done on all of the RX timestamps.
		// The <<8 shift presumably widens the 32-bit high-order timestamp
		// to the device's 40-bit time units — verify.
		pp_anc_final_pkt.dw_time_sent = (((uint64_t) delay_time) << 8);

		// Set the packet to be transmitted later.
		dwt_setdelayedtrxtime(delay_time);

		// Send the response packet
		// TODO: handle if starttx errors. I'm not sure what to do about it,
		// other than just wait for the next slot.
		// NOTE(review): dwt_starttx is issued BEFORE the antenna delay and
		// payload are written; this only works because the TX is delayed —
		// confirm the data is guaranteed to land before delay_time elapses.
		dwt_starttx(DWT_START_TX_DELAYED);
		dwt_settxantennadelay(DW1000_ANTENNA_DELAY_TX);
		dwt_writetxdata(frame_len, (uint8_t*) &pp_anc_final_pkt, 0);

		_ranging_listening_window_num++;
	}
}
// Fill dst with n characters drawn uniformly (modulo bias aside) from
// dict and NUL-terminate it. dst must have room for n+1 bytes; dict must
// be a non-empty NUL-terminated string. Returns dst for chaining.
static char *randstr(ranctx *rnd, char *dst, size_t n, const char *dict) {
    const size_t alphabet = strlen(dict);
    size_t i;
    // One PRNG draw per output character, same order as before.
    for( i = 0; i < n; i++ ) {
        dst[i] = dict[ranval(rnd) % alphabet];
    }
    dst[n] = 0;
    return dst;
}
// Called at the beginning of each listening window for transmitting to // the tag. static void ranging_listening_window_task () { // Check if we are done transmitting to the tag. // Ideally we never get here, as an ack from the tag will cause us to stop // cycling through listening windows and put us back into a ready state. if (_ranging_listening_window_num == NUM_RANGING_CHANNELS) { // Go back to IDLE _state = ASTATE_IDLE; // Stop the timer for the window timer_stop(_ranging_broadcast_timer); } else { // Setup the channel and antenna settings dw1000_set_ranging_listening_window_settings(ANCHOR, _ranging_listening_window_num, pp_anc_final_pkt.final_antenna, FALSE); // Prepare the outgoing packet to send back to the // tag with our TOAs. pp_anc_final_pkt.ieee154_header_unicast.seqNum++; const uint16_t frame_len = sizeof(struct pp_anc_final); dwt_writetxfctrl(frame_len, 0); // Pick a slot to respond in. Generate a random number and mod it // by the number of slots uint8_t slot_num = ranval(&_prng_state) % (_ranging_operation_config.anchor_reply_window_in_us / _ranging_operation_config.anchor_reply_slot_time_in_us); // Come up with the time to send this packet back to the // tag based on the slot we picked. uint32_t delay_time = dwt_readsystimestamphi32() + DW_DELAY_FROM_US(ANC_FINAL_INITIAL_DELAY_HACK_VALUE + (slot_num*_ranging_operation_config.anchor_reply_slot_time_in_us)); delay_time &= 0xFFFFFFFE; pp_anc_final_pkt.dw_time_sent = delay_time; dwt_setdelayedtrxtime(delay_time); // Send the response packet int err = dwt_starttx(DWT_START_TX_DELAYED); dwt_settxantennadelay(DW1000_ANTENNA_DELAY_TX); dwt_writetxdata(frame_len, (uint8_t*) &pp_anc_final_pkt, 0); _ranging_listening_window_num++; } }
// Verifies aa_map behavior when no value-copy function is supplied at
// creation: adds 50 random int64 keys (value pointer passed but never
// copied), immediately alters each entry, then enumerates and destroys
// the map. Output goes to stdout for golden-file comparison.
void test_aa_map_no_val_copy_1(void) {

    char mem[aa_map_size];

    struct aa_map *m = aa_map_create( sizeof(mem)
                                    , mem
                                    , sizeof(int64_t)
                                    , sizeof(int64_t)
                                    , __i64_cmp
                                    , __i64_cpy
                                    , 0 // NO VALUE COPY FUNCTION
                                    , 0
                                    , __alloc
                                    , __dealloc );

    ranctx rctx;
    raninit(&rctx, 0xDEADBEEF);

    const size_t N = 50;

    size_t i = 0;
    for(; i < N; i++ ) {
        int64_t k = ranval(&rctx) % 100000;
        // BUGFIX: the original used "%ld" with an int64_t argument, which
        // is undefined behavior on platforms where int64_t is long long
        // (32-bit Linux, Windows). Cast to long long and use "%lld";
        // printed output is unchanged where the old code happened to work.
        fprintf( stdout
               , "added %lld? %s\n"
               , (long long)k
               , aa_map_add(m, &k, &k) ? "yes" : "no" );
        aa_map_alter(m, true, &k, 0, __no_val_copy_1_alter);
    }

    aa_map_enum(m, 0, __no_val_copy_1_print);

    aa_map_destroy(m);
}
// Builds 20 hash tables whose headers are carved out of a single static
// memory pool, hammers them with random uint32 inserts, then prints
// per-table statistics and tears everything down.
void test_static_mem_pool_1(void) {

    ranctx rctx;
    raninit(&rctx, 0x128e437);

    struct static_mem_pool pool;

    struct hash *hs[20] = { 0 };
    const size_t HSIZE = sizeof(hs)/sizeof(hs[0]);

    // One 8K region, minus the pool's own bookkeeping header.
    void *pp = static_mem_pool_init( &pool
                                   , 8192 - sizeof(struct static_mem_pool)
                                   , 0
                                   , nomem
                                   , 0
                                   , 0
                                   , 0 );

    if( !pp ) {
        fprintf(stderr, "Can't setup memory pool\n");
        return;
    }

    for( size_t idx = 0; idx < HSIZE; idx++ ) {
        hs[idx] = hash_create( hash_size
                             , static_mem_pool_alloc(&pool, hash_size)
                             , sizeof(uint32_t)
                             , sizeof(uint32_t)
                             , 32
                             , uint32_hash
                             , uint32_eq
                             , uint32_cpy
                             , uint32_cpy
                             , 0
                             , __alloc
                             , __dealloc );
    }

    // Deterministic insert storm: the ranval call order (table index,
    // key, value) must stay fixed for reproducible output.
    for( size_t op = 0; op < 100500; op++ ) {
        size_t   hnum = ranval(&rctx) % HSIZE;
        uint32_t k    = ranval(&rctx) % 1000001;
        uint32_t v    = ranval(&rctx) % 99999999;
        hash_add(hs[hnum], &k, &v);
    }

    for( size_t idx = 0; idx < HSIZE; idx++ ) {
        size_t cap = 0, used = 0, cls = 0, maxb = 0;
        hash_stats(hs[idx], &cap, &used, &cls, &maxb);
        fprintf( stdout
               , "\nhash#%zu\n"
                 "capacity: %zu\n"
                 "used: %zu\n"
                 "collisions (avg): %zu\n"
                 "max. row: %zu\n"
               , idx
               , cap
               , used
               , cls
               , maxb );
        hash_destroy(hs[idx]);
    }

    static_mem_pool_destroy(&pool);
}
// Builds a map of 1000 random string key/value pairs, mirrors it into a
// second map via enumeration, compares lookup costs between the two, then
// filters the mirror and measures average comparisons for lookups of the
// surviving keys back in the original map.
void test_aa_map_arbitrary_kv_3(void) {

    char mem[aa_map_size];
    // BUGFIX: the original created both maps over the same `mem` buffer,
    // so the second aa_map_create clobbered the first map's header and the
    // m-vs-m2 comparison was meaningless. m2 gets its own storage.
    char mem2[aa_map_size];

    struct aa_map *m = aa_map_create( sizeof(mem)
                                    , mem
                                    , ARBITRARY_LEN
                                    , ARBITRARY_LEN
                                    , __s32_cmp
                                    , __s32_cpy
                                    , __s32_cpy
                                    , 0
                                    , __alloc
                                    , __dealloc );

    struct aa_map *m2 = aa_map_create( sizeof(mem2)
                                     , mem2
                                     , ARBITRARY_LEN
                                     , ARBITRARY_LEN
                                     , __s32_cmp
                                     , __s32_cpy
                                     , __s32_cpy
                                     , 0
                                     , __alloc
                                     , __dealloc );

    ranctx rctx;
    raninit(&rctx, 0xDEADBEEF);

    const size_t N = 1000;
    size_t i = 0;
    const char dict[] = "qwertyuiopasdfghjklzxcvbnm"
                        "QWERTYUIOPASDFGHJKLZXCVBNM1234567890";

    for(; i < N; i++ ) {
        size_t ln1 = 1 + ranval(&rctx) % (ARBITRARY_LEN-2);
        size_t ln2 = 1 + ranval(&rctx) % (ARBITRARY_LEN-2);
        char k[ARBITRARY_LEN];
        char v[ARBITRARY_LEN];
        randstr(&rctx, k, ln1, dict);
        randstr(&rctx, v, ln2, dict);
        aa_map_add(m, k, v);
    }

    // Re-insert everything from m into m2 in enumeration order.
    aa_map_enum(m, m2, __add_to_another);

    __s32_cpy_n = 0;
    __s32_cmp_n = 0;

    fprintf(stdout, "\n");

    char ks[] = "SOME STRING";
    void *v = aa_map_find(m, ks);
    fprintf(stdout
           , "found '%s' in m: %s, cmp: %zu\n"
           , ks
           , v ? (char*)v : "no"
           , __s32_cmp_n );

    __s32_cpy_n = 0;
    __s32_cmp_n = 0;

    v = aa_map_find(m2, ks);
    fprintf(stdout
           , "found '%s' in m2: %s, cmp: %zu\n"
           , ks
           , v ? (char*)v : "no"
           , __s32_cmp_n );

    // Keep only values shorter than 4 characters in the mirror.
    size_t n = 4;
    aa_map_filter(m2, &n, __s32_shorter);
    aa_map_enum(m2, 0, __s32_print);

    __s32_cpy_n = 0;
    __s32_cmp_n = 0;

    struct lookup_kv3_cc cc = { .m = m, .n = 0, .match = 0 };
    aa_map_enum(m2, &cc, __lookup_kv_3);

    // BUGFIX: guard the average against cc.n == 0 (filter may leave the
    // mirror empty), which previously divided by zero.
    fprintf( stdout
           , "lookup in m, %zu, found %zu, avg. cmp %zu (%zu)\n"
           , cc.n
           , cc.match
           , cc.n ? __s32_cmp_n / cc.n : 0
           , __s32_cmp_n );

    aa_map_destroy(m);
    aa_map_destroy(m2);
}

// Alter callback: on insert (n == true) copy the context string into the
// value slot; on update lower-case the existing value in place.
static void __kv_4_alter(void *c, void *k, void *v, bool n) {
    if( n ) {
        __s32_cpy(v, c);
    } else {
        char *p = v;
        char *pe = p + ARBITRARY_LEN;
        // BUGFIX: bounds check must come before the dereference; the
        // original `*p && p < pe` read one byte past the buffer when the
        // value was not NUL-terminated within ARBITRARY_LEN.
        for(; p < pe && *p; p++ ) {
            *p = tolower(*p);
        }
    }
}

// Alter callback: upper-cases an existing value in place; does nothing
// for newly inserted entries.
static void __kv_4_alter_up(void *c, void *k, void *v, bool n) {
    if( !n ) {
        char *p = v;
        char *pe = p + ARBITRARY_LEN;
        // BUGFIX: same operand-order fix as __kv_4_alter.
        for(; p < pe && *p; p++ ) {
            *p = toupper(*p);
        }
    }
}

// Drives the two alter callbacks above: each random pair is inserted,
// re-altered (lower-cased), then upper-cased via the update-only callback;
// the map is then printed, filtered, wiped and destroyed.
void test_aa_map_arbitrary_kv_4(void) {

    char mem[aa_map_size];

    struct aa_map *m = aa_map_create( sizeof(mem)
                                    , mem
                                    , ARBITRARY_LEN
                                    , ARBITRARY_LEN
                                    , __s32_cmp
                                    , __s32_cpy
                                    , __s32_cpy
                                    , 0
                                    , __alloc
                                    , __dealloc );

    ranctx rctx;
    raninit(&rctx, 0xDEADBEEF);

    const size_t N = 20;
    size_t i = 0;
    const char dict[] = "qwertyuiopasdfghjklzxcvbnm"
                        "QWERTYUIOPASDFGHJKLZXCVBNM1234567890";

    for(; i < N; i++ ) {
        size_t ln1 = 1 + ranval(&rctx) % (ARBITRARY_LEN-2);
        size_t ln2 = 1 + ranval(&rctx) % (ARBITRARY_LEN-2);
        char k[ARBITRARY_LEN];
        char v[ARBITRARY_LEN];
        randstr(&rctx, k, ln1, dict);
        randstr(&rctx, v, ln2, dict);
        // First call inserts, second call hits the update path
        // (lower-case), third upper-cases via the update-only callback.
        aa_map_alter(m, true, k, v, __kv_4_alter);
        aa_map_alter(m, true, k, v, __kv_4_alter);
        aa_map_alter(m, false, k, v, __kv_4_alter_up);
    }

    aa_map_enum(m, 0, __s32_print);

    fprintf(stdout, "filtering\n");

    size_t n = 8;
    aa_map_filter(m, &n, __s32_shorter);
    aa_map_enum(m, 0, __s32_print);

    fprintf(stdout, "wiping\n");

    // Null predicate: drop every entry.
    aa_map_filter(m, 0, 0);
    aa_map_enum(m, 0, __s32_print);

    aa_map_destroy(m);
}
// "Clinical" AA-tree stress: inserts keys 0..9999 in strictly ascending
// order (the classic worst case for unbalanced trees), then performs
// 10000 random lookups, reporting the maximum comparison count observed
// for each phase.
void test_aa_tree_clinical_1(void) {

    ranctx rctx;
    raninit(&rctx, 0xDEADBEEF);

    char mem[aa_tree_size];

    fprintf(stdout, "clinical case\n");

    struct aa_tree *t = aa_tree_create( sizeof(mem)
                                      , mem
                                      , sizeof(uint32_t)
                                      , __u32_cmp_stat
                                      , __u32_cpy
                                      , 0
                                      , __alloc
                                      , __dealloc );

    const size_t N = 10000;

    size_t i = 0;
    size_t max_cmp = 0;

    // Phase 1: sequential inserts; track the worst per-insert cmp count.
    for(; i < N; i++ ) {
        uint32_t key = i;
        __cmp_num = 0;
        aa_tree_insert(t, &key);
        if( __cmp_num > max_cmp ) {
            max_cmp = __cmp_num;
        }
    }

    fprintf( stdout
           , "inserted %zu, max. cmp: %zu ops\n"
           , i
           , max_cmp );

    // Phase 2: random lookups; track hits and the worst cmp count.
    max_cmp = 0;
    size_t found = 0;
    for( i = 0; i < N; i++ ) {
        uint32_t key = ranval(&rctx) % N;
        __cmp_num = 0;
        if( aa_tree_find(t, &key) ) {
            found++;
        }
        if( __cmp_num > max_cmp ) {
            max_cmp = __cmp_num;
        }
    }

    fprintf( stdout
           , "found %zu of %zu, max. cmp: %zu ops\n"
           , found
           , i
           , max_cmp );

    aa_tree_destroy(t,0,0);
}