/*
 * Sanitizer-style syscall wrappers: each calls the real routine through
 * its _asan_-prefixed alias and, on success, touches the string and
 * buffer arguments so the instrumentation validates them.
 */
ssize_t extattr_get_file(const char *path, int attrnamespace,
                         const char *attrname, void *data, size_t nbytes)
{
    ssize_t ret = _asan_extattr_get_file(path, attrnamespace, attrname,
                                         data, nbytes);

    if (ret != -1) {
        touch_mem(path);
        touch_mem(attrname);
        if (data != NULL)
            ASAN_WRITE_RANGE(data, ret); /* ret bytes were written to data */
    }
    return ret;
}

int extattr_set_link(const char *path, int attrnamespace,
                     const char *attrname, const void *data, size_t nbytes)
{
    int ret = _asan_extattr_set_link(path, attrnamespace, attrname,
                                     data, nbytes);

    if (ret != -1) {
        touch_mem(path);
        touch_mem(attrname);
        if (data != NULL)
            ASAN_READ_RANGE(data, ret); /* ret bytes were consumed from data */
    }
    return ret;
}

int __truncate(const char *path, int pad, off_t length)
{
    int ret = _asan___truncate(path, pad, length);

    touch_mem(path);
    return ret;
}

int chroot(const char *path)
{
    int ret = _asan_chroot(path);

    touch_mem(path);
    return ret;
}

int unmount(const char *path, int flags)
{
    int ret = _asan_unmount(path, flags);

    if (ret == 0)
        touch_mem(path);
    return ret;
}

int __mknod50(const char *path, mode_t mode, dev_t dev)
{
    int ret = _asan___mknod50(path, mode, dev);

    if (ret == 0)
        touch_mem(path);
    return ret;
}

int lchown(const char *path, uid_t owner, gid_t group)
{
    int ret = _asan_lchown(path, owner, group);

    if (ret == 0)
        touch_mem(path);
    return ret;
}
static unsigned int search_mem(void)
{
    record_t key, *found;
    record_t *src, *copy;
    unsigned int chunk;
    size_t copy_size = chunk_size;
    unsigned int i;
    unsigned int state = 0;

    for (i = 0; threads_go == 1; i++) {
        chunk = rand_num(chunks, &state);
        src = mem[chunk];
        /*
         * If we're doing random sizes, we need a non-zero
         * multiple of record size.
         */
        if (random_size)
            copy_size = (rand_num(chunk_size / record_size, &state) + 1)
                        * record_size;
        copy = alloc_mem(copy_size);

        if (touch_pages) {
            touch_mem((char *)copy, copy_size);
        } else {
            if (no_lib_memcpy)
                my_memcpy(copy, src, copy_size);
            else
                memcpy(copy, src, copy_size);

            key = rand_num(copy_size / record_size, &state);

            if (verbose > 2)
                printf("Search key %zu, copy size %zu\n", key, copy_size);
            if (linear)
                found = linear_search(key, copy, copy_size);
            else
                found = bsearch(&key, copy, copy_size / record_size,
                                record_size, compare);

            /* This check mainly catches memory corruption or another bug. */
            if (found == NULL) {
                fprintf(stderr, "Couldn't find key %zu\n", key);
                exit(1);
            }
        } /* end if !touch_pages */

        free_mem(copy, copy_size);
    }
    return (i);
}
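/*
 * linear_search() is referenced above but not part of this excerpt.
 * A minimal sketch of the presumed helper, assuming record_t is an
 * integral type, record_size == sizeof(record_t), and `size` is the
 * byte size of the copied chunk; the body is an assumption, not the
 * benchmark's actual code.
 */
static record_t *linear_search(record_t key, record_t *base, size_t size)
{
    record_t *p;
    record_t *end = base + size / record_size;

    for (p = base; p < end; p++)
        if (*p == key)
            return p;   /* first matching record */
    return NULL;        /* key not present */
}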
int revoke(const char *path)
{
    int ret = _asan_revoke(path);

    if (ret == 0)
        touch_mem(path);
    return ret;
}

int mkfifo(const char *path, mode_t mode)
{
    int ret = _asan_mkfifo(path, mode);

    if (ret == 0)
        touch_mem(path);
    return ret;
}

int rmdir(const char *path)
{
    int ret = _asan_rmdir(path);

    if (ret == 0)
        touch_mem(path);
    return ret;
}

int __stat50(const char *path, struct stat *ub)
{
    int ret = _asan___stat50(path, ub);

    if (ret == 0) {
        touch_mem(path);
        ASAN_WRITE_RANGE(ub, sizeof(struct stat));
    }
    return ret;
}

int extattr_delete_fd(int fd, int attrnamespace, const char *attrname)
{
    int ret = _asan_extattr_delete_fd(fd, attrnamespace, attrname);

    if (ret == 0)
        touch_mem(attrname);
    return ret;
}

int __utimes50(const char *path, const struct timeval times[2])
{
    int ret = _asan___utimes50(path, times);

    if (ret == 0) {
        touch_mem(path);
        if (times != NULL)
            ASAN_READ_RANGE(times, 2 * sizeof(struct timeval));
    }
    return ret;
}
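/*
 * The interceptors above rely on a one-argument touch_mem() helper that
 * is not shown in this excerpt. A minimal sketch of the presumed idea,
 * assuming its only job is to force a read of every byte of a
 * NUL-terminated string argument (terminator included) so the sanitizer
 * validates the whole buffer; ASAN_READ_RANGE is taken from the calls
 * above, the rest is an assumption.
 */
static void touch_mem(const char *s)
{
    size_t len;

    if (s == NULL)
        return;
    for (len = 0; s[len] != '\0'; len++)
        ;                           /* walk to the terminator */
    ASAN_READ_RANGE(s, len + 1);    /* include the NUL byte */
}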
void ndbd_alloc_touch_mem(void *p, size_t sz, volatile Uint32 *watchCounter)
{
  struct NdbThread *thread_ptr[TOUCH_PARALLELISM];
  struct AllocTouchMem touch_mem_struct[TOUCH_PARALLELISM];

  Uint32 tmp = 0;
  if (watchCounter == 0)
  {
    watchCounter = &tmp;
  }

  for (Uint32 i = 0; i < TOUCH_PARALLELISM; i++)
  {
    touch_mem_struct[i].watchCounter = watchCounter;
    touch_mem_struct[i].sz = sz;
    touch_mem_struct[i].p = p;
    touch_mem_struct[i].index = i;

    thread_ptr[i] = NULL;
    if (sz > MIN_START_THREAD_SIZE)
    {
      thread_ptr[i] = NdbThread_Create(touch_mem,
                                       (NDB_THREAD_ARG *)&touch_mem_struct[i],
                                       0,
                                       "touch_thread",
                                       NDB_THREAD_PRIO_MEAN);
    }
    if (thread_ptr[i] == NULL)
    {
      /* Region too small, or thread creation failed: touch inline. */
      touch_mem((void *)&touch_mem_struct[i]);
    }
  }

  for (Uint32 i = 0; i < TOUCH_PARALLELISM; i++)
  {
    void *dummy_status;
    if (thread_ptr[i])
    {
      NdbThread_WaitFor(thread_ptr[i], &dummy_status);
      NdbThread_Destroy(&thread_ptr[i]);
    }
  }
}
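/*
 * Hypothetical sketch of the touch_mem() worker started above; the real
 * body is not in this excerpt. Assumes each of the TOUCH_PARALLELISM
 * workers faults in every TOUCH_PARALLELISM-th page of the region,
 * bumping *watchCounter so a watchdog can observe forward progress.
 * The 4 KiB page size is an assumption.
 */
static void *touch_mem(void *arg)
{
  struct AllocTouchMem *tm = (struct AllocTouchMem *)arg;
  volatile unsigned char *base = (volatile unsigned char *)tm->p;
  const size_t page = 4096;

  for (size_t off = (size_t)tm->index * page;
       off < tm->sz;
       off += (size_t)TOUCH_PARALLELISM * page)
  {
    base[off] = 0;          /* write one byte per page to fault it in */
    (*tm->watchCounter)++;  /* signal liveness to the watchdog */
  }
  return NULL;
}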
int
main (int   argc,
      char *argv[])
{
  guint64 block_size = 512, area_size = 1024 * 1024, n_blocks, repeats = 1000000;

  if (argc > 1)
    block_size = parse_memsize (argv[1]);
  else
    {
      usage();
      block_size = 512;
    }
  if (argc > 2)
    area_size = parse_memsize (argv[2]);
  if (argc > 3)
    repeats = parse_memsize (argv[3]);
  if (argc > 4)
    g_slice_set_config (G_SLICE_CONFIG_COLOR_INCREMENT, parse_memsize (argv[4]));

  /* figure number of blocks from block and area size.
   * divide area by 3 because touch_mem() allocates 3 areas
   */
  n_blocks = area_size / 3 / ALIGN (block_size, sizeof (gsize) * 2);

  /* basic sanity checks */
  if (!block_size || !n_blocks || block_size >= area_size)
    {
      g_printerr ("Invalid arguments: block-size=%" G_GUINT64_FORMAT " memory-size=%" G_GUINT64_FORMAT "\n",
                  block_size, area_size);
      usage();
      return 1;
    }

  g_printerr ("Will allocate and touch %" G_GUINT64_FORMAT " blocks of %" G_GUINT64_FORMAT " bytes (= %" G_GUINT64_FORMAT " bytes) %" G_GUINT64_FORMAT " times with color increment: 0x%08" G_GINT64_MODIFIER "x\n",
              n_blocks, block_size, n_blocks * block_size, repeats,
              (guint64) g_slice_get_config (G_SLICE_CONFIG_COLOR_INCREMENT));

  touch_mem (block_size, n_blocks, repeats);

  return 0;
}
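/*
 * ALIGN() is used above but not defined in this excerpt. A plausible
 * definition, assuming it rounds `size` up to the next multiple of
 * `base`:
 */
#define ALIGN(size, base)  ((base) * (gsize) (((size) + (base) - 1) / (base)))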
/* ------------------------- __malloc_consolidate -------------------------

  __malloc_consolidate is a specialized version of free() that tears
  down chunks held in fastbins. free() itself cannot be used for this
  purpose since, among other things, it might place chunks back onto
  fastbins. So, instead, we need to use a minor variant of the same
  code.

  Also, because this routine needs to be called the first time through
  malloc anyway, it turns out to be the perfect place to trigger
  initialization code.
*/
void attribute_hidden __malloc_consolidate(mstate av)
{
    mfastbinptr*    fb;              /* current fastbin being consolidated */
    mfastbinptr*    maxfb;           /* last fastbin (for loop control) */
    mchunkptr       p;               /* current chunk being consolidated */
    mchunkptr       nextp;           /* next chunk to consolidate */
    mchunkptr       unsorted_bin;    /* bin header */
    mchunkptr       first_unsorted;  /* chunk to link to */
    ustate          unit;            /* ustate that owns the current chunk */

    /* These have same use as in free() */
    mchunkptr       nextchunk;
    size_t          size;
    size_t          nextsize;
    size_t          prevsize;
    int             nextinuse;
    mchunkptr       bck;
    mchunkptr       fwd;

    /* If max_fast is 0, we know that av hasn't yet been initialized,
       in which case do so below. */
    if (av->max_fast != 0) {
        clear_fastchunks(av);

        unsorted_bin = unsorted_chunks(av);

        /*
          Remove each chunk from fast bin and consolidate it, placing it
          then in unsorted bin. Among other reasons for doing this,
          placing in unsorted bin avoids needing to calculate actual bins
          until malloc is sure that chunks aren't immediately going to be
          reused anyway.
        */
        maxfb = &(av->fastbins[fastbin_index(av->max_fast)]);
        fb = &(av->fastbins[0]);
        do {
            if ((p = *fb) != 0) {
                *fb = 0;
                do {
                    check_inuse_chunk(p);
                    nextp = p->fd;

                    /* Slightly streamlined version of consolidation code in free() */
                    size = p->size & ~PREV_INUSE;
                    nextchunk = chunk_at_offset(p, size);
                    nextsize = chunksize(nextchunk);

                    if (!prev_inuse(p)) {
                        prevsize = p->prev_size;
                        size += prevsize;
                        p = chunk_at_offset(p, -((long) prevsize));
                        unlink(p, bck, fwd);
                    }

                    unit = lookup_ustate_by_mem((void*)p);
                    if (nextchunk != unit->unit_top) {
                        nextinuse = inuse_bit_at_offset(nextchunk, nextsize);
                        set_head(nextchunk, nextsize);

                        if (!nextinuse) {
                            size += nextsize;
                            unlink(nextchunk, bck, fwd);
                        }

                        first_unsorted = unsorted_bin->fd;
                        unsorted_bin->fd = p;
                        first_unsorted->bk = p;

                        set_head(p, size | PREV_INUSE);
                        p->bk = unsorted_bin;
                        p->fd = first_unsorted;
                        set_foot(p, size);
                    } else {
                        size += nextsize;
                        set_head(p, size | PREV_INUSE);
                        unit->unit_top = p;
                    }
                } while ((p = nextp) != 0);
            }
        } while (fb++ != maxfb);
    } else {
        if (get_abstate()->mstate_list.num == 0) {
            /* Initialize the abheap state. */
            init_linked_list(&(get_abstate()->mstate_list));
            init_linked_list(&(get_abstate()->ustate_list));
            init_linked_list(&(get_abstate()->mmapped_ustate_list));
            get_abstate()->ab_top = (mchunkptr)(CHANNEL_ADDR);
            /* Allocate the channel heap space. */
            mmap((void *) CHANNEL_ADDR, CHANNEL_SIZE, PROT_READ|PROT_WRITE,
                 MAP_ANONYMOUS|MAP_FIXED|MAP_SHARED, -1, 0);
            touch_mem((void *)CHANNEL_ADDR, CHANNEL_SIZE);
        }
        malloc_init_state(av);
        check_malloc_state();
    }
}
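/*
 * Hypothetical sketch of the touch_mem() used above to prefault the
 * freshly mmap()ed channel; the real helper is not in this excerpt.
 * Writing one byte per page forces the kernel to back the whole range
 * with physical pages up front. The 4096-byte page size is an
 * assumption.
 */
static void touch_mem(void *p, size_t sz)
{
    volatile char *c = (volatile char *) p;
    size_t off;

    for (off = 0; off < sz; off += 4096)
        c[off] = 0;
}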
static void do_random_experiment(FILE* outfile,
                                 int num_cpus, int wss,
                                 int sleep_min, int sleep_max,
                                 int write_cycle,
                                 int sample_count, int best_effort)
{
    int last_cpu, next_cpu, delay, show = 1, i;
    unsigned long preempt_counter = 0;
    unsigned long migration_counter = 0;
    unsigned long counter = 1;

    unsigned long num_pages = wss / getpagesize();
    unsigned long *phys_addrs;

    cycles_t start, stop;
    cycles_t cold, hot1, hot2, hot3, after_resume;

    int *mem;

    if (!num_pages)
        num_pages = 1;

    phys_addrs = malloc(sizeof(long) * num_pages);

    migrate_to(0);
    last_cpu = 0;

    /* prefault and dirty cache */
    reset_arena();

#if defined(__i386__) || defined(__x86_64__)
    if (!best_effort)
        iopl(3);
#endif

    fprintf(outfile,
            "# %5s, %6s, %6s, %6s, %3s, %3s"
            ", %10s, %10s, %10s, %10s, %10s"
            ", %12s, %12s"
            "\n",
            "COUNT", "WCYCLE", "WSS", "DELAY", "SRC", "TGT",
            "COLD", "HOT1", "HOT2", "HOT3", "WITH-CPMD",
            "VIRT ADDR", "PHYS ADDR");

    while (!sample_count ||
           sample_count >= preempt_counter ||
           (num_cpus > 1 && sample_count >= migration_counter)) {
        delay = sleep_min + random() % (sleep_max - sleep_min + 1);
        next_cpu = pick_cpu(last_cpu, num_cpus);

        if (sample_count)
            show = (next_cpu == last_cpu &&
                    sample_count >= preempt_counter) ||
                   (next_cpu != last_cpu &&
                    sample_count >= migration_counter);

        mem = allocate(wss);

#if defined(__i386__) || defined(__x86_64__)
        if (!best_effort)
            cli();
#endif
        /* one cold access, then three hot accesses of the working set */
        start = get_cycles();
        mem[0] = touch_mem(mem, wss, write_cycle);
        stop = get_cycles();
        cold = stop - start;

        start = get_cycles();
        mem[0] = touch_mem(mem, wss, write_cycle);
        stop = get_cycles();
        hot1 = stop - start;

        start = get_cycles();
        mem[0] = touch_mem(mem, wss, write_cycle);
        stop = get_cycles();
        hot2 = stop - start;

        start = get_cycles();
        mem[0] = touch_mem(mem, wss, write_cycle);
        stop = get_cycles();
        hot3 = stop - start;
#if defined(__i386__) || defined(__x86_64__)
        if (!best_effort)
            sti();
#endif

        /* migrate (or stay put), wait, then measure the cost of resuming */
        migrate_to(next_cpu);
        sleep_us(delay);

#if defined(__i386__) || defined(__x86_64__)
        if (!best_effort)
            cli();
#endif
        start = get_cycles();
        mem[0] = touch_mem(mem, wss, write_cycle);
        stop = get_cycles();
#if defined(__i386__) || defined(__x86_64__)
        if (!best_effort)
            sti();
#endif
        after_resume = stop - start;

        /* run, write ratio, wss, delay, from, to, cold, hot1, hot2,
         * hot3, after_resume */
        if (show) {
            fprintf(outfile,
                    " %6ld, %6d, %6d, %6d, %3d, %3d, "
                    "%10" CYCLES_FMT ", "
                    "%10" CYCLES_FMT ", "
                    "%10" CYCLES_FMT ", "
                    "%10" CYCLES_FMT ", "
                    "%10" CYCLES_FMT ", "
                    "%12lu",
                    counter++, write_cycle, wss, delay,
                    last_cpu, next_cpu,
                    cold, hot1, hot2, hot3, after_resume,
                    (unsigned long) mem);
            get_phys_addrs(0,
                           (unsigned long) mem,
                           wss * 1024 + (unsigned long) mem,
                           phys_addrs,
                           wss);
            for (i = 0; i < num_pages; i++)
                fprintf(outfile, ", %12lu", phys_addrs[i]);
            fprintf(outfile, "\n");
        }

        if (next_cpu == last_cpu)
            preempt_counter++;
        else
            migration_counter++;
        last_cpu = next_cpu;
        deallocate(mem);
    }
    free(phys_addrs);
}
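/*
 * Sketch of the touch_mem() being timed above; the actual helper is not
 * part of this excerpt. Assumes wss is the working-set size in KiB and
 * that write_cycle selects the write ratio: every write_cycle-th word is
 * written, the rest are only read (write_cycle == 0 means read-only).
 * Returning the running sum (stored into mem[0] by the caller) keeps the
 * compiler from optimizing the reads away.
 */
static int touch_mem(int *mem, int wss, int write_cycle)
{
    int sum = 0;
    unsigned long i, words = (unsigned long) wss * 1024 / sizeof(int);

    for (i = 0; i < words; i++) {
        if (write_cycle > 0 && i % write_cycle == 0)
            mem[i]++;       /* dirty this word */
        else
            sum += mem[i];  /* read-only access */
    }
    return sum;
}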