int fio_start_gtod_thread(void) { struct fio_mutex *mutex; pthread_attr_t attr; int ret; mutex = fio_mutex_init(FIO_MUTEX_LOCKED); if (!mutex) return 1; pthread_attr_init(&attr); pthread_attr_setstacksize(&attr, 2 * PTHREAD_STACK_MIN); ret = pthread_create(>od_thread, &attr, gtod_thread_main, mutex); pthread_attr_destroy(&attr); if (ret) { log_err("Can't create gtod thread: %s\n", strerror(ret)); goto err; } ret = pthread_detach(gtod_thread); if (ret) { log_err("Can't detach gtod thread: %s\n", strerror(ret)); goto err; } dprint(FD_MUTEX, "wait on startup_mutex\n"); fio_mutex_down(mutex); dprint(FD_MUTEX, "done waiting on startup_mutex\n"); err: fio_mutex_remove(mutex); return ret; }
void file_hash_init(void *ptr) { unsigned int i; file_hash = ptr; for (i = 0; i < HASH_BUCKETS; i++) INIT_FLIST_HEAD(&file_hash[i]); hash_lock = fio_mutex_init(1); }
/*
 * Allocate the global file hash from the shared memory allocator,
 * reset every bucket to an empty list, and set up the hash lock plus
 * the bloom filter used alongside the hash.
 */
void file_hash_init(void)
{
	unsigned int bucket;

	file_hash = smalloc(file_hash_size);

	for (bucket = 0; bucket < HASH_BUCKETS; bucket++)
		INIT_FLIST_HEAD(&file_hash[bucket]);

	hash_lock = fio_mutex_init(FIO_MUTEX_UNLOCKED);
	file_bloom = bloom_new(BLOOM_SIZE);
}
static bool add_pool(struct pool *pool, unsigned int alloc_size) { int bitmap_blocks; int mmap_flags; void *ptr; if (nr_pools == MAX_POOLS) return false; #ifdef SMALLOC_REDZONE alloc_size += sizeof(unsigned int); #endif alloc_size += sizeof(struct block_hdr); if (alloc_size < INITIAL_SIZE) alloc_size = INITIAL_SIZE; /* round up to nearest full number of blocks */ alloc_size = (alloc_size + SMALLOC_BPL - 1) & ~(SMALLOC_BPL - 1); bitmap_blocks = alloc_size / SMALLOC_BPL; alloc_size += bitmap_blocks * sizeof(unsigned int); pool->mmap_size = alloc_size; pool->nr_blocks = bitmap_blocks; pool->free_blocks = bitmap_blocks * SMALLOC_BPB; mmap_flags = OS_MAP_ANON; #ifdef CONFIG_ESX mmap_flags |= MAP_PRIVATE; #else mmap_flags |= MAP_SHARED; #endif ptr = mmap(NULL, alloc_size, PROT_READ|PROT_WRITE, mmap_flags, -1, 0); if (ptr == MAP_FAILED) goto out_fail; pool->map = ptr; pool->bitmap = (void *) ptr + (pool->nr_blocks * SMALLOC_BPL); memset(pool->bitmap, 0, bitmap_blocks * sizeof(unsigned int)); pool->lock = fio_mutex_init(FIO_MUTEX_UNLOCKED); if (!pool->lock) goto out_fail; nr_pools++; return true; out_fail: log_err("smalloc: failed adding pool\n"); if (pool->map) munmap(pool->map, pool->mmap_size); return false; }
static int add_pool(struct pool *pool, unsigned int alloc_size) { int bitmap_blocks; void *ptr; #ifdef SMALLOC_REDZONE alloc_size += sizeof(unsigned int); #endif alloc_size += sizeof(struct block_hdr); if (alloc_size < INITIAL_SIZE) alloc_size = INITIAL_SIZE; /* round up to nearest full number of blocks */ alloc_size = (alloc_size + SMALLOC_BPL - 1) & ~(SMALLOC_BPL - 1); bitmap_blocks = alloc_size / SMALLOC_BPL; alloc_size += bitmap_blocks * sizeof(unsigned int); pool->mmap_size = alloc_size; pool->nr_blocks = bitmap_blocks; pool->free_blocks = bitmap_blocks * SMALLOC_BPB; ptr = mmap(NULL, alloc_size, PROT_READ|PROT_WRITE, MAP_SHARED | OS_MAP_ANON, -1, 0); if (ptr == MAP_FAILED) goto out_fail; memset(ptr, 0, alloc_size); pool->map = ptr; pool->bitmap = (void *) ptr + (pool->nr_blocks * SMALLOC_BPL); pool->lock = fio_mutex_init(FIO_MUTEX_UNLOCKED); if (!pool->lock) goto out_fail; nr_pools++; return 0; out_fail: fprintf(stderr, "smalloc: failed adding pool\n"); if (pool->map) munmap(pool->map, pool->mmap_size); return 1; }
/* * Adds a job to the list of things todo. Sanitizes the various options * to make sure we don't have conflicts, and initializes various * members of td. */ static int add_job(struct thread_data *td, const char *jobname, int job_add_num) { const char *ddir_str[] = { NULL, "read", "write", "rw", NULL, "randread", "randwrite", "randrw" }; unsigned int i; char fname[PATH_MAX]; int numjobs, file_alloced; /* * the def_thread is just for options, it's not a real job */ if (td == &def_thread) return 0; /* * if we are just dumping the output command line, don't add the job */ if (dump_cmdline) { put_job(td); return 0; } if (profile_td_init(td)) goto err; if (ioengine_load(td)) goto err; if (td->o.use_thread) nr_thread++; else nr_process++; if (td->o.odirect) td->io_ops->flags |= FIO_RAWIO; file_alloced = 0; if (!td->o.filename && !td->files_index && !td->o.read_iolog_file) { file_alloced = 1; if (td->o.nr_files == 1 && exists_and_not_file(jobname)) add_file(td, jobname); else { for (i = 0; i < td->o.nr_files; i++) { sprintf(fname, "%s.%d.%d", jobname, td->thread_number, i); add_file(td, fname); } } } if (fixup_options(td)) goto err; flow_init_job(td); /* * IO engines only need this for option callbacks, and the address may * change in subprocesses. 
*/ if (td->eo) *(struct thread_data **)td->eo = NULL; if (td->io_ops->flags & FIO_DISKLESSIO) { struct fio_file *f; for_each_file(td, f, i) f->real_file_size = -1ULL; } td->mutex = fio_mutex_init(0); td->ts.clat_percentiles = td->o.clat_percentiles; if (td->o.overwrite_plist) memcpy(td->ts.percentile_list, td->o.percentile_list, sizeof(td->o.percentile_list)); else memcpy(td->ts.percentile_list, def_percentile_list, sizeof(def_percentile_list)); td->ts.clat_stat[0].min_val = td->ts.clat_stat[1].min_val = ULONG_MAX; td->ts.slat_stat[0].min_val = td->ts.slat_stat[1].min_val = ULONG_MAX; td->ts.lat_stat[0].min_val = td->ts.lat_stat[1].min_val = ULONG_MAX; td->ts.bw_stat[0].min_val = td->ts.bw_stat[1].min_val = ULONG_MAX; td->ddir_seq_nr = td->o.ddir_seq_nr; if ((td->o.stonewall || td->o.new_group) && prev_group_jobs) { prev_group_jobs = 0; groupid++; } td->groupid = groupid; prev_group_jobs++; if (init_random_state(td, td->rand_seeds, sizeof(td->rand_seeds))) { td_verror(td, errno, "init_random_state"); goto err; } if (setup_rate(td)) goto err; if (td->o.write_lat_log) { setup_log(&td->lat_log, td->o.log_avg_msec); setup_log(&td->slat_log, td->o.log_avg_msec); setup_log(&td->clat_log, td->o.log_avg_msec); } if (td->o.write_bw_log) setup_log(&td->bw_log, td->o.log_avg_msec); if (td->o.write_iops_log) setup_log(&td->iops_log, td->o.log_avg_msec); if (!td->o.name) td->o.name = strdup(jobname); if (!terse_output) { if (!job_add_num) { if (!strcmp(td->io_ops->name, "cpuio")) { log_info("%s: ioengine=cpu, cpuload=%u," " cpucycle=%u\n", td->o.name, td->o.cpuload, td->o.cpucycle); } else { char *c1, *c2, *c3, *c4; c1 = to_kmg(td->o.min_bs[DDIR_READ]); c2 = to_kmg(td->o.max_bs[DDIR_READ]); c3 = to_kmg(td->o.min_bs[DDIR_WRITE]); c4 = to_kmg(td->o.max_bs[DDIR_WRITE]); log_info("%s: (g=%d): rw=%s, bs=%s-%s/%s-%s," " ioengine=%s, iodepth=%u\n", td->o.name, td->groupid, ddir_str[td->o.td_ddir], c1, c2, c3, c4, td->io_ops->name, td->o.iodepth); free(c1); free(c2); free(c3); 
free(c4); } } else if (job_add_num == 1) log_info("...\n"); } /* * recurse add identical jobs, clear numjobs and stonewall options * as they don't apply to sub-jobs */ numjobs = td->o.numjobs; while (--numjobs) { struct thread_data *td_new = get_new_job(0, td, 1); if (!td_new) goto err; td_new->o.numjobs = 1; td_new->o.stonewall = 0; td_new->o.new_group = 0; if (file_alloced) { td_new->o.filename = NULL; td_new->files_index = 0; td_new->files_size = 0; td_new->files = NULL; } job_add_num = numjobs - 1; if (add_job(td_new, jobname, job_add_num)) goto err; } return 0; err: put_job(td); return -1; }