static void sfree_pool(struct pool *pool, void *ptr) { struct block_hdr *hdr; unsigned int i, idx; unsigned long offset; if (!ptr) return; ptr -= sizeof(*hdr); hdr = ptr; assert(ptr_valid(pool, ptr)); sfree_check_redzone(hdr); offset = ptr - pool->map; i = offset / SMALLOC_BPL; idx = (offset % SMALLOC_BPL) / SMALLOC_BPB; fio_mutex_down(pool->lock); clear_blocks(pool, i, idx, size_to_blocks(hdr->size)); if (i < pool->next_non_full) pool->next_non_full = i; pool->free_blocks += size_to_blocks(hdr->size); fio_mutex_up(pool->lock); }
/*
 * Find a file by name in the global file hash, holding the hash lock
 * for the duration of the lookup. Returns NULL if not present.
 */
struct fio_file *lookup_file_hash(const char *name)
{
	struct fio_file *found;

	fio_mutex_down(hash_lock);
	found = __lookup_file_hash(name);
	fio_mutex_up(hash_lock);

	return found;
}
/*
 * Allocate 'size' bytes from the given pool. Scans the block bitmap
 * starting at pool->next_non_full for a contiguous run of free blocks
 * large enough; returns a pointer into the mapped region, or NULL if
 * no such run exists. All bitmap state is manipulated under the pool
 * lock.
 */
static void *__smalloc_pool(struct pool *pool, size_t size)
{
	size_t nr_blocks;
	unsigned int i;
	unsigned int offset;
	unsigned int last_idx;
	void *ret = NULL;

	fio_mutex_down(pool->lock);

	nr_blocks = size_to_blocks(size);
	/* fast reject: not enough free blocks in total, let alone contiguous */
	if (nr_blocks > pool->free_blocks)
		goto fail;

	i = pool->next_non_full;
	last_idx = 0;
	offset = -1U;
	while (i < pool->nr_blocks) {
		unsigned int idx;

		/* word fully allocated: advance the cached scan start too */
		if (pool->bitmap[i] == -1U) {
			i++;
			pool->next_non_full = i;
			last_idx = 0;
			continue;
		}

		idx = find_next_zero(pool->bitmap[i], last_idx);
		if (!blocks_free(pool, i, idx, nr_blocks)) {
			/* run too short: skip past it, possibly into later words */
			idx += nr_blocks;
			if (idx < SMALLOC_BPI)
				last_idx = idx;
			else {
				last_idx = 0;
				while (idx >= SMALLOC_BPI) {
					i++;
					idx -= SMALLOC_BPI;
				}
			}
			continue;
		}
		/* found a big-enough run; mark it used and note its offset */
		set_blocks(pool, i, idx, nr_blocks);
		offset = i * SMALLOC_BPL + idx * SMALLOC_BPB;
		break;
	}

	/* i < nr_blocks means the scan broke out with a valid offset */
	if (i < pool->nr_blocks) {
		pool->free_blocks -= nr_blocks;
		ret = pool->map + offset;
	}
fail:
	fio_mutex_up(pool->lock);
	return ret;
}
/*
 * Unhash a file, if it is currently on the hash. The hashed flag is
 * cleared and the list entry removed under the hash lock; a file that
 * is not hashed is left untouched.
 */
void remove_file_hash(struct fio_file *f)
{
	fio_mutex_down(hash_lock);

	if (f->flags & FIO_FILE_HASHED) {
		/* a hashed file must be linked into some bucket */
		assert(!flist_empty(&f->hash_list));
		f->flags &= ~FIO_FILE_HASHED;
		flist_del_init(&f->hash_list);
	}

	fio_mutex_up(hash_lock);
}
/*
 * Remove a file from the hash if present. Runs under the hash lock;
 * a file that was never hashed is a no-op.
 */
void remove_file_hash(struct fio_file *f)
{
	fio_mutex_down(hash_lock);

	if (fio_file_hashed(f)) {
		/* being hashed implies membership in a bucket list */
		assert(!flist_empty(&f->hash_list));
		fio_file_clear_hashed(f);
		flist_del_init(&f->hash_list);
	}

	fio_mutex_up(hash_lock);
}
/*
 * Tear down the file hash at exit. Warns if any bucket still holds
 * entries, then drops the hash table pointer and its lock.
 */
void file_hash_exit(void)
{
	unsigned int bucket, has_entries = 0;

	fio_mutex_down(hash_lock);
	for (bucket = 0; bucket < HASH_BUCKETS; bucket++) {
		if (!flist_empty(&file_hash[bucket]))
			has_entries++;
	}
	fio_mutex_up(hash_lock);

	if (has_entries)
		log_err("fio: file hash not empty on exit\n");

	file_hash = NULL;
	fio_mutex_remove(hash_lock);
	hash_lock = NULL;
}
/*
 * Dedicated clock thread: pins itself to the configured gtod CPU,
 * signals the startup mutex, then spins updating the shared clock
 * until no jobs remain.
 */
static void *gtod_thread_main(void *data)
{
	struct fio_mutex *mutex = data;

	fio_setaffinity(gettid(), fio_gtod_cpumask);
	fio_mutex_up(mutex);

	/*
	 * Keep the clock fresh while jobs exist. A busy loop with a CPU
	 * nop is used deliberately: anything that sleeps would cost
	 * precision, at the price of hammering this CPU with
	 * gettimeofday() calls.
	 */
	for (;;) {
		if (!threads)
			break;
		fio_gtod_update();
		nop;
	}

	return NULL;
}
struct fio_file *add_file_hash(struct fio_file *f) { struct fio_file *alias; if (f->flags & FIO_FILE_HASHED) return NULL; INIT_FLIST_HEAD(&f->hash_list); fio_mutex_down(hash_lock); alias = __lookup_file_hash(f->file_name); if (!alias) { f->flags |= FIO_FILE_HASHED; flist_add_tail(&f->hash_list, &file_hash[hash(f->file_name)]); } fio_mutex_up(hash_lock); return alias; }
struct fio_file *add_file_hash(struct fio_file *f) { struct fio_file *alias; if (fio_file_hashed(f)) return NULL; INIT_FLIST_HEAD(&f->hash_list); fio_mutex_down(hash_lock); alias = __lookup_file_hash(f->file_name); if (!alias) { fio_file_set_hashed(f); flist_add_tail(&f->hash_list, &file_hash[hash(f->file_name)]); } fio_mutex_up(hash_lock); return alias; }
static void *helper_thread_main(void *data) { struct helper_data *hd = data; unsigned int msec_to_next_event, next_log; struct timeval tv, last_du; int ret = 0; sk_out_assign(hd->sk_out); gettimeofday(&tv, NULL); memcpy(&last_du, &tv, sizeof(tv)); fio_mutex_up(hd->startup_mutex); msec_to_next_event = DISK_UTIL_MSEC; while (!ret && !hd->exit) { struct timespec ts; struct timeval now; uint64_t since_du; timeval_add_msec(&tv, msec_to_next_event); ts.tv_sec = tv.tv_sec; ts.tv_nsec = tv.tv_usec * 1000; pthread_mutex_lock(&hd->lock); pthread_cond_timedwait(&hd->cond, &hd->lock, &ts); gettimeofday(&now, NULL); if (hd->reset) { memcpy(&tv, &now, sizeof(tv)); memcpy(&last_du, &now, sizeof(last_du)); hd->reset = 0; } pthread_mutex_unlock(&hd->lock); since_du = mtime_since(&last_du, &now); if (since_du >= DISK_UTIL_MSEC || DISK_UTIL_MSEC - since_du < 10) { ret = update_io_ticks(); timeval_add_msec(&last_du, DISK_UTIL_MSEC); msec_to_next_event = DISK_UTIL_MSEC; if (since_du >= DISK_UTIL_MSEC) msec_to_next_event -= (since_du - DISK_UTIL_MSEC); } else { if (since_du >= DISK_UTIL_MSEC) msec_to_next_event = DISK_UTIL_MSEC - (DISK_UTIL_MSEC - since_du); else msec_to_next_event = DISK_UTIL_MSEC; } if (hd->do_stat) { hd->do_stat = 0; __show_running_run_stats(); } next_log = calc_log_samples(); if (!next_log) next_log = DISK_UTIL_MSEC; msec_to_next_event = min(next_log, msec_to_next_event); if (!is_backend) print_thread_status(); } fio_writeout_logs(false); sk_out_drop(); return NULL; }
/* Release the per-pool allocator lock taken by the matching lock helper. */
static inline void pool_unlock(struct pool *pool)
{
	fio_mutex_up(pool->lock);
}
/*
 * Drop the file hash lock. A no-op if the hash lock was never set up
 * (or has already been torn down).
 */
void fio_file_hash_unlock(void)
{
	if (!hash_lock)
		return;

	fio_mutex_up(hash_lock);
}