static void *helper_thread_main(void *data)
{
	struct helper_data *hd = data;
	unsigned int msec_to_next_event, next_log;
	struct timeval tv, last_du;
	int ret = 0;

	sk_out_assign(hd->sk_out);

	gettimeofday(&tv, NULL);
	memcpy(&last_du, &tv, sizeof(tv));

	fio_mutex_up(hd->startup_mutex);

	msec_to_next_event = DISK_UTIL_MSEC;
	while (!ret && !hd->exit) {
		struct timespec ts;
		struct timeval now;
		uint64_t since_du;

		/* Sleep until the next scheduled event, or until signalled. */
		timeval_add_msec(&tv, msec_to_next_event);
		ts.tv_sec = tv.tv_sec;
		ts.tv_nsec = tv.tv_usec * 1000;

		pthread_mutex_lock(&hd->lock);
		pthread_cond_timedwait(&hd->cond, &hd->lock, &ts);

		gettimeofday(&now, NULL);

		if (hd->reset) {
			memcpy(&tv, &now, sizeof(tv));
			memcpy(&last_du, &now, sizeof(last_du));
			hd->reset = 0;
		}

		pthread_mutex_unlock(&hd->lock);

		/*
		 * Update disk utilization if a full interval has passed (or
		 * nearly passed); otherwise only wait out the remainder of
		 * the interval before the next update.
		 */
		since_du = mtime_since(&last_du, &now);
		if (since_du >= DISK_UTIL_MSEC || DISK_UTIL_MSEC - since_du < 10) {
			ret = update_io_ticks();
			timeval_add_msec(&last_du, DISK_UTIL_MSEC);
			msec_to_next_event = DISK_UTIL_MSEC;
			if (since_du >= DISK_UTIL_MSEC)
				msec_to_next_event -= (since_du - DISK_UTIL_MSEC);
		} else
			msec_to_next_event = DISK_UTIL_MSEC - since_du;

		if (hd->do_stat) {
			hd->do_stat = 0;
			__show_running_run_stats();
		}

		next_log = calc_log_samples();
		if (!next_log)
			next_log = DISK_UTIL_MSEC;

		msec_to_next_event = min(next_log, msec_to_next_event);

		if (!is_backend)
			print_thread_status();
	}

	fio_writeout_logs(false);

	sk_out_drop();
	return NULL;
}
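/*
 * For illustration only: a minimal sketch of how another thread could ask
 * the helper loop above for an on-demand stats dump. The function name
 * request_stats() is hypothetical and not part of the code above; it simply
 * sets hd->do_stat under hd->lock and signals hd->cond, which is the state
 * the loop checks after waking from pthread_cond_timedwait(). A reset
 * request would work the same way via hd->reset.
 */
static void request_stats(struct helper_data *hd)
{
	pthread_mutex_lock(&hd->lock);
	hd->do_stat = 1;			/* picked up after the next wakeup */
	pthread_cond_signal(&hd->cond);		/* cut the timed wait short */
	pthread_mutex_unlock(&hd->lock);
}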
static void *worker_thread(void *data)
{
	struct submit_worker *sw = data;
	struct workqueue *wq = sw->wq;
	unsigned int eflags = 0, ret = 0;
	FLIST_HEAD(local_list);

	sk_out_assign(sw->sk_out);

	/* Lower our scheduling priority if the workqueue asks for it. */
	if (wq->ops.nice) {
		if (nice(wq->ops.nice) < 0) {
			log_err("workqueue: nice %s\n", strerror(errno));
			ret = 1;
		}
	}

	if (!ret)
		ret = workqueue_init_worker(sw);

	pthread_mutex_lock(&sw->lock);
	sw->flags |= SW_F_RUNNING;
	if (ret)
		sw->flags |= SW_F_ERROR;
	pthread_mutex_unlock(&sw->lock);

	/* Tell the workqueue we are up (or that init failed). */
	pthread_mutex_lock(&wq->flush_lock);
	pthread_cond_signal(&wq->flush_cond);
	pthread_mutex_unlock(&wq->flush_lock);

	if (sw->flags & SW_F_ERROR)
		goto done;

	while (1) {
		pthread_mutex_lock(&sw->lock);

		if (flist_empty(&sw->work_list)) {
			if (sw->flags & SW_F_EXIT) {
				pthread_mutex_unlock(&sw->lock);
				break;
			}

			if (workqueue_pre_sleep_check(sw)) {
				pthread_mutex_unlock(&sw->lock);
				workqueue_pre_sleep(sw);
				pthread_mutex_lock(&sw->lock);
			}

			/*
			 * We dropped and reacquired the lock, check
			 * state again.
			 */
			if (!flist_empty(&sw->work_list))
				goto handle_work;

			if (sw->flags & SW_F_EXIT) {
				pthread_mutex_unlock(&sw->lock);
				break;
			} else if (!(sw->flags & SW_F_IDLE)) {
				sw->flags |= SW_F_IDLE;
				wq->next_free_worker = sw->index;
				if (wq->wake_idle)
					pthread_cond_signal(&wq->flush_cond);
			}

			if (wq->ops.update_acct_fn)
				wq->ops.update_acct_fn(sw);

			pthread_cond_wait(&sw->cond, &sw->lock);
		} else {
handle_work:
			/* Grab all pending work in one go, then process it unlocked. */
			flist_splice_init(&sw->work_list, &local_list);
		}
		pthread_mutex_unlock(&sw->lock);
		handle_list(sw, &local_list);
	}

	if (wq->ops.update_acct_fn)
		wq->ops.update_acct_fn(sw);

done:
	pthread_mutex_lock(&sw->lock);
	sw->flags |= (SW_F_EXITED | eflags);
	pthread_mutex_unlock(&sw->lock);

	sk_out_drop();
	return NULL;
}
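/*
 * For illustration only: a hedged sketch of the producer side that the
 * worker loop above pairs with. submit_to_worker() is a hypothetical name,
 * and it assumes the work item carries an flist node (work->list); the real
 * enqueue path may differ. The essential protocol is visible in
 * worker_thread(): add to sw->work_list under sw->lock, then signal
 * sw->cond so an idle worker wakes from pthread_cond_wait().
 */
static void submit_to_worker(struct submit_worker *sw,
			     struct workqueue_work *work)
{
	pthread_mutex_lock(&sw->lock);
	flist_add_tail(&work->list, &sw->work_list);	/* hand off the work item */
	sw->flags &= ~SW_F_IDLE;			/* worker is no longer idle */
	pthread_mutex_unlock(&sw->lock);

	pthread_cond_signal(&sw->cond);			/* wake the worker if it is sleeping */
}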