static void fill_secmon_params(struct secmon_params *p,
				void (*bsp_entry)(void *), void *bsp_arg)
{
	const struct spintable_attributes *spin_attrs;

	memset(p, 0, sizeof(*p));

	p->online_cpus = cpus_online();

	spin_attrs = spintable_get_attributes();

	if (spin_attrs != NULL) {
		p->secondary.run = spin_attrs->entry;
		p->secondary.arg = spin_attrs->addr;
	}

	p->bsp.run = bsp_entry;
	p->bsp.arg = bsp_arg;
}
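/*
 * Hypothetical standalone harness for fill_secmon_params(). The struct
 * layouts and stub helpers below are inferred from the field accesses
 * above, not copied from the coreboot headers, and in a single file they
 * would have to precede fill_secmon_params().
 */
#include <stdio.h>
#include <string.h>
#include <stddef.h>

struct cpu_action {
	void (*run)(void *arg);
	void *arg;
};

struct secmon_params {
	size_t online_cpus;
	struct cpu_action bsp;
	struct cpu_action secondary;
};

struct spintable_attributes {
	void (*entry)(void *);
	void *addr;
};

static size_t cpus_online(void)
{
	return 4;	/* stub: pretend four cores are online */
}

static const struct spintable_attributes *spintable_get_attributes(void)
{
	return NULL;	/* stub: no spintable configured */
}

static void bsp_payload(void *arg)
{
	(void)arg;	/* stub BSP entry point */
}

int main(void)
{
	struct secmon_params p;

	fill_secmon_params(&p, bsp_payload, NULL);
	printf("online_cpus=%zu, secondary populated: %s\n",
	       p.online_cpus, p.secondary.run ? "yes" : "no");
	return 0;
}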
void fio_idle_prof_init(void)
{
	int i, ret;
	struct timeval tp;
	struct timespec ts;
	pthread_attr_t tattr;
	struct idle_prof_thread *ipt;

	ipc.nr_cpus = cpus_online();
	ipc.status = IDLE_PROF_STATUS_OK;

	if (ipc.opt == IDLE_PROF_OPT_NONE)
		return;

	if ((ret = pthread_attr_init(&tattr))) {
		log_err("fio: pthread_attr_init %s\n", strerror(ret));
		return;
	}
	if ((ret = pthread_attr_setscope(&tattr, PTHREAD_SCOPE_SYSTEM))) {
		log_err("fio: pthread_attr_setscope %s\n", strerror(ret));
		return;
	}

	ipc.ipts = malloc(ipc.nr_cpus * sizeof(struct idle_prof_thread));
	if (!ipc.ipts) {
		log_err("fio: malloc failed\n");
		return;
	}

	ipc.buf = malloc(ipc.nr_cpus * page_size);
	if (!ipc.buf) {
		log_err("fio: malloc failed\n");
		free(ipc.ipts);
		return;
	}

	/*
	 * Profiling aborts on any single thread failure, since the
	 * result won't be accurate if any CPU is left unused.
	 */
	for (i = 0; i < ipc.nr_cpus; i++) {
		ipt = &ipc.ipts[i];
		ipt->cpu = i;
		ipt->state = TD_NOT_CREATED;
		ipt->data = (unsigned char *)(ipc.buf + page_size * i);

		if ((ret = pthread_mutex_init(&ipt->init_lock, NULL))) {
			ipc.status = IDLE_PROF_STATUS_ABORT;
			log_err("fio: pthread_mutex_init %s\n", strerror(ret));
			break;
		}

		if ((ret = pthread_mutex_init(&ipt->start_lock, NULL))) {
			ipc.status = IDLE_PROF_STATUS_ABORT;
			log_err("fio: pthread_mutex_init %s\n", strerror(ret));
			break;
		}

		if ((ret = pthread_cond_init(&ipt->cond, NULL))) {
			ipc.status = IDLE_PROF_STATUS_ABORT;
			log_err("fio: pthread_cond_init %s\n", strerror(ret));
			break;
		}

		/* make sure all threads are spawned before they start */
		pthread_mutex_lock(&ipt->init_lock);

		/* make sure all threads finish init before profiling starts */
		pthread_mutex_lock(&ipt->start_lock);

		if ((ret = pthread_create(&ipt->thread, &tattr,
					  idle_prof_thread_fn, ipt))) {
			ipc.status = IDLE_PROF_STATUS_ABORT;
			log_err("fio: pthread_create %s\n", strerror(ret));
			break;
		} else
			ipt->state = TD_CREATED;

		if ((ret = pthread_detach(ipt->thread))) {
			/* log the error and let the thread spin */
			log_err("fio: pthread_detach %s\n", strerror(ret));
		}
	}

	/*
	 * Let the good threads continue so that they can exit
	 * if an error occurred on another thread earlier.
	 */
	for (i = 0; i < ipc.nr_cpus; i++) {
		ipt = &ipc.ipts[i];
		pthread_mutex_unlock(&ipt->init_lock);
	}

	if (ipc.status == IDLE_PROF_STATUS_ABORT)
		return;

	/* wait for calibration to finish */
	for (i = 0; i < ipc.nr_cpus; i++) {
		ipt = &ipc.ipts[i];
		pthread_mutex_lock(&ipt->init_lock);
		while ((ipt->state != TD_EXITED) &&
		       (ipt->state != TD_INITIALIZED)) {
			fio_gettime(&tp, NULL);
			ts.tv_sec = tp.tv_sec + 1;
			ts.tv_nsec = tp.tv_usec * 1000;
			pthread_cond_timedwait(&ipt->cond, &ipt->init_lock, &ts);
		}
		pthread_mutex_unlock(&ipt->init_lock);

		/*
		 * If any thread failed to initialize, the other threads
		 * are aborted later, after fio_idle_prof_start.
		 */
		if (ipt->state == TD_EXITED)
			ipc.status = IDLE_PROF_STATUS_ABORT;
	}

	if (ipc.status != IDLE_PROF_STATUS_ABORT)
		calibration_stats();
	else
		ipc.cali_mean = ipc.cali_stddev = 0.0;

	if (ipc.opt == IDLE_PROF_OPT_CALI)
		ipc.status = IDLE_PROF_STATUS_CALI_STOP;
}
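/*
 * For context, a condensed sketch (not the verbatim fio source) of the
 * worker side of the double-lock handshake above. It leans on the same
 * struct idle_prof_thread fields; set_cpu_affinity() and calibrate_unit()
 * name fio helpers assumed to exist, and the profiling loop is elided.
 */
static void *idle_prof_thread_fn(void *data)
{
	struct idle_prof_thread *ipt = data;

	/* blocks until fio_idle_prof_init() has spawned every worker
	 * and released the init_locks */
	pthread_mutex_lock(&ipt->init_lock);

	/* pin to this worker's CPU, then calibrate the idle unit */
	if (set_cpu_affinity(ipt)) {
		ipt->state = TD_EXITED;
		pthread_cond_signal(&ipt->cond);
		pthread_mutex_unlock(&ipt->init_lock);
		return NULL;
	}
	ipt->cali_time = calibrate_unit(ipt->data);

	/* report calibration done to the timedwait loop in init */
	ipt->state = TD_INITIALIZED;
	pthread_cond_signal(&ipt->cond);
	pthread_mutex_unlock(&ipt->init_lock);

	/* parked here until fio_idle_prof_start() unlocks start_lock */
	pthread_mutex_lock(&ipt->start_lock);

	/* ... idle profiling loop would run here ... */
	return NULL;
}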