/**
 * Map an error number to its human-readable string.
 *
 * Searches the static [table, table_end) range for a matching entry;
 * when the code is unknown, falls back to a generic "Error N" message.
 */
std::string ErrStr (BlErrNo err)
{
    const errcode *entry = std::find_if(table, table_end, is_err(err));
    if (entry != table_end)
        return entry->str;

    // Unknown code: synthesize a generic message.
    std::ostringstream out;
    out << "Error " << err;
    return out.str();
}
/** * merge_pstates - make sure both main trace and baseline have same pstates * @datas: pointer to struct cpuidle_datas for main trace * @baseline: pointer to struct cpuidle_datas for baseline trace * * This function adds "empty" pstate records for frequencies that exist * in main trace but not in baseline trace or vice versa. This makes sure * that the data (with zero hits into state for thusly created entries) * exists in both trace results for all frequencies used by either trace. */ static void merge_pstates(struct cpuidle_datas *datas, struct cpuidle_datas *baseline) { int cpu; int idx; struct cpufreq_pstates *percpu_a, *percpu_b; assert(datas && !is_err(datas)); assert(baseline && !is_err(baseline)); for (cpu = 0; cpu < datas->nrcpus; ++cpu) { percpu_a = &(datas->pstates[cpu]); percpu_b = &(baseline->pstates[cpu]); for (idx = 0; idx < percpu_a->max; ++idx) alloc_pstate(percpu_b, percpu_a->pstate[idx].freq); for (idx = 0; idx < percpu_b->max; ++idx) alloc_pstate(percpu_a, percpu_b->pstate[idx].freq); } }
static struct snobj *handle_add_tc(struct snobj *q) { const char *tc_name; int wid; struct tc_params params; struct tc *c; tc_name = snobj_eval_str(q, "name"); if (!tc_name) return snobj_err(EINVAL, "Missing 'name' field"); if (!ns_is_valid_name(tc_name)) return snobj_err(EINVAL, "'%s' is an invalid name", tc_name); if (ns_name_exists(tc_name)) return snobj_err(EINVAL, "Name '%s' already exists", tc_name); wid = snobj_eval_uint(q, "wid"); if (wid >= MAX_WORKERS) return snobj_err(EINVAL, "'wid' must be between 0 and %d", MAX_WORKERS - 1); if (!is_worker_active(wid)) return snobj_err(EINVAL, "worker:%d does not exist", wid); memset(¶ms, 0, sizeof(params)); strcpy(params.name, tc_name); params.priority = snobj_eval_int(q, "priority"); if (params.priority == DEFAULT_PRIORITY) return snobj_err(EINVAL, "Priority %d is reserved", DEFAULT_PRIORITY); /* TODO */ params.share = 1; params.share_resource = RESOURCE_CNT; c = tc_init(workers[wid]->s, ¶ms); if (is_err(c)) return snobj_err(-ptr_to_err(c), "tc_init() failed"); tc_join(c); return NULL; }
void filewrite(char* filename, char* data) { struct file *filp; mm_segment_t fs; filp = filp_open(filename, o_rdwr|o_append, 0644); if(is_err(filp)) { printk("open error...\n"); return; } fs=get_fs(); set_fs(kernel_ds); filp->f_op->write(filp, data, strlen(data),&filp->f_pos); set_fs(fs); filp_close(filp,null); }
/*
 * Write the C-state name/target-residency table of every online CPU
 * to the stream @f, one "cpuid N:" section per CPU.
 */
void output_cstate_info(FILE *f, struct cpu_topology *topo, int nrcpus)
{
	struct cpuidle_cstates *all_cstates;
	int cpu;
	int state;

	all_cstates = build_cstate_info(nrcpus);
	assert(!is_err(all_cstates));

	for (cpu = 0; cpu < nrcpus; cpu++) {
		/* Offline CPUs have no usable C-state data */
		if (!cpu_is_online(topo, cpu))
			continue;

		fprintf(f, "cpuid %d:\n", cpu);
		for (state = 0; state < MAXCSTATE; state++) {
			write_cstate_info(f,
					all_cstates[cpu].cstate[state].name,
					all_cstates[cpu].cstate[state].target_residency);
		}
	}
}
/**
 * write_backup_file - persist @size bytes of @data to backup file @filename.
 *
 * Returns _UPI_TRUE_ on success, _UPI_FALSE_ on failure (or when the
 * daemon hand-off is still in progress).
 *
 * The mechanism is selected at compile time:
 *  - UG31XX_USE_SHELL_AP_FOR_FILE_OP: delegate to a user-space helper
 *    process via call_usermodehelper.
 *  - UG31XX_USE_DAEMON_AP_FOR_FILE_OP: asynchronous hand-off to a daemon
 *    through shared file-op status bits; an earlier call requests the
 *    write, a later call observes completion.
 *  - otherwise: open the file directly from kernel space and write it.
 *  - CONFIG_ASUS_ENGINEER_MODE: store via Intel UMIP config data instead.
 *
 * NOTE(review): the matching #if for the #endif right after the signature
 * (uG31xx_OS_WINDOWS variant) starts before this excerpt.
 */
extern _upi_bool_ write_backup_file(char *filename, _upi_u8_ *data, _upi_u32_ size)
#endif ///< end of uG31xx_OS_WINDOWS
{
#ifndef uG31xx_BOOT_LOADER
#ifndef CONFIG_ASUS_ENGINEER_MODE
#ifdef UG31XX_USE_SHELL_AP_FOR_FILE_OP
  struct subprocess_info *sub_info;
  char *argv[] = {shell_ap_name, "BACKUP_FILE", "WRITE", filename, NULL};
  char *env[] = {NULL};
  int rtn;

  /* Spawn the shell helper to perform the write in user space */
  sub_info = NULL;
  sub_info = call_usermodehelper_setup(argv[0], argv, env, GFP_ATOMIC);
  if(sub_info == NULL)
  {
    return (_UPI_FALSE_);
  }
  UG31_LOGN("[%s]: call_usermodehelper_setup() done (%d - %d - %d)\n", __func__, (int)sub_info, (int)data, (int)size);

  rtn = call_usermodehelper_exec(sub_info, UMH_WAIT_PROC);
  UG31_LOGN("[%s]: call_usermodehelper_exec() = %d\n", __func__, rtn);
  return ((rtn == 0) ? _UPI_TRUE_ : _UPI_FALSE_);
#else   ///< else of UG31XX_USE_SHELL_AP_FOR_FILE_OP
#ifdef UG31XX_USE_DAEMON_AP_FOR_FILE_OP
  /* Previous kernel request finished -> start a new write request */
  if(get_file_op_status() & UG31XX_KERNEL_FILE_FINISH)
  {
    clear_file_op_status_bit(UG31XX_KERNEL_FILE_EXIST);
    clear_file_op_status_bit(UG31XX_KERNEL_FILE_READ);
    set_file_op_status_bit(UG31XX_KERNEL_FILE_WRITE);
    return (_UPI_FALSE_);
  }

  /* Daemon signalled the write completed -> switch back to read mode */
  if(get_file_op_status() & UG31XX_USER_FILE_WRITE)
  {
    clear_file_op_status_bit(UG31XX_KERNEL_FILE_EXIST);
    clear_file_op_status_bit(UG31XX_KERNEL_FILE_WRITE);
    set_file_op_status_bit(UG31XX_KERNEL_FILE_READ);
    set_file_op_status_bit(UG31XX_KERNEL_FILE_FINISH);
    return (_UPI_TRUE_);
  }

  /* Otherwise request a write and mark the kernel side finished */
  clear_file_op_status_bit(UG31XX_KERNEL_FILE_EXIST);
  clear_file_op_status_bit(UG31XX_KERNEL_FILE_READ);
  set_file_op_status_bit(UG31XX_KERNEL_FILE_WRITE);
  set_file_op_status_bit(UG31XX_KERNEL_FILE_FINISH);
  return (_UPI_FALSE_);
#else   ///< else of UG31XX_USE_DAEMON_AP_FOR_FILE_OP
  struct file *fp;
  _upi_u8_ retry;

  /* Retry the open up to 3 times before giving up */
  retry = 3;
  while(retry)
  {
    fp = filp_open(filename, O_CREAT | O_RDWR, 0644);
    if(!is_err(fp))
    {
      break;
    }
    retry = retry - 1;
  }
  if(retry == 0)
  {
    return (_UPI_FALSE_);
  }

  /// [AT-PM] : Write data to file ; 02/21/2013
  write_file(fp, data, size);
  filp_close(fp, _UPI_NULL_);
#endif  ///< end of UG31XX_USE_DAEMON_AP_FOR_FILE_OP
#endif  ///< end of UG31XX_USE_SHELL_AP_FOR_FILE_OP
#else
  /* CONFIG_ASUS_ENGINEER_MODE: persist via Intel UMIP config data */
  int backup_tag = BACKUP_BATTERY_KEY;  /* NOTE(review): set but never used in this branch */
  if (ug31xx_save_config_data("ug31xx", data, size))
  {
    UG31_LOGE("[%s]: fail to write Intel UMIP data\n", __func__);
    return (_UPI_FALSE_);
  }
  return (_UPI_TRUE_);
#endif
#endif  ///< end of uG31xx_BOOT_LOADER
  return (_UPI_TRUE_);
}
/**
 * is_file_exist - check whether the backup file @filename exists.
 *
 * Returns _UPI_TRUE_ when the file (or its UMIP backup tag) is present,
 * _UPI_FALSE_ otherwise. Like write_backup_file(), the mechanism is
 * selected at compile time: user-space shell helper, daemon hand-off via
 * status bits (asynchronous), direct kernel filp_open probe, or Intel
 * UMIP backup-tag lookup under CONFIG_ASUS_ENGINEER_MODE.
 *
 * NOTE(review): the matching #if for the #endif right after the signature
 * (uG31xx_OS_WINDOWS variant) starts before this excerpt.
 */
_upi_bool_ is_file_exist(char *filename)
#endif ///< end of uG31xx_OS_WINDOWS
{
#ifndef uG31xx_BOOT_LOADER
#ifndef CONFIG_ASUS_ENGINEER_MODE
#ifdef UG31XX_USE_SHELL_AP_FOR_FILE_OP
  struct subprocess_info *sub_info;
  char *argv[] = {shell_ap_name, "BACKUP_FILE", "EXIST", filename, NULL};
  char *env[] = {NULL};
  int rtn;

  /* Ask the user-space helper whether the file exists */
  sub_info = NULL;
  sub_info = call_usermodehelper_setup(argv[0], argv, env, GFP_ATOMIC);
  if(sub_info == NULL)
  {
    return (_UPI_FALSE_);
  }
  UG31_LOGN("[%s]: call_usermodehelper_setup() done (%d)\n", __func__, (int)sub_info);

  rtn = call_usermodehelper_exec(sub_info, UMH_WAIT_PROC);
  UG31_LOGN("[%s]: call_usermodehelper_exec() = %d\n", __func__, rtn);
  return ((rtn == 0) ? _UPI_TRUE_ : _UPI_FALSE_);
#else   ///< else of UG31XX_USE_SHELL_AP_FOR_FILE_OP
#ifdef UG31XX_USE_DAEMON_AP_FOR_FILE_OP
  /* Previous request finished -> issue a new existence query */
  if(get_file_op_status() & UG31XX_KERNEL_FILE_FINISH)
  {
    set_file_op_status_bit(UG31XX_KERNEL_FILE_EXIST);
    return (_UPI_FALSE_);
  }

  /* Daemon confirmed existence -> move on to reading */
  if(get_file_op_status() & UG31XX_USER_FILE_EXIST)
  {
    clear_file_op_status_bit(UG31XX_KERNEL_FILE_EXIST);
    set_file_op_status_bit(UG31XX_KERNEL_FILE_READ);
    set_file_op_status_bit(UG31XX_KERNEL_FILE_FINISH);
    return (_UPI_TRUE_);
  }

  /* Otherwise request an existence check and mark kernel side finished */
  set_file_op_status_bit(UG31XX_KERNEL_FILE_EXIST);
  set_file_op_status_bit(UG31XX_KERNEL_FILE_FINISH);
  return (_UPI_FALSE_);
#else   ///< else of UG31XX_USE_DAEMON_AP_FOR_FILE_OP
  struct file *fp;
  _upi_u8_ retry;

  /* Probe by opening read-only; retry up to 3 times */
  retry = 3;
  while(retry)
  {
    fp = filp_open(filename, O_RDONLY, 0644);
    if(!is_err(fp))
    {
      break;
    }
    retry = retry - 1;
  }
  if(retry == 0)
  {
    return (_UPI_FALSE_);
  }
  filp_close(fp, _UPI_NULL_);
#endif  ///< end of UG31XX_USE_DAEMON_AP_FOR_FILE_OP
#endif  ///< end of UG31XX_USE_SHELL_AP_FOR_FILE_OP
#else
  /* CONFIG_ASUS_ENGINEER_MODE: existence == valid UMIP backup tag */
  _upi_u8_ backup_tag = 0;
  if (ug31xx_read_backup_tag("ug31xx", &backup_tag))
    return (_UPI_FALSE_);
  if (backup_tag != BACKUP_BATTERY_KEY)
    return (_UPI_FALSE_);
  return (_UPI_TRUE_);
#endif
#endif  ///< end of uG31xx_BOOT_LOADER
  return (_UPI_TRUE_);
}
int main(int argc, char *argv[], char *const envp[]) { struct cpuidle_datas *datas; struct cpuidle_datas *baseline; struct program_options options; int args; double start_ts = 0, end_ts = 0; struct init_pstates *initp = NULL; struct report_ops *output_handler = NULL; struct cpu_topology *cpu_topo = NULL; struct trace_options *saved_trace_options = NULL; void *report_data = NULL; args = getoptions(argc, argv, &options); if (args <= 0) return 1; /* Tracing requires manipulation of some files only accessible * to root */ if ((options.mode == TRACE) && getuid()) { fprintf(stderr, "must be root to run traces\n"); return 1; } output_handler = get_report_ops(options.report_type_name); if (is_err(output_handler)) return 1; if (output_handler->check_options && output_handler->check_options(&options) < 0) return 1; if (output_handler->allocate_report_data) { report_data = output_handler->allocate_report_data(&options); if (is_err(report_data)) return 1; } if (output_handler->check_output(&options, report_data)) return 1; if (options.energy_model_filename && parse_energy_model(&options) < 0) { fprintf(stderr, "can't parse energy model file\n"); return 1; } /* Acquisition time specified means we will get the traces */ if ((options.mode == TRACE) || args < argc) { /* Read cpu topology info from sysfs */ cpu_topo = read_sysfs_cpu_topo(); if (is_err(cpu_topo)) { fprintf(stderr, "Failed to read CPU topology info from" " sysfs.\n"); return 1; } /* Stop tracing (just in case) */ if (idlestat_trace_enable(false)) { fprintf(stderr, "idlestat requires kernel Ftrace and " "debugfs mounted on /sys/kernel/debug\n"); return 1; } saved_trace_options = idlestat_store_trace_options(); if (is_err(saved_trace_options)) return 1; /* * Calculate/verify buffer size and polling trace data * interval. The interval or may may not be used to * transfer data from kernel trace buffer to some * storage media. It is needed for long eventful traces, * but is not preferred. 
If the user does not specify * the values, we will calculate reasonable defaults. */ if (calculate_buffer_parameters(options.duration, &options.tbs)) return 1; /* Initialize the traces for cpu_idle and increase the * buffer size to let 'idlestat' to possibly sleep instead * of acquiring data, hence preventing it to pertubate the * measurements. */ if (idlestat_init_trace(options.tbs.percpu_buffer_size)) goto err_restore_trace_options; /* Remove all the previous traces */ if (idlestat_flush_trace()) goto err_restore_trace_options; /* Get starting timestamp */ if (get_trace_ts(&start_ts) == -1) goto err_restore_trace_options; initp = build_init_pstates(cpu_topo); /* Start the recording */ if (idlestat_trace_enable(true)) goto err_restore_trace_options; /* We want to prevent to begin the acquisition with a cpu in * idle state because we won't be able later to close the * state and to determine which state it was. */ if (idlestat_wake_all()) goto err_restore_trace_options; /* Execute the command or wait a specified delay */ if (execute(argc - args, &argv[args], envp, &options)) goto err_restore_trace_options; /* Wake up all cpus again to account for last idle state */ if (idlestat_wake_all()) goto err_restore_trace_options; /* Stop tracing */ if (idlestat_trace_enable(false)) goto err_restore_trace_options; /* Get ending timestamp */ if (get_trace_ts(&end_ts) == -1) goto err_restore_trace_options; /* At this point we should have some spurious wake up * at the beginning of the traces and at the end (wake * up all cpus and timer expiration for the timer * acquisition). We assume these will be lost in the number * of other traces and could be negligible. 
*/ if (idlestat_store(options.filename, start_ts, end_ts, initp, cpu_topo)) goto err_restore_trace_options; /* Restore original kernel ftrace options */ if (idlestat_restore_trace_options(saved_trace_options)) return 1; /* Discard topology, will be reloaded during trace load */ release_cpu_topo_cstates(cpu_topo); release_cpu_topo_info(cpu_topo); cpu_topo = NULL; } /* Load the idle states information */ datas = idlestat_load(options.filename); if (is_err(datas)) return 1; cpu_topo = datas->topo; if (options.baseline_filename) { baseline = idlestat_load(options.baseline_filename); merge_pstates(datas, baseline); } else { baseline = NULL; } if (is_err(baseline)) return 1; datas->baseline = baseline; assign_baseline_in_topo(datas); if (output_handler->open_report_file(options.outfilename, report_data)) return 1; if (options.display & IDLE_DISPLAY) { output_handler->cstate_table_header(report_data); dump_cpu_topo_info(output_handler, report_data, display_cstates, cpu_topo, 1); output_handler->cstate_table_footer(report_data); } if (options.display & FREQUENCY_DISPLAY) { output_handler->pstate_table_header(report_data); dump_cpu_topo_info(output_handler, report_data, display_pstates, cpu_topo, 0); output_handler->pstate_table_footer(report_data); } if (options.display & WAKEUP_DISPLAY) { output_handler->wakeup_table_header(report_data); dump_cpu_topo_info(output_handler, report_data, display_wakeup, cpu_topo, 1); output_handler->wakeup_table_footer(report_data); } if (options.energy_model_filename) calculate_energy_consumption(cpu_topo); output_handler->close_report_file(report_data); release_init_pstates(initp); release_datas(datas); if (output_handler->release_report_data) output_handler->release_report_data(report_data); return 0; err_restore_trace_options: /* Restore original kernel ftrace options */ idlestat_restore_trace_options(saved_trace_options); return 1; }
static struct cpuidle_datas * idlestat_native_load(const char *filename) { FILE *f; unsigned int nrcpus; struct cpuidle_datas *datas; char *line; char buffer[BUFSIZE]; f = fopen(filename, "r"); if (!f) { fprintf(stderr, "%s: failed to open '%s': %m\n", __func__, filename); return ptrerror(NULL); } /* Version line */ line = fgets(buffer, BUFSIZE, f); if (!line) goto error_close; /* Number of CPUs */ line = fgets(buffer, BUFSIZE, f); if (!line) goto error_close; if (sscanf(buffer, "cpus=%u", &nrcpus) != 1 || nrcpus == 0) { fclose(f); return ptrerror("Cannot load trace file (nrcpus == 0)"); } line = fgets(buffer, BUFSIZE, f); if (!line) goto error_close; datas = calloc(sizeof(*datas), 1); if (!datas) { fclose(f); return ptrerror(__func__); } datas->nrcpus = nrcpus; datas->pstates = build_pstate_info(nrcpus); if (!datas->pstates) goto propagate_error_free_datas; /* Read topology information */ datas->topo = read_cpu_topo_info(f, buffer); if (is_err(datas->topo)) goto propagate_error_free_datas; /* Read C-state information */ datas->cstates = load_and_build_cstate_info(f, buffer, nrcpus, datas->topo); if (is_err(datas->cstates)) goto propagate_error_free_datas; load_text_data_lines(f, buffer, datas); fclose(f); return datas; propagate_error_free_datas: fclose(f); if (!is_err(datas->topo)) release_cpu_topo_info(datas->topo); if (!is_err(datas->cstates)) release_cstate_info(datas->cstates, nrcpus); free(datas); return ptrerror(NULL); error_close: fclose(f); fprintf(stderr, "%s: error or EOF while reading '%s': %m", __func__, filename); return ptrerror(NULL); }
static struct cpuidle_datas * tracecmd_report_load(const char *filename) { FILE *f; unsigned int nrcpus; struct cpuidle_datas *datas; int ret; char *line; char buffer[BUFSIZE]; f = fopen(filename, "r"); if (!f) { fprintf(stderr, "%s: failed to open '%s': %m\n", __func__, filename); return ptrerror(NULL); } /* Version line */ line = fgets(buffer, BUFSIZE, f); if (!line) goto error_close; /* Number of CPUs */ nrcpus = 0; line = fgets(buffer, BUFSIZE, f); ret = sscanf(buffer, "cpus=%u", &nrcpus); if (ret != 1) nrcpus = 0; line = fgets(buffer, BUFSIZE, f); if (!line) goto error_close; if (!nrcpus) { fclose(f); return ptrerror("Cannot load trace file (nrcpus == 0)"); } datas = calloc(sizeof(*datas), 1); if (!datas) { fclose(f); return ptrerror(__func__); } datas->nrcpus = nrcpus; datas->pstates = build_pstate_info(nrcpus); if (!datas->pstates) goto propagate_error_free_datas; datas->topo = read_sysfs_cpu_topo(); if (is_err(datas->topo)) goto propagate_error_free_datas; /* Build C-state information from current host sysfs */ datas->cstates = build_cstate_info(nrcpus); if (is_err(datas->cstates)) goto propagate_error_free_datas; tracecmd_load_text_data_lines(f, buffer, datas); fclose(f); return datas; propagate_error_free_datas: fclose(f); if (!is_err(datas->topo)) release_cpu_topo_info(datas->topo); if (!is_err(datas->cstates)) release_cstate_info(datas->cstates, nrcpus); free(datas); return ptrerror(NULL); error_close: fclose(f); fprintf(stderr, "%s: error or EOF while reading '%s': %m", __func__, filename); return ptrerror(NULL); }