/*
 * Fill SFLHost_cpu_counters from the AIX perfstat system-wide CPU totals.
 * Returns YES when perfstat_cpu_total() succeeded and *cpu was populated,
 * NO otherwise (on failure *cpu is left untouched).
 */
int readCpuCounters(SFLHost_cpu_counters *cpu) {
  int gotData = NO;
  perfstat_cpu_total_t cpu_total;

  if(perfstat_cpu_total(NULL, &cpu_total, sizeof(cpu_total), 1) != -1) {
    gotData = YES;
    /* loadavg[] is fixed-point, scaled by 1<<SBITS */
    cpu->load_one = (float)cpu_total.loadavg[0]/(float)(1<<SBITS);
    cpu->load_five = (float)cpu_total.loadavg[1]/(float)(1<<SBITS);
    cpu->load_fifteen = (float)cpu_total.loadavg[2]/(float)(1<<SBITS);
    cpu->cpu_num = cpu_total.ncpus;
    /* processorHZ is in Hz; a megahertz is 1000000 Hz.  The previous code
       divided by 1024*1024, under-reporting the clock speed by ~4.8%. */
    cpu->cpu_speed = cpu_total.processorHZ / 1000000; /* MHz */
    /* convert processor ticks to milliseconds -- assumes puser/psys/pidle/
       pwait are counted in units of processorHZ; TODO confirm on SPLPAR */
    cpu->cpu_user = (cpu_total.puser * 1000) / cpu_total.processorHZ;
    SFL_UNDEF_COUNTER(cpu->cpu_nice); /* AIX keeps no "nice" time */
    cpu->cpu_system = (cpu_total.psys * 1000) / cpu_total.processorHZ;
    cpu->cpu_idle = (cpu_total.pidle * 1000) / cpu_total.processorHZ;
    cpu->cpu_wio = (cpu_total.pwait * 1000) / cpu_total.processorHZ;
    cpu->proc_run = cpu_total.runque /* + cpu_total.swpque */;
    if(cpu->proc_run > 0) {
      // subtract myself from the running process count,
      // otherwise it always shows at least 1.  Thanks to
      // Dave Mangot for pointing this out.
      cpu->proc_run--;
    }
    cpu->interrupts = cpu_total.devintrs /* + cpu_total.softintrs */;
    cpu->contexts = cpu_total.pswitch;
    cpu->uptime = cpu_total.lbolt; /* is this in seconds? */
  }
  return gotData;
}
/* sysinfo handler: report the number of online CPUs via AIX perfstat. */
int SYSTEM_CPU_NUM(AGENT_REQUEST *request, AGENT_RESULT *result)
{
#ifdef HAVE_LIBPERFSTAT
	perfstat_cpu_total_t	stats;
	char			*type;

	if (request->nparam > 1)
		return SYSINFO_RET_FAIL;

	type = get_rparam(request, 0);

	/* "online" (the default) is the only accepted value for <type> */
	if (NULL != type && '\0' != *type && 0 != strcmp(type, "online"))
		return SYSINFO_RET_FAIL;

	if (perfstat_cpu_total(NULL, &stats, sizeof(stats), 1) == -1)
		return SYSINFO_RET_FAIL;

	SET_UI64_RESULT(result, stats.ncpus);

	return SYSINFO_RET_OK;
#else
	return SYSINFO_RET_FAIL;
#endif
}
/* sysinfo handler: number of online CPUs (perfstat), with error messages. */
int SYSTEM_CPU_NUM(AGENT_REQUEST *request, AGENT_RESULT *result)
{
#ifdef HAVE_LIBPERFSTAT
	perfstat_cpu_total_t	stats;
	char			*type;

	if (request->nparam > 1)
	{
		SET_MSG_RESULT(result, zbx_strdup(NULL, "Too many parameters."));
		return SYSINFO_RET_FAIL;
	}

	type = get_rparam(request, 0);

	/* "online" (the default) is the only accepted value for <type> */
	if (NULL != type && '\0' != *type && 0 != strcmp(type, "online"))
	{
		SET_MSG_RESULT(result, zbx_strdup(NULL, "Invalid first parameter."));
		return SYSINFO_RET_FAIL;
	}

	if (perfstat_cpu_total(NULL, &stats, sizeof(stats), 1) == -1)
	{
		SET_MSG_RESULT(result, zbx_dsprintf(NULL, "Cannot obtain system information: %s", zbx_strerror(errno)));
		return SYSINFO_RET_FAIL;
	}

	SET_UI64_RESULT(result, stats.ncpus);

	return SYSINFO_RET_OK;
#else
	SET_MSG_RESULT(result, zbx_strdup(NULL, "Agent was compiled without support for Perfstat API."));
	return SYSINFO_RET_FAIL;
#endif
}
/*
 * Fill _cpu with the 1/5/15-minute load averages from AIX perfstat.
 * Returns 0 on success, -1 if perfstat_cpu_total() fails (in which case
 * _cpu is left unmodified).
 */
int get_avg_data(struct cpu_data * _cpu)
{
	perfstat_cpu_total_t cpustats;

	/* the original ignored the return value, so a failure would have
	   published garbage read from the uninitialized structure */
	if (perfstat_cpu_total(NULL, &cpustats, sizeof cpustats, 1) == -1)
		return -1;

	/* loadavg[] is fixed-point, scaled by 1<<SBITS */
	_cpu->one   = cpustats.loadavg[0]/(float)(1<<SBITS);
	_cpu->two   = cpustats.loadavg[1]/(float)(1<<SBITS);
	_cpu->three = cpustats.loadavg[2]/(float)(1<<SBITS);

	return 0;
}
int get_uptime() { perfstat_cpu_total_t cpustats; if (perfstat_cpu_total(NULL, &cpustats, sizeof cpustats, 1)!=1) return 0; /* This is still untested on multi-processor machines. */ return (cpustats.user+cpustats.sys+cpustats.idle+cpustats.wait) /cpustats.ncpus/HZ; }
/*
 * Fill _cpu with the cumulative user/system/idle tick counters from AIX
 * perfstat.  Returns 0 on success, -1 if perfstat_cpu_total() fails (in
 * which case _cpu is left unmodified).
 */
int get_cpu_data(struct cpu_data * _cpu)
{
	perfstat_cpu_total_t cpustats;

	/* the original ignored the return value, so a failure would have
	   published garbage read from the uninitialized structure */
	if (perfstat_cpu_total(NULL, &cpustats, sizeof cpustats, 1) == -1)
		return -1;

	_cpu->u = cpustats.user;
	_cpu->s = cpustats.sys;
	_cpu->i = cpustats.idle;
	_cpu->n = 0; /* AIX doesn't keep nice anywhere. */

	return 0;
}
/* Report the 1/5/15-minute load averages; zeros on perfstat failure. */
void uv_loadavg(double avg[3]) {
  perfstat_cpu_total_t stats;
  int i;

  if (perfstat_cpu_total(NULL, &stats, sizeof(stats), 1) == -1) {
    avg[0] = avg[1] = avg[2] = 0.;
    return;
  }

  /* loadavg[] is fixed-point, scaled by 1 << SBITS */
  for (i = 0; i < 3; i++)
    avg[i] = stats.loadavg[i] / (double)(1 << SBITS);
}
/* Allocate and fill one uv_cpu_info_t per logical CPU using perfstat.
 * On success the caller owns *cpu_infos (and each entry's model string);
 * on failure nothing is left allocated. */
int uv_cpu_info(uv_cpu_info_t** cpu_infos, int* count) {
  perfstat_cpu_total_t ps_total;
  perfstat_cpu_t* ps_cpus;
  perfstat_id_t cpu_id;
  uv_cpu_info_t* info;
  int ncpus;
  int i;

  /* system-wide totals: source of the clock speed and model name */
  if (perfstat_cpu_total(NULL, &ps_total, sizeof(ps_total), 1) == -1)
    return UV_ENOSYS;

  /* a NULL buffer makes perfstat_cpu() report how many CPUs exist */
  ncpus = perfstat_cpu(NULL, NULL, sizeof(perfstat_cpu_t), 0);
  if (ncpus == -1)
    return UV_ENOSYS;

  ps_cpus = (perfstat_cpu_t*) uv__malloc(ncpus * sizeof(perfstat_cpu_t));
  if (ps_cpus == NULL)
    return UV_ENOMEM;

  /* TODO(bnoordhuis) Check uv__strscpy() return value. */
  uv__strscpy(cpu_id.name, FIRST_CPU, sizeof(cpu_id.name));
  if (perfstat_cpu(&cpu_id, ps_cpus, sizeof(perfstat_cpu_t), ncpus) == -1) {
    uv__free(ps_cpus);
    return UV_ENOSYS;
  }

  *cpu_infos = (uv_cpu_info_t*) uv__malloc(ncpus * sizeof(uv_cpu_info_t));
  if (*cpu_infos == NULL) {
    uv__free(ps_cpus);
    return UV_ENOMEM;
  }

  *count = ncpus;

  for (i = 0, info = *cpu_infos; i < ncpus; i++, info++) {
    info->speed = (int)(ps_total.processorHZ / 1000000);  /* Hz -> MHz */
    info->model = uv__strdup(ps_total.description);
    info->cpu_times.user = ps_cpus[i].user;
    info->cpu_times.sys = ps_cpus[i].sys;
    info->cpu_times.idle = ps_cpus[i].idle;
    info->cpu_times.irq = ps_cpus[i].wait;  /* I/O wait stored in irq slot */
    info->cpu_times.nice = 0;               /* not tracked on AIX */
  }

  uv__free(ps_cpus);
  return 0;
}
/* Populate *cpu_infos with one uv_cpu_info_t per logical CPU (AIX perfstat
 * backend).  On success returns uv_ok_ and the caller owns the returned
 * array (plus each entry's strdup()'d model string); on failure an
 * artificial ENOSYS/ENOMEM error is returned and nothing is left
 * allocated. */
uv_err_t uv_cpu_info(uv_cpu_info_t** cpu_infos, int* count) {
  uv_cpu_info_t* cpu_info;
  perfstat_cpu_total_t ps_total;
  perfstat_cpu_t* ps_cpus;
  perfstat_id_t cpu_id;
  int result, ncpus, idx = 0;

  /* system-wide totals: source of the clock speed and model name */
  result = perfstat_cpu_total(NULL, &ps_total, sizeof(ps_total), 1);
  if (result == -1) {
    return uv__new_artificial_error(UV_ENOSYS);
  }

  /* a NULL buffer makes perfstat_cpu() report how many CPUs exist */
  ncpus = result = perfstat_cpu(NULL, NULL, sizeof(perfstat_cpu_t), 0);
  if (result == -1) {
    return uv__new_artificial_error(UV_ENOSYS);
  }

  ps_cpus = (perfstat_cpu_t*) malloc(ncpus * sizeof(perfstat_cpu_t));
  if (!ps_cpus) {
    return uv__new_artificial_error(UV_ENOMEM);
  }

  /* start enumeration from the first CPU; assumes FIRST_CPU fits in
     cpu_id.name -- TODO confirm (unbounded strcpy) */
  strcpy(cpu_id.name, FIRST_CPU);
  result = perfstat_cpu(&cpu_id, ps_cpus, sizeof(perfstat_cpu_t), ncpus);
  if (result == -1) {
    free(ps_cpus);
    return uv__new_artificial_error(UV_ENOSYS);
  }

  *cpu_infos = (uv_cpu_info_t*) malloc(ncpus * sizeof(uv_cpu_info_t));
  if (!*cpu_infos) {
    free(ps_cpus);
    return uv__new_artificial_error(UV_ENOMEM);
  }

  *count = ncpus;

  cpu_info = *cpu_infos;
  while (idx < ncpus) {
    cpu_info->speed = (int)(ps_total.processorHZ / 1000000);  /* Hz -> MHz */
    cpu_info->model = strdup(ps_total.description);
    cpu_info->cpu_times.user = ps_cpus[idx].user;
    cpu_info->cpu_times.sys = ps_cpus[idx].sys;
    cpu_info->cpu_times.idle = ps_cpus[idx].idle;
    cpu_info->cpu_times.irq = ps_cpus[idx].wait;  /* I/O wait in irq slot */
    cpu_info->cpu_times.nice = 0;                 /* not tracked on AIX */
    cpu_info++;
    idx++;
  }

  free(ps_cpus);
  return uv_ok_;
}
/* sysinfo handler: system load average, optionally divided per CPU. */
int SYSTEM_CPU_LOAD(AGENT_REQUEST *request, AGENT_RESULT *result)
{
#ifdef HAVE_LIBPERFSTAT
#if !defined(SBITS)
#	define SBITS 16
#endif
	perfstat_cpu_total_t	stats;
	char			*param;
	double			load;
	int			avg_mode, divide_by_ncpus = 1;

	if (request->nparam > 2)
		return SYSINFO_RET_FAIL;

	param = get_rparam(request, 0);

	if (NULL == param || '\0' == *param || 0 == strcmp(param, "all"))
		divide_by_ncpus = 0;
	else if (0 != strcmp(param, "percpu"))
		return SYSINFO_RET_FAIL;

	param = get_rparam(request, 1);

	if (NULL == param || '\0' == *param || 0 == strcmp(param, "avg1"))
		avg_mode = ZBX_AVG1;
	else if (0 == strcmp(param, "avg5"))
		avg_mode = ZBX_AVG5;
	else if (0 == strcmp(param, "avg15"))
		avg_mode = ZBX_AVG15;
	else
		return SYSINFO_RET_FAIL;

	if (perfstat_cpu_total(NULL, &stats, sizeof(stats), 1) == -1)
		return SYSINFO_RET_FAIL;

	/* loadavg[] is fixed-point, scaled by 1 << SBITS */
	load = (double)stats.loadavg[avg_mode] / (1 << SBITS);

	if (0 != divide_by_ncpus)
	{
		if (stats.ncpus <= 0)
			return SYSINFO_RET_FAIL;

		load /= stats.ncpus;
	}

	SET_DBL_RESULT(result, load);

	return SYSINFO_RET_OK;
#else
	return SYSINFO_RET_FAIL;
#endif
}
/* sysinfo handler: cumulative device-interrupt count from perfstat. */
int SYSTEM_CPU_INTR(AGENT_REQUEST *request, AGENT_RESULT *result)
{
#ifdef HAVE_LIBPERFSTAT
	perfstat_cpu_total_t	stats;

	if (perfstat_cpu_total(NULL, &stats, sizeof(stats), 1) == -1)
		return SYSINFO_RET_FAIL;

	SET_UI64_RESULT(result, (zbx_uint64_t)stats.devintrs);

	return SYSINFO_RET_OK;
#else
	return SYSINFO_RET_FAIL;
#endif
}
/* sysinfo handler: cumulative device-interrupt count, with error messages. */
int SYSTEM_CPU_INTR(AGENT_REQUEST *request, AGENT_RESULT *result)
{
#ifdef HAVE_LIBPERFSTAT
	perfstat_cpu_total_t	stats;

	if (perfstat_cpu_total(NULL, &stats, sizeof(stats), 1) == -1)
	{
		SET_MSG_RESULT(result, zbx_dsprintf(NULL, "Cannot obtain system information: %s", zbx_strerror(errno)));
		return SYSINFO_RET_FAIL;
	}

	SET_UI64_RESULT(result, (zbx_uint64_t)stats.devintrs);

	return SYSINFO_RET_OK;
#else
	SET_MSG_RESULT(result, zbx_strdup(NULL, "Agent was compiled without support for Perfstat API."));
	return SYSINFO_RET_FAIL;
#endif
}
/** * This routine returns 'nelem' double precision floats containing * the load averages in 'loadv'; at most 3 values will be returned. * @param loadv destination of the load averages * @param nelem number of averages * @return: 0 if successful, -1 if failed (and all load averages are 0). */ int getloadavg_sysdep (double *loadv, int nelem) { perfstat_cpu_total_t cpu; if (perfstat_cpu_total(NULL, &cpu, sizeof(perfstat_cpu_total_t), 1) < 1) { LogError("system statistic error -- perfstat_cpu_total failed: %s\n", STRERROR); return -1; } switch (nelem) { case 3: loadv[2] = (double)cpu.loadavg[2] / (double)(1 << SBITS); case 2: loadv[1] = (double)cpu.loadavg[1] / (double)(1 << SBITS); case 1: loadv[0] = (double)cpu.loadavg[0] / (double)(1 << SBITS); } return 0; }
/**
 * This routine returns system/user CPU time in use.
 * Updates the file-scope cpu_*_old accumulators and, once initialized,
 * writes per-mille user/system/wait utilization into *si.
 * @return: true if successful, false if failed (or not available)
 */
boolean_t used_system_cpu_sysdep(SystemInfo_T *si) {
        perfstat_cpu_total_t cpu;
        unsigned long long cpu_total;
        unsigned long long cpu_total_new = 0ULL;
        unsigned long long cpu_user = 0ULL;
        unsigned long long cpu_syst = 0ULL;
        unsigned long long cpu_wait = 0ULL;

        if (perfstat_cpu_total(NULL, &cpu, sizeof(perfstat_cpu_total_t), 1) < 0) {
                LogError("system statistic error -- perfstat_cpu_total failed: %s\n", STRERROR);
                /* BUG FIX: this used to "return -1", which is non-zero and
                 * therefore truthy for boolean_t, so a failure was reported
                 * to the caller as success.  The documented contract is
                 * false on failure. */
                return false;
        }

        /* per-CPU averages of the cumulative tick counters */
        cpu_total_new = (cpu.user + cpu.sys + cpu.wait + cpu.idle) / cpu.ncpus;
        cpu_total = cpu_total_new - cpu_total_old;
        cpu_total_old = cpu_total_new;
        cpu_user = cpu.user / cpu.ncpus;
        cpu_syst = cpu.sys / cpu.ncpus;
        cpu_wait = cpu.wait / cpu.ncpus;

        /* percentages need two samples; skip on the very first call */
        if (cpu_initialized) {
                if (cpu_total > 0) {
                        /* per-mille (x10 percent) deltas over the interval */
                        si->total_cpu_user_percent = 1000 * ((double)(cpu_user - cpu_user_old) / (double)cpu_total);
                        si->total_cpu_syst_percent = 1000 * ((double)(cpu_syst - cpu_syst_old) / (double)cpu_total);
                        si->total_cpu_wait_percent = 1000 * ((double)(cpu_wait - cpu_wait_old) / (double)cpu_total);
                } else {
                        si->total_cpu_user_percent = 0;
                        si->total_cpu_syst_percent = 0;
                        si->total_cpu_wait_percent = 0;
                }
        }

        cpu_user_old = cpu_user;
        cpu_syst_old = cpu_syst;
        cpu_wait_old = cpu_wait;
        cpu_initialized = 1;

        return true;
}
/* sysinfo handler: system uptime in seconds, from perfstat's lbolt ticks. */
int SYSTEM_UPTIME(AGENT_REQUEST *request, AGENT_RESULT *result)
{
#if defined(HAVE_LIBPERFSTAT)
	perfstat_cpu_total_t	stats;

	if (0 == hertz)
	{
		/* cache the clock-tick rate once; lbolt is counted in ticks */
		hertz = sysconf(_SC_CLK_TCK);

		/* make sure we do not divide by 0 */
		assert(hertz);
	}

	/* AIX 6.1 */
	if (perfstat_cpu_total(NULL, &stats, sizeof(stats), 1) == -1)
		return SYSINFO_RET_FAIL;

	SET_UI64_RESULT(result, (zbx_uint64_t)((double)stats.lbolt / hertz));

	return SYSINFO_RET_OK;
#else
	return SYSINFO_RET_FAIL;
#endif
}
/*
 * Initialize AIX perfstat-based CPU-utilization measurement for a test:
 * allocates a perfstat_cpu_total_t, snapshots the totals, and records
 * the CPU count and buffer in the test's netsysstat data.
 * Returns HPUX_SYS_STATS_SUCCESS on success, -1 on allocation or
 * perfstat failure.
 */
int sys_cpu_util_init(test_t *test)
{
  perfstat_cpu_total_t *perfstat_total_buffer;
  int ret;
  int err = HPUX_SYS_STATS_SUCCESS;
  netsysstat_data_t *tsd = GET_TEST_DATA(test);

  perfstat_total_buffer = (perfstat_cpu_total_t *)
    malloc(sizeof(perfstat_cpu_total_t));
  if (NULL == perfstat_total_buffer) {
    /* previously unchecked; a failed allocation would have been
       dereferenced below.  NOTE(review): no failure constant is visible
       in this chunk; -1 is assumed distinct from
       HPUX_SYS_STATS_SUCCESS -- confirm against callers. */
    return -1;
  }

  /* while they seem to have kept the parms similar between perfstat_cpu
     and perfstat_cpu_total, it seems that perfstat_cpu_total never wants
     a "real" name parm given to it.  I suppose that if I were paranoid,
     I'd call perfstat_cpu_total once with the first two parms NULL to
     get the number of totals, but for now we just ass-u-me it will be
     one :)  raj 2006-04-07 */
  ret = perfstat_cpu_total(NULL,
                           perfstat_total_buffer,
                           sizeof(perfstat_cpu_total_t),
                           1);
  if (-1 == ret) {
    /* previously ignored; num_cpus would have been read from an
       uninitialized buffer */
    free(perfstat_total_buffer);
    return -1;
  }

  tsd->num_cpus = perfstat_total_buffer->ncpus;
  tsd->method = "AIX_PERFSTAT";

  /* save the pointer to the perfstat_total_buffer - from it we will be
     grabbing the processor frequency, which we use for a
     calibration-free CPU utilization measurement. */
  tsd->psd = perfstat_total_buffer;

  return(err);
}
/*
 * Take one sample of the cumulative per-state CPU tick counters and feed
 * it into the collector via update_cpu_counters().  Slot 0 of pcpus->cpu[]
 * is the system-wide total; slots 1..count are individual CPUs.  A CPU
 * whose counters cannot be read is marked not-supported by passing NULL.
 * Exactly one platform branch is compiled in: /proc/stat (Linux),
 * pstat (HP-UX), sysctlbyname (FreeBSD), kstat (Solaris),
 * sysctl KERN_CPTIME (OpenBSD) or perfstat (AIX).
 */
static void	update_cpustats(ZBX_CPUS_STAT_DATA *pcpus)
{
	const char	*__function_name = "update_cpustats";
	int		cpu_num;
	zbx_uint64_t	counter[ZBX_CPU_STATE_COUNT];
#if defined(HAVE_PROC_STAT)
	FILE		*file;
	char		line[1024];
	unsigned char	*cpu_status = NULL;
	const char	*filename = "/proc/stat";
#elif defined(HAVE_SYS_PSTAT_H)
	struct pst_dynamic	psd;
	struct pst_processor	psp;
#elif defined(HAVE_FUNCTION_SYSCTLBYNAME) && defined(CPUSTATES)
	long	cp_time[CPUSTATES], *cp_times = NULL;
	size_t	nlen, nlen_alloc;
#elif defined(HAVE_KSTAT_H)
	kstat_ctl_t	*kc;
	kstat_t		*k;
	cpu_stat_t	*cpu;
	zbx_uint64_t	total[ZBX_CPU_STATE_COUNT];
#elif defined(HAVE_FUNCTION_SYSCTL_KERN_CPTIME)
	int		mib[3];
	long		all_states[CPUSTATES];
	u_int64_t	one_states[CPUSTATES];
	size_t		sz;
#elif defined(HAVE_LIBPERFSTAT)
	perfstat_cpu_total_t	ps_cpu_total;
	perfstat_cpu_t		ps_cpu;
	perfstat_id_t		ps_id;
#endif

	zabbix_log(LOG_LEVEL_DEBUG, "In %s()", __function_name);

/* mark every CPU slot (total + each CPU) as not supported */
#define ZBX_SET_CPUS_NOTSUPPORTED()	\
	for (cpu_num = 0; cpu_num <= pcpus->count; cpu_num++)	\
		update_cpu_counters(&pcpus->cpu[cpu_num], NULL)

#if defined(HAVE_PROC_STAT)
	if (NULL == (file = fopen(filename, "r")))
	{
		zbx_error("cannot open [%s]: %s", filename, zbx_strerror(errno));
		ZBX_SET_CPUS_NOTSUPPORTED();
		goto exit;
	}

	/* track which slots got a "cpu"/"cpuN" line so that any CPU */
	/* missing from /proc/stat can be marked not supported below */
	cpu_status = zbx_malloc(cpu_status, sizeof(unsigned char) * (pcpus->count + 1));

	for (cpu_num = 0; cpu_num <= pcpus->count; cpu_num++)
		cpu_status[cpu_num] = SYSINFO_RET_FAIL;

	while (NULL != fgets(line, sizeof(line), file))
	{
		if (0 != strncmp(line, "cpu", 3))
			continue;

		if ('0' <= line[3] && line[3] <= '9')
		{
			/* "cpuN" line -> slot N+1 (slot 0 is the total) */
			cpu_num = atoi(line + 3) + 1;
			if (1 > cpu_num || cpu_num > pcpus->count)
				continue;
		}
		else if (' ' == line[3])
			cpu_num = 0;	/* plain "cpu " line is the total */
		else
			continue;

		memset(counter, 0, sizeof(counter));

		sscanf(line, "%*s " ZBX_FS_UI64 " " ZBX_FS_UI64 " " ZBX_FS_UI64 " " ZBX_FS_UI64
				" " ZBX_FS_UI64 " " ZBX_FS_UI64 " " ZBX_FS_UI64 " " ZBX_FS_UI64,
				&counter[ZBX_CPU_STATE_USER], &counter[ZBX_CPU_STATE_NICE],
				&counter[ZBX_CPU_STATE_SYSTEM], &counter[ZBX_CPU_STATE_IDLE],
				&counter[ZBX_CPU_STATE_IOWAIT], &counter[ZBX_CPU_STATE_INTERRUPT],
				&counter[ZBX_CPU_STATE_SOFTIRQ], &counter[ZBX_CPU_STATE_STEAL]);

		update_cpu_counters(&pcpus->cpu[cpu_num], counter);
		cpu_status[cpu_num] = SYSINFO_RET_OK;
	}
	zbx_fclose(file);

	for (cpu_num = 0; cpu_num <= pcpus->count; cpu_num++)
		if (SYSINFO_RET_FAIL == cpu_status[cpu_num])
			update_cpu_counters(&pcpus->cpu[cpu_num], NULL);

	zbx_free(cpu_status);
#elif defined(HAVE_SYS_PSTAT_H)
	/* HP-UX: pstat_getdynamic() for the total, pstat_getprocessor() per CPU */
	for (cpu_num = 0; cpu_num <= pcpus->count; cpu_num++)
	{
		memset(counter, 0, sizeof(counter));

		if (0 == cpu_num)
		{
			if (-1 == pstat_getdynamic(&psd, sizeof(psd), 1, 0))
			{
				update_cpu_counters(&pcpus->cpu[cpu_num], NULL);
				continue;
			}

			counter[ZBX_CPU_STATE_USER] = (zbx_uint64_t)psd.psd_cpu_time[CP_USER];
			counter[ZBX_CPU_STATE_NICE] = (zbx_uint64_t)psd.psd_cpu_time[CP_NICE];
			counter[ZBX_CPU_STATE_SYSTEM] = (zbx_uint64_t)psd.psd_cpu_time[CP_SYS];
			counter[ZBX_CPU_STATE_IDLE] = (zbx_uint64_t)psd.psd_cpu_time[CP_IDLE];
		}
		else
		{
			if (-1 == pstat_getprocessor(&psp, sizeof(psp), 1, cpu_num - 1))
			{
				update_cpu_counters(&pcpus->cpu[cpu_num], NULL);
				continue;
			}

			counter[ZBX_CPU_STATE_USER] = (zbx_uint64_t)psp.psp_cpu_time[CP_USER];
			counter[ZBX_CPU_STATE_NICE] = (zbx_uint64_t)psp.psp_cpu_time[CP_NICE];
			counter[ZBX_CPU_STATE_SYSTEM] = (zbx_uint64_t)psp.psp_cpu_time[CP_SYS];
			counter[ZBX_CPU_STATE_IDLE] = (zbx_uint64_t)psp.psp_cpu_time[CP_IDLE];
		}

		update_cpu_counters(&pcpus->cpu[cpu_num], counter);
	}
#elif defined(HAVE_FUNCTION_SYSCTLBYNAME) && defined(CPUSTATES)
	/* FreeBSD 7.0 */

	/* kern.cp_time: system-wide totals (slot 0) */
	nlen = sizeof(cp_time);
	if (-1 == sysctlbyname("kern.cp_time", &cp_time, &nlen, NULL, 0) || nlen != sizeof(cp_time))
	{
		ZBX_SET_CPUS_NOTSUPPORTED();
		goto exit;
	}

	memset(counter, 0, sizeof(counter));

	counter[ZBX_CPU_STATE_USER] = (zbx_uint64_t)cp_time[CP_USER];
	counter[ZBX_CPU_STATE_NICE] = (zbx_uint64_t)cp_time[CP_NICE];
	counter[ZBX_CPU_STATE_SYSTEM] = (zbx_uint64_t)cp_time[CP_SYS];
	counter[ZBX_CPU_STATE_INTERRUPT] = (zbx_uint64_t)cp_time[CP_INTR];
	counter[ZBX_CPU_STATE_IDLE] = (zbx_uint64_t)cp_time[CP_IDLE];

	update_cpu_counters(&pcpus->cpu[0], counter);

	/* get size of result set for CPU statistics */
	if (-1 == sysctlbyname("kern.cp_times", NULL, &nlen_alloc, NULL, 0))
	{
		for (cpu_num = 1; cpu_num <= pcpus->count; cpu_num++)
			update_cpu_counters(&pcpus->cpu[cpu_num], NULL);
		goto exit;
	}

	cp_times = zbx_malloc(cp_times, nlen_alloc);

	/* kern.cp_times: flat array of CPUSTATES longs per CPU */
	nlen = nlen_alloc;
	if (0 == sysctlbyname("kern.cp_times", cp_times, &nlen, NULL, 0) && nlen == nlen_alloc)
	{
		for (cpu_num = 1; cpu_num <= pcpus->count; cpu_num++)
		{
			memset(counter, 0, sizeof(counter));

			counter[ZBX_CPU_STATE_USER] = (zbx_uint64_t)*(cp_times + (cpu_num - 1) * CPUSTATES + CP_USER);
			counter[ZBX_CPU_STATE_NICE] = (zbx_uint64_t)*(cp_times + (cpu_num - 1) * CPUSTATES + CP_NICE);
			counter[ZBX_CPU_STATE_SYSTEM] = (zbx_uint64_t)*(cp_times + (cpu_num - 1) * CPUSTATES + CP_SYS);
			counter[ZBX_CPU_STATE_INTERRUPT] = (zbx_uint64_t)*(cp_times + (cpu_num - 1) * CPUSTATES + CP_INTR);
			counter[ZBX_CPU_STATE_IDLE] = (zbx_uint64_t)*(cp_times + (cpu_num - 1) * CPUSTATES + CP_IDLE);

			update_cpu_counters(&pcpus->cpu[cpu_num], counter);
		}
	}
	else
	{
		for (cpu_num = 1; cpu_num <= pcpus->count; cpu_num++)
			update_cpu_counters(&pcpus->cpu[cpu_num], NULL);
	}

	zbx_free(cp_times);
#elif defined(HAVE_KSTAT_H)
	/* Solaris */

	if (NULL == (kc = kstat_open()))
	{
		ZBX_SET_CPUS_NOTSUPPORTED();
		goto exit;
	}

	/* no system-wide kstat; the total (slot 0) is summed from the CPUs */
	memset(total, 0, sizeof(total));

	for (cpu_num = 1; cpu_num <= pcpus->count; cpu_num++)
	{
		if (NULL == (k = kstat_lookup(kc, "cpu_stat", pcpus->cpu[cpu_num].cpu_num - 1, NULL)) ||
				-1 == kstat_read(kc, k, NULL))
		{
			update_cpu_counters(&pcpus->cpu[cpu_num], NULL);
			continue;
		}

		cpu = (cpu_stat_t *)k->ks_data;

		memset(counter, 0, sizeof(counter));

		total[ZBX_CPU_STATE_IDLE] += counter[ZBX_CPU_STATE_IDLE] = cpu->cpu_sysinfo.cpu[CPU_IDLE];
		total[ZBX_CPU_STATE_USER] += counter[ZBX_CPU_STATE_USER] = cpu->cpu_sysinfo.cpu[CPU_USER];
		total[ZBX_CPU_STATE_SYSTEM] += counter[ZBX_CPU_STATE_SYSTEM] = cpu->cpu_sysinfo.cpu[CPU_KERNEL];
		total[ZBX_CPU_STATE_IOWAIT] += counter[ZBX_CPU_STATE_IOWAIT] = cpu->cpu_sysinfo.cpu[CPU_WAIT];

		update_cpu_counters(&pcpus->cpu[cpu_num], counter);
	}

	kstat_close(kc);

	update_cpu_counters(&pcpus->cpu[0], total);
#elif defined(HAVE_FUNCTION_SYSCTL_KERN_CPTIME)
	/* OpenBSD 4.3: KERN_CPTIME for the total, KERN_CPTIME2 per CPU */
	for (cpu_num = 0; cpu_num <= pcpus->count; cpu_num++)
	{
		memset(counter, 0, sizeof(counter));

		if (0 == cpu_num)
		{
			mib[0] = CTL_KERN;
			mib[1] = KERN_CPTIME;

			sz = sizeof(all_states);

			if (-1 == sysctl(mib, 2, &all_states, &sz, NULL, 0) || sz != sizeof(all_states))
			{
				update_cpu_counters(&pcpus->cpu[cpu_num], NULL);
				continue;
			}

			counter[ZBX_CPU_STATE_USER] = (zbx_uint64_t)all_states[CP_USER];
			counter[ZBX_CPU_STATE_NICE] = (zbx_uint64_t)all_states[CP_NICE];
			counter[ZBX_CPU_STATE_SYSTEM] = (zbx_uint64_t)all_states[CP_SYS];
			counter[ZBX_CPU_STATE_INTERRUPT] = (zbx_uint64_t)all_states[CP_INTR];
			counter[ZBX_CPU_STATE_IDLE] = (zbx_uint64_t)all_states[CP_IDLE];
		}
		else
		{
			mib[0] = CTL_KERN;
			mib[1] = KERN_CPTIME2;
			mib[2] = cpu_num - 1;

			sz = sizeof(one_states);

			if (-1 == sysctl(mib, 3, &one_states, &sz, NULL, 0) || sz != sizeof(one_states))
			{
				update_cpu_counters(&pcpus->cpu[cpu_num], NULL);
				continue;
			}

			counter[ZBX_CPU_STATE_USER] = (zbx_uint64_t)one_states[CP_USER];
			counter[ZBX_CPU_STATE_NICE] = (zbx_uint64_t)one_states[CP_NICE];
			counter[ZBX_CPU_STATE_SYSTEM] = (zbx_uint64_t)one_states[CP_SYS];
			counter[ZBX_CPU_STATE_INTERRUPT] = (zbx_uint64_t)one_states[CP_INTR];
			counter[ZBX_CPU_STATE_IDLE] = (zbx_uint64_t)one_states[CP_IDLE];
		}

		update_cpu_counters(&pcpus->cpu[cpu_num], counter);
	}
#elif defined(HAVE_LIBPERFSTAT)
	/* AIX 6.1 */
	for (cpu_num = 0; cpu_num <= pcpus->count; cpu_num++)
	{
		memset(counter, 0, sizeof(counter));

		if (0 == cpu_num)
		{
			if (-1 == perfstat_cpu_total(NULL, &ps_cpu_total, sizeof(ps_cpu_total), 1))
			{
				update_cpu_counters(&pcpus->cpu[cpu_num], NULL);
				continue;
			}

			counter[ZBX_CPU_STATE_USER] = (zbx_uint64_t)ps_cpu_total.user;
			counter[ZBX_CPU_STATE_SYSTEM] = (zbx_uint64_t)ps_cpu_total.sys;
			counter[ZBX_CPU_STATE_IDLE] = (zbx_uint64_t)ps_cpu_total.idle;
			counter[ZBX_CPU_STATE_IOWAIT] = (zbx_uint64_t)ps_cpu_total.wait;
		}
		else
		{
			/* individual CPUs are addressed by name: "cpu0", "cpu1", ... */
			zbx_snprintf(ps_id.name, sizeof(ps_id.name), "cpu%d", cpu_num - 1);

			if (-1 == perfstat_cpu(&ps_id, &ps_cpu, sizeof(ps_cpu), 1))
			{
				update_cpu_counters(&pcpus->cpu[cpu_num], NULL);
				continue;
			}

			counter[ZBX_CPU_STATE_USER] = (zbx_uint64_t)ps_cpu.user;
			counter[ZBX_CPU_STATE_SYSTEM] = (zbx_uint64_t)ps_cpu.sys;
			counter[ZBX_CPU_STATE_IDLE] = (zbx_uint64_t)ps_cpu.idle;
			counter[ZBX_CPU_STATE_IOWAIT] = (zbx_uint64_t)ps_cpu.wait;
		}

		update_cpu_counters(&pcpus->cpu[cpu_num], counter);
	}
#endif	/* HAVE_LIBPERFSTAT */

#undef ZBX_SET_CPUS_NOTSUPPORTED
exit:
	zabbix_log(LOG_LEVEL_DEBUG, "End of %s()", __function_name);
}
/*
 * Load the latest CPU usage statistics
 *
 * Snapshots the system-wide totals into the aggregate entry (index -1)
 * and then per-CPU counters into indexed entries.  Always returns 0.
 */
int netsnmp_cpu_arch_load( netsnmp_cache *cache, void *magic ) {
    int  i, n;
    int  have_totals = 0;   /* did perfstat_cpu_total() succeed? */
    perfstat_id_t name;
    perfstat_cpu_total_t cs;
    perfstat_cpu_t *cs2;
    perfstat_memory_total_t ms;
    netsnmp_cpu_info *cpu = netsnmp_cpu_get_byIdx( -1, 0 );

    if (perfstat_cpu_total((perfstat_id_t *)NULL, &cs,
                           sizeof(perfstat_cpu_total_t), 1) > 0) {
        have_totals = 1;
        /* Returns 'u_longlong_t' statistics; averaged across CPUs */
        cpu->user_ticks = (unsigned long long)cs.user / cs.ncpus;
        cpu->sys_ticks  = ((unsigned long long)cs.sys +
                           (unsigned long long)cs.wait) / cs.ncpus;
        cpu->kern_ticks = (unsigned long long)cs.sys  / cs.ncpus;
        cpu->wait_ticks = (unsigned long long)cs.wait / cs.ncpus;
        cpu->idle_ticks = (unsigned long long)cs.idle / cs.ncpus;
        /* intrpt_ticks, sirq_ticks, nice_ticks unused */

        /*
         * Interrupt/Context Switch statistics
         *   XXX - Do these really belong here ?
         */
        cpu->pageIn       = (unsigned long long)cs.sysread;
        cpu->pageOut      = (unsigned long long)cs.syswrite;
        cpu->nInterrupts  = (unsigned long long)cs.devintrs + cs.softintrs;
        cpu->nCtxSwitches = (unsigned long long)cs.pswitch;
    }

    if (perfstat_memory_total((perfstat_id_t *)NULL, &ms,
                              sizeof(perfstat_memory_total_t), 1) > 0) {
        cpu->swapIn  = (unsigned long long)ms.pgspins;
        cpu->swapOut = (unsigned long long)ms.pgspouts;
    }

    /*
     * Per-CPU statistics.  cs.ncpus is only meaningful when the totals
     * call succeeded -- the original read it unconditionally, using an
     * uninitialized structure on failure.
     */
    if (!have_totals)
        return 0;

    n = cs.ncpus;    /* XXX - Compare against cpu_num */
    cs2 = (perfstat_cpu_t*)malloc( n*sizeof(perfstat_cpu_t));
    if (NULL == cs2) {
        /* the original passed the NULL buffer straight to perfstat_cpu();
           fall back to carrying over the previous per-CPU stats */
        _cpu_copy_stats( cpu );
        return 0;
    }
    strcpy( name.name, "");   /* empty name starts enumeration at CPU 0 */
    if (perfstat_cpu(&name, cs2, sizeof(perfstat_cpu_t), n) > 0) {
        for ( i = 0; i < n; i++ ) {
            cpu = netsnmp_cpu_get_byIdx( i, 1 );
            cpu->user_ticks = (unsigned long long)cs2[i].user;
            cpu->sys_ticks  = (unsigned long long)cs2[i].sys
                            + (unsigned long long)cs2[i].wait;
            cpu->kern_ticks = (unsigned long long)cs2[i].sys;
            cpu->wait_ticks = (unsigned long long)cs2[i].wait;
            cpu->idle_ticks = (unsigned long long)cs2[i].idle;
            cpu->pageIn     = (unsigned long long)cs2[i].sysread;
            cpu->pageOut    = (unsigned long long)cs2[i].syswrite;
            cpu->nCtxSwitches = (unsigned long long)cs2[i].pswitch;
            /* Interrupt stats only apply overall, not per-CPU */
        }
    } else {
        _cpu_copy_stats( cpu );
    }
    free(cs2);

    return 0;
}
/* Collect the 1/5/15-minute load averages into load_stats_buf and stamp
 * systime.  Exactly one platform branch is compiled in: getloadavg()
 * where available, otherwise kstat (Solaris), pstat (HP-UX) or
 * perfstat (AIX).  Returns SG_ERROR_NONE on success, or sets and
 * returns a platform-specific sg_error. */
static sg_error sg_get_load_stats_int(sg_load_stats *load_stats_buf){
#ifdef HAVE_GETLOADAVG
	double loadav[3];
#elif defined(HPUX)
	struct pst_dynamic pstat_dynamic;
#elif defined(AIX)
	perfstat_cpu_total_t all_cpu_info;
	int rc;
#elif defined(SOLARIS) && !defined(HAVE_SYS_LOADAVG_H)
	kstat_ctl_t *kc;
	kstat_t *ksp;
	kstat_named_t *kn;
#endif

	/* default all three averages to 0 up front */
	load_stats_buf->min1 = load_stats_buf->min5 = load_stats_buf->min15 = 0;

#ifdef HAVE_GETLOADAVG
	/* NOTE(review): the return value of getloadavg() is not checked */
	getloadavg(loadav,3);
	load_stats_buf->min1=loadav[0];
	load_stats_buf->min5=loadav[1];
	load_stats_buf->min15=loadav[2];
#elif defined(SOLARIS)
	/* read avenrun_* from the "system_misc" kstat; the ui32 values are
	 * fixed-point, scaled by 256 */
	if ((kc = kstat_open()) == NULL) {
		RETURN_WITH_SET_ERROR("load", SG_ERROR_KSTAT_OPEN, NULL);
	}

	if((ksp=kstat_lookup(kc, "unix", 0, "system_misc")) == NULL){
		kstat_close(kc);
		RETURN_WITH_SET_ERROR("load", SG_ERROR_KSTAT_LOOKUP, "unix,0,system_misc");
	}

	if (kstat_read(kc, ksp, 0) == -1) {
		kstat_close(kc);
		RETURN_WITH_SET_ERROR("load", SG_ERROR_KSTAT_READ, NULL);
	}

	kstat_close(kc);

	if((kn=kstat_data_lookup(ksp, "avenrun_1min")) == NULL) {
		RETURN_WITH_SET_ERROR("load", SG_ERROR_KSTAT_DATA_LOOKUP, "avenrun_1min");
	}
	load_stats_buf->min1 = (double)kn->value.ui32 / (double)256;

	if((kn=kstat_data_lookup(ksp, "avenrun_5min")) == NULL) {
		RETURN_WITH_SET_ERROR("load", SG_ERROR_KSTAT_DATA_LOOKUP, "avenrun_5min");
	}
	load_stats_buf->min5 = (double)kn->value.ui32 / (double)256;

	if((kn=kstat_data_lookup(ksp, "avenrun_15min")) == NULL) {
		RETURN_WITH_SET_ERROR("load", SG_ERROR_KSTAT_DATA_LOOKUP, "avenrun_15min");
	}
	load_stats_buf->min15 = (double)kn->value.ui32 / (double)256;
#elif defined(HPUX)
	if (pstat_getdynamic(&pstat_dynamic, sizeof(pstat_dynamic), 1, 0) == -1) {
		RETURN_WITH_SET_ERROR_WITH_ERRNO("load", SG_ERROR_PSTAT, "pstat_dynamic");
	}

	load_stats_buf->min1=pstat_dynamic.psd_avg_1_min;
	load_stats_buf->min5=pstat_dynamic.psd_avg_5_min;
	load_stats_buf->min15=pstat_dynamic.psd_avg_15_min;
#elif defined(AIX)
	rc = perfstat_cpu_total( NULL, &all_cpu_info, sizeof(all_cpu_info), 1);
	if( -1 == rc ) {
		RETURN_WITH_SET_ERROR_WITH_ERRNO("load", SG_ERROR_PSTAT, "perfstat_cpu_total");
	}
	else {
		/* loadavg[] is fixed-point, scaled by 1 << SBITS */
		load_stats_buf->min1 = (double) all_cpu_info.loadavg[0] / (double)(1 << SBITS);
		load_stats_buf->min5 = (double) all_cpu_info.loadavg[1] / (double)(1 << SBITS);
		load_stats_buf->min15 = (double) all_cpu_info.loadavg[2] / (double)(1 << SBITS);
	}
#else
	RETURN_WITH_SET_ERROR("load", SG_ERROR_UNSUPPORTED, OS_TYPE);
#endif

	load_stats_buf->systime = time(NULL);

	return SG_ERROR_NONE;
}
/* sysinfo handler: load average, optionally per CPU, with error messages. */
int SYSTEM_CPU_LOAD(AGENT_REQUEST *request, AGENT_RESULT *result)
{
#ifdef HAVE_LIBPERFSTAT
#if !defined(SBITS)
#	define SBITS 16
#endif
	perfstat_cpu_total_t	stats;
	char			*param;
	double			load;
	int			avg_mode, divide_by_ncpus = 1;

	if (request->nparam > 2)
	{
		SET_MSG_RESULT(result, zbx_strdup(NULL, "Too many parameters."));
		return SYSINFO_RET_FAIL;
	}

	param = get_rparam(request, 0);

	if (NULL == param || '\0' == *param || 0 == strcmp(param, "all"))
		divide_by_ncpus = 0;
	else if (0 != strcmp(param, "percpu"))
	{
		SET_MSG_RESULT(result, zbx_strdup(NULL, "Invalid first parameter."));
		return SYSINFO_RET_FAIL;
	}

	param = get_rparam(request, 1);

	if (NULL == param || '\0' == *param || 0 == strcmp(param, "avg1"))
		avg_mode = ZBX_AVG1;
	else if (0 == strcmp(param, "avg5"))
		avg_mode = ZBX_AVG5;
	else if (0 == strcmp(param, "avg15"))
		avg_mode = ZBX_AVG15;
	else
	{
		SET_MSG_RESULT(result, zbx_strdup(NULL, "Invalid second parameter."));
		return SYSINFO_RET_FAIL;
	}

	if (perfstat_cpu_total(NULL, &stats, sizeof(stats), 1) == -1)
	{
		SET_MSG_RESULT(result, zbx_dsprintf(NULL, "Cannot obtain system information: %s", zbx_strerror(errno)));
		return SYSINFO_RET_FAIL;
	}

	/* loadavg[] is fixed-point, scaled by 1 << SBITS */
	load = (double)stats.loadavg[avg_mode] / (1 << SBITS);

	if (0 != divide_by_ncpus)
	{
		if (stats.ncpus <= 0)
		{
			SET_MSG_RESULT(result, zbx_strdup(NULL, "Cannot obtain number of CPUs."));
			return SYSINFO_RET_FAIL;
		}

		load /= stats.ncpus;
	}

	SET_DBL_RESULT(result, load);

	return SYSINFO_RET_OK;
#else
	SET_MSG_RESULT(result, zbx_strdup(NULL, "Agent was compiled without support for Perfstat API."));
	return SYSINFO_RET_FAIL;
#endif
}
/*
 * Fill host_info_buf with static host information: hostname, OS
 * name/release/version, platform/architecture, CPU counts, kernel bit
 * width, virtualization state and uptime.  One large per-platform
 * #ifdef ladder; unknown fields are left at their zero/unknown defaults.
 * Returns SG_ERROR_NONE on success, or propagates an sg_error.
 */
static sg_error
sg_get_host_info_int(sg_host_info *host_info_buf) {
#ifdef WIN32
	unsigned long nameln;
	char *name;
	long long result;
	OSVERSIONINFOEX osinfo;
	SYSTEM_INFO sysinfo;
	char *tmp_name;
	char tmp[10];
#else
	struct utsname os;
# if defined(HPUX)
	struct pst_static pstat_static;
	struct pst_dynamic pstat_dynamic;
	time_t currtime;
	long boottime;
# elif defined(SOLARIS)
	time_t boottime, curtime;
	kstat_ctl_t *kc;
	kstat_t *ksp;
	kstat_named_t *kn;
	char *isainfo = NULL;
	long isabufsz, rc;
# elif defined(LINUX) || defined(CYGWIN)
	FILE *f;
# elif defined(ALLBSD)
	int mib[2];
	struct timeval boottime;
	time_t curtime;
	size_t size;
	/* NOTE(review): ncpus is only assigned under HW_NCPU below but is
	 * consumed unconditionally for maxcpus/ncpus — assumes HW_NCPU is
	 * always defined on the BSDs this builds on; verify. */
	int ncpus;
#  if defined(HW_MACHINE_ARCH) || defined(HW_MACHINE)
	char arch_name[16];
#  endif
# elif defined(AIX)
	static perfstat_cpu_total_t cpu_total;
	sg_error rc;
#  if defined(HAVE_GETUTXENT)
	struct utmpx *ut;
#  else
	struct utmp *ut;
#  endif
# endif
#endif

	/* reset to "unknown" defaults before probing the platform */
	host_info_buf->ncpus = 0;
	host_info_buf->maxcpus = 0;
	host_info_buf->bitwidth = 0;
	host_info_buf->host_state = sg_unknown_configuration;
	host_info_buf->uptime = 0;
	host_info_buf->systime = 0;

#ifdef WIN32
	/* these settings are static after boot, so why get them
	 * constantly?
	 *
	 * Because we want to know some changes anyway - at least
	 * when the hostname (DNS?) changes */

	/* get system name */
	nameln = MAX_COMPUTERNAME_LENGTH + 1;
	name = sg_malloc(nameln);
	if(name == NULL) {
		RETURN_FROM_PREVIOUS_ERROR( "os", sg_get_error() );
	}
	/*
	 * XXX probably GetComputerNameEx() is a better entry point ...
	 */
	if( GetComputerName(name, &nameln) == 0 ) {
		free(name);
		RETURN_WITH_SET_ERROR("os", SG_ERROR_HOST, "GetComputerName");
	}
	if(SG_ERROR_NONE != sg_update_string(&host_info_buf->hostname, name)) {
		free(name);
		RETURN_FROM_PREVIOUS_ERROR( "os", sg_get_error() );
	}
	free(name);

	/* get OS name, version and build */
	ZeroMemory(&osinfo, sizeof(OSVERSIONINFOEX));
	osinfo.dwOSVersionInfoSize = sizeof(osinfo);
	if(!GetVersionEx(&osinfo)) {
		RETURN_WITH_SET_ERROR("os", SG_ERROR_HOST, "GetVersionEx");
	}
	GetSystemInfo(&sysinfo);

	/* Release - single number (the build number) */
	if(snprintf(tmp, sizeof(tmp), "%ld", osinfo.dwBuildNumber) == -1) {
		RETURN_WITH_SET_ERROR_WITH_ERRNO("os", SG_ERROR_SPRINTF, NULL);
	}
	if(SG_ERROR_NONE != sg_update_string(&host_info_buf->os_release, tmp)) {
		RETURN_FROM_PREVIOUS_ERROR( "os", sg_get_error() );
	}

	/* Version */
	/* usually a single digit . single digit, eg 5.0 */
	/* NOTE(review): this error path reuses RETURN_FROM_PREVIOUS_ERROR
	 * while the release snprintf above uses ..._WITH_ERRNO — looks
	 * inconsistent; verify against upstream intent. */
	if(snprintf(tmp, sizeof(tmp), "%ld.%ld", osinfo.dwMajorVersion, osinfo.dwMinorVersion) == -1) {
		RETURN_FROM_PREVIOUS_ERROR( "os", sg_get_error() );
	}
	if(SG_ERROR_NONE != sg_update_string(&host_info_buf->os_version, tmp)) {
		RETURN_FROM_PREVIOUS_ERROR( "os", sg_get_error() );
	}

	/* OS name (caller owns the returned buffer) */
	tmp_name = get_os_name(osinfo, sysinfo);
	if(tmp_name == NULL) {
		RETURN_FROM_PREVIOUS_ERROR( "os", sg_get_error() );
	}
	if(SG_ERROR_NONE != sg_update_string(&host_info_buf->os_name, tmp_name)) {
		free(tmp_name);
		RETURN_FROM_PREVIOUS_ERROR( "os", sg_get_error() );
	}
	free(tmp_name);

	/* Platform */
	switch(sysinfo.wProcessorArchitecture) {
	case PROCESSOR_ARCHITECTURE_INTEL:
		if(SG_ERROR_NONE != sg_update_string(&host_info_buf->platform, "Intel")) {
			RETURN_FROM_PREVIOUS_ERROR( "os", sg_get_error() );
		}
		break;
	case PROCESSOR_ARCHITECTURE_IA64:
		if(SG_ERROR_NONE != sg_update_string(&host_info_buf->platform, "IA64")) {
			RETURN_FROM_PREVIOUS_ERROR( "os", sg_get_error() );
		}
		break;
	case PROCESSOR_ARCHITECTURE_AMD64:
		if(SG_ERROR_NONE != sg_update_string(&host_info_buf->platform, "AMD64")) {
			RETURN_FROM_PREVIOUS_ERROR( "os", sg_get_error() );
		}
		break;
	default:
		if(SG_ERROR_NONE != sg_update_string(&host_info_buf->platform, "Unknown")){
			RETURN_FROM_PREVIOUS_ERROR( "os", sg_get_error() );
		}
		break;
	}

	/* uptime via the PDH "System Up Time" counter */
	if(read_counter_large(SG_WIN32_UPTIME, &result)) {
		RETURN_WITH_SET_ERROR("os", SG_ERROR_PDHREAD, PDH_UPTIME);
	}
	host_info_buf->uptime = (time_t) result;
#else
	/* POSIX: uname() supplies name/release/version/arch/hostname */
	if((uname(&os)) < 0) {
		RETURN_WITH_SET_ERROR_WITH_ERRNO("os", SG_ERROR_UNAME, NULL);
	}

	if(SG_ERROR_NONE != sg_update_string(&host_info_buf->os_name, os.sysname)) {
		RETURN_FROM_PREVIOUS_ERROR( "os", sg_get_error() );
	}
	if(SG_ERROR_NONE != sg_update_string(&host_info_buf->os_release, os.release)) {
		RETURN_FROM_PREVIOUS_ERROR( "os", sg_get_error() );
	}
	if(SG_ERROR_NONE != sg_update_string(&host_info_buf->os_version, os.version)) {
		RETURN_FROM_PREVIOUS_ERROR( "os", sg_get_error() );
	}
	if(SG_ERROR_NONE != sg_update_string(&host_info_buf->platform, os.machine)) {
		RETURN_FROM_PREVIOUS_ERROR( "os", sg_get_error() );
	}
	if(SG_ERROR_NONE != sg_update_string(&host_info_buf->hostname, os.nodename)) {
		RETURN_FROM_PREVIOUS_ERROR( "os", sg_get_error() );
	}

	/* get uptime */
#ifdef HPUX
	if (pstat_getstatic(&pstat_static, sizeof(pstat_static), 1, 0) == -1) {
		RETURN_WITH_SET_ERROR_WITH_ERRNO("os", SG_ERROR_PSTAT, "pstat_static");
	}
	if (pstat_getdynamic(&pstat_dynamic, sizeof(pstat_dynamic), 1, 0) == -1) {
		RETURN_WITH_SET_ERROR_WITH_ERRNO("os", SG_ERROR_PSTAT, "pstat_dynamic");
	}

	currtime = time(NULL);
	boottime = pstat_static.boot_time;
	host_info_buf->uptime = currtime - boottime;

	host_info_buf->ncpus = pstat_dynamic.psd_proc_cnt;
	host_info_buf->maxcpus = pstat_dynamic.psd_max_proc_cnt;
	host_info_buf->bitwidth = sysconf(_SC_KERNEL_BITS);

	/*
	 * TODO: getting virtualization state
	 * 1) on boostrapping this component, try loading /opt/hpvm/lib/libhpvm.so (or so)
	 * 2) get function addresses for
	 *    a) HPVM_boolean hpvm_api_server_check()
	 *    b) HPVM_boolean hpvm_api_virtmach_check()
	 *
	 * Seems to be hardware virtualization ...
	 * See: http://docstore.mik.ua/manuals/hp-ux/en/T2767-90141/index.html (hpvmpubapi(3))
	 *      http://jreypo.wordpress.com/tag/hpvm/
	 *      http://jreypo.wordpress.com/category/hp-ux/page/3/
	 *      http://h20338.www2.hp.com/enterprise/us/en/os/hpux11i-partitioning-integrity-vm.html
	 */
#elif defined(SOLARIS)
	/* boot time lives in the "system_misc" kstat */
	if ((kc = kstat_open()) == NULL) {
		RETURN_WITH_SET_ERROR("os", SG_ERROR_KSTAT_OPEN, NULL);
	}
	if((ksp=kstat_lookup(kc, "unix", -1, "system_misc"))==NULL){
		kstat_close(kc);
		RETURN_WITH_SET_ERROR("os", SG_ERROR_KSTAT_LOOKUP, "unix,-1,system_misc");
	}
	if (kstat_read(kc, ksp, 0) == -1) {
		kstat_close(kc);
		RETURN_WITH_SET_ERROR("os", SG_ERROR_KSTAT_READ, NULL);
	}
	if((kn=kstat_data_lookup(ksp, "boot_time")) == NULL){
		kstat_close(kc);
		RETURN_WITH_SET_ERROR("os", SG_ERROR_KSTAT_DATA_LOOKUP, "boot_time");
	}
	/* XXX verify on Solaris 10 if it's still ui32 */
	boottime = (kn->value.ui32);
	kstat_close(kc);

	time(&curtime);
	host_info_buf->uptime = curtime - boottime;

	host_info_buf->ncpus = sysconf(_SC_NPROCESSORS_ONLN);
	host_info_buf->maxcpus = sysconf(_SC_NPROCESSORS_CONF);

	/* architecture string via sysinfo(2); buffer grown on demand */
	isainfo = sg_malloc( isabufsz = (32 * sizeof(*isainfo)) );
	if( NULL == isainfo ) {
		RETURN_FROM_PREVIOUS_ERROR( "os", sg_get_error() );
	}
# define MKSTR(x) #x
# if defined(SI_ARCHITECTURE_K)
#  define SYSINFO_CMD SI_ARCHITECTURE_K
# elif defined(SI_ISALIST)
#  define SYSINFO_CMD SI_ISALIST
# else
#  define SYSINFO_CMD SI_ARCHITECTURE
# endif
sysinfo_again:
	if( -1 == ( rc = sysinfo( SYSINFO_CMD, isainfo, isabufsz ) ) ) {
		free(isainfo);
		RETURN_WITH_SET_ERROR_WITH_ERRNO("os", SG_ERROR_SYSINFO, MKSTR(SYSINFO_CMD) );
	}
	else if( rc > isabufsz ) {
		/* buffer too small: sysinfo() returned the required size */
		char *tmp = sg_realloc(isainfo, rc);
		if( NULL == tmp ) {
			free(isainfo);
			RETURN_FROM_PREVIOUS_ERROR( "os", sg_get_error() );
		}
		isabufsz = rc;
		isainfo = tmp;
		goto sysinfo_again;
	}

	host_info_buf->bitwidth = get_bitwidth_by_arch_name(isainfo);
	free(isainfo);

	host_info_buf->host_state = sg_unknown_configuration;
#elif defined(LINUX) || defined(CYGWIN)
	if ((f=fopen("/proc/uptime", "r")) == NULL) {
		RETURN_WITH_SET_ERROR_WITH_ERRNO("os", SG_ERROR_OPEN, "/proc/uptime");
	}
	/* pick "%ld" vs "%lu" depending on whether time_t is signed */
#define TIME_T_SCANF_FMT (sizeof(int[(((time_t)-1)/2)%4+1]) == sizeof(int[1]) ? "%ld %*d" : "%lu %*d" )
	if((fscanf(f,TIME_T_SCANF_FMT,&host_info_buf->uptime)) != 1){
		fclose(f);
		RETURN_WITH_SET_ERROR("os", SG_ERROR_PARSE, NULL);
	}
	fclose(f);

# if defined(LINUX)
	host_info_buf->ncpus = sysconf(_SC_NPROCESSORS_ONLN);
	host_info_buf->maxcpus = sysconf(_SC_NPROCESSORS_CONF);
	/* presence of the vsyscall compat knobs implies a 64-bit kernel */
	if( access( "/proc/sys/kernel/vsyscall64", F_OK ) == 0 ||
	    access( "/proc/sys/abi/vsyscall32", F_OK ) == 0 ) {
		host_info_buf->bitwidth = 64;
	}
	else {
		host_info_buf->bitwidth = sysconf(_SC_LONG_BIT); // well, maybe 64-bit disabled 128-bit system o.O
	}
	host_info_buf->host_state = sg_unknown_configuration;
# endif
#elif defined(ALLBSD)
	mib[0] = CTL_KERN;
	mib[1] = KERN_BOOTTIME;
	size = sizeof(boottime);
	if (sysctl(mib, 2, &boottime, &size, NULL, 0) < 0) {
		RETURN_WITH_SET_ERROR_WITH_ERRNO("os", SG_ERROR_SYSCTL, "CTL_KERN.KERN_BOOTTIME");
	}
	time(&curtime);
	host_info_buf->uptime= curtime - boottime.tv_sec;

# if defined(HW_NCPU)
	mib[0] = CTL_HW;
	mib[1] = HW_NCPU;
	size = sizeof(int);
	if( sysctl( mib, 2, &ncpus, &size, NULL, 0 ) < 0 ) {
		RETURN_WITH_SET_ERROR_WITH_ERRNO("os", SG_ERROR_SYSCTL, "CTL_HW.HW_NCPU" );
	}
# endif

	/* prefer HW_MACHINE_ARCH; fall back to HW_MACHINE */
# if defined(HW_MACHINE_ARCH)
	mib[0] = CTL_HW;
	mib[1] = HW_MACHINE_ARCH;
	size = sizeof(arch_name);
	if( sysctl( mib, 2, arch_name, &size, NULL, 0 ) == 0 ) {
		host_info_buf->bitwidth = get_bitwidth_by_arch_name(arch_name);
	}
	else {
# endif
# if defined(HW_MACHINE)
	mib[0] = CTL_HW;
	mib[1] = HW_MACHINE;
	size = sizeof(arch_name);
	if( sysctl( mib, 2, arch_name, &size, NULL, 0 ) == 0 ) {
		host_info_buf->bitwidth = get_bitwidth_by_arch_name(arch_name);
	}
	else {
		SET_ERROR_WITH_ERRNO("os", SG_ERROR_SYSCTL, "CTL_HW.HW_MACHINE" );
	}
# elif defined(HW_MACHINE_ARCH)
	SET_ERROR_WITH_ERRNO("os", SG_ERROR_SYSCTL, "CTL_HW.HW_MACHINE_ARCH" );
# endif
# if defined(HW_MACHINE_ARCH)
	}
# endif
	host_info_buf->host_state = sg_unknown_configuration; /* details must be analysed "manually", no syscall */

	host_info_buf->maxcpus = (unsigned)ncpus;
# if defined(HW_NCPUONLINE)
	/* use knowledge about number of cpu's online, when available
	 * instead of assuming all of them */
	mib[0] = CTL_HW;
	mib[1] = HW_NCPUONLINE;
	size = sizeof(int);
	if( sysctl( mib, 2, &ncpus, &size, NULL, 0 ) < 0 ) {
		RETURN_WITH_SET_ERROR_WITH_ERRNO("os", SG_ERROR_SYSCTL, "CTL_HW.HW_NCPUONLINE" );
	}
# endif
	host_info_buf->ncpus = (unsigned)ncpus;
#elif defined(AIX)
	/* NOTE(review): perfstat_cpu_total() returns the number of
	 * structures filled (1 here) or -1; the "!= 1" test differs from
	 * the "== -1" convention used elsewhere in this file, and the
	 * error code reuses SG_ERROR_SYSCTL — verify against upstream. */
	if(perfstat_cpu_total(NULL, &cpu_total, sizeof(cpu_total), 1) != 1) {
		RETURN_WITH_SET_ERROR_WITH_ERRNO("os", SG_ERROR_SYSCTL, "perfstat_cpu_total");
	}
	if(SG_ERROR_NONE != sg_update_string(&host_info_buf->platform, cpu_total.description)) {
		RETURN_FROM_PREVIOUS_ERROR( "os", sg_get_error() );
	}
	host_info_buf->ncpus = cpu_total.ncpus;
	host_info_buf->maxcpus = cpu_total.ncpus_cfg;
	host_info_buf->bitwidth = sysconf(_SC_AIX_KERNEL_BITMODE);
	if( sysconf(_SC_LPAR_ENABLED) > 0 ) {
		host_info_buf->host_state = sg_hardware_virtualized;
	}
	else {
		host_info_buf->host_state = sg_physical_host;
	}

	/* uptime: scan utmp/utmpx for the BOOT_TIME record */
#ifdef ENABLE_THREADS
	if( SG_ERROR_NONE != ( rc = sg_lock_mutex("utmp") ) ) {
		RETURN_FROM_PREVIOUS_ERROR( "os", rc );
	}
#endif
# if defined(HAVE_GETUTXENT)
#  define UTENTFN getutxent
#  define UTENTTM ut->ut_tv.tv_sec
	setutxent();
# else
#  define UTENTFN getutent
#  define UTENTTM ut->ut_time
	setutent();
# endif
	while( NULL != ( ut = UTENTFN() ) ) {
		if( ut->ut_type == BOOT_TIME ) {
			host_info_buf->uptime = time(NULL) - UTENTTM;
			break;
		}
	}
# if defined(HAVE_GETUTXENT)
	endutxent();
# else
	endutent();
# endif
#ifdef ENABLE_THREADS
	if( SG_ERROR_NONE != ( rc = sg_unlock_mutex("utmp") ) ) {
		RETURN_FROM_PREVIOUS_ERROR( "os", rc );
	}
#endif
#else
	RETURN_WITH_SET_ERROR("os", SG_ERROR_UNSUPPORTED, OS_TYPE);
#endif
#endif /* WIN32 */

	host_info_buf->systime = time(NULL);

	return SG_ERROR_NONE;
}
/*
 * Last-resort entropy harvester: fills BUF (LEN bytes) by repeatedly
 * hashing volatile system state (clocks, perfstat counters, pids,
 * signal masks, code/stack/heap addresses, mmap placement, stat data)
 * with SHA-512.  The HX/HF/HD macros fold both the observed values and
 * the success/failure of each probe into the running hash, so failing
 * syscalls still contribute.  Returns 0 if the output passes gotdata(),
 * otherwise -1 with errno = EIO.  Statement order is deliberate; do not
 * reorder the probes.
 */
static int
getentropy_fallback(void *buf, size_t len)
{
	uint8_t results[SHA512_DIGEST_LENGTH];
	int save_errno = errno, e, pgs = sysconf(_SC_PAGESIZE), faster = 0, repeat;
	static int cnt;
	struct timespec ts;
	struct timeval tv;
	perfstat_cpu_total_t cpustats;
#ifdef _AIX61
	perfstat_cpu_total_wpar_t cpustats_wpar;
#endif
	perfstat_partition_total_t lparstats;
	perfstat_disk_total_t diskinfo;
	perfstat_netinterface_total_t netinfo;
	struct rusage ru;
	sigset_t sigset;
	struct stat st;
	SHA512_CTX ctx;
	static pid_t lastpid;
	pid_t pid;
	size_t i, ii, m;
	char *p;

	/* same pid as last call: assume a hot loop and do fewer, faster
	 * rounds (no nanosleep); otherwise use the full REPEAT count */
	pid = getpid();
	if (lastpid == pid) {
		faster = 1;
		repeat = 2;
	} else {
		faster = 0;
		lastpid = pid;
		repeat = REPEAT;
	}
	for (i = 0; i < len; ) {
		int j;
		SHA512_Init(&ctx);
		for (j = 0; j < repeat; j++) {
			HX((e = gettimeofday(&tv, NULL)) == -1, tv);
			if (e != -1) {
				cnt += (int)tv.tv_sec;
				cnt += (int)tv.tv_usec;
			}

			/* AIX perfstat counters: cpu, partition, disk, net */
			HX(perfstat_cpu_total(NULL, &cpustats,
			    sizeof(cpustats), 1) == -1, cpustats);
#ifdef _AIX61
			HX(perfstat_cpu_total_wpar(NULL, &cpustats_wpar,
			    sizeof(cpustats_wpar), 1) == -1, cpustats_wpar);
#endif
			HX(perfstat_partition_total(NULL, &lparstats,
			    sizeof(lparstats), 1) == -1, lparstats);
			HX(perfstat_disk_total(NULL, &diskinfo,
			    sizeof(diskinfo), 1) == -1, diskinfo);
			HX(perfstat_netinterface_total(NULL, &netinfo,
			    sizeof(netinfo), 1) == -1, netinfo);

			for (ii = 0; ii < sizeof(cl)/sizeof(cl[0]); ii++)
				HX(clock_gettime(cl[ii], &ts) == -1, ts);

			HX((pid = getpid()) == -1, pid);
			HX((pid = getsid(pid)) == -1, pid);
			HX((pid = getppid()) == -1, pid);
			HX((pid = getpgid(0)) == -1, pid);
			HX((e = getpriority(0, 0)) == -1, e);

			if (!faster) {
				ts.tv_sec = 0;
				ts.tv_nsec = 1;
				(void) nanosleep(&ts, NULL);
			}

			HX(sigpending(&sigset) == -1, sigset);
			HX(sigprocmask(SIG_BLOCK, NULL, &sigset) == -1,
			    sigset);

			HF(getentropy);	/* an addr in this library */
			HF(printf);		/* an addr in libc */
			p = (char *)&p;
			HD(p);		/* an addr on stack */
			p = (char *)&errno;
			HD(p);		/* the addr of errno */

			if (i == 0) {
				/* first block only: the expensive probes */
				struct sockaddr_storage ss;
				struct statvfs stvfs;
				struct termios tios;
				socklen_t ssl;
				off_t off;

				/*
				 * Prime-sized mappings encourage fragmentation;
				 * thus exposing some address entropy.
				 */
				struct mm {
					size_t	npg;
					void	*p;
				} mm[] =	 {
					{ 17, MAP_FAILED }, { 3, MAP_FAILED },
					{ 11, MAP_FAILED }, { 2, MAP_FAILED },
					{ 5, MAP_FAILED }, { 3, MAP_FAILED },
					{ 7, MAP_FAILED }, { 1, MAP_FAILED },
					{ 57, MAP_FAILED }, { 3, MAP_FAILED },
					{ 131, MAP_FAILED }, { 1, MAP_FAILED },
				};

				for (m = 0; m < sizeof mm/sizeof(mm[0]); m++) {
					HX(mm[m].p = mmap(NULL,
					    mm[m].npg * pgs,
					    PROT_READ|PROT_WRITE,
					    MAP_PRIVATE|MAP_ANON, -1,
					    (off_t)0), mm[m].p);
					if (mm[m].p != MAP_FAILED) {
						size_t mo;

						/* Touch some memory... */
						p = mm[m].p;
						mo = cnt %
						    (mm[m].npg * pgs - 1);
						p[mo] = 1;
						cnt += (int)((long)(mm[m].p)
						    / pgs);
					}

					/* Check cnts and times... */
					for (ii = 0; ii < sizeof(cl)/sizeof(cl[0]); ii++) {
						HX((e = clock_gettime(cl[ii],
						    &ts)) == -1, ts);
						if (e != -1)
							cnt += (int)ts.tv_nsec;
					}

					HX((e = getrusage(RUSAGE_SELF,
					    &ru)) == -1, ru);
					if (e != -1) {
						cnt += (int)ru.ru_utime.tv_sec;
						cnt += (int)ru.ru_utime.tv_usec;
					}
				}

				for (m = 0; m < sizeof mm/sizeof(mm[0]); m++) {
					if (mm[m].p != MAP_FAILED)
						munmap(mm[m].p, mm[m].npg * pgs);
					mm[m].p = MAP_FAILED;
				}

				HX(stat(".", &st) == -1, st);
				HX(statvfs(".", &stvfs) == -1, stvfs);

				HX(stat("/", &st) == -1, st);
				HX(statvfs("/", &stvfs) == -1, stvfs);

				HX((e = fstat(0, &st)) == -1, st);
				/* NOTE(review): this branch inspects st when
				 * fstat FAILED (e == -1), i.e. st may be
				 * stale/garbage — other probe sites here use
				 * "e != -1".  Verify against upstream
				 * OpenBSD/LibreSSL before changing; in
				 * entropy code a garbage probe is harmless. */
				if (e == -1) {
					if (S_ISREG(st.st_mode) ||
					    S_ISFIFO(st.st_mode) ||
					    S_ISSOCK(st.st_mode)) {
						HX(fstatvfs(0, &stvfs) == -1,
						    stvfs);
						HX((off = lseek(0, (off_t)0,
						    SEEK_CUR)) < 0, off);
					}
					if (S_ISCHR(st.st_mode)) {
						HX(tcgetattr(0, &tios) == -1,
						    tios);
					} else if (S_ISSOCK(st.st_mode)) {
						memset(&ss, 0, sizeof ss);
						ssl = sizeof(ss);
						HX(getpeername(0,
						    (void *)&ss, &ssl) == -1,
						    ss);
					}
				}

				HX((e = getrusage(RUSAGE_CHILDREN,
				    &ru)) == -1, ru);
				if (e != -1) {
					cnt += (int)ru.ru_utime.tv_sec;
					cnt += (int)ru.ru_utime.tv_usec;
				}
			} else {
				/* Subsequent hashes absorb previous result */
				HD(results);
			}

			HX((e = gettimeofday(&tv, NULL)) == -1, tv);
			if (e != -1) {
				cnt += (int)tv.tv_sec;
				cnt += (int)tv.tv_usec;
			}

			HD(cnt);
		}
		SHA512_Final(results, &ctx);
		memcpy((char *)buf + i, results, min(sizeof(results), len - i));
		i += min(sizeof(results), len - i);
	}
	/* scrub hash state before returning */
	explicit_bzero(&ctx, sizeof ctx);
	explicit_bzero(results, sizeof results);
	if (gotdata(buf, len) == 0) {
		errno = save_errno;
		return 0;		/* satisfied */
	}
	errno = EIO;
	return -1;
}
/*
 * Refresh the cached AIX vmstat-style metrics in *vmstat from the
 * Perfstat API.  Rates are computed as deltas against the file-scope
 * last_* snapshot taken on the previous call; the first call only
 * records the snapshot and the capability flags.  On any perfstat
 * failure the function logs at DEBUG level and returns without
 * touching the snapshot.
 */
static void	update_vmstat(ZBX_VMSTAT_DATA *vmstat)
{
#if defined(HAVE_LIBPERFSTAT)
	int				now;
	zbx_uint64_t			dlcpu_us, dlcpu_sy, dlcpu_id, dlcpu_wa, lcputime;
	perfstat_memory_total_t		memstats;
	perfstat_cpu_total_t		cpustats;
	perfstat_disk_total_t		diskstats;
#ifdef _AIXVERSION_530
	zbx_uint64_t			dpcpu_us, dpcpu_sy, dpcpu_id, dpcpu_wa, pcputime, dtimebase;
	zbx_uint64_t			delta_purr, entitled_purr, unused_purr, r1, r2;
	perfstat_partition_total_t	lparstats;
#ifdef HAVE_AIXOSLEVEL_530006
	zbx_uint64_t			didle_donated_purr, dbusy_donated_purr, didle_stolen_purr, dbusy_stolen_purr;
#endif	/* HAVE_AIXOSLEVEL_530006 */
#endif	/* _AIXVERSION_530 */

	now = (int)time(NULL);

	/* retrieve the metrics
	 * Upon successful completion, the number of structures filled is returned.
	 * If unsuccessful, a value of -1 is returned and the errno global variable is set */
#ifdef _AIXVERSION_530
	if (-1 == perfstat_partition_total(NULL, &lparstats, sizeof(lparstats), 1))
	{
		zabbix_log(LOG_LEVEL_DEBUG, "perfstat_partition_total: %s", zbx_strerror(errno));
		return;
	}
#endif
	if (-1 == perfstat_cpu_total(NULL, &cpustats, sizeof(cpustats), 1))
	{
		zabbix_log(LOG_LEVEL_DEBUG, "perfstat_cpu_total: %s", zbx_strerror(errno));
		return;
	}
	if (-1 == perfstat_memory_total(NULL, &memstats, sizeof(memstats), 1))
	{
		zabbix_log(LOG_LEVEL_DEBUG, "perfstat_memory_total: %s", zbx_strerror(errno));
		return;
	}
	if (-1 == perfstat_disk_total(NULL, &diskstats, sizeof(diskstats), 1))
	{
		zabbix_log(LOG_LEVEL_DEBUG, "perfstat_disk_total: %s", zbx_strerror(errno));
		return;
	}

	/* only compute rates once we have a previous snapshot and time
	 * has actually advanced (avoids a zero divisor on now - last_clock) */
	if (last_clock && now > last_clock)
	{
		/* --- kthr --- */
		vmstat->kthr_r = (double)(cpustats.runque - last_runque) / (double)(now - last_clock);
		vmstat->kthr_b = (double)(cpustats.swpque - last_swpque) / (double)(now - last_clock);
		/* --- page --- */
		vmstat->fi = (double)(memstats.pgins - last_pgins) / (double)(now - last_clock);
		vmstat->fo = (double)(memstats.pgouts - last_pgouts) / (double)(now - last_clock);
		vmstat->pi = (double)(memstats.pgspins - last_pgspins) / (double)(now - last_clock);
		vmstat->po = (double)(memstats.pgspouts - last_pgspouts) / (double)(now - last_clock);
		vmstat->fr = (double)(memstats.cycles - last_cycles) / (double)(now - last_clock);
		vmstat->sr = (double)(memstats.scans - last_scans) / (double)(now - last_clock);
		/* -- faults -- */
		vmstat->in = (double)(cpustats.devintrs - last_devintrs) / (double)(now - last_clock);
		vmstat->sy = (double)(cpustats.syscall - last_syscall) / (double)(now - last_clock);
		vmstat->cs = (double)(cpustats.pswitch - last_pswitch) / (double)(now - last_clock);
#ifdef _AIXVERSION_530
		/* --- cpu ---- physical (PURR-based) tick deltas */
		dpcpu_us = lparstats.puser - last_puser;
		dpcpu_sy = lparstats.psys - last_psys;
		dpcpu_id = lparstats.pidle - last_pidle;
		dpcpu_wa = lparstats.pwait - last_pwait;
		delta_purr = pcputime = dpcpu_us + dpcpu_sy + dpcpu_id + dpcpu_wa;
#endif	/* _AIXVERSION_530 */
		/* logical cpu tick deltas */
		dlcpu_us = cpustats.user - last_user;
		dlcpu_sy = cpustats.sys - last_sys;
		dlcpu_id = cpustats.idle - last_idle;
		dlcpu_wa = cpustats.wait - last_wait;
		/* NOTE(review): lcputime (and pcputime) can be 0 when no
		 * ticks elapsed, which would divide by zero below — assumes
		 * the polling interval is long enough; verify. */
		lcputime = dlcpu_us + dlcpu_sy + dlcpu_id + dlcpu_wa;
#ifdef _AIXVERSION_530
		/* Distribute the donated and stolen purr to the existing purr buckets
		 * in case if donation is enabled. */
#ifdef HAVE_AIXOSLEVEL_530006
		if (lparstats.type.b.donate_enabled)
		{
			didle_donated_purr = lparstats.idle_donated_purr - last_idle_donated_purr;
			dbusy_donated_purr = lparstats.busy_donated_purr - last_busy_donated_purr;
			didle_stolen_purr = lparstats.idle_stolen_purr - last_idle_stolen_purr;
			dbusy_stolen_purr = lparstats.busy_stolen_purr - last_busy_stolen_purr;

			if (0 != dlcpu_id + dlcpu_wa)
			{
				/* NOTE(review): r1/r2 are zbx_uint64_t, so these
				 * ratios truncate to 0 or 1 instead of a
				 * fraction — looks like they were meant to be
				 * floating point; verify against the reference
				 * AIX lparstat algorithm before changing. */
				r1 = dlcpu_id / (dlcpu_id + dlcpu_wa);
				r2 = dlcpu_wa / (dlcpu_id + dlcpu_wa);
			}
			else
				r1 = r2 = 0;

			dpcpu_us += didle_donated_purr * r1 + didle_stolen_purr * r1;
			dpcpu_wa += didle_donated_purr * r2 + didle_stolen_purr * r2;
			dpcpu_sy += dbusy_donated_purr + dbusy_stolen_purr;

			delta_purr += didle_donated_purr + dbusy_donated_purr + didle_stolen_purr + dbusy_stolen_purr;
			pcputime = delta_purr;
		}
#endif	/* HAVE_AIXOSLEVEL_530006 */

		dtimebase = lparstats.timebase_last - last_timebase_last;

		/* entitled capacity is reported in 1/100ths of a CPU */
		vmstat->ent = (double)lparstats.entitled_proc_capacity / 100.0;

		if (lparstats.type.b.shared_enabled)
		{
			entitled_purr = dtimebase * vmstat->ent;
			if (entitled_purr < delta_purr)
			{
				/* when above entitlement, use consumption in percentages */
				entitled_purr = delta_purr;
			}
			unused_purr = entitled_purr - delta_purr;

			/* distribute unused purr in wait and idle proportionally to logical wait and idle */
			if (0 != dlcpu_wa + dlcpu_id)
			{
				dpcpu_wa += unused_purr * ((double)dlcpu_wa / (double)(dlcpu_wa + dlcpu_id));
				dpcpu_id += unused_purr * ((double)dlcpu_id / (double)(dlcpu_wa + dlcpu_id));
			}

			pcputime = entitled_purr;
		}

		/* Physical Processor Utilization */
		vmstat->cpu_us = (double)dpcpu_us * 100.0 / (double)pcputime;
		vmstat->cpu_sy = (double)dpcpu_sy * 100.0 / (double)pcputime;
		vmstat->cpu_id = (double)dpcpu_id * 100.0 / (double)pcputime;
		vmstat->cpu_wa = (double)dpcpu_wa * 100.0 / (double)pcputime;

		if (lparstats.type.b.shared_enabled)
		{
			/* Physical Processor Consumed */
			vmstat->cpu_pc = (double)delta_purr / (double)dtimebase;

			/* Percentage of Entitlement Consumed */
			vmstat->cpu_ec = (double)(vmstat->cpu_pc / vmstat->ent) * 100.0;

			/* Logical Processor Utilization */
			vmstat->cpu_lbusy = (double)(dlcpu_us + dlcpu_sy) * 100.0 / (double)lcputime;

			if (lparstats.type.b.pool_util_authority)
			{
				/* Available Pool Processor (app) */
				vmstat->cpu_app = (double)(lparstats.pool_idle_time - last_pool_idle_time) /
						(XINTFRAC * (double)dtimebase);
			}
		}
#else	/* not _AIXVERSION_530 */
		/* Physical Processor Utilization */
		vmstat->cpu_us = (double)dlcpu_us * 100.0 / (double)lcputime;
		vmstat->cpu_sy = (double)dlcpu_sy * 100.0 / (double)lcputime;
		vmstat->cpu_id = (double)dlcpu_id * 100.0 / (double)lcputime;
		vmstat->cpu_wa = (double)dlcpu_wa * 100.0 / (double)lcputime;
#endif	/* _AIXVERSION_530 */

		/* --- disk --- (blocks are 512 bytes) */
		vmstat->disk_bps = 512 * ((diskstats.wblks - last_wblks) + (diskstats.rblks - last_rblks)) /
				(now - last_clock);
		vmstat->disk_tps = (double)(diskstats.xfers - last_xfers) / (double)(now - last_clock);

		/* -- memory -- */
#ifdef HAVE_AIXOSLEVEL_520004
		vmstat->mem_avm = (zbx_uint64_t)memstats.virt_active;	/* Active virtual pages. Virtual pages are
									   considered active if they have been accessed */
#endif
		vmstat->mem_fre = (zbx_uint64_t)memstats.real_free;	/* free real memory (in 4KB pages) */
	}
	else
	{
		/* first call: only latch the partition capability flags */
#ifdef _AIXVERSION_530
		vmstat->shared_enabled = (unsigned char)lparstats.type.b.shared_enabled;
		vmstat->pool_util_authority = (unsigned char)lparstats.type.b.pool_util_authority;
#endif
#ifdef HAVE_AIXOSLEVEL_520004
		vmstat->aix52stats = 1;
#endif
	}

	/* saving last values */
	last_clock = now;
	/* --- kthr -- */
	last_runque = (zbx_uint64_t)cpustats.runque;
	last_swpque = (zbx_uint64_t)cpustats.swpque;
	/* --- page --- */
	last_pgins = (zbx_uint64_t)memstats.pgins;
	last_pgouts = (zbx_uint64_t)memstats.pgouts;
	last_pgspins = (zbx_uint64_t)memstats.pgspins;
	last_pgspouts = (zbx_uint64_t)memstats.pgspouts;
	last_cycles = (zbx_uint64_t)memstats.cycles;
	last_scans = (zbx_uint64_t)memstats.scans;
	/* -- faults -- */
	last_devintrs = (zbx_uint64_t)cpustats.devintrs;
	last_syscall = (zbx_uint64_t)cpustats.syscall;
	last_pswitch = (zbx_uint64_t)cpustats.pswitch;
	/* --- cpu ---- */
#ifdef _AIXVERSION_530
	last_puser = (zbx_uint64_t)lparstats.puser;
	last_psys = (zbx_uint64_t)lparstats.psys;
	last_pidle = (zbx_uint64_t)lparstats.pidle;
	last_pwait = (zbx_uint64_t)lparstats.pwait;
	last_timebase_last = (zbx_uint64_t)lparstats.timebase_last;
	last_pool_idle_time = (zbx_uint64_t)lparstats.pool_idle_time;
#ifdef HAVE_AIXOSLEVEL_530006
	last_idle_donated_purr = (zbx_uint64_t)lparstats.idle_donated_purr;
	last_busy_donated_purr = (zbx_uint64_t)lparstats.busy_donated_purr;
	last_idle_stolen_purr = (zbx_uint64_t)lparstats.idle_stolen_purr;
	last_busy_stolen_purr = (zbx_uint64_t)lparstats.busy_stolen_purr;
#endif	/* HAVE_AIXOSLEVEL_530006 */
#endif	/* _AIXVERSION_530 */
	last_user = (zbx_uint64_t)cpustats.user;
	last_sys = (zbx_uint64_t)cpustats.sys;
	last_idle = (zbx_uint64_t)cpustats.idle;
	last_wait = (zbx_uint64_t)cpustats.wait;
	last_xfers = (zbx_uint64_t)diskstats.xfers;
	last_wblks = (zbx_uint64_t)diskstats.wblks;
	last_rblks = (zbx_uint64_t)diskstats.rblks;
#endif	/* HAVE_LIBPERFSTAT */
}
int getloadavg (double loadavg[], int nelem) { int elem = 0; /* Return value. */ # ifdef NO_GET_LOAD_AVG # define LDAV_DONE /* Set errno to zero to indicate that there was no particular error; this function just can't work at all on this system. */ errno = 0; elem = -1; # endif # if !defined (LDAV_DONE) && defined (HAVE_LIBKSTAT) /* Use libkstat because we don't have to be root. */ # define LDAV_DONE kstat_ctl_t *kc; kstat_t *ksp; kstat_named_t *kn; kc = kstat_open (); if (kc == 0) return -1; ksp = kstat_lookup (kc, "unix", 0, "system_misc"); if (ksp == 0) return -1; if (kstat_read (kc, ksp, 0) == -1) return -1; kn = kstat_data_lookup (ksp, "avenrun_1min"); if (kn == 0) { /* Return -1 if no load average information is available. */ nelem = 0; elem = -1; } if (nelem >= 1) loadavg[elem++] = (double) kn->value.ul / FSCALE; if (nelem >= 2) { kn = kstat_data_lookup (ksp, "avenrun_5min"); if (kn != 0) { loadavg[elem++] = (double) kn->value.ul / FSCALE; if (nelem >= 3) { kn = kstat_data_lookup (ksp, "avenrun_15min"); if (kn != 0) loadavg[elem++] = (double) kn->value.ul / FSCALE; } } } kstat_close (kc); # endif /* HAVE_LIBKSTAT */ # if !defined (LDAV_DONE) && defined (hpux) && defined (HAVE_PSTAT_GETDYNAMIC) /* Use pstat_getdynamic() because we don't have to be root. */ # define LDAV_DONE # undef LOAD_AVE_TYPE struct pst_dynamic dyn_info; if (pstat_getdynamic (&dyn_info, sizeof (dyn_info), 0, 0) < 0) return -1; if (nelem > 0) loadavg[elem++] = dyn_info.psd_avg_1_min; if (nelem > 1) loadavg[elem++] = dyn_info.psd_avg_5_min; if (nelem > 2) loadavg[elem++] = dyn_info.psd_avg_15_min; # endif /* hpux && HAVE_PSTAT_GETDYNAMIC */ # if ! defined LDAV_DONE && defined HAVE_LIBPERFSTAT # define LDAV_DONE # undef LOAD_AVE_TYPE /* Use perfstat_cpu_total because we don't have to be root. 
*/ { perfstat_cpu_total_t cpu_stats; int result = perfstat_cpu_total (NULL, &cpu_stats, sizeof cpu_stats, 1); if (result == -1) return result; loadavg[0] = cpu_stats.loadavg[0] / (double)(1 << SBITS); loadavg[1] = cpu_stats.loadavg[1] / (double)(1 << SBITS); loadavg[2] = cpu_stats.loadavg[2] / (double)(1 << SBITS); elem = 3; } # endif # if !defined (LDAV_DONE) && (defined (__linux__) || defined (__CYGWIN__)) # define LDAV_DONE # undef LOAD_AVE_TYPE # ifndef LINUX_LDAV_FILE # define LINUX_LDAV_FILE "/proc/loadavg" # endif char ldavgbuf[3 * (INT_STRLEN_BOUND (int) + sizeof ".00 ")]; char const *ptr = ldavgbuf; int fd, count; fd = open (LINUX_LDAV_FILE, O_RDONLY); if (fd == -1) return -1; count = read (fd, ldavgbuf, sizeof ldavgbuf - 1); (void) close (fd); if (count <= 0) return -1; ldavgbuf[count] = '\0'; for (elem = 0; elem < nelem; elem++) { char *endptr; double d; errno = 0; d = c_strtod (ptr, &endptr); if (ptr == endptr || (d == 0 && errno != 0)) { if (elem == 0) return -1; break; } loadavg[elem] = d; ptr = endptr; } return elem; # endif /* __linux__ || __CYGWIN__ */ # if !defined (LDAV_DONE) && defined (__NetBSD__) # define LDAV_DONE # undef LOAD_AVE_TYPE # ifndef NETBSD_LDAV_FILE # define NETBSD_LDAV_FILE "/kern/loadavg" # endif unsigned long int load_ave[3], scale; int count; FILE *fp; fp = fopen (NETBSD_LDAV_FILE, "r"); if (fp == NULL) return -1; count = fscanf (fp, "%lu %lu %lu %lu\n", &load_ave[0], &load_ave[1], &load_ave[2], &scale); (void) fclose (fp); if (count != 4) return -1; for (elem = 0; elem < nelem; elem++) loadavg[elem] = (double) load_ave[elem] / (double) scale; return elem; # endif /* __NetBSD__ */ # if !defined (LDAV_DONE) && defined (NeXT) # define LDAV_DONE /* The NeXT code was adapted from iscreen 3.2. */ host_t host; struct processor_set_basic_info info; unsigned int info_count; /* We only know how to get the 1-minute average for this system, so even if the caller asks for more than 1, we only return 1. 
*/ if (!getloadavg_initialized) { if (processor_set_default (host_self (), &default_set) == KERN_SUCCESS) getloadavg_initialized = true; } if (getloadavg_initialized) { info_count = PROCESSOR_SET_BASIC_INFO_COUNT; if (processor_set_info (default_set, PROCESSOR_SET_BASIC_INFO, &host, (processor_set_info_t) &info, &info_count) != KERN_SUCCESS) getloadavg_initialized = false; else { if (nelem > 0) loadavg[elem++] = (double) info.load_average / LOAD_SCALE; } } if (!getloadavg_initialized) return -1; # endif /* NeXT */ # if !defined (LDAV_DONE) && defined (UMAX) # define LDAV_DONE /* UMAX 4.2, which runs on the Encore Multimax multiprocessor, does not have a /dev/kmem. Information about the workings of the running kernel can be gathered with inq_stats system calls. We only know how to get the 1-minute average for this system. */ struct proc_summary proc_sum_data; struct stat_descr proc_info; double load; register unsigned int i, j; if (cpus == 0) { register unsigned int c, i; struct cpu_config conf; struct stat_descr desc; desc.sd_next = 0; desc.sd_subsys = SUBSYS_CPU; desc.sd_type = CPUTYPE_CONFIG; desc.sd_addr = (char *) &conf; desc.sd_size = sizeof conf; if (inq_stats (1, &desc)) return -1; c = 0; for (i = 0; i < conf.config_maxclass; ++i) { struct class_stats stats; bzero ((char *) &stats, sizeof stats); desc.sd_type = CPUTYPE_CLASS; desc.sd_objid = i; desc.sd_addr = (char *) &stats; desc.sd_size = sizeof stats; if (inq_stats (1, &desc)) return -1; c += stats.class_numcpus; } cpus = c; samples = cpus < 2 ? 
3 : (2 * cpus / 3); } proc_info.sd_next = 0; proc_info.sd_subsys = SUBSYS_PROC; proc_info.sd_type = PROCTYPE_SUMMARY; proc_info.sd_addr = (char *) &proc_sum_data; proc_info.sd_size = sizeof (struct proc_summary); proc_info.sd_sizeused = 0; if (inq_stats (1, &proc_info) != 0) return -1; load = proc_sum_data.ps_nrunnable; j = 0; for (i = samples - 1; i > 0; --i) { load += proc_sum_data.ps_nrun[j]; if (j++ == PS_NRUNSIZE) j = 0; } if (nelem > 0) loadavg[elem++] = load / samples / cpus; # endif /* UMAX */ # if !defined (LDAV_DONE) && defined (DGUX) # define LDAV_DONE /* This call can return -1 for an error, but with good args it's not supposed to fail. The first argument is for no apparent reason of type `long int *'. */ dg_sys_info ((long int *) &load_info, DG_SYS_INFO_LOAD_INFO_TYPE, DG_SYS_INFO_LOAD_VERSION_0); if (nelem > 0) loadavg[elem++] = load_info.one_minute; if (nelem > 1) loadavg[elem++] = load_info.five_minute; if (nelem > 2) loadavg[elem++] = load_info.fifteen_minute; # endif /* DGUX */ # if !defined (LDAV_DONE) && defined (apollo) # define LDAV_DONE /* Apollo code from [email protected] (Ray Lischner). This system call is not documented. The load average is obtained as three long integers, for the load average over the past minute, five minutes, and fifteen minutes. Each value is a scaled integer, with 16 bits of integer part and 16 bits of fraction part. I'm not sure which operating system first supported this system call, but I know that SR10.2 supports it. */ extern void proc1_$get_loadav (); unsigned long load_ave[3]; proc1_$get_loadav (load_ave); if (nelem > 0) loadavg[elem++] = load_ave[0] / 65536.0; if (nelem > 1) loadavg[elem++] = load_ave[1] / 65536.0; if (nelem > 2) loadavg[elem++] = load_ave[2] / 65536.0; # endif /* apollo */ # if !defined (LDAV_DONE) && defined (OSF_MIPS) # define LDAV_DONE struct tbl_loadavg load_ave; table (TBL_LOADAVG, 0, &load_ave, 1, sizeof (load_ave)); loadavg[elem++] = (load_ave.tl_lscale == 0 ? 
load_ave.tl_avenrun.d[0] : (load_ave.tl_avenrun.l[0] / (double) load_ave.tl_lscale)); # endif /* OSF_MIPS */ # if !defined (LDAV_DONE) && (defined (__MSDOS__) || defined (WINDOWS32)) # define LDAV_DONE /* A faithful emulation is going to have to be saved for a rainy day. */ for ( ; elem < nelem; elem++) { loadavg[elem] = 0.0; } # endif /* __MSDOS__ || WINDOWS32 */ # if !defined (LDAV_DONE) && defined (OSF_ALPHA) # define LDAV_DONE struct tbl_loadavg load_ave; table (TBL_LOADAVG, 0, &load_ave, 1, sizeof (load_ave)); for (elem = 0; elem < nelem; elem++) loadavg[elem] = (load_ave.tl_lscale == 0 ? load_ave.tl_avenrun.d[elem] : (load_ave.tl_avenrun.l[elem] / (double) load_ave.tl_lscale)); # endif /* OSF_ALPHA */ # if ! defined LDAV_DONE && defined __VMS /* VMS specific code -- read from the Load Ave driver. */ LOAD_AVE_TYPE load_ave[3]; static bool getloadavg_initialized; # ifdef eunice struct { int dsc$w_length; char *dsc$a_pointer; } descriptor; # endif /* Ensure that there is a channel open to the load ave device. */ if (!getloadavg_initialized) { /* Attempt to open the channel. */ # ifdef eunice descriptor.dsc$w_length = 18; descriptor.dsc$a_pointer = "$$VMS_LOAD_AVERAGE"; # else $DESCRIPTOR (descriptor, "LAV0:"); # endif if (sys$assign (&descriptor, &channel, 0, 0) & 1) getloadavg_initialized = true; } /* Read the load average vector. */ if (getloadavg_initialized && !(sys$qiow (0, channel, IO$_READVBLK, 0, 0, 0, load_ave, 12, 0, 0, 0, 0) & 1)) { sys$dassgn (channel); getloadavg_initialized = false; } if (!getloadavg_initialized) return -1; # endif /* ! defined LDAV_DONE && defined __VMS */ # if ! defined LDAV_DONE && defined LOAD_AVE_TYPE && ! defined __VMS /* UNIX-specific code -- read the average from /dev/kmem. */ # define LDAV_PRIVILEGED /* This code requires special installation. */ LOAD_AVE_TYPE load_ave[3]; /* Get the address of LDAV_SYMBOL. */ if (offset == 0) { # ifndef sgi # if ! defined NLIST_STRUCT || ! 
defined N_NAME_POINTER strcpy (nl[0].n_name, LDAV_SYMBOL); strcpy (nl[1].n_name, ""); # else /* NLIST_STRUCT */ # ifdef HAVE_STRUCT_NLIST_N_UN_N_NAME nl[0].n_un.n_name = LDAV_SYMBOL; nl[1].n_un.n_name = 0; # else /* not HAVE_STRUCT_NLIST_N_UN_N_NAME */ nl[0].n_name = LDAV_SYMBOL; nl[1].n_name = 0; # endif /* not HAVE_STRUCT_NLIST_N_UN_N_NAME */ # endif /* NLIST_STRUCT */ # ifndef SUNOS_5 if ( # if !(defined (_AIX) && !defined (ps2)) nlist (KERNEL_FILE, nl) # else /* _AIX */ knlist (nl, 1, sizeof (nl[0])) # endif >= 0) /* Omit "&& nl[0].n_type != 0 " -- it breaks on Sun386i. */ { # ifdef FIXUP_KERNEL_SYMBOL_ADDR FIXUP_KERNEL_SYMBOL_ADDR (nl); # endif offset = nl[0].n_value; } # endif /* !SUNOS_5 */ # else /* sgi */ int ldav_off; ldav_off = sysmp (MP_KERNADDR, MPKA_AVENRUN); if (ldav_off != -1) offset = (long int) ldav_off & 0x7fffffff; # endif /* sgi */ } /* Make sure we have /dev/kmem open. */ if (!getloadavg_initialized) { # ifndef SUNOS_5 channel = open ("/dev/kmem", O_RDONLY); if (channel >= 0) { /* Set the channel to close on exec, so it does not litter any child's descriptor table. */ set_cloexec_flag (channel, true); getloadavg_initialized = true; } # else /* SUNOS_5 */ /* We pass 0 for the kernel, corefile, and swapfile names to use the currently running kernel. */ kd = kvm_open (0, 0, 0, O_RDONLY, 0); if (kd != 0) { /* nlist the currently running kernel. */ kvm_nlist (kd, nl); offset = nl[0].n_value; getloadavg_initialized = true; } # endif /* SUNOS_5 */ } /* If we can, get the load average values. */ if (offset && getloadavg_initialized) { /* Try to read the load. 
*/ # ifndef SUNOS_5 if (lseek (channel, offset, 0) == -1L || read (channel, (char *) load_ave, sizeof (load_ave)) != sizeof (load_ave)) { close (channel); getloadavg_initialized = false; } # else /* SUNOS_5 */ if (kvm_read (kd, offset, (char *) load_ave, sizeof (load_ave)) != sizeof (load_ave)) { kvm_close (kd); getloadavg_initialized = false; } # endif /* SUNOS_5 */ } if (offset == 0 || !getloadavg_initialized) return -1; # endif /* ! defined LDAV_DONE && defined LOAD_AVE_TYPE && ! defined __VMS */ # if !defined (LDAV_DONE) && defined (LOAD_AVE_TYPE) /* Including VMS. */ if (nelem > 0) loadavg[elem++] = LDAV_CVT (load_ave[0]); if (nelem > 1) loadavg[elem++] = LDAV_CVT (load_ave[1]); if (nelem > 2) loadavg[elem++] = LDAV_CVT (load_ave[2]); # define LDAV_DONE # endif /* !LDAV_DONE && LOAD_AVE_TYPE */ # if !defined LDAV_DONE /* Set errno to zero to indicate that there was no particular error; this function just can't work at all on this system. */ errno = 0; elem = -1; # endif return elem; }
/* * Data collection function take_snapshot starts here * Get data from kernel and save into the snapshot strutcs * Argument is the snapshot struct to save to. Global anyway, but looks nicer */ static int take_snapshot(struct cpu_stat_snapshot *css) { /* * Variables start here */ /* * High resolution time counter */ struct timeval tp; unsigned long long current_time; /* * see libperfstat.h, holds CPU/memory data */ perfstat_cpu_total_t cs; perfstat_memory_total_t ms; /* * The usual stuff to count on, err, by */ int i; /* * Variables end here */ /* * Function starts here */ /* * Get time */ gettimeofday(&tp, (struct timezone *)NULL); current_time = tp.tv_sec * (unsigned long long)1000000 + tp.tv_usec; /* * If we have just gotten the data, return the values from last run (skip if-clause) * This happens on a snmpwalk request. No need to read the perfstat again * if we just did it less than 2 seconds ago * Jumps into if-clause either when snapshot is empty or when too old */ if ((css->css_time == 0) || (current_time > css->css_time + 2000000)) { /* * Make sure we clean up before we put new data into snapshot */ memset(css, 0, sizeof *css); /* * Update timer */ css->css_time = current_time; if((perfstat_cpu_total((perfstat_id_t *)NULL, &cs, sizeof(perfstat_cpu_total_t), 1) > 0) && (perfstat_memory_total((perfstat_id_t *)NULL, &ms, sizeof(perfstat_memory_total_t), 1) > 0)) { css->css_cpus = cs.ncpus; css->css_swapin = ms.pgspins; css->css_swapout = ms.pgspouts; css->css_blocks_read = cs.sysread; css->css_blocks_write = cs.syswrite; css->css_interrupts = cs.devintrs + cs.softintrs; css->css_context_sw = cs.pswitch; css->css_cpu[CPU_USER] = cs.user; css->css_cpu[CPU_SYSTEM] = cs.sys; css->css_cpu[CPU_IDLE] = cs.idle; css->css_cpu[CPU_WAIT] = cs.wait; } } /* * All engines running at warp speed, no problems (if there are any engines, that is) */ return (cs.ncpus > 0 ? 0 : -1); } /* take_snapshot ends here */
static int uptime_init (void) /* {{{ */
{
	/*
	 * On most unix systems the uptime is calculated by looking at the boot
	 * time (stored in unix time, since epoch) and the current one. We are
	 * going to do the same, reading the boot time value while executing
	 * the uptime_init function (there is no need to read, every time the
	 * plugin_read is called, a value that won't change). However, since
	 * uptime_init is run only once, if the function fails in retrieving
	 * the boot time, the plugin is unregistered and there is no chance to
	 * try again later. Nevertheless, this is very unlikely to happen.
	 *
	 * On success the file-scope `boottime' variable is set and 0 is
	 * returned; on failure -1 is returned and an ERROR is logged.
	 */
#if KERNEL_LINUX
	unsigned long starttime;
	char buffer[1024];
	int ret;
	FILE *fh;

	ret = 0;

	/* Linux: the boot time is the "btime" line of /proc/stat (STAT_FILE). */
	fh = fopen (STAT_FILE, "r");

	if (fh == NULL)
	{
		char errbuf[1024];
		ERROR ("uptime plugin: Cannot open "STAT_FILE": %s",
			sstrerror (errno, errbuf, sizeof (errbuf)));
		return (-1);
	}

	while (fgets (buffer, 1024, fh) != NULL)
	{
		/* look for the btime string and read the value */
		ret = sscanf (buffer, "btime %lu", &starttime);
		/* avoid further loops if btime has been found and read
		 * correctly (hopefully) */
		if (ret == 1)
			break;
	}

	fclose (fh);

	/* loop done, check if no value has been found/read */
	if (ret != 1)
	{
		ERROR ("uptime plugin: No value read from "STAT_FILE"");
		return (-1);
	}

	boottime = (time_t) starttime;

	if (boottime == 0)
	{
		ERROR ("uptime plugin: btime read from "STAT_FILE", "
				"but `boottime' is zero!");
		return (-1);
	}
/* #endif KERNEL_LINUX */

#elif HAVE_LIBKSTAT
	kstat_t *ksp;
	kstat_named_t *knp;

	ksp = NULL;
	knp = NULL;

	/* kstats chain already opened by update_kstat (using *kc),
	 * verify everything went fine. */
	if (kc == NULL)
	{
		ERROR ("uptime plugin: kstat chain control structure not available.");
		return (-1);
	}

	/* Solaris: boot time lives in the unix:0:system_misc kstat,
	 * entry "boot_time". */
	ksp = kstat_lookup (kc, "unix", 0, "system_misc");
	if (ksp == NULL)
	{
		ERROR ("uptime plugin: Cannot find unix:0:system_misc kstat.");
		return (-1);
	}

	if (kstat_read (kc, ksp, NULL) < 0)
	{
		ERROR ("uptime plugin: kstat_read failed.");
		return (-1);
	}

	knp = (kstat_named_t *) kstat_data_lookup (ksp, "boot_time");
	if (knp == NULL)
	{
		ERROR ("uptime plugin: kstat_data_lookup (boot_time) failed.");
		return (-1);
	}

	/* NOTE(review): value is read as ui32 -- presumably boot_time is an
	 * unsigned 32-bit unix timestamp on this platform; verify against the
	 * kstat documentation if this is ever ported. */
	boottime = (time_t) knp->value.ui32;

	if (boottime == 0)
	{
		ERROR ("uptime plugin: kstat_data_lookup returned success, "
			"but `boottime' is zero!");
		return (-1);
	}
/* #endif HAVE_LIBKSTAT */

# elif HAVE_SYS_SYSCTL_H
	/* BSD-style systems: kern.boottime sysctl returns a struct timeval. */
	struct timeval boottv = { 0 };
	size_t boottv_len;
	int status;

	int mib[] = { CTL_KERN, KERN_BOOTTIME };

	boottv_len = sizeof (boottv);

	status = sysctl (mib, STATIC_ARRAY_SIZE (mib), &boottv, &boottv_len,
			/* new_value = */ NULL, /* new_length = */ 0);
	if (status != 0)
	{
		char errbuf[1024];
		ERROR ("uptime plugin: No value read from sysctl interface: %s",
			sstrerror (errno, errbuf, sizeof (errbuf)));
		return (-1);
	}

	boottime = boottv.tv_sec;

	if (boottime == 0)
	{
		ERROR ("uptime plugin: sysctl(3) returned success, "
				"but `boottime' is zero!");
		return (-1);
	}
/* #endif HAVE_SYS_SYSCTL_H */

#elif HAVE_PERFSTAT
	/* AIX: no direct boot-time source; derive it from the current time
	 * minus the uptime in seconds (lbolt is in clock ticks). */
	int status;
	perfstat_cpu_total_t cputotal;
	int hertz;

	status = perfstat_cpu_total(NULL, &cputotal,
			sizeof(perfstat_cpu_total_t), 1);
	if (status < 0)
	{
		char errbuf[1024];
		ERROR ("uptime plugin: perfstat_cpu_total: %s",
			sstrerror (errno, errbuf, sizeof (errbuf)));
		return (-1);
	}

	/* Ticks per second; fall back to the compile-time HZ constant if
	 * sysconf cannot tell us. */
	hertz = sysconf(_SC_CLK_TCK);
	if (hertz <= 0)
		hertz = HZ;

	boottime = time(NULL) - cputotal.lbolt / hertz;
#endif /* HAVE_PERFSTAT */

	return (0);
} /* }}} int uptime_init */
static int load_read (void) { #if defined(HAVE_GETLOADAVG) double load[3]; if (getloadavg (load, 3) == 3) load_submit (load[LOADAVG_1MIN], load[LOADAVG_5MIN], load[LOADAVG_15MIN]); else { char errbuf[1024]; WARNING ("load: getloadavg failed: %s", sstrerror (errno, errbuf, sizeof (errbuf))); } /* #endif HAVE_GETLOADAVG */ #elif defined(KERNEL_LINUX) gauge_t snum, mnum, lnum; FILE *loadavg; char buffer[16]; char *fields[8]; int numfields; if ((loadavg = fopen ("/proc/loadavg", "r")) == NULL) { char errbuf[1024]; WARNING ("load: fopen: %s", sstrerror (errno, errbuf, sizeof (errbuf))); return (-1); } if (fgets (buffer, 16, loadavg) == NULL) { char errbuf[1024]; WARNING ("load: fgets: %s", sstrerror (errno, errbuf, sizeof (errbuf))); fclose (loadavg); return (-1); } if (fclose (loadavg)) { char errbuf[1024]; WARNING ("load: fclose: %s", sstrerror (errno, errbuf, sizeof (errbuf))); } numfields = strsplit (buffer, fields, 8); if (numfields < 3) return (-1); snum = atof (fields[0]); mnum = atof (fields[1]); lnum = atof (fields[2]); load_submit(snum, mnum, lnum); /* #endif KERNEL_LINUX */ #elif HAVE_LIBSTATGRAB gauge_t snum, mnum, lnum; sg_load_stats *ls; if ((ls = sg_get_load_stats ()) == NULL) return; snum = ls->min1; mnum = ls->min5; lnum = ls->min15; load_submit(snum, mnum, lnum); /* #endif HAVE_LIBSTATGRAB */ #elif HAVE_PERFSTAT gauge_t snum, mnum, lnum; perfstat_cpu_total_t cputotal; if (perfstat_cpu_total(NULL, &cputotal, sizeof(perfstat_cpu_total_t), 1) < 0) { char errbuf[1024]; WARNING ("load: perfstat_cpu : %s", sstrerror (errno, errbuf, sizeof (errbuf))); return (-1); } snum = (float)cputotal.loadavg[0]/(float)(1<<SBITS); mnum = (float)cputotal.loadavg[1]/(float)(1<<SBITS); lnum = (float)cputotal.loadavg[2]/(float)(1<<SBITS); load_submit(snum, mnum, lnum); /* #endif HAVE_PERFSTAT */ #else # error "No applicable input method." #endif return (0); }
/****************************************************************************** * * * Function: zbx_get_cpu_num * * * * Purpose: returns the number of processors which are currently online * * (i.e., available). * * * * Return value: number of CPUs * * * * Author: Eugene Grigorjev * * * ******************************************************************************/ static int zbx_get_cpu_num() { #if defined(_WINDOWS) SYSTEM_INFO sysInfo; GetSystemInfo(&sysInfo); return (int)sysInfo.dwNumberOfProcessors; #elif defined(HAVE_SYS_PSTAT_H) struct pst_dynamic psd; if (-1 == pstat_getdynamic(&psd, sizeof(struct pst_dynamic), 1, 0)) goto return_one; return (int)psd.psd_proc_cnt; #elif defined(_SC_NPROCESSORS_CONF) /* FreeBSD 7.0 x86 */ /* Solaris 10 x86 */ int ncpu; if (-1 == (ncpu = sysconf(_SC_NPROCESSORS_CONF))) goto return_one; return ncpu; #elif defined(HAVE_FUNCTION_SYSCTL_HW_NCPU) /* FreeBSD 6.2 x86; FreeBSD 7.0 x86 */ /* NetBSD 3.1 x86; NetBSD 4.0 x86 */ /* OpenBSD 4.2 x86 */ size_t len; int mib[] = {CTL_HW, HW_NCPU}, ncpu; len = sizeof(ncpu); if (0 != sysctl(mib, 2, &ncpu, &len, NULL, 0)) goto return_one; return ncpu; #elif defined(HAVE_PROC_CPUINFO) FILE *f = NULL; int ncpu = 0; if (NULL == (file = fopen("/proc/cpuinfo", "r"))) goto return_one; while (NULL != fgets(line, 1024, file)) { if (NULL == strstr(line, "processor")) continue; ncpu++; } zbx_fclose(file); if (0 == ncpu) goto return_one; return ncpu; #elif defined(HAVE_LIBPERFSTAT) /* AIX 6.1 */ perfstat_cpu_total_t ps_cpu_total; if (-1 == perfstat_cpu_total(NULL, &ps_cpu_total, sizeof(ps_cpu_total), 1)) goto return_one; return (int)ps_cpu_total.ncpus; #endif #ifndef _WINDOWS return_one: zabbix_log(LOG_LEVEL_WARNING, "cannot determine number of CPUs, assuming 1"); return 1; #endif }
/*
 * try to get load average
 * Inputs: pointer to array of doubles, number of elements in array
 * Returns: 0=array has values, -1=error occurred.
 *
 * Exactly one of the platform branches below is compiled in; each fills
 * r_ave[0..2] with the 1/5/15-minute load averages in its own way.
 */
int
try_getloadavg(double *r_ave, size_t s_ave)
{
#ifndef HAVE_GETLOADAVG
#ifdef HAVE_SYS_FIXPOINT_H
    fix             favenrun[3];
#endif
#if (defined(ultrix) || defined(sun) || defined(__alpha) || defined(dynix))
    int             i;
#if (defined(sun) || defined(__alpha) || defined(dynix))
    long            favenrun[3];
    if (s_ave > 3)              /* bounds check */
        return (-1);
/* kernel avenrun values are fixed-point, scaled by FSCALE */
#define FIX_TO_DBL(_IN) (((double) _IN)/((double) FSCALE))
#endif
#endif
#if defined(aix4) || defined(aix5) || defined(aix6) || defined(aix7)
    perfstat_cpu_total_t cs;
#endif
#if defined(hpux10) || defined(hpux11)
    struct pst_dynamic pst_buf;
#endif
#ifdef irix6
    int             i, favenrun[3];
    sgt_cookie_t    cookie;
#endif
#endif                          /* !HAVE_GETLOADAVG */

#ifdef HAVE_GETLOADAVG
    /* Preferred path: let libc do the work. */
    if (getloadavg(r_ave, s_ave) == -1)
        return (-1);
#elif defined(linux)
    {
        FILE           *in = fopen("/proc/loadavg", "r");
        if (!in) {
            NETSNMP_LOGONCE((LOG_ERR, "snmpd: cannot open /proc/loadavg\n"));
            return (-1);
        }
        /* NOTE(review): fscanf result is unchecked -- a malformed
         * /proc/loadavg would leave r_ave partially unset. */
        fscanf(in, "%lf %lf %lf", r_ave, (r_ave + 1), (r_ave + 2));
        fclose(in);
    }
#elif (defined(ultrix) || defined(sun) || defined(__alpha) || defined(dynix))
    /* Read the kernel's fixed-point avenrun[] symbol via nlist. */
    if (auto_nlist(LOADAVE_SYMBOL, (char *) favenrun, sizeof(favenrun))
        == 0)
        return (-1);
    for (i = 0; i < s_ave; i++)
        *(r_ave + i) = FIX_TO_DBL(favenrun[i]);
#elif defined(hpux10) || defined(hpux11)
    /* HP-UX exposes the averages directly in pst_dynamic.
     * NOTE(review): writes r_ave[0..2] without consulting s_ave --
     * presumably callers always pass at least 3; confirm. */
    if (pstat_getdynamic(&pst_buf, sizeof(struct pst_dynamic), 1, 0) < 0)
        return(-1);
    r_ave[0] = pst_buf.psd_avg_1_min;
    r_ave[1] = pst_buf.psd_avg_5_min;
    r_ave[2] = pst_buf.psd_avg_15_min;
#elif defined(aix4) || defined(aix5) || defined(aix6) || defined(aix7)
    /* AIX perfstat loadavg values are fixed-point with 16 fraction bits
     * (hence the 65536.0 divisor).
     * NOTE(review): if perfstat_cpu_total fails, r_ave is left untouched
     * yet the function still falls through to `return 0' -- callers may
     * see stale/uninitialized values; verify intended. */
    if(perfstat_cpu_total((perfstat_id_t *)NULL, &cs,
                          sizeof(perfstat_cpu_total_t), 1) > 0) {
        r_ave[0] = cs.loadavg[0] / 65536.0;
        r_ave[1] = cs.loadavg[1] / 65536.0;
        r_ave[2] = cs.loadavg[2] / 65536.0;
    }
#elif defined(irix6)
    /* IRIX: fetch the integer avenrun[] kernel symbol via sysget;
     * values are scaled by 1000. */
    SGT_COOKIE_INIT(&cookie);
    SGT_COOKIE_SET_KSYM(&cookie, "avenrun");
    sysget(SGT_KSYM, (char*)favenrun, sizeof(favenrun), SGT_READ,
           &cookie);
    for (i = 0; i < s_ave; i++)
        r_ave[i] = favenrun[i] / 1000.0;
    DEBUGMSGTL(("ucd-snmp/loadave", "irix6: %d %d %d\n",
                favenrun[0], favenrun[1], favenrun[2]));
#elif !defined(cygwin)
    /* Last resort: read raw doubles straight from the kernel symbol. */
#if defined(NETSNMP_CAN_USE_NLIST) && defined(LOADAVE_SYMBOL)
    if (auto_nlist(LOADAVE_SYMBOL, (char *) r_ave, sizeof(double) * s_ave)
        == 0)
#endif
        return (-1);
#endif
    /*
     * XXX
     *   To calculate this, we need to compare
     *   successive values of the kernel array
     *   '_cp_times', and calculate the resulting
     *   percentage changes.
     *     This calculation needs to be performed
     *   regularly - perhaps as a background process.
     *
     *   See the source to 'top' for full details.
     *
     * The linux SNMP HostRes implementation
     *   uses 'avenrun[0]*100' as an approximation.
     *   This is less than accurate, but has the
     *   advantage of being simple to implement!
     *
     * I'm also assuming a single processor
     */
    return 0;
}