/* Return the amount of physical memory available, in bytes, as a double
   (a double is used so that values larger than SIZE_MAX are representable).
   Platform-specific probes are tried in order; the first one that yields a
   sane value wins.  If none applies, guess 25% of total physical memory.  */
double physmem_available (void)
{
#if defined _SC_AVPHYS_PAGES && defined _SC_PAGESIZE
  { /* This works on linux-gnu, solaris2 and cygwin.  */
    double pages = sysconf (_SC_AVPHYS_PAGES);
    double pagesize = sysconf (_SC_PAGESIZE);
    /* sysconf returns -1 on failure, so only trust non-negative values.  */
    if (0 <= pages && 0 <= pagesize)
      return pages * pagesize;
  }
#endif

#if HAVE_PSTAT_GETSTATIC && HAVE_PSTAT_GETDYNAMIC
  { /* This works on hpux11.  */
    struct pst_static pss;
    struct pst_dynamic psd;
    if (0 <= pstat_getstatic (&pss, sizeof pss, 1, 0)
        && 0 <= pstat_getdynamic (&psd, sizeof psd, 1, 0))
      {
        double pages = psd.psd_free;
        double pagesize = pss.page_size;
        if (0 <= pages && 0 <= pagesize)
          return pages * pagesize;
      }
  }
#endif

#if HAVE_SYSMP && defined MP_SAGET && defined MPSA_RMINFO && defined _SC_PAGESIZE
  { /* This works on irix6.  */
    struct rminfo realmem;
    if (sysmp (MP_SAGET, MPSA_RMINFO, &realmem, sizeof realmem) == 0)
      {
        double pagesize = sysconf (_SC_PAGESIZE);
        double pages = realmem.availrmem;
        if (0 <= pages && 0 <= pagesize)
          return pages * pagesize;
      }
  }
#endif

#if HAVE_TABLE && defined TBL_VMSTATS
  { /* This works on Tru64 UNIX V4/5.  */
    struct tbl_vmstats vmstats;
    if (table (TBL_VMSTATS, 0, &vmstats, 1, sizeof (vmstats)) == 1)
      {
        double pages = vmstats.free_count;
        double pagesize = vmstats.pagesize;
        if (0 <= pages && 0 <= pagesize)
          return pages * pagesize;
      }
  }
#endif

#if HAVE_SYSCTL && defined HW_USERMEM
  { /* This works on *bsd and darwin.  */
    unsigned int usermem;
    size_t len = sizeof usermem;
    static int mib[2] = { CTL_HW, HW_USERMEM };
    /* Also verify the kernel wrote exactly the expected number of bytes.  */
    if (sysctl (mib, ARRAY_SIZE (mib), &usermem, &len, NULL, 0) == 0
        && len == sizeof (usermem))
      return (double) usermem;
  }
#endif

#if defined _WIN32
  { /* this works on windows */
    PFN_MS_EX pfnex;
    HMODULE h = GetModuleHandle ("kernel32.dll");

    if (!h)
      return 0.0;

    /* Use GlobalMemoryStatusEx if available.  It is looked up at run time
       because it does not exist on older Windows versions.  */
    if ((pfnex = (PFN_MS_EX) GetProcAddress (h, "GlobalMemoryStatusEx")))
      {
        lMEMORYSTATUSEX lms_ex;
        lms_ex.dwLength = sizeof lms_ex;
        if (!pfnex (&lms_ex))
          return 0.0;
/* BEGIN GCC-XML MODIFICATIONS ($Date: 2009-06-09 18:18:45 $) */
#if !defined(_MSC_VER) || _MSC_VER >= 1300
        return (double) lms_ex.ullAvailPhys;
#else
        /* Old MSVC cannot convert unsigned __int64 to double directly,
           so go through a signed intermediate.  */
        return (double)(signed __int64) lms_ex.ullAvailPhys;
#endif
/* END GCC-XML MODIFICATIONS ($Date: 2009-06-09 18:18:45 $) */
      }
    /* Fall back to GlobalMemoryStatus which is always available.
       but returns wrong results for physical memory > 4GB */
    else
      {
        MEMORYSTATUS ms;
        GlobalMemoryStatus (&ms);
        return (double) ms.dwAvailPhys;
      }
  }
#endif

  /* Guess 25% of physical memory.  */
  return physmem_total () / 4;
}
static int getentropy_fallback(void *buf, size_t len) { uint8_t results[SHA512_DIGEST_LENGTH]; int save_errno = errno, e, pgs = sysconf(_SC_PAGESIZE), faster = 0, repeat; static int cnt; struct timespec ts; struct timeval tv; struct pst_vminfo pvi; struct pst_vm_status pvs; struct pst_dynamic pdy; struct rusage ru; sigset_t sigset; struct stat st; SHA512_CTX ctx; static pid_t lastpid; pid_t pid; size_t i, ii, m; char *p; pid = getpid(); if (lastpid == pid) { faster = 1; repeat = 2; } else { faster = 0; lastpid = pid; repeat = REPEAT; } for (i = 0; i < len; ) { int j; SHA512_Init(&ctx); for (j = 0; j < repeat; j++) { HX((e = gettimeofday(&tv, NULL)) == -1, tv); if (e != -1) { cnt += (int)tv.tv_sec; cnt += (int)tv.tv_usec; } HX(pstat_getvminfo(&pvi, sizeof(pvi), 1, 0) != 1, pvi); HX(pstat_getprocvm(&pvs, sizeof(pvs), 0, 0) != 1, pvs); for (ii = 0; ii < sizeof(cl)/sizeof(cl[0]); ii++) HX(clock_gettime(cl[ii], &ts) == -1, ts); HX((pid = getpid()) == -1, pid); HX((pid = getsid(pid)) == -1, pid); HX((pid = getppid()) == -1, pid); HX((pid = getpgid(0)) == -1, pid); HX((e = getpriority(0, 0)) == -1, e); if(pstat_getdynamic(&pdy, sizeof(pdy), 1, 0) != 1) { HD(errno); } else { HD(pdy.psd_avg_1_min); HD(pdy.psd_avg_5_min); HD(pdy.psd_avg_15_min); } if (!faster) { ts.tv_sec = 0; ts.tv_nsec = 1; (void) nanosleep(&ts, NULL); } HX(sigpending(&sigset) == -1, sigset); HX(sigprocmask(SIG_BLOCK, NULL, &sigset) == -1, sigset); HF(getentropy); /* an addr in this library */ HF(printf); /* an addr in libc */ p = (char *)&p; HD(p); /* an addr on stack */ p = (char *)&errno; HD(p); /* the addr of errno */ if (i == 0) { struct sockaddr_storage ss; struct statvfs stvfs; struct termios tios; socklen_t ssl; off_t off; /* * Prime-sized mappings encourage fragmentation; * thus exposing some address entropy. 
*/ struct mm { size_t npg; void *p; } mm[] = { { 17, MAP_FAILED }, { 3, MAP_FAILED }, { 11, MAP_FAILED }, { 2, MAP_FAILED }, { 5, MAP_FAILED }, { 3, MAP_FAILED }, { 7, MAP_FAILED }, { 1, MAP_FAILED }, { 57, MAP_FAILED }, { 3, MAP_FAILED }, { 131, MAP_FAILED }, { 1, MAP_FAILED }, }; for (m = 0; m < sizeof mm/sizeof(mm[0]); m++) { HX(mm[m].p = mmap(NULL, mm[m].npg * pgs, PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANON, -1, (off_t)0), mm[m].p); if (mm[m].p != MAP_FAILED) { size_t mo; /* Touch some memory... */ p = mm[m].p; mo = cnt % (mm[m].npg * pgs - 1); p[mo] = 1; cnt += (int)((long)(mm[m].p) / pgs); } /* Check cnts and times... */ for (ii = 0; ii < sizeof(cl)/sizeof(cl[0]); ii++) { HX((e = clock_gettime(cl[ii], &ts)) == -1, ts); if (e != -1) cnt += (int)ts.tv_nsec; } HX((e = getrusage(RUSAGE_SELF, &ru)) == -1, ru); if (e != -1) { cnt += (int)ru.ru_utime.tv_sec; cnt += (int)ru.ru_utime.tv_usec; } } for (m = 0; m < sizeof mm/sizeof(mm[0]); m++) { if (mm[m].p != MAP_FAILED) munmap(mm[m].p, mm[m].npg * pgs); mm[m].p = MAP_FAILED; } HX(stat(".", &st) == -1, st); HX(statvfs(".", &stvfs) == -1, stvfs); HX(stat("/", &st) == -1, st); HX(statvfs("/", &stvfs) == -1, stvfs); HX((e = fstat(0, &st)) == -1, st); if (e == -1) { if (S_ISREG(st.st_mode) || S_ISFIFO(st.st_mode) || S_ISSOCK(st.st_mode)) { HX(fstatvfs(0, &stvfs) == -1, stvfs); HX((off = lseek(0, (off_t)0, SEEK_CUR)) < 0, off); } if (S_ISCHR(st.st_mode)) { HX(tcgetattr(0, &tios) == -1, tios); } else if (S_ISSOCK(st.st_mode)) { memset(&ss, 0, sizeof ss); ssl = sizeof(ss); HX(getpeername(0, (void *)&ss, &ssl) == -1, ss); } } HX((e = getrusage(RUSAGE_CHILDREN, &ru)) == -1, ru); if (e != -1) { cnt += (int)ru.ru_utime.tv_sec; cnt += (int)ru.ru_utime.tv_usec; } } else { /* Subsequent hashes absorb previous result */ HD(results); } HX((e = gettimeofday(&tv, NULL)) == -1, tv); if (e != -1) { cnt += (int)tv.tv_sec; cnt += (int)tv.tv_usec; } HD(cnt); } SHA512_Final(results, &ctx); memcpy((char *)buf + i, results, min(sizeof(results), 
len - i)); i += min(sizeof(results), len - i); } explicit_bzero(&ctx, sizeof ctx); explicit_bzero(results, sizeof results); if (gotdata(buf, len) == 0) { errno = save_errno; return 0; /* satisfied */ } errno = EIO; return -1; }
int getloadavg (double loadavg[], int nelem) { int elem = 0; /* Return value. */ # ifdef NO_GET_LOAD_AVG # define LDAV_DONE errno = ENOSYS; elem = -1; # endif # if !defined (LDAV_DONE) && defined (HAVE_LIBKSTAT) /* Solaris <= 2.6 */ /* Use libkstat because we don't have to be root. */ # define LDAV_DONE kstat_ctl_t *kc; kstat_t *ksp; kstat_named_t *kn; int saved_errno; kc = kstat_open (); if (kc == 0) return -1; ksp = kstat_lookup (kc, "unix", 0, "system_misc"); if (ksp == 0) return -1; if (kstat_read (kc, ksp, 0) == -1) return -1; kn = kstat_data_lookup (ksp, "avenrun_1min"); if (kn == 0) { /* Return -1 if no load average information is available. */ nelem = 0; elem = -1; } if (nelem >= 1) loadavg[elem++] = (double) kn->value.ul / FSCALE; if (nelem >= 2) { kn = kstat_data_lookup (ksp, "avenrun_5min"); if (kn != 0) { loadavg[elem++] = (double) kn->value.ul / FSCALE; if (nelem >= 3) { kn = kstat_data_lookup (ksp, "avenrun_15min"); if (kn != 0) loadavg[elem++] = (double) kn->value.ul / FSCALE; } } } saved_errno = errno; kstat_close (kc); errno = saved_errno; # endif /* HAVE_LIBKSTAT */ # if !defined (LDAV_DONE) && defined (hpux) && defined (HAVE_PSTAT_GETDYNAMIC) /* HP-UX */ /* Use pstat_getdynamic() because we don't have to be root. */ # define LDAV_DONE # undef LOAD_AVE_TYPE struct pst_dynamic dyn_info; if (pstat_getdynamic (&dyn_info, sizeof (dyn_info), 0, 0) < 0) return -1; if (nelem > 0) loadavg[elem++] = dyn_info.psd_avg_1_min; if (nelem > 1) loadavg[elem++] = dyn_info.psd_avg_5_min; if (nelem > 2) loadavg[elem++] = dyn_info.psd_avg_15_min; # endif /* hpux && HAVE_PSTAT_GETDYNAMIC */ # if ! defined LDAV_DONE && defined HAVE_LIBPERFSTAT /* AIX */ # define LDAV_DONE # undef LOAD_AVE_TYPE /* Use perfstat_cpu_total because we don't have to be root. 
*/ { perfstat_cpu_total_t cpu_stats; int result = perfstat_cpu_total (NULL, &cpu_stats, sizeof cpu_stats, 1); if (result == -1) return result; loadavg[0] = cpu_stats.loadavg[0] / (double)(1 << SBITS); loadavg[1] = cpu_stats.loadavg[1] / (double)(1 << SBITS); loadavg[2] = cpu_stats.loadavg[2] / (double)(1 << SBITS); elem = 3; } # endif # if !defined (LDAV_DONE) && (defined (__linux__) || defined (__CYGWIN__)) /* Linux without glibc, Cygwin */ # define LDAV_DONE # undef LOAD_AVE_TYPE # ifndef LINUX_LDAV_FILE # define LINUX_LDAV_FILE "/proc/loadavg" # endif char ldavgbuf[3 * (INT_STRLEN_BOUND (int) + sizeof ".00 ")]; char const *ptr = ldavgbuf; int fd, count, saved_errno; fd = open (LINUX_LDAV_FILE, O_RDONLY); if (fd == -1) return -1; count = read (fd, ldavgbuf, sizeof ldavgbuf - 1); saved_errno = errno; (void) close (fd); errno = saved_errno; if (count <= 0) return -1; ldavgbuf[count] = '\0'; for (elem = 0; elem < nelem; elem++) { double numerator = 0; double denominator = 1; while (*ptr == ' ') ptr++; /* Finish if this number is missing, and report an error if all were missing. */ if (! 
('0' <= *ptr && *ptr <= '9')) { if (elem == 0) { errno = ENOTSUP; return -1; } break; } while ('0' <= *ptr && *ptr <= '9') numerator = 10 * numerator + (*ptr++ - '0'); if (*ptr == '.') for (ptr++; '0' <= *ptr && *ptr <= '9'; ptr++) numerator = 10 * numerator + (*ptr - '0'), denominator *= 10; loadavg[elem++] = numerator / denominator; } return elem; # endif /* __linux__ || __CYGWIN__ */ # if !defined (LDAV_DONE) && defined (__NetBSD__) /* NetBSD < 0.9 */ # define LDAV_DONE # undef LOAD_AVE_TYPE # ifndef NETBSD_LDAV_FILE # define NETBSD_LDAV_FILE "/kern/loadavg" # endif unsigned long int load_ave[3], scale; int count; FILE *fp; fp = fopen (NETBSD_LDAV_FILE, "r"); if (fp == NULL) return -1; count = fscanf (fp, "%lu %lu %lu %lu\n", &load_ave[0], &load_ave[1], &load_ave[2], &scale); (void) fclose (fp); if (count != 4) { errno = ENOTSUP; return -1; } for (elem = 0; elem < nelem; elem++) loadavg[elem] = (double) load_ave[elem] / (double) scale; return elem; # endif /* __NetBSD__ */ # if !defined (LDAV_DONE) && defined (NeXT) /* NeXTStep */ # define LDAV_DONE /* The NeXT code was adapted from iscreen 3.2. */ host_t host; struct processor_set_basic_info info; unsigned int info_count; /* We only know how to get the 1-minute average for this system, so even if the caller asks for more than 1, we only return 1. 
*/ if (!getloadavg_initialized) { if (processor_set_default (host_self (), &default_set) == KERN_SUCCESS) getloadavg_initialized = true; } if (getloadavg_initialized) { info_count = PROCESSOR_SET_BASIC_INFO_COUNT; if (processor_set_info (default_set, PROCESSOR_SET_BASIC_INFO, &host, (processor_set_info_t) &info, &info_count) != KERN_SUCCESS) getloadavg_initialized = false; else { if (nelem > 0) loadavg[elem++] = (double) info.load_average / LOAD_SCALE; } } if (!getloadavg_initialized) { errno = ENOTSUP; return -1; } # endif /* NeXT */ # if !defined (LDAV_DONE) && defined (UMAX) # define LDAV_DONE /* UMAX 4.2, which runs on the Encore Multimax multiprocessor, does not have a /dev/kmem. Information about the workings of the running kernel can be gathered with inq_stats system calls. We only know how to get the 1-minute average for this system. */ struct proc_summary proc_sum_data; struct stat_descr proc_info; double load; register unsigned int i, j; if (cpus == 0) { register unsigned int c, i; struct cpu_config conf; struct stat_descr desc; desc.sd_next = 0; desc.sd_subsys = SUBSYS_CPU; desc.sd_type = CPUTYPE_CONFIG; desc.sd_addr = (char *) &conf; desc.sd_size = sizeof conf; if (inq_stats (1, &desc)) return -1; c = 0; for (i = 0; i < conf.config_maxclass; ++i) { struct class_stats stats; memset (&stats, 0, sizeof stats); desc.sd_type = CPUTYPE_CLASS; desc.sd_objid = i; desc.sd_addr = (char *) &stats; desc.sd_size = sizeof stats; if (inq_stats (1, &desc)) return -1; c += stats.class_numcpus; } cpus = c; samples = cpus < 2 ? 
3 : (2 * cpus / 3); } proc_info.sd_next = 0; proc_info.sd_subsys = SUBSYS_PROC; proc_info.sd_type = PROCTYPE_SUMMARY; proc_info.sd_addr = (char *) &proc_sum_data; proc_info.sd_size = sizeof (struct proc_summary); proc_info.sd_sizeused = 0; if (inq_stats (1, &proc_info) != 0) return -1; load = proc_sum_data.ps_nrunnable; j = 0; for (i = samples - 1; i > 0; --i) { load += proc_sum_data.ps_nrun[j]; if (j++ == PS_NRUNSIZE) j = 0; } if (nelem > 0) loadavg[elem++] = load / samples / cpus; # endif /* UMAX */ # if !defined (LDAV_DONE) && defined (DGUX) # define LDAV_DONE /* This call can return -1 for an error, but with good args it's not supposed to fail. The first argument is for no apparent reason of type 'long int *'. */ dg_sys_info ((long int *) &load_info, DG_SYS_INFO_LOAD_INFO_TYPE, DG_SYS_INFO_LOAD_VERSION_0); if (nelem > 0) loadavg[elem++] = load_info.one_minute; if (nelem > 1) loadavg[elem++] = load_info.five_minute; if (nelem > 2) loadavg[elem++] = load_info.fifteen_minute; # endif /* DGUX */ # if !defined (LDAV_DONE) && defined (apollo) # define LDAV_DONE /* Apollo code from [email protected] (Ray Lischner). This system call is not documented. The load average is obtained as three long integers, for the load average over the past minute, five minutes, and fifteen minutes. Each value is a scaled integer, with 16 bits of integer part and 16 bits of fraction part. I'm not sure which operating system first supported this system call, but I know that SR10.2 supports it. */ extern void proc1_$get_loadav (); unsigned long load_ave[3]; proc1_$get_loadav (load_ave); if (nelem > 0) loadavg[elem++] = load_ave[0] / 65536.0; if (nelem > 1) loadavg[elem++] = load_ave[1] / 65536.0; if (nelem > 2) loadavg[elem++] = load_ave[2] / 65536.0; # endif /* apollo */ # if !defined (LDAV_DONE) && defined (OSF_MIPS) # define LDAV_DONE struct tbl_loadavg load_ave; table (TBL_LOADAVG, 0, &load_ave, 1, sizeof (load_ave)); loadavg[elem++] = (load_ave.tl_lscale == 0 ? 
load_ave.tl_avenrun.d[0] : (load_ave.tl_avenrun.l[0] / (double) load_ave.tl_lscale)); # endif /* OSF_MIPS */ # if !defined (LDAV_DONE) && (defined (__MSDOS__) || defined (WINDOWS32)) /* DJGPP */ # define LDAV_DONE /* A faithful emulation is going to have to be saved for a rainy day. */ for ( ; elem < nelem; elem++) { loadavg[elem] = 0.0; } # endif /* __MSDOS__ || WINDOWS32 */ # if !defined (LDAV_DONE) && defined (OSF_ALPHA) /* OSF/1 */ # define LDAV_DONE struct tbl_loadavg load_ave; table (TBL_LOADAVG, 0, &load_ave, 1, sizeof (load_ave)); for (elem = 0; elem < nelem; elem++) loadavg[elem] = (load_ave.tl_lscale == 0 ? load_ave.tl_avenrun.d[elem] : (load_ave.tl_avenrun.l[elem] / (double) load_ave.tl_lscale)); # endif /* OSF_ALPHA */ # if ! defined LDAV_DONE && defined __VMS /* VMS */ /* VMS specific code -- read from the Load Ave driver. */ LOAD_AVE_TYPE load_ave[3]; static bool getloadavg_initialized; # ifdef eunice struct { int dsc$w_length; char *dsc$a_pointer; } descriptor; # endif /* Ensure that there is a channel open to the load ave device. */ if (!getloadavg_initialized) { /* Attempt to open the channel. */ # ifdef eunice descriptor.dsc$w_length = 18; descriptor.dsc$a_pointer = "$$VMS_LOAD_AVERAGE"; # else $DESCRIPTOR (descriptor, "LAV0:"); # endif if (sys$assign (&descriptor, &channel, 0, 0) & 1) getloadavg_initialized = true; } /* Read the load average vector. */ if (getloadavg_initialized && !(sys$qiow (0, channel, IO$_READVBLK, 0, 0, 0, load_ave, 12, 0, 0, 0, 0) & 1)) { sys$dassgn (channel); getloadavg_initialized = false; } if (!getloadavg_initialized) { errno = ENOTSUP; return -1; } # endif /* ! defined LDAV_DONE && defined __VMS */ # if ! defined LDAV_DONE && defined LOAD_AVE_TYPE && ! defined __VMS /* IRIX, other old systems */ /* UNIX-specific code -- read the average from /dev/kmem. */ # define LDAV_PRIVILEGED /* This code requires special installation. */ LOAD_AVE_TYPE load_ave[3]; /* Get the address of LDAV_SYMBOL. 
*/ if (offset == 0) { # ifndef sgi # if ! defined NLIST_STRUCT || ! defined N_NAME_POINTER strcpy (name_list[0].n_name, LDAV_SYMBOL); strcpy (name_list[1].n_name, ""); # else /* NLIST_STRUCT */ # ifdef HAVE_STRUCT_NLIST_N_UN_N_NAME name_list[0].n_un.n_name = LDAV_SYMBOL; name_list[1].n_un.n_name = 0; # else /* not HAVE_STRUCT_NLIST_N_UN_N_NAME */ name_list[0].n_name = LDAV_SYMBOL; name_list[1].n_name = 0; # endif /* not HAVE_STRUCT_NLIST_N_UN_N_NAME */ # endif /* NLIST_STRUCT */ # ifndef SUNOS_5 if ( # if !(defined (_AIX) && !defined (ps2)) nlist (KERNEL_FILE, name_list) # else /* _AIX */ knlist (name_list, 1, sizeof (name_list[0])) # endif >= 0) /* Omit "&& name_list[0].n_type != 0 " -- it breaks on Sun386i. */ { # ifdef FIXUP_KERNEL_SYMBOL_ADDR FIXUP_KERNEL_SYMBOL_ADDR (name_list); # endif offset = name_list[0].n_value; } # endif /* !SUNOS_5 */ # else /* sgi */ ptrdiff_t ldav_off = sysmp (MP_KERNADDR, MPKA_AVENRUN); if (ldav_off != -1) offset = (long int) ldav_off & 0x7fffffff; # endif /* sgi */ } /* Make sure we have /dev/kmem open. */ if (!getloadavg_initialized) { # ifndef SUNOS_5 /* Set the channel to close on exec, so it does not litter any child's descriptor table. */ # ifndef O_CLOEXEC # define O_CLOEXEC 0 # endif int fd = open ("/dev/kmem", O_RDONLY | O_CLOEXEC); if (0 <= fd) { # if F_DUPFD_CLOEXEC if (fd <= STDERR_FILENO) { int fd1 = fcntl (fd, F_DUPFD_CLOEXEC, STDERR_FILENO + 1); close (fd); fd = fd1; } # endif if (0 <= fd) { channel = fd; getloadavg_initialized = true; } } # else /* SUNOS_5 */ /* We pass 0 for the kernel, corefile, and swapfile names to use the currently running kernel. */ kd = kvm_open (0, 0, 0, O_RDONLY, 0); if (kd != 0) { /* nlist the currently running kernel. */ kvm_nlist (kd, name_list); offset = name_list[0].n_value; getloadavg_initialized = true; } # endif /* SUNOS_5 */ } /* If we can, get the load average values. */ if (offset && getloadavg_initialized) { /* Try to read the load. 
*/ # ifndef SUNOS_5 if (lseek (channel, offset, 0) == -1L || read (channel, (char *) load_ave, sizeof (load_ave)) != sizeof (load_ave)) { close (channel); getloadavg_initialized = false; } # else /* SUNOS_5 */ if (kvm_read (kd, offset, (char *) load_ave, sizeof (load_ave)) != sizeof (load_ave)) { kvm_close (kd); getloadavg_initialized = false; } # endif /* SUNOS_5 */ } if (offset == 0 || !getloadavg_initialized) { errno = ENOTSUP; return -1; } # endif /* ! defined LDAV_DONE && defined LOAD_AVE_TYPE && ! defined __VMS */ # if !defined (LDAV_DONE) && defined (LOAD_AVE_TYPE) /* Including VMS. */ if (nelem > 0) loadavg[elem++] = LDAV_CVT (load_ave[0]); if (nelem > 1) loadavg[elem++] = LDAV_CVT (load_ave[1]); if (nelem > 2) loadavg[elem++] = LDAV_CVT (load_ave[2]); # define LDAV_DONE # endif /* !LDAV_DONE && LOAD_AVE_TYPE */ # if !defined LDAV_DONE errno = ENOSYS; elem = -1; # endif return elem; }
/*++
Function:
  GetSystemInfo

GetSystemInfo

The GetSystemInfo function returns information about the current system.

Parameters

lpSystemInfo
       [out] Pointer to a SYSTEM_INFO structure that receives the information.

Return Values

This function does not return a value.

Note:
  fields returned by this function are:
    dwNumberOfProcessors
    dwPageSize
  Others are set to zero.
--*/
VOID
PALAPI
GetSystemInfo(
          OUT LPSYSTEM_INFO lpSystemInfo)
{
    int nrcpus = 0;
    long pagesize;

    PERF_ENTRY(GetSystemInfo);
    ENTRY("GetSystemInfo (lpSystemInfo=%p)\n", lpSystemInfo);

    pagesize = getpagesize();

    /* Fields the PAL does not emulate are zeroed out.  */
    lpSystemInfo->wProcessorArchitecture_PAL_Undefined = 0;
    lpSystemInfo->wReserved_PAL_Undefined = 0;
    lpSystemInfo->dwPageSize = pagesize;
    lpSystemInfo->dwActiveProcessorMask_PAL_Undefined = 0;

    /* Determine the processor count.  On HP-UX (PA-RISC or IA64)
       pstat_getdynamic is used; elsewhere sysconf, then sysctl.  */
#if HAVE_SYSCONF
#if defined(__hppa__) || ( defined (_IA64_) && defined (_HPUX_) )
    struct pst_dynamic psd;
    if (pstat_getdynamic(&psd, sizeof(psd), (size_t)1, 0) != -1)
    {
        nrcpus = psd.psd_proc_cnt;
    }
    else
    {
        ASSERT("pstat_getdynamic failed (%d)\n", errno);
    }
#else // !__hppa__
    nrcpus = sysconf(_SC_NPROCESSORS_ONLN);
    if (nrcpus < 1)
    {
        ASSERT("sysconf failed for _SC_NPROCESSORS_ONLN (%d)\n", errno);
    }
#endif // __hppa__
#elif HAVE_SYSCTL
    int rc;
    size_t sz;
    int mib[2];

    sz = sizeof(nrcpus);
    mib[0] = CTL_HW;
    mib[1] = HW_NCPU;
    rc = sysctl(mib, 2, &nrcpus, &sz, NULL, 0);
    if (rc != 0)
    {
        ASSERT("sysctl failed for HW_NCPU (%d)\n", errno);
    }
#endif // HAVE_SYSCONF

    TRACE("dwNumberOfProcessors=%d\n", nrcpus);
    lpSystemInfo->dwNumberOfProcessors = nrcpus;

    /* Pick the highest user-space address the platform exposes.  */
#ifdef VM_MAXUSER_ADDRESS
    lpSystemInfo->lpMaximumApplicationAddress = (PVOID) VM_MAXUSER_ADDRESS;
#elif defined(__sun__) || defined(_AIX) || defined(__hppa__) || ( defined (_IA64_) && defined (_HPUX_) ) || defined(__LINUX__)
    lpSystemInfo->lpMaximumApplicationAddress = (PVOID) -1;
#elif defined(USERLIMIT)
    lpSystemInfo->lpMaximumApplicationAddress = (PVOID) USERLIMIT;
#elif defined(_WIN64)
#if defined(USRSTACK64)
    lpSystemInfo->lpMaximumApplicationAddress = (PVOID) USRSTACK64;
#else // !USRSTACK64
#error How come USRSTACK64 is not defined for 64bit?
#endif // USRSTACK64
#elif defined(USRSTACK)
    lpSystemInfo->lpMaximumApplicationAddress = (PVOID) USRSTACK;
#else
#error The maximum application address is not known on this platform.
#endif

    /* Page zero (and below one page) is never a valid application address.  */
    lpSystemInfo->lpMinimumApplicationAddress = (PVOID) pagesize;

    lpSystemInfo->dwProcessorType_PAL_Undefined = 0;

    /* The PAL reports the page size as the allocation granularity.  */
    lpSystemInfo->dwAllocationGranularity = pagesize;

    lpSystemInfo->wProcessorLevel_PAL_Undefined = 0;
    lpSystemInfo->wProcessorRevision_PAL_Undefined = 0;

    LOGEXIT("GetSystemInfo returns VOID\n");
    PERF_EXIT(GetSystemInfo);
}
/* Put the 1, 5 and 15 minute load averages into the first NELEM elements
   of LOADAVG.  Return the number of elements written, or -1 on failure.
   Exactly one platform branch below is compiled in; LDAV_DONE marks that
   a branch has been selected.  */
int getloadavg (double loadavg[], int nelem)
{
  int elem = 0;                 /* Return value.  */

# ifdef NO_GET_LOAD_AVG
#  define LDAV_DONE
  /* Set errno to zero to indicate that there was no particular error;
     this function just can't work at all on this system.  */
  errno = 0;
  elem = -1;
# endif

# if !defined (LDAV_DONE) && defined (HAVE_LIBKSTAT)
  /* Use libkstat because we don't have to be root.  */
#  define LDAV_DONE
  kstat_ctl_t *kc;
  kstat_t *ksp;
  kstat_named_t *kn;

  kc = kstat_open ();
  if (kc == 0)
    return -1;
  ksp = kstat_lookup (kc, "unix", 0, "system_misc");
  if (ksp == 0)
    return -1;
  if (kstat_read (kc, ksp, 0) == -1)
    return -1;

  kn = kstat_data_lookup (ksp, "avenrun_1min");
  if (kn == 0)
    {
      /* Return -1 if no load average information is available.  */
      nelem = 0;
      elem = -1;
    }

  if (nelem >= 1)
    loadavg[elem++] = (double) kn->value.ul/FSCALE;

  if (nelem >= 2)
    {
      kn = kstat_data_lookup (ksp, "avenrun_5min");
      if (kn != 0)
        {
          loadavg[elem++] = (double) kn->value.ul/FSCALE;

          if (nelem >= 3)
            {
              kn = kstat_data_lookup (ksp, "avenrun_15min");
              if (kn != 0)
                loadavg[elem++] = (double) kn->value.ul/FSCALE;
            }
        }
    }

  kstat_close (kc);
# endif /* HAVE_LIBKSTAT */

# if !defined (LDAV_DONE) && defined (hpux) && defined (HAVE_PSTAT_GETDYNAMIC)
  /* Use pstat_getdynamic() because we don't have to be root.  */
#  define LDAV_DONE
#  undef LOAD_AVE_TYPE

  struct pst_dynamic dyn_info;
  if (pstat_getdynamic (&dyn_info, sizeof (dyn_info), 0, 0) < 0)
    return -1;
  if (nelem > 0)
    loadavg[elem++] = dyn_info.psd_avg_1_min;
  if (nelem > 1)
    loadavg[elem++] = dyn_info.psd_avg_5_min;
  if (nelem > 2)
    loadavg[elem++] = dyn_info.psd_avg_15_min;
# endif /* hpux && HAVE_PSTAT_GETDYNAMIC */

# if !defined (LDAV_DONE) && defined (__linux__)
#  define LDAV_DONE
#  undef LOAD_AVE_TYPE

#  ifndef LINUX_LDAV_FILE
#   define LINUX_LDAV_FILE "/proc/loadavg"
#  endif

  char ldavgbuf[40];
  double load_ave[3];
  int fd, count;

  fd = open (LINUX_LDAV_FILE, O_RDONLY);
  if (fd == -1)
    return -1;
  count = read (fd, ldavgbuf, 40);
  (void) close (fd);
  if (count <= 0)
    return -1;

  /* The following sscanf must use the C locale.  */
  setlocale (LC_NUMERIC, "C");
  count = sscanf (ldavgbuf, "%lf %lf %lf",
                  &load_ave[0], &load_ave[1], &load_ave[2]);
  setlocale (LC_NUMERIC, "");
  if (count < 1)
    return -1;

  for (elem = 0; elem < nelem && elem < count; elem++)
    loadavg[elem] = load_ave[elem];

  return elem;
# endif /* __linux__ */

# if !defined (LDAV_DONE) && defined (__NetBSD__)
#  define LDAV_DONE
#  undef LOAD_AVE_TYPE

#  ifndef NETBSD_LDAV_FILE
#   define NETBSD_LDAV_FILE "/kern/loadavg"
#  endif

  unsigned long int load_ave[3], scale;
  int count;
  FILE *fp;

  fp = fopen (NETBSD_LDAV_FILE, "r");
  if (fp == NULL)
    return -1;
  /* The kernfs file holds three scaled integers plus the scale factor.  */
  count = fscanf (fp, "%lu %lu %lu %lu\n",
                  &load_ave[0], &load_ave[1], &load_ave[2],
                  &scale);
  (void) fclose (fp);
  if (count != 4)
    return -1;

  for (elem = 0; elem < nelem; elem++)
    loadavg[elem] = (double) load_ave[elem] / (double) scale;

  return elem;
# endif /* __NetBSD__ */

# if !defined (LDAV_DONE) && defined (NeXT)
#  define LDAV_DONE
  /* The NeXT code was adapted from iscreen 3.2.  */

  host_t host;
  struct processor_set_basic_info info;
  unsigned info_count;

  /* We only know how to get the 1-minute average for this system,
     so even if the caller asks for more than 1, we only return 1.  */

  if (!getloadavg_initialized)
    {
      if (processor_set_default (host_self (), &default_set) == KERN_SUCCESS)
        getloadavg_initialized = 1;
    }

  if (getloadavg_initialized)
    {
      info_count = PROCESSOR_SET_BASIC_INFO_COUNT;
      if (processor_set_info (default_set, PROCESSOR_SET_BASIC_INFO, &host,
                              (processor_set_info_t) &info, &info_count)
          != KERN_SUCCESS)
        getloadavg_initialized = 0;
      else
        {
          if (nelem > 0)
            loadavg[elem++] = (double) info.load_average / LOAD_SCALE;
        }
    }

  if (!getloadavg_initialized)
    return -1;
# endif /* NeXT */

# if !defined (LDAV_DONE) && defined (UMAX)
#  define LDAV_DONE
  /* UMAX 4.2, which runs on the Encore Multimax multiprocessor, does not
     have a /dev/kmem.  Information about the workings of the running kernel
     can be gathered with inq_stats system calls.
     We only know how to get the 1-minute average for this system.  */

  struct proc_summary proc_sum_data;
  struct stat_descr proc_info;
  double load;
  register unsigned int i, j;

  if (cpus == 0)
    {
      register unsigned int c, i;
      struct cpu_config conf;
      struct stat_descr desc;

      desc.sd_next = 0;
      desc.sd_subsys = SUBSYS_CPU;
      desc.sd_type = CPUTYPE_CONFIG;
      desc.sd_addr = (char *) &conf;
      desc.sd_size = sizeof conf;

      if (inq_stats (1, &desc))
        return -1;

      c = 0;
      for (i = 0; i < conf.config_maxclass; ++i)
        {
          struct class_stats stats;
          bzero ((char *) &stats, sizeof stats);

          desc.sd_type = CPUTYPE_CLASS;
          desc.sd_objid = i;
          desc.sd_addr = (char *) &stats;
          desc.sd_size = sizeof stats;

          if (inq_stats (1, &desc))
            return -1;

          c += stats.class_numcpus;
        }
      cpus = c;
      samples = cpus < 2 ? 3 : (2 * cpus / 3);
    }

  proc_info.sd_next = 0;
  proc_info.sd_subsys = SUBSYS_PROC;
  proc_info.sd_type = PROCTYPE_SUMMARY;
  proc_info.sd_addr = (char *) &proc_sum_data;
  proc_info.sd_size = sizeof (struct proc_summary);
  proc_info.sd_sizeused = 0;

  if (inq_stats (1, &proc_info) != 0)
    return -1;

  load = proc_sum_data.ps_nrunnable;
  j = 0;
  for (i = samples - 1; i > 0; --i)
    {
      load += proc_sum_data.ps_nrun[j];
      if (j++ == PS_NRUNSIZE)
        j = 0;
    }

  if (nelem > 0)
    loadavg[elem++] = load / samples / cpus;
# endif /* UMAX */

# if !defined (LDAV_DONE) && defined (DGUX)
#  define LDAV_DONE
  /* This call can return -1 for an error, but with good args
     it's not supposed to fail.  The first argument is for no
     apparent reason of type `long int *'.  */
  dg_sys_info ((long int *) &load_info,
               DG_SYS_INFO_LOAD_INFO_TYPE,
               DG_SYS_INFO_LOAD_VERSION_0);

  if (nelem > 0)
    loadavg[elem++] = load_info.one_minute;
  if (nelem > 1)
    loadavg[elem++] = load_info.five_minute;
  if (nelem > 2)
    loadavg[elem++] = load_info.fifteen_minute;
# endif /* DGUX */

# if !defined (LDAV_DONE) && defined (apollo)
#  define LDAV_DONE
  /* Apollo code from [email protected] (Ray Lischner).

     This system call is not documented.  The load average is obtained as
     three long integers, for the load average over the past minute, five
     minutes, and fifteen minutes.  Each value is a scaled integer, with 16
     bits of integer part and 16 bits of fraction part.

     I'm not sure which operating system first supported this system call,
     but I know that SR10.2 supports it.  */

  extern void proc1_$get_loadav ();
  unsigned long load_ave[3];

  proc1_$get_loadav (load_ave);

  if (nelem > 0)
    loadavg[elem++] = load_ave[0] / 65536.0;
  if (nelem > 1)
    loadavg[elem++] = load_ave[1] / 65536.0;
  if (nelem > 2)
    loadavg[elem++] = load_ave[2] / 65536.0;
# endif /* apollo */

# if !defined (LDAV_DONE) && defined (OSF_MIPS)
#  define LDAV_DONE

  struct tbl_loadavg load_ave;
  table (TBL_LOADAVG, 0, &load_ave, 1, sizeof (load_ave));
  /* tl_lscale == 0 means the values are already doubles; otherwise
     they are scaled longs.  */
  loadavg[elem++]
    = (load_ave.tl_lscale == 0
       ? load_ave.tl_avenrun.d[0]
       : (load_ave.tl_avenrun.l[0] / (double) load_ave.tl_lscale));
# endif /* OSF_MIPS */

# if !defined (LDAV_DONE) && (defined (__MSDOS__) || defined (WINDOWS32))
#  define LDAV_DONE

  /* A faithful emulation is going to have to be saved for a rainy day.  */
  for ( ; elem < nelem; elem++)
    {
      loadavg[elem] = 0.0;
    }
# endif /* __MSDOS__ || WINDOWS32 */

# if !defined (LDAV_DONE) && defined (OSF_ALPHA)
#  define LDAV_DONE

  struct tbl_loadavg load_ave;
  table (TBL_LOADAVG, 0, &load_ave, 1, sizeof (load_ave));
  for (elem = 0; elem < nelem; elem++)
    loadavg[elem]
      = (load_ave.tl_lscale == 0
         ? load_ave.tl_avenrun.d[elem]
         : (load_ave.tl_avenrun.l[elem] / (double) load_ave.tl_lscale));
# endif /* OSF_ALPHA */

# if !defined (LDAV_DONE) && defined (VMS)
  /* VMS specific code -- read from the Load Ave driver.  */

  LOAD_AVE_TYPE load_ave[3];
  static int getloadavg_initialized = 0;
#  ifdef eunice
  struct
  {
    int dsc$w_length;
    char *dsc$a_pointer;
  } descriptor;
#  endif

  /* Ensure that there is a channel open to the load ave device.  */
  if (!getloadavg_initialized)
    {
      /* Attempt to open the channel.  */
#  ifdef eunice
      descriptor.dsc$w_length = 18;
      descriptor.dsc$a_pointer = "$$VMS_LOAD_AVERAGE";
#  else
      $DESCRIPTOR (descriptor, "LAV0:");
#  endif
      if (sys$assign (&descriptor, &channel, 0, 0) & 1)
        getloadavg_initialized = 1;
    }

  /* Read the load average vector.  */
  if (getloadavg_initialized
      && !(sys$qiow (0, channel, IO$_READVBLK, 0, 0, 0,
                     load_ave, 12, 0, 0, 0, 0) & 1))
    {
      sys$dassgn (channel);
      getloadavg_initialized = 0;
    }

  if (!getloadavg_initialized)
    return -1;
# endif /* VMS */

# if !defined (LDAV_DONE) && defined(LOAD_AVE_TYPE) && !defined(VMS)

  /* UNIX-specific code -- read the average from /dev/kmem.  */

#  define LDAV_PRIVILEGED               /* This code requires special installation.  */

  LOAD_AVE_TYPE load_ave[3];

  /* Get the address of LDAV_SYMBOL.  */
  if (offset == 0)
    {
#  ifndef sgi
#   ifndef NLIST_STRUCT
      strcpy (nl[0].n_name, LDAV_SYMBOL);
      strcpy (nl[1].n_name, "");
#   else /* NLIST_STRUCT */
#    ifdef HAVE_STRUCT_NLIST_N_UN_N_NAME
      nl[0].n_un.n_name = LDAV_SYMBOL;
      nl[1].n_un.n_name = 0;
#    else /* not HAVE_STRUCT_NLIST_N_UN_N_NAME */
      nl[0].n_name = LDAV_SYMBOL;
      nl[1].n_name = 0;
#    endif /* not HAVE_STRUCT_NLIST_N_UN_N_NAME */
#   endif /* NLIST_STRUCT */

#   ifndef SUNOS_5
      if (
#    if !(defined (_AIX) && !defined (ps2))
          nlist (KERNEL_FILE, nl)
#    else /* _AIX */
          knlist (nl, 1, sizeof (nl[0]))
#    endif
          >= 0)
        /* Omit "&& nl[0].n_type != 0 " -- it breaks on Sun386i.  */
        {
#    ifdef FIXUP_KERNEL_SYMBOL_ADDR
          FIXUP_KERNEL_SYMBOL_ADDR (nl);
#    endif
          offset = nl[0].n_value;
        }
#   endif /* !SUNOS_5 */
#  else  /* sgi */
      int ldav_off;

      ldav_off = sysmp (MP_KERNADDR, MPKA_AVENRUN);
      if (ldav_off != -1)
        offset = (long) ldav_off & 0x7fffffff;
#  endif /* sgi */
    }

  /* Make sure we have /dev/kmem open.  */
  if (!getloadavg_initialized)
    {
#  ifndef SUNOS_5
      channel = open ("/dev/kmem", 0);
      if (channel >= 0)
        {
          /* Set the channel to close on exec, so it does not
             litter any child's descriptor table.  */
#   ifdef F_SETFD
#    ifndef FD_CLOEXEC
#     define FD_CLOEXEC 1
#    endif
          (void) fcntl (channel, F_SETFD, FD_CLOEXEC);
#   endif
          getloadavg_initialized = 1;
        }
#  else /* SUNOS_5 */
      /* We pass 0 for the kernel, corefile, and swapfile names
         to use the currently running kernel.  */
      kd = kvm_open (0, 0, 0, O_RDONLY, 0);
      if (kd != 0)
        {
          /* nlist the currently running kernel.  */
          kvm_nlist (kd, nl);
          offset = nl[0].n_value;
          getloadavg_initialized = 1;
        }
#  endif /* SUNOS_5 */
    }

  /* If we can, get the load average values.  */
  if (offset && getloadavg_initialized)
    {
      /* Try to read the load.  */
#  ifndef SUNOS_5
      if (lseek (channel, offset, 0) == -1L
          || read (channel, (char *) load_ave, sizeof (load_ave))
          != sizeof (load_ave))
        {
          close (channel);
          getloadavg_initialized = 0;
        }
#  else  /* SUNOS_5 */
      if (kvm_read (kd, offset, (char *) load_ave, sizeof (load_ave))
          != sizeof (load_ave))
        {
          kvm_close (kd);
          getloadavg_initialized = 0;
        }
#  endif /* SUNOS_5 */
    }

  if (offset == 0 || !getloadavg_initialized)
    return -1;
# endif /* LOAD_AVE_TYPE and not VMS */

# if !defined (LDAV_DONE) && defined (LOAD_AVE_TYPE) /* Including VMS.  */
  if (nelem > 0)
    loadavg[elem++] = LDAV_CVT (load_ave[0]);
  if (nelem > 1)
    loadavg[elem++] = LDAV_CVT (load_ave[1]);
  if (nelem > 2)
    loadavg[elem++] = LDAV_CVT (load_ave[2]);

#  define LDAV_DONE
# endif /* !LDAV_DONE && LOAD_AVE_TYPE */

# ifdef LDAV_DONE
  return elem;
# else
  /* Set errno to zero to indicate that there was no particular error;
     this function just can't work at all on this system.  */
  errno = 0;
  return -1;
# endif
}
/****************************************************************************** * * * Function: zbx_get_cpu_num * * * * Purpose: returns the number of processors which are currently online * * (i.e., available). * * * * Return value: number of CPUs * * * * Author: Eugene Grigorjev * * * ******************************************************************************/ static int zbx_get_cpu_num() { #if defined(_WINDOWS) SYSTEM_INFO sysInfo; GetSystemInfo(&sysInfo); return (int)sysInfo.dwNumberOfProcessors; #elif defined(HAVE_SYS_PSTAT_H) struct pst_dynamic psd; if (-1 == pstat_getdynamic(&psd, sizeof(struct pst_dynamic), 1, 0)) goto return_one; return (int)psd.psd_proc_cnt; #elif defined(_SC_NPROCESSORS_CONF) /* FreeBSD 7.0 x86 */ /* Solaris 10 x86 */ int ncpu; if (-1 == (ncpu = sysconf(_SC_NPROCESSORS_CONF))) goto return_one; return ncpu; #elif defined(HAVE_FUNCTION_SYSCTL_HW_NCPU) /* FreeBSD 6.2 x86; FreeBSD 7.0 x86 */ /* NetBSD 3.1 x86; NetBSD 4.0 x86 */ /* OpenBSD 4.2 x86 */ size_t len; int mib[] = {CTL_HW, HW_NCPU}, ncpu; len = sizeof(ncpu); if (0 != sysctl(mib, 2, &ncpu, &len, NULL, 0)) goto return_one; return ncpu; #elif defined(HAVE_PROC_CPUINFO) FILE *f = NULL; int ncpu = 0; if (NULL == (file = fopen("/proc/cpuinfo", "r"))) goto return_one; while (NULL != fgets(line, 1024, file)) { if (NULL == strstr(line, "processor")) continue; ncpu++; } zbx_fclose(file); if (0 == ncpu) goto return_one; return ncpu; #elif defined(HAVE_LIBPERFSTAT) /* AIX 6.1 */ perfstat_cpu_total_t ps_cpu_total; if (-1 == perfstat_cpu_total(NULL, &ps_cpu_total, sizeof(ps_cpu_total), 1)) goto return_one; return (int)ps_cpu_total.ncpus; #endif #ifndef _WINDOWS return_one: zabbix_log(LOG_LEVEL_WARNING, "cannot determine number of CPUs, assuming 1"); return 1; #endif }
/*
 * try to get load average
 * Inputs: pointer to array of doubles, number of elements in array
 * Returns: 0=array has values, -1=error occurred.
 */
int
try_getloadavg(double *r_ave, size_t s_ave)
{
    double         *pave = r_ave;
#ifndef HAVE_GETLOADAVG
#ifdef HAVE_SYS_FIXPOINT_H
    fix             favenrun[3];
#endif
#if (defined(ultrix) || defined(sun) || defined(__alpha) || defined(dynix))
    size_t          i;
#if (defined(sun) || defined(__alpha) || defined(dynix))
    long            favenrun[3];
    if (s_ave > 3)              /* bounds check */
        return (-1);
#define FIX_TO_DBL(_IN) (((double) _IN)/((double) FSCALE))
#endif
#endif
#if defined(aix4) || defined(aix5) || defined(aix6)
    int             favenrun[3];
    perfstat_cpu_total_t cs;
#endif
#if defined(hpux10) || defined(hpux11)
    struct pst_dynamic pst_buf;
#endif
#ifdef irix6
    size_t          i;
    int             favenrun[3];
    sgt_cookie_t    cookie;
#endif
#endif                          /* !HAVE_GETLOADAVG */

#ifdef HAVE_GETLOADAVG
    if (getloadavg(pave, s_ave) == -1)
        return (-1);
#elif defined(linux)
    {
        FILE           *in = fopen("/proc/loadavg", "r");
        double          tmp[3];
        size_t          i;
        int             rc;

        if (!in) {
            snmp_log(LOG_ERR, "snmpd: cannot open /proc/loadavg\n");
            return (-1);
        }
        /*
         * Check the conversion count: without this, a short or malformed
         * read would leave the output values uninitialized.  Parse into
         * a local buffer so we never write past the caller's array when
         * s_ave < 3.
         */
        rc = fscanf(in, "%lf %lf %lf", &tmp[0], &tmp[1], &tmp[2]);
        fclose(in);
        if (rc != 3) {
            snmp_log(LOG_ERR, "snmpd: bad data in /proc/loadavg\n");
            return (-1);
        }
        for (i = 0; i < s_ave && i < 3; i++)
            pave[i] = tmp[i];
    }
#elif (defined(ultrix) || defined(sun) || defined(__alpha) || defined(dynix))
    if (auto_nlist(LOADAVE_SYMBOL, (char *) favenrun, sizeof(favenrun)) == 0)
        return (-1);
    /* cap at 3: favenrun only holds three fixed-point samples */
    for (i = 0; i < s_ave && i < 3; i++)
        *(pave + i) = FIX_TO_DBL(favenrun[i]);
#elif defined(hpux10) || defined(hpux11)
    if (pstat_getdynamic(&pst_buf, sizeof(struct pst_dynamic), 1, 0) < 0)
        return (-1);
    r_ave[0] = pst_buf.psd_avg_1_min;
    r_ave[1] = pst_buf.psd_avg_5_min;
    r_ave[2] = pst_buf.psd_avg_15_min;
#elif defined(aix4) || defined(aix5) || defined(aix6)
    if (perfstat_cpu_total((perfstat_id_t *) NULL, &cs,
                           sizeof(perfstat_cpu_total_t), 1) > 0) {
        /* loadavg[] is fixed-point with a 16.16 split */
        r_ave[0] = cs.loadavg[0] / 65536.0;
        r_ave[1] = cs.loadavg[1] / 65536.0;
        r_ave[2] = cs.loadavg[2] / 65536.0;
    } else {
        /* previously fell through and reported success with stale data */
        return (-1);
    }
#elif defined(irix6)
    SGT_COOKIE_INIT(&cookie);
    SGT_COOKIE_SET_KSYM(&cookie, "avenrun");
    sysget(SGT_KSYM, (char *) favenrun, sizeof(favenrun), SGT_READ, &cookie);
    /* cap at 3: avenrun exposes only three samples */
    for (i = 0; i < s_ave && i < 3; i++)
        r_ave[i] = favenrun[i] / 1000.0;
    DEBUGMSGTL(("ucd-snmp/loadave", "irix6: %d %d %d\n",
                favenrun[0], favenrun[1], favenrun[2]));
#elif !defined(cygwin)
#if defined(NETSNMP_CAN_USE_NLIST) && defined(LOADAVE_SYMBOL)
    if (auto_nlist(LOADAVE_SYMBOL, (char *) pave, sizeof(double) * s_ave) == 0)
#endif
        return (-1);
#endif
    /*
     * XXX
     * To calculate this, we need to compare
     * successive values of the kernel array
     * '_cp_times', and calculate the resulting
     * percentage changes.
     * This calculation needs to be performed
     * regularly - perhaps as a background process.
     *
     * See the source to 'top' for full details.
     *
     * The linux SNMP HostRes implementation
     * uses 'avenrun[0]*100' as an approximation.
     * This is less than accurate, but has the
     * advantage of being simple to implement!
     *
     * I'm also assuming a single processor
     */
    return 0;
}
/* * Load the latest memory usage statistics */ int netsnmp_mem_arch_load (netsnmp_cache * cache, void *magic) { struct pst_static pst; struct pst_dynamic psd; netsnmp_memory_info *mem; long total_swap = 0; long free_swap = 0; long size = 0; /* * Retrieve the memory information from the underlying O/S... */ if (pstat_getstatic (&pst, sizeof (pst), (size_t) 1, 0) == -1) { snmp_log (LOG_ERR, "memory_hpux: pstat_getstatic failed!\n"); return -1; } if (pstat_getdynamic (&psd, sizeof (psd), (size_t) 1, 0) == -1) { snmp_log (LOG_ERR, "memory_hpux: pstat_getdynamic failed!\n"); return -1; } mem = netsnmp_memory_get_byIdx (NETSNMP_MEM_TYPE_PHYSMEM, 1); if (!mem) { snmp_log_perror ("No Memory info entry"); } else { if (!mem->descr) mem->descr = strdup ("Physical memory"); mem->units = pst.page_size; mem->size = pst.physical_memory; mem->free = psd.psd_free; mem->other = -1; } get_swapinfo (&total_swap, &free_swap, &size); mem = netsnmp_memory_get_byIdx (NETSNMP_MEM_TYPE_SWAP, 1); if (!mem) { snmp_log_perror ("No Swap info entry"); } else { if (!mem->descr) mem->descr = strdup ("Swap space (total)"); mem->units = size; mem->size = total_swap; mem->free = free_swap; mem->other = -1; } mem = netsnmp_memory_get_byIdx (NETSNMP_MEM_TYPE_STEXT, 1); if (!mem) { snmp_log_perror ("No Swap text entry"); } else { if (!mem->descr) mem->descr = strdup ("Swapped text pages"); mem->units = pst.page_size; mem->size = psd.psd_vmtxt; mem->free = psd.psd_avmtxt; mem->other = -1; } mem = netsnmp_memory_get_byIdx (NETSNMP_MEM_TYPE_RTEXT, 1); if (!mem) { snmp_log_perror ("No real text entry"); } else { if (!mem->descr) mem->descr = strdup ("Real text pages"); mem->units = pst.page_size; mem->size = psd.psd_rmtxt; mem->free = psd.psd_armtxt; mem->other = -1; } /* mem = netsnmp_memory_get_byIdx( NETSNMP_MEM_TYPE_MISC, 1 ); if (!mem) { snmp_log_perror("No Buffer, etc info entry"); } else { mem->units = 1024; mem->size = -1; mem->free = (pst.page_size/1024)*psd.psd_free + swap.free_swap; mem->other = 
-1; } */ return 0; }
u_char * var_hrstore(struct variable *vp, oid * name, size_t * length, int exact, size_t * var_len, WriteMethod ** write_method) { int store_idx = 0; #if !defined(linux) #if defined(solaris2) int freemem; int swap_total, swap_used; #elif defined(hpux10) || defined(hpux11) struct pst_dynamic pst_buf; #elif defined(darwin8) vm_statistics_data_t vm_stat; int count = HOST_VM_INFO_COUNT; #elif defined(TOTAL_MEMORY_SYMBOL) || defined(USE_SYSCTL_VM) #ifdef VM_UVMEXP struct uvmexp uvmexp_totals; #endif struct vmtotal memory_totals; #endif #if HAVE_KVM_GETSWAPINFO struct kvm_swap swapinfo; static kvm_t *kd = NULL; #endif #if HAVE_SYS_POOL_H struct pool mbpool, mclpool; int i; #endif #ifdef MBSTAT_SYMBOL struct mbstat mbstat; #endif #endif /* !linux */ static char string[1024]; struct HRFS_statfs stat_buf; if (vp->magic == HRSTORE_MEMSIZE) { if (header_hrstore(vp, name, length, exact, var_len, write_method) == MATCH_FAILED) return NULL; } else { really_try_next: store_idx = header_hrstoreEntry(vp, name, length, exact, var_len, write_method); if (store_idx == MATCH_FAILED) return NULL; if (store_idx > HRS_TYPE_FIXED_MAX) { if (HRFS_statfs(HRFS_entry->HRFS_mount, &stat_buf) < 0) { snmp_log_perror(HRFS_entry->HRFS_mount); goto try_next; } } #if !defined(linux) && !defined(solaris2) else switch (store_idx) { case HRS_TYPE_MEM: case HRS_TYPE_SWAP: #ifdef USE_SYSCTL_VM { int mib[2]; size_t len = sizeof(memory_totals); mib[0] = CTL_VM; mib[1] = VM_METER; sysctl(mib, 2, &memory_totals, &len, NULL, 0); #ifdef VM_UVMEXP mib[1] = VM_UVMEXP; len = sizeof(uvmexp_totals); sysctl(mib, 2, &uvmexp_totals, &len, NULL, 0); #endif } #elif defined(darwin8) host_statistics(myHost,HOST_VM_INFO,&vm_stat,&count); #elif defined(hpux10) || defined(hpux11) pstat_getdynamic(&pst_buf, sizeof(struct pst_dynamic), 1, 0); #elif defined(TOTAL_MEMORY_SYMBOL) auto_nlist(TOTAL_MEMORY_SYMBOL, (char *) &memory_totals, sizeof(struct vmtotal)); #endif #if HAVE_KVM_GETSWAPINFO if (kd == NULL) kd = 
kvm_openfiles(NULL, NULL, NULL, O_RDONLY, NULL); if (!kd) { snmp_log_perror("kvm_openfiles"); goto try_next; } if (kvm_getswapinfo(kd, &swapinfo, 1, 0) < 0) { snmp_log_perror("kvm_getswapinfo"); goto try_next; } #endif break; #if !defined(hpux10) && !defined(hpux11) case HRS_TYPE_MBUF: #if HAVE_SYS_POOL_H auto_nlist(MBPOOL_SYMBOL, (char *) &mbpool, sizeof(mbpool)); auto_nlist(MCLPOOL_SYMBOL, (char *) &mclpool, sizeof(mclpool)); #endif #ifdef MBSTAT_SYMBOL auto_nlist(MBSTAT_SYMBOL, (char *) &mbstat, sizeof(mbstat)); #endif break; #endif /* !hpux10 && !hpux11 */ default: break; } #endif /* !linux && !solaris2 */ } switch (vp->magic) { case HRSTORE_MEMSIZE: long_return = physmem * (pagesize / 1024); return (u_char *) & long_return; case HRSTORE_INDEX: long_return = store_idx; return (u_char *) & long_return; case HRSTORE_TYPE: if (store_idx > HRS_TYPE_FIXED_MAX) if (storageUseNFS && Check_HR_FileSys_NFS()) storage_type_id[storage_type_len - 1] = 10; /* Network Disk */ else storage_type_id[storage_type_len - 1] = 4; /* Assume fixed */ else switch (store_idx) { case HRS_TYPE_MEM: storage_type_id[storage_type_len - 1] = 2; /* RAM */ break; case HRS_TYPE_SWAP: storage_type_id[storage_type_len - 1] = 3; /* Virtual Mem */ break; case HRS_TYPE_MBUF: storage_type_id[storage_type_len - 1] = 1; /* Other */ break; default: storage_type_id[storage_type_len - 1] = 1; /* Other */ break; } *var_len = sizeof(storage_type_id); return (u_char *) storage_type_id; case HRSTORE_DESCR: if (store_idx > HRS_TYPE_FIXED_MAX) { strncpy(string, HRFS_entry->HRFS_mount, sizeof(string)-1); string[ sizeof(string)-1 ] = 0; *var_len = strlen(string); return (u_char *) string; } else { /* store_idx = store_idx - 1; */ *var_len = strlen(hrs_descr[store_idx]); return (u_char *) hrs_descr[store_idx]; } case HRSTORE_UNITS: if (store_idx > HRS_TYPE_FIXED_MAX) #if HRFS_HAS_FRSIZE long_return = stat_buf.f_frsize; #else long_return = stat_buf.f_bsize; #endif else switch (store_idx) { case HRS_TYPE_MEM: case 
HRS_TYPE_SWAP: #if defined(USE_SYSCTL) || defined(solaris2) long_return = pagesize; #elif defined(NBPG) long_return = NBPG; #else long_return = 1024; /* Report in Kb */ #endif break; case HRS_TYPE_MBUF: #ifdef MSIZE long_return = MSIZE; #elif defined(linux) long_return = 1024; #else long_return = 256; #endif break; default: #if NO_DUMMY_VALUES goto try_next; #endif long_return = 1024; /* As likely as any! */ break; } return (u_char *) & long_return; case HRSTORE_SIZE: if (store_idx > HRS_TYPE_FIXED_MAX) long_return = stat_buf.f_blocks; else switch (store_idx) { #if defined(linux) case HRS_TYPE_MEM: case HRS_TYPE_SWAP: long_return = linux_mem(store_idx, HRSTORE_SIZE); break; #elif defined(solaris2) case HRS_TYPE_MEM: long_return = physmem; break; case HRS_TYPE_SWAP: sol_get_swapinfo(&swap_total, &swap_used); long_return = swap_total; break; #elif defined(hpux10) || defined(hpux11) case HRS_TYPE_MEM: long_return = pst_buf.psd_rm; break; case HRS_TYPE_SWAP: long_return = pst_buf.psd_vm; break; #elif defined(darwin8) case HRS_TYPE_MEM: long_return = physmem; break; case HRS_TYPE_SWAP: long_return = -1; break; #if defined(MBSTAT_SYMBOL) case HRS_TYPE_MBUF: long_return = mbstat.m_mbufs; break; #endif #elif defined(TOTAL_MEMORY_SYMBOL) || defined(USE_SYSCTL_VM) case HRS_TYPE_MEM: long_return = memory_totals.t_rm; break; case HRS_TYPE_SWAP: #if HAVE_KVM_GETSWAPINFO long_return = swapinfo.ksw_total; #elif defined(VM_UVMEXP) long_return = uvmexp_totals.swpages; #else long_return = memory_totals.t_vm; #endif break; #else /* !linux && !solaris2 && !hpux10 && !hpux11 && ... */ case HRS_TYPE_MEM: long_return = physmem; break; case HRS_TYPE_SWAP: #if NO_DUMMY_VALUES goto try_next; #endif long_return = 0; break; #endif /* !linux && !solaris2 && !hpux10 && !hpux11 && ... 
*/ case HRS_TYPE_MBUF: #ifdef linux long_return = linux_mem(store_idx, HRSTORE_SIZE); #elif HAVE_SYS_POOL_H long_return = 0; for (i = 0; i < sizeof(mbstat.m_mtypes) / sizeof(mbstat.m_mtypes[0]); i++) long_return += mbstat.m_mtypes[i]; #elif defined(MBSTAT_SYMBOL) && defined(STRUCT_MBSTAT_HAS_M_MBUFS) long_return = mbstat.m_mbufs; #elif defined(NO_DUMMY_VALUES) goto try_next; #else long_return = 0; #endif break; default: #if NO_DUMMY_VALUES goto try_next; #endif long_return = 1024; break; } return (u_char *) & long_return; case HRSTORE_USED: if (store_idx > HRS_TYPE_FIXED_MAX) long_return = (stat_buf.f_blocks - stat_buf.f_bfree); else switch (store_idx) { #if defined(linux) case HRS_TYPE_MBUF: case HRS_TYPE_MEM: case HRS_TYPE_SWAP: long_return = linux_mem(store_idx, HRSTORE_USED); break; #elif defined(solaris2) case HRS_TYPE_MEM: getKstatInt("unix", "system_pages", "freemem", &freemem); long_return = physmem - freemem; break; case HRS_TYPE_SWAP: sol_get_swapinfo(&swap_total, &swap_used); long_return = swap_used; break; #elif defined(hpux10) || defined(hpux11) case HRS_TYPE_MEM: long_return = pst_buf.psd_arm; break; case HRS_TYPE_SWAP: long_return = pst_buf.psd_avm; break; #elif defined(darwin8) case HRS_TYPE_MEM: long_return = vm_stat.active_count + vm_stat.inactive_count + vm_stat.wire_count; break; case HRS_TYPE_SWAP: long_return = -1; break; #if defined(MBSTAT_SYMBOL) case HRS_TYPE_MBUF: long_return = mbstat.m_mbufs; break; #endif #elif defined(TOTAL_MEMORY_SYMBOL) || defined(USE_SYSCTL_VM) case HRS_TYPE_MEM: long_return = memory_totals.t_arm; break; case HRS_TYPE_SWAP: #if HAVE_KVM_GETSWAPINFO long_return = swapinfo.ksw_used; #elif defined(VM_UVMEXP) long_return = uvmexp_totals.swpginuse; #else long_return = memory_totals.t_avm; #endif break; #endif /* linux || solaris2 || hpux10 || hpux11 || ... 
*/ #if !defined(linux) && !defined(solaris2) && !defined(hpux10) && !defined(hpux11) case HRS_TYPE_MBUF: #if HAVE_SYS_POOL_H long_return = (mbpool.pr_nget - mbpool.pr_nput) * mbpool.pr_size + (mclpool.pr_nget - mclpool.pr_nput) * mclpool.pr_size; #ifdef MSIZE long_return /= MSIZE; #else long_return /= 256; #endif #elif defined(MBSTAT_SYMBOL) && defined(STRUCT_MBSTAT_HAS_M_CLUSTERS) long_return = mbstat.m_clusters - mbstat.m_clfree; /* unlikely, but... */ #elif defined(NO_DUMMY_VALUES) goto try_next; #else long_return = 0; #endif break; #endif /* !linux && !solaris2 && !hpux10 && !hpux11 && ... */ default: #if NO_DUMMY_VALUES goto try_next; #endif long_return = 1024; break; } return (u_char *) & long_return; case HRSTORE_FAILS: if (store_idx > HRS_TYPE_FIXED_MAX) #if NO_DUMMY_VALUES goto try_next; #else long_return = 0; #endif else switch (store_idx) {
UInt32 GetNumberOfProcessors() { struct pst_dynamic psd; if (pstat_getdynamic(&psd, sizeof(psd), (size_t)1, 0) != -1) return (UInt32)psd.psd_proc_cnt; return 1; }
/*
 * VM_MEMORY_FREE: report the amount of free physical memory, in bytes
 * where the platform permits, via SET_UI64_RESULT on 'result'.
 *
 * Returns SYSINFO_RET_OK on success, SYSINFO_RET_FAIL if the value
 * cannot be obtained (or on unsupported platforms).  'cmd', 'param'
 * and 'flags' belong to the common agent item signature and are unused.
 */
static int	VM_MEMORY_FREE(const char *cmd, const char *param, unsigned flags, AGENT_RESULT *result)
{
#if defined(HAVE_SYS_PSTAT_H)
	/* HP-UX */
	struct pst_static	pst;
	struct pst_dynamic	dyn;
	long			page;

	if (pstat_getstatic(&pst, sizeof(pst), (size_t)1, 0) == -1)
		return SYSINFO_RET_FAIL;

	/* Get page size */
	page = pst.page_size;

	if (pstat_getdynamic(&dyn, sizeof(dyn), 1, 0) == -1)
		return SYSINFO_RET_FAIL;

	/* Free memory in bytes */
	SET_UI64_RESULT(result, (zbx_uint64_t)dyn.psd_free * (zbx_uint64_t)page);
	return SYSINFO_RET_OK;
#elif defined(HAVE_SYSINFO_FREERAM)
	struct sysinfo	info;

	if (0 != sysinfo(&info))
		return SYSINFO_RET_FAIL;

#ifdef HAVE_SYSINFO_MEM_UNIT
	SET_UI64_RESULT(result, (zbx_uint64_t)info.freeram * (zbx_uint64_t)info.mem_unit);
#else
	SET_UI64_RESULT(result, info.freeram);
#endif
	return SYSINFO_RET_OK;
#elif defined(HAVE_SYS_VMMETER_VMTOTAL)
	int		mib[2];
	size_t		len;	/* sysctl() requires size_t *, not int * */
	struct vmtotal	v;

	len = sizeof(struct vmtotal);
	mib[0] = CTL_VM;
	mib[1] = VM_METER;

	if (0 != sysctl(mib, 2, &v, &len, NULL, 0))
		return SYSINFO_RET_FAIL;

	/* t_free is in pages; '<< 2' converts pages to kilobytes assuming
	 * a 4096-byte page -- NOTE(review): verify page-size assumption */
	SET_UI64_RESULT(result, v.t_free << 2);
	return SYSINFO_RET_OK;
/* OS/X */
#elif defined(HAVE_MACH_HOST_INFO_H)
	vm_statistics_data_t	page_info;
	vm_size_t		pagesize;
	mach_msg_type_number_t	count;
	kern_return_t		kret;

	pagesize = 0;
	kret = host_page_size(mach_host_self(), &pagesize);

	/* previously unchecked: a failure left pagesize 0 and reported
	 * 0 bytes free as if it were a valid value */
	if (KERN_SUCCESS != kret)
		return SYSINFO_RET_FAIL;

	count = HOST_VM_INFO_COUNT;
	kret = host_statistics(mach_host_self(), HOST_VM_INFO, (host_info_t)&page_info, &count);

	if (KERN_SUCCESS != kret)
		return SYSINFO_RET_FAIL;

	SET_UI64_RESULT(result, (double)page_info.free_count * pagesize);
	return SYSINFO_RET_OK;
#else
	return SYSINFO_RET_FAIL;
#endif
}
/*
 * sg_get_host_info_int
 *
 * Fill *host_info_buf with host information: hostname, OS name /
 * release / version, platform (architecture), CPU counts, kernel bit
 * width, virtualization state, uptime and current system time.
 *
 * One platform branch is compiled in: WIN32, HPUX, SOLARIS,
 * LINUX/CYGWIN, ALLBSD or AIX; otherwise the function fails with
 * SG_ERROR_UNSUPPORTED.  Returns SG_ERROR_NONE on success, or returns
 * with the sg error state set on failure.
 */
static sg_error
sg_get_host_info_int(sg_host_info *host_info_buf)
{
#ifdef WIN32
	unsigned long nameln;
	char *name;
	long long result;
	OSVERSIONINFOEX osinfo;
	SYSTEM_INFO sysinfo;
	char *tmp_name;
	char tmp[10];
#else
	struct utsname os;
# if defined(HPUX)
	struct pst_static pstat_static;
	struct pst_dynamic pstat_dynamic;
	time_t currtime;
	long boottime;
# elif defined(SOLARIS)
	time_t boottime, curtime;
	kstat_ctl_t *kc;
	kstat_t *ksp;
	kstat_named_t *kn;
	char *isainfo = NULL;
	long isabufsz, rc;
# elif defined(LINUX) || defined(CYGWIN)
	FILE *f;
# elif defined(ALLBSD)
	int mib[2];
	struct timeval boottime;
	time_t curtime;
	size_t size;
	int ncpus;
#  if defined(HW_MACHINE_ARCH) || defined(HW_MACHINE)
	char arch_name[16];
#  endif
# elif defined(AIX)
	static perfstat_cpu_total_t cpu_total;
	sg_error rc;
#  if defined(HAVE_GETUTXENT)
	struct utmpx *ut;
#  else
	struct utmp *ut;
#  endif
# endif
#endif

	/* Reset all numeric fields to "unknown" before a platform branch
	 * fills them in. */
	host_info_buf->ncpus = 0;
	host_info_buf->maxcpus = 0;
	host_info_buf->bitwidth = 0;
	host_info_buf->host_state = sg_unknown_configuration;
	host_info_buf->uptime = 0;
	host_info_buf->systime = 0;

#ifdef WIN32
	/* these settings are static after boot, so why get them
	 * constantly?
	 *
	 * Because we want to know some changes anyway - at least
	 * when the hostname (DNS?) changes */
	/* get system name */
	nameln = MAX_COMPUTERNAME_LENGTH + 1;
	name = sg_malloc(nameln);
	if(name == NULL) {
		RETURN_FROM_PREVIOUS_ERROR( "os", sg_get_error() );
	}
	/*
	 * XXX probably GetComputerNameEx() is a better entry point ...
	 */
	if( GetComputerName(name, &nameln) == 0 ) {
		free(name);
		RETURN_WITH_SET_ERROR("os", SG_ERROR_HOST, "GetComputerName");
	}
	if(SG_ERROR_NONE != sg_update_string(&host_info_buf->hostname, name)) {
		free(name);
		RETURN_FROM_PREVIOUS_ERROR( "os", sg_get_error() );
	}
	free(name);
	/* get OS name, version and build */
	ZeroMemory(&osinfo, sizeof(OSVERSIONINFOEX));
	osinfo.dwOSVersionInfoSize = sizeof(osinfo);
	if(!GetVersionEx(&osinfo)) {
		RETURN_WITH_SET_ERROR("os", SG_ERROR_HOST, "GetVersionEx");
	}
	GetSystemInfo(&sysinfo);
	/* Release - single number */
	if(snprintf(tmp, sizeof(tmp), "%ld", osinfo.dwBuildNumber) == -1) {
		RETURN_WITH_SET_ERROR_WITH_ERRNO("os", SG_ERROR_SPRINTF, NULL);
	}
	if(SG_ERROR_NONE != sg_update_string(&host_info_buf->os_release, tmp)) {
		RETURN_FROM_PREVIOUS_ERROR( "os", sg_get_error() );
	}
	/* Version */
	/* usually a single digit . single digit, eg 5.0 */
	if(snprintf(tmp, sizeof(tmp), "%ld.%ld", osinfo.dwMajorVersion, osinfo.dwMinorVersion) == -1) {
		RETURN_FROM_PREVIOUS_ERROR( "os", sg_get_error() );
	}
	if(SG_ERROR_NONE != sg_update_string(&host_info_buf->os_version, tmp)) {
		RETURN_FROM_PREVIOUS_ERROR( "os", sg_get_error() );
	}
	/* OS name */
	tmp_name = get_os_name(osinfo, sysinfo);
	if(tmp_name == NULL) {
		RETURN_FROM_PREVIOUS_ERROR( "os", sg_get_error() );
	}
	if(SG_ERROR_NONE != sg_update_string(&host_info_buf->os_name, tmp_name)) {
		free(tmp_name);
		RETURN_FROM_PREVIOUS_ERROR( "os", sg_get_error() );
	}
	free(tmp_name);
	/* Platform: map the processor architecture code to a fixed name */
	switch(sysinfo.wProcessorArchitecture) {
	case PROCESSOR_ARCHITECTURE_INTEL:
		if(SG_ERROR_NONE != sg_update_string(&host_info_buf->platform, "Intel")) {
			RETURN_FROM_PREVIOUS_ERROR( "os", sg_get_error() );
		}
		break;
	case PROCESSOR_ARCHITECTURE_IA64:
		if(SG_ERROR_NONE != sg_update_string(&host_info_buf->platform, "IA64")) {
			RETURN_FROM_PREVIOUS_ERROR( "os", sg_get_error() );
		}
		break;
	case PROCESSOR_ARCHITECTURE_AMD64:
		if(SG_ERROR_NONE != sg_update_string(&host_info_buf->platform, "AMD64")) {
			RETURN_FROM_PREVIOUS_ERROR( "os", sg_get_error() );
		}
		break;
	default:
		if(SG_ERROR_NONE != sg_update_string(&host_info_buf->platform, "Unknown")){
			RETURN_FROM_PREVIOUS_ERROR( "os", sg_get_error() );
		}
		break;
	}
	/* uptime comes from the PDH performance counter */
	if(read_counter_large(SG_WIN32_UPTIME, &result)) {
		RETURN_WITH_SET_ERROR("os", SG_ERROR_PDHREAD, PDH_UPTIME);
	}
	host_info_buf->uptime = (time_t) result;
#else
	/* All Unix variants: name/release/version/platform/hostname come
	 * from uname(2); the rest is per-platform below. */
	if((uname(&os)) < 0) {
		RETURN_WITH_SET_ERROR_WITH_ERRNO("os", SG_ERROR_UNAME, NULL);
	}
	if(SG_ERROR_NONE != sg_update_string(&host_info_buf->os_name, os.sysname)) {
		RETURN_FROM_PREVIOUS_ERROR( "os", sg_get_error() );
	}
	if(SG_ERROR_NONE != sg_update_string(&host_info_buf->os_release, os.release)) {
		RETURN_FROM_PREVIOUS_ERROR( "os", sg_get_error() );
	}
	if(SG_ERROR_NONE != sg_update_string(&host_info_buf->os_version, os.version)) {
		RETURN_FROM_PREVIOUS_ERROR( "os", sg_get_error() );
	}
	if(SG_ERROR_NONE != sg_update_string(&host_info_buf->platform, os.machine)) {
		RETURN_FROM_PREVIOUS_ERROR( "os", sg_get_error() );
	}
	if(SG_ERROR_NONE != sg_update_string(&host_info_buf->hostname, os.nodename)) {
		RETURN_FROM_PREVIOUS_ERROR( "os", sg_get_error() );
	}
	/* get uptime */
#ifdef HPUX
	if (pstat_getstatic(&pstat_static, sizeof(pstat_static), 1, 0) == -1) {
		RETURN_WITH_SET_ERROR_WITH_ERRNO("os", SG_ERROR_PSTAT, "pstat_static");
	}
	if (pstat_getdynamic(&pstat_dynamic, sizeof(pstat_dynamic), 1, 0) == -1) {
		RETURN_WITH_SET_ERROR_WITH_ERRNO("os", SG_ERROR_PSTAT, "pstat_dynamic");
	}
	/* uptime = now - boot time from the static pstat data */
	currtime = time(NULL);
	boottime = pstat_static.boot_time;
	host_info_buf->uptime = currtime - boottime;
	host_info_buf->ncpus = pstat_dynamic.psd_proc_cnt;
	host_info_buf->maxcpus = pstat_dynamic.psd_max_proc_cnt;
	host_info_buf->bitwidth = sysconf(_SC_KERNEL_BITS);
	/*
	 * TODO: getting virtualization state
	 * 1) on boostrapping this component, try loading /opt/hpvm/lib/libhpvm.so (or so)
	 * 2) get function addresses for
	 *    a) HPVM_boolean hpvm_api_server_check()
	 *    b) HPVM_boolean hpvm_api_virtmach_check()
	 *
	 * Seems to be hardware virtualization ...
	 * See: http://docstore.mik.ua/manuals/hp-ux/en/T2767-90141/index.html (hpvmpubapi(3))
	 *      http://jreypo.wordpress.com/tag/hpvm/
	 *      http://jreypo.wordpress.com/category/hp-ux/page/3/
	 *      http://h20338.www2.hp.com/enterprise/us/en/os/hpux11i-partitioning-integrity-vm.html
	 */
#elif defined(SOLARIS)
	/* boot time comes from the kstat "unix:system_misc:boot_time" value */
	if ((kc = kstat_open()) == NULL) {
		RETURN_WITH_SET_ERROR("os", SG_ERROR_KSTAT_OPEN, NULL);
	}
	if((ksp=kstat_lookup(kc, "unix", -1, "system_misc"))==NULL){
		kstat_close(kc);
		RETURN_WITH_SET_ERROR("os", SG_ERROR_KSTAT_LOOKUP, "unix,-1,system_misc");
	}
	if (kstat_read(kc, ksp, 0) == -1) {
		kstat_close(kc);
		RETURN_WITH_SET_ERROR("os", SG_ERROR_KSTAT_READ, NULL);
	}
	if((kn=kstat_data_lookup(ksp, "boot_time")) == NULL){
		kstat_close(kc);
		RETURN_WITH_SET_ERROR("os", SG_ERROR_KSTAT_DATA_LOOKUP, "boot_time");
	}
	/* XXX verify on Solaris 10 if it's still ui32 */
	boottime = (kn->value.ui32);
	kstat_close(kc);
	time(&curtime);
	host_info_buf->uptime = curtime - boottime;
	host_info_buf->ncpus = sysconf(_SC_NPROCESSORS_ONLN);
	host_info_buf->maxcpus = sysconf(_SC_NPROCESSORS_CONF);
	/* architecture name via sysinfo(2); the buffer is grown and the
	 * call retried when the initial 32 bytes are too small */
	isainfo = sg_malloc( isabufsz = (32 * sizeof(*isainfo)) );
	if( NULL == isainfo ) {
		RETURN_FROM_PREVIOUS_ERROR( "os", sg_get_error() );
	}
# define MKSTR(x) #x
# if defined(SI_ARCHITECTURE_K)
#  define SYSINFO_CMD SI_ARCHITECTURE_K
# elif defined(SI_ISALIST)
#  define SYSINFO_CMD SI_ISALIST
# else
#  define SYSINFO_CMD SI_ARCHITECTURE
# endif
sysinfo_again:
	if( -1 == ( rc = sysinfo( SYSINFO_CMD, isainfo, isabufsz ) ) ) {
		free(isainfo);
		RETURN_WITH_SET_ERROR_WITH_ERRNO("os", SG_ERROR_SYSINFO, MKSTR(SYSINFO_CMD) );
	}
	else if( rc > isabufsz ) {
		/* buffer too small: sysinfo() returned the required size */
		char *tmp = sg_realloc(isainfo, rc);
		if( NULL == tmp ) {
			free(isainfo);
			RETURN_FROM_PREVIOUS_ERROR( "os", sg_get_error() );
		}
		isabufsz = rc;
		isainfo = tmp;
		goto sysinfo_again;
	}
	host_info_buf->bitwidth = get_bitwidth_by_arch_name(isainfo);
	free(isainfo);
	host_info_buf->host_state = sg_unknown_configuration;
#elif defined(LINUX) || defined(CYGWIN)
	if ((f=fopen("/proc/uptime", "r")) == NULL) {
		RETURN_WITH_SET_ERROR_WITH_ERRNO("os", SG_ERROR_OPEN, "/proc/uptime");
	}
/* pick "%ld" vs "%lu" depending on whether time_t is signed — the
 * sizeof(int[...]) trick evaluates the signedness at compile time */
#define TIME_T_SCANF_FMT (sizeof(int[(((time_t)-1)/2)%4+1]) == sizeof(int[1]) ? "%ld %*d" : "%lu %*d" )
	if((fscanf(f,TIME_T_SCANF_FMT,&host_info_buf->uptime)) != 1){
		fclose(f);
		RETURN_WITH_SET_ERROR("os", SG_ERROR_PARSE, NULL);
	}
	fclose(f);
# if defined(LINUX)
	host_info_buf->ncpus = sysconf(_SC_NPROCESSORS_ONLN);
	host_info_buf->maxcpus = sysconf(_SC_NPROCESSORS_CONF);
	/* presence of either vsyscall proc entry implies a 64-bit kernel */
	if( access( "/proc/sys/kernel/vsyscall64", F_OK ) == 0 || access( "/proc/sys/abi/vsyscall32", F_OK ) == 0 ) {
		host_info_buf->bitwidth = 64;
	}
	else {
		host_info_buf->bitwidth = sysconf(_SC_LONG_BIT); // well, maybe 64-bit disabled 128-bit system o.O
	}
	host_info_buf->host_state = sg_unknown_configuration;
# endif
#elif defined(ALLBSD)
	/* boot time via sysctl(CTL_KERN, KERN_BOOTTIME) */
	mib[0] = CTL_KERN;
	mib[1] = KERN_BOOTTIME;
	size = sizeof(boottime);
	if (sysctl(mib, 2, &boottime, &size, NULL, 0) < 0) {
		RETURN_WITH_SET_ERROR_WITH_ERRNO("os", SG_ERROR_SYSCTL, "CTL_KERN.KERN_BOOTTIME");
	}
	time(&curtime);
	host_info_buf->uptime= curtime - boottime.tv_sec;
# if defined(HW_NCPU)
	mib[0] = CTL_HW;
	mib[1] = HW_NCPU;
	size = sizeof(int);
	if( sysctl( mib, 2, &ncpus, &size, NULL, 0 ) < 0 ) {
		RETURN_WITH_SET_ERROR_WITH_ERRNO("os", SG_ERROR_SYSCTL, "CTL_HW.HW_NCPU" );
	}
# endif
	/* bit width: prefer HW_MACHINE_ARCH, fall back to HW_MACHINE.
	 * NOTE: the else-branch brace below is opened and closed inside
	 * separate #if blocks — take care when editing. */
# if defined(HW_MACHINE_ARCH)
	mib[0] = CTL_HW;
	mib[1] = HW_MACHINE_ARCH;
	size = sizeof(arch_name);
	if( sysctl( mib, 2, arch_name, &size, NULL, 0 ) == 0 ) {
		host_info_buf->bitwidth = get_bitwidth_by_arch_name(arch_name);
	}
	else {
# endif
# if defined(HW_MACHINE)
	mib[0] = CTL_HW;
	mib[1] = HW_MACHINE;
	size = sizeof(arch_name);
	if( sysctl( mib, 2, arch_name, &size, NULL, 0 ) == 0 ) {
		host_info_buf->bitwidth = get_bitwidth_by_arch_name(arch_name);
	}
	else {
		SET_ERROR_WITH_ERRNO("os", SG_ERROR_SYSCTL, "CTL_HW.HW_MACHINE" );
	}
# elif defined(HW_MACHINE_ARCH)
	SET_ERROR_WITH_ERRNO("os", SG_ERROR_SYSCTL, "CTL_HW.HW_MACHINE_ARCH" );
# endif
# if defined(HW_MACHINE_ARCH)
	}
# endif
	host_info_buf->host_state = sg_unknown_configuration; /* details must be analysed "manually", no syscall */
	host_info_buf->maxcpus = (unsigned)ncpus;
# if defined(HW_NCPUONLINE)
	/* use knowledge about number of cpu's online, when available instead of assuming all of them */
	mib[0] = CTL_HW;
	mib[1] = HW_NCPUONLINE;
	size = sizeof(int);
	if( sysctl( mib, 2, &ncpus, &size, NULL, 0 ) < 0 ) {
		RETURN_WITH_SET_ERROR_WITH_ERRNO("os", SG_ERROR_SYSCTL, "CTL_HW.HW_NCPUONLINE" );
	}
# endif
	host_info_buf->ncpus = (unsigned)ncpus;
#elif defined(AIX)
	if(perfstat_cpu_total(NULL, &cpu_total, sizeof(cpu_total), 1) != 1) {
		RETURN_WITH_SET_ERROR_WITH_ERRNO("os", SG_ERROR_SYSCTL, "perfstat_cpu_total");
	}
	if(SG_ERROR_NONE != sg_update_string(&host_info_buf->platform, cpu_total.description)) {
		RETURN_FROM_PREVIOUS_ERROR( "os", sg_get_error() );
	}
	host_info_buf->ncpus = cpu_total.ncpus;
	host_info_buf->maxcpus = cpu_total.ncpus_cfg;
	host_info_buf->bitwidth = sysconf(_SC_AIX_KERNEL_BITMODE);
	/* LPAR enabled means the host is hardware-virtualized */
	if( sysconf(_SC_LPAR_ENABLED) > 0 ) {
		host_info_buf->host_state = sg_hardware_virtualized;
	}
	else {
		host_info_buf->host_state = sg_physical_host;
	}
	/* uptime: scan utmp(x) for the BOOT_TIME record; the utmp scan is
	 * serialized via the "utmp" mutex when threading is enabled */
#ifdef ENABLE_THREADS
	if( SG_ERROR_NONE != ( rc = sg_lock_mutex("utmp") ) ) {
		RETURN_FROM_PREVIOUS_ERROR( "os", rc );
	}
#endif
# if defined(HAVE_GETUTXENT)
#  define UTENTFN getutxent
#  define UTENTTM ut->ut_tv.tv_sec
	setutxent();
# else
#  define UTENTFN getutent
#  define UTENTTM ut->ut_time
	setutent();
# endif
	while( NULL != ( ut = UTENTFN() ) ) {
		if( ut->ut_type == BOOT_TIME ) {
			host_info_buf->uptime = time(NULL) - UTENTTM;
			break;
		}
	}
# if defined(HAVE_GETUTXENT)
	endutxent();
# else
	endutent();
# endif
#ifdef ENABLE_THREADS
	if( SG_ERROR_NONE != ( rc = sg_unlock_mutex("utmp") ) ) {
		RETURN_FROM_PREVIOUS_ERROR( "os", rc );
	}
#endif
#else
	RETURN_WITH_SET_ERROR("os", SG_ERROR_UNSUPPORTED, OS_TYPE);
#endif
#endif /* WIN32 */

	host_info_buf->systime = time(NULL);

	return SG_ERROR_NONE;
}
static u_char *var_extensible_mem (struct variable *vp, oid * name, size_t * length, int exact, size_t * var_len, WriteMethod ** write_method) { struct swapinfo swap; struct pst_static pst; struct pst_dynamic psd; static long long_ret; /* * Initialize the return value to 0 */ long_ret = 0; swap.total_swap = 0; swap.free_swap = 0; if (header_generic (vp, name, length, exact, var_len, write_method)) return (NULL); switch (vp->magic) { case MIBINDEX: long_ret = 0; return ((u_char *) (&long_ret)); case ERRORNAME: /* dummy name */ sprintf (errmsg, "swap"); *var_len = strlen (errmsg); return ((u_char *) (errmsg)); case MEMTOTALSWAP: get_swapinfo (&swap); long_ret = swap.total_swap; return ((u_char *) (&long_ret)); case MEMAVAILSWAP: get_swapinfo (&swap); long_ret = swap.free_swap; return ((u_char *) (&long_ret)); case MEMSWAPMINIMUM: long_ret = minimumswap; return ((u_char *) (&long_ret)); case MEMTOTALREAL: /* * Retrieve the static memory statistics */ if (pstat_getstatic (&pst, sizeof (pst), (size_t) 1, 0) == -1) { snmp_log (LOG_ERR, "memory_hpux: pstat_getstatic failed!\n"); return (NULL); } long_ret = pst.page_size / 1024 * pst.physical_memory; return ((u_char *) (&long_ret)); case MEMAVAILREAL: /* * Retrieve the static memory statistics */ if (pstat_getstatic (&pst, sizeof (pst), (size_t) 1, 0) == -1) { snmp_log (LOG_ERR, "memory_hpux: pstat_getstatic failed!\n"); return (NULL); } /* * Retrieve the dynamic memory statistics */ if (pstat_getdynamic (&psd, sizeof (psd), (size_t) 1, 0) == -1) { snmp_log (LOG_ERR, "memory_hpux: pstat_getdynamic failed!\n"); return (NULL); } long_ret = pst.page_size / 1024 * psd.psd_free; return ((u_char *) (&long_ret)); case MEMTOTALSWAPTXT: /* * Retrieve the static memory statistics */ if (pstat_getstatic (&pst, sizeof (pst), (size_t) 1, 0) == -1) { snmp_log (LOG_ERR, "memory_hpux: pstat_getstatic failed!\n"); return (NULL); } /* * Retrieve the dynamic memory statistics */ if (pstat_getdynamic (&psd, sizeof (psd), (size_t) 1, 0) == -1) 
{ snmp_log (LOG_ERR, "memory_hpux: pstat_getdynamic failed!\n"); return (NULL); } long_ret = pst.page_size / 1024 * psd.psd_vmtxt; return ((u_char *) (&long_ret)); case MEMUSEDSWAPTXT: /* * Retrieve the static memory statistics */ if (pstat_getstatic (&pst, sizeof (pst), (size_t) 1, 0) == -1) { snmp_log (LOG_ERR, "memory_hpux: pstat_getstatic failed!\n"); return (NULL); } /* * Retrieve the dynamic memory statistics */ if (pstat_getdynamic (&psd, sizeof (psd), (size_t) 1, 0) == -1) { snmp_log (LOG_ERR, "memory_hpux: pstat_getdynamic failed!\n"); return (NULL); } long_ret = pst.page_size / 1024 * (psd.psd_vmtxt - psd.psd_avmtxt); return ((u_char *) (&long_ret)); case MEMTOTALREALTXT: /* * Retrieve the static memory statistics */ if (pstat_getstatic (&pst, sizeof (pst), (size_t) 1, 0) == -1) { snmp_log (LOG_ERR, "memory_hpux: pstat_getstatic failed!\n"); return (NULL); } /* * Retrieve the dynamic memory statistics */ if (pstat_getdynamic (&psd, sizeof (psd), (size_t) 1, 0) == -1) { snmp_log (LOG_ERR, "memory_hpux: pstat_getdynamic failed!\n"); return (NULL); } long_ret = pst.page_size / 1024 * psd.psd_rmtxt; return ((u_char *) (&long_ret)); case MEMUSEDREALTXT: /* * Retrieve the static memory statistics */ if (pstat_getstatic (&pst, sizeof (pst), (size_t) 1, 0) == -1) { snmp_log (LOG_ERR, "memory_hpux: pstat_getstatic failed!\n"); return (NULL); } /* * Retrieve the dynamic memory statistics */ if (pstat_getdynamic (&psd, sizeof (psd), (size_t) 1, 0) == -1) { snmp_log (LOG_ERR, "memory_hpux: pstat_getdynamic failed!\n"); return (NULL); } long_ret = pst.page_size / 1024 * (psd.psd_rmtxt - psd.psd_armtxt); return ((u_char *) (&long_ret)); case MEMTOTALFREE: /* * Retrieve the static memory statistics */ if (pstat_getstatic (&pst, sizeof (pst), (size_t) 1, 0) == -1) { snmp_log (LOG_ERR, "memory_hpux: pstat_getstatic failed!\n"); return (NULL); } /* * Retrieve the dynamic memory statistics */ if (pstat_getdynamic (&psd, sizeof (psd), (size_t) 1, 0) == -1) { snmp_log 
(LOG_ERR, "memory_hpux: pstat_getdynamic failed!\n"); return (NULL); } get_swapinfo (&swap); long_ret = (pst.page_size / 1024 * psd.psd_free) + swap.free_swap; return ((u_char *) (&long_ret)); case ERRORFLAG: get_swapinfo (&swap); long_ret = (swap.free_swap > minimumswap) ? 0 : 1; return ((u_char *) (&long_ret)); case ERRORMSG: get_swapinfo (&swap); if ((swap.free_swap > minimumswap) ? 0 : 1) sprintf (errmsg, "Running out of swap space (%ld)", long_ret); else errmsg[0] = 0; *var_len = strlen (errmsg); return ((u_char *) (errmsg)); } /* end case */ return (NULL); }
/*
 * Collect one sample of per-CPU time counters and feed it into the
 * collector's ring buffers via update_cpu_counters().
 *
 * Slot convention (visible from the loops below): pcpus->cpu[0] holds the
 * totals across all CPUs, and pcpus->cpu[N] (1..pcpus->count) holds CPU N-1.
 * A CPU whose statistics cannot be read this round is marked not supported
 * by passing NULL as the counter array.
 *
 * Exactly one platform branch is compiled in, selected by the configure
 * macros: /proc/stat (Linux), pstat (HP-UX), sysctlbyname (FreeBSD),
 * kstat (Solaris), sysctl KERN_CPTIME (OpenBSD) or perfstat (AIX).
 */
static void	update_cpustats(ZBX_CPUS_STAT_DATA *pcpus)
{
	const char	*__function_name = "update_cpustats";
	int		cpu_num;
	/* one sample: a counter value per CPU state (user/nice/system/...) */
	zbx_uint64_t	counter[ZBX_CPU_STATE_COUNT];
#if defined(HAVE_PROC_STAT)
	FILE		*file;
	char		line[1024];
	/* per-slot flag: did we see a "cpu..." line for this slot this round? */
	unsigned char	*cpu_status = NULL;
	const char	*filename = "/proc/stat";
#elif defined(HAVE_SYS_PSTAT_H)
	struct pst_dynamic	psd;
	struct pst_processor	psp;
#elif defined(HAVE_FUNCTION_SYSCTLBYNAME) && defined(CPUSTATES)
	long	cp_time[CPUSTATES], *cp_times = NULL;
	size_t	nlen, nlen_alloc;
#elif defined(HAVE_KSTAT_H)
	cpu_stat_t	*cpu;
	/* running sum over all CPUs, reported as slot 0 at the end */
	zbx_uint64_t	total[ZBX_CPU_STATE_COUNT];
	kid_t		id;
#elif defined(HAVE_FUNCTION_SYSCTL_KERN_CPTIME)
	int		mib[3];
	long		all_states[CPUSTATES];
	u_int64_t	one_states[CPUSTATES];
	size_t		sz;
#elif defined(HAVE_LIBPERFSTAT)
	perfstat_cpu_total_t	ps_cpu_total;
	perfstat_cpu_t		ps_cpu;
	perfstat_id_t		ps_id;
#endif

	zabbix_log(LOG_LEVEL_DEBUG, "In %s()", __function_name);

	/* mark every slot (totals + each CPU) as not supported; expands to a */
	/* for-statement, so callers below follow it with ';' and rely on the */
	/* local cpu_num being in scope */
#define ZBX_SET_CPUS_NOTSUPPORTED()	\
	for (cpu_num = 0; cpu_num <= pcpus->count; cpu_num++)	\
		update_cpu_counters(&pcpus->cpu[cpu_num], NULL)

#if defined(HAVE_PROC_STAT)
	if (NULL == (file = fopen(filename, "r")))
	{
		zbx_error("cannot open [%s]: %s", filename, zbx_strerror(errno));
		ZBX_SET_CPUS_NOTSUPPORTED();
		goto exit;
	}

	cpu_status = zbx_malloc(cpu_status, sizeof(unsigned char) * (pcpus->count + 1));

	/* assume failure for every slot; flipped to OK as lines are parsed */
	for (cpu_num = 0; cpu_num <= pcpus->count; cpu_num++)
		cpu_status[cpu_num] = SYSINFO_RET_FAIL;

	while (NULL != fgets(line, sizeof(line), file))
	{
		if (0 != strncmp(line, "cpu", 3))
			continue;

		if ('0' <= line[3] && line[3] <= '9')
		{
			/* "cpuN" line: kernel CPU N maps to slot N + 1 */
			cpu_num = atoi(line + 3) + 1;

			if (1 > cpu_num || cpu_num > pcpus->count)
				continue;
		}
		else if (' ' == line[3])
			cpu_num = 0;	/* "cpu " line: aggregate over all CPUs */
		else
			continue;

		memset(counter, 0, sizeof(counter));

		/* fields beyond what the kernel prints stay zero because */
		/* sscanf() simply stops converting at end of line */
		sscanf(line, "%*s " ZBX_FS_UI64 " " ZBX_FS_UI64 " " ZBX_FS_UI64 " " ZBX_FS_UI64
				" " ZBX_FS_UI64 " " ZBX_FS_UI64 " " ZBX_FS_UI64 " " ZBX_FS_UI64,
				&counter[ZBX_CPU_STATE_USER], &counter[ZBX_CPU_STATE_NICE],
				&counter[ZBX_CPU_STATE_SYSTEM], &counter[ZBX_CPU_STATE_IDLE],
				&counter[ZBX_CPU_STATE_IOWAIT], &counter[ZBX_CPU_STATE_INTERRUPT],
				&counter[ZBX_CPU_STATE_SOFTIRQ], &counter[ZBX_CPU_STATE_STEAL]);

		update_cpu_counters(&pcpus->cpu[cpu_num], counter);
		cpu_status[cpu_num] = SYSINFO_RET_OK;
	}
	zbx_fclose(file);

	/* any slot that never appeared in /proc/stat is marked not supported */
	for (cpu_num = 0; cpu_num <= pcpus->count; cpu_num++)
		if (SYSINFO_RET_FAIL == cpu_status[cpu_num])
			update_cpu_counters(&pcpus->cpu[cpu_num], NULL);

	zbx_free(cpu_status);
#elif defined(HAVE_SYS_PSTAT_H)
	/* HP-UX: slot 0 from pstat_getdynamic(), per-CPU slots from */
	/* pstat_getprocessor() with a zero-based processor index */
	for (cpu_num = 0; cpu_num <= pcpus->count; cpu_num++)
	{
		memset(counter, 0, sizeof(counter));

		if (0 == cpu_num)
		{
			if (-1 == pstat_getdynamic(&psd, sizeof(psd), 1, 0))
			{
				update_cpu_counters(&pcpus->cpu[cpu_num], NULL);
				continue;
			}

			counter[ZBX_CPU_STATE_USER] = (zbx_uint64_t)psd.psd_cpu_time[CP_USER];
			counter[ZBX_CPU_STATE_NICE] = (zbx_uint64_t)psd.psd_cpu_time[CP_NICE];
			counter[ZBX_CPU_STATE_SYSTEM] = (zbx_uint64_t)psd.psd_cpu_time[CP_SYS];
			counter[ZBX_CPU_STATE_IDLE] = (zbx_uint64_t)psd.psd_cpu_time[CP_IDLE];
		}
		else
		{
			if (-1 == pstat_getprocessor(&psp, sizeof(psp), 1, cpu_num - 1))
			{
				update_cpu_counters(&pcpus->cpu[cpu_num], NULL);
				continue;
			}

			counter[ZBX_CPU_STATE_USER] = (zbx_uint64_t)psp.psp_cpu_time[CP_USER];
			counter[ZBX_CPU_STATE_NICE] = (zbx_uint64_t)psp.psp_cpu_time[CP_NICE];
			counter[ZBX_CPU_STATE_SYSTEM] = (zbx_uint64_t)psp.psp_cpu_time[CP_SYS];
			counter[ZBX_CPU_STATE_IDLE] = (zbx_uint64_t)psp.psp_cpu_time[CP_IDLE];
		}

		update_cpu_counters(&pcpus->cpu[cpu_num], counter);
	}
#elif defined(HAVE_FUNCTION_SYSCTLBYNAME) && defined(CPUSTATES)
	/* FreeBSD 7.0 */

	/* overall counters from "kern.cp_time"; a short or failed read marks */
	/* everything not supported for this round */
	nlen = sizeof(cp_time);
	if (-1 == sysctlbyname("kern.cp_time", &cp_time, &nlen, NULL, 0) || nlen != sizeof(cp_time))
	{
		ZBX_SET_CPUS_NOTSUPPORTED();
		goto exit;
	}

	memset(counter, 0, sizeof(counter));

	counter[ZBX_CPU_STATE_USER] = (zbx_uint64_t)cp_time[CP_USER];
	counter[ZBX_CPU_STATE_NICE] = (zbx_uint64_t)cp_time[CP_NICE];
	counter[ZBX_CPU_STATE_SYSTEM] = (zbx_uint64_t)cp_time[CP_SYS];
	counter[ZBX_CPU_STATE_INTERRUPT] = (zbx_uint64_t)cp_time[CP_INTR];
	counter[ZBX_CPU_STATE_IDLE] = (zbx_uint64_t)cp_time[CP_IDLE];

	update_cpu_counters(&pcpus->cpu[0], counter);

	/* get size of result set for CPU statistics */
	if (-1 == sysctlbyname("kern.cp_times", NULL, &nlen_alloc, NULL, 0))
	{
		for (cpu_num = 1; cpu_num <= pcpus->count; cpu_num++)
			update_cpu_counters(&pcpus->cpu[cpu_num], NULL);
		goto exit;
	}

	cp_times = zbx_malloc(cp_times, nlen_alloc);

	/* "kern.cp_times" returns CPUSTATES longs per CPU, concatenated; */
	/* the read is accepted only if the size matches the probe above */
	nlen = nlen_alloc;
	if (0 == sysctlbyname("kern.cp_times", cp_times, &nlen, NULL, 0) && nlen == nlen_alloc)
	{
		for (cpu_num = 1; cpu_num <= pcpus->count; cpu_num++)
		{
			memset(counter, 0, sizeof(counter));

			counter[ZBX_CPU_STATE_USER] = (zbx_uint64_t)*(cp_times + (cpu_num - 1) * CPUSTATES + CP_USER);
			counter[ZBX_CPU_STATE_NICE] = (zbx_uint64_t)*(cp_times + (cpu_num - 1) * CPUSTATES + CP_NICE);
			counter[ZBX_CPU_STATE_SYSTEM] = (zbx_uint64_t)*(cp_times + (cpu_num - 1) * CPUSTATES + CP_SYS);
			counter[ZBX_CPU_STATE_INTERRUPT] = (zbx_uint64_t)*(cp_times + (cpu_num - 1) * CPUSTATES + CP_INTR);
			counter[ZBX_CPU_STATE_IDLE] = (zbx_uint64_t)*(cp_times + (cpu_num - 1) * CPUSTATES + CP_IDLE);

			update_cpu_counters(&pcpus->cpu[cpu_num], counter);
		}
	}
	else
	{
		for (cpu_num = 1; cpu_num <= pcpus->count; cpu_num++)
			update_cpu_counters(&pcpus->cpu[cpu_num], NULL);
	}

	zbx_free(cp_times);
#elif defined(HAVE_KSTAT_H)
	/* Solaris */

	/* NOTE(review): kc, ksp and kc_id are file-scope state (kstat control */
	/* handle, per-CPU kstat pointers, chain id) initialized elsewhere in */
	/* this file -- presumably by the collector startup / refresh_kstat() */
	if (NULL == kc)
	{
		ZBX_SET_CPUS_NOTSUPPORTED();
		goto exit;
	}

	memset(total, 0, sizeof(total));

	for (cpu_num = 1; cpu_num <= pcpus->count; cpu_num++)
	{
read_again:
		if (NULL != (*ksp)[cpu_num - 1])
		{
			id = kstat_read(kc, (*ksp)[cpu_num - 1], NULL);
			if (-1 == id || kc_id != id)	/* error or our kstat chain copy is out-of-date */
			{
				/* refresh the chain once and retry this same CPU; */
				/* on refresh failure, give up on this CPU only */
				if (SUCCEED != refresh_kstat(pcpus))
				{
					update_cpu_counters(&pcpus->cpu[cpu_num], NULL);
					continue;
				}
				else
					goto read_again;
			}

			cpu = (cpu_stat_t *)(*ksp)[cpu_num - 1]->ks_data;

			memset(counter, 0, sizeof(counter));

			/* accumulate into total while filling this CPU's sample */
			total[ZBX_CPU_STATE_IDLE] += counter[ZBX_CPU_STATE_IDLE] = cpu->cpu_sysinfo.cpu[CPU_IDLE];
			total[ZBX_CPU_STATE_USER] += counter[ZBX_CPU_STATE_USER] = cpu->cpu_sysinfo.cpu[CPU_USER];
			total[ZBX_CPU_STATE_SYSTEM] += counter[ZBX_CPU_STATE_SYSTEM] = cpu->cpu_sysinfo.cpu[CPU_KERNEL];
			total[ZBX_CPU_STATE_IOWAIT] += counter[ZBX_CPU_STATE_IOWAIT] = cpu->cpu_sysinfo.cpu[CPU_WAIT];

			update_cpu_counters(&pcpus->cpu[cpu_num], counter);
		}
		else
			update_cpu_counters(&pcpus->cpu[cpu_num], NULL);
	}

	/* slot 0 (all CPUs) is the sum computed above */
	update_cpu_counters(&pcpus->cpu[0], total);
#elif defined(HAVE_FUNCTION_SYSCTL_KERN_CPTIME)
	/* OpenBSD 4.3 */

	/* KERN_CPTIME gives the aggregate (longs), KERN_CPTIME2 gives one */
	/* CPU at a time (u_int64_t) selected by mib[2] */
	for (cpu_num = 0; cpu_num <= pcpus->count; cpu_num++)
	{
		memset(counter, 0, sizeof(counter));

		if (0 == cpu_num)
		{
			mib[0] = CTL_KERN;
			mib[1] = KERN_CPTIME;

			sz = sizeof(all_states);

			if (-1 == sysctl(mib, 2, &all_states, &sz, NULL, 0) || sz != sizeof(all_states))
			{
				update_cpu_counters(&pcpus->cpu[cpu_num], NULL);
				continue;
			}

			counter[ZBX_CPU_STATE_USER] = (zbx_uint64_t)all_states[CP_USER];
			counter[ZBX_CPU_STATE_NICE] = (zbx_uint64_t)all_states[CP_NICE];
			counter[ZBX_CPU_STATE_SYSTEM] = (zbx_uint64_t)all_states[CP_SYS];
			counter[ZBX_CPU_STATE_INTERRUPT] = (zbx_uint64_t)all_states[CP_INTR];
			counter[ZBX_CPU_STATE_IDLE] = (zbx_uint64_t)all_states[CP_IDLE];
		}
		else
		{
			mib[0] = CTL_KERN;
			mib[1] = KERN_CPTIME2;
			mib[2] = cpu_num - 1;

			sz = sizeof(one_states);

			if (-1 == sysctl(mib, 3, &one_states, &sz, NULL, 0) || sz != sizeof(one_states))
			{
				update_cpu_counters(&pcpus->cpu[cpu_num], NULL);
				continue;
			}

			counter[ZBX_CPU_STATE_USER] = (zbx_uint64_t)one_states[CP_USER];
			counter[ZBX_CPU_STATE_NICE] = (zbx_uint64_t)one_states[CP_NICE];
			counter[ZBX_CPU_STATE_SYSTEM] = (zbx_uint64_t)one_states[CP_SYS];
			counter[ZBX_CPU_STATE_INTERRUPT] = (zbx_uint64_t)one_states[CP_INTR];
			counter[ZBX_CPU_STATE_IDLE] = (zbx_uint64_t)one_states[CP_IDLE];
		}

		update_cpu_counters(&pcpus->cpu[cpu_num], counter);
	}
#elif defined(HAVE_LIBPERFSTAT)
	/* AIX 6.1 */

	/* slot 0 from perfstat_cpu_total(); per-CPU slots from perfstat_cpu() */
	/* addressed by logical name "cpuN" */
	for (cpu_num = 0; cpu_num <= pcpus->count; cpu_num++)
	{
		memset(counter, 0, sizeof(counter));

		if (0 == cpu_num)
		{
			if (-1 == perfstat_cpu_total(NULL, &ps_cpu_total, sizeof(ps_cpu_total), 1))
			{
				update_cpu_counters(&pcpus->cpu[cpu_num], NULL);
				continue;
			}

			counter[ZBX_CPU_STATE_USER] = (zbx_uint64_t)ps_cpu_total.user;
			counter[ZBX_CPU_STATE_SYSTEM] = (zbx_uint64_t)ps_cpu_total.sys;
			counter[ZBX_CPU_STATE_IDLE] = (zbx_uint64_t)ps_cpu_total.idle;
			counter[ZBX_CPU_STATE_IOWAIT] = (zbx_uint64_t)ps_cpu_total.wait;
		}
		else
		{
			zbx_snprintf(ps_id.name, sizeof(ps_id.name), "cpu%d", cpu_num - 1);

			if (-1 == perfstat_cpu(&ps_id, &ps_cpu, sizeof(ps_cpu), 1))
			{
				update_cpu_counters(&pcpus->cpu[cpu_num], NULL);
				continue;
			}

			counter[ZBX_CPU_STATE_USER] = (zbx_uint64_t)ps_cpu.user;
			counter[ZBX_CPU_STATE_SYSTEM] = (zbx_uint64_t)ps_cpu.sys;
			counter[ZBX_CPU_STATE_IDLE] = (zbx_uint64_t)ps_cpu.idle;
			counter[ZBX_CPU_STATE_IOWAIT] = (zbx_uint64_t)ps_cpu.wait;
		}

		update_cpu_counters(&pcpus->cpu[cpu_num], counter);
	}
#endif	/* HAVE_LIBPERFSTAT */

#undef ZBX_SET_CPUS_NOTSUPPORTED

	/* the exit label exists only in branches that jump to it */
#if defined(HAVE_PROC_STAT) || (defined(HAVE_FUNCTION_SYSCTLBYNAME) && defined(CPUSTATES)) || defined(HAVE_KSTAT_H)
exit:
#endif
	zabbix_log(LOG_LEVEL_DEBUG, "End of %s()", __function_name);
}