void get_load_average(double *one, double *five, double *fifteen) { kstat_ctl_t *kc; kstat_t *ks; kstat_named_t *d1, *d5, *d15; if (!(kc = kstat_open())) { SET_AND_RETURN(0, 0, 0); } if (!(ks = kstat_lookup(kc, "unix", 0, "system_misc"))) { SET_AND_RETURN(0, 0, 0); } if ((kstat_read(kc, ks, NULL)) < 0) { SET_AND_RETURN(0, 0, 0); } d1 = kstat_data_lookup(ks, "avenrun_1min"); d5 = kstat_data_lookup(ks, "avenrun_5min"); d15 = kstat_data_lookup(ks, "avenrun_15min"); kstat_close(kc); SET_AND_RETURN(d1->value.ul / FSCALE, d5->value.ul / FSCALE, d15->value.ul / FSCALE); }
int netstat_init() { kstat_t *ksp; kc = kstat_open(); if (kstat_chain_update(kc)) (void) fprintf(stderr, "<<State Changed>>n"); for (ksp = kc->kc_chain; ksp != NULL; ksp = ksp->ks_next) { if (ksp->ks_type != KSTAT_TYPE_NAMED || strcmp(ksp->ks_class, "net") != 0 || kstat_read(kc, ksp, NULL) == -1 || kstat_data_lookup(ksp, "ipackets64") == NULL || kstat_data_lookup(ksp, "opackets64") == NULL) continue; strlcpy(nics[no_nics].interface, ksp->ks_name, INTERFACE_LEN); nics[no_nics++].ksp = ksp; if (no_nics >= MAX_NETS) { printf("Warning: Number of interfaces exceeded %d\n", MAX_NETS); return (-1); } } return (0); }
int CollectorSolaris::getHostMemoryUsage(ULONG *total, ULONG *used, ULONG *available) { int rc = VINF_SUCCESS; kstat_named_t *kn; if (mKC == 0 || mSysPages == 0) return VERR_INTERNAL_ERROR; if (kstat_read(mKC, mSysPages, 0) == -1) { Log(("kstat_read(sys_pages) -> %d\n", errno)); return VERR_INTERNAL_ERROR; } if ((kn = (kstat_named_t *)kstat_data_lookup(mSysPages, "freemem")) == 0) { Log(("kstat_data_lookup(freemem) -> %d\n", errno)); return VERR_INTERNAL_ERROR; } *available = kn->value.ul * (PAGE_SIZE/1024); if (kstat_read(mKC, mZFSCache, 0) != -1) { if (mZFSCache) { if ((kn = (kstat_named_t *)kstat_data_lookup(mZFSCache, "size"))) { ulong_t ulSize = kn->value.ul; if ((kn = (kstat_named_t *)kstat_data_lookup(mZFSCache, "c_min"))) { /* * Account for ZFS minimum arc cache size limit. * "c_min" is the target minimum size of the ZFS cache, and not the hard limit. It's possible * for "size" to shrink below "c_min" (e.g: during boot & high memory consumption). */ ulong_t ulMin = kn->value.ul; *available += ulSize > ulMin ? (ulSize - ulMin) / 1024 : 0; } else Log(("kstat_data_lookup(c_min) ->%d\n", errno)); } else Log(("kstat_data_lookup(size) -> %d\n", errno)); } else Log(("mZFSCache missing.\n")); } if ((kn = (kstat_named_t *)kstat_data_lookup(mSysPages, "physmem")) == 0) { Log(("kstat_data_lookup(physmem) -> %d\n", errno)); return VERR_INTERNAL_ERROR; } *total = kn->value.ul * (PAGE_SIZE/1024); *used = *total - *available; return rc; }
/*
 * acquire_sys - populate the system-wide portion of a stats snapshot.
 *
 * Reads the "sysinfo", "vminfo" and "dnlcstats" kstats of the "unix"
 * module directly into ss->s_sys, pulls two named values out of
 * "system_misc", then folds the per-CPU sys/vm counters of every active
 * CPU into the system-wide aggregates and counts the active CPUs.
 *
 * Returns 0 on success, or errno as left by the failing kstat call.
 */
int acquire_sys(struct snapshot *ss, kstat_ctl_t *kc)
{
	size_t i;
	kstat_named_t *knp;
	kstat_t *ksp;

	/* raw-struct kstats: kstat_read() copies straight into the snapshot */
	if ((ksp = kstat_lookup(kc, "unix", 0, "sysinfo")) == NULL)
		return (errno);
	if (kstat_read(kc, ksp, &ss->s_sys.ss_sysinfo) == -1)
		return (errno);
	if ((ksp = kstat_lookup(kc, "unix", 0, "vminfo")) == NULL)
		return (errno);
	if (kstat_read(kc, ksp, &ss->s_sys.ss_vminfo) == -1)
		return (errno);
	if ((ksp = kstat_lookup(kc, "unix", 0, "dnlcstats")) == NULL)
		return (errno);
	if (kstat_read(kc, ksp, &ss->s_sys.ss_nc) == -1)
		return (errno);

	/* named kstat: values are looked up individually after the read */
	if ((ksp = kstat_lookup(kc, "unix", 0, "system_misc")) == NULL)
		return (errno);
	if (kstat_read(kc, ksp, NULL) == -1)
		return (errno);

	knp = (kstat_named_t *)kstat_data_lookup(ksp, "clk_intr");
	if (knp == NULL)
		return (errno);
	ss->s_sys.ss_ticks = knp->value.l;

	knp = (kstat_named_t *)kstat_data_lookup(ksp, "deficit");
	if (knp == NULL)
		return (errno);
	ss->s_sys.ss_deficit = knp->value.l;

	/* aggregate each active CPU's counters into the system totals */
	for (i = 0; i < ss->s_nr_cpus; i++) {
		if (!CPU_ACTIVE(&ss->s_cpus[i]))
			continue;
		if (kstat_add(&ss->s_cpus[i].cs_sys, &ss->s_sys.ss_agg_sys))
			return (errno);
		if (kstat_add(&ss->s_cpus[i].cs_vm, &ss->s_sys.ss_agg_vm))
			return (errno);
		ss->s_nr_active_cpus++;
	}

	return (0);
}
/*
 * sigar_mem_get - fill in a sigar_mem_t with physical memory usage.
 *
 * total comes from sysconf(_SC_PHYS_PAGES); free comes from the cached
 * "syspages" kstat.  The ZFS ARC size (above its c_min floor) is treated
 * as reclaimable when deriving actual_free/actual_used.
 *
 * Returns SIGAR_OK, or errno if the kstat chain update fails.
 */
int sigar_mem_get(sigar_t *sigar, sigar_mem_t *mem)
{
    kstat_ctl_t *kc = sigar->kc;
    kstat_t *ksp;
    sigar_uint64_t kern = 0;

    SIGAR_ZERO(mem);

    /* XXX: is mem hot swappable or can we just do this during open ? */
    mem->total = sysconf(_SC_PHYS_PAGES);
    /* NOTE(review): '<<=' implies sigar->pagesize holds log2(page size),
     * i.e. a shift count rather than a byte count -- TODO confirm. */
    mem->total <<= sigar->pagesize;

    if (sigar_kstat_update(sigar) == -1) {
        return errno;
    }

    /* free/used from the system_pages kstat, converted pages -> bytes */
    if ((ksp = sigar->ks.syspages) && kstat_read(kc, ksp, NULL) >= 0) {
        sigar_koffsets_init_syspages(sigar, ksp);
        mem->free = kSYSPAGES(KSTAT_SYSPAGES_FREE);
        mem->free <<= sigar->pagesize;
        mem->used = mem->total - mem->free;
    }

    /* refresh mempages offsets; no values are consumed from it here */
    if ((ksp = sigar->ks.mempages) && kstat_read(kc, ksp, NULL) >= 0) {
        sigar_koffsets_init_mempages(sigar, ksp);
    }

    /* XXX mdb ::memstat cachelist/freelist not available to kstat, see: */
    /* http://bugs.opensolaris.org/bugdatabase/view_bug.do?bug_id=6821980 */

    /* ZFS ARC cache. see: http://opensolaris.org/jive/thread.jspa?messageID=393695 */
    if ((ksp = kstat_lookup(sigar->kc, "zfs", 0, "arcstats")) &&
        (kstat_read(sigar->kc, ksp, NULL) != -1))
    {
        kstat_named_t *kn;

        if ((kn = (kstat_named_t *)kstat_data_lookup(ksp, "size"))) {
            kern = kn->value.i64;
        }
        if ((kn = (kstat_named_t *)kstat_data_lookup(ksp, "c_min"))) {
            /* c_min cannot be reclaimed they say */
            if (kern > kn->value.i64) {
                kern -= kn->value.i64;
            }
        }
    }

    /* count reclaimable ARC memory as effectively free */
    mem->actual_free = mem->free + kern;
    mem->actual_used = mem->used - kern;

    sigar_mem_calc_ram(sigar, mem);

    return SIGAR_OK;
}
/*
 * get_memory_usage - report physical memory and swap usage in bytes.
 *
 * Memory figures come from the unix:0:system_pages kstat; swap figures
 * are summed over all swap devices via swapctl().  memory_cache and
 * memory_buffers are always reported as 0 (not tracked on this
 * platform).
 *
 * Returns TRUE on success, FALSE on any kstat or swapctl failure.
 */
gboolean get_memory_usage (guint64 *memory_total, guint64 *memory_free, guint64 *memory_cache, guint64 *memory_buffers, guint64 *swap_total, guint64 *swap_free)
{
	kstat_t *ksp;
	kstat_named_t *knp;
	gint n;

	if (!kc)
		init_stats ();

	if (!(ksp = kstat_lookup (kc, "unix", 0, "system_pages")))
		return FALSE;
	/* the read result was previously ignored */
	if (kstat_read (kc, ksp, NULL) == -1)
		return FALSE;

	/* both lookups were previously dereferenced without NULL checks */
	if (!(knp = kstat_data_lookup (ksp, "physmem")))
		return FALSE;
	*memory_total = getpagesize () * knp->value.ui64;
	if (!(knp = kstat_data_lookup (ksp, "freemem")))
		return FALSE;
	*memory_free = getpagesize () * knp->value.ui64;

	*memory_cache = 0;
	*memory_buffers = 0;
	*swap_total = *swap_free = 0;

	if ((n = swapctl (SC_GETNSWP, NULL)) > 0) {
		struct swaptable *st;
		struct swapent *swapent;
		gchar path[MAXPATHLEN];
		gint i;

		if ((st = malloc (sizeof (int) + n * sizeof (swapent_t))) == NULL)
			return FALSE;
		st->swt_n = n;

		/* every entry needs a path buffer for swapctl to fill in */
		swapent = st->swt_ent;
		for (i = 0; i < n; i++, swapent++)
			swapent->ste_path = path;

		if ((swapctl (SC_LIST, st)) == -1) {
			free (st);
			return FALSE;
		}

		swapent = st->swt_ent;
		for (i = 0; i < n; i++, swapent++) {
			*swap_total += swapent->ste_pages * getpagesize ();
			*swap_free += swapent->ste_free * getpagesize ();
		}
		free (st);
	}
	return TRUE;
}
/*
 * fetch6432 - return a uint64_t or a uint32_t value from kstat.
 *
 * Tries the 64-bit statistic name first, then the 32-bit one, and
 * returns def when neither is present.
 */
static uint64_t
fetch6432(kstat_t *ksp, char *value64, char *value, uint64_t def)
{
	kstat_named_t *entry;	/* Kstat named pointer */

	entry = kstat_data_lookup(ksp, value64);
	if (entry != NULL)
		return (entry->value.ui64);

	entry = kstat_data_lookup(ksp, value);
	return (entry == NULL ? def : (uint64_t)entry->value.ui32);
}
/*
 * populate_g_idnew - the master kstat function.
 *
 * This fetches all the network data from kstat and populates the
 * global variables g_idnew and g_interfacemax. It uses a kstat control
 * pointer as an argument, and the global array g_network.
 *
 * This function works by climbing down the kstat chains looking
 * for modules that look like network interfaces. The first step is
 * to check the module name against the global array g_network (the code
 * for this will need maintenance as new network cards are developed);
 * then a kstat variable is checked "obytes" or "obytes64" to ensure
 * that this really is a network module. This approach is not ideal,
 * I'd rather base the test on the kstat class == "net", however this
 * data does not yet appear reliable across all interfaces.
 */
static void
populate_g_idnew(kstat_ctl_t *kc)
{
	kstat_t *ksp;		/* Kstat struct pointer */
	int ok, i;
	int num = 0;		/* next free slot in g_idnew[] */

	for (ksp = kc->kc_chain; ksp != NULL; ksp = ksp->ks_next) {

		/* Search all modules */
		for (ok = 0, i = 0; g_network[i] != NULL; i++) {
			if (strcmp(ksp->ks_module, g_network[i]) == 0)
				ok = 1;
		}

		/* Skip if this isn't a network module */
		if (ok == 0)
			continue;
		if (kstat_read(kc, ksp, NULL) == -1)
			continue;
		if ((kstat_data_lookup(ksp, "obytes") == NULL) &&
		    (kstat_data_lookup(ksp, "obytes64") == NULL))
			continue;

		/* Check for tracked interfaces */
		if (g_someints) {
			/* NOTE(review): '*g_tracked[i] != NULL' compares the
			 * FIRST CHARACTER of each name to NULL, so the list
			 * must end with an empty string rather than a NULL
			 * pointer -- TODO confirm against its definition. */
			for (ok = 0, i = 0; *g_tracked[i] != NULL; i++) {
				if (strcmp(ksp->ks_name, g_tracked[i]) == 0)
					ok = 1;
			}
			if (ok == 0)
				continue;
		}

		/* Save network values */
		g_idnew[num].rbytes = fetch6432(ksp, "rbytes64", "rbytes", 0);
		g_idnew[num].wbytes = fetch6432(ksp, "obytes64", "obytes", 0);
		g_idnew[num].rpackets = fetch6432(ksp, "ipackets64", "ipackets", 0);
		g_idnew[num].wpackets = fetch6432(ksp, "opackets64", "opackets", 0);
		/* saturation = sum of the drop/defer style counters */
		g_idnew[num].sat = fetch32(ksp, "defer", 0);
		g_idnew[num].sat += fetch_nocanput(ksp, 0);
		g_idnew[num].sat += fetch32(ksp, "norcvbuf", 0);
		g_idnew[num].sat += fetch32(ksp, "noxmtbuf", 0);
		g_idnew[num].time = time(0);
		fetchstr( ksp, "zonename", g_idnew[num].zone );
		/* if the speed can't be fetched, this makes %util 0.0 */
		g_idnew[num].speed = fetch64(ksp, "ifspeed", 1LL << 48);
		(void) strcpy(g_idnew[num].name, ksp->ks_name);
		num++;
	}

	/* NOTE(review): g_interfacemax holds the LAST valid index (num - 1),
	 * not a count; it is -1 when no interface matched -- verify callers. */
	g_interfacemax = num - 1;
}
unsigned long SolarisSysUpTime::SysUpTime() { unsigned long retVal = 0; kstat_ctl_t* ksc = kstat_open(); if (ksc != NULL) { kstat_t* ks = kstat_lookup(ksc, "unix", -1, "system_misc"); if (ks != NULL) { kid_t kid = kstat_read(ksc, ks, NULL); if (kid != -1) { kstat_named_t* named = (kstat_named_t*)kstat_data_lookup(ks, "lbolt"); if (named != NULL) retVal = named->value.ul; } } kstat_close(ksc); } return retVal; }
/*
 * get_chipid - return the physical chip id for one CPU.
 *
 * Reads the cpu_info kstat instance for cpuid and returns its "chip_id"
 * value, or -1 on any failure (read and lookup errors are reported on
 * stderr).
 */
static int get_chipid(kstat_ctl_t *kc, processorid_t cpuid)
{
	kstat_t *ksp = kstat_lookup(kc, "cpu_info", cpuid, NULL);
	kstat_named_t *chip;

	if (ksp == NULL)
		return (-1);

	if (kstat_read(kc, ksp, NULL) == -1) {
		(void) fprintf(stderr,
		    gettext("%s: kstat_read() failed for cpu %d: %s\n"),
		    opts->pgmname, cpuid, strerror(errno));
		return (-1);
	}

	chip = (kstat_named_t *)kstat_data_lookup(ksp, "chip_id");
	if (chip == NULL) {
		(void) fprintf(stderr,
		    gettext("%s: chip_id not found for cpu %d: %s\n"),
		    opts->pgmname, cpuid, strerror(errno));
		return (-1);
	}

	return (chip->value.i32);
}
static int get_kstat_system_misc(char *s, int *value) { kstat_ctl_t *kc; kstat_t *kp; kstat_named_t *kn = NULL; int n, i; if (NULL == (kc = kstat_open())) return FAIL; if (NULL == (kp = kstat_lookup(kc, "unix", 0, "system_misc"))) { kstat_close(kc); return FAIL; } if (-1 == kstat_read(kc, kp, NULL)) { kstat_close(kc); return FAIL; } if (NULL == (kn = (kstat_named_t*)kstat_data_lookup(kp, s))) { kstat_close(kc); return FAIL; } kstat_close(kc); *value = kn->value.ul; return SUCCEED; }
/*
 * SYSTEM_UPTIME - agent item: seconds elapsed since system boot.
 *
 * Computed as difftime(now, boot_time) where boot_time is read from the
 * unix:0:system_misc kstat.  Stores the result in *result and returns
 * SYSINFO_RET_OK, or SYSINFO_RET_FAIL on any kstat error.
 */
int	SYSTEM_UPTIME(const char *cmd, const char *param, unsigned flags, AGENT_RESULT *result)
{
	kstat_ctl_t	*kc;
	kstat_t		*kp;
	kstat_named_t	*kn;
	time_t		now;
	int		ret = SYSINFO_RET_FAIL;

	assert(result);
	init_result(result);

	kc = kstat_open();
	if (NULL == kc)
		return ret;

	kp = kstat_lookup(kc, "unix", 0, "system_misc");
	if (NULL != kp && -1 != kstat_read(kc, kp, 0))
	{
		kn = (kstat_named_t *)kstat_data_lookup(kp, "boot_time");
		if (NULL != kn)
		{
			time(&now);
			SET_UI64_RESULT(result, difftime(now, (time_t)kn->value.ul));
			ret = SYSINFO_RET_OK;
		}
	}

	kstat_close(kc);
	return ret;
}
/*
 * get_kstat_named_field - copy one named kstat record into *returned_data.
 *
 * name selects the kstat (module/instance wildcarded), field selects the
 * named statistic within it.  On success the whole kstat_named_t is
 * copied out and SYSINFO_RET_OK is returned; otherwise SYSINFO_RET_FAIL.
 */
static int	get_kstat_named_field(
	const char	*name,
	const char	*field,
	kstat_named_t	*returned_data
	)
{
	kstat_ctl_t	*kc;
	kstat_t		*kp;
	kstat_named_t	*kn;
	int		ret = SYSINFO_RET_FAIL;

	kc = kstat_open();
	if (!kc)
		return ret;

	kp = kstat_lookup(kc, NULL, -1, (char *)name);
	if (kp != NULL && kstat_read(kc, kp, 0) != -1)
	{
		kn = (kstat_named_t *)kstat_data_lookup(kp, (char *)field);
		if (kn != NULL)
		{
			/* struct copy: the caller owns its own copy after this */
			*returned_data = *kn;
			ret = SYSINFO_RET_OK;
		}
	}

	kstat_close(kc);
	return ret;
}
/*
 * uptimeInSeconds - JNI: seconds since boot, computed as
 * current time minus the "boot_time" value from unix:0:system_misc.
 * Returns 0 on any failure.
 */
JNIEXPORT jlong JNICALL Java_com_jezhumble_javasysmon_SolarisMonitor_uptimeInSeconds (JNIEnv *env, jobject obj)
{
	struct timeval secs;
	kstat_ctl_t *kc;
	kstat_t *ksp;
	kstat_named_t *knp;
	unsigned long long uptime;
	jlong result = 0;

	if (gettimeofday(&secs, NULL) != 0) {
		return (jlong) 0;
	}
	uptime = (unsigned long long) secs.tv_sec;

	kc = kstat_open();
	if (kc == NULL) {	/* previously dereferenced without a check */
		fprintf(stderr, "%s\n", "ERROR: Can't read boot time.");
		return 0;
	}
	if ((ksp = kstat_lookup(kc, "unix", 0, "system_misc")) == NULL) {
		fprintf(stderr, "%s\n", "ERROR: Can't read boot time.");
		kstat_close(kc);	/* the original leaked the handle */
		return 0;
	}
	if ((kstat_read(kc, ksp, NULL) != -1) &&
	    /* lookup the boot time record */
	    ((knp = kstat_data_lookup(ksp, "boot_time")) != NULL)) {
		result = (jlong) (uptime - knp->value.ui32);
	}
	/* close on every path: the original never called kstat_close(),
	 * leaking a kstat handle per invocation */
	kstat_close(kc);
	return result;
}
/*
 * req_width - compute the display column width for a request kstat.
 *
 * Widens field_width so it can hold both the longest statistic name and
 * the longest "value percent%" rendering.  The per-request counters are
 * assumed to start at the named entry "null" and run to the end of the
 * kstat's data.
 */
static int
req_width(kstat_t *req, int field_width)
{
	int i, nreq, per, len;
	char fixlen[128];
	kstat_named_t *knp;
	uint64_t tot;

	/* grand total over every counter, used for the percentage column */
	tot = 0;
	knp = KSTAT_NAMED_PTR(req);
	for (i = 0; i < req->ks_ndata; i++)
		tot += knp[i].value.ui64;

	/*
	 * "null" marks where the per-request counters begin; without it we
	 * cannot locate them.  The original subtracted a NULL pointer here
	 * when the lookup failed -- keep the current width instead.
	 */
	knp = kstat_data_lookup(req, "null");
	if (knp == NULL)
		return (field_width);
	nreq = req->ks_ndata - (knp - KSTAT_NAMED_PTR(req));

	for (i = 0; i < nreq; i++) {
		len = strlen(knp[i].name) + 1;
		if (field_width < len)
			field_width = len;
		if (tot)
			per = (int)(knp[i].value.ui64 * 100 / tot);
		else
			per = 0;
		(void) sprintf(fixlen, "%" PRIu64 " %d%%",
		    knp[i].value.ui64, per);
		len = strlen(fixlen) + 1;
		if (field_width < len)
			field_width = len;
	}
	return (field_width);
}
/*
 * SYSTEM_BOOTTIME - agent item: absolute boot time (epoch seconds).
 *
 * Reads "boot_time" from the unix:0:system_misc kstat into *result.
 * Returns SYSINFO_RET_OK on success, SYSINFO_RET_FAIL otherwise.
 */
int	SYSTEM_BOOTTIME(const char *cmd, const char *param, unsigned flags, AGENT_RESULT *result)
{
	kstat_ctl_t	*kc;
	kstat_t		*kp;
	kstat_named_t	*kn;
	int		ret = SYSINFO_RET_FAIL;

	kc = kstat_open();
	if (NULL == kc)
		return ret;

	kp = kstat_lookup(kc, "unix", 0, "system_misc");
	if (NULL != kp && -1 != kstat_read(kc, kp, 0))
	{
		kn = (kstat_named_t *)kstat_data_lookup(kp, "boot_time");
		if (NULL != kn)
		{
			SET_UI64_RESULT(result, (zbx_uint64_t)kn->value.ul);
			ret = SYSINFO_RET_OK;
		}
	}

	kstat_close(kc);
	return ret;
}
/*
 * openQuery - JNI: open the global kstat handle, cache the CPU count,
 * and take the first system-info sample.
 *
 * Returns JNI_TRUE on success.  g_num_cpus defaults to 1 if the "ncpus"
 * statistic is missing or nonsensical.
 */
JNIEXPORT jboolean JNICALL Java_jp_co_acroquest_endosnipe_javelin_resource_proc_SolarisResourceReader_openQuery (JNIEnv *env, jobject obj)
{
    kstat_t *kstat_ncpus;
    kstat_named_t *knamed_ncpus;
    int result;

    g_kstat_ctl = kstat_open();
    if (g_kstat_ctl == NULL)    /* previously dereferenced unchecked */
    {
        return JNI_FALSE;
    }

    /* get number of cpus */
    kstat_ncpus = kstat_lookup(g_kstat_ctl, "unix", 0, "system_misc");
    if (kstat_ncpus == NULL)    /* previously passed to kstat_read unchecked */
    {
        return JNI_FALSE;
    }
    result = kstat_read(g_kstat_ctl, kstat_ncpus, 0);
    if (result == -1)
    {
        return JNI_FALSE;
    }
    knamed_ncpus = kstat_data_lookup(kstat_ncpus, "ncpus");
    if (knamed_ncpus != NULL)
    {
        g_num_cpus = knamed_ncpus->value.ui32;
    }
    if (g_num_cpus <= 0)
    {
        g_num_cpus = 1;
    }

    /* baseline sample for later delta computation */
    getsysinfo(&g_system_info_prev);

    return JNI_TRUE;
}
/*
 * kstat_load_avg - return the 1-minute load average.
 *
 * The kstat handle and the unix:0:system_misc kstat pointer are cached
 * in function statics so only the first call pays for
 * kstat_open()/kstat_lookup().  On any failure the error is logged and
 * the (externally defined) RETURN macro is invoked -- presumably it
 * returns a sentinel value; confirm at its definition.
 */
double
kstat_load_avg(void)
{
	static kstat_ctl_t	*kc = NULL;	/* libkstat cookie */
	static kstat_t		*ksp = NULL;	/* kstat pointer */
	kstat_named_t		*ksdp = NULL;	/* kstat data pointer */

	if( ! kc ) {
		if( (kc = kstat_open()) == NULL ) {
			dprintf( D_ALWAYS, "kstat_open() failed, errno = %d\n", errno );
			RETURN;
		}
	}
	if( ! ksp ) {
		if( (ksp = kstat_lookup(kc, "unix", 0, "system_misc")) == NULL ) {
			dprintf( D_ALWAYS, "kstat_lookup() failed, errno = %d\n", errno );
			RETURN;
		}
	}
	/* refresh the cached kstat's data on every call */
	if( kstat_read(kc, ksp, NULL) == -1 ) {
		dprintf( D_ALWAYS, "kstat_read() failed, errno = %d\n", errno );
		RETURN;
	}
	ksdp = (kstat_named_t *) kstat_data_lookup(ksp, "avenrun_1min");
	if( ksdp ) {
		/* avenrun values are fixed-point integers scaled by FSCALE */
		return (double) ksdp->value.l / FSCALE;
	} else {
		dprintf( D_ALWAYS, "kstat_data_lookup() failed, errno = %d\n", errno);
		RETURN;
	}
}
/*
 * Return value of named statistic for given kstat_named kstat;
 * return 0LL if named statistic is not in list (use "ll" as a
 * type qualifier when printing 64-bit int's with printf() )
 */
static uint64_t
kstat_named_value(kstat_t *ksp, char *name)
{
	kstat_named_t *knp;

	if (ksp == NULL)
		return (0LL);
	if ((knp = kstat_data_lookup(ksp, name)) == NULL)
		return (0LL);

	/* widen 32-bit counters; anything else is reported as zero */
	switch (knp->data_type) {
	case KSTAT_DATA_INT32:
	case KSTAT_DATA_UINT32:
		return ((uint64_t)knp->value.ui32);
	case KSTAT_DATA_INT64:
	case KSTAT_DATA_UINT64:
		return (knp->value.ui64);
	default:
		return (0LL);
	}
}
static int get_kstat_system_misc(char *key, int *value) { kstat_ctl_t *kc; kstat_t *ksp; kstat_named_t *kn = NULL; int ret = FAIL; if (NULL == (kc = kstat_open())) return ret; if (NULL == (ksp = kstat_lookup(kc, "unix", 0, "system_misc"))) goto close; if (-1 == kstat_read(kc, ksp, NULL)) goto close; if (NULL == (kn = (kstat_named_t *)kstat_data_lookup(ksp, key))) goto close; *value = kn->value.ul; ret = SUCCEED; close: kstat_close(kc); return ret; }
/**
 * Helper for getting the core ID for a given CPU/strand/hyperthread.
 *
 * @returns The core ID.
 * @param   idCpu       The CPU ID instance.
 *
 * NOTE(review): kstat_data_lookup() can return NULL, and only the Assert
 * guards pStat; in builds where Assert compiles out, a missing "core_id"
 * statistic would be dereferenced -- confirm every g_papCpuInfo entry
 * carries it.
 */
static inline uint64_t rtMpSolarisGetCoreId(RTCPUID idCpu)
{
    /* "core_id" is a named statistic on each cpu_info kstat instance */
    kstat_named_t *pStat = (kstat_named_t *)kstat_data_lookup(g_papCpuInfo[idCpu], (char *)"core_id");
    Assert(pStat->data_type == KSTAT_DATA_LONG);
    Assert(pStat->value.l >= 0);
    AssertCompile(sizeof(uint64_t) >= sizeof(long)); /* Paranoia. */
    return (uint64_t)pStat->value.l;
}
/*
 * get_cpu_usage - sample per-CPU kstat nanosecond counters and report
 * the CPU count plus user/system usage percentages (computed by
 * get_cpu_percent from the accumulated tick deltas).
 *
 * Always returns TRUE.
 */
gboolean
get_cpu_usage (gushort *cpu_count, gfloat *cpu_user, gfloat *cpu_system)
{
	kstat_t *ksp;
	kstat_named_t *knp;
	/* totals persist across calls so deltas can be formed */
	static gulong ticks_total = 0, ticks_total_old = 0;
	gulong ticks_user = 0, ticks_system = 0;

	if (!kc)
		init_stats ();

	_cpu_count = 0;
	kstat_chain_update (kc);

	ticks_total_old = ticks_total;
	ticks_total = 0;

	for (ksp = kc->kc_chain; ksp != NULL; ksp = ksp->ks_next) {
		if (!g_strcmp0 (ksp->ks_module, "cpu_info")) {
			/* one cpu_info instance exists per CPU */
			_cpu_count += 1;
		}
		else if (!g_strcmp0 (ksp->ks_module, "cpu") &&
		         !g_strcmp0 (ksp->ks_name, "sys")) {
			/* NOTE(review): the kstat_read() result and every
			 * kstat_data_lookup() result are unchecked; a missing
			 * statistic would dereference NULL -- TODO add guards. */
			kstat_read (kc, ksp, NULL);
			/* cpu_nsec_* are nanoseconds; /100000 converts each
			 * to 10-microsecond "ticks" */
			knp = kstat_data_lookup (ksp, "cpu_nsec_user");
			ticks_user += knp->value.ul / 100000;
			knp = kstat_data_lookup (ksp, "cpu_nsec_kernel");
			ticks_system += knp->value.ul / 100000;
			knp = kstat_data_lookup (ksp, "cpu_nsec_intr");
			ticks_system += knp->value.ul / 100000;
			knp = kstat_data_lookup (ksp, "cpu_nsec_idle");
			ticks_total += knp->value.ul / 100000;
			/* NOTE(review): this adds the CUMULATIVE user+system
			 * sums once per CPU iteration, so on multi-CPU
			 * systems earlier CPUs' work is counted repeatedly --
			 * verify whether this is intended. */
			ticks_total += ticks_user + ticks_system;
		}
	}

	if (ticks_total > ticks_total_old)
		ticks_total_delta = ticks_total - ticks_total_old;

	get_cpu_percent (0, ticks_user, cpu_user, ticks_system, cpu_system);
	*cpu_count = _cpu_count;

	return TRUE;
}
/*
 * acquire_intrs - record interrupt counters in the snapshot.
 *
 * Slot 0 is reserved for the clock interrupt, taken from the
 * unix:0:system_misc "clk_intr" named statistic; the remaining slots
 * hold one entry per KSTAT_TYPE_INTR kstat, each summed over its
 * KSTAT_NUM_INTRS buckets.
 *
 * Returns 0 on success; on failure, errno as left by the failing call
 * (the goto-out paths deliberately skip the final "errno = 0").
 */
static int
acquire_intrs(struct snapshot *ss, kstat_ctl_t *kc)
{
	kstat_t *ksp;
	size_t i = 0;
	kstat_t *sys_misc;
	kstat_named_t *clock;

	/* clock interrupt */
	ss->s_nr_intrs = 1;

	/* first pass: count interrupt kstats to size the array */
	for (ksp = kc->kc_chain; ksp; ksp = ksp->ks_next) {
		if (ksp->ks_type == KSTAT_TYPE_INTR)
			ss->s_nr_intrs++;
	}

	ss->s_intrs = calloc(ss->s_nr_intrs, sizeof (struct intr_snapshot));
	if (ss->s_intrs == NULL)
		return (errno);

	sys_misc = kstat_lookup_read(kc, "unix", 0, "system_misc");
	if (sys_misc == NULL)
		goto out;

	clock = (kstat_named_t *)kstat_data_lookup(sys_misc, "clk_intr");
	if (clock == NULL)
		goto out;

	(void) strlcpy(ss->s_intrs[0].is_name, "clock", KSTAT_STRLEN);
	ss->s_intrs[0].is_total = clock->value.ui32;

	i = 1;

	/* second pass: read and total each interrupt kstat */
	for (ksp = kc->kc_chain; ksp; ksp = ksp->ks_next) {
		kstat_intr_t *ki;
		int j;

		if (ksp->ks_type != KSTAT_TYPE_INTR)
			continue;
		if (kstat_read(kc, ksp, NULL) == -1)
			goto out;

		ki = KSTAT_INTR_PTR(ksp);

		(void) strlcpy(ss->s_intrs[i].is_name, ksp->ks_name,
		    KSTAT_STRLEN);
		ss->s_intrs[i].is_total = 0;

		for (j = 0; j < KSTAT_NUM_INTRS; j++)
			ss->s_intrs[i].is_total += ki->intrs[j];

		i++;
	}

	errno = 0;
out:
	return (errno);
}
/*
 * check_ks_module - accumulate one kstat counter for one instance of a
 * network interface module.
 *
 * Returns -1 on hard error (instance out of range, wrong data type),
 * 1 to ask the caller to restart the kstat chain walk (the chain ID
 * changed underneath us), and 0 otherwise -- including the benign
 * "statistic not present" case.
 */
int Solaris_Network_Interface_Monitor::check_ks_module (
  const unsigned long ks_instance,
  const unsigned long which_max,
  const char *max_str,
  ACE_UINT64 *value_array,
  ACE_UINT64 &which_member)
{
  if (! (ks_instance < which_max))
    {
      ACELIB_ERROR_RETURN ((LM_ERROR, "%s exceeded.\n", max_str), -1);
    }

  /// Because the kstat chain can change dynamically,
  /// watch the chain ID and restart the walk if the ID
  /// differs from what we saw during the walk. The restart
  /// is done by breaking from the cycle with kstat_ not 0.
  kid_t kstat_id = kstat_read (this->kstats_, this->kstat_, 0);
  if (kstat_id != this->kstat_id_)
    {
      /// This return value restarts the walk as described above.
      return 1;
    }

  kstat_named_t *value =
    (kstat_named_t *) kstat_data_lookup (
      this->kstat_, ACE_TEXT_ALWAYS_CHAR (this->lookup_str_.rep ()));
  if (value == 0)
    {
      /// Just return and let the calling FOR loop advance.
      return 0;
    }

  if (value->data_type != KSTAT_DATA_UINT32)
    {
      ACELIB_ERROR_RETURN ((LM_ERROR, "Wrong data type.\n"), -1);
    }

  /// Guard against overflow.
  /// NOTE(review): adding (new32 - low32(accumulated)) extends the
  /// 32-bit kernel counter into the 64-bit accumulator across 32-bit
  /// wraparound via unsigned modular arithmetic -- this assumes the
  /// counter is monotonically increasing; confirm for these stats.
  value_array[ks_instance] +=
    value->value.ui32 - static_cast<ACE_UINT32> (value_array[ks_instance]);
  which_member += value_array[ks_instance];

  return 0;
}
/*
 * fetch32 - return a uint32_t value from kstat.
 *
 * Looks up the named statistic on ksp and returns its 32-bit value, or
 * def when the statistic is absent.
 */
static uint32_t
fetch32(kstat_t *ksp, char *value, uint32_t def)
{
	kstat_named_t *found = kstat_data_lookup(ksp, value);

	return (found == NULL ? def : found->value.ui32);
}
/*
 * updateLoadAvg - refresh the loadavg1/5/15 globals from kstat.
 *
 * Reads the "avenrun_*" fixed-point values from unix:0:system_misc and
 * stores them via the LOAD macro.  Always returns 0; failures simply
 * leave the globals unchanged.  Compiled to a no-op without HAVE_KSTAT.
 */
int updateLoadAvg(void)
{
#ifdef HAVE_KSTAT
	kstat_ctl_t	*kctl;
	kstat_t		*ksp;
	kstat_named_t	*kdata;

	/*
	 * get a kstat handle and update the user's kstat chain
	 */
	if((kctl = kstat_open()) == NULL)
		return (0);
	/*
	 * kstat_chain_update() returns -1 on error; the original looped
	 * "while (!= 0)", which spins forever in that case.  Only repeat
	 * while the chain actually changed (> 0).
	 */
	while(kstat_chain_update(kctl) > 0)
		;

	/*
	 * traverse the kstat chain to find the appropriate statistics
	 */
	if((ksp = kstat_lookup(kctl, "unix", 0, "system_misc")) == NULL) {
		kstat_close(kctl);	/* was leaked on this path */
		return (0);
	}
	if(kstat_read(kctl, ksp, NULL) == -1) {
		kstat_close(kctl);	/* was leaked on this path */
		return (0);
	}

	/*
	 * lookup the data
	 */
	kdata = (kstat_named_t *)kstat_data_lookup(ksp, "avenrun_1min");
	if(kdata != NULL)
		loadavg1 = LOAD(kdata->value.ui32);
	kdata = (kstat_named_t *)kstat_data_lookup(ksp, "avenrun_5min");
	if(kdata != NULL)
		loadavg5 = LOAD(kdata->value.ui32);
	kdata = (kstat_named_t *)kstat_data_lookup(ksp, "avenrun_15min");
	if(kdata != NULL)
		loadavg15 = LOAD(kdata->value.ui32);

	kstat_close(kctl);
#endif /* ! HAVE_KSTAT */
	return (0);
}
/*
 * fetch_nocanput - return nocanput value, whose name(s) are driver-dependent.
 *
 * Most drivers have a kstat "nocanput", but the ce driver
 * at least has "rx_nocanput" and "tx_nocanput".  When the plain name is
 * absent, whichever of the rx/tx counters exist are summed; def is
 * returned only if none of the three is present.  (The original
 * discarded the rx value and returned def whenever tx was missing.)
 */
static uint32_t
fetch_nocanput(kstat_t *ksp, uint32_t def)
{
	kstat_named_t *knp;	/* Kstat named pointer */
	uint32_t sum = 0;
	int found = 0;

	/* Check "nocanput" first */
	if ((knp = kstat_data_lookup(ksp, "nocanput")) != NULL)
		return (knp->value.ui32);

	if ((knp = kstat_data_lookup(ksp, "rx_nocanput")) != NULL) {
		sum += knp->value.ui32;
		found = 1;
	}
	if ((knp = kstat_data_lookup(ksp, "tx_nocanput")) != NULL) {
		sum += knp->value.ui32;
		found = 1;
	}
	return (found ? sum : def);
}
/*
 * fetchstr - copy a string-valued kstat statistic into dest.
 *
 * Copies the named KSTAT_DATA_STRING value into dest, or the literal
 * "Unknown" when the statistic is absent.  dest is assumed to hold at
 * least KSTAT_NAMED_STR_BUFLEN(knp) bytes -- TODO confirm at callers.
 */
static void
fetchstr(kstat_t *ksp, char *value, char *dest)
{
	kstat_named_t *knp;

	if((knp = kstat_data_lookup(ksp, value)) != NULL) {
		size_t len = KSTAT_NAMED_STR_BUFLEN(knp);

		strncpy( dest, KSTAT_NAMED_STR_PTR(knp), len);
		/*
		 * strncpy() does not NUL-terminate when the source fills
		 * the buffer; force termination so dest is always a valid
		 * C string.
		 */
		if (len > 0)
			dest[len - 1] = '\0';
	} else
		strcpy( dest, "Unknown" );
}
/*
 * netlink_fetch - PMDA fetch callback for per-link network kstats.
 *
 * Resolves the cached kstat for the instance, looks up the statistic
 * named by the metric's md_offset, and stores it in *av when the kstat
 * data type matches the PCP metric type.
 *
 * Returns 1 when a value was stored, 0 when unavailable (missing kstat,
 * missing statistic, or type mismatch), or PM_ERR_INST for a bad
 * instance.
 */
int
netlink_fetch(pmdaMetric *pm, int inst, pmAtomValue *av)
{
    char		*lname;
    metricdesc_t	*md = pm->m_user;
    kstat_t		*k;
    kstat_named_t	*kn;
    char		*stat = (char *)md->md_offset;

    if (pmdaCacheLookup(indomtab[NETLINK_INDOM].it_indom, inst, &lname,
			(void **)&k) != PMDA_CACHE_ACTIVE)
	return PM_ERR_INST;
    if (k == NULL)
	return 0;

    kn = kstat_data_lookup(k, stat);
    if (kn == NULL) {
	fprintf(stderr, "No kstat called %s for %s\n", stat, lname);
	return 0;
    }

    /* only deliver the value when kstat and metric types agree */
    switch (pm->m_desc.type) {
    case PM_TYPE_32:
	if (kn->data_type != KSTAT_DATA_INT32)
	    break;
	av->l = kn->value.i32;
	return 1;
    case PM_TYPE_U32:
	if (kn->data_type != KSTAT_DATA_UINT32)
	    break;
	av->ul = kn->value.ui32;
	return 1;
    case PM_TYPE_64:
	if (kn->data_type != KSTAT_DATA_INT64)
	    break;
	av->ll = kn->value.i64;
	return 1;
    case PM_TYPE_U64:
	if (kn->data_type != KSTAT_DATA_UINT64)
	    break;
	av->ull = kn->value.ui64;
	return 1;
    }
    return 0;
}
/*
 * get_chip_id - physical chip id for one logical processor.
 *
 * Re-reads the cached cpu_info kstat and returns its "chip_id", or -1
 * when the kstat is missing, unreadable, or lacks the statistic.
 */
static int get_chip_id(sigar_t *sigar, int processor)
{
    kstat_t *info = sigar->ks.cpu_info[processor];
    kstat_named_t *chip;

    if (info == NULL) {
        return -1;
    }
    if (kstat_read(sigar->kc, info, NULL) == -1) {
        return -1;
    }
    chip = (kstat_named_t *)kstat_data_lookup(info, "chip_id");
    return chip ? chip->value.i32 : -1;
}