lookup_result_type<Type> lookup(const pmInDom indom, const instance_id_type instance_id,
                                const lookup_flags flags = require_active)
{
    lookup_result_type<Type> result;
    void * opaque;

    result.name = NULL;
    result.status = pmdaCacheLookup(indom, instance_id, &result.name, &opaque);
    if (result.status < 0) {
        throw pcp::exception(result.status);
    }
    if ((flags & require_active) && (result.status != PMDA_CACHE_ACTIVE)) {
        std::ostringstream message;
        message << "Cache entry " << indom << ':' << instance_id << " (";
        if (result.name == NULL) {
            message << "NULL";
        } else {
            message << '"' << result.name << '"';
        }
        message << ") inactive";
        throw pcp::exception(result.status, message.str());
    }
    result.instance_id = instance_id;
    result.opaque = static_cast<Type>(opaque);
    return result;
}
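The wrapper above is a typed veneer over the C cache API: a negative status is a hard error, and anything other than PMDA_CACHE_ACTIVE is treated as "no such instance". A minimal sketch of the equivalent raw C calls, assuming a hypothetical instance_info_t payload type and reporting failures via return codes rather than exceptions:

#include <pcp/pmapi.h>
#include <pcp/pmda.h>

/* hypothetical per-instance payload type, for illustration only */
typedef struct { int refreshed; } instance_info_t;

static int
lookup_active(pmInDom indom, int inst, instance_info_t **info)
{
    char *name = NULL;
    int   sts;

    /* negative status is a hard error, non-ACTIVE means a stale entry */
    sts = pmdaCacheLookup(indom, inst, &name, (void **)info);
    if (sts < 0)
        return sts;
    if (sts != PMDA_CACHE_ACTIVE)
        return PM_ERR_INST;
    return 0;
}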
int
cgroup_mounts_subsys(const char *system, char *buffer, int length)
{
    pmInDom mounts = INDOM(CGROUP_MOUNTS_INDOM);
    pmInDom subsys = INDOM(CGROUP_SUBSYS_INDOM);
    filesys_t *fs;
    char *name;
    int sts;

    /*
     * Iterate over the cgroup.mounts.subsys indom, comparing each entry
     * with the given subsys - on a match, fill buffer with the stats
     * path for that mount point and return its length, else return 0.
     */
    pmdaCacheOp(mounts, PMDA_CACHE_WALK_REWIND);
    while ((sts = pmdaCacheOp(mounts, PMDA_CACHE_WALK_NEXT)) != -1) {
        if (!pmdaCacheLookup(mounts, sts, &name, (void **)&fs))
            continue;
        if (strcmp(system, cgroup_find_subsys(subsys, fs)) != 0)
            continue;
        snprintf(buffer, length, "%s%s/", proc_statspath, name);
        buffer[length-1] = '\0';
        return strlen(buffer);
    }
    return 0;
}
int
proc_slabinfo_fetch(pmInDom indom, int item, unsigned int inst, pmAtomValue *ap)
{
    slab_cache_t *slab_cache = NULL;
    int sts;

    sts = pmdaCacheLookup(indom, inst, NULL, (void **)&slab_cache);
    if (sts < 0)
        return sts;
    if (sts == PMDA_CACHE_INACTIVE)
        return PM_ERR_INST;

    switch (item) {
    case 0: /* mem.slabinfo.objects.active */
        ap->ull = slab_cache->num_active_objs;
        break;
    case 1: /* mem.slabinfo.objects.total */
        ap->ull = slab_cache->total_objs;
        break;
    case 2: /* mem.slabinfo.objects.size */
        if (slab_cache->seen < 11)      /* version 1.1 or later only */
            return 0;
        ap->ul = slab_cache->object_size;
        break;
    case 3: /* mem.slabinfo.slabs.active */
        if (slab_cache->seen < 11)      /* version 1.1 or later only */
            return 0;
        ap->ul = slab_cache->num_active_slabs;
        break;
    case 4: /* mem.slabinfo.slabs.total */
        if (slab_cache->seen == 11)     /* version 1.1 only */
            return 0;
        ap->ul = slab_cache->total_slabs;
        break;
    case 5: /* mem.slabinfo.slabs.pages_per_slab */
        if (slab_cache->seen < 11)      /* version 1.1 or later only */
            return 0;
        ap->ul = slab_cache->pages_per_slab;
        break;
    case 6: /* mem.slabinfo.slabs.objects_per_slab */
        if (slab_cache->seen != 20)     /* version 2.0 only */
            return 0;
        ap->ul = slab_cache->objects_per_slab;
        break;
    case 7: /* mem.slabinfo.slabs.total_size */
        if (slab_cache->seen < 11)      /* version 1.1 or later only */
            return 0;
        ap->ull = slab_cache->total_size;
        break;
    default:
        return PM_ERR_PMID;
    }
    return 1;
}
int
netlink_fetch(pmdaMetric *pm, int inst, pmAtomValue *av)
{
    char *lname;
    metricdesc_t *md = pm->m_user;
    kstat_t *k;
    char *stat = (char *)md->md_offset;

    if (pmdaCacheLookup(indomtab[NETLINK_INDOM].it_indom, inst, &lname,
                        (void **)&k) != PMDA_CACHE_ACTIVE)
        return PM_ERR_INST;

    if (k) {
        kstat_named_t *kn = kstat_data_lookup(k, stat);

        if (kn == NULL) {
            fprintf(stderr, "No kstat called %s for %s\n", stat, lname);
            return 0;
        }

        switch (pm->m_desc.type) {
        case PM_TYPE_32:
            if (kn->data_type == KSTAT_DATA_INT32) {
                av->l = kn->value.i32;
                return 1;
            }
            break;
        case PM_TYPE_U32:
            if (kn->data_type == KSTAT_DATA_UINT32) {
                av->ul = kn->value.ui32;
                return 1;
            }
            break;
        case PM_TYPE_64:
            if (kn->data_type == KSTAT_DATA_INT64) {
                av->ll = kn->value.i64;
                return 1;
            }
            break;
        case PM_TYPE_U64:
            if (kn->data_type == KSTAT_DATA_UINT64) {
                av->ull = kn->value.ui64;
                return 1;
            }
            break;
        }
    }
    return 0;
}
int
refresh_net_ioctl(pmInDom indom, linux_container_t *cp, int *need_refresh)
{
    net_interface_t *netip;
    char *p;
    int sts = 0;

    for (pmdaCacheOp(indom, PMDA_CACHE_WALK_REWIND);;) {
        if ((sts = pmdaCacheOp(indom, PMDA_CACHE_WALK_NEXT)) < 0)
            break;
        if (!pmdaCacheLookup(indom, sts, &p, (void **)&netip) || !p)
            continue;
        refresh_net_dev_ioctl(p, netip, cp, need_refresh);
    }
    return sts;
}
/*
 * For block devices we have one instance domain for dev_t
 * based lookup, and another for (real) name lookup.
 * The reason we need this is that the blkio cgroup stats
 * are exported using the major:minor numbers, and not the
 * device names - we must perform that mapping ourselves.
 * In some places (value refresh) we need to lookup the blk
 * name from device major/minor, in other places (instances
 * refresh) we need the usual external instid:name lookup.
 */
static void
refresh_cgroup_devices(void)
{
    pmInDom diskindom = INDOM(DISK_INDOM);
    pmInDom devtindom = INDOM(DEVT_INDOM);
    char buf[MAXPATHLEN];
    FILE *fp;

    pmdaCacheOp(devtindom, PMDA_CACHE_INACTIVE);
    pmdaCacheOp(diskindom, PMDA_CACHE_INACTIVE);

    if ((fp = proc_statsfile("/proc/diskstats", buf, sizeof(buf))) == NULL)
        return;

    while (fgets(buf, sizeof(buf), fp) != NULL) {
        unsigned int major, minor, unused;
        device_t *dev = NULL;
        char namebuf[1024];
        int inst;

        if (sscanf(buf, "%u %u %s %u", &major, &minor, namebuf, &unused) != 4)
            continue;
        if (_pm_isloop(namebuf) || _pm_isramdisk(namebuf))
            continue;
        if (pmdaCacheLookupName(diskindom, namebuf, &inst, (void **)&dev) < 0 ||
            dev == NULL) {
            if (!(dev = (device_t *)malloc(sizeof(device_t)))) {
                __pmNoMem("device", sizeof(device_t), PM_RECOV_ERR);
                continue;
            }
            dev->major = major;
            dev->minor = minor;
        }
        /* keeping track of all fields (major/minor/inst/name) */
        pmdaCacheStore(diskindom, PMDA_CACHE_ADD, namebuf, dev);
        (void)pmdaCacheLookupName(diskindom, namebuf, &dev->inst, NULL);
        (void)pmdaCacheLookup(diskindom, dev->inst, &dev->name, NULL);

        snprintf(buf, sizeof(buf), "%u:%u", major, minor);
        pmdaCacheStore(devtindom, PMDA_CACHE_ADD, buf, (void *)dev);

        if (pmDebug & DBG_TRACE_APPL0)
            fprintf(stderr, "refresh_devices: \"%s\" \"%d:%d\" inst=%d\n",
                    dev->name, dev->major, dev->minor, dev->inst);
    }
    fclose(fp);
}
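The value-refresh direction described in the header comment above (blkio stats keyed by major:minor) then resolves through the DEVT indom by name. A minimal sketch, assuming the same device_t layout and DEVT_INDOM/INDOM macros as in the function above; the helper name itself is hypothetical:

/* hypothetical helper: map a "major:minor" key to its device entry, or NULL */
static device_t *
lookup_device_by_devt(unsigned int major, unsigned int minor)
{
    char      key[64];
    device_t *dev = NULL;
    int       inst;

    snprintf(key, sizeof(key), "%u:%u", major, minor);
    if (pmdaCacheLookupName(INDOM(DEVT_INDOM), key, &inst, (void **)&dev) < 0)
        return NULL;
    return dev;    /* dev->name holds the external (diskstats) device name */
}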
/*
 * This separate indom provides the addresses for all interfaces including
 * aliases (e.g. eth0, eth0:0, eth0:1, etc) - this is what ifconfig does.
 */
void
clear_net_addr_indom(pmInDom indom)
{
    net_addr_t *p;
    int inst;

    for (pmdaCacheOp(indom, PMDA_CACHE_WALK_REWIND);;) {
        if ((inst = pmdaCacheOp(indom, PMDA_CACHE_WALK_NEXT)) < 0)
            break;
        if (!pmdaCacheLookup(indom, inst, NULL, (void **)&p) || !p)
            continue;
        p->has_inet = 0;
        p->has_ipv6 = 0;
        p->has_hw = 0;
    }
    pmdaCacheOp(indom, PMDA_CACHE_INACTIVE);
}
/*
 * Primary driver interface - finds any/all mount points for a given
 * cgroup subsystem and iteratively expands all of the cgroups below
 * them.  The setup callback inactivates each indom's contents, while
 * the refresh callback is called once per cgroup (with path/name) -
 * its role is to refresh the values for that one named cgroup.
 */
void
refresh_cgroups(const char *subsys, const char *container, int length,
                cgroup_setup_t setup, cgroup_refresh_t refresh)
{
    int sts;
    filesys_t *fs;
    pmInDom mounts = INDOM(CGROUP_MOUNTS_INDOM);

    pmdaCacheOp(mounts, PMDA_CACHE_WALK_REWIND);
    while ((sts = pmdaCacheOp(mounts, PMDA_CACHE_WALK_NEXT)) != -1) {
        if (!pmdaCacheLookup(mounts, sts, NULL, (void **)&fs))
            continue;
        if (scan_filesys_options(fs->options, subsys) == NULL)
            continue;
        setup();
        cgroup_scan(fs->path, "", refresh, container, length);
    }
}
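A sketch of what a matching callback pair for this driver might look like, assuming the callback typedefs are roughly void (*cgroup_setup_t)(void) and void (*cgroup_refresh_t)(const char *path, const char *name); the indom name, payload struct and helper names here are illustrative, not taken from the source above:

/* illustrative payload and indom, assuming the usual PMDA headers */
typedef struct { unsigned long long usage; } cgroup_mem_t;

static void
example_setup(void)
{
    /* mark all previously-known cgroups inactive before the rescan */
    pmdaCacheOp(INDOM(CGROUP_MEMORY_INDOM), PMDA_CACHE_INACTIVE);
}

static void
example_refresh(const char *path, const char *name)
{
    pmInDom indom = INDOM(CGROUP_MEMORY_INDOM);
    cgroup_mem_t *mem = NULL;
    int inst;

    if (pmdaCacheLookupName(indom, name, &inst, (void **)&mem) < 0 || mem == NULL)
        mem = calloc(1, sizeof(*mem));
    if (mem == NULL)
        return;
    /* ... read values below "path" into mem->usage here ... */
    pmdaCacheStore(indom, PMDA_CACHE_ADD, name, mem);
}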
int
proc_partitions_fetch(pmdaMetric *mdesc, unsigned int inst, pmAtomValue *atom)
{
    __pmID_int *idp = (__pmID_int *)&(mdesc->m_desc.pmid);
    int i;
    partitions_entry_t *p = NULL;

    if (inst != PM_IN_NULL) {
        if (pmdaCacheLookup(mdesc->m_desc.indom, inst, NULL, (void **)&p) < 0)
            return PM_ERR_INST;
    }

    switch (idp->cluster) {
    case CLUSTER_STAT:
        /*
         * disk.{dev,all} remain in CLUSTER_STAT for backward compatibility
         */
        switch (idp->item) {
        case 4: /* disk.dev.read */
            if (p == NULL)
                return PM_ERR_INST;
            _pm_assign_ulong(atom, p->rd_ios);
            break;
        case 5: /* disk.dev.write */
            if (p == NULL)
                return PM_ERR_INST;
            _pm_assign_ulong(atom, p->wr_ios);
            break;
        case 6: /* disk.dev.blkread */
            if (p == NULL)
                return PM_ERR_INST;
            atom->ull = p->rd_sectors;
            break;
        case 7: /* disk.dev.blkwrite */
            if (p == NULL)
                return PM_ERR_INST;
            atom->ull = p->wr_sectors;
            break;
        case 28: /* disk.dev.total */
            if (p == NULL)
                return PM_ERR_INST;
            atom->ull = p->rd_ios + p->wr_ios;
            break;
        case 36: /* disk.dev.blktotal */
            if (p == NULL)
                return PM_ERR_INST;
            atom->ull = p->rd_sectors + p->wr_sectors;
            break;
        case 38: /* disk.dev.read_bytes */
            if (p == NULL)
                return PM_ERR_INST;
            atom->ull = p->rd_sectors / 2;
            break;
        case 39: /* disk.dev.write_bytes */
            if (p == NULL)
                return PM_ERR_INST;
            atom->ull = p->wr_sectors / 2;
            break;
        case 40: /* disk.dev.total_bytes */
            if (p == NULL)
                return PM_ERR_INST;
            atom->ull = (p->rd_sectors + p->wr_sectors) / 2;
            break;
        case 46: /* disk.dev.avactive ... already msec from /proc/diskstats */
            if (p == NULL)
                return PM_ERR_INST;
            atom->ul = p->io_ticks;
            break;
        case 47: /* disk.dev.aveq ... already msec from /proc/diskstats */
            if (p == NULL)
                return PM_ERR_INST;
            atom->ul = p->aveq;
            break;
        case 49: /* disk.dev.read_merge */
            if (p == NULL)
                return PM_ERR_INST;
            _pm_assign_ulong(atom, p->rd_merges);
            break;
        case 50: /* disk.dev.write_merge */
            if (p == NULL)
                return PM_ERR_INST;
            _pm_assign_ulong(atom, p->wr_merges);
            break;
        case 59: /* disk.dev.scheduler */
            if (p == NULL)
                return PM_ERR_INST;
            atom->cp = _pm_ioscheduler(p->namebuf);
            break;
        case 72: /* disk.dev.read_rawactive already ms from /proc/diskstats */
            if (p == NULL)
                return PM_ERR_INST;
            atom->ul = p->rd_ticks;
            break;
        case 73: /* disk.dev.write_rawactive already ms from /proc/diskstats */
            if (p == NULL)
                return PM_ERR_INST;
            atom->ul = p->wr_ticks;
            break;
        default:
            /* disk.all.* is a singular instance domain */
            atom->ull = 0;
            for (pmdaCacheOp(INDOM(DISK_INDOM), PMDA_CACHE_WALK_REWIND);;) {
                if ((i = pmdaCacheOp(INDOM(DISK_INDOM), PMDA_CACHE_WALK_NEXT)) < 0)
                    break;
                if (!pmdaCacheLookup(INDOM(DISK_INDOM), i, NULL, (void **)&p) || !p)
                    continue;
                switch (idp->item) {
                case 24: /* disk.all.read */
                    atom->ull += p->rd_ios;
                    break;
                case 25: /* disk.all.write */
                    atom->ull += p->wr_ios;
                    break;
                case 26: /* disk.all.blkread */
                    atom->ull += p->rd_sectors;
                    break;
                case 27: /* disk.all.blkwrite */
                    atom->ull += p->wr_sectors;
                    break;
                case 29: /* disk.all.total */
                    atom->ull += p->rd_ios + p->wr_ios;
                    break;
                case 37: /* disk.all.blktotal */
                    atom->ull += p->rd_sectors + p->wr_sectors;
                    break;
                case 41: /* disk.all.read_bytes */
                    atom->ull += p->rd_sectors / 2;
                    break;
                case 42: /* disk.all.write_bytes */
                    atom->ull += p->wr_sectors / 2;
                    break;
                case 43: /* disk.all.total_bytes */
                    atom->ull += (p->rd_sectors + p->wr_sectors) / 2;
                    break;
                case 44: /* disk.all.avactive ... already msec from /proc/diskstats */
                    atom->ull += p->io_ticks;
                    break;
                case 45: /* disk.all.aveq ... already msec from /proc/diskstats */
                    atom->ull += p->aveq;
                    break;
                case 51: /* disk.all.read_merge */
                    atom->ull += p->rd_merges;
                    break;
                case 52: /* disk.all.write_merge */
                    atom->ull += p->wr_merges;
                    break;
                case 74: /* disk.all.read_rawactive ... already msec from /proc/diskstats */
                    atom->ull += p->rd_ticks;
                    break;
                case 75: /* disk.all.write_rawactive ... already msec from /proc/diskstats */
                    atom->ull += p->wr_ticks;
                    break;
                default:
                    return PM_ERR_PMID;
                }
            } /* loop */
        }
        break;

    case CLUSTER_PARTITIONS:
        if (p == NULL)
            return PM_ERR_INST;
        switch (idp->item) {
        /* disk.partitions */
        case 0: /* disk.partitions.read */
            atom->ul = p->rd_ios;
            break;
        case 1: /* disk.partitions.write */
            atom->ul = p->wr_ios;
            break;
        case 2: /* disk.partitions.total */
            atom->ul = p->wr_ios + p->rd_ios;
            break;
        case 3: /* disk.partitions.blkread */
            atom->ul = p->rd_sectors;
            break;
        case 4: /* disk.partitions.blkwrite */
            atom->ul = p->wr_sectors;
            break;
        case 5: /* disk.partitions.blktotal */
            atom->ul = p->rd_sectors + p->wr_sectors;
            break;
        case 6: /* disk.partitions.read_bytes */
            atom->ul = p->rd_sectors / 2;
            break;
        case 7: /* disk.partitions.write_bytes */
            atom->ul = p->wr_sectors / 2;
            break;
        case 8: /* disk.partitions.total_bytes */
            atom->ul = (p->rd_sectors + p->wr_sectors) / 2;
            break;
        default:
            return PM_ERR_PMID;
        }
        break;

    case CLUSTER_DM:
        if (p == NULL)
            return PM_ERR_INST;
        switch (idp->item) {
        case 0: /* disk.dm.read */
            atom->ull = p->rd_ios;
            break;
        case 1: /* disk.dm.write */
            atom->ull = p->wr_ios;
            break;
        case 2: /* disk.dm.total */
            atom->ull = p->rd_ios + p->wr_ios;
            break;
        case 3: /* disk.dm.blkread */
            atom->ull = p->rd_sectors;
            break;
        case 4: /* disk.dm.blkwrite */
            atom->ull = p->wr_sectors;
            break;
        case 5: /* disk.dm.blktotal */
            atom->ull = p->rd_sectors + p->wr_sectors;
            break;
        case 6: /* disk.dm.read_bytes */
            atom->ull = p->rd_sectors / 2;
            break;
        case 7: /* disk.dm.write_bytes */
            atom->ull = p->wr_sectors / 2;
            break;
        case 8: /* disk.dm.total_bytes */
            atom->ull = (p->rd_sectors + p->wr_sectors) / 2;
            break;
        case 9: /* disk.dm.read_merge */
            atom->ull = p->rd_merges;
            break;
        case 10: /* disk.dm.write_merge */
            atom->ull = p->wr_merges;
            break;
        case 11: /* disk.dm.avactive */
            atom->ull = p->io_ticks;
            break;
        case 12: /* disk.dm.aveq */
            atom->ull = p->aveq;
            break;
        case 13: /* hinv.map.dmname */
            atom->cp = p->dmname;
            break;
        case 14: /* disk.dm.read_rawactive */
            atom->ul = p->rd_ticks;
            break;
        case 15: /* disk.dm.write_rawactive */
            atom->ul = p->wr_ticks;
            break;
        default:
            return PM_ERR_PMID;
        }
        break;

    default: /* switch cluster */
        return PM_ERR_PMID;
    }

    return 1;
}
int
pmdaInstance(pmInDom indom, int inst, char *name, __pmInResult **result, pmdaExt *pmda)
{
    int          i;
    int          namelen;
    int          err = 0;
    __pmInResult *res;
    pmdaIndom    *idp = NULL;    /* initialize to pander to gcc */
    int          have_cache = 0;
    int          myinst;
    char         *np;

    if (pmdaCacheOp(indom, PMDA_CACHE_CHECK)) {
        have_cache = 1;
    } else {
        /*
         * check this is an instance domain we know about -- code below
         * assumes this test is complete
         */
        for (i = 0; i < pmda->e_nindoms; i++) {
            if (pmda->e_indoms[i].it_indom == indom)
                break;
        }
        if (i >= pmda->e_nindoms)
            return PM_ERR_INDOM;
        idp = &pmda->e_indoms[i];
    }

    if ((res = (__pmInResult *)malloc(sizeof(*res))) == NULL)
        return -oserror();
    res->indom = indom;

    if (name == NULL && inst == PM_IN_NULL)
        res->numinst = __pmdaCntInst(indom, pmda);
    else
        res->numinst = 1;

    if (inst == PM_IN_NULL) {
        if ((res->instlist = (int *)malloc(res->numinst * sizeof(res->instlist[0]))) == NULL) {
            free(res);
            return -oserror();
        }
    } else
        res->instlist = NULL;

    if (name == NULL) {
        if ((res->namelist = (char **)malloc(res->numinst * sizeof(res->namelist[0]))) == NULL) {
            __pmFreeInResult(res);
            return -oserror();
        }
        /* initialize every entry so a partially built result can be freed */
        for (i = 0; i < res->numinst; i++)
            res->namelist[i] = NULL;
    } else
        res->namelist = NULL;

    if (name == NULL && inst == PM_IN_NULL) {
        /* return inst and name for everything */
        if (have_cache) {
            pmdaCacheOp(indom, PMDA_CACHE_WALK_REWIND);
            i = 0;
            while (i < res->numinst &&
                   (myinst = pmdaCacheOp(indom, PMDA_CACHE_WALK_NEXT)) != -1) {
                if (pmdaCacheLookup(indom, myinst, &np, NULL) != PMDA_CACHE_ACTIVE)
                    continue;
                res->instlist[i] = myinst;
                if ((res->namelist[i++] = strdup(np)) == NULL) {
                    __pmFreeInResult(res);
                    return -oserror();
                }
            }
        } else {
            for (i = 0; i < res->numinst; i++) {
                res->instlist[i] = idp->it_set[i].i_inst;
                if ((res->namelist[i] = strdup(idp->it_set[i].i_name)) == NULL) {
                    __pmFreeInResult(res);
                    return -oserror();
                }
            }
        }
    }
    else if (name == NULL) {
        /* given an inst, return the name */
        if (have_cache) {
            if (pmdaCacheLookup(indom, inst, &np, NULL) == PMDA_CACHE_ACTIVE) {
                if ((res->namelist[0] = strdup(np)) == NULL) {
                    __pmFreeInResult(res);
                    return -oserror();
                }
            } else
                err = 1;
        } else {
            for (i = 0; i < idp->it_numinst; i++) {
                if (inst == idp->it_set[i].i_inst) {
                    if ((res->namelist[0] = strdup(idp->it_set[i].i_name)) == NULL) {
                        __pmFreeInResult(res);
                        return -oserror();
                    }
                    break;
                }
            }
            if (i == idp->it_numinst)
                err = 1;
        }
    }
    else if (inst == PM_IN_NULL && (namelen = (int)strlen(name)) > 0) {
        if (have_cache) {
            if (pmdaCacheLookupName(indom, name, &myinst, NULL) == PMDA_CACHE_ACTIVE)
                res->instlist[0] = myinst;
            else
                err = 1;
        } else {
            /* given a name, return an inst.  If the name contains spaces,
             * only exact matches are good enough for us, otherwise, we're
             * prepared to accept a match up to the first space in the
             * instance name on the assumption that pmdas will play by the
             * rules and guarantee the first "word" in the instance name
             * is unique.  That allows for things like "1 5 15" to match
             * instances for kernel.all.load["1 minute","5 minute","15 minutes"]
             */
            char *nspace = strchr(name, ' ');

            for (i = 0; i < idp->it_numinst; i++) {
                char *instname = idp->it_set[i].i_name;
                if (strcmp(name, instname) == 0) {
                    /* accept an exact match */
#ifdef PCP_DEBUG
                    if (pmDebug & DBG_TRACE_LIBPMDA) {
                        fprintf(stderr, "pmdaInstance: exact match name=%s id=%d\n",
                                name, idp->it_set[i].i_inst);
                    }
#endif
                    res->instlist[0] = idp->it_set[i].i_inst;
                    break;
                } else if (nspace == NULL) {
                    /* all of name must match instname up to the first
                     * space in instname.
                     */
                    char *p = strchr(instname, ' ');
                    if (p != NULL) {
                        int len = (int)(p - instname);
                        if (namelen == len && strncmp(name, instname, len) == 0) {
#ifdef PCP_DEBUG
                            if (pmDebug & DBG_TRACE_LIBPMDA) {
                                fprintf(stderr, "pmdaInstance: matched argument name=\"%s\" with indom id=%d name=\"%s\" len=%d\n",
                                        name, idp->it_set[i].i_inst, instname, len);
                            }
#endif
                            res->instlist[0] = idp->it_set[i].i_inst;
                            break;
                        }
                    }
                }
            }
            if (i == idp->it_numinst)
                err = 1;
        }
    }
    else
        err = 1;

    if (err == 1) {
        /* bogus arguments or instance id/name */
        __pmFreeInResult(res);
        return PM_ERR_INST;
    }

    *result = res;
    return 0;
}
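For the have_cache path above, a PMDA only has to keep its cache populated and pmdaInstance can answer all three query styles from it. A minimal sketch of priming such a cache during a refresh (the helper name is illustrative); pmdaCacheStore returns the assigned instance identifier on success:

/* illustrative: (re)activate one named instance with its payload */
static int
prime_cache_entry(pmInDom indom, const char *name, void *payload)
{
    int sts = pmdaCacheStore(indom, PMDA_CACHE_ADD, name, payload);

    if (sts < 0)
        fprintf(stderr, "pmdaCacheStore(%s, %s) failed: %s\n",
                pmInDomStr(indom), name, pmErrStr(sts));
    return sts;    /* non-negative: the instance identifier assigned */
}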
int
refresh_proc_slabinfo(pmInDom slab_indom, proc_slabinfo_t *slabinfo)
{
    slab_cache_t sbuf, *s;
    char buf[BUFSIZ];
    char name[128];
    char *w, *p;
    FILE *fp;
    int i, sts = 0, indom_change = 0;
    static int major_version = -1;
    static int minor_version = 0;

    for (pmdaCacheOp(slab_indom, PMDA_CACHE_WALK_REWIND);;) {
        if ((i = pmdaCacheOp(slab_indom, PMDA_CACHE_WALK_NEXT)) < 0)
            break;
        if (!pmdaCacheLookup(slab_indom, i, NULL, (void **)&s) || !s)
            continue;
        s->seen = 0;
    }
    pmdaCacheOp(slab_indom, PMDA_CACHE_INACTIVE);

    if ((fp = linux_statsfile("/proc/slabinfo", buf, sizeof(buf))) == NULL)
        return -oserror();

    /* skip header */
    if (fgets(buf, sizeof(buf), fp) == NULL) {
        /* oops, no header! */
        fclose(fp);
        return -oserror();
    }

    if (major_version < 0) {
        major_version = minor_version = 0;
        if (strstr(buf, "slabinfo - version:")) {
            for (p = buf; *p; p++) {
                if (isdigit((int)*p)) {
                    sscanf(p, "%d.%d", &major_version, &minor_version);
                    break;
                }
            }
        }
    }

    while (fgets(buf, sizeof(buf), fp) != NULL) {
        /* try to convert whitespace in cache names to underscores, */
        /* by looking for alphabetic chars which follow whitespace. */
        if (buf[0] == '#')
            continue;
        for (w = NULL, p = buf; *p != '\0'; p++) {
            if (isspace((int)*p))
                w = p;
            else if (isdigit((int)*p))
                break;
            else if (isalpha((int)*p) && w) {
                for (; w && w != p; w++)
                    *w = '_';
                w = NULL;
            }
        }

        memset(&sbuf, 0, sizeof(slab_cache_t));

        if (major_version == 1 && minor_version == 0) {
            /*
             * <name> <active_objs> <num_objs>
             * (generally 2.2 kernels)
             */
            i = sscanf(buf, "%s %lu %lu", name,
                       (unsigned long *)&sbuf.num_active_objs,
                       (unsigned long *)&sbuf.total_objs);
            if (i != 3) {
                sts = PM_ERR_APPVERSION;
                break;
            }
        } else if (major_version == 1 && minor_version == 1) {
            /*
             * <name> <active_objs> <num_objs> <objsize> <active_slabs> <num_slabs> <pagesperslab>
             * (generally 2.4 kernels)
             */
            i = sscanf(buf, "%s %lu %lu %u %u %u %u", name,
                       (unsigned long *)&sbuf.num_active_objs,
                       (unsigned long *)&sbuf.total_objs,
                       &sbuf.object_size,
                       &sbuf.num_active_slabs,
                       &sbuf.total_slabs,
                       &sbuf.pages_per_slab);
            if (i != 7) {
                sts = PM_ERR_APPVERSION;
                break;
            }
            sbuf.total_size = sbuf.pages_per_slab * sbuf.num_active_slabs;
            sbuf.total_size <<= _pm_pageshift;
        } else if (major_version == 2 && minor_version >= 0 && minor_version <= 1) {
            /*
             * <name> <active_objs> <num_objs> <objsize> <objperslab> <pagesperslab> ... and more
             * (generally for kernels up to at least 2.6.11)
             */
            i = sscanf(buf, "%s %lu %lu %u %u %u", name,
                       (unsigned long *)&sbuf.num_active_objs,
                       (unsigned long *)&sbuf.total_objs,
                       &sbuf.object_size,
                       &sbuf.objects_per_slab,
                       &sbuf.pages_per_slab);
            if (i != 6) {
                sts = PM_ERR_APPVERSION;
                break;
            }
            sbuf.total_size = sbuf.pages_per_slab * sbuf.num_active_objs;
            sbuf.total_size <<= _pm_pageshift;
            sbuf.total_size /= sbuf.objects_per_slab;
        } else {
            /* no support */
            sts = PM_ERR_APPVERSION;
            break;
        }

        sts = pmdaCacheLookupName(slab_indom, name, &i, (void **)&s);
        if (sts < 0 || !s) {
            /* new cache has appeared */
            if ((s = calloc(1, sizeof(*s))) == NULL)
                continue;
#ifdef PCP_DEBUG
            if (pmDebug & DBG_TRACE_LIBPMDA)
                fprintf(stderr, "refresh_slabinfo: added \"%s\"\n", name);
#endif
            indom_change++;
        }

        s->num_active_objs = sbuf.num_active_objs;
        s->total_objs = sbuf.total_objs;
        s->object_size = sbuf.object_size;
        s->num_active_slabs = sbuf.num_active_slabs;
        s->total_slabs = sbuf.total_slabs;
        s->pages_per_slab = sbuf.pages_per_slab;
        s->objects_per_slab = sbuf.objects_per_slab;
        s->total_size = sbuf.total_size;
        s->seen = major_version * 10 + minor_version;

        pmdaCacheStore(slab_indom, PMDA_CACHE_ADD, name, s);
    }
    fclose(fp);

    if (indom_change)
        pmdaCacheOp(slab_indom, PMDA_CACHE_SAVE);

    return sts;
}
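The PMDA_CACHE_SAVE at the end persists the name-to-instance mappings, so slab caches keep stable instance numbers across agent restarts. The counterpart is loading the saved mappings at PMDA initialisation; a minimal sketch, assuming the same slab indom variable as above and the usual PMDA headers:

/* sketch: restore previously assigned instance identifiers at init time */
static void
example_load_slab_indom(pmInDom slab_indom)
{
    int sts;

    if ((sts = pmdaCacheOp(slab_indom, PMDA_CACHE_LOAD)) < 0)
        fprintf(stderr, "slab indom cache load: %s\n", pmErrStr(sts));
}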
int
do_netif_metrics(pmdaMetric *mdesc, unsigned int inst, pmAtomValue *atom)
{
    struct if_msghdr *ifm;
    int sts;

    if (!valid)
        return 0;

    if (inst != PM_IN_NULL) {
        /*
         * per-network interface metrics
         */
        sts = pmdaCacheLookup(indomtab[NETIF_INDOM].it_indom, inst, NULL, (void **)&ifm);
        if (sts == PMDA_CACHE_ACTIVE) {
            sts = 1;
            /* cluster and domain already checked, just need item ... */
            switch (pmid_item(mdesc->m_desc.pmid)) {
            case 0:  /* network.interface.mtu */
                atom->ull = ifm->ifm_data.ifi_mtu;
                break;
            case 1:  /* network.interface.up */
                atom->ul = (ifm->ifm_flags & IFF_UP) == IFF_UP;
                break;
            case 2:  /* network.interface.baudrate */
                atom->ull = ifm->ifm_data.ifi_baudrate;
                break;
            case 3:  /* network.interface.in.bytes */
                atom->ull = ifm->ifm_data.ifi_ibytes;
                break;
            case 4:  /* network.interface.in.packets */
                atom->ull = ifm->ifm_data.ifi_ipackets;
                break;
            case 5:  /* network.interface.in.mcasts */
                atom->ull = ifm->ifm_data.ifi_imcasts;
                break;
            case 6:  /* network.interface.in.errors */
                atom->ull = ifm->ifm_data.ifi_ierrors;
                break;
            case 7:  /* network.interface.in.drops */
                atom->ull = ifm->ifm_data.ifi_iqdrops;
                break;
            case 8:  /* network.interface.out.bytes */
                atom->ull = ifm->ifm_data.ifi_obytes;
                break;
            case 9:  /* network.interface.out.packets */
                atom->ull = ifm->ifm_data.ifi_opackets;
                break;
            case 10: /* network.interface.out.mcasts */
                atom->ull = ifm->ifm_data.ifi_omcasts;
                break;
            case 11: /* network.interface.out.errors */
                atom->ull = ifm->ifm_data.ifi_oerrors;
                break;
            case 12: /* network.interface.out.collisions */
                atom->ull = ifm->ifm_data.ifi_collisions;
                break;
            case 13: /* network.interface.total.bytes */
                atom->ull = ifm->ifm_data.ifi_ibytes + ifm->ifm_data.ifi_obytes;
                break;
            case 14: /* network.interface.total.packets */
                atom->ull = ifm->ifm_data.ifi_ipackets + ifm->ifm_data.ifi_opackets;
                break;
            case 15: /* network.interface.total.mcasts */
                atom->ull = ifm->ifm_data.ifi_imcasts + ifm->ifm_data.ifi_omcasts;
                break;
            case 16: /* network.interface.total.errors */
                atom->ull = ifm->ifm_data.ifi_ierrors + ifm->ifm_data.ifi_oerrors;
                break;
            default:
                sts = PM_ERR_PMID;
                break;
            }
        } else
            sts = 0;
    } else {
        /*
         * most network interface metrics don't have an instance domain
         *
         * cluster and domain already checked, just need item ...
         */
        switch (pmid_item(mdesc->m_desc.pmid)) {
        case 17: /* hinv.interface */
            atom->ul = valid;
            sts = 1;
            break;
        default:
            sts = PM_ERR_INST;
            break;
        }
    }
    return sts;
}
/*
 * We use /proc/stat as a single source of truth regarding online/offline
 * state for CPUs (its per-CPU stats are for online CPUs only).
 * This drives the contents of the CPU indom for all per-CPU metrics, so
 * it is important to ensure this refresh routine is called first before
 * refreshing any other per-CPU metrics (e.g. interrupts, softnet).
 */
int
refresh_proc_stat(proc_stat_t *proc_stat)
{
    pernode_t *np;
    percpu_t *cp;
    pmInDom cpus, nodes;
    char buf[MAXPATHLEN], *name;
    int n = 0, i, size;

    static int fd = -1;    /* kept open until exit(), unless testing */
    static char *statbuf;
    static int maxstatbuf;
    static char **bufindex;
    static int nbufindex;
    static int maxbufindex;

    cpu_node_setup();
    cpus = INDOM(CPU_INDOM);
    pmdaCacheOp(cpus, PMDA_CACHE_INACTIVE);
    nodes = INDOM(NODE_INDOM);

    /* reset per-node aggregate CPU utilisation stats */
    for (pmdaCacheOp(nodes, PMDA_CACHE_WALK_REWIND);;) {
        if ((i = pmdaCacheOp(nodes, PMDA_CACHE_WALK_NEXT)) < 0)
            break;
        if (!pmdaCacheLookup(nodes, i, NULL, (void **)&np) || !np)
            continue;
        memset(&np->stat, 0, sizeof(np->stat));
    }

    /* in test mode we replace procfs files (keeping fd open thwarts that) */
    if (fd >= 0 && (linux_test_mode & LINUX_TEST_STATSPATH)) {
        close(fd);
        fd = -1;
    }

    if (fd >= 0) {
        if (lseek(fd, 0, SEEK_SET) < 0)
            return -oserror();
    } else {
        snprintf(buf, sizeof(buf), "%s/proc/stat", linux_statspath);
        if ((fd = open(buf, O_RDONLY)) < 0)
            return -oserror();
    }

    for (;;) {
        while (n >= maxstatbuf) {
            size = maxstatbuf + 512;
            if ((statbuf = (char *)realloc(statbuf, size)) == NULL)
                return -ENOMEM;
            maxstatbuf = size;
        }
        size = (statbuf + maxstatbuf) - (statbuf + n);
        if ((i = read(fd, statbuf + n, size)) > 0)
            n += i;
        else
            break;
    }
    statbuf[n] = '\0';

    if (bufindex == NULL) {
        size = 16 * sizeof(char *);
        if ((bufindex = (char **)malloc(size)) == NULL)
            return -ENOMEM;
        maxbufindex = 16;
    }

    nbufindex = 0;
    bufindex[nbufindex] = statbuf;
    for (i = 0; i < n; i++) {
        if (statbuf[i] == '\n' || statbuf[i] == '\0') {
            statbuf[i] = '\0';
            if (nbufindex + 1 >= maxbufindex) {
                size = (maxbufindex + 4) * sizeof(char *);
                if ((bufindex = (char **)realloc(bufindex, size)) == NULL)
                    return -ENOMEM;
                maxbufindex += 4;
            }
            bufindex[++nbufindex] = statbuf + i + 1;
        }
    }

#define ALLCPU_FMT "cpu %llu %llu %llu %llu %llu %llu %llu %llu %llu %llu"
    n = sscanf((const char *)bufindex[0], ALLCPU_FMT,
               &proc_stat->all.user, &proc_stat->all.nice,
               &proc_stat->all.sys, &proc_stat->all.idle,
               &proc_stat->all.wait, &proc_stat->all.irq,
               &proc_stat->all.sirq, &proc_stat->all.steal,
               &proc_stat->all.guest, &proc_stat->all.guest_nice);

#define PERCPU_FMT "cpu%u %llu %llu %llu %llu %llu %llu %llu %llu %llu %llu"
    /*
     * per-CPU stats
     * e.g. cpu0 95379 4 20053 6502503
     * 2.6 kernels have 3 additional fields for wait, irq and soft_irq.
     * More recent (2008) 2.6 kernels have an extra field for guest and
     * also (since 2009) guest_nice.
     * In the single-CPU system case, don't bother scanning, use "all";
     * this handles non-SMP kernels with no line starting with "cpu0".
     */
    if ((size = pmdaCacheOp(cpus, PMDA_CACHE_SIZE)) == 1) {
        pmdaCacheLookup(cpus, 0, &name, (void **)&cp);
        memcpy(&cp->stat, &proc_stat->all, sizeof(cp->stat));
        pmdaCacheStore(cpus, PMDA_CACHE_ADD, name, (void *)cp);
        pmdaCacheLookup(nodes, 0, NULL, (void **)&np);
        memcpy(&np->stat, &proc_stat->all, sizeof(np->stat));
    } else {
        for (n = 0; n < nbufindex; n++) {
            if (strncmp("cpu", bufindex[n], 3) != 0 || !isdigit((int)bufindex[n][3]))
                continue;
            cp = NULL;
            np = NULL;
            i = atoi(&bufindex[n][3]);    /* extract CPU identifier */
            if (pmdaCacheLookup(cpus, i, &name, (void **)&cp) < 0 || !cp)
                continue;
            memset(&cp->stat, 0, sizeof(cp->stat));
            sscanf(bufindex[n], PERCPU_FMT, &i,
                   &cp->stat.user, &cp->stat.nice, &cp->stat.sys,
                   &cp->stat.idle, &cp->stat.wait, &cp->stat.irq,
                   &cp->stat.sirq, &cp->stat.steal, &cp->stat.guest,
                   &cp->stat.guest_nice);
            pmdaCacheStore(cpus, PMDA_CACHE_ADD, name, (void *)cp);

            /* update per-node aggregate CPU utilisation stats as well */
            if (pmdaCacheLookup(nodes, cp->nodeid, NULL, (void **)&np) < 0)
                continue;
            np->stat.user += cp->stat.user;
            np->stat.nice += cp->stat.nice;
            np->stat.sys += cp->stat.sys;
            np->stat.idle += cp->stat.idle;
            np->stat.wait += cp->stat.wait;
            np->stat.irq += cp->stat.irq;
            np->stat.sirq += cp->stat.sirq;
            np->stat.steal += cp->stat.steal;
            np->stat.guest += cp->stat.guest;
            np->stat.guest_nice += cp->stat.guest_nice;
        }
    }
    i = size;

#define PAGE_FMT "page %u %u"    /* NB: moved to /proc/vmstat in 2.6 kernels */
    if ((i = find_line_format(PAGE_FMT, 5, bufindex, nbufindex, i)) >= 0)
        sscanf((const char *)bufindex[i], PAGE_FMT,
               &proc_stat->page[0], &proc_stat->page[1]);

#define SWAP_FMT "swap %u %u"    /* NB: moved to /proc/vmstat in 2.6 kernels */
    if ((i = find_line_format(SWAP_FMT, 5, bufindex, nbufindex, i)) >= 0)
        sscanf((const char *)bufindex[i], SWAP_FMT,
               &proc_stat->swap[0], &proc_stat->swap[1]);

#define INTR_FMT "intr %llu"    /* (export 1st 'total interrupts' value only) */
    if ((i = find_line_format(INTR_FMT, 5, bufindex, nbufindex, i)) >= 0)
        sscanf((const char *)bufindex[i], INTR_FMT, &proc_stat->intr);

#define CTXT_FMT "ctxt %llu"
    if ((i = find_line_format(CTXT_FMT, 5, bufindex, nbufindex, i)) >= 0)
        sscanf((const char *)bufindex[i], CTXT_FMT, &proc_stat->ctxt);

#define BTIME_FMT "btime %lu"
    if ((i = find_line_format(BTIME_FMT, 6, bufindex, nbufindex, i)) >= 0)
        sscanf((const char *)bufindex[i], BTIME_FMT, &proc_stat->btime);

#define PROCESSES_FMT "processes %lu"
    if ((i = find_line_format(PROCESSES_FMT, 10, bufindex, nbufindex, i)) >= 0)
        sscanf((const char *)bufindex[i], PROCESSES_FMT, &proc_stat->processes);

#define RUNNING_FMT "procs_running %lu"
    if ((i = find_line_format(RUNNING_FMT, 14, bufindex, nbufindex, i)) >= 0)
        sscanf((const char *)bufindex[i], RUNNING_FMT, &proc_stat->procs_running);

#define BLOCKED_FMT "procs_blocked %lu"
    if ((i = find_line_format(BLOCKED_FMT, 14, bufindex, nbufindex, i)) >= 0)
        sscanf((const char *)bufindex[i], BLOCKED_FMT, &proc_stat->procs_blocked);

    /* success */
    return 0;
}
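Given the ordering requirement stated in the header comment, a caller would refresh /proc/stat before any other per-CPU source. A hedged sketch of such a dispatcher; the flag parameters and the second refresh routine are hypothetical, not from the source above:

/* hypothetical dispatcher honouring the "CPU indom refreshed first" rule */
static void
example_refresh_metrics(proc_stat_t *proc_stat, int want_stat, int want_percpu_extras)
{
    if (want_stat || want_percpu_extras)
        refresh_proc_stat(proc_stat);       /* establishes the CPU indom contents */
    if (want_percpu_extras)
        example_refresh_interrupts();       /* hypothetical; may now use CPU instances */
}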