Example #1
void
refresh_cpuset(const char *path, const char *name)
{
    pmInDom indom = INDOM(CGROUP_CPUSET_INDOM);
    cgroup_cpuset_t *cpuset;
    char file[MAXPATHLEN];
    int sts;

    sts = pmdaCacheLookupName(indom, name, NULL, (void **)&cpuset);
    if (sts == PMDA_CACHE_ACTIVE)
	return;
    if (sts != PMDA_CACHE_INACTIVE) {
	cpuset = (cgroup_cpuset_t *)malloc(sizeof(cgroup_cpuset_t));
	if (!cpuset)
	    return;
    }
    snprintf(file, sizeof(file), "%s/cpuset.cpus", path);
    cpuset->cpus = read_oneline_string(file);
    snprintf(file, sizeof(file), "%s/cpuset.mems", path);
    cpuset->mems = read_oneline_string(file);
    pmdaCacheStore(indom, PMDA_CACHE_ADD, name, cpuset);
}
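refresh_cpuset() relies on a read_oneline_string() helper that this snippet does not include. A minimal sketch of what such a helper could look like, assuming it simply returns a heap-allocated copy of the file's single line (the PMDA's real helper may differ):

static char *
read_oneline_string(const char *file)
{
    char buffer[4096], *p;
    FILE *fp;

    if ((fp = fopen(file, "r")) == NULL)
	return NULL;
    /* read the one-line contents and strip the trailing newline */
    if (fgets(buffer, sizeof(buffer), fp) == NULL)
	buffer[0] = '\0';
    fclose(fp);
    if ((p = strchr(buffer, '\n')) != NULL)
	*p = '\0';
    return strdup(buffer);
}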
Example #2
void
refresh_cgroup_subsys(void)
{
    pmInDom subsys = INDOM(CGROUP_SUBSYS_INDOM);
    char buf[4096];
    FILE *fp;

    pmdaCacheOp(subsys, PMDA_CACHE_INACTIVE);
    if ((fp = proc_statsfile("/proc/cgroups", buf, sizeof(buf))) == NULL)
	return;

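    /*
     * Each line of /proc/cgroups (after the '#' header) has the form
     *	subsys_name  hierarchy  num_cgroups  enabled
     * e.g. "cpuset 2 4 1", which is what the sscanf() below expects.
     */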
    while (fgets(buf, sizeof(buf), fp) != NULL) {
	unsigned int hierarchy, num_cgroups, enabled;
	char name[MAXPATHLEN];
	subsys_t *ssp;
	int sts;

	/* skip lines starting with hash (header) */
	if (buf[0] == '#')
	    continue;
	if (sscanf(buf, "%s %u %u %u", &name[0],
			&hierarchy, &num_cgroups, &enabled) < 4)
	    continue;
	sts = pmdaCacheLookupName(subsys, name, NULL, (void **)&ssp);
	if (sts != PMDA_CACHE_INACTIVE) {
	    if ((ssp = (subsys_t *)malloc(sizeof(subsys_t))) == NULL)
		continue;
	}
	ssp->hierarchy = hierarchy;
	ssp->num_cgroups = num_cgroups;
	ssp->enabled = enabled;
	pmdaCacheStore(subsys, PMDA_CACHE_ADD, name, (void *)ssp);

	if (pmDebug & DBG_TRACE_APPL0)
	    fprintf(stderr, "refresh_subsys: \"%s\" h=%u nc=%u on=%u\n",
			name, hierarchy, num_cgroups, enabled);
    }
    fclose(fp);
}
Example #3
void
refresh_cpuacct(const char *path, const char *name)
{
    pmInDom indom = INDOM(CGROUP_CPUACCT_INDOM);
    cgroup_cpuacct_t *cpuacct;
    char file[MAXPATHLEN];
    int sts;

    sts = pmdaCacheLookupName(indom, name, NULL, (void **)&cpuacct);
    if (sts == PMDA_CACHE_ACTIVE)
	return;
    if (sts != PMDA_CACHE_INACTIVE) {
	cpuacct = (cgroup_cpuacct_t *)malloc(sizeof(cgroup_cpuacct_t));
	if (!cpuacct)
	    return;
    }
    snprintf(file, sizeof(file), "%s/cpuacct.stat", path);
    read_cpuacct_stats(file, cpuacct);
    snprintf(file, sizeof(file), "%s/cpuacct.usage", path);
    cpuacct->usage = read_oneline_ull(file);
    snprintf(file, sizeof(file), "%s/cpuacct.usage_percpu", path);
    read_percpuacct_usage(file, name);
    pmdaCacheStore(indom, PMDA_CACHE_ADD, name, cpuacct);
}
Example #4
int
proc_partitions_fetch(pmdaMetric *mdesc, unsigned int inst, pmAtomValue *atom)
{
    __pmID_int          *idp = (__pmID_int *)&(mdesc->m_desc.pmid);
    int                 i;
    partitions_entry_t	*p = NULL;

    if (inst != PM_IN_NULL) {
	if (pmdaCacheLookup(mdesc->m_desc.indom, inst, NULL, (void **)&p) < 0)
	    return PM_ERR_INST;
    }

    switch (idp->cluster) {
    case CLUSTER_STAT:
	/*
	 * disk.{dev,all} remain in CLUSTER_STAT for backward compatibility
	 */
	switch(idp->item) {
	case 4: /* disk.dev.read */
	    if (p == NULL)
		return PM_ERR_INST;
	    _pm_assign_ulong(atom, p->rd_ios);
	    break;
	case 5: /* disk.dev.write */
	    if (p == NULL)
		return PM_ERR_INST;
	    _pm_assign_ulong(atom, p->wr_ios);
	    break;
	case 6: /* disk.dev.blkread */
	    if (p == NULL)
		return PM_ERR_INST;
	    atom->ull = p->rd_sectors;
	    break;
	case 7: /* disk.dev.blkwrite */
	    if (p == NULL)
		return PM_ERR_INST;
	    atom->ull = p->wr_sectors;
	    break;
	case 28: /* disk.dev.total */
	    if (p == NULL)
		return PM_ERR_INST;
	    atom->ull = p->rd_ios + p->wr_ios;
	    break;
	case 36: /* disk.dev.blktotal */
	    if (p == NULL)
		return PM_ERR_INST;
	    atom->ull = p->rd_sectors + p->wr_sectors;
	    break;
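	/*
	 * rd_sectors/wr_sectors from /proc/diskstats are 512-byte units,
	 * so halving them below yields Kbytes for the *_bytes metrics.
	 */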
	case 38: /* disk.dev.read_bytes */
	    if (p == NULL)
		return PM_ERR_INST;
	    atom->ull = p->rd_sectors / 2;
	    break;
	case 39: /* disk.dev.write_bytes */
	    if (p == NULL)
		return PM_ERR_INST;
	    atom->ull = p->wr_sectors / 2;
	    break;
	case 40: /* disk.dev.total_bytes */
	    if (p == NULL)
		return PM_ERR_INST;
	    atom->ull = (p->rd_sectors + p->wr_sectors) / 2;
	    break;
	case 46: /* disk.dev.avactive ... already msec from /proc/diskstats */
	    if (p == NULL)
		return PM_ERR_INST;
	    atom->ul = p->io_ticks;
	    break;
	case 47: /* disk.dev.aveq ... already msec from /proc/diskstats */
	    if (p == NULL)
		return PM_ERR_INST;
	    atom->ul = p->aveq;
	    break;
	case 49: /* disk.dev.read_merge */
	    if (p == NULL)
		return PM_ERR_INST;
	    _pm_assign_ulong(atom, p->rd_merges);
	    break;
	case 50: /* disk.dev.write_merge */
	    if (p == NULL)
		return PM_ERR_INST;
	    _pm_assign_ulong(atom, p->wr_merges);
	    break;
	case 59: /* disk.dev.scheduler */
	    if (p == NULL)
		return PM_ERR_INST;
	    atom->cp = _pm_ioscheduler(p->namebuf);
	    break;
	case 72: /* disk.dev.read_rawactive already ms from /proc/diskstats */
	    if (p == NULL)
		return PM_ERR_INST;
	    atom->ul = p->rd_ticks;
	    break;
	case 73: /* disk.dev.write_rawactive already ms from /proc/diskstats */
	    if (p == NULL)
		return PM_ERR_INST;
	    atom->ul = p->wr_ticks;
	    break;
	default:
	    /* disk.all.* is a singular instance domain */
	    atom->ull = 0;
	    for (pmdaCacheOp(INDOM(DISK_INDOM), PMDA_CACHE_WALK_REWIND);;) {
	        if ((i = pmdaCacheOp(INDOM(DISK_INDOM), PMDA_CACHE_WALK_NEXT)) < 0)
		    break;
		if (!pmdaCacheLookup(INDOM(DISK_INDOM), i, NULL, (void **)&p) || !p)
		    continue;
		switch (idp->item) {
		case 24: /* disk.all.read */
		    atom->ull += p->rd_ios;
		    break;
		case 25: /* disk.all.write */
		    atom->ull += p->wr_ios;
		    break;
		case 26: /* disk.all.blkread */
		    atom->ull += p->rd_sectors;
		    break;
		case 27: /* disk.all.blkwrite */
		    atom->ull += p->wr_sectors;
		    break;
		case 29: /* disk.all.total */
		    atom->ull += p->rd_ios + p->wr_ios;
		    break;
		case 37: /* disk.all.blktotal */
		    atom->ull += p->rd_sectors + p->wr_sectors;
		    break;
		case 41: /* disk.all.read_bytes */
		    atom->ull += p->rd_sectors / 2;
		    break;
		case 42: /* disk.all.write_bytes */
		    atom->ull += p->wr_sectors / 2;
		    break;
		case 43: /* disk.all.total_bytes */
		    atom->ull += (p->rd_sectors + p->wr_sectors) / 2;
		    break;
		case 44: /* disk.all.avactive ... already msec from /proc/diskstats */
		    atom->ull += p->io_ticks;
		    break;
		case 45: /* disk.all.aveq ... already msec from /proc/diskstats */
		    atom->ull += p->aveq;
		    break;
		case 51: /* disk.all.read_merge */
		    atom->ull += p->rd_merges;
		    break;
		case 52: /* disk.all.write_merge */
		    atom->ull += p->wr_merges;
		    break;
		case 74: /* disk.all.read_rawactive ... already msec from /proc/diskstats */
		    atom->ull += p->rd_ticks;
		    break;
		case 75: /* disk.all.write_rawactive ... already msec from /proc/diskstats */
		    atom->ull += p->wr_ticks;
		    break;
		default:
		    return PM_ERR_PMID;
		}
	    } /* loop */
	}
	break;

    case CLUSTER_PARTITIONS:
	if (p == NULL)
	    return PM_ERR_INST;
	switch(idp->item) {
	    /* disk.partitions */
	    case 0: /* disk.partitions.read */
		atom->ul = p->rd_ios;
		break;
	    case 1: /* disk.partitions.write */
		atom->ul = p->wr_ios;
		break;
	    case 2: /* disk.partitions.total */
		atom->ul = p->wr_ios +
			   p->rd_ios;
		break;
	    case 3: /* disk.partitions.blkread */
		atom->ul = p->rd_sectors;
		break;
	    case 4: /* disk.partitions.blkwrite */
		atom->ul = p->wr_sectors;
		break;
	    case 5: /* disk.partitions.blktotal */
		atom->ul = p->rd_sectors +
			   p->wr_sectors;
		break;
	    case 6: /* disk.partitions.read_bytes */
		atom->ul = p->rd_sectors / 2;
		break;
	    case 7: /* disk.partitions.write_bytes */
		atom->ul = p->wr_sectors / 2;
		break;
	    case 8: /* disk.partitions.total_bytes */
		atom->ul = (p->rd_sectors +
			   p->wr_sectors) / 2;
		break;
	    default:
		return PM_ERR_PMID;
	}
	break;

    case CLUSTER_DM:
	if (p == NULL)
	    return PM_ERR_INST;
	switch(idp->item) {
	case 0: /* disk.dm.read */
	    atom->ull = p->rd_ios;
	    break;
	case 1: /* disk.dm.write */
	    atom->ull = p->wr_ios;
	    break;
	case 2: /* disk.dm.total */
	    atom->ull = p->rd_ios + p->wr_ios;
	    break;
	case 3: /* disk.dm.blkread */
	    atom->ull = p->rd_sectors;
	    break;
	case 4: /* disk.dm.blkwrite */
	    atom->ull = p->wr_sectors;
	    break;
	case 5: /* disk.dm.blktotal */
	    atom->ull = p->rd_sectors + p->wr_sectors;
	    break;
	case 6: /* disk.dm.read_bytes */
	    atom->ull = p->rd_sectors / 2;
	    break;
	case 7: /* disk.dm.write_bytes */
	    atom->ull = p->wr_sectors / 2;
	    break;
	case 8: /* disk.dm.total_bytes */
	    atom->ull = (p->rd_sectors + p->wr_sectors) / 2;
	    break;
	case 9: /* disk.dm.read_merge */
	    atom->ull = p->rd_merges;
	    break;
	case 10: /* disk.dm.write_merge */
	    atom->ull = p->wr_merges;
	    break;
	case 11: /* disk.dm.avactive */
	    atom->ull = p->io_ticks;
	    break;
	case 12: /* disk.dm.aveq */
	    atom->ull = p->aveq;
	    break;
	case 13: /* hinv.map.dmname */
	    atom->cp = p->dmname;
	    break;
	case 14: /* disk.dm.read_rawactive */
	    atom->ul = p->rd_ticks;
	    break;
	case 15: /* disk.dm.write_rawactive */
	    atom->ul = p->wr_ticks;
	    break;
	default:
	    return PM_ERR_PMID;
	}
	break;

    default: /* switch cluster */
	return PM_ERR_PMID;
    }

    return 1;
}
Example #5
/* encode the domain(x), cluster (y) and item (z) parts of the PMID */
#define PMID(x,y,z) ((x<<22)|(y<<10)|z)

/* encode the domain(x) and serial (y) parts of the pmInDom */
#define INDOM(x,y) ((x<<22)|y)
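
/*
 * For example, PMID(2,3,3) puts domain 2 in bits 22-30, cluster 3 in
 * bits 10-21 and item 3 in bits 0-9, matching libpcp's __pmID_int
 * overlay; INDOM(2,1) likewise packs domain 2 above a 22-bit serial.
 */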

/*
 * Note: these pmDesc entries MUST match the corresponding
 *	 entries from the real PMDA ...
 *	 We fake it out here to accommodate logging from PCP 1.1
 *	 PMCD's and to avoid round-trip dependencies in setting up
 *	 the preamble
 */
static pmDesc	desc[] = {
/* pmcd.pmlogger.host */
    { PMID(2,3,3), PM_TYPE_STRING, INDOM(2,1), PM_SEM_DISCRETE, {0,0,0,0,0,0} },
/* pmcd.pmlogger.port */
    { PMID(2,3,0), PM_TYPE_U32, INDOM(2,1), PM_SEM_DISCRETE, {0,0,0,0,0,0} },
/* pmcd.pmlogger.archive */
    { PMID(2,3,2), PM_TYPE_STRING, INDOM(2,1), PM_SEM_DISCRETE, {0,0,0,0,0,0} },
};
/* names added for version 2 archives */
static char*	names[] = {
"pmcd.pmlogger.host",
"pmcd.pmlogger.port",
"pmcd.pmlogger.archive"
};

static int	n_metric = sizeof(desc) / sizeof(desc[0]);

int
Example #6
File: fetch.c Project: Aconex/pcp
/*
 * Called before each PMDA fetch ... force value refreshes for
 * requested metrics here; and special case any derived metrics.
 */
void
windows_fetch_refresh(int numpmid, pmID pmidlist[], pmdaExt *pmda)
{
    int	i, j, extra_filesys = 0, extra_memstat = 0;
    int extra_hinv_ncpu = -1, extra_hinv_ndisk = -1;
    int extra_network = -1;

    for (i = 0; i < NUMINDOMS; i++)
	windows_indom_reset[i] = 0;

    for (i = 0; i < metricdesc_sz; i++)
	for (j = 0; j < metricdesc[i].num_vals; j++)
	    metricdesc[i].vals[j].flags = V_NONE;

    for (i = 0; i < numpmid; i++) {
	__pmID_int *pmidp = (__pmID_int *)&pmidlist[i];
	int cluster = pmidp->cluster;
	int item = pmidp->item;

	if (cluster == 1)
	    extra_memstat = 1;
	else if (cluster != 0)
	    continue;
	else if (item == 106)
	    extra_memstat = 1;
	else if (item == 107 && extra_hinv_ncpu == -1)
	    extra_hinv_ncpu = 1;
	else if (item == 108 && extra_hinv_ndisk == -1)
	    extra_hinv_ndisk = 1;
	else if (item >= 117 && item <= 119)
	    extra_filesys = 1;
	else if (item >= 236 && item <= 237 && extra_network == -1)
	    extra_network = 1;
	else {
	    if (item >= 4 && item <= 7)
		extra_hinv_ncpu = 0;
	    else if ((item >=  21 && item <=  26) || item ==  68 ||
		     (item >= 217 && item <= 219) || item == 101 ||
		     (item >= 226 && item <= 231) || item == 133)
		extra_hinv_ndisk = 0;
	    else if (item == 235)
		extra_network = 0;

	    windows_visit_metric(&metricdesc[item], windows_collect_callback);
	}
    }

    if (extra_memstat)
	windows_fetch_memstat();
    if (extra_hinv_ncpu == 1)
	windows_visit_metric(&metricdesc[4], NULL);
    if (extra_hinv_ndisk == 1)
	windows_visit_metric(&metricdesc[21], NULL);
    if (extra_filesys) {
	windows_visit_metric(&metricdesc[120], windows_collect_callback);
	windows_visit_metric(&metricdesc[121], windows_collect_callback);
    }
    if (extra_network == 1)
	windows_visit_metric(&metricdesc[235], windows_collect_callback);

    for (i = 0; i < NUMINDOMS; i++) {
	/* Do we want to persist this instance domain to disk? */
	if (windows_indom_reset[i] && windows_indom_fixed(i))
	    pmdaCacheOp(INDOM(pmda->e_domain, i), PMDA_CACHE_SAVE);
    }
}
Example #7
/*
 * We use /proc/stat as a single source of truth regarding online/offline
 * state for CPUs (its per-CPU stats are for online CPUs only).
 * This drives the contents of the CPU indom for all per-CPU metrics, so
 * it is important to ensure this refresh routine is called first before
 * refreshing any other per-CPU metrics (e.g. interrupts, softnet).
 */
int
refresh_proc_stat(proc_stat_t *proc_stat)
{
    pernode_t	*np;
    percpu_t	*cp;
    pmInDom	cpus, nodes;
    char	buf[MAXPATHLEN], *name;
    int		n = 0, i, size;

    static int fd = -1; /* kept open until exit(), unless testing */
    static char *statbuf;
    static int maxstatbuf;
    static char **bufindex;
    static int nbufindex;
    static int maxbufindex;

    cpu_node_setup();
    cpus = INDOM(CPU_INDOM);
    pmdaCacheOp(cpus, PMDA_CACHE_INACTIVE);
    nodes = INDOM(NODE_INDOM);

    /* reset per-node aggregate CPU utilisation stats */
    for (pmdaCacheOp(nodes, PMDA_CACHE_WALK_REWIND);;) {
	if ((i = pmdaCacheOp(nodes, PMDA_CACHE_WALK_NEXT)) < 0)
	    break;
	if (!pmdaCacheLookup(nodes, i, NULL, (void **)&np) || !np)
	    continue;
	memset(&np->stat, 0, sizeof(np->stat));
    }

    /* in test mode we replace procfs files (keeping fd open thwarts that) */
    if (fd >= 0 && (linux_test_mode & LINUX_TEST_STATSPATH)) {
	close(fd);
	fd = -1;
    }

    if (fd >= 0) {
	if (lseek(fd, 0, SEEK_SET) < 0)
	    return -oserror();
    } else {
	snprintf(buf, sizeof(buf), "%s/proc/stat", linux_statspath);
	if ((fd = open(buf, O_RDONLY)) < 0)
	    return -oserror();
    }

    for (;;) {
	while (n >= maxstatbuf) {
	    size = maxstatbuf + 512;
	    if ((statbuf = (char *)realloc(statbuf, size)) == NULL)
		return -ENOMEM;
	    maxstatbuf = size;
	}
	size = (statbuf + maxstatbuf) - (statbuf + n);
	if ((i = read(fd, statbuf + n, size)) > 0)
	    n += i;
	else
	    break;
    }
    statbuf[n] = '\0';

    if (bufindex == NULL) {
	size = 16 * sizeof(char *);
	if ((bufindex = (char **)malloc(size)) == NULL)
	    return -ENOMEM;
	maxbufindex = 16;
    }

    nbufindex = 0;
    bufindex[nbufindex] = statbuf;
    for (i = 0; i < n; i++) {
	if (statbuf[i] == '\n' || statbuf[i] == '\0') {
	    statbuf[i] = '\0';
	    if (nbufindex + 1 >= maxbufindex) {
		size = (maxbufindex + 4) * sizeof(char *);
		if ((bufindex = (char **)realloc(bufindex, size)) == NULL)
		    return -ENOMEM;
	    	maxbufindex += 4;
	    }
	    bufindex[++nbufindex] = statbuf + i + 1;
	}
    }

#define ALLCPU_FMT "cpu %llu %llu %llu %llu %llu %llu %llu %llu %llu %llu"
    n = sscanf((const char *)bufindex[0], ALLCPU_FMT,
	&proc_stat->all.user, &proc_stat->all.nice,
	&proc_stat->all.sys, &proc_stat->all.idle,
	&proc_stat->all.wait, &proc_stat->all.irq,
	&proc_stat->all.sirq, &proc_stat->all.steal,
	&proc_stat->all.guest, &proc_stat->all.guest_nice);

#define PERCPU_FMT "cpu%u %llu %llu %llu %llu %llu %llu %llu %llu %llu %llu"
    /*
     * per-CPU stats
     * e.g. cpu0 95379 4 20053 6502503
     * 2.6 kernels have 3 additional fields for wait, irq and soft_irq.
     * More recent (2008) 2.6 kernels have an extra field for guest and
     * also (since 2009) guest_nice.
     * In the single-CPU system case, don't bother scanning, use "all";
     * this handles non-SMP kernels with no line starting with "cpu0".
     */
    if ((size = pmdaCacheOp(cpus, PMDA_CACHE_SIZE)) == 1) {
	pmdaCacheLookup(cpus, 0, &name, (void **)&cp);
	memcpy(&cp->stat, &proc_stat->all, sizeof(cp->stat));
	pmdaCacheStore(cpus, PMDA_CACHE_ADD, name, (void *)cp);
	pmdaCacheLookup(nodes, 0, NULL, (void **)&np);
	memcpy(&np->stat, &proc_stat->all, sizeof(np->stat));
    }
    else {
	for (n = 0; n < nbufindex; n++) {
	    if (strncmp("cpu", bufindex[n], 3) != 0 ||
		!isdigit((int)bufindex[n][3]))
		continue;
	    cp = NULL;
	    np = NULL;
	    i = atoi(&bufindex[n][3]);	/* extract CPU identifier */
	    if (pmdaCacheLookup(cpus, i, &name, (void **)&cp) < 0 || !cp)
		continue;
	    memset(&cp->stat, 0, sizeof(cp->stat));
	    sscanf(bufindex[n], PERCPU_FMT, &i,
		    &cp->stat.user, &cp->stat.nice, &cp->stat.sys,
		    &cp->stat.idle, &cp->stat.wait, &cp->stat.irq,
		    &cp->stat.sirq, &cp->stat.steal, &cp->stat.guest,
		    &cp->stat.guest_nice);
	    pmdaCacheStore(cpus, PMDA_CACHE_ADD, name, (void *)cp);

	    /* update per-node aggregate CPU utilisation stats as well */
	    if (pmdaCacheLookup(nodes, cp->nodeid, NULL, (void **)&np) < 0)
		continue;
	    np->stat.user += cp->stat.user;
	    np->stat.nice += cp->stat.nice;
	    np->stat.sys += cp->stat.sys;
	    np->stat.idle += cp->stat.idle;
	    np->stat.wait += cp->stat.wait;
	    np->stat.irq += cp->stat.irq;
	    np->stat.sirq += cp->stat.sirq;
	    np->stat.steal += cp->stat.steal;
	    np->stat.guest += cp->stat.guest;
	    np->stat.guest_nice += cp->stat.guest_nice;
	}
    }

    i = size;

#define PAGE_FMT "page %u %u"	/* NB: moved to /proc/vmstat in 2.6 kernels */
    if ((i = find_line_format(PAGE_FMT, 5, bufindex, nbufindex, i)) >= 0)
	sscanf((const char *)bufindex[i], PAGE_FMT,
		&proc_stat->page[0], &proc_stat->page[1]);

#define SWAP_FMT "swap %u %u"	/* NB: moved to /proc/vmstat in 2.6 kernels */
    if ((i = find_line_format(SWAP_FMT, 5, bufindex, nbufindex, i)) >= 0)
	sscanf((const char *)bufindex[i], SWAP_FMT,
		&proc_stat->swap[0], &proc_stat->swap[1]);

#define INTR_FMT "intr %llu"	/* (export 1st 'total interrupts' value only) */
    if ((i = find_line_format(INTR_FMT, 5, bufindex, nbufindex, i)) >= 0)
	sscanf((const char *)bufindex[i], INTR_FMT, &proc_stat->intr);

#define CTXT_FMT "ctxt %llu"
    if ((i = find_line_format(CTXT_FMT, 5, bufindex, nbufindex, i)) >= 0)
	sscanf((const char *)bufindex[i], CTXT_FMT, &proc_stat->ctxt);

#define BTIME_FMT "btime %lu"
    if ((i = find_line_format(BTIME_FMT, 6, bufindex, nbufindex, i)) >= 0)
	sscanf((const char *)bufindex[i], BTIME_FMT, &proc_stat->btime);

#define PROCESSES_FMT "processes %lu"
    if ((i = find_line_format(PROCESSES_FMT, 10, bufindex, nbufindex, i)) >= 0)
	sscanf((const char *)bufindex[i], PROCESSES_FMT, &proc_stat->processes);

#define RUNNING_FMT "procs_running %lu"
    if ((i = find_line_format(RUNNING_FMT, 14, bufindex, nbufindex, i)) >= 0)
	sscanf((const char *)bufindex[i], RUNNING_FMT, &proc_stat->procs_running);

#define BLOCKED_FMT "procs_blocked %lu"
    if ((i = find_line_format(BLOCKED_FMT, 14, bufindex, nbufindex, i)) >= 0)
	sscanf((const char *)bufindex[i], BLOCKED_FMT, &proc_stat->procs_blocked);

    /* success */
    return 0;
}
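The parsing above relies on a find_line_format() helper that is not part of this snippet. A minimal sketch consistent with the call sites (an assumption about its behaviour, not the PMDA's actual implementation): it returns the index of the first buffered line whose leading fmtlen characters match the format's literal prefix, trying the line just past the previous hit first since /proc/stat keeps a stable line order.

static int
find_line_format(const char *fmt, int fmtlen, char **bufindex, int nbufindex, int start)
{
    int	j;

    /* the wanted line usually follows the previous match, so try that first */
    if (start >= 0 && start + 1 < nbufindex &&
	strncmp(fmt, bufindex[start + 1], fmtlen) == 0)
	return start + 1;
    /* otherwise scan every buffered line */
    for (j = 0; j < nbufindex; j++)
	if (strncmp(fmt, bufindex[j], fmtlen) == 0)
	    return j;
    return -1;
}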
Example #8
static int
read_blkio_devices_stats(const char *file, const char *name, int style,
			cgroup_blkiops_t *total)
{
    pmInDom indom = INDOM(CGROUP_PERDEVBLKIO_INDOM);
    pmInDom devtindom = INDOM(DEVT_INDOM);
    cgroup_perdevblkio_t *blkdev;
    cgroup_blkiops_t *blkios;
    char *devname = NULL;
    char buffer[4096];
    FILE *fp;

    static cgroup_blkiops_t blkiops;
    static struct {
	char		*field;
	__uint64_t	*offset;
    } blkio_fields[] = {
	{ "Read",			&blkiops.read },
	{ "Write",			&blkiops.write },
	{ "Sync",			&blkiops.sync },
	{ "Async",			&blkiops.async },
	{ "Total",			&blkiops.total },
	{ NULL, NULL },
    };

    /* reset, so counts accumulate from zero for this set of devices */
    memset(total, 0, sizeof(cgroup_blkiops_t));

    if ((fp = fopen(file, "r")) == NULL)
	return -ENOENT;

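    /*
     * Per-device stats files (e.g. blkio.io_serviced) contain one line per
     * device and operation, such as
     *	253:0 Read 4096
     *	253:0 Write 8192
     *	253:0 Total 12288
     * followed by a final cgroup-wide "Total NNN" summary line.
     */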
    while (fgets(buffer, sizeof(buffer), fp) != NULL) {
	unsigned int major, minor;
	unsigned long long value;
	char *realname, op[8];
	int i;

	i = sscanf(buffer, "Total %llu\n", &value);
	if (i == 1) {	/* final field - per-cgroup Total operations */
	    break;
	}

	i = sscanf(buffer, "%u:%u %s %llu\n", &major, &minor, &op[0], &value);
	if (i < 4)
	    continue;
	realname = get_blkdev(devtindom, major, minor);
	if (!realname)
	    continue;
	if (!devname || strcmp(devname, realname) != 0) /* lines for next device */
	    memset(&blkiops, 0, sizeof(cgroup_blkiops_t));
	devname = realname;
	for (i = 0; blkio_fields[i].field != NULL; i++) {
	    if (strcmp(op, blkio_fields[i].field) != 0)
		continue;
	    *blkio_fields[i].offset = value;
	    if (strcmp("Total", blkio_fields[i].field) != 0)
		break;
	    /* all device fields are now acquired, update indom and cgroup totals */
	    blkdev = get_perdevblkio(indom, name, devname, buffer, sizeof(buffer));
	    blkios = get_blkiops(style, blkdev);
	    memcpy(blkios, &blkiops, sizeof(cgroup_blkiops_t));
	    pmdaCacheStore(indom, PMDA_CACHE_ADD, buffer, blkdev);
	    /* accumulate stats for this latest device into the per-cgroup totals */
	    total->read += blkiops.read;
	    total->write += blkiops.write;
	    total->sync += blkiops.sync;
	    total->async += blkiops.async;
	    total->total += blkiops.total;
	    break;
	}
    }
    fclose(fp);
    return 0;
}
Example #9
void
setup_netcls(void)
{
    pmdaCacheOp(INDOM(CGROUP_NETCLS_INDOM), PMDA_CACHE_INACTIVE);
}
Example #10
void
setup_memory(void)
{
    pmdaCacheOp(INDOM(CGROUP_MEMORY_INDOM), PMDA_CACHE_INACTIVE);
}
Example #11
void
setup_cpusched(void)
{
    pmdaCacheOp(INDOM(CGROUP_CPUSCHED_INDOM), PMDA_CACHE_INACTIVE);
}