Example #1
File: openbsd.c — Project: ColeJackes/pcp
/*
 * wrapper for pmdaFetch ... force value caches to be reloaded if needed,
 * then do the fetch
 */
/*
 * wrapper for pmdaFetch ... force value caches to be reloaded if needed,
 * then do the fetch
 */
static int
openbsd_fetch(int numpmid, pmID pmidlist[], pmResult **resp, pmdaExt *pmda)
{
    int		i;
    int		done_disk = 0;
    int		done_percpu = 0;
    int		done_netif = 0;
    int		done_filesys = 0;
    int		done_swap = 0;
    int		done_vm_uvmexp = 0;

    /* invalidate the value cache so stale data is refetched on demand */
    for (i = 0; i < maplen; i++) {
	map[i].m_fetched = 0;
    }

    /*
     * pre-fetch all metrics if needed, and update instance domains if
     * they have changed ... each cluster is refreshed at most once per
     * fetch, guarded by its done_* flag
     */
    for (i = 0; i < numpmid; i++) {
	if (pmid_cluster(pmidlist[i]) == CL_DISK) {
	    if (!done_disk) {
		refresh_disk_metrics();
		done_disk = 1;
	    }
	}
	else if (pmid_cluster(pmidlist[i]) == CL_CPUTIME) {
	    if (!done_percpu) {
		refresh_percpu_metrics();
		done_percpu = 1;
	    }
	}
	else if (pmid_cluster(pmidlist[i]) == CL_NETIF) {
	    if (!done_netif) {
		refresh_netif_metrics();
		done_netif = 1;
	    }
	}
	else if (pmid_cluster(pmidlist[i]) == CL_FILESYS) {
	    if (!done_filesys) {
		refresh_filesys_metrics();
		/*
		 * fix: previously set done_netif here (copy-paste bug),
		 * which re-ran refresh_filesys_metrics() for every
		 * filesys pmid and could suppress a later netif refresh
		 */
		done_filesys = 1;
	    }
	}
	else if (pmid_cluster(pmidlist[i]) == CL_SWAP) {
	    if (!done_swap) {
		refresh_swap_metrics();
		done_swap = 1;
	    }
	}
	else if (pmid_cluster(pmidlist[i]) == CL_VM_UVMEXP) {
	    if (!done_vm_uvmexp) {
		refresh_vm_uvmexp_metrics();
		done_vm_uvmexp = 1;
	    }
	}
    }

    return pmdaFetch(numpmid, pmidlist, resp, pmda);
}
Example #2
File: freebsd.c — Project: Aconex/pcp
/*
 * wrapper for pmdaFetch ... force value caches to be reloaded if needed,
 * then do the fetch
 */
/*
 * wrapper for pmdaFetch ... force value caches to be reloaded if needed,
 * then do the fetch
 */
static int
freebsd_fetch(int numpmid, pmID pmidlist[], pmResult **resp, pmdaExt *pmda)
{
    int		i;
    int		done_disk = 0;
    int		done_netif = 0;

    /* invalidate the value cache so stale data is refetched on demand */
    for (i = 0; i < maplen; i++) {
	map[i].m_fetched = 0;
    }

    /*
     * pre-fetch all metrics if needed, and update instance domains if
     * they have changed.
     * fix: the old loop condition (!done_disk && !done_netif) bailed
     * out as soon as EITHER cluster had been refreshed, so a fetch
     * mixing disk and netif pmids never refreshed the second cluster;
     * now every pmid is scanned and each cluster is refreshed at most
     * once.
     */
    for (i = 0; i < numpmid; i++) {
	if (pmid_cluster(pmidlist[i]) == CL_DISK) {
	    if (!done_disk) {
		refresh_disk_metrics();
		done_disk = 1;
	    }
	}
	else if (pmid_cluster(pmidlist[i]) == CL_NETIF) {
	    if (!done_netif) {
		refresh_netif_metrics();
		done_netif = 1;
	    }
	}
	if (done_disk && done_netif)
	    break;	/* both clusters refreshed, nothing left to do */
    }

    return pmdaFetch(numpmid, pmidlist, resp, pmda);
}
Example #3
File: freebsd.c — Project: Aconex/pcp
/*
 * wrapper for pmdaInstance ... refresh required instance domain first
 */
/*
 * wrapper for pmdaInstance ... refresh required instance domain first
 */
static int
freebsd_instance(pmInDom indom, int inst, char *name, __pmInResult **result, pmdaExt *pmda)
{
    /*
     * some indoms are maintained in the pmdaCache rather than via the
     * static names/ids in indomtab[], so bring the cache up to date
     * before handing off to pmdaInstance()
     */
    if (indom == indomtab[DISK_INDOM].it_indom)
	refresh_disk_metrics();
    else if (indom == indomtab[NETIF_INDOM].it_indom)
	refresh_netif_metrics();

    return pmdaInstance(indom, inst, name, result, pmda);
}
Example #4
File: openbsd.c — Project: ColeJackes/pcp
/*
 * Callback provided to pmdaFetch ... come here once per metric-instance
 * pair in each pmFetch().
 */
/*
 * Callback provided to pmdaFetch ... come here once per metric-instance
 * pair in each pmFetch().
 *
 * Returns 1 and fills *atom on success, 0 for "no values available",
 * or a PM_ERR_* code (PM_ERR_PMID for unknown metrics, PM_ERR_INST for
 * bad instances).
 */
static int
openbsd_fetchCallBack(pmdaMetric *mdesc, unsigned int inst, pmAtomValue *atom)
{
    int			sts = PM_ERR_PMID;
    __pmID_int		*idp = (__pmID_int *)&(mdesc->m_desc.pmid);
    mib_t		*mp;
    int			i;

    /* per-metric sysctl mib info, attached at init time */
    mp = (mib_t *)mdesc->m_user;
    if (idp->cluster == CL_SYSCTL) {
	/* sysctl() simple cases */
	switch (idp->item) {
	    /* 32-bit integer values */
	    case 0:		/* hinv.ncpu */
		sts = do_sysctl(mp, sizeof(atom->ul));
		if (sts > 0) {
		    atom->ul = *((__uint32_t *)mp->m_data);
		    sts = 1;
		}
		break;

	    /* 64-bit integer values */
	    case 1:		/* hinv.physmem */
		sts = do_sysctl(mp, sizeof(atom->ull));
		if (sts > 0) {
		    /* sysctl returns bytes, convert to Mbytes */
		    atom->ull = *((__uint64_t *)mp->m_data);
		    atom->ull /= 1024*1024;
		    sts = 1;
		}
		break;

	    /* string values */
	    case 15:		/* hinv.cpu.vendor */
	    case 16:		/* hinv.cpu.model */
		sts = do_sysctl(mp, (size_t)0);
		if (sts > 0) {
		    atom->cp = (char *)mp->m_data;
		    sts = 1;
		}
		break;

	    /* structs and aggregates */
	    case 3:		/* kernel.all.cpu.user */
	    case 4:		/* kernel.all.cpu.nice */
	    case 5:		/* kernel.all.cpu.sys */
	    case 6:		/* kernel.all.cpu.intr */
	    case 7:		/* kernel.all.cpu.idle */
		sts = do_sysctl(mp, CPUSTATES*sizeof(atom->ull));
		if (sts > 0) {
		    /*
		     * PMID assignment is important in the "-3" below so
		     * that metrics map to consecutive elements of the
		     * returned value in the order defined for CPUSTATES,
		     * i.e. CP_USER, CP_NICE, CP_SYS, CP_INTR and
		     * CP_IDLE
		     */
		    atom->ull = 1000*((__uint64_t *)mp->m_data)[idp->item-3]/cpuhz;
		    sts = 1;
		}
		break;

	    case 13:		/* kernel.all.hz */
		sts = do_sysctl(mp, sizeof(struct clockinfo));
		if (sts > 0) {
		    struct clockinfo	*cp = (struct clockinfo *)mp->m_data;
		    atom->ul = cp->hz;
		    sts = 1;
		}
		break;

	}
    }
    else if (idp->cluster == CL_SPECIAL) {
	/* special cases */
	double	loadavg[3];
	/*
	 * fix: uname_string must be static ... atom->cp is consumed by
	 * the caller after this callback returns, so the buffer cannot
	 * live in this function's stack frame (dangling pointer)
	 */
	static char	uname_string[sizeof(kernel_uname)];

	switch (idp->item) {
	    case 0:	/* hinv.ndisk */
		refresh_disk_metrics();
		atom->ul = pmdaCacheOp(indomtab[DISK_INDOM].it_indom, PMDA_CACHE_SIZE_ACTIVE);
		sts = 1;
		break;

	    case 2:		/* kernel.all.load */
		sts = getloadavg(loadavg, 3);
		if (sts == 3) {
		    /* instances are the 1, 5 and 15 minute averages */
		    if (inst == 1)
			i = 0;
		    else if (inst == 5)
			i = 1;
		    else if (inst == 15)
			i = 2;
		    else
			return PM_ERR_INST;
		    atom->f = (float)loadavg[i];
		    sts = 1;
		}
		else
		    return PM_ERR_INST;
		break;

	    case 3:	/* hinv.pagesize */
		atom->ul = pagesize;
		sts = 1;
		break;

	    case 14:	/* kernel.uname.release */
		atom->cp = kernel_uname.release;
		sts = 1;
		break;

	    case 15:	/* kernel.uname.version */
		atom->cp = kernel_uname.version;
		sts = 1;
		break;

	    case 16:	/* kernel.uname.sysname */
		atom->cp = kernel_uname.sysname;
		sts = 1;
		break;

	    case 17:	/* kernel.uname.machine */
		atom->cp = kernel_uname.machine;
		sts = 1;
		break;

	    case 18:	/* kernel.uname.nodename */
		atom->cp = kernel_uname.nodename;
		sts = 1;
		break;

	    case 20: /* pmda.uname */
		snprintf(uname_string, sizeof(uname_string), "%s %s %s %s %s",
		    kernel_uname.sysname, 
		    kernel_uname.nodename,
		    kernel_uname.release,
		    kernel_uname.version,
		    kernel_uname.machine);
		atom->cp = uname_string;
		sts = 1;
		break;

	    case 21: /* pmda.version */
		atom->cp = pmGetConfig("PCP_VERSION");
		sts = 1;
		break;

	}
    }
    else if (idp->cluster == CL_DISK) {
	sts = do_disk_metrics(mdesc, inst, atom);
    }
    else if (idp->cluster == CL_CPUTIME) {
	sts = do_percpu_metrics(mdesc, inst, atom);
    }
    else if (idp->cluster == CL_NETIF) {
	sts = do_netif_metrics(mdesc, inst, atom);
    }
    else if (idp->cluster == CL_FILESYS) {
	sts = do_filesys_metrics(mdesc, inst, atom);
    }
    else if (idp->cluster == CL_SWAP) {
	sts = do_swap_metrics(mdesc, inst, atom);
    }
    else if (idp->cluster == CL_VM_UVMEXP) {
	/* vm.uvmexp sysctl metrics */
	sts = do_vm_uvmexp_metrics(mdesc, inst, atom);
    }

    return sts;
}
Example #5
File: freebsd.c — Project: Aconex/pcp
/*
 * Callback provided to pmdaFetch ... come here once per metric-instance
 * pair in each pmFetch().
 */
/*
 * Callback provided to pmdaFetch ... come here once per metric-instance
 * pair in each pmFetch().
 *
 * Returns 1 and fills *atom on success, or a PM_ERR_* code (PM_ERR_PMID
 * for unrecognized metrics, PM_ERR_INST for bad instances).  Several
 * cases below rely on consecutive mib[] table entries (see the comments
 * at each site) -- the table layout elsewhere in this file must match.
 */
static int
freebsd_fetchCallBack(pmdaMetric *mdesc, unsigned int inst, pmAtomValue *atom)
{
    int			sts = PM_ERR_PMID;
    __pmID_int		*idp = (__pmID_int *)&(mdesc->m_desc.pmid);
    mib_t		*mp;

    /* per-metric sysctl mib info, attached at init time */
    mp = (mib_t *)mdesc->m_user;
    if (idp->cluster == CL_SYSCTL) {
	/* sysctl() simple cases */
	switch (idp->item) {
	    /* 32-bit integer values */
	    case 0:		/* hinv.ncpu */
	    case 18:		/* swap.pagesin */
	    case 19:		/* swap.pagesout */
	    case 20:		/* swap.in */
	    case 21:		/* swap.out */
	    case 22:		/* kernel.all.pswitch */
	    case 23:		/* kernel.all.syscall */
	    case 24:		/* kernel.all.intr */
		sts = do_sysctl(mp, sizeof(atom->ul));
		if (sts > 0) {
		    atom->ul = *((__uint32_t *)mp->m_data);
		    sts = 1;
		}
		break;

	    /* 64-bit integer values */
	    case 25:		/* swap.length */
	    case 26:		/* swap.used */
		sts = do_sysctl(mp, sizeof(atom->ull));
		if (sts > 0) {
		    atom->ull = *((__uint64_t *)mp->m_data);
		    sts = 1;
		}
		break;

	    /* long integer value */
	    case 1:		/* hinv.physmem */
		sts = do_sysctl(mp, sizeof(long));
		if (sts > 0) {
		    /* widen the kernel's long to the exported 64-bit value */
		    atom->ull = *((long *)mp->m_data);
		    sts = 1;
		}
		break;

	    /* string values */
	    case 15:		/* hinv.cpu.vendor */
	    case 16:		/* hinv.cpu.model */
	    case 17:		/* hinv.cpu.arch */
		/* size 0 => do_sysctl() sizes the buffer itself;
		 * atom->cp points into cached mib data, not a copy */
		sts = do_sysctl(mp, (size_t)0);
		if (sts > 0) {
		    atom->cp = (char *)mp->m_data;
		    sts = 1;
		}
		break;

	    /* structs and aggregates */
	    case 2:		/* kernel.all.load */
		sts = do_sysctl(mp, sizeof(struct loadavg));
		if (sts > 0) {
		    int			i;
		    struct loadavg	*lp = (struct loadavg *)mp->m_data;
		    /* instances are the 1, 5 and 15 minute averages */
		    if (inst == 1)
			i = 0;
		    else if (inst == 5)
			i = 1;
		    else if (inst == 15)
			i = 2;
		    else
			return PM_ERR_INST;
		    /* ldavg[] is fixed-point, scaled by fscale */
		    atom->f = (float)((double)lp->ldavg[i] / lp->fscale);
		    sts = 1;
		}
		break;

	    case 3:		/* kernel.all.cpu.user */
	    case 4:		/* kernel.all.cpu.nice */
	    case 5:		/* kernel.all.cpu.sys */
	    case 6:		/* kernel.all.cpu.intr */
	    case 7:		/* kernel.all.cpu.idle */
		/*
		 * assume this declaration is correct ...
		 * long pc_cp_time[CPUSTATES];	...
		 * from /usr/include/sys/pcpu.h
		 */
		sts = do_sysctl(mp, CPUSTATES*sizeof(long));
		if (sts > 0) {
		    /*
		     * PMID assignment is important in the "-3" below so
		     * that metrics map to consecutive elements of the
		     * returned value in the order defined for CPUSTATES,
		     * i.e. CP_USER, CP_NICE, CP_SYS, CP_INTR and
		     * CP_IDLE
		     */
		    /* convert ticks to milliseconds */
		    atom->ull = 1000*((__uint64_t)((long *)mp->m_data)[idp->item-3])/cpuhz;
		    sts = 1;
		}
		break;

	    case 8:		/* kernel.percpu.cpu.user */
	    case 9:		/* kernel.percpu.cpu.nice */
	    case 10:		/* kernel.percpu.cpu.sys */
	    case 11:		/* kernel.percpu.cpu.intr */
	    case 12:		/* kernel.percpu.cpu.idle */
		/* NOTE(review): sizing/indexing by atom->ull (8 bytes)
		 * assumes the per-CPU counters are 64-bit, unlike the
		 * long-based kernel.all case above -- confirm against
		 * the kern.cp_times sysctl on the target platform */
		sts = do_sysctl(mp, ncpu*CPUSTATES*sizeof(atom->ull));
		if (sts > 0) {
		    /*
		     * PMID assignment is important in the "-8" below so
		     * that metrics map to consecutive elements of the
		     * returned value in the order defined for CPUSTATES,
		     * i.e. CP_USER, CP_NICE, CP_SYS, CP_INTR and
		     * CP_IDLE, and then there is one such set for each
		     * CPU up to the maximum number of CPUs installed in
		     * the system.
		     */
		    atom->ull = 1000*((__uint64_t *)mp->m_data)[inst * CPUSTATES + idp->item-8]/cpuhz;
		    sts = 1;
		}
		break;

	    case 13:		/* kernel.all.hz */
		sts = do_sysctl(mp, sizeof(struct clockinfo));
		if (sts > 0) {
		    struct clockinfo	*cp = (struct clockinfo *)mp->m_data;
		    atom->ul = cp->hz;
		    sts = 1;
		}
		break;

	}
    }
    else if (idp->cluster == CL_SPECIAL) {
	/* special cases */
	switch (idp->item) {
	    case 0:	/* hinv.ndisk */
		/* count of active disks in the (refreshed) instance cache */
		refresh_disk_metrics();
		atom->ul = pmdaCacheOp(indomtab[DISK_INDOM].it_indom, PMDA_CACHE_SIZE_ACTIVE);
		sts = 1;
		break;

	    case 1:	/* swap.free */
		/* first vm.swap_total */
		sts = do_sysctl(mp, sizeof(atom->ull));
		if (sts > 0) {
		    atom->ull = *((__uint64_t *)mp->m_data);
		    /*
		     * now subtract vm.swap_reserved ... assumes consecutive
		     * mib[] entries
		     */
		    mp++;
		    sts = do_sysctl(mp, sizeof(atom->ull));
		    if (sts > 0) {
			atom->ull -= *((__uint64_t *)mp->m_data);
			sts = 1;
		    }
		}
		break;

	    case 3:	/* hinv.pagesize */
		atom->ul = pagesize;
		sts = 1;
		break;

	    case 4:	/* mem.util.all */
	    case 6:	/* mem.util.free */
	    case 8:	/* mem.util.cached */
	    case 9:	/* mem.util.wired */
	    case 10:	/* mem.util.active */
	    case 11:	/* mem.util.inactive */
		sts = do_sysctl(mp, sizeof(atom->ul));
		if (sts > 0) {
		    /* kernel reports pages, export Kbytes */
		    atom->ul = *((__uint32_t *)mp->m_data) * (pagesize / 1024);
		    sts = 1;
		}
		break;

	    case 7:	/* mem.util.bufmem */
		sts = do_sysctl(mp, sizeof(atom->ul));
		if (sts > 0) {
		    /* kernel reports bytes, export Kbytes */
		    atom->ul = *((__uint32_t *)mp->m_data) / 1024;
		    sts = 1;
		}
		break;

	    case 5:	/* mem.util.used */
		/*
		 * mp-> v_page_count entry in mib[]
		 * assuming consecutive mib[] entries, we want
		 * v_page_count mp[0] - v_free_count mp[1] -
		 * v_cache_count mp[2] - v_inactive_count mp[5]
		 */
		sts = do_sysctl(mp, sizeof(atom->ul));
		if (sts > 0) {
		    atom->ul = *((__uint32_t *)mp->m_data);
		    sts = do_sysctl(&mp[1], sizeof(atom->ul));
		    if (sts > 0) {
			atom->ul -= *((__uint32_t *)mp[1].m_data);
			sts = do_sysctl(&mp[2], sizeof(atom->ul));
			if (sts > 0) {
			    atom->ul -= *((__uint32_t *)mp[2].m_data);
			    sts = do_sysctl(&mp[5], sizeof(atom->ul));
			    if (sts > 0) {
				atom->ul -= *((__uint32_t *)mp[5].m_data);
				/* pages -> Kbytes */
				atom->ul *= (pagesize / 1024);
				sts = 1;
			    }
			}
		    }
		}
		break;

	    case 12:	/* mem.util.avail */
		/*
		 * mp-> v_page_count entry in mib[]
		 * assuming consecutive mib[] entries, we want
		 * v_free_count mp[1] + v_cache_count mp[2] +
		 * v_inactive_count mp[5]
		 */
		sts = do_sysctl(&mp[1], sizeof(atom->ul));
		if (sts > 0) {
		    atom->ul = *((__uint32_t *)mp[1].m_data);
		    sts = do_sysctl(&mp[2], sizeof(atom->ul));
		    if (sts > 0) {
			atom->ul += *((__uint32_t *)mp[2].m_data);
			sts = do_sysctl(&mp[5], sizeof(atom->ul));
			if (sts > 0) {
			    atom->ul += *((__uint32_t *)mp[5].m_data);
			    /* pages -> Kbytes */
			    atom->ul *= (pagesize / 1024);
			    sts = 1;
			}
		    }
		}
		break;

	}
    }
    else if (idp->cluster == CL_DISK) {
	/* disk metrics */
	sts = do_disk_metrics(mdesc, inst, atom);
    }
    else if (idp->cluster == CL_NETIF) {
	/* network interface metrics */
	sts = do_netif_metrics(mdesc, inst, atom);
    }

    return sts;
}