/*
 * Wrapper over the pmdaFetch routine to cope with a varying instance
 * domain: the previous mount list is discarded and the current one is
 * rebuilt (via mounts_refresh_mounts) before every fetch.
 */
static int
mounts_fetch(int numpmid, pmID pmidlist[], pmResult **resp, pmdaExt *pmda)
{
    /* re-read the config file if it changed, then rebuild the mounts indom */
    mounts_config_file_check();
    mounts_refresh_mounts();

    return pmdaFetch(numpmid, pmidlist, resp, pmda);
}
/*
 * Fetch wrapper: log the call, register the calling client context
 * with the event queueing layer, then delegate to pmdaFetch.
 */
static int
etw_fetch(int numpmid, pmID pmidlist[], pmResult **resp, pmdaExt *pmda)
{
    __pmNotifyErr(LOG_WARNING, "called %s", __FUNCTION__);
    pmdaEventNewClient(pmda->e_context);

    return pmdaFetch(numpmid, pmidlist, resp, pmda);
}
/*
 * This routine is called once for each pmFetch(3) operation, so is a
 * good place to do once-per-fetch functions, such as value caching or
 * instance domain evaluation.
 *
 * Reconnects to ctdbd on demand, pulls current statistics through
 * ctdb_ctrl_statistics(), then delegates to pmdaFetch().  On control
 * failure the connection is dropped so the next fetch reconnects.
 */
static int
pmda_ctdb_fetch(int numpmid, pmID pmidlist[], pmResult **resp, pmdaExt *pmda)
{
    int ret;
    struct timeval ctdb_timeout;

    if (client == NULL) {
	fprintf(stderr, "attempting reconnect to ctdbd\n");
	ret = pmda_ctdb_daemon_connect();
	if (ret < 0) {
	    fprintf(stderr, "reconnect failed\n");
	    return PM_ERR_VALUE;
	}
    }

    /*
     * Bug fix: ctdb_timeout was previously passed to the control call
     * uninitialized (undefined behaviour).  Use a one second timeout,
     * matching the other ctdb fetch path.
     */
    ctdb_timeout = timeval_current_ofs(1, 0);

    ret = ctdb_ctrl_statistics(client, ev, client, CTDB_CURRENT_NODE,
			       ctdb_timeout, &stats);
    if (ret != 0) {
	fprintf(stderr, "ctdb control for statistics failed, reconnecting\n");
	pmda_ctdb_daemon_disconnect();
	ret = PM_ERR_VALUE;
	goto err_out;
    }

    ret = pmdaFetch(numpmid, pmidlist, resp, pmda);

    /* stats was talloc'd by the control call; release it after the fetch */
    talloc_free(stats);

err_out:
    return ret;
}
/*
 * wrapper for pmdaFetch ... force value caches to be reloaded if needed,
 * then do the fetch
 *
 * Each metric cluster named in pmidlist is refreshed at most once per
 * fetch; the done_* flags suppress repeat refreshes.
 */
static int
openbsd_fetch(int numpmid, pmID pmidlist[], pmResult **resp, pmdaExt *pmda)
{
    int i;
    int done_disk = 0;
    int done_percpu = 0;
    int done_netif = 0;
    int done_filesys = 0;
    int done_swap = 0;
    int done_vm_uvmexp = 0;

    /* invalidate all cached values for this fetch */
    for (i = 0; i < maplen; i++) {
	map[i].m_fetched = 0;
    }

    /*
     * pre-fetch all metrics if needed, and update instance domains if
     * they have changed
     */
    for (i = 0; i < numpmid; i++) {
	switch (pmid_cluster(pmidlist[i])) {
	case CL_DISK:
	    if (!done_disk) {
		refresh_disk_metrics();
		done_disk = 1;
	    }
	    break;
	case CL_CPUTIME:
	    if (!done_percpu) {
		refresh_percpu_metrics();
		done_percpu = 1;
	    }
	    break;
	case CL_NETIF:
	    if (!done_netif) {
		refresh_netif_metrics();
		done_netif = 1;
	    }
	    break;
	case CL_FILESYS:
	    if (!done_filesys) {
		refresh_filesys_metrics();
		/*
		 * Bug fix: this branch previously set done_netif,
		 * so filesys refreshed once per matching pmid and
		 * could suppress the netif refresh.
		 */
		done_filesys = 1;
	    }
	    break;
	case CL_SWAP:
	    if (!done_swap) {
		refresh_swap_metrics();
		done_swap = 1;
	    }
	    break;
	case CL_VM_UVMEXP:
	    if (!done_vm_uvmexp) {
		refresh_vm_uvmexp_metrics();
		done_vm_uvmexp = 1;
	    }
	    break;
	default:
	    /* cluster needs no per-fetch refresh */
	    break;
	}
    }

    return pmdaFetch(numpmid, pmidlist, resp, pmda);
}
/*
 * wrapper for pmdaFetch ... force value caches to be reloaded if needed,
 * then do the fetch
 */
static int
freebsd_fetch(int numpmid, pmID pmidlist[], pmResult **resp, pmdaExt *pmda)
{
    int i;
    int done_disk = 0;
    int done_netif = 0;

    /* invalidate all cached values for this fetch */
    for (i = 0; i < maplen; i++) {
	map[i].m_fetched = 0;
    }

    /*
     * pre-fetch all metrics if needed, and update instance domains if
     * they have changed.
     *
     * Bug fix: the old loop condition (!done_disk && !done_netif)
     * stopped scanning as soon as EITHER cluster had been refreshed,
     * so a fetch naming both disk and netif metrics only refreshed
     * whichever cluster appeared first.  Scan the whole pmidlist,
     * stopping early only once BOTH clusters are done.
     */
    for (i = 0; i < numpmid && !(done_disk && done_netif); i++) {
	if (pmid_cluster(pmidlist[i]) == CL_DISK) {
	    if (!done_disk) {
		refresh_disk_metrics();
		done_disk = 1;
	    }
	}
	else if (pmid_cluster(pmidlist[i]) == CL_NETIF) {
	    if (!done_netif) {
		refresh_netif_metrics();
		done_netif = 1;
	    }
	}
    }

    return pmdaFetch(numpmid, pmidlist, resp, pmda);
}
/*
 * wrapper for pmdaFetch which primes the methods ready for
 * the next fetch
 * ... real callback is fetch_callback()
 */
static int
aix_fetch(int numpmid, pmID pmidlist[], pmResult **resp, pmdaExt *pmda)
{
    int m;

    /* TODO: this should only fetch metrics from "pmidlist" */
    for (m = 0; m < methodtab_sz; m++)
	methodtab[m].m_prefetch();

    return pmdaFetch(numpmid, pmidlist, resp, pmda);
}
/*
 * Fetch wrapper: tally which clusters this fetch touches, refresh just
 * those via darwin_refresh(), then delegate to pmdaFetch.
 */
static int
darwin_fetch(int numpmid, pmID pmidlist[], pmResult **resp, pmdaExt *pmda)
{
    int need_refresh[NUM_CLUSTERS] = { 0 };
    int n;

    for (n = 0; n < numpmid; n++) {
	__pmID_int *idp = (__pmID_int *)&(pmidlist[n]);

	/* bounds-check the cluster before counting it */
	if (idp->cluster >= 0 && idp->cluster < NUM_CLUSTERS)
	    need_refresh[idp->cluster]++;
    }
    darwin_refresh(need_refresh);

    return pmdaFetch(numpmid, pmidlist, resp, pmda);
}
/*
 * Fetch wrapper: register the client context, grow the per-context
 * table, and (re)install the journal event filter before delegating
 * to pmdaFetch.
 */
static int
systemd_fetch(int numpmid, pmID pmidlist[], pmResult **resp, pmdaExt *pmda)
{
    int rc;

    (void) pmdaEventNewClient(pmda->e_context);
    enlarge_ctxtab(pmda->e_context);
    rc = pmdaEventSetFilter(pmda->e_context, queue_entries,
                            & ctxtab[pmda->e_context], /* any non-NULL value */
                            systemd_journal_event_filter,
                            systemd_journal_event_filter_release /* NULL */);
    if (rc < 0)
        return rc;

    return pmdaFetch(numpmid, pmidlist, resp, pmda);
}
/*
 * This routine is called once for each pmFetch(3) operation, so is a
 * good place to do once-per-fetch functions, such as value caching or
 * instance domain evaluation.
 *
 * Reconnects to ctdbd on demand, issues a CTDB_CONTROL_STATISTICS
 * control with a one second timeout, validates the reply size, then
 * delegates to pmdaFetch().  On control failure the daemon connection
 * is dropped so the next fetch attempts a reconnect.
 */
static int
pmda_ctdb_fetch(int numpmid, pmID pmidlist[], pmResult **resp, pmdaExt *pmda)
{
    int ret;
    TDB_DATA data;
    int32_t res;
    struct timeval ctdb_timeout;

    /* (re)establish the ctdbd connection on demand */
    if (ctdb == NULL) {
	fprintf(stderr, "attempting reconnect to ctdbd\n");
	ret = pmda_ctdb_daemon_connect();
	if (ret < 0) {
	    fprintf(stderr, "reconnect failed\n");
	    return PM_ERR_VALUE;
	}
    }

    /* give the statistics control one second before failing the fetch */
    ctdb_timeout = timeval_current_ofs(1, 0);
    ret = ctdb_control(ctdb, ctdb->pnn, 0, CTDB_CONTROL_STATISTICS, 0, tdb_null,
		       ctdb, &data, &res, &ctdb_timeout, NULL);
    if (ret != 0 || res != 0) {
	/* drop the connection so the next fetch reconnects cleanly */
	fprintf(stderr, "ctdb control for statistics failed, reconnecting\n");
	pmda_ctdb_daemon_disconnect();
	ret = PM_ERR_VALUE;
	goto err_out;
    }

    /* reply payload is the statistics struct; reject a size mismatch */
    stats = (struct ctdb_statistics *)data.dptr;

    if (data.dsize != sizeof(struct ctdb_statistics)) {
	fprintf(stderr, "incorrect statistics size %zu - not %zu\n",
		data.dsize, sizeof(struct ctdb_statistics));
	ret = PM_ERR_VALUE;
	goto err_stats;
    }

    ret = pmdaFetch(numpmid, pmidlist, resp, pmda);

    /* stats aliases data.dptr (talloc'd by ctdb_control); free on all paths */
err_stats:
    talloc_free(stats);
err_out:
    return ret;
}
/*
 * Fetch wrapper: make sure this client context is registered with the
 * event queueing layer, then hand off to pmdaFetch.
 */
static int
logger_fetch(int numpmid, pmID pmidlist[], pmResult **resp, pmdaExt *pmda)
{
    pmdaEventNewClient(pmda->e_context);

    return pmdaFetch(numpmid, pmidlist, resp, pmda);
}
/*
 * Fetch wrapper for the PAPI PMDA: runs once per pmFetch(3) batch.
 * With alarms blocked it (1) expires auto-enabled counters, (2) in
 * auto-enable mode extends/initializes counter lifetimes and triggers
 * a mass refresh when a new counter appears, (3) reads the hardware
 * counter values once for the whole batch, and (4) enforces the
 * permission check unless only CLUSTER_AVAILABLE metrics were asked
 * for, before delegating to pmdaFetch.
 */
static int
papi_fetch(int numpmid, pmID pmidlist[], pmResult **resp, pmdaExt *pmda)
{
    int i, sts = 0;

    __pmAFblock();
    auto_enable_expiry_cb(0, NULL); // run auto-expiry

    /* In auto-enable mode, handle a mass-refresh of the papi counter
       state in a big batch here, ahead of individual attempts to
       confirm the counters' activation & read (initial) values. */
    if (auto_enable_time) {
	int need_refresh_p = 0;
	time_t now = time (NULL);

	for (i=0; i<numpmid; i++) {
	    __pmID_int *idp = (__pmID_int *)&(pmidlist[i]);
	    if (idp->cluster == CLUSTER_PAPI) {
		// NOTE(review): "<= number_of_events" indexes
		// papi_info[number_of_events]; confirm the table has
		// number_of_events+1 entries, else this is off-by-one.
		if (idp->item >= 0 && idp->item <= number_of_events) {
		    if (papi_info[idp->item].position < 0) { // new counter?
			need_refresh_p = 1;
		    }
		    // update or initialize remaining lifetime
		    if (papi_info[idp->item].metric_enabled != METRIC_ENABLED_FOREVER)
			papi_info[idp->item].metric_enabled = now + auto_enable_time;
		}
	    }
	}
	if (need_refresh_p) {
	    refresh_metrics(1);
	    // NB: A non-0 sts here would not be a big problem; no
	    // need to abort the whole fetch sequence just for that.
	    // Each individual CLUSTER_PAPI fetch will get a
	    // PM_ERR_VALUE to let the user know something's up.
	}
    }

    /* Update our copy of the papi counter values, so that we do so
       only once per pcp-fetch batch.  Though it's relatively cheap,
       and harmless even if the incoming pcp-fetch is for non-counter
       pcp metrics, we do this only for CLUSTER_PAPI pmids.  This is
       independent of auto-enable mode. */
    for (i=0; i<numpmid; i++) {
	__pmID_int *idp = (__pmID_int *)&(pmidlist[i]);
	if (idp->cluster == CLUSTER_PAPI) {
	    sts = check_papi_state();
	    if (sts & PAPI_RUNNING) {
		sts = PAPI_read(EventSet, values);
		if (sts != PAPI_OK) {
		    __pmNotifyErr(LOG_ERR, "PAPI_read: %s\n", PAPI_strerror(sts));
		    // NOTE(review): this early return skips
		    // __pmAFunblock() below — verify alarms are not
		    // left blocked on this error path.
		    return PM_ERR_VALUE;
		}
	    }
	    break; /* No need to look at other pmids. */
	}
    }

    sts = 0; /* clear out any PAPI remnant flags */

    /* permission check is waived only when every requested pmid is in
       CLUSTER_AVAILABLE (i.e. pure metadata, no counter data) */
    for (i = 0; i < numpmid; i++) {
	__pmID_int *idp = (__pmID_int *)&(pmidlist[i]);
	if (idp->cluster != CLUSTER_AVAILABLE)
	    sts = 1;
    }
    if (sts == 0 || permission_check(pmda->e_context))
	sts = pmdaFetch(numpmid, pmidlist, resp, pmda);
    else
	sts = PM_ERR_PERMISSION;

    __pmAFunblock();
    return sts;
}
/*
 * wrapper for pmdaFetch which refreshes the metrics
 *
 * Scans the mail queue directory (at most once every 30 seconds),
 * counting queued messages and binning each message's age into the
 * histo[] delay histogram, then delegates to pmdaFetch.  Uses the
 * mq_regex filter when regexstring is set, otherwise the classic
 * sendmail "df*" file-name convention.
 */
static int
mailq_fetch(int numpmid, pmID pmidlist[], pmResult **resp, pmdaExt *pmda)
{
    static int		warn = 0;	/* suppresses repeated chdir warnings */
    int			num;
    int			i;
    int			b;
    struct stat		sbuf;
    time_t		now;
    static time_t	last_refresh = 0;
    time_t		waiting;
    char		*p;
    struct dirent	**list;

    time(&now);

    /* clip refresh rate to at most once per 30 seconds */
    if (now - last_refresh > 30) {
	last_refresh = now;

	/* reset counters before the rescan */
	queue = 0;
	for (b = 0; b < numhisto; b++)
	    histo[b].count = 0;

	if (chdir(queuedir) < 0) {
	    /* warn once until chdir succeeds again */
	    if (warn == 0) {
		__pmNotifyErr(LOG_ERR, "chdir(\"%s\") failed: %s\n",
			queuedir, osstrerror());
		warn = 1;
	    }
	}
	else {
	    if (warn == 1) {
		__pmNotifyErr(LOG_INFO, "chdir(\"%s\") success\n", queuedir);
		warn = 0;
	    }
	    /* unfiltered, unsorted scan; filtering happens below */
	    num = scandir(".", &list, NULL, NULL);

	    for (i = 0; i < num; i++) {
		p = list[i]->d_name;
		/* only file names that match the regular expression */
		if (regexstring && regexec(&mq_regex, list[i]->d_name, 0, NULL, 0))
		    continue;
		else if (!regexstring && (*p != 'd' || *(p+1) != 'f'))
		    continue;
		if (stat(p, &sbuf) != 0) {
		    /*
		     * ENOENT expected sometimes if sendmail is doing its job
		     */
		    if (oserror() == ENOENT)
			continue;
		    fprintf(stderr, "stat(\"%s\"): %s\n", p, osstrerror());
		    continue;
		}
		if (sbuf.st_size > 0 && S_ISREG(sbuf.st_mode)) {
		    /* really in the queue */
		    /* platform-specific spelling of the mtime field */
#if defined(HAVE_ST_MTIME_WITH_E)
		    waiting = now - sbuf.st_mtime;
#elif defined(HAVE_ST_MTIME_WITH_SPEC)
		    waiting = now - sbuf.st_mtimespec.tv_sec;
#else
		    waiting = now - sbuf.st_mtim.tv_sec;
#endif
		    /* first-match binning: histo[] delays ordered so
		     * that the first bucket with delay <= waiting wins */
		    for (b = 0; b < numhisto; b++) {
			if (waiting >= histo[b].delay) {
			    histo[b].count++;
			    break;
			}
		    }
		    queue++;
		}
	    }
	    /* scandir allocates each entry and the vector itself */
	    for (i = 0; i < num; i++)
		free(list[i]);
	    if (num > 0)
		free(list);
	}
	if (chdir(startdir) < 0) {
	    __pmNotifyErr(LOG_ERR, "chdir(\"%s\") failed: %s\n",
			startdir, osstrerror());
	}
    }

    return pmdaFetch(numpmid, pmidlist, resp, pmda);
}