/*
 * Read the current values of all configured counter metrics for the
 * given metric-value set into values[0..nmetrics-1].  When offsets is
 * non-NULL, each offsets[i] is added onto the corresponding value.
 * A NULL metv is a no-op; CPC failures are reported via vt_error_msg().
 */
void vt_metric_read(struct vt_metv* metv, uint64_t offsets[], uint64_t values[])
{
  int idx = 0;

  if ( metv == NULL )
    return;

  /* sample the hardware counters of this set into the CPC buffer */
  if ( cpc_set_sample(cpc, metv->set, metv->buffer) == -1 )
    vt_error_msg("cpc_set_sample: %s", strerror(errno));

  /* pull each 64-bit counter value out of the buffer; stop on first failure */
  while ( idx < nmetrics &&
          cpc_buf_get(cpc, metv->buffer, metv->indices[idx],
                      &values[idx]) != -1 )
    idx++;

  if ( idx != nmetrics )
    vt_error_msg("cpc_buf_get: %s", strerror(errno));

  /* fold in the per-metric start offsets, if the caller supplied any */
  if ( offsets != NULL )
  {
    for ( idx = 0; idx < nmetrics; idx++ )
      values[idx] += offsets[idx];
  }
}
/*
 * Per-CPU sampling thread body.  Binds the thread's CPC set group to its
 * CPU, takes counter samples on a fixed period, prints each sample, and
 * (when requested) coordinates a "soaker" busy-loop thread so that
 * system-only counter sets have work to measure.  Returns NULL; on error
 * sets state->status = 3 and reports via stderr.
 */
static void *
gtick(void *arg)
{
	struct tstate *state = arg;
	char *errstr;
	uint_t nsamples;
	uint_t sample_cnt = 1;
	hrtime_t ht, htdelta, restdelta;
	cpc_setgrp_t *sgrp = state->sgrp;
	cpc_set_t *this = cpc_setgrp_getset(sgrp);
	const char *name = cpc_setgrp_getname(sgrp);
	cpc_buf_t **data1, **data2, **scratch;
	cpc_buf_t *tmp;
	int nreqs;
	thread_t tid;

	/* sampling period and rest period, converted from ms to ns */
	htdelta = NSECS_PER_MSEC * opts->mseconds;
	restdelta = NSECS_PER_MSEC * opts->mseconds_rest;
	ht = gethrtime();

	/*
	 * If this CPU is SMT, we run one gtick() thread per _physical_ CPU,
	 * instead of per cpu_t. The following check returns if it detects that
	 * this cpu_t has not been designated to do the counting for this
	 * physical CPU.
	 */
	if (smt && chip_designees[state->chip_id] != state->cpuid)
		return (NULL);

	/*
	 * If we need to run a soaker thread on this CPU, start it here.
	 */
	if (opts->dosoaker) {
		if (cond_init(&state->soak_cv, USYNC_THREAD, NULL) != 0)
			goto bad;
		if (mutex_init(&state->soak_lock, USYNC_THREAD, NULL) != 0)
			goto bad;
		(void) mutex_lock(&state->soak_lock);
		state->soak_state = SOAK_PAUSE;
		if (thr_create(NULL, 0, soaker, state, NULL, &tid) != 0)
			goto bad;
		/*
		 * Wait (under soak_lock) until the soaker thread signals that
		 * it has left the PAUSE state, i.e. it is up and running.
		 */
		while (state->soak_state == SOAK_PAUSE)
			(void) cond_wait(&state->soak_cv, &state->soak_lock);
		(void) mutex_unlock(&state->soak_lock);
		/*
		 * If the soaker needs to pause for the first set, stop it now.
		 */
		if (cpc_setgrp_sysonly(sgrp) == 0) {
			(void) mutex_lock(&state->soak_lock);
			state->soak_state = SOAK_PAUSE;
			(void) mutex_unlock(&state->soak_lock);
		}
	}
	/* bind the first counter set to this CPU before entering the loop */
	if (cpc_bind_cpu(cpc, state->cpuid, this, 0) == -1)
		goto bad;

	for (nsamples = opts->nsamples; nsamples; nsamples--, sample_cnt++) {
		hrtime_t htnow;
		struct timespec ts;

		nreqs = cpc_setgrp_getbufs(sgrp, &data1, &data2, &scratch);

		/*
		 * Advance the absolute deadline by one period; if we are
		 * already past it, skip this sample entirely rather than
		 * trying to catch up.
		 */
		ht += htdelta;
		htnow = gethrtime();
		if (ht <= htnow)
			continue;
		ts.tv_sec = (time_t)((ht - htnow) / NSECS_PER_SEC);
		/*
		 * NOTE(review): the nanoseconds remainder is cast to
		 * suseconds_t rather than the tv_nsec field's own type —
		 * presumably harmless here since the value is < 1e9, but
		 * worth confirming.
		 */
		ts.tv_nsec = (suseconds_t)((ht - htnow) % NSECS_PER_SEC);
		(void) nanosleep(&ts, NULL);

		if (opts->nsets == 1) {
			/*
			 * If we're dealing with one set, buffer usage is:
			 *
			 * data1 = most recent data snapshot
			 * data2 = previous data snapshot
			 * scratch = used for diffing data1 and data2
			 *
			 * Save the snapshot from the previous sample in data2
			 * before putting the current sample in data1.
			 */
			tmp = *data1;
			*data1 = *data2;
			*data2 = tmp;
			if (cpc_set_sample(cpc, this, *data1) != 0)
				goto bad;
			/* scratch = data1 - data2: counts for this interval */
			cpc_buf_sub(cpc, *scratch, *data1, *data2);
			print_sample(state->cpuid, *scratch, nreqs, name, 0);
		} else {
			/*
			 * More than one set is in use (multiple -c options
			 * given). Buffer usage in this case is:
			 *
			 * data1 = total counts for this set since program began
			 * data2 = unused
			 * scratch = most recent data snapshot
			 */
			name = cpc_setgrp_getname(sgrp);
			nreqs = cpc_setgrp_getbufs(sgrp, &data1, &data2,
			    &scratch);
			if (cpc_set_sample(cpc, this, *scratch) != 0)
				goto bad;
			/* accumulate this snapshot into the running total */
			cpc_buf_add(cpc, *data1, *data1, *scratch);
			/* unbind the current set so the next one can bind */
			if (cpc_unbind(cpc, this) != 0)
				(void) fprintf(stderr, gettext("%s: error "
				    "unbinding on cpu %d - %s\n"),
				    opts->pgmname, state->cpuid,
				    strerror(errno));
			this = cpc_setgrp_nextset(sgrp);
			print_sample(state->cpuid, *scratch, nreqs, name, 0);
			/*
			 * If periodic behavior was requested, rest here.
			 */
			if (opts->doperiod && opts->mseconds_rest > 0 &&
			    (sample_cnt % opts->nsets) == 0) {
				/*
				 * Stop the soaker while the tool rests.
				 */
				if (opts->dosoaker) {
					(void) mutex_lock(&state->soak_lock);
					if (state->soak_state == SOAK_RUN)
						state->soak_state = SOAK_PAUSE;
					(void) mutex_unlock(
					    &state->soak_lock);
				}
				/* sleep for the rest period, then resume */
				htnow = gethrtime();
				ht += restdelta;
				ts.tv_sec = (time_t)((ht - htnow) /
				    NSECS_PER_SEC);
				ts.tv_nsec = (suseconds_t)((ht - htnow) %
				    NSECS_PER_SEC);
				(void) nanosleep(&ts, NULL);
			}
			/*
			 * Start or stop the soaker if needed.
			 */
			if (opts->dosoaker) {
				(void) mutex_lock(&state->soak_lock);
				if (cpc_setgrp_sysonly(sgrp) &&
				    state->soak_state == SOAK_PAUSE) {
					/*
					 * Soaker is paused but the next set is
					 * sysonly: start the soaker.
					 */
					state->soak_state = SOAK_RUN;
					(void) cond_signal(&state->soak_cv);
				} else if (cpc_setgrp_sysonly(sgrp) == 0 &&
				    state->soak_state == SOAK_RUN)
					/*
					 * Soaker is running but the next set
					 * counts user events: stop the soaker.
					 */
					state->soak_state = SOAK_PAUSE;
				(void) mutex_unlock(&state->soak_lock);
			}
			/* bind the next set for the following iteration */
			if (cpc_bind_cpu(cpc, state->cpuid, this, 0) != 0)
				goto bad;
		}
	}
	/* done sampling: release the CPC binding on this CPU */
	if (cpc_unbind(cpc, this) != 0)
		(void) fprintf(stderr, gettext("%s: error unbinding on"
		    " cpu %d - %s\n"), opts->pgmname, state->cpuid,
		    strerror(errno));
	/*
	 * We're done, so stop the soaker if needed.
	 */
	if (opts->dosoaker) {
		(void) mutex_lock(&state->soak_lock);
		if (state->soak_state == SOAK_RUN)
			state->soak_state = SOAK_PAUSE;
		(void) mutex_unlock(&state->soak_lock);
	}
	return (NULL);
bad:
	/* record failure for the joining thread and report the errno text */
	state->status = 3;
	errstr = strerror(errno);
	(void) fprintf(stderr, gettext("%s: cpu%d - %s\n"),
	    opts->pgmname, state->cpuid, errstr);
	return (NULL);
}