/*
 * kstat update function for the C-state info kstat.
 */
static int
cpu_idle_kstat_update(kstat_t *ksp, int flag)
{
	cpu_acpi_cstate_t *cstate = ksp->ks_private;

	if (flag == KSTAT_WRITE) {
		return (EACCES);
	}

	/* Report the ACPI address space used to enter this C-state. */
	if (cstate->cs_addrspace_id == ACPI_ADR_SPACE_FIXED_HARDWARE) {
		kstat_named_setstr(&cpu_idle_kstat.addr_space_id,
		    "FFixedHW");
	} else if (cstate->cs_addrspace_id == ACPI_ADR_SPACE_SYSTEM_IO) {
		kstat_named_setstr(&cpu_idle_kstat.addr_space_id,
		    "SystemIO");
	} else {
		kstat_named_setstr(&cpu_idle_kstat.addr_space_id,
		    "Unsupported");
	}

	cpu_idle_kstat.cs_latency.value.ui32 = cstate->cs_latency;
	cpu_idle_kstat.cs_power.value.ui32 = cstate->cs_power;

	return (0);
}
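/*
 * Illustrative sketch, not part of the original source: one plausible way a
 * per-CPU C-state kstat is created and bound to cpu_idle_kstat_update().
 * The "cstate" module name, "misc" class, cstate_name argument, and the
 * cpu_idle_mutex lock are assumptions for illustration only.  Because every
 * CPU shares the static cpu_idle_kstat buffer, ks_lock serializes readers.
 */
static kstat_t *
cpu_idle_kstat_create(cpu_t *cp, cpu_acpi_cstate_t *cstate,
    const char *cstate_name)
{
	kstat_t *ksp;

	ksp = kstat_create("cstate", cp->cpu_id, cstate_name, "misc",
	    KSTAT_TYPE_NAMED,
	    sizeof (cpu_idle_kstat) / sizeof (kstat_named_t),
	    KSTAT_FLAG_VIRTUAL);
	if (ksp == NULL)
		return (NULL);

	ksp->ks_data = &cpu_idle_kstat;	/* shared snapshot buffer */
	ksp->ks_lock = &cpu_idle_mutex;	/* assumed global kmutex_t */
	ksp->ks_update = cpu_idle_kstat_update;
	ksp->ks_private = cstate;	/* consumed by the update function */
	kstat_install(ksp);
	return (ksp);
}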
/*
 * Create and install the per-task "nprocs" resource-control kstat.  The
 * zone name is exported as a KSTAT_DATA_STRING, so its length is added
 * to ks_data_size.
 */
static kstat_t *
task_kstat_create(task_t *tk, zone_t *zone)
{
	kstat_t *ksp;
	task_kstat_t *ktk;
	char *zonename = zone->zone_name;

	ksp = rctl_kstat_create_task(tk, "nprocs", KSTAT_TYPE_NAMED,
	    sizeof (task_kstat_t) / sizeof (kstat_named_t),
	    KSTAT_FLAG_VIRTUAL);

	if (ksp == NULL)
		return (NULL);

	ktk = ksp->ks_data = kmem_alloc(sizeof (task_kstat_t), KM_SLEEP);
	ksp->ks_data_size += strlen(zonename) + 1;
	kstat_named_init(&ktk->ktk_zonename, "zonename", KSTAT_DATA_STRING);
	kstat_named_setstr(&ktk->ktk_zonename, zonename);
	kstat_named_init(&ktk->ktk_usage, "usage", KSTAT_DATA_UINT64);
	kstat_named_init(&ktk->ktk_value, "value", KSTAT_DATA_UINT64);
	ksp->ks_update = task_nprocs_kstat_update;
	ksp->ks_private = tk;
	kstat_install(ksp);
	return (ksp);
}
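/*
 * Illustrative sketch, not part of the original source: a plausible shape
 * for the task_nprocs_kstat_update() callback referenced above.  The
 * tk_nprocs and tk_nprocs_ctl fields are assumptions standing in for
 * however the task actually tracks its current and permitted process
 * counts.
 */
static int
task_nprocs_kstat_update(kstat_t *ksp, int rw)
{
	task_t *tk = ksp->ks_private;
	task_kstat_t *ktk = ksp->ks_data;

	if (rw == KSTAT_WRITE)
		return (EACCES);

	ktk->ktk_usage.value.ui64 = tk->tk_nprocs;	/* assumed field */
	ktk->ktk_value.value.ui64 = tk->tk_nprocs_ctl;	/* assumed field */
	return (0);
}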
/*
 * Convert internal cap statistics into values exported by cap kstat.
 * Note that the kstat is held throughout this function but caps_lock is not.
 */
static int
cap_kstat_update(kstat_t *ksp, int rw)
{
	struct cap_kstat *capsp = &cap_kstat;
	cpucap_t *cap = ksp->ks_private;
	clock_t tick_sec = SEC_TO_TICK(1);
	char *zonename = cap->cap_zone->zone_name;

	if (rw == KSTAT_WRITE)
		return (EACCES);

	capsp->cap_value.value.ui64 =
	    ROUND_SCALE(cap->cap_value, cap_tick_cost);
	capsp->cap_baseline.value.ui64 =
	    ROUND_SCALE(cap->cap_base, cap_tick_cost);
	capsp->cap_effective.value.ui64 =
	    ROUND_SCALE(cap->cap_chk_value, cap_tick_cost);
	capsp->cap_burst_limit.value.ui64 =
	    ROUND_SCALE(cap->cap_burst_limit, tick_sec);
	capsp->cap_usage.value.ui64 =
	    ROUND_SCALE(cap->cap_usage, cap_tick_cost);
	capsp->cap_maxusage.value.ui64 =
	    ROUND_SCALE(cap->cap_maxusage, cap_tick_cost);
	capsp->cap_nwait.value.ui64 = cap->cap_waitq.wq_count;
	capsp->cap_below.value.ui64 = ROUND_SCALE(cap->cap_below, tick_sec);
	capsp->cap_above.value.ui64 = ROUND_SCALE(cap->cap_above, tick_sec);
	capsp->cap_above_base.value.ui64 =
	    ROUND_SCALE(cap->cap_above_base, tick_sec);
	capsp->cap_bursting.value.ui64 =
	    ROUND_SCALE(cap->cap_bursting, tick_sec);
	kstat_named_setstr(&capsp->cap_zonename, zonename);

	return (0);
}
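/*
 * Illustrative sketch, not part of the original source: ROUND_SCALE() is
 * not defined in this excerpt.  A rounding integer division of the
 * following shape is consistent with how it is used above (scaling
 * tick-based counters down by cap_tick_cost or by ticks-per-second);
 * treat it as an assumption rather than the actual definition.
 */
#define	ROUND_SCALE(x, y)	(((x) + (y) / 2) / (y))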
/*
 * kstat update function for the hardware sharing relationship (pghw)
 * processor group kstat.
 */
int
pghw_kstat_update(kstat_t *ksp, int rw)
{
	struct pghw_kstat *pgsp = &pghw_kstat;
	pghw_t *pg = ksp->ks_private;

	if (rw == KSTAT_WRITE)
		return (EACCES);

	pgsp->pg_id.value.ui64 = ((pg_t *)pg)->pg_id;
	pgsp->pg_ncpus.value.ui64 = GROUP_SIZE(&((pg_t *)pg)->pg_cpus);
	pgsp->pg_instance_id.value.ui64 = (uint64_t)pg->pghw_instance;
	kstat_named_setstr(&pgsp->pg_class, ((pg_t *)pg)->pg_class->pgc_name);
	kstat_named_setstr(&pgsp->pg_hw, pghw_type_string(pg->pghw_hw));
	kstat_named_setstr(&pgsp->pg_policy, pg_policy_name((pg_t *)pg));
	return (0);
}
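/*
 * Illustrative sketch, not part of the original source: how a userland
 * consumer could read a named kstat such as the one filled in above via
 * libkstat(3LIB).  The "pg" module, "hardware" name, and "policy"
 * statistic are placeholders; they must match whatever kstat_create()
 * actually registered for this group.
 */
#include <kstat.h>
#include <stdio.h>

int
print_pg_policy(int instance)
{
	kstat_ctl_t *kc;
	kstat_t *ksp;
	kstat_named_t *kn;

	if ((kc = kstat_open()) == NULL)
		return (-1);
	if ((ksp = kstat_lookup(kc, "pg", instance, "hardware")) == NULL ||
	    kstat_read(kc, ksp, NULL) == -1) {
		(void) kstat_close(kc);
		return (-1);
	}
	/* kstat_read() caused the kernel-side ks_update callback to run. */
	if ((kn = kstat_data_lookup(ksp, "policy")) != NULL &&
	    kn->data_type == KSTAT_DATA_STRING)
		(void) printf("policy: %s\n", KSTAT_NAMED_STR_PTR(kn));
	(void) kstat_close(kc);
	return (0);
}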