/*
 * Create and install the inbound/outbound ipf filter kstats for this
 * stack instance.  Both kstats are seeded from the ipf_kstat_tmp
 * template; each one's ks_private points at the matching ifs_frstats
 * slot so ipf_kstat_update() can refresh it on demand.  Creation
 * failure of either kstat is tolerated (the slot stays NULL).
 *
 * The original open-coded the inbound and outbound cases as two
 * identical copy-pasted stanzas; they are folded into one loop.
 */
static void
ipf_kstat_init(ipf_stack_t *ifs, boolean_t from_gz)
{
	static char *names[2] = { "inbound", "outbound" };
	int i;

	for (i = 0; i < 2; i++) {
		ifs->ifs_kstatp[i] = net_kstat_create(ifs->ifs_netid,
		    (from_gz ? "ipf_gz" : "ipf"), 0, names[i], "net",
		    KSTAT_TYPE_NAMED,
		    sizeof (filter_kstats_t) / sizeof (kstat_named_t), 0);
		if (ifs->ifs_kstatp[i] != NULL) {
			bcopy(&ipf_kstat_tmp, ifs->ifs_kstatp[i]->ks_data,
			    sizeof (filter_kstats_t));
			ifs->ifs_kstatp[i]->ks_update = ipf_kstat_update;
			ifs->ifs_kstatp[i]->ks_private =
			    &ifs->ifs_frstats[i];
			kstat_install(ifs->ifs_kstatp[i]);
		}
	}

#ifdef IPFDEBUG
	cmn_err(CE_NOTE, "IP Filter: ipf_kstat_init(%p) installed %p, %p",
	    ifs, ifs->ifs_kstatp[0], ifs->ifs_kstatp[1]);
#endif
}
/*
 * Create the "mac" kstat. The "mac" kstat is comprised of three kinds of
 * statistics: statistics maintained by the mac module itself, generic mac
 * statistics maintained by the driver, and MAC-type specific statistics
 * also maintained by the driver.
 */
void
mac_driver_stat_create(mac_impl_t *mip)
{
	kstat_t		*ksp;
	kstat_named_t	*knp;
	uint_t		count;
	major_t		major = getmajor(mip->mi_phy_dev);

	/* one entry per mac-module, generic, and MAC-type specific stat */
	count = MAC_MOD_NKSTAT + MAC_NKSTAT + mip->mi_type->mt_statcount;
	ksp = kstat_create_zone((const char *)ddi_major_to_name(major),
	    getminor(mip->mi_phy_dev) - 1, MAC_KSTAT_NAME,
	    MAC_KSTAT_CLASS, KSTAT_TYPE_NAMED, count, 0, getzoneid());
	if (ksp == NULL)
		return;		/* stats are optional; tolerate failure */

	ksp->ks_update = i_mac_driver_stat_update;
	ksp->ks_private = mip;
	mip->mi_ksp = ksp;
	mip->mi_kstat_count = count;

	/* the three stat groups are laid out back-to-back in ks_data */
	knp = (kstat_named_t *)ksp->ks_data;
	i_mac_kstat_init(knp, i_mac_mod_si, MAC_MOD_NKSTAT);
	knp += MAC_MOD_NKSTAT;

	i_mac_kstat_init(knp, i_mac_si, MAC_NKSTAT);

	if (mip->mi_type->mt_statcount > 0) {
		knp += MAC_NKSTAT;
		i_mac_kstat_init(knp, mip->mi_type->mt_stats,
		    mip->mi_type->mt_statcount);
	}

	kstat_install(ksp);
}
static void ii_create_kstats() { /* create global info structure */ if (!ii_gkstat) { ii_gkstat = kstat_create("ii", 0, "global", "StorEdge", KSTAT_TYPE_NAMED, sizeof (iigkstat) / sizeof (kstat_named_t), KSTAT_FLAG_VIRTUAL); if (ii_gkstat) { ii_gkstat->ks_data = &iigkstat; ii_gkstat->ks_update = ii_stats_update; ii_gkstat->ks_private = 0; kstat_install(ii_gkstat); /* fill in immutable values */ iigkstat.ii_debug.value.ul = ii_debug; iigkstat.ii_bitmap.value.ul = ii_bitmap; iigkstat.ii_throttle_unit.value.ul = ii_throttle_unit; iigkstat.ii_throttle_delay.value.ul = ii_throttle_delay; iigkstat.ii_copy_direct.value.ul = ii_copy_direct; } else { cmn_err(CE_WARN, "!Unable to create II global stats"); } } }
/*
 * Enable cap for a zone
 * It is safe to enable already enabled zone cap.
 * Should be called with caps_lock held.
 */
static void
cap_zone_enable(zone_t *zone, hrtime_t value)
{
	cpucap_t *cap = zone->zone_cpucap;

	ASSERT(MUTEX_HELD(&caps_lock));
	ASSERT(cap != NULL);

	if (CAP_DISABLED(cap)) {
		/* a disabled cap never has a kstat attached */
		ASSERT(cap->cap_kstat == NULL);

		cap_enable(&capped_zones, cap, value);
		cap->cap_zone = zone;

		/*
		 * Create cap kstats
		 */
		if ((cap->cap_kstat = rctl_kstat_create_zone(zone, "cpucaps",
		    KSTAT_TYPE_NAMED,
		    sizeof (cap_kstat) / sizeof (kstat_named_t),
		    KSTAT_FLAG_VIRTUAL)) != NULL) {
			/* reserve room for the zone name string */
			cap->cap_kstat->ks_data_size +=
			    strlen(cap->cap_zone->zone_name) + 1;
			cap->cap_kstat->ks_lock = &cap_kstat_lock;
			cap->cap_kstat->ks_data = &cap_kstat;
			cap->cap_kstat->ks_update = cap_kstat_update;
			cap->cap_kstat->ks_private = cap;
			kstat_install(cap->cap_kstat);
		}
	}
}
/*
 * Allocate and install the per-task "nprocs" kstat.  The kstat's data
 * area is heap-allocated here, and ks_data_size is grown to cover the
 * zone-name string referenced by the "zonename" entry.  Returns the
 * installed kstat, or NULL if it could not be created.
 */
static kstat_t *
task_kstat_create(task_t *tk, zone_t *zone)
{
	kstat_t		*nprocs_ksp;
	task_kstat_t	*tkp;
	char		*zname = zone->zone_name;

	nprocs_ksp = rctl_kstat_create_task(tk, "nprocs", KSTAT_TYPE_NAMED,
	    sizeof (task_kstat_t) / sizeof (kstat_named_t),
	    KSTAT_FLAG_VIRTUAL);
	if (nprocs_ksp == NULL)
		return (NULL);

	tkp = nprocs_ksp->ks_data = kmem_alloc(sizeof (task_kstat_t),
	    KM_SLEEP);
	/* account for the zone-name string hanging off the named kstat */
	nprocs_ksp->ks_data_size += strlen(zname) + 1;

	kstat_named_init(&tkp->ktk_zonename, "zonename", KSTAT_DATA_STRING);
	kstat_named_setstr(&tkp->ktk_zonename, zname);
	kstat_named_init(&tkp->ktk_usage, "usage", KSTAT_DATA_UINT64);
	kstat_named_init(&tkp->ktk_value, "value", KSTAT_DATA_UINT64);

	nprocs_ksp->ks_update = task_nprocs_kstat_update;
	nprocs_ksp->ks_private = tk;
	kstat_install(nprocs_ksp);

	return (nprocs_ksp);
}
/*
 * Create and install the per-instance "drminfo" named kstat.  Each
 * entry of drmkstat_name (a NULL-terminated name table) becomes one
 * uint32 statistic; values are refreshed by drm_kstat_update().
 *
 * Returns 0 on success, -1 if the kstat could not be created.
 *
 * Fix: the original returned (NULL) — i.e. 0 — on kstat_create()
 * failure, which made failure indistinguishable from success for any
 * caller checking a non-zero return.
 */
int
drm_init_kstats(drm_device_t *sc)
{
	int instance;
	kstat_t *ksp;
	kstat_named_t *knp;
	char *np;
	char **aknp;

	instance = ddi_get_instance(sc->dip);
	aknp = drmkstat_name;

	/* drmkstat_name is NULL-terminated, so subtract the sentinel */
	ksp = kstat_create("drm", instance, "drminfo", "drm",
	    KSTAT_TYPE_NAMED, sizeof (drmkstat_name)/sizeof (char *) - 1,
	    KSTAT_FLAG_PERSISTENT);
	if (ksp == NULL)
		return (-1);

	ksp->ks_private = sc;
	ksp->ks_update = drm_kstat_update;

	for (knp = ksp->ks_data; (np = (*aknp)) != NULL; knp++, aknp++) {
		kstat_named_init(knp, np, KSTAT_DATA_UINT32);
	}
	kstat_install(ksp);

	sc->asoft_ksp = ksp;

	return (0);
}
/*
 * Create the per-pool "dmu_tx_assign-<pool>" histogram kstat with
 * ndata latency buckets; bucket i is labelled "<1<<i> us".  The bucket
 * array is allocated here and lent to the (virtual) kstat as its data
 * area, so it must outlive the kstat.
 */
static void
dsl_pool_tx_assign_init(dsl_pool_t *dp, unsigned int ndata)
{
	kstat_named_t *ks;
	char name[KSTAT_STRLEN];
	int i, data_size = ndata * sizeof(kstat_named_t);

	(void) snprintf(name, KSTAT_STRLEN, "dmu_tx_assign-%s",
	    spa_name(dp->dp_spa));

	dp->dp_tx_assign_size = ndata;

	if (data_size)
		dp->dp_tx_assign_buckets = kmem_alloc(data_size, KM_SLEEP);
	else
		dp->dp_tx_assign_buckets = NULL;

	/* label each bucket with its power-of-two bound in microseconds */
	for (i = 0; i < dp->dp_tx_assign_size; i++) {
		ks = &dp->dp_tx_assign_buckets[i];
		ks->data_type = KSTAT_DATA_UINT64;
		ks->value.ui64 = 0;
		(void) snprintf(ks->name, KSTAT_STRLEN, "%u us", 1 << i);
	}

	/*
	 * Virtual kstat: it borrows dp_tx_assign_buckets rather than
	 * allocating its own data area.
	 */
	dp->dp_tx_assign_kstat = kstat_create("zfs", 0, name, "misc",
	    KSTAT_TYPE_NAMED, 0, KSTAT_FLAG_VIRTUAL);

	if (dp->dp_tx_assign_kstat) {
		dp->dp_tx_assign_kstat->ks_data = dp->dp_tx_assign_buckets;
		dp->dp_tx_assign_kstat->ks_ndata = dp->dp_tx_assign_size;
		dp->dp_tx_assign_kstat->ks_data_size = data_size;
		kstat_install(dp->dp_tx_assign_kstat);
	}
}
/*
 * Allocate the per-CPU turbo bookkeeping structure and install its
 * "turbo" kstat.  The kstat is virtual: it shares the global
 * turbo_kstat template under turbo_mutex, and turbo_kstat_update()
 * reaches this CPU's state via ks_private.  kstat creation failure is
 * logged but tolerated; the info structure is returned either way.
 */
cpupm_mach_turbo_info_t *
cpupm_turbo_init(cpu_t *cp)
{
	cpupm_mach_turbo_info_t *tip;
	kstat_t *tksp;

	tip = kmem_zalloc(sizeof (cpupm_mach_turbo_info_t), KM_SLEEP);
	tip->turbo_supported = 1;

	tksp = kstat_create("turbo", cp->cpu_id, "turbo", "misc",
	    KSTAT_TYPE_NAMED, sizeof (turbo_kstat) / sizeof (kstat_named_t),
	    KSTAT_FLAG_VIRTUAL);
	tip->turbo_ksp = tksp;

	if (tksp == NULL) {
		cmn_err(CE_NOTE, "kstat_create(turbo) fail");
	} else {
		tksp->ks_data = &turbo_kstat;
		tksp->ks_lock = &turbo_mutex;
		tksp->ks_update = turbo_kstat_update;
		tksp->ks_data_size += MAXNAMELEN;
		tksp->ks_private = tip;
		kstat_install(tksp);
	}

	return (tip);
}
/*
 * Create and install a named "net" kstat for this hxge instance,
 * initializing one entry per element of the ksip[] name table.
 * Returns the installed kstat, or NULL on failure.
 *
 * NOTE(review): the kstat is created with `count` entries but the init
 * loop stops at the first NULL name in ksip[] — presumably count and
 * the table's NULL terminator always agree; confirm against callers.
 */
static kstat_t *
hxge_setup_local_kstat(p_hxge_t hxgep, int instance, char *name,
    const hxge_kstat_index_t *ksip, size_t count,
    int (*update) (kstat_t *, int))
{
	kstat_t *ksp;
	kstat_named_t *knp;
	int i;

	ksp = kstat_create(HXGE_DRIVER_NAME, instance, name, "net",
	    KSTAT_TYPE_NAMED, count, 0);
	if (ksp == NULL)
		return (NULL);

	ksp->ks_private = (void *) hxgep;
	ksp->ks_update = update;
	knp = ksp->ks_data;

	/* name table is NULL-terminated */
	for (i = 0; ksip[i].name != NULL; i++) {
		kstat_named_init(&knp[i], ksip[i].name, ksip[i].type);
	}

	kstat_install(ksp);

	return (ksp);
}
/*
 * Creates and initializes all the structures needed by the framework.
 */
void
kcf_sched_init(void)
{
	int i;
	kcf_reqid_table_t *rt;

	/*
	 * Create all the kmem caches needed by the framework. We set the
	 * align argument to 64, to get a slab aligned to 64-byte as well as
	 * have the objects (cache_chunksize) to be a 64-byte multiple.
	 * This helps to avoid false sharing as this is the size of the
	 * CPU cache line.
	 */
	kcf_sreq_cache = kmem_cache_create("kcf_sreq_cache",
	    sizeof (struct kcf_sreq_node), 64, kcf_sreq_cache_constructor,
	    kcf_sreq_cache_destructor, NULL, NULL, NULL, 0);

	kcf_areq_cache = kmem_cache_create("kcf_areq_cache",
	    sizeof (struct kcf_areq_node), 64, kcf_areq_cache_constructor,
	    kcf_areq_cache_destructor, NULL, NULL, NULL, 0);

	kcf_context_cache = kmem_cache_create("kcf_context_cache",
	    sizeof (struct kcf_context), 64, kcf_context_cache_constructor,
	    kcf_context_cache_destructor, NULL, NULL, NULL, 0);

	/* Set up the (initially empty) global software request queue. */
	gswq = kmem_alloc(sizeof (kcf_global_swq_t), KM_SLEEP);

	mutex_init(&gswq->gs_lock, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&gswq->gs_cv, NULL, CV_DEFAULT, NULL);
	gswq->gs_njobs = 0;
	gswq->gs_maxjobs = kcf_maxthreads * crypto_taskq_maxalloc;
	gswq->gs_first = gswq->gs_last = NULL;

	/* Initialize the global reqid table */
	for (i = 0; i < REQID_TABLES; i++) {
		rt = kmem_zalloc(sizeof (kcf_reqid_table_t), KM_SLEEP);
		kcf_reqid_table[i] = rt;
		mutex_init(&rt->rt_lock, NULL, MUTEX_DEFAULT, NULL);
		rt->rt_curid = i;	/* seed ids per table */
	}

	/* Allocate and initialize the thread pool */
	kcfpool_alloc();

	/* Initialize the event notification list variables */
	mutex_init(&ntfy_list_lock, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&ntfy_list_cv, NULL, CV_DEFAULT, NULL);

	/* Create the kcf kstat */
	kcf_misc_kstat = kstat_create("kcf", 0, "framework_stats", "crypto",
	    KSTAT_TYPE_NAMED, sizeof (kcf_stats_t) / sizeof (kstat_named_t),
	    KSTAT_FLAG_VIRTUAL);

	if (kcf_misc_kstat != NULL) {
		kcf_misc_kstat->ks_data = &kcf_ksdata;
		kcf_misc_kstat->ks_update = kcf_misc_kstat_update;
		kstat_install(kcf_misc_kstat);
	}
}
/*
 * One-time initialization of the fletcher-4 checksum framework:
 * collect the implementations usable on this CPU, benchmark them
 * (kernel only) to select the fastest, and install the
 * "fletcher_4_bench" raw kstat that exposes the benchmark results.
 */
void
fletcher_4_init(void)
{
	/* benchmark buffer size: old SPA maximum block size */
	static const size_t data_size = 1 << SPA_OLD_MAXBLOCKSHIFT; /* 128kiB */
	fletcher_4_ops_t *curr_impl;
	char *databuf;
	int i, c;

	/* move supported impl into fletcher_4_supp_impls */
	for (i = 0, c = 0; i < ARRAY_SIZE(fletcher_4_impls); i++) {
		curr_impl = (fletcher_4_ops_t *) fletcher_4_impls[i];

		/* keep only impls whose valid() hook reports support */
		if (curr_impl->valid && curr_impl->valid())
			fletcher_4_supp_impls[c++] = curr_impl;
	}
	membar_producer();	/* complete fletcher_4_supp_impls[] init */
	fletcher_4_supp_impls_cnt = c;	/* number of supported impl */

#if !defined(_KERNEL)
	/* Skip benchmarking and use last implementation as fastest */
	memcpy(&fletcher_4_fastest_impl,
	    fletcher_4_supp_impls[fletcher_4_supp_impls_cnt-1],
	    sizeof (fletcher_4_fastest_impl));
	fletcher_4_fastest_impl.name = "fastest";
	membar_producer();

	fletcher_4_initialized = B_TRUE;

	/* Use 'cycle' math selection method for userspace */
	VERIFY0(fletcher_4_impl_set("cycle"));
	return;
#endif
	/* Benchmark all supported implementations */
	databuf = vmem_alloc(data_size, KM_SLEEP);
	/* fill with deterministic, non-trivial data */
	for (i = 0; i < data_size / sizeof (uint64_t); i++)
		((uint64_t *)databuf)[i] = (uintptr_t)(databuf+i);

	/* warm-up */
	fletcher_4_benchmark_impl(B_FALSE, databuf, data_size);

	fletcher_4_benchmark_impl(B_TRUE, databuf, data_size);

	vmem_free(databuf, data_size);

	/* install kstats for all implementations */
	fletcher_4_kstat = kstat_create("zfs", 0, "fletcher_4_bench",
	    "misc", KSTAT_TYPE_RAW, 0, KSTAT_FLAG_VIRTUAL);
	if (fletcher_4_kstat != NULL) {
		fletcher_4_kstat->ks_data = NULL;
		fletcher_4_kstat->ks_ndata = UINT32_MAX;
		kstat_set_raw_ops(fletcher_4_kstat,
		    fletcher_4_kstat_headers,
		    fletcher_4_kstat_data,
		    fletcher_4_kstat_addr);
		kstat_install(fletcher_4_kstat);
	}

	/* Finish initialization */
	fletcher_4_initialized = B_TRUE;
}
/*
 * Module initialization for LQFS logging: create the kmem caches,
 * bring up the logging infrastructure, install the logstats and
 * deltastats kstats, and set the system-wide CRB memory limit.
 */
void
lqfs_init(void)
{
	/* Create kmem caches */
	lqfs_sv = kmem_cache_create("lqfs_save", sizeof (lqfs_save_t),
	    0, NULL, NULL, NULL, NULL, NULL, 0);
	lqfs_bp = kmem_cache_create("lqfs_bufs", sizeof (lqfs_buf_t),
	    0, NULL, NULL, NULL, NULL, NULL, 0);

	sam_mutex_init(&log_mutex, NULL, MUTEX_DEFAULT, NULL);

	_init_top();

	/* hook the logging strategy routine if the symbol is present */
	if (&bio_lqfs_strategy != NULL) {
		bio_lqfs_strategy = (void (*) (void *, buf_t *)) lqfs_strategy;
	}

	/*
	 * Initialise general logging and delta kstats
	 */
	lqfs_logstats = kstat_create("sam-qfs", 0, "logstats", "fs",
	    KSTAT_TYPE_NAMED, sizeof (logstats) / sizeof (kstat_named_t),
	    KSTAT_FLAG_VIRTUAL);
	if (lqfs_logstats) {
		lqfs_logstats->ks_data = (void *) &logstats;
		kstat_install(lqfs_logstats);
	}

	lqfs_deltastats = kstat_create("sam-qfs", 0, "deltastats", "fs",
	    KSTAT_TYPE_NAMED, sizeof (dkstats) / sizeof (kstat_named_t),
	    KSTAT_FLAG_VIRTUAL);
	if (lqfs_deltastats) {
		lqfs_deltastats->ks_data = (void *) &dkstats;
		lqfs_deltastats->ks_update = delta_stats_update;
		kstat_install(lqfs_deltastats);
	}

	/*
	 * Set up the maximum amount of kmem that the crbs (system wide)
	 * can use.
	 */
	qfs_crb_limit = kmem_maxavail() / qfs_max_crb_divisor;
}
/*
 * Create the "counters" kstat for the given instance.  The kstat holds
 * the %pcr plus one entry per pic (hence num_pics + 1 named entries)
 * and is writable so that tools can program the pcr.  The caller's
 * register info is stashed in ks_private for the update function.
 * Returns the installed kstat, or NULL on failure.
 *
 * Fix: use bounded snprintf instead of sprintf for pic_str.
 */
static kstat_t *
ni_create_cntr_kstat(char *name, int instance,
    int (*update)(kstat_t *, int), void *ksinfop)
{
	struct kstat *counters_ksp;
	struct kstat_named *counters_named_data;
	char pic_str[10];
	int i;
	int num_pics = NUM_OF_PICS;

#ifdef DEBUG
	if (ni_perf_debug > 1)
		printf("ni_create_cntr_kstat: name: %s instance: %d\n",
		    name, instance);
#endif

	/*
	 * Size of kstat is num_pics + 1 as it
	 * also contains the %pcr
	 */
	if ((counters_ksp = kstat_create(name, instance, "counters", "bus",
	    KSTAT_TYPE_NAMED, num_pics + 1, KSTAT_FLAG_WRITABLE)) == NULL) {
		cmn_err(CE_WARN, "%s: kstat_create for %s%d failed",
		    cpu_module_name, name, instance);
		return (NULL);
	}

	counters_named_data = (struct kstat_named *)(counters_ksp->ks_data);

	/*
	 * Initialize the named kstats
	 */
	kstat_named_init(&counters_named_data[0], "pcr", KSTAT_DATA_UINT64);

	for (i = 0; i < num_pics; i++) {
		/* snprintf bounds pic_str (was unbounded sprintf) */
		(void) snprintf(pic_str, sizeof (pic_str), "pic%d", i);
		kstat_named_init(&counters_named_data[i+1], pic_str,
		    KSTAT_DATA_UINT64);
	}

	/*
	 * Store the register offsets in the kstat's
	 * private field so that they are available
	 * to the update function.
	 */
	counters_ksp->ks_private = (void *)ksinfop;
	counters_ksp->ks_update = update;

	kstat_install(counters_ksp);

	return (counters_ksp);
}
/*
 * Install the global "vdev_mirror_stats" kstat.  The kstat is virtual
 * and simply exposes the static mirror_stats counters; creation
 * failure is tolerated since the stats are purely informational.
 */
void
vdev_mirror_stat_init(void)
{
	mirror_ksp = kstat_create("zfs", 0, "vdev_mirror_stats", "misc",
	    KSTAT_TYPE_NAMED, sizeof (mirror_stats) / sizeof (kstat_named_t),
	    KSTAT_FLAG_VIRTUAL);
	if (mirror_ksp == NULL)
		return;

	mirror_ksp->ks_data = &mirror_stats;
	kstat_install(mirror_ksp);
}
/*
 * Create the picN kstat. Returns a pointer to the
 * kstat which the driver must store to allow it
 * to be deleted when necessary.
 *
 * Fixes: pass 0 (an integer) rather than the pointer constant NULL as
 * the kstat_create() flags argument, and use bounded snprintf for the
 * pic name.
 */
static kstat_t *
ni_create_picN_kstat(char *mod_name, int pic, int pic_sel_shift,
    int num_ev, ni_kev_mask_t *ev_array)
{
	struct kstat_named *pic_named_data;
	int inst = 0;
	int event;
	char pic_name[30];
	kstat_t *picN_ksp = NULL;

	(void) snprintf(pic_name, sizeof (pic_name), "pic%d", pic);

	if ((picN_ksp = kstat_create(mod_name, inst, pic_name,
	    "bus", KSTAT_TYPE_NAMED, num_ev, 0)) == NULL) {
		cmn_err(CE_WARN, "%s %s : kstat create failed",
		    mod_name, pic_name);

		/*
		 * It is up to the calling function to delete any kstats
		 * that may have been created already. We just
		 * return NULL to indicate an error has occurred.
		 */
		return (NULL);
	}

	pic_named_data = (struct kstat_named *)picN_ksp->ks_data;

	/*
	 * Write event names and their associated pcr masks. The
	 * last entry in the array (clear_pic) is added separately
	 * below as the pic value must be inverted.
	 */
	for (event = 0; event < num_ev - 1; event++) {
		pic_named_data[event].value.ui64 =
		    (ev_array[event].pcr_mask << pic_sel_shift);

		kstat_named_init(&pic_named_data[event],
		    ev_array[event].event_name, KSTAT_DATA_UINT64);
	}

	/*
	 * Add the clear_pic entry.
	 */
	pic_named_data[event].value.ui64 =
	    (uint64_t)~(ev_array[event].pcr_mask << pic_sel_shift);

	kstat_named_init(&pic_named_data[event],
	    ev_array[event].event_name, KSTAT_DATA_UINT64);

	kstat_install(picN_ksp);

	return (picN_ksp);
}
/*
 * Install the global "zfetchstats" kstat.  It is virtual and merely
 * exposes the static zfetch_stats counters; creation failure is
 * tolerated — prefetch operates without its statistics.
 */
void
zfetch_init(void)
{
	zfetch_ksp = kstat_create("zfs", 0, "zfetchstats", "misc",
	    KSTAT_TYPE_NAMED, sizeof (zfetch_stats) / sizeof (kstat_named_t),
	    KSTAT_FLAG_VIRTUAL);
	if (zfetch_ksp == NULL)
		return;

	zfetch_ksp->ks_data = &zfetch_stats;
	kstat_install(zfetch_ksp);
}
int kstat_osx_init(void) { osx_kstat_ksp = kstat_create("zfs", 0, "tunable", "darwin", KSTAT_TYPE_NAMED, sizeof (osx_kstat) / sizeof (kstat_named_t), KSTAT_FLAG_VIRTUAL|KSTAT_FLAG_WRITABLE); if (osx_kstat_ksp != NULL) { osx_kstat_ksp->ks_data = &osx_kstat; osx_kstat_ksp->ks_update = osx_kstat_update; kstat_install(osx_kstat_ksp); } return 0; }
/*
 * Install the global "dls_stat" kstat.  The kstat is virtual and backed
 * by the static dls_kstat structure.  Failure is logged but otherwise
 * tolerated: the module operates without its statistics.
 *
 * Fix: declare the parameter list as (void) instead of the obsolescent
 * empty ().
 */
static void
dls_stat_init(void)
{
	if ((dls_ksp = kstat_create("dls", 0, "dls_stat", "net",
	    KSTAT_TYPE_NAMED, sizeof (dls_kstat) / sizeof (kstat_named_t),
	    KSTAT_FLAG_VIRTUAL)) == NULL) {
		cmn_err(CE_WARN,
		    "DLS: failed to create kstat structure for dls stats");
		return;
	}
	dls_ksp->ks_data = (void *)&dls_kstat;
	kstat_install(dls_ksp);
}
/*
 * dcopy_stats_init()
 *    Create and install the per-channel "channelN" named kstat.  The
 *    kstat is virtual: its data area is the channel's embedded
 *    dcopy_stats_t.  The immutable id/capabilities values are filled
 *    in once after install.  Returns DCOPY_SUCCESS or DCOPY_FAILURE.
 */
static int
dcopy_stats_init(dcopy_handle_t channel)
{
#define CHANSTRSIZE 20
	char chanstr[CHANSTRSIZE];
	dcopy_stats_t *stats;
	int instance;
	char *name;

	stats = &channel->ch_stat;
	name = (char *)ddi_driver_name(channel->ch_device->dc_info.di_dip);
	instance = ddi_get_instance(channel->ch_device->dc_info.di_dip);

	(void) snprintf(chanstr, CHANSTRSIZE, "channel%d",
	    (uint32_t)channel->ch_info.qc_chan_num);

	channel->ch_kstat = kstat_create(name, instance, chanstr, "misc",
	    KSTAT_TYPE_NAMED,
	    sizeof (dcopy_stats_t) / sizeof (kstat_named_t),
	    KSTAT_FLAG_VIRTUAL);
	if (channel->ch_kstat == NULL) {
		return (DCOPY_FAILURE);
	}
	/* virtual kstat: lend it the channel-embedded stats structure */
	channel->ch_kstat->ks_data = stats;

	kstat_named_init(&stats->cs_bytes_xfer, "bytes_xfer",
	    KSTAT_DATA_UINT64);
	kstat_named_init(&stats->cs_cmd_alloc, "cmd_alloc",
	    KSTAT_DATA_UINT64);
	kstat_named_init(&stats->cs_cmd_post, "cmd_post",
	    KSTAT_DATA_UINT64);
	kstat_named_init(&stats->cs_cmd_poll, "cmd_poll",
	    KSTAT_DATA_UINT64);
	kstat_named_init(&stats->cs_notify_poll, "notify_poll",
	    KSTAT_DATA_UINT64);
	kstat_named_init(&stats->cs_notify_pending, "notify_pending",
	    KSTAT_DATA_UINT64);
	kstat_named_init(&stats->cs_id, "id", KSTAT_DATA_UINT64);
	kstat_named_init(&stats->cs_capabilities, "capabilities",
	    KSTAT_DATA_UINT64);
	kstat_install(channel->ch_kstat);

	/* these two values never change for the life of the channel */
	channel->ch_stat.cs_id.value.ui64 = channel->ch_info.qc_id;
	channel->ch_stat.cs_capabilities.value.ui64 =
	    channel->ch_info.qc_capabilities;

	return (DCOPY_SUCCESS);
}
/*
 * Attach routine for the mm (memory) pseudo-driver: create the minor
 * nodes (/dev/mem, kmem, allkmem, null, zero) with their access
 * policies, set up the mapping arena and lock, install the
 * "phys_installed" raw kstat, and read the kmem_io_access property.
 */
/*ARGSUSED1*/
static int
mm_attach(dev_info_t *devi, ddi_attach_cmd_t cmd)
{
	int i;

	/* table of minor nodes to create, with their access policy */
	struct mem_minor {
		char *name;
		minor_t minor;
		int privonly;
		const char *rdpriv;
		const char *wrpriv;
		mode_t priv_mode;
	} mm[] = {
		{ "mem", M_MEM, 0, NULL, "all", 0640 },
		{ "kmem", M_KMEM, 0, NULL, "all", 0640 },
		{ "allkmem", M_ALLKMEM, 0, "all", "all", 0600 },
		{ "null", M_NULL, PRIVONLY_DEV, NULL, NULL, 0666 },
		{ "zero", M_ZERO, PRIVONLY_DEV, NULL, NULL, 0666 },
	};
	kstat_t *ksp;

	mutex_init(&mm_lock, NULL, MUTEX_DEFAULT, NULL);
	mm_map = vmem_alloc(heap_arena, PAGESIZE, VM_SLEEP);

	for (i = 0; i < (sizeof (mm) / sizeof (mm[0])); i++) {
		if (ddi_create_priv_minor_node(devi, mm[i].name, S_IFCHR,
		    mm[i].minor, DDI_PSEUDO, mm[i].privonly,
		    mm[i].rdpriv, mm[i].wrpriv, mm[i].priv_mode) ==
		    DDI_FAILURE) {
			/* undo any minor nodes created so far */
			ddi_remove_minor_node(devi, NULL);
			return (DDI_FAILURE);
		}
	}

	mm_dip = devi;

	/* variable-size raw kstat describing installed physical memory */
	ksp = kstat_create("mm", 0, "phys_installed", "misc",
	    KSTAT_TYPE_RAW, 0, KSTAT_FLAG_VAR_SIZE | KSTAT_FLAG_VIRTUAL);
	if (ksp != NULL) {
		ksp->ks_update = mm_kstat_update;
		ksp->ks_snapshot = mm_kstat_snapshot;
		ksp->ks_lock = &mm_lock; /* XXX - not really needed */
		kstat_install(ksp);
	}

	mm_kmem_io_access = ddi_getprop(DDI_DEV_T_ANY, devi,
	    DDI_PROP_DONTPASS, "kmem_io_access", 0);

	return (DDI_SUCCESS);
}
/* * sockpfp_init: called as part of the initialisation of the module when * loaded into the kernel. * * Being able to create and record the kstats data in the kernel is not * considered to be vital to the operation of this kernel module, thus * its failure is tolerated. */ static int sockpfp_init(void) { (void) memset(&ks_stats, 0, sizeof (ks_stats)); (void) memcpy(&ks_stats, &pfp_kstats, sizeof (pfp_kstats)); pfp_ksp = kstat_create("pfpacket", 0, "global", "misc", KSTAT_TYPE_NAMED, sizeof (pfp_kstats) / sizeof (kstat_named_t), KSTAT_FLAG_VIRTUAL); if (pfp_ksp != NULL) { pfp_ksp->ks_data = &ks_stats; kstat_install(pfp_ksp); } return (0); }
static void simmstat_add_kstats(struct simmstat_soft_state *softsp) { struct kstat *simmstat_ksp; if ((simmstat_ksp = kstat_create("unix", softsp->board, SIMMSTAT_KSTAT_NAME, "misc", KSTAT_TYPE_RAW, SIMM_COUNT, KSTAT_FLAG_PERSISTENT)) == NULL) { cmn_err(CE_WARN, "simmstat%d: kstat_create failed", ddi_get_instance(softsp->dip)); } simmstat_ksp->ks_update = simmstat_kstat_update; simmstat_ksp->ks_private = (void *)softsp; softsp->simmstat_ksp = simmstat_ksp; kstat_install(simmstat_ksp); }
/*
 * Create and install the per-processor-set "pset" kstat for cp and
 * record it (possibly NULL on failure) in cp->cp_kstat.  Caller must
 * hold cpu_lock.
 */
static void
cpupart_kstat_create(cpupart_t *cp)
{
	kstat_t *ksp;
	zoneid_t zoneid;

	ASSERT(MUTEX_HELD(&cpu_lock));

	/*
	 * We have a bit of a chicken-egg problem since this code will
	 * get called to create the kstats for CP_DEFAULT before the
	 * pools framework gets initialized.  We circumvent the problem
	 * by special-casing cp_default.
	 */
	if (cp != &cp_default && pool_pset_enabled())
		zoneid = GLOBAL_ZONEID;
	else
		zoneid = ALL_ZONES;
	ksp = kstat_create_zone("unix", cp->cp_id, "pset", "misc",
	    KSTAT_TYPE_NAMED,
	    sizeof (cpupart_kstat_t) / sizeof (kstat_named_t), 0, zoneid);
	if (ksp != NULL) {
		cpupart_kstat_t *cpksp = ksp->ks_data;

		kstat_named_init(&cpksp->cpk_updates, "updates",
		    KSTAT_DATA_UINT64);
		kstat_named_init(&cpksp->cpk_runnable, "runnable",
		    KSTAT_DATA_UINT64);
		kstat_named_init(&cpksp->cpk_waiting, "waiting",
		    KSTAT_DATA_UINT64);
		kstat_named_init(&cpksp->cpk_ncpus, "ncpus",
		    KSTAT_DATA_UINT32);
		kstat_named_init(&cpksp->cpk_avenrun_1min, "avenrun_1min",
		    KSTAT_DATA_UINT32);
		kstat_named_init(&cpksp->cpk_avenrun_5min, "avenrun_5min",
		    KSTAT_DATA_UINT32);
		kstat_named_init(&cpksp->cpk_avenrun_15min, "avenrun_15min",
		    KSTAT_DATA_UINT32);

		ksp->ks_update = cpupart_kstat_update;
		ksp->ks_private = cp;

		kstat_install(ksp);
	}
	cp->cp_kstat = ksp;
}
/*
 * Create the picN kstat. Returns a pointer to the
 * kstat which the driver must store to allow it
 * to be deleted when necessary.
 *
 * Fix: pass 0 (an integer) rather than the pointer constant NULL as
 * the kstat_create() flags argument.
 */
static kstat_t *
iospc_create_picN_kstat(char *mod_name, int pic, uint64_t ev_offset,
    int num_ev, iospc_event_t *ev_array)
{
	int event;
	char pic_name[PIC_STR_LEN];
	kstat_t *picN_ksp = NULL;
	struct kstat_named *pic_named_data;

	(void) snprintf(pic_name, PIC_STR_LEN, "pic%1d", pic);

	if ((picN_ksp = kstat_create(mod_name, 0, pic_name,
	    "bus", KSTAT_TYPE_NAMED, num_ev, 0)) == NULL) {
		return (NULL);
	}

	/* NOTE: Number of events is assumed to always be non-zero. */

	pic_named_data = (struct kstat_named *)picN_ksp->ks_data;

	/*
	 * Fill up data section of the kstat
	 * Write event names and their associated pcr masks.
	 * num_ev - 1 is because CLEAR_PIC is added separately.
	 */
	for (event = 0; event < num_ev - 1; event++) {
		pic_named_data[event].value.ui64 =
		    ev_array[event].value << ev_offset;

		kstat_named_init(&pic_named_data[event],
		    ev_array[event].name, KSTAT_DATA_UINT64);
	}

	/*
	 * Add the clear_pic entry.
	 */
	pic_named_data[event].value.ui64 =
	    (uint64_t)~(ev_array[event].value << ev_offset);

	kstat_named_init(&pic_named_data[event],
	    ev_array[event].name, KSTAT_DATA_UINT64);

	kstat_install(picN_ksp);

	return (picN_ksp);
}
/*
 * Create all of the BGE kstats for this instance: the raw hardware
 * statistics block (only when the chip DMAs statistics to memory),
 * the named "statistics", "chipid" and "driverinfo" kstats, and either
 * "serdes" or "phydata" depending on the PHY type.
 */
void
bge_init_kstats(bge_t *bgep, int instance)
{
	kstat_t *ksp;

	BGE_TRACE(("bge_init_kstats($%p, %d)", (void *)bgep, instance));

	if (bgep->chipid.statistic_type == BGE_STAT_BLK) {
		/* chip keeps stats in a DMA block: expose it raw */
		DMA_ZERO(bgep->statistics);

		bgep->bge_kstats[BGE_KSTAT_RAW] = ksp =
		    kstat_create(BGE_DRIVER_NAME, instance,
		    "raw_statistics", "net", KSTAT_TYPE_RAW,
		    sizeof (bge_statistics_t), KSTAT_FLAG_VIRTUAL);
		if (ksp != NULL) {
			ksp->ks_data = DMA_VPTR(bgep->statistics);
			kstat_install(ksp);
		}

		bgep->bge_kstats[BGE_KSTAT_STATS] = bge_setup_named_kstat(
		    bgep, instance, "statistics", bge_statistics,
		    sizeof (bge_statistics), bge_statistics_update);
	} else {
		/* stats live in registers; use the register name table */
		bgep->bge_kstats[BGE_KSTAT_STATS] = bge_setup_named_kstat(
		    bgep, instance, "statistics", bge_stat_val,
		    sizeof (bge_stat_val), bge_statistics_update);
	}

	bgep->bge_kstats[BGE_KSTAT_CHIPID] = bge_setup_named_kstat(
	    bgep, instance, "chipid", bge_chipid,
	    sizeof (bge_chipid), bge_chipid_update);

	bgep->bge_kstats[BGE_KSTAT_DRIVER] = bge_setup_named_kstat(
	    bgep, instance, "driverinfo", bge_driverinfo,
	    sizeof (bge_driverinfo), bge_driverinfo_update);

	if (bgep->chipid.flags & CHIP_FLAG_SERDES)
		bgep->bge_kstats[BGE_KSTAT_PHYS] = bge_setup_named_kstat(
		    bgep, instance, "serdes", bge_serdes,
		    sizeof (bge_serdes), bge_serdes_update);
	else
		bgep->bge_kstats[BGE_KSTAT_PHYS] = bge_setup_named_kstat(
		    bgep, instance, "phydata", bge_phydata,
		    sizeof (bge_phydata), bge_phydata_update);
}
/*
 * Create the "counters" kstat.  The kstat holds the %pcr plus one
 * entry per pic (hence num_pics + 1 named entries) and is writable so
 * performance tools can program the pcr.  cntr_addr_p is stashed in
 * ks_private for the update function.  Returns the installed kstat,
 * or NULL on failure.
 *
 * Fix: use bounded snprintf instead of sprintf for pic_str.
 */
kstat_t *
pcmu_create_cntr_kstat(pcmu_t *pcmu_p, char *name, int num_pics,
    int (*update)(kstat_t *, int), void *cntr_addr_p)
{
	struct kstat_named *counters_named_data;
	struct kstat *counters_ksp;
	dev_info_t *dip = pcmu_p->pcmu_dip;
	char *drv_name = (char *)ddi_driver_name(dip);
	int drv_instance = ddi_get_instance(dip);
	char pic_str[10];
	int i;

	/*
	 * Size of kstat is num_pics + 1 as it
	 * also contains the %pcr
	 */
	if ((counters_ksp = kstat_create(name, drv_instance, "counters",
	    "bus", KSTAT_TYPE_NAMED, num_pics + 1,
	    KSTAT_FLAG_WRITABLE)) == NULL) {
		cmn_err(CE_WARN, "%s%d counters kstat_create failed",
		    drv_name, drv_instance);
		return (NULL);
	}

	counters_named_data = (struct kstat_named *)(counters_ksp->ks_data);

	/* initialize the named kstats */
	kstat_named_init(&counters_named_data[0], "pcr", KSTAT_DATA_UINT64);

	for (i = 0; i < num_pics; i++) {
		/* snprintf bounds pic_str (was unbounded sprintf) */
		(void) snprintf(pic_str, sizeof (pic_str), "pic%d", i);
		kstat_named_init(&counters_named_data[i+1], pic_str,
		    KSTAT_DATA_UINT64);
	}

	/*
	 * Store the register offset's in the kstat's
	 * private field so that they are available
	 * to the update function.
	 */
	counters_ksp->ks_private = (void *)cntr_addr_p;
	counters_ksp->ks_update = update;
	kstat_install(counters_ksp);
	return (counters_ksp);
}
void zfs_dbgmsg_init(void) { list_create(&zfs_dbgmsgs, sizeof (zfs_dbgmsg_t), offsetof(zfs_dbgmsg_t, zdm_node)); mutex_init(&zfs_dbgmsgs_lock, NULL, MUTEX_DEFAULT, NULL); zfs_dbgmsg_kstat = kstat_create("zfs", 0, "dbgmsg", "misc", KSTAT_TYPE_RAW, 0, KSTAT_FLAG_VIRTUAL); if (zfs_dbgmsg_kstat) { zfs_dbgmsg_kstat->ks_lock = &zfs_dbgmsgs_lock; zfs_dbgmsg_kstat->ks_ndata = UINT32_MAX; zfs_dbgmsg_kstat->ks_private = NULL; zfs_dbgmsg_kstat->ks_update = zfs_dbgmsg_update; kstat_set_raw_ops(zfs_dbgmsg_kstat, zfs_dbgmsg_headers, zfs_dbgmsg_data, zfs_dbgmsg_addr); kstat_install(zfs_dbgmsg_kstat); } }
/*
 * Initialize the page retire mechanism:
 *
 * - Establish the correctable error retire limit.
 * - Initialize locks.
 * - Build the retired_pages vnode.
 * - Set up the kstats.
 * - Fire off the background thread.
 * - Tell page_tryretire() it's OK to start retiring pages.
 */
void
page_retire_init(void)
{
	const fs_operation_def_t retired_vnodeops_template[] = {NULL, NULL};
	struct vnodeops *vops;

	const uint_t page_retire_ndata =
	    sizeof (page_retire_kstat) / sizeof (kstat_named_t);

	ASSERT(page_retire_ksp == NULL);

	/* fall back to the default retire rate if unset or invalid */
	if (max_pages_retired_bps <= 0) {
		max_pages_retired_bps = MCE_BPT;
	}

	mutex_init(&pr_q_mutex, NULL, MUTEX_DEFAULT, NULL);

	retired_pages = vn_alloc(KM_SLEEP);
	if (vn_make_ops("retired_pages", retired_vnodeops_template, &vops)) {
		cmn_err(CE_PANIC,
		    "page_retired_init: can't make retired vnodeops");
	}
	vn_setops(retired_pages, vops);

	if ((page_retire_ksp = kstat_create("unix", 0, "page_retire",
	    "misc", KSTAT_TYPE_NAMED, page_retire_ndata,
	    KSTAT_FLAG_VIRTUAL)) == NULL) {
		cmn_err(CE_WARN, "kstat_create for page_retire failed");
	} else {
		page_retire_ksp->ks_data = (void *)&page_retire_kstat;
		page_retire_ksp->ks_update = page_retire_kstat_update;
		kstat_install(page_retire_ksp);
	}

	/* background thread polling intervals, in ticks */
	pr_thread_shortwait = 23 * hz;
	pr_thread_longwait = 1201 * hz;
	mutex_init(&pr_thread_mutex, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&pr_cv, NULL, CV_DEFAULT, NULL);
	pr_thread_id = thread_create(NULL, 0, page_retire_thread, NULL, 0,
	    &p0, TS_RUN, minclsyspri);

	/* retirement may begin only after everything above is in place */
	pr_enable = 1;
}
/*
 * Begin tracking per-txg history for this pool: seed the history list
 * with the current txg and publish it through a per-pool
 * "txgs-<pool>" TXG-type kstat, whose contents are produced on demand
 * by dsl_pool_txg_history_update().
 */
static void
dsl_pool_txg_history_init(dsl_pool_t *dp, uint64_t txg)
{
	char ksname[KSTAT_STRLEN];
	kstat_t *ksp;

	list_create(&dp->dp_txg_history, sizeof (txg_history_t),
	    offsetof(txg_history_t, th_link));
	dsl_pool_txg_history_add(dp, txg);

	(void) snprintf(ksname, KSTAT_STRLEN, "txgs-%s",
	    spa_name(dp->dp_spa));

	ksp = kstat_create("zfs", 0, ksname, "misc",
	    KSTAT_TYPE_TXG, 0, KSTAT_FLAG_VIRTUAL);
	dp->dp_txg_kstat = ksp;
	if (ksp == NULL)
		return;

	ksp->ks_data = NULL;
	ksp->ks_private = dp;
	ksp->ks_update = dsl_pool_txg_history_update;
	kstat_install(ksp);
}
void rds_init() { rds_alloc_cache = kmem_cache_create("rds_alloc_cache", sizeof (rds_t), 0, NULL, NULL, NULL, NULL, NULL, 0); rds_hash_init(); /* * kstats */ rds_kstatsp = kstat_create("rds", 0, "rds_kstat", "misc", KSTAT_TYPE_NAMED, sizeof (rds_kstat) / sizeof (kstat_named_t), KSTAT_FLAG_VIRTUAL | KSTAT_FLAG_WRITABLE); if (rds_kstatsp != NULL) { rds_kstatsp->ks_lock = &rds_kstat_mutex; rds_kstatsp->ks_data = (void *)&rds_kstat; kstat_install(rds_kstatsp); } }