static void
dbuf_stats_hash_table_init(dbuf_hash_table_t *hash)
{
	dbuf_stats_t *dsh = &dbuf_stats_hash_table;
	kstat_t *ksp;

	mutex_init(&dsh->lock, NULL, MUTEX_DEFAULT, NULL);
	dsh->hash = hash;

	ksp = kstat_create("zfs", 0, "dbufs", "misc",
	    KSTAT_TYPE_RAW, 0, KSTAT_FLAG_VIRTUAL);
	dsh->kstat = ksp;

	if (ksp) {
		ksp->ks_lock = &dsh->lock;
		ksp->ks_ndata = UINT32_MAX;
		ksp->ks_private = dsh;
		kstat_set_raw_ops(ksp, dbuf_stats_hash_table_headers,
		    dbuf_stats_hash_table_data, dbuf_stats_hash_table_addr);
		kstat_install(ksp);
	}
}
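/*
 * Editor's sketch (not part of the original source): a KSTAT_TYPE_RAW +
 * KSTAT_FLAG_VIRTUAL kstat like the one above supplies three callbacks
 * through the SPL's kstat_set_raw_ops().  Assuming the ZFS-on-Linux
 * signatures -- headers(buf, size), data(buf, size, row), and
 * addr(ksp, index) -- the minimal shape is below.  Every "my_" name is
 * hypothetical, and <sys/kstat.h> is assumed to be in scope.
 */
typedef struct my_row { uint64_t r_hits, r_misses; } my_row_t;
typedef struct my_table { my_row_t *t_rows; size_t t_nrows; } my_table_t;

static int
my_kstat_headers(char *buf, size_t size)
{
	/* one header line for the whole kstat */
	(void) snprintf(buf, size, "%-16s %-16s\n", "hits", "misses");
	return (0);
}

static int
my_kstat_data(char *buf, size_t size, void *data)
{
	/* format exactly one row, handed to us by the addr() callback */
	my_row_t *row = data;

	(void) snprintf(buf, size, "%-16llu %-16llu\n",
	    (u_longlong_t)row->r_hits, (u_longlong_t)row->r_misses);
	return (0);
}

static void *
my_kstat_addr(kstat_t *ksp, loff_t n)
{
	/* ks_private holds the table; returning NULL stops the walk */
	my_table_t *t = ksp->ks_private;

	return ((size_t)n < t->t_nrows ? &t->t_rows[n] : NULL);
}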
/*
 * Initialize drop facility kstats.
 */
void
ip_drop_init(void)
{
	ip_drop_kstat = kstat_create("ip", 0, "ipdrop", "net",
	    KSTAT_TYPE_NAMED,
	    sizeof (*ip_drop_types) / sizeof (kstat_named_t),
	    KSTAT_FLAG_PERSISTENT);

	if (ip_drop_kstat == NULL)
		return;

	ip_drop_types = ip_drop_kstat->ks_data;

	/* TCP IPsec drop statistics. */
	kstat_named_init(&ipdrops_tcp_clear, "tcp_clear",
	    KSTAT_DATA_UINT64);
	kstat_named_init(&ipdrops_tcp_secure, "tcp_secure",
	    KSTAT_DATA_UINT64);
	kstat_named_init(&ipdrops_tcp_mismatch, "tcp_mismatch",
	    KSTAT_DATA_UINT64);
	kstat_named_init(&ipdrops_tcp_ipsec_alloc, "tcp_ipsec_alloc",
	    KSTAT_DATA_UINT64);

	/* SADB-specific drop statistics. */
	kstat_named_init(&ipdrops_sadb_inlarval_timeout,
	    "sadb_inlarval_timeout", KSTAT_DATA_UINT64);
	kstat_named_init(&ipdrops_sadb_inlarval_replace,
	    "sadb_inlarval_replace", KSTAT_DATA_UINT64);
	kstat_named_init(&ipdrops_sadb_acquire_nomem,
	    "sadb_acquire_nomem", KSTAT_DATA_UINT64);
	kstat_named_init(&ipdrops_sadb_acquire_toofull,
	    "sadb_acquire_toofull", KSTAT_DATA_UINT64);
	kstat_named_init(&ipdrops_sadb_acquire_timeout,
	    "sadb_acquire_timeout", KSTAT_DATA_UINT64);

	/* SPD drop statistics. */
	kstat_named_init(&ipdrops_spd_ahesp_diffid, "spd_ahesp_diffid",
	    KSTAT_DATA_UINT64);
	kstat_named_init(&ipdrops_spd_loopback_mismatch,
	    "spd_loopback_mismatch", KSTAT_DATA_UINT64);
	kstat_named_init(&ipdrops_spd_explicit, "spd_explicit",
	    KSTAT_DATA_UINT64);
	kstat_named_init(&ipdrops_spd_got_secure, "spd_got_secure",
	    KSTAT_DATA_UINT64);
	kstat_named_init(&ipdrops_spd_got_clear, "spd_got_clear",
	    KSTAT_DATA_UINT64);
	kstat_named_init(&ipdrops_spd_bad_ahalg, "spd_bad_ahalg",
	    KSTAT_DATA_UINT64);
	kstat_named_init(&ipdrops_spd_got_ah, "spd_got_ah",
	    KSTAT_DATA_UINT64);
	kstat_named_init(&ipdrops_spd_bad_espealg, "spd_bad_espealg",
	    KSTAT_DATA_UINT64);
	kstat_named_init(&ipdrops_spd_bad_espaalg, "spd_bad_espaalg",
	    KSTAT_DATA_UINT64);
	kstat_named_init(&ipdrops_spd_got_esp, "spd_got_esp",
	    KSTAT_DATA_UINT64);
	kstat_named_init(&ipdrops_spd_got_selfencap, "spd_got_selfencap",
	    KSTAT_DATA_UINT64);
	kstat_named_init(&ipdrops_spd_bad_selfencap, "spd_bad_selfencap",
	    KSTAT_DATA_UINT64);
	kstat_named_init(&ipdrops_spd_nomem, "spd_nomem",
	    KSTAT_DATA_UINT64);
	kstat_named_init(&ipdrops_spd_ah_badid, "spd_ah_badid",
	    KSTAT_DATA_UINT64);
	kstat_named_init(&ipdrops_spd_esp_badid, "spd_esp_badid",
	    KSTAT_DATA_UINT64);
	kstat_named_init(&ipdrops_spd_ah_innermismatch,
	    "spd_ah_innermismatch", KSTAT_DATA_UINT64);
	kstat_named_init(&ipdrops_spd_esp_innermismatch,
	    "spd_esp_innermismatch", KSTAT_DATA_UINT64);

	/* ESP-specific drop statistics. */
	kstat_named_init(&ipdrops_esp_nomem, "esp_nomem",
	    KSTAT_DATA_UINT64);
	kstat_named_init(&ipdrops_esp_no_sa, "esp_no_sa",
	    KSTAT_DATA_UINT64);
	kstat_named_init(&ipdrops_esp_early_replay, "esp_early_replay",
	    KSTAT_DATA_UINT64);
	kstat_named_init(&ipdrops_esp_replay, "esp_replay",
	    KSTAT_DATA_UINT64);
	kstat_named_init(&ipdrops_esp_bytes_expire, "esp_bytes_expire",
	    KSTAT_DATA_UINT64);
	kstat_named_init(&ipdrops_esp_bad_padlen, "esp_bad_padlen",
	    KSTAT_DATA_UINT64);
	kstat_named_init(&ipdrops_esp_bad_padding, "esp_bad_padding",
	    KSTAT_DATA_UINT64);
	kstat_named_init(&ipdrops_esp_bad_auth, "esp_bad_auth",
	    KSTAT_DATA_UINT64);
	kstat_named_init(&ipdrops_esp_crypto_failed, "esp_crypto_failed",
	    KSTAT_DATA_UINT64);
	kstat_named_init(&ipdrops_esp_icmp, "esp_icmp",
	    KSTAT_DATA_UINT64);

	/* AH-specific drop statistics. */
	kstat_named_init(&ipdrops_ah_nomem, "ah_nomem",
	    KSTAT_DATA_UINT64);
	kstat_named_init(&ipdrops_ah_bad_v6_hdrs, "ah_bad_v6_hdrs",
	    KSTAT_DATA_UINT64);
	kstat_named_init(&ipdrops_ah_bad_v4_opts, "ah_bad_v4_opts",
	    KSTAT_DATA_UINT64);
	kstat_named_init(&ipdrops_ah_no_sa, "ah_no_sa",
	    KSTAT_DATA_UINT64);
	kstat_named_init(&ipdrops_ah_bad_length, "ah_bad_length",
	    KSTAT_DATA_UINT64);
	kstat_named_init(&ipdrops_ah_bad_auth, "ah_bad_auth",
	    KSTAT_DATA_UINT64);
	kstat_named_init(&ipdrops_ah_crypto_failed, "ah_crypto_failed",
	    KSTAT_DATA_UINT64);
	kstat_named_init(&ipdrops_ah_early_replay, "ah_early_replay",
	    KSTAT_DATA_UINT64);
	kstat_named_init(&ipdrops_ah_replay, "ah_replay",
	    KSTAT_DATA_UINT64);
	kstat_named_init(&ipdrops_ah_bytes_expire, "ah_bytes_expire",
	    KSTAT_DATA_UINT64);

	/* IP-specific drop statistics. */
	kstat_named_init(&ipdrops_ip_ipsec_not_loaded,
	    "ip_ipsec_not_loaded", KSTAT_DATA_UINT64);

	kstat_install(ip_drop_kstat);
}
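/*
 * Editor's sketch (not original source): each ipdrops_* counter above
 * is a bare kstat_named_t, so a drop site just bumps the value union
 * in place.  The real IPsec code funnels drops through the ipdrop
 * facility's packet-drop routine rather than touching counters
 * directly; the drop point below is hypothetical.
 */
static void
example_drop_point(void)
{
	/* atomic, since drop sites run without a shared kstat lock */
	atomic_inc_64(&ipdrops_spd_got_clear.value.ui64);
}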
void
vdev_raidz_math_init(void)
{
	raidz_impl_ops_t *curr_impl;
	zio_t *bench_zio = NULL;
	raidz_map_t *bench_rm = NULL;
	uint64_t bench_parity;
	int i, c, fn;

	/* move supported impl into raidz_supp_impl */
	for (i = 0, c = 0; i < ARRAY_SIZE(raidz_all_maths); i++) {
		curr_impl = (raidz_impl_ops_t *)raidz_all_maths[i];

		/* initialize impl */
		if (curr_impl->init)
			curr_impl->init();

		if (curr_impl->is_supported())
			raidz_supp_impl[c++] = (raidz_impl_ops_t *)curr_impl;
	}
	membar_producer();		/* complete raidz_supp_impl[] init */
	raidz_supp_impl_cnt = c;	/* number of supported impl */

#if !defined(_KERNEL)
	/* Skip benchmarking and use the last implementation as fastest */
	memcpy(&vdev_raidz_fastest_impl,
	    raidz_supp_impl[raidz_supp_impl_cnt - 1],
	    sizeof (vdev_raidz_fastest_impl));
	strcpy(vdev_raidz_fastest_impl.name, "fastest");

	raidz_math_initialized = B_TRUE;

	/* Use 'cycle' math selection method for userspace */
	VERIFY0(vdev_raidz_impl_set("cycle"));
	return;
#endif

	/* Fake a zio and run the benchmark on a warmed-up buffer */
	bench_zio = kmem_zalloc(sizeof (zio_t), KM_SLEEP);
	bench_zio->io_offset = 0;
	bench_zio->io_size = BENCH_ZIO_SIZE; /* only data columns */
	bench_zio->io_abd = abd_alloc_linear(BENCH_ZIO_SIZE, B_TRUE);
	memset(abd_to_buf(bench_zio->io_abd), 0xAA, BENCH_ZIO_SIZE);

	/* Benchmark parity generation methods */
	for (fn = 0; fn < RAIDZ_GEN_NUM; fn++) {
		bench_parity = fn + 1;
		/* New raidz_map is needed for each generate_p/q/r */
		bench_rm = vdev_raidz_map_alloc(bench_zio, SPA_MINBLOCKSHIFT,
		    BENCH_D_COLS + bench_parity, bench_parity);

		benchmark_raidz_impl(bench_rm, fn, benchmark_gen_impl);

		vdev_raidz_map_free(bench_rm);
	}

	/* Benchmark data reconstruction methods */
	bench_rm = vdev_raidz_map_alloc(bench_zio, SPA_MINBLOCKSHIFT,
	    BENCH_COLS, PARITY_PQR);

	for (fn = 0; fn < RAIDZ_REC_NUM; fn++)
		benchmark_raidz_impl(bench_rm, fn, benchmark_rec_impl);

	vdev_raidz_map_free(bench_rm);

	/* cleanup the bench zio */
	abd_free(bench_zio->io_abd);
	kmem_free(bench_zio, sizeof (zio_t));

	/* install kstats for all impl */
	raidz_math_kstat = kstat_create("zfs", 0, "vdev_raidz_bench",
	    "misc", KSTAT_TYPE_RAW, 0, KSTAT_FLAG_VIRTUAL);

	if (raidz_math_kstat != NULL) {
		raidz_math_kstat->ks_data = NULL;
		raidz_math_kstat->ks_ndata = UINT32_MAX;
		kstat_set_raw_ops(raidz_math_kstat,
		    raidz_math_kstat_headers,
		    raidz_math_kstat_data,
		    raidz_math_kstat_addr);
		kstat_install(raidz_math_kstat);
	}

	/* Finish initialization */
	atomic_swap_32(&zfs_vdev_raidz_impl, user_sel_impl);
	raidz_math_initialized = B_TRUE;
}
/*
 * Create and initialize the driver private statistics.
 */
int
igb_init_stats(igb_t *igb)
{
	kstat_t *ks;
	igb_stat_t *igb_ks;

	/*
	 * Create and init kstat
	 */
	ks = kstat_create(MODULE_NAME, ddi_get_instance(igb->dip),
	    "statistics", "net", KSTAT_TYPE_NAMED,
	    sizeof (igb_stat_t) / sizeof (kstat_named_t), 0);

	if (ks == NULL) {
		igb_error(igb, "Could not create kernel statistics");
		return (IGB_FAILURE);
	}

	igb->igb_ks = ks;
	igb_ks = (igb_stat_t *)ks->ks_data;

	/*
	 * Initialize all the statistics.
	 */
	kstat_named_init(&igb_ks->reset_count, "reset_count",
	    KSTAT_DATA_UINT64);
	kstat_named_init(&igb_ks->dout_sync, "DMA_out_sync",
	    KSTAT_DATA_UINT64);

#ifdef IGB_DEBUG
	kstat_named_init(&igb_ks->rx_frame_error, "rx_frame_error",
	    KSTAT_DATA_UINT64);
	kstat_named_init(&igb_ks->rx_cksum_error, "rx_cksum_error",
	    KSTAT_DATA_UINT64);
	kstat_named_init(&igb_ks->rx_exceed_pkt, "rx_exceed_pkt",
	    KSTAT_DATA_UINT64);
	kstat_named_init(&igb_ks->tx_overload, "tx_overload",
	    KSTAT_DATA_UINT64);
	kstat_named_init(&igb_ks->tx_fail_no_tbd, "tx_fail_no_tbd",
	    KSTAT_DATA_UINT64);
	kstat_named_init(&igb_ks->tx_fail_no_tcb, "tx_fail_no_tcb",
	    KSTAT_DATA_UINT64);
	kstat_named_init(&igb_ks->tx_fail_dma_bind, "tx_fail_dma_bind",
	    KSTAT_DATA_UINT64);
	kstat_named_init(&igb_ks->tx_reschedule, "tx_reschedule",
	    KSTAT_DATA_UINT64);

	kstat_named_init(&igb_ks->gprc, "good_pkts_recvd",
	    KSTAT_DATA_UINT64);
	kstat_named_init(&igb_ks->gptc, "good_pkts_xmitd",
	    KSTAT_DATA_UINT64);
	kstat_named_init(&igb_ks->gor, "good_octets_recvd",
	    KSTAT_DATA_UINT64);
	kstat_named_init(&igb_ks->got, "good_octets_xmitd",
	    KSTAT_DATA_UINT64);
	kstat_named_init(&igb_ks->prc64, "pkts_recvd_( 64b)",
	    KSTAT_DATA_UINT64);
	kstat_named_init(&igb_ks->prc127, "pkts_recvd_( 65- 127b)",
	    KSTAT_DATA_UINT64);
	kstat_named_init(&igb_ks->prc255, "pkts_recvd_( 128- 255b)",
	    KSTAT_DATA_UINT64);
	kstat_named_init(&igb_ks->prc511, "pkts_recvd_( 256- 511b)",
	    KSTAT_DATA_UINT64);
	kstat_named_init(&igb_ks->prc1023, "pkts_recvd_( 512-1023b)",
	    KSTAT_DATA_UINT64);
	kstat_named_init(&igb_ks->prc1522, "pkts_recvd_(1024-1522b)",
	    KSTAT_DATA_UINT64);
	kstat_named_init(&igb_ks->ptc64, "pkts_xmitd_( 64b)",
	    KSTAT_DATA_UINT64);
	kstat_named_init(&igb_ks->ptc127, "pkts_xmitd_( 65- 127b)",
	    KSTAT_DATA_UINT64);
	kstat_named_init(&igb_ks->ptc255, "pkts_xmitd_( 128- 255b)",
	    KSTAT_DATA_UINT64);
	kstat_named_init(&igb_ks->ptc511, "pkts_xmitd_( 256- 511b)",
	    KSTAT_DATA_UINT64);
	kstat_named_init(&igb_ks->ptc1023, "pkts_xmitd_( 512-1023b)",
	    KSTAT_DATA_UINT64);
	kstat_named_init(&igb_ks->ptc1522, "pkts_xmitd_(1024-1522b)",
	    KSTAT_DATA_UINT64);
#endif

	kstat_named_init(&igb_ks->symerrs, "recv_symbol_errors",
	    KSTAT_DATA_UINT64);
	kstat_named_init(&igb_ks->mpc, "recv_missed_packets",
	    KSTAT_DATA_UINT64);
	kstat_named_init(&igb_ks->rlec, "recv_length_errors",
	    KSTAT_DATA_UINT64);
	kstat_named_init(&igb_ks->fcruc, "recv_unsupport_FC_pkts",
	    KSTAT_DATA_UINT64);
	kstat_named_init(&igb_ks->rfc, "recv_frag",
	    KSTAT_DATA_UINT64);
	kstat_named_init(&igb_ks->tncrs, "xmit_with_no_CRS",
	    KSTAT_DATA_UINT64);
	kstat_named_init(&igb_ks->tsctc, "xmit_TCP_seg_contexts",
	    KSTAT_DATA_UINT64);
	kstat_named_init(&igb_ks->tsctfc, "xmit_TCP_seg_contexts_fail",
	    KSTAT_DATA_UINT64);
	kstat_named_init(&igb_ks->xonrxc, "XONs_recvd",
	    KSTAT_DATA_UINT64);
	kstat_named_init(&igb_ks->xontxc, "XONs_xmitd",
	    KSTAT_DATA_UINT64);
	kstat_named_init(&igb_ks->xoffrxc, "XOFFs_recvd",
	    KSTAT_DATA_UINT64);
	kstat_named_init(&igb_ks->xofftxc, "XOFFs_xmitd",
	    KSTAT_DATA_UINT64);

	/*
	 * Function to provide kernel stat update on demand
	 */
	ks->ks_update = igb_update_stats;

	ks->ks_private = (void *)igb;

	/*
	 * Add kstat to the system's kstat chain
	 */
	kstat_install(ks);

	return (IGB_SUCCESS);
}
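/*
 * Editor's sketch (not the actual igb_update_stats()): ks_update
 * callbacks like the one installed above share a common shape --
 * reject writes, refresh the named values from driver/hardware state
 * under the driver's lock, return 0.  The "gen_lock" and "reset_count"
 * members below are assumptions based on the fields initialized above.
 */
static int
example_update_stats(kstat_t *ksp, int rw)
{
	igb_t *igb = ksp->ks_private;
	igb_stat_t *igb_ks = ksp->ks_data;

	if (rw == KSTAT_WRITE)
		return (EACCES);

	mutex_enter(&igb->gen_lock);		/* assumed driver lock */
	igb_ks->reset_count.value.ui64 = igb->reset_count;
	/* ... refresh the remaining counters from hardware registers ... */
	mutex_exit(&igb->gen_lock);

	return (0);
}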
static int
bd_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
{
	int inst;
	bd_handle_t hdl;
	bd_t *bd;
	bd_drive_t drive;
	int rv;
	char name[16];
	char kcache[32];

	switch (cmd) {
	case DDI_ATTACH:
		break;
	case DDI_RESUME:
		/* We don't do anything native for suspend/resume */
		return (DDI_SUCCESS);
	default:
		return (DDI_FAILURE);
	}

	inst = ddi_get_instance(dip);
	hdl = ddi_get_parent_data(dip);

	(void) snprintf(name, sizeof (name), "%s%d",
	    ddi_driver_name(dip), ddi_get_instance(dip));
	(void) snprintf(kcache, sizeof (kcache), "%s_xfer", name);

	if (hdl == NULL) {
		cmn_err(CE_WARN, "%s: missing parent data!", name);
		return (DDI_FAILURE);
	}

	if (ddi_soft_state_zalloc(bd_state, inst) != DDI_SUCCESS) {
		cmn_err(CE_WARN, "%s: unable to zalloc soft state!", name);
		return (DDI_FAILURE);
	}
	bd = ddi_get_soft_state(bd_state, inst);

	if (hdl->h_dma) {
		bd->d_dma = *(hdl->h_dma);
		bd->d_dma.dma_attr_granular =
		    max(DEV_BSIZE, bd->d_dma.dma_attr_granular);
		bd->d_use_dma = B_TRUE;

		if (bd->d_maxxfer &&
		    (bd->d_maxxfer != bd->d_dma.dma_attr_maxxfer)) {
			cmn_err(CE_WARN,
			    "%s: inconsistent maximum transfer size!",
			    name);
			/* We force it */
			bd->d_maxxfer = bd->d_dma.dma_attr_maxxfer;
		} else {
			bd->d_maxxfer = bd->d_dma.dma_attr_maxxfer;
		}
	} else {
		bd->d_use_dma = B_FALSE;
		if (bd->d_maxxfer == 0) {
			bd->d_maxxfer = 1024 * 1024;
		}
	}
	bd->d_ops = hdl->h_ops;
	bd->d_private = hdl->h_private;
	bd->d_blkshift = 9;	/* 512 bytes, to start */

	if (bd->d_maxxfer % DEV_BSIZE) {
		cmn_err(CE_WARN, "%s: maximum transfer misaligned!", name);
		bd->d_maxxfer &= ~(DEV_BSIZE - 1);
	}
	if (bd->d_maxxfer < DEV_BSIZE) {
		cmn_err(CE_WARN, "%s: maximum transfer size too small!",
		    name);
		ddi_soft_state_free(bd_state, inst);
		return (DDI_FAILURE);
	}

	bd->d_dip = dip;
	bd->d_handle = hdl;
	hdl->h_bd = bd;
	ddi_set_driver_private(dip, bd);

	mutex_init(&bd->d_iomutex, NULL, MUTEX_DRIVER, NULL);
	mutex_init(&bd->d_ocmutex, NULL, MUTEX_DRIVER, NULL);
	mutex_init(&bd->d_statemutex, NULL, MUTEX_DRIVER, NULL);
	cv_init(&bd->d_statecv, NULL, CV_DRIVER, NULL);

	list_create(&bd->d_waitq, sizeof (bd_xfer_impl_t),
	    offsetof(struct bd_xfer_impl, i_linkage));
	list_create(&bd->d_runq, sizeof (bd_xfer_impl_t),
	    offsetof(struct bd_xfer_impl, i_linkage));

	bd->d_cache = kmem_cache_create(kcache, sizeof (bd_xfer_impl_t), 8,
	    bd_xfer_ctor, bd_xfer_dtor, NULL, bd, NULL, 0);

	bd->d_ksp = kstat_create(ddi_driver_name(dip), inst, NULL, "disk",
	    KSTAT_TYPE_IO, 1, KSTAT_FLAG_PERSISTENT);
	if (bd->d_ksp != NULL) {
		bd->d_ksp->ks_lock = &bd->d_iomutex;
		kstat_install(bd->d_ksp);
		bd->d_kiop = bd->d_ksp->ks_data;
	} else {
		/*
		 * Even if we cannot create the kstat, we create a
		 * scratch kstat.  The reason for this is to ensure
		 * that we can update the kstat all of the time,
		 * without adding an extra branch instruction.
		 */
		bd->d_kiop = kmem_zalloc(sizeof (kstat_io_t), KM_SLEEP);
	}

	cmlb_alloc_handle(&bd->d_cmlbh);

	bd->d_state = DKIO_NONE;

	bzero(&drive, sizeof (drive));
	bd->d_ops.o_drive_info(bd->d_private, &drive);
	bd->d_qsize = drive.d_qsize;
	bd->d_removable = drive.d_removable;
	bd->d_hotpluggable = drive.d_hotpluggable;

	if (drive.d_maxxfer && drive.d_maxxfer < bd->d_maxxfer)
		bd->d_maxxfer = drive.d_maxxfer;

	rv = cmlb_attach(dip, &bd_tg_ops, DTYPE_DIRECT,
	    bd->d_removable, bd->d_hotpluggable,
	    drive.d_lun >= 0 ? DDI_NT_BLOCK_CHAN : DDI_NT_BLOCK,
	    CMLB_FAKE_LABEL_ONE_PARTITION, bd->d_cmlbh, 0);
	if (rv != 0) {
		cmlb_free_handle(&bd->d_cmlbh);
		kmem_cache_destroy(bd->d_cache);
		mutex_destroy(&bd->d_iomutex);
		mutex_destroy(&bd->d_ocmutex);
		mutex_destroy(&bd->d_statemutex);
		cv_destroy(&bd->d_statecv);
		list_destroy(&bd->d_waitq);
		list_destroy(&bd->d_runq);
		if (bd->d_ksp != NULL) {
			kstat_delete(bd->d_ksp);
			bd->d_ksp = NULL;
		} else {
			kmem_free(bd->d_kiop, sizeof (kstat_io_t));
		}
		ddi_soft_state_free(bd_state, inst);
		return (DDI_FAILURE);
	}

	if (bd->d_ops.o_devid_init != NULL) {
		rv = bd->d_ops.o_devid_init(bd->d_private, dip, &bd->d_devid);
		if (rv == DDI_SUCCESS) {
			if (ddi_devid_register(dip, bd->d_devid) !=
			    DDI_SUCCESS) {
				cmn_err(CE_WARN,
				    "%s: unable to register devid", name);
			}
		}
	}

	/*
	 * Add a zero-length attribute to tell the world we support
	 * kernel ioctls (for layered drivers).  Also set up properties
	 * used by HAL to identify removable media.
	 */
	(void) ddi_prop_create(DDI_DEV_T_NONE, dip, DDI_PROP_CANSLEEP,
	    DDI_KERNEL_IOCTL, NULL, 0);
	if (bd->d_removable) {
		(void) ddi_prop_create(DDI_DEV_T_NONE, dip, DDI_PROP_CANSLEEP,
		    "removable-media", NULL, 0);
	}
	if (bd->d_hotpluggable) {
		(void) ddi_prop_create(DDI_DEV_T_NONE, dip, DDI_PROP_CANSLEEP,
		    "hotpluggable", NULL, 0);
	}

	ddi_report_dev(dip);

	return (DDI_SUCCESS);
}
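/*
 * Editor's sketch: the KSTAT_TYPE_IO kstat (or the scratch kstat_io_t
 * fallback) set up above is fed with the standard illumos queue
 * accounting calls.  Because bd->d_kiop always points at a valid
 * kstat_io_t, the hot path needs no NULL check.  The two functions
 * below are illustrative, not blkdev's actual submit path.
 */
static void
example_submit(bd_t *bd, bd_xfer_impl_t *xi)
{
	mutex_enter(&bd->d_iomutex);
	kstat_waitq_enter(bd->d_kiop);		/* request enters wait queue */
	list_insert_tail(&bd->d_waitq, xi);
	mutex_exit(&bd->d_iomutex);
}

static void
example_start(bd_t *bd, bd_xfer_impl_t *xi)
{
	mutex_enter(&bd->d_iomutex);
	list_remove(&bd->d_waitq, xi);
	kstat_waitq_to_runq(bd->d_kiop);	/* wait queue -> run queue */
	list_insert_tail(&bd->d_runq, xi);
	mutex_exit(&bd->d_iomutex);
}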
/*
 * Cache initialization routine.  This routine should only be called
 * once.  It performs the following tasks:
 *	- Initialize all global locks
 *	- Call sub-initialization routines (localize access to variables)
 */
static int
cachefs_init(int fstyp, char *name)
{
	kstat_t *ksp;
	int error;

	ASSERT(cachefs_up == B_FALSE);

	error = cachefs_init_vfsops(fstyp);
	if (error != 0)
		return (error);

	error = cachefs_init_vnops(name);
	if (error != 0)
		return (error);

	mutex_init(&cachefs_cachelock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&cachefs_newnum_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&cachefs_kstat_key_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&cachefs_kmem_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&cachefs_rename_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&cachefs_minor_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&cachefs_async_lock, NULL, MUTEX_DEFAULT, NULL);
#ifdef CFSRLDEBUG
	mutex_init(&cachefs_rl_debug_mutex, NULL, MUTEX_DEFAULT, NULL);
#endif /* CFSRLDEBUG */

	/*
	 * set up kmem_cache entities
	 */
	cachefs_cnode_cache = kmem_cache_create("cachefs_cnode_cache",
	    sizeof (struct cnode), 0, NULL, NULL, NULL, NULL, NULL, 0);
	cachefs_req_cache = kmem_cache_create("cachefs_async_request",
	    sizeof (struct cachefs_req), 0,
	    cachefs_req_create, cachefs_req_destroy, NULL, NULL, NULL, 0);
	cachefs_fscache_cache = kmem_cache_create("cachefs_fscache",
	    sizeof (fscache_t), 0, NULL, NULL, NULL, NULL, NULL, 0);
	cachefs_filegrp_cache = kmem_cache_create("cachefs_filegrp",
	    sizeof (filegrp_t), 0,
	    filegrp_cache_create, filegrp_cache_destroy,
	    NULL, NULL, NULL, 0);
	cachefs_cache_kmcache = kmem_cache_create("cachefs_cache_t",
	    sizeof (cachefscache_t), 0, NULL, NULL, NULL, NULL, NULL, 0);

	/*
	 * set up the cachefs.0.key kstat
	 */
	cachefs_kstat_key = NULL;
	cachefs_kstat_key_n = 0;
	ksp = kstat_create("cachefs", 0, "key", "misc", KSTAT_TYPE_RAW, 1,
	    KSTAT_FLAG_VIRTUAL | KSTAT_FLAG_VAR_SIZE);
	if (ksp != NULL) {
		ksp->ks_data = &cachefs_kstat_key;
		ksp->ks_update = cachefs_kstat_key_update;
		ksp->ks_snapshot = cachefs_kstat_key_snapshot;
		ksp->ks_lock = &cachefs_kstat_key_lock;
		kstat_install(ksp);
	}

	/*
	 * Assign unique major number for all cachefs mounts
	 */
	if ((cachefs_major = getudev()) == -1) {
		cmn_err(CE_WARN,
		    "cachefs: init: can't get unique device number");
		cachefs_major = 0;
	}
	cachefs_up = B_TRUE;
#ifdef CFSRLDEBUG
	cachefs_dbvalid = time;
#endif /* CFSRLDEBUG */
	return (0);
}
void
dataset_kstats_create(dataset_kstats_t *dk, objset_t *objset)
{
	/*
	 * There should not be anything wrong with having kstats for
	 * snapshots.  Since we are not sure how useful they would be
	 * though nor how much their memory overhead would matter in
	 * a filesystem with many snapshots, we skip them for now.
	 */
	if (dmu_objset_is_snapshot(objset))
		return;

	/*
	 * At the time of this writing, KSTAT_STRLEN is 255 in Linux,
	 * and the spa_name can theoretically be up to 256 characters.
	 * In reality though the spa_name can be 240 characters max
	 * [see origin directory name check in pool_namecheck()].  Thus,
	 * the naming scheme for the module name below should not cause
	 * any truncations.  In the event that a truncation does happen
	 * though, due to some future change, we silently skip creating
	 * the kstat and log the event.
	 */
	char kstat_module_name[KSTAT_STRLEN];
	int n = snprintf(kstat_module_name, sizeof (kstat_module_name),
	    "zfs/%s", spa_name(dmu_objset_spa(objset)));
	if (n < 0) {
		zfs_dbgmsg("failed to create dataset kstat for objset %lld: "
		    " snprintf() for kstat module name returned %d",
		    (unsigned long long)dmu_objset_id(objset), n);
		return;
	} else if (n >= KSTAT_STRLEN) {
		zfs_dbgmsg("failed to create dataset kstat for objset %lld: "
		    "kstat module name length (%d) exceeds limit (%d)",
		    (unsigned long long)dmu_objset_id(objset),
		    n, KSTAT_STRLEN);
		return;
	}

	char kstat_name[KSTAT_STRLEN];
	n = snprintf(kstat_name, sizeof (kstat_name), "objset-0x%llx",
	    (unsigned long long)dmu_objset_id(objset));
	if (n < 0) {
		zfs_dbgmsg("failed to create dataset kstat for objset %lld: "
		    " snprintf() for kstat name returned %d",
		    (unsigned long long)dmu_objset_id(objset), n);
		return;
	}
	ASSERT3U(n, <, KSTAT_STRLEN);

	kstat_t *kstat = kstat_create(kstat_module_name, 0, kstat_name,
	    "dataset", KSTAT_TYPE_NAMED,
	    sizeof (empty_dataset_kstats) / sizeof (kstat_named_t),
	    KSTAT_FLAG_VIRTUAL);
	if (kstat == NULL)
		return;

	dataset_kstat_values_t *dk_kstats =
	    kmem_alloc(sizeof (empty_dataset_kstats), KM_SLEEP);
	bcopy(&empty_dataset_kstats, dk_kstats,
	    sizeof (empty_dataset_kstats));

	char *ds_name = kmem_zalloc(ZFS_MAX_DATASET_NAME_LEN, KM_SLEEP);
	dsl_dataset_name(objset->os_dsl_dataset, ds_name);
	KSTAT_NAMED_STR_PTR(&dk_kstats->dkv_ds_name) = ds_name;
	KSTAT_NAMED_STR_BUFLEN(&dk_kstats->dkv_ds_name) =
	    ZFS_MAX_DATASET_NAME_LEN;

	kstat->ks_data = dk_kstats;
	kstat->ks_update = dataset_kstats_update;
	kstat->ks_private = dk;

	kstat_install(kstat);
	dk->dk_kstats = kstat;

	aggsum_init(&dk->dk_aggsums.das_writes, 0);
	aggsum_init(&dk->dk_aggsums.das_nwritten, 0);
	aggsum_init(&dk->dk_aggsums.das_reads, 0);
	aggsum_init(&dk->dk_aggsums.das_nread, 0);
	aggsum_init(&dk->dk_aggsums.das_nunlinks, 0);
	aggsum_init(&dk->dk_aggsums.das_nunlinked, 0);
}
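/*
 * Editor's sketch of the ks_update callback installed above, modeled
 * on OpenZFS's dataset_kstats_update(): on a read, fold the live
 * aggsum values back into the named entries; reject writes.  Field
 * names beyond dkv_ds_name are assumptions.
 */
static int
dataset_kstats_update_sketch(kstat_t *ksp, int rw)
{
	dataset_kstats_t *dk = ksp->ks_private;
	dataset_kstat_values_t *dkv = ksp->ks_data;

	if (rw == KSTAT_WRITE)
		return (EACCES);

	dkv->dkv_writes.value.ui64 =
	    aggsum_value(&dk->dk_aggsums.das_writes);
	dkv->dkv_nwritten.value.ui64 =
	    aggsum_value(&dk->dk_aggsums.das_nwritten);
	dkv->dkv_reads.value.ui64 =
	    aggsum_value(&dk->dk_aggsums.das_reads);
	dkv->dkv_nread.value.ui64 =
	    aggsum_value(&dk->dk_aggsums.das_nread);
	dkv->dkv_nunlinks.value.ui64 =
	    aggsum_value(&dk->dk_aggsums.das_nunlinks);
	dkv->dkv_nunlinked.value.ui64 =
	    aggsum_value(&dk->dk_aggsums.das_nunlinked);

	return (0);
}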
{ "sctpInDupAck", KSTAT_DATA_INT32, 0 }, { "sctpInAckUnsent", KSTAT_DATA_INT32, 0 }, { "sctpFragUsrMsgs", KSTAT_DATA_INT64, 0 }, { "sctpReasmUsrMsgs", KSTAT_DATA_INT64, 0 }, { "sctpOutSCTPPkts", KSTAT_DATA_INT64, 0 }, { "sctpInSCTPPkts", KSTAT_DATA_INT64, 0 }, { "sctpInInvalidCookie", KSTAT_DATA_INT32, 0 }, { "sctpTimRetrans", KSTAT_DATA_INT32, 0 }, { "sctpTimRetransDrop", KSTAT_DATA_INT32, 0 }, { "sctpTimHearBeatProbe", KSTAT_DATA_INT32, 0 }, { "sctpTimHearBeatDrop", KSTAT_DATA_INT32, 0 }, { "sctpListenDrop", KSTAT_DATA_INT32, 0 }, { "sctpInClosed", KSTAT_DATA_INT32, 0 } }; sctp_mibkp = kstat_create(SCTP_MOD_NAME, 0, "sctp", "mib2", KSTAT_TYPE_NAMED, NUM_OF_FIELDS(sctp_named_kstat_t), 0); if (sctp_mibkp == NULL) return; /* These won't change. */ template.sctpRtoAlgorithm.value.i32 = MIB2_SCTP_RTOALGO_VANJ; template.sctpMaxAssocs.value.i32 = -1; bcopy(&template, sctp_mibkp->ks_data, sizeof (template)); sctp_mibkp->ks_update = sctp_kstat_update; kstat_install(sctp_mibkp); if ((sctp_kstat = kstat_create(SCTP_MOD_NAME, 0, "sctpstat",
/*
 * Validate that this processor supports deep cstate and if so,
 * get the c-state data from ACPI and cache it.
 */
static int
cpu_idle_init(cpu_t *cp)
{
	cpupm_mach_state_t *mach_state =
	    (cpupm_mach_state_t *)cp->cpu_m.mcpu_pm_mach_state;
	cpu_acpi_handle_t handle = mach_state->ms_acpi_handle;
	cpu_acpi_cstate_t *cstate;
	char name[KSTAT_STRLEN];
	int cpu_max_cstates, i;
	int ret;

	/*
	 * Cache the C-state specific ACPI data.
	 */
	if ((ret = cpu_acpi_cache_cstate_data(handle)) != 0) {
		if (ret < 0)
			cmn_err(CE_NOTE,
			    "!Support for CPU deep idle states is being "
			    "disabled due to errors parsing ACPI C-state "
			    "objects exported by BIOS.");
		cpu_idle_fini(cp);
		return (-1);
	}

	cstate = (cpu_acpi_cstate_t *)CPU_ACPI_CSTATES(handle);

	cpu_max_cstates = cpu_acpi_get_max_cstates(handle);

	for (i = CPU_ACPI_C1; i <= cpu_max_cstates; i++) {
		(void) snprintf(name, KSTAT_STRLEN - 1, "c%d",
		    cstate->cs_type);
		/*
		 * Allocate, initialize and install cstate kstat
		 */
		cstate->cs_ksp = kstat_create("cstate", CPU->cpu_id,
		    name, "misc",
		    KSTAT_TYPE_NAMED,
		    sizeof (cpu_idle_kstat) / sizeof (kstat_named_t),
		    KSTAT_FLAG_VIRTUAL);

		if (cstate->cs_ksp == NULL) {
			cmn_err(CE_NOTE, "kstat_create(c_state) fail");
		} else {
			cstate->cs_ksp->ks_data = &cpu_idle_kstat;
			cstate->cs_ksp->ks_lock = &cpu_idle_mutex;
			cstate->cs_ksp->ks_update = cpu_idle_kstat_update;
			cstate->cs_ksp->ks_data_size += MAXNAMELEN;
			cstate->cs_ksp->ks_private = cstate;
			kstat_install(cstate->cs_ksp);
			cstate++;
		}
	}

	cpupm_alloc_domains(cp, CPUPM_C_STATES);
	cpupm_alloc_ms_cstate(cp);

	if (cpu_deep_cstates_supported()) {
		uint32_t value;

		mutex_enter(&cpu_idle_callb_mutex);
		if (cpu_deep_idle_callb_id == (callb_id_t)0)
			cpu_deep_idle_callb_id =
			    callb_add(&cpu_deep_idle_callb, (void *)NULL,
			    CB_CL_CPU_DEEP_IDLE, "cpu_deep_idle");
		if (cpu_idle_cpr_callb_id == (callb_id_t)0)
			cpu_idle_cpr_callb_id =
			    callb_add(&cpu_idle_cpr_callb, (void *)NULL,
			    CB_CL_CPR_PM, "cpu_idle_cpr");
		mutex_exit(&cpu_idle_callb_mutex);

		/*
		 * All supported CPUs (Nehalem and later) will remain in C3
		 * during Bus Master activity.
		 * All CPUs set ACPI_BITREG_BUS_MASTER_RLD to 0 here if it
		 * is not already 0 before enabling Deeper C-states.
		 */
		cpu_acpi_get_register(ACPI_BITREG_BUS_MASTER_RLD, &value);
		if (value & 1)
			cpu_acpi_set_register(ACPI_BITREG_BUS_MASTER_RLD, 0);
	}

	return (0);
}
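/*
 * Editor's sketch (not the actual cpu_idle_kstat_update()): every
 * C-state kstat above shares one static ks_data buffer
 * (cpu_idle_kstat), serialized by cpu_idle_mutex via ks_lock, and
 * ks_update tells the instances apart through ks_private.  The
 * cpu_idle_kstat member names used here are assumptions.
 */
static int
cpu_idle_kstat_update_sketch(kstat_t *ksp, int rw)
{
	cpu_acpi_cstate_t *cstate = ksp->ks_private;

	if (rw == KSTAT_WRITE)
		return (EACCES);

	/* refresh the shared buffer for the C-state being read */
	cpu_idle_kstat.cs_type.value.i32 = cstate->cs_type;
	cpu_idle_kstat.latency.value.ui32 = cstate->cs_latency;

	return (0);
}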
/*
 * This routine is used to add cryptographic providers to the KEF
 * framework.  Providers pass a crypto_provider_info structure to
 * crypto_register_provider() and get back a handle.  The
 * crypto_provider_info structure contains a list of mechanisms
 * supported by the provider and an ops vector containing provider
 * entry points.  Hardware providers call this routine in their attach
 * routines.  Software providers call this routine in their _init()
 * routine.
 */
int
crypto_register_provider(crypto_provider_info_t *info,
    crypto_kcf_provider_handle_t *handle)
{
	char ks_name[KSTAT_STRLEN];

	kcf_provider_desc_t *prov_desc = NULL;
	int ret = CRYPTO_ARGUMENTS_BAD;

	if (info->pi_interface_version > CRYPTO_SPI_VERSION_3)
		return (CRYPTO_VERSION_MISMATCH);

	/*
	 * Check provider type, must be software, hardware, or logical.
	 */
	if (info->pi_provider_type != CRYPTO_HW_PROVIDER &&
	    info->pi_provider_type != CRYPTO_SW_PROVIDER &&
	    info->pi_provider_type != CRYPTO_LOGICAL_PROVIDER)
		return (CRYPTO_ARGUMENTS_BAD);

	/*
	 * Allocate and initialize a new provider descriptor.  We also
	 * hold it and release it when done.
	 */
	prov_desc = kcf_alloc_provider_desc(info);
	KCF_PROV_REFHOLD(prov_desc);

	prov_desc->pd_prov_type = info->pi_provider_type;

	/* provider-private handle, opaque to KCF */
	prov_desc->pd_prov_handle = info->pi_provider_handle;

	/* copy provider description string */
	if (info->pi_provider_description != NULL) {
		/*
		 * pi_provider_description is a string that can contain
		 * up to CRYPTO_PROVIDER_DESCR_MAX_LEN + 1 characters
		 * INCLUDING the terminating null character.  A bcopy()
		 * is necessary here as pd_description should not have
		 * a null character.  See comments in
		 * kcf_alloc_provider_desc() for details on the
		 * pd_description field.
		 */
		bcopy(info->pi_provider_description,
		    prov_desc->pd_description,
		    MIN(strlen(info->pi_provider_description),
		    (size_t)CRYPTO_PROVIDER_DESCR_MAX_LEN));
	}

	if (info->pi_provider_type != CRYPTO_LOGICAL_PROVIDER) {
		if (info->pi_ops_vector == NULL) {
			goto bail;
		}
		copy_ops_vector_v1(info->pi_ops_vector,
		    prov_desc->pd_ops_vector);
		if (info->pi_interface_version >= CRYPTO_SPI_VERSION_2) {
			copy_ops_vector_v2(info->pi_ops_vector,
			    prov_desc->pd_ops_vector);
			prov_desc->pd_flags = info->pi_flags;
		}
		if (info->pi_interface_version == CRYPTO_SPI_VERSION_3) {
			copy_ops_vector_v3(info->pi_ops_vector,
			    prov_desc->pd_ops_vector);
		}
	}

	/* object_ops and nostore_key_ops are mutually exclusive */
	if (prov_desc->pd_ops_vector->co_object_ops &&
	    prov_desc->pd_ops_vector->co_nostore_key_ops) {
		goto bail;
	}

	/* process the mechanisms supported by the provider */
	if ((ret = init_prov_mechs(info, prov_desc)) != CRYPTO_SUCCESS)
		goto bail;

	/*
	 * Add provider to providers tables, also sets the descriptor
	 * pd_prov_id field.
	 */
	if ((ret = kcf_prov_tab_add_provider(prov_desc)) != CRYPTO_SUCCESS) {
		undo_register_provider(prov_desc, B_FALSE);
		goto bail;
	}

	/*
	 * We create a taskq only for a hardware provider.  The global
	 * software queue is used for software providers.  We handle
	 * ordering of multi-part requests in the taskq routine.  So, it
	 * is safe to have multiple threads for the taskq.  We pass the
	 * TASKQ_PREPOPULATE flag to keep some entries cached to improve
	 * performance.
	 */
	if (prov_desc->pd_prov_type == CRYPTO_HW_PROVIDER)
		prov_desc->pd_sched_info.ks_taskq = taskq_create("kcf_taskq",
		    crypto_taskq_threads, minclsyspri,
		    crypto_taskq_minalloc, crypto_taskq_maxalloc,
		    TASKQ_PREPOPULATE);
	else
		prov_desc->pd_sched_info.ks_taskq = NULL;

	/* no kernel session to logical providers */
	if (prov_desc->pd_prov_type != CRYPTO_LOGICAL_PROVIDER) {
		/*
		 * Open a session for session-oriented providers.  This
		 * session is used for all kernel consumers.  This is
		 * fine as a provider is required to support multiple
		 * thread access to a session.  We can do this only after
		 * the taskq has been created as we do a
		 * kcf_submit_request() to open the session.
		 */
		if (KCF_PROV_SESSION_OPS(prov_desc) != NULL) {
			kcf_req_params_t params;

			KCF_WRAP_SESSION_OPS_PARAMS(&params,
			    KCF_OP_SESSION_OPEN, &prov_desc->pd_sid, 0,
			    CRYPTO_USER, NULL, 0, prov_desc);
			ret = kcf_submit_request(prov_desc, NULL, NULL,
			    &params, B_FALSE);

			if (ret != CRYPTO_SUCCESS) {
				undo_register_provider(prov_desc, B_TRUE);
				ret = CRYPTO_FAILED;
				goto bail;
			}
		}
	}

	if (prov_desc->pd_prov_type != CRYPTO_LOGICAL_PROVIDER) {
		/*
		 * Create the kstat for this provider.  There is a kstat
		 * installed for each successfully registered provider.
		 * This kstat is deleted, when the provider unregisters.
		 */
		if (prov_desc->pd_prov_type == CRYPTO_SW_PROVIDER) {
			(void) snprintf(ks_name, KSTAT_STRLEN, "%s_%s",
			    "NONAME", "provider_stats");
		} else {
			(void) snprintf(ks_name, KSTAT_STRLEN, "%s_%d_%u_%s",
			    "NONAME", 0, prov_desc->pd_prov_id,
			    "provider_stats");
		}

		prov_desc->pd_kstat = kstat_create("kcf", 0, ks_name,
		    "crypto", KSTAT_TYPE_NAMED,
		    sizeof (kcf_prov_stats_t) / sizeof (kstat_named_t),
		    KSTAT_FLAG_VIRTUAL);

		if (prov_desc->pd_kstat != NULL) {
			bcopy(&kcf_stats_ks_data_template,
			    &prov_desc->pd_ks_data,
			    sizeof (kcf_stats_ks_data_template));
			prov_desc->pd_kstat->ks_data =
			    &prov_desc->pd_ks_data;
			KCF_PROV_REFHOLD(prov_desc);
			KCF_PROV_IREFHOLD(prov_desc);
			prov_desc->pd_kstat->ks_private = prov_desc;
			prov_desc->pd_kstat->ks_update =
			    kcf_prov_kstat_update;
			kstat_install(prov_desc->pd_kstat);
		}
	}

	if (prov_desc->pd_prov_type == CRYPTO_HW_PROVIDER)
		process_logical_providers(info, prov_desc);

	mutex_enter(&prov_desc->pd_lock);
	prov_desc->pd_state = KCF_PROV_READY;
	mutex_exit(&prov_desc->pd_lock);
	kcf_do_notify(prov_desc, B_TRUE);

	*handle = prov_desc->pd_kcf_prov_handle;
	ret = CRYPTO_SUCCESS;

bail:
	KCF_PROV_REFRELE(prov_desc);
	return (ret);
}
/*
 * Initialize memory power management subsystem.
 * Note: This function should only be called from ATTACH.
 * Note: caller must ensure exclusive access to all fipe_xxx interfaces.
 */
int
fipe_init(dev_info_t *dip)
{
	size_t nsize;
	hrtime_t hrt;

	/* Initialize global control structure. */
	bzero(&fipe_gbl_ctrl, sizeof (fipe_gbl_ctrl));
	mutex_init(&fipe_gbl_ctrl.lock, NULL, MUTEX_DRIVER, NULL);

	/* Query power management policy from device property. */
	fipe_pm_policy = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 0,
	    FIPE_PROP_PM_POLICY, fipe_pm_policy);
	if (fipe_pm_policy < 0 || fipe_pm_policy >= FIPE_PM_POLICY_MAX) {
		cmn_err(CE_CONT,
		    "?fipe: invalid power management policy %d.\n",
		    fipe_pm_policy);
		fipe_pm_policy = FIPE_PM_POLICY_BALANCE;
	}
	fipe_profile_curr = &fipe_profiles[fipe_pm_policy];

	/*
	 * Compute unscaled hrtime value corresponding to
	 * FIPE_STAT_INTERVAL.  (1 << 36) should be big enough here.
	 */
	hrt = 1ULL << 36;
	scalehrtime(&hrt);
	fipe_idle_ctrl.tick_interval =
	    FIPE_STAT_INTERVAL * (1ULL << 36) / hrt;

	if (fipe_mc_init(dip) != 0) {
		cmn_err(CE_WARN, "!fipe: failed to initialize mc state.");
		goto out_mc_error;
	}
	if (fipe_ioat_init() != 0) {
		cmn_err(CE_NOTE, "!fipe: failed to initialize ioat state.");
		goto out_ioat_error;
	}

	/* Allocate per-CPU structure. */
	nsize = max_ncpus * sizeof (fipe_cpu_state_t);
	nsize += CPU_CACHE_COHERENCE_SIZE;
	fipe_gbl_ctrl.state_buf = kmem_zalloc(nsize, KM_SLEEP);
	fipe_gbl_ctrl.state_size = nsize;
	fipe_cpu_states = (fipe_cpu_state_t *)P2ROUNDUP(
	    (intptr_t)fipe_gbl_ctrl.state_buf, CPU_CACHE_COHERENCE_SIZE);

#ifdef FIPE_KSTAT_SUPPORT
	fipe_gbl_ctrl.fipe_kstat = kstat_create("fipe", 0, "fipe-pm",
	    "misc", KSTAT_TYPE_NAMED,
	    sizeof (fipe_kstat) / sizeof (kstat_named_t),
	    KSTAT_FLAG_VIRTUAL);
	if (fipe_gbl_ctrl.fipe_kstat == NULL) {
		cmn_err(CE_CONT, "?fipe: failed to create kstat object.\n");
	} else {
		fipe_gbl_ctrl.fipe_kstat->ks_lock = &fipe_gbl_ctrl.lock;
		fipe_gbl_ctrl.fipe_kstat->ks_data = &fipe_kstat;
		fipe_gbl_ctrl.fipe_kstat->ks_update = fipe_kstat_update;
		kstat_install(fipe_gbl_ctrl.fipe_kstat);
	}
#endif /* FIPE_KSTAT_SUPPORT */

	return (0);

out_ioat_error:
	fipe_mc_fini();
out_mc_error:
	mutex_destroy(&fipe_gbl_ctrl.lock);
	bzero(&fipe_gbl_ctrl, sizeof (fipe_gbl_ctrl));
	return (-1);
}
void
hxge_setup_kstats(p_hxge_t hxgep)
{
	struct kstat *ksp;
	p_hxge_port_kstat_t hxgekp;
	size_t hxge_kstat_sz;
	char stat_name[64];
	int i;

	HXGE_DEBUG_MSG((hxgep, KST_CTL, "==> hxge_setup_kstats"));

	/* Setup RDC statistics */
	for (i = 0; i < hxgep->nrdc; i++) {
		(void) sprintf(stat_name, "%s"CH_NAME_FORMAT,
		    RDC_NAME_FORMAT1, i);
		hxgep->statsp->rdc_ksp[i] = hxge_setup_local_kstat(hxgep,
		    hxgep->instance, stat_name, &hxge_rdc_stats[0],
		    RDC_STAT_END, hxge_rdc_stat_update);
		if (hxgep->statsp->rdc_ksp[i] == NULL)
			cmn_err(CE_WARN,
			    "kstat_create failed for rdc channel %d", i);
	}

	/* Setup RDC System statistics */
	hxgep->statsp->rdc_sys_ksp = hxge_setup_local_kstat(hxgep,
	    hxgep->instance, "RDC_system", &hxge_rdc_sys_stats[0],
	    RDC_SYS_STAT_END, hxge_rdc_sys_stat_update);
	if (hxgep->statsp->rdc_sys_ksp == NULL)
		cmn_err(CE_WARN, "kstat_create failed for rdc_sys_ksp");

	/* Setup TDC statistics */
	for (i = 0; i < hxgep->ntdc; i++) {
		(void) sprintf(stat_name, "%s"CH_NAME_FORMAT,
		    TDC_NAME_FORMAT1, i);
		hxgep->statsp->tdc_ksp[i] = hxge_setup_local_kstat(hxgep,
		    hxgep->instance, stat_name, &hxge_tdc_stats[0],
		    TDC_STAT_END, hxge_tdc_stat_update);
		if (hxgep->statsp->tdc_ksp[i] == NULL)
			cmn_err(CE_WARN,
			    "kstat_create failed for tdc channel %d", i);
	}

	/* Setup TDC System statistics */
	hxgep->statsp->tdc_sys_ksp = hxge_setup_local_kstat(hxgep,
	    hxgep->instance, "TDC_system", &hxge_tdc_sys_stats[0],
	    TDC_SYS_STAT_END, hxge_tdc_sys_stat_update);
	if (hxgep->statsp->tdc_sys_ksp == NULL)
		cmn_err(CE_WARN, "kstat_create failed for tdc_sys_ksp");

	/* Setup PFC statistics */
	hxgep->statsp->pfc_ksp = hxge_setup_local_kstat(hxgep,
	    hxgep->instance, "PFC", &hxge_pfc_stats[0],
	    PFC_STAT_END, hxge_pfc_stat_update);
	if (hxgep->statsp->pfc_ksp == NULL)
		cmn_err(CE_WARN, "kstat_create failed for pfc");

	/* Setup VMAC statistics */
	hxgep->statsp->vmac_ksp = hxge_setup_local_kstat(hxgep,
	    hxgep->instance, "VMAC", &hxge_vmac_stats[0],
	    VMAC_STAT_END, hxge_vmac_stat_update);
	if (hxgep->statsp->vmac_ksp == NULL)
		cmn_err(CE_WARN, "kstat_create failed for vmac");

	/* Setup MMAC Statistics. */
	hxgep->statsp->mmac_ksp = hxge_setup_local_kstat(hxgep,
	    hxgep->instance, "MMAC", &hxge_mmac_stats[0],
	    MMAC_STATS_END, hxge_mmac_stat_update);
	if (hxgep->statsp->mmac_ksp == NULL)
		cmn_err(CE_WARN, "kstat_create failed for mmac");

	/* Setup PEU System statistics */
	hxgep->statsp->peu_sys_ksp = hxge_setup_local_kstat(hxgep,
	    hxgep->instance, "PEU", &hxge_peu_sys_stats[0],
	    PEU_SYS_STAT_END, hxge_peu_sys_stat_update);
	if (hxgep->statsp->peu_sys_ksp == NULL)
		cmn_err(CE_WARN, "kstat_create failed for peu sys");

	/* Port stats */
	hxge_kstat_sz = sizeof (hxge_port_kstat_t);

	if ((ksp = kstat_create(HXGE_DRIVER_NAME, hxgep->instance,
	    "Port", "net", KSTAT_TYPE_NAMED,
	    hxge_kstat_sz / sizeof (kstat_named_t), 0)) == NULL) {
		cmn_err(CE_WARN, "kstat_create failed for port stat");
		return;
	}

	hxgekp = (p_hxge_port_kstat_t)ksp->ks_data;

	kstat_named_init(&hxgekp->cap_10gfdx, "cap_10gfdx",
	    KSTAT_DATA_ULONG);

	/*
	 * Link partner capabilities.
	 */
	kstat_named_init(&hxgekp->lp_cap_10gfdx, "lp_cap_10gfdx",
	    KSTAT_DATA_ULONG);

	/*
	 * Shared link setup.
	 */
	kstat_named_init(&hxgekp->link_speed, "link_speed",
	    KSTAT_DATA_ULONG);
	kstat_named_init(&hxgekp->link_duplex, "link_duplex",
	    KSTAT_DATA_CHAR);
	kstat_named_init(&hxgekp->link_up, "link_up",
	    KSTAT_DATA_ULONG);

	/*
	 * Loopback statistics.
	 */
	kstat_named_init(&hxgekp->lb_mode, "lb_mode",
	    KSTAT_DATA_ULONG);

	/* General MAC statistics */
	kstat_named_init(&hxgekp->ifspeed, "ifspeed",
	    KSTAT_DATA_UINT64);
	kstat_named_init(&hxgekp->promisc, "promisc",
	    KSTAT_DATA_CHAR);

	ksp->ks_update = hxge_port_kstat_update;
	ksp->ks_private = (void *)hxgep;
	kstat_install(ksp);
	hxgep->statsp->port_ksp = ksp;

	HXGE_DEBUG_MSG((hxgep, KST_CTL, "<== hxge_setup_kstats"));
}
static void
dadk_create_errstats(struct dadk *dadkp, int instance)
{
	dadk_errstats_t *dep;
	char kstatname[KSTAT_STRLEN];
	dadk_ioc_string_t dadk_ioc_string;

	if (dadkp->dad_errstats)
		return;

	(void) sprintf(kstatname, "cmdk%d,error", instance);
	dadkp->dad_errstats = kstat_create("cmdkerror", instance,
	    kstatname, "device_error", KSTAT_TYPE_NAMED,
	    sizeof (dadk_errstats_t) / sizeof (kstat_named_t),
	    KSTAT_FLAG_PERSISTENT);

	if (!dadkp->dad_errstats)
		return;

	dep = (dadk_errstats_t *)dadkp->dad_errstats->ks_data;

	kstat_named_init(&dep->dadk_softerrs,
	    "Soft Errors", KSTAT_DATA_UINT32);
	kstat_named_init(&dep->dadk_harderrs,
	    "Hard Errors", KSTAT_DATA_UINT32);
	kstat_named_init(&dep->dadk_transerrs,
	    "Transport Errors", KSTAT_DATA_UINT32);
	kstat_named_init(&dep->dadk_model, "Model", KSTAT_DATA_CHAR);
	kstat_named_init(&dep->dadk_revision,
	    "Revision", KSTAT_DATA_CHAR);
	kstat_named_init(&dep->dadk_serial, "Serial No", KSTAT_DATA_CHAR);
	kstat_named_init(&dep->dadk_capacity,
	    "Size", KSTAT_DATA_ULONGLONG);
	kstat_named_init(&dep->dadk_rq_media_err,
	    "Media Error", KSTAT_DATA_UINT32);
	kstat_named_init(&dep->dadk_rq_ntrdy_err,
	    "Device Not Ready", KSTAT_DATA_UINT32);
	kstat_named_init(&dep->dadk_rq_nodev_err,
	    "No Device", KSTAT_DATA_UINT32);
	kstat_named_init(&dep->dadk_rq_recov_err,
	    "Recoverable", KSTAT_DATA_UINT32);
	kstat_named_init(&dep->dadk_rq_illrq_err,
	    "Illegal Request", KSTAT_DATA_UINT32);

	dadkp->dad_errstats->ks_private = dep;
	dadkp->dad_errstats->ks_update = nulldev;
	kstat_install(dadkp->dad_errstats);

	/* get model */
	dep->dadk_model.value.c[0] = 0;
	dadk_ioc_string.is_buf = &dep->dadk_model.value.c[0];
	dadk_ioc_string.is_size = sizeof (dep->dadk_model.value.c);
	(void) dadk_ctl_ioctl(dadkp, DIOCTL_GETMODEL,
	    (uintptr_t)&dadk_ioc_string, FKIOCTL | FNATIVE);

	/* get serial */
	dep->dadk_serial.value.c[0] = 0;
	dadk_ioc_string.is_buf = &dep->dadk_serial.value.c[0];
	dadk_ioc_string.is_size = sizeof (dep->dadk_serial.value.c);
	(void) dadk_ctl_ioctl(dadkp, DIOCTL_GETSERIAL,
	    (uintptr_t)&dadk_ioc_string, FKIOCTL | FNATIVE);

	/* Get revision */
	dep->dadk_revision.value.c[0] = 0;

	/* Get capacity */
	dep->dadk_capacity.value.ui64 =
	    (uint64_t)dadkp->dad_logg.g_cap *
	    (uint64_t)dadkp->dad_logg.g_secsiz;
}
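/*
 * Editor's example: all of the kstats in this collection are readable
 * from userland through libkstat(3LIB).  Below is a minimal,
 * self-contained consumer for the cmdk error kstat created above
 * (instance 0 and the statistic name are just examples); build with
 * "cc kread.c -lkstat" on illumos/Solaris.
 */
#include <stdio.h>
#include <kstat.h>

int
main(void)
{
	kstat_ctl_t *kc;
	kstat_t *ksp;
	kstat_named_t *kn;

	if ((kc = kstat_open()) == NULL) {
		perror("kstat_open");
		return (1);
	}
	ksp = kstat_lookup(kc, "cmdkerror", 0, "cmdk0,error");
	if (ksp != NULL && kstat_read(kc, ksp, NULL) != -1) {
		kn = kstat_data_lookup(ksp, "Soft Errors");
		if (kn != NULL)
			(void) printf("soft errors: %u\n", kn->value.ui32);
	}
	(void) kstat_close(kc);
	return (0);
}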
/*
 * audioixp_attach()
 *
 * Description:
 *	Attach an instance of the audioixp driver.  This routine does
 *	the device dependent attach tasks.
 *
 * Arguments:
 *	dev_info_t	*dip	Pointer to the device's dev_info struct
 *
 * Returns:
 *	DDI_SUCCESS	The driver was initialized properly
 *	DDI_FAILURE	The driver couldn't be initialized properly
 */
static int
audioixp_attach(dev_info_t *dip)
{
	uint16_t cmdeg;
	audioixp_state_t *statep;
	audio_dev_t *adev;
	uint32_t devid;
	const char *name;
	const char *rev;

	/* we don't support high level interrupts in the driver */
	if (ddi_intr_hilevel(dip, 0) != 0) {
		cmn_err(CE_WARN,
		    "!%s%d: unsupported high level interrupt",
		    ddi_driver_name(dip), ddi_get_instance(dip));
		return (DDI_FAILURE);
	}

	/* allocate the soft state structure */
	statep = kmem_zalloc(sizeof (*statep), KM_SLEEP);
	statep->dip = dip;
	ddi_set_driver_private(dip, statep);

	if (ddi_get_iblock_cookie(dip, 0, &statep->iblock) != DDI_SUCCESS) {
		cmn_err(CE_WARN,
		    "!%s%d: cannot get iblock cookie",
		    ddi_driver_name(dip), ddi_get_instance(dip));
		kmem_free(statep, sizeof (*statep));
		return (DDI_FAILURE);
	}
	mutex_init(&statep->inst_lock, NULL, MUTEX_DRIVER, statep->iblock);

	/* allocate framework audio device */
	if ((adev = audio_dev_alloc(dip, 0)) == NULL) {
		cmn_err(CE_WARN, "!%s%d: unable to allocate audio dev",
		    ddi_driver_name(dip), ddi_get_instance(dip));
		goto error;
	}
	statep->adev = adev;

	/* map in the registers */
	if (audioixp_map_regs(statep) != DDI_SUCCESS) {
		audio_dev_warn(adev, "couldn't map registers");
		goto error;
	}

	/* set device information -- this could be smarter */
	devid = ((pci_config_get16(statep->pcih, PCI_CONF_VENID)) << 16) |
	    pci_config_get16(statep->pcih, PCI_CONF_DEVID);

	name = "ATI AC'97";
	switch (devid) {
	case IXP_PCI_ID_200:
		rev = "IXP150";
		break;
	case IXP_PCI_ID_300:
		rev = "SB300";
		break;
	case IXP_PCI_ID_400:
		if (pci_config_get8(statep->pcih, PCI_CONF_REVID) & 0x80) {
			rev = "SB450";
		} else {
			rev = "SB400";
		}
		break;
	case IXP_PCI_ID_SB600:
		rev = "SB600";
		break;
	default:
		rev = "Unknown";
		break;
	}
	audio_dev_set_description(adev, name);
	audio_dev_set_version(adev, rev);

	/* allocate port structures */
	if ((audioixp_alloc_port(statep, IXP_PLAY) != DDI_SUCCESS) ||
	    (audioixp_alloc_port(statep, IXP_REC) != DDI_SUCCESS)) {
		goto error;
	}

	statep->ac97 = ac97_alloc(dip, audioixp_rd97, audioixp_wr97, statep);
	if (statep->ac97 == NULL) {
		audio_dev_warn(adev, "failed to allocate ac97 handle");
		goto error;
	}

	/* set PCI command register */
	cmdeg = pci_config_get16(statep->pcih, PCI_CONF_COMM);
	pci_config_put16(statep->pcih, PCI_CONF_COMM,
	    cmdeg | PCI_COMM_IO | PCI_COMM_MAE);

	/* set up kernel statistics */
	if ((statep->ksp = kstat_create(IXP_NAME, ddi_get_instance(dip),
	    IXP_NAME, "controller", KSTAT_TYPE_INTR, 1,
	    KSTAT_FLAG_PERSISTENT)) != NULL) {
		kstat_install(statep->ksp);
	}

	if (audioixp_chip_init(statep) != DDI_SUCCESS) {
		audio_dev_warn(statep->adev, "failed to init chip");
		goto error;
	}

	/* initialize the AC'97 part */
	if (ac97_init(statep->ac97, adev) != DDI_SUCCESS) {
		audio_dev_warn(adev, "ac'97 initialization failed");
		goto error;
	}

	/* set up the interrupt handler */
	if (ddi_add_intr(dip, 0, &statep->iblock, NULL,
	    audioixp_intr, (caddr_t)statep) != DDI_SUCCESS) {
		audio_dev_warn(adev, "bad interrupt specification");
		goto error;
	}
	statep->intr_added = B_TRUE;

	if (audio_dev_register(adev) != DDI_SUCCESS) {
		audio_dev_warn(adev, "unable to register with framework");
		goto error;
	}

	ddi_report_dev(dip);

	return (DDI_SUCCESS);

error:
	audioixp_destroy(statep);
	return (DDI_FAILURE);
}
/*
 * function to setup the kstat_t structure for the device and install it
 *
 * dev - software handle to the device
 *
 * return DDI_SUCCESS => success, failure otherwise
 */
int
oce_stat_init(struct oce_dev *dev)
{
	struct oce_stat *stats;
	uint32_t num_stats = sizeof (struct oce_stat) /
	    sizeof (kstat_named_t);

	/* allocate the kstat */
	dev->oce_kstats = kstat_create(OCE_MOD_NAME, dev->dev_id, "stats",
	    "net", KSTAT_TYPE_NAMED, num_stats, 0);
	if (dev->oce_kstats == NULL) {
		oce_log(dev, CE_NOTE, MOD_CONFIG,
		    "kstat creation failed: 0x%p",
		    (void *)dev->oce_kstats);
		return (DDI_FAILURE);
	}

	/* allocate the device copy of the stats */
	dev->stats_dbuf = oce_alloc_dma_buffer(dev,
	    sizeof (struct mbx_get_nic_stats), NULL, DDI_DMA_CONSISTENT);
	if (dev->stats_dbuf == NULL) {
		oce_log(dev, CE_NOTE, MOD_CONFIG,
		    "Could not allocate stats_dbuf: %p",
		    (void *)dev->stats_dbuf);
		kstat_delete(dev->oce_kstats);
		return (DDI_FAILURE);
	}
	dev->hw_stats = (struct mbx_get_nic_stats *)DBUF_VA(dev->stats_dbuf);

	/* initialize the counters */
	stats = (struct oce_stat *)dev->oce_kstats->ks_data;
	kstat_named_init(&stats->rx_bytes_hi, "rx bytes msd",
	    KSTAT_DATA_ULONG);
	kstat_named_init(&stats->rx_bytes_lo, "rx bytes lsd",
	    KSTAT_DATA_ULONG);
	kstat_named_init(&stats->rx_frames, "rx frames",
	    KSTAT_DATA_ULONG);
	kstat_named_init(&stats->rx_errors, "rx errors",
	    KSTAT_DATA_ULONG);
	kstat_named_init(&stats->rx_drops, "rx drops",
	    KSTAT_DATA_ULONG);
	kstat_named_init(&stats->tx_bytes_hi, "tx bytes msd",
	    KSTAT_DATA_ULONG);
	kstat_named_init(&stats->tx_bytes_lo, "tx bytes lsd",
	    KSTAT_DATA_ULONG);
	kstat_named_init(&stats->tx_frames, "tx frames",
	    KSTAT_DATA_ULONG);
	kstat_named_init(&stats->tx_errors, "tx errors",
	    KSTAT_DATA_ULONG);
	kstat_named_init(&stats->rx_unicast_frames, "rx unicast frames",
	    KSTAT_DATA_ULONG);
	kstat_named_init(&stats->rx_multicast_frames,
	    "rx multicast frames", KSTAT_DATA_ULONG);
	kstat_named_init(&stats->rx_broadcast_frames,
	    "rx broadcast frames", KSTAT_DATA_ULONG);
	kstat_named_init(&stats->rx_crc_errors, "rx crc errors",
	    KSTAT_DATA_ULONG);
	kstat_named_init(&stats->rx_alignment_symbol_errors,
	    "rx alignment symbol errors", KSTAT_DATA_ULONG);
	kstat_named_init(&stats->rx_in_range_errors,
	    "rx in range errors", KSTAT_DATA_ULONG);
	kstat_named_init(&stats->rx_out_range_errors,
	    "rx out range errors", KSTAT_DATA_ULONG);
	kstat_named_init(&stats->rx_frame_too_long, "rx frame too long",
	    KSTAT_DATA_ULONG);
	kstat_named_init(&stats->rx_address_match_errors,
	    "rx address match errors", KSTAT_DATA_ULONG);
	kstat_named_init(&stats->rx_pause_frames, "rx pause frames",
	    KSTAT_DATA_ULONG);
	kstat_named_init(&stats->rx_control_frames, "rx control frames",
	    KSTAT_DATA_ULONG);
	kstat_named_init(&stats->rx_ip_checksum_errs,
	    "rx ip checksum errors", KSTAT_DATA_ULONG);
	kstat_named_init(&stats->rx_tcp_checksum_errs,
	    "rx tcp checksum errors", KSTAT_DATA_ULONG);
	kstat_named_init(&stats->rx_udp_checksum_errs,
	    "rx udp checksum errors", KSTAT_DATA_ULONG);
	kstat_named_init(&stats->rx_fifo_overflow, "rx fifo overflow",
	    KSTAT_DATA_ULONG);
	kstat_named_init(&stats->rx_input_fifo_overflow,
	    "rx input fifo overflow", KSTAT_DATA_ULONG);
	kstat_named_init(&stats->tx_unicast_frames, "tx unicast frames",
	    KSTAT_DATA_ULONG);
	kstat_named_init(&stats->tx_multicast_frames,
	    "tx multicast frames", KSTAT_DATA_ULONG);
	kstat_named_init(&stats->tx_broadcast_frames,
	    "tx broadcast frames", KSTAT_DATA_ULONG);
	kstat_named_init(&stats->tx_pause_frames, "tx pause frames",
	    KSTAT_DATA_ULONG);
	kstat_named_init(&stats->tx_control_frames, "tx control frames",
	    KSTAT_DATA_ULONG);

	kstat_named_init(&stats->rx_drops_no_pbuf, "rx_drops_no_pbuf",
	    KSTAT_DATA_ULONG);
	kstat_named_init(&stats->rx_drops_no_txpb, "rx_drops_no_txpb",
	    KSTAT_DATA_ULONG);
	kstat_named_init(&stats->rx_drops_no_erx_descr,
	    "rx_drops_no_erx_descr", KSTAT_DATA_ULONG);
	kstat_named_init(&stats->rx_drops_no_tpre_descr,
	    "rx_drops_no_tpre_descr", KSTAT_DATA_ULONG);
	kstat_named_init(&stats->rx_drops_too_many_frags,
	    "rx_drops_too_many_frags", KSTAT_DATA_ULONG);
	kstat_named_init(&stats->rx_drops_invalid_ring,
	    "rx_drops_invalid_ring", KSTAT_DATA_ULONG);
	kstat_named_init(&stats->rx_drops_mtu, "rx_drops_mtu",
	    KSTAT_DATA_ULONG);
	kstat_named_init(&stats->rx_dropped_too_small,
	    "rx_dropped_too_small", KSTAT_DATA_ULONG);
	kstat_named_init(&stats->rx_dropped_too_short,
	    "rx_dropped_too_short", KSTAT_DATA_ULONG);
	kstat_named_init(&stats->rx_dropped_header_too_small,
	    "rx_dropped_header_too_small", KSTAT_DATA_ULONG);
	kstat_named_init(&stats->rx_dropped_tcp_length,
	    "rx_dropped_tcp_length", KSTAT_DATA_ULONG);
	kstat_named_init(&stats->rx_dropped_runt, "rx_dropped_runt",
	    KSTAT_DATA_ULONG);
	kstat_named_init(&stats->rx_drops_no_fragments,
	    "rx_drop_no_frag", KSTAT_DATA_ULONG);

	dev->oce_kstats->ks_update = oce_update_stats;
	dev->oce_kstats->ks_private = (void *)dev;
	kstat_install(dev->oce_kstats);

	return (DDI_SUCCESS);
} /* oce_stat_init */
void
vdev_raidz_math_init(void)
{
	raidz_impl_ops_t *curr_impl;
	zio_t *bench_zio = NULL;
	raidz_map_t *bench_rm = NULL;
	uint64_t bench_parity;
	int i, c, fn;

	/* initialize vdev_raidz_impl_lock */
	rw_init(&vdev_raidz_impl_lock, NULL, RW_DEFAULT, NULL);

	/* move supported impl into raidz_supp_impl */
	for (i = 0, c = 0; i < ARRAY_SIZE(raidz_all_maths); i++) {
		curr_impl = (raidz_impl_ops_t *)raidz_all_maths[i];

		/* initialize impl */
		if (curr_impl->init)
			curr_impl->init();

		if (curr_impl->is_supported()) {
			/* init kstat */
			init_raidz_kstat(&raidz_impl_kstats[c],
			    curr_impl->name);
			raidz_supp_impl[c++] = (raidz_impl_ops_t *)curr_impl;
		}
	}
	raidz_supp_impl_cnt = c;	/* number of supported impl */
	raidz_supp_impl[c] = NULL;	/* sentinel */

	/* init kstat for original routines */
	init_raidz_kstat(&(raidz_impl_kstats[raidz_supp_impl_cnt]),
	    "original");

#if !defined(_KERNEL)
	/*
	 * Skip benchmarking and use last implementation as fastest
	 */
	memcpy(&vdev_raidz_fastest_impl,
	    raidz_supp_impl[raidz_supp_impl_cnt - 1],
	    sizeof (vdev_raidz_fastest_impl));
	vdev_raidz_fastest_impl.name = "fastest";

	raidz_math_initialized = B_TRUE;

	/* Use 'cycle' math selection method for userspace */
	VERIFY0(vdev_raidz_impl_set("cycle"));
	return;
#endif

	/* Fake a zio and run the benchmark on it */
	bench_zio = kmem_zalloc(sizeof (zio_t), KM_SLEEP);
	bench_zio->io_offset = 0;
	bench_zio->io_size = BENCH_ZIO_SIZE; /* only data columns */
	bench_zio->io_data = zio_data_buf_alloc(BENCH_ZIO_SIZE);
	VERIFY(bench_zio->io_data);

	/* Benchmark parity generation methods */
	for (fn = 0; fn < RAIDZ_GEN_NUM; fn++) {
		bench_parity = fn + 1;
		/* New raidz_map is needed for each generate_p/q/r */
		bench_rm = vdev_raidz_map_alloc(bench_zio, 9,
		    BENCH_D_COLS + bench_parity, bench_parity);

		benchmark_raidz_impl(bench_rm, fn, benchmark_gen_impl);

		vdev_raidz_map_free(bench_rm);
	}

	/* Benchmark data reconstruction methods */
	bench_rm = vdev_raidz_map_alloc(bench_zio, 9, BENCH_COLS,
	    PARITY_PQR);

	for (fn = 0; fn < RAIDZ_REC_NUM; fn++)
		benchmark_raidz_impl(bench_rm, fn, benchmark_rec_impl);

	vdev_raidz_map_free(bench_rm);

	/* cleanup the bench zio */
	zio_data_buf_free(bench_zio->io_data, BENCH_ZIO_SIZE);
	kmem_free(bench_zio, sizeof (zio_t));

	/* install kstats for all impl */
	raidz_math_kstat = kstat_create("zfs", 0, "vdev_raidz_bench",
	    "misc", KSTAT_TYPE_NAMED,
	    sizeof (raidz_impl_kstat_t) / sizeof (kstat_named_t) *
	    (raidz_supp_impl_cnt + 1), KSTAT_FLAG_VIRTUAL);

	if (raidz_math_kstat != NULL) {
		raidz_math_kstat->ks_data = raidz_impl_kstats;
		kstat_install(raidz_math_kstat);
	}

	/* Finish initialization */
	raidz_math_initialized = B_TRUE;
	if (!vdev_raidz_impl_user_set)
		VERIFY0(vdev_raidz_impl_set("fastest"));
}