Esempio n. 1
0
/*
 * Tear down the LQFS module's global state: object caches, kstats,
 * and the thread-specific-data key.  Inverse of the module init path.
 * Always succeeds; returns 0 per the _fini() convention of the caller.
 */
int
lqfs_fini(void)
{
	/*
	 * Destroy the object caches and clear the globals so stale
	 * pointers cannot be reused on a later init/fini cycle
	 * (now consistent with the kstat handling below).
	 */
	if (lqfs_sv != NULL) {
		kmem_cache_destroy(lqfs_sv);
		lqfs_sv = NULL;
	}
	if (lqfs_bp != NULL) {
		kmem_cache_destroy(lqfs_bp);
		lqfs_bp = NULL;
	}
	if (mapentry_cache != NULL) {
		kmem_cache_destroy(mapentry_cache);
		mapentry_cache = NULL;
	}
	if (lqfs_logstats != NULL) {
		kstat_delete(lqfs_logstats);
		lqfs_logstats = NULL;
	}
	if (lqfs_deltastats != NULL) {
		kstat_delete(lqfs_deltastats);
		lqfs_deltastats = NULL;
	}

	/* The TSD key must have been created by the init path. */
	ASSERT(topkey);
	tsd_destroy(&topkey);
	topkey = 0;

	return (0);
}
Esempio n. 2
0
/*
 * Delete every kstat published for this hxge instance: the main
 * instance kstat, the per-RDC/TDC channel kstats, the subsystem
 * kstats, and finally the stats area itself.  Safe to call when
 * stats were never allocated.
 */
void
hxge_destroy_kstats(p_hxge_t hxgep)
{
	int			channel;
	p_hxge_dma_pt_cfg_t	p_dma_cfgp;
	p_hxge_hw_pt_cfg_t	p_cfgp;

	HXGE_DEBUG_MSG((hxgep, KST_CTL, "==> hxge_destroy_kstats"));

	/* Nothing to do if the stats area was never allocated. */
	if (hxgep->statsp == NULL)
		return;

	if (hxgep->statsp->ksp)
		kstat_delete(hxgep->statsp->ksp);

	p_dma_cfgp = (p_hxge_dma_pt_cfg_t)&hxgep->pt_config;
	p_cfgp = (p_hxge_hw_pt_cfg_t)&p_dma_cfgp->hw_config;

	/* Per-channel receive DMA kstats. */
	for (channel = 0; channel < p_cfgp->max_rdcs; channel++) {
		if (hxgep->statsp->rdc_ksp[channel]) {
			kstat_delete(hxgep->statsp->rdc_ksp[channel]);
		}
	}

	/* Per-channel transmit DMA kstats. */
	for (channel = 0; channel < p_cfgp->max_tdcs; channel++) {
		if (hxgep->statsp->tdc_ksp[channel]) {
			kstat_delete(hxgep->statsp->tdc_ksp[channel]);
		}
	}

	if (hxgep->statsp->rdc_sys_ksp)
		kstat_delete(hxgep->statsp->rdc_sys_ksp);

	if (hxgep->statsp->tdc_sys_ksp)
		kstat_delete(hxgep->statsp->tdc_sys_ksp);

	if (hxgep->statsp->peu_sys_ksp)
		kstat_delete(hxgep->statsp->peu_sys_ksp);

	if (hxgep->statsp->mmac_ksp)
		kstat_delete(hxgep->statsp->mmac_ksp);

	if (hxgep->statsp->pfc_ksp)
		kstat_delete(hxgep->statsp->pfc_ksp);

	if (hxgep->statsp->vmac_ksp)
		kstat_delete(hxgep->statsp->vmac_ksp);

	if (hxgep->statsp->port_ksp)
		kstat_delete(hxgep->statsp->port_ksp);

	/*
	 * statsp is known non-NULL here (checked at entry), so the
	 * previous "if (hxgep->statsp)" guard was redundant.  Clear
	 * the pointer after freeing to prevent a later use-after-free.
	 */
	KMEM_FREE(hxgep->statsp, hxgep->statsp->stats_size);
	hxgep->statsp = NULL;

	HXGE_DEBUG_MSG((hxgep, KST_CTL, "<== hxge_destroy_kstats"));
}
Esempio n. 3
0
/*
 * Destroy memory power management subsystem.
 * Note: This function should only be called from DETACH.
 * Note: caller must ensure exclusive access to all fipe_xxx interfaces.
 *
 * Returns EBUSY if PM is still enabled (caller must stop PM first),
 * otherwise 0 after all sub-modules, kstats, buffers and locks are
 * torn down.
 */
int
fipe_fini(void)
{
	/* Refuse teardown while the PM machinery is still running. */
	if (fipe_gbl_ctrl.pm_enabled) {
		cmn_err(CE_NOTE, "!fipe: call fipe_fini without stopping PM.");
		return (EBUSY);
	}

	/* With PM disabled, no power transition may be in flight. */
	ASSERT(!fipe_gbl_ctrl.pm_active);
	fipe_ioat_fini();
	fipe_mc_fini();

#ifdef	FIPE_KSTAT_SUPPORT
	if (fipe_gbl_ctrl.fipe_kstat != NULL) {
		kstat_delete(fipe_gbl_ctrl.fipe_kstat);
		fipe_gbl_ctrl.fipe_kstat = NULL;
	}
#endif	/* FIPE_KSTAT_SUPPORT */

	if (fipe_gbl_ctrl.state_buf != NULL) {
		ASSERT(fipe_gbl_ctrl.state_size != 0);
		kmem_free(fipe_gbl_ctrl.state_buf, fipe_gbl_ctrl.state_size);
		/*
		 * NOTE(review): frees state_buf but NULLs fipe_cpu_states —
		 * presumably fipe_cpu_states aliases (points into) state_buf;
		 * confirm against the init path.  state_buf itself is wiped
		 * by the bzero() below.
		 */
		fipe_cpu_states = NULL;
	}

	/* Reset remaining globals and wipe the control block last. */
	fipe_profile_curr = NULL;
	mutex_destroy(&fipe_gbl_ctrl.lock);
	bzero(&fipe_gbl_ctrl, sizeof (fipe_gbl_ctrl));

	return (0);
}
Esempio n. 4
0
/*
 * Disable deep C-state support on the given CPU: point it back at
 * the non-deep idle routine, remove the per-C-state kstats, and
 * release the CPU's C-state bookkeeping and ACPI data.
 */
static void
cpu_idle_stop(cpu_t *cp)
{
	cpupm_mach_state_t *mach_state =
	    (cpupm_mach_state_t *)(cp->cpu_m.mcpu_pm_mach_state);
	cpu_acpi_handle_t handle = mach_state->ms_acpi_handle;
	cpu_acpi_cstate_t *cs;

	/*
	 * Park all CPUs so the idle-routine pointer can be swapped
	 * to the non-deep variant without racing an idling CPU.
	 */
	pause_cpus(NULL);
	cp->cpu_m.mcpu_idle_cpu = non_deep_idle_cpu;
	start_cpus();

	/* Remove the per-C-state kstats, if any were published. */
	cs = (cpu_acpi_cstate_t *)CPU_ACPI_CSTATES(handle);
	if (cs != NULL) {
		uint_t level = CPU_ACPI_C1;
		uint_t max_level = cpu_acpi_get_max_cstates(handle);

		while (level <= max_level) {
			if (cs->cs_ksp != NULL)
				kstat_delete(cs->cs_ksp);
			cs++;
			level++;
		}
	}
	cpupm_free_ms_cstate(cp);
	cpupm_remove_domains(cp, CPUPM_C_STATES, &cpupm_cstate_domains);
	cpu_acpi_free_cstate_data(handle);
}
Esempio n. 5
0
void
kcf_sched_destroy(void)
{
	int i;

	if (kcf_misc_kstat)
		kstat_delete(kcf_misc_kstat);

	if (kcfpool)
		kmem_free(kcfpool, sizeof (kcf_pool_t));

	for (i = 0; i < REQID_TABLES; i++) {
		if (kcf_reqid_table[i])
			kmem_free(kcf_reqid_table[i],
			    sizeof (kcf_reqid_table_t));
	}

	if (gswq)
		kmem_free(gswq, sizeof (kcf_global_swq_t));

	if (kcf_context_cache)
		kmem_cache_destroy(kcf_context_cache);
	if (kcf_areq_cache)
		kmem_cache_destroy(kcf_areq_cache);
	if (kcf_sreq_cache)
		kmem_cache_destroy(kcf_sreq_cache);
}
Esempio n. 6
0
/*
 * Remove a soft ring's kstat.  For a Tx soft ring, first fold its
 * transmit counters into the owning client's defunct-lane totals so
 * the history is not lost with the ring.
 */
void
mac_soft_ring_stat_delete(mac_soft_ring_t *ringp)
{
	mac_soft_ring_set_t *srs = ringp->s_ring_set;

	if ((srs->srs_type & SRST_TX) != 0) {
		/*
		 * Tx ring has been taken away.  Before destroying the
		 * corresponding soft ring, save the stats it recorded.
		 */
		mac_misc_stats_t *misc_stat =
		    &srs->srs_mcip->mci_misc_stat;

		i_mac_add_stats(&misc_stat->mms_defuncttxlanestats,
		    &ringp->s_st_stat, &misc_stat->mms_defuncttxlanestats,
		    tx_softring_stats_list, TX_SOFTRING_STAT_SIZE);
	}

	if (ringp->s_ring_ksp != NULL) {
		kstat_delete(ringp->s_ring_ksp);
		ringp->s_ring_ksp = NULL;
	}
}
Esempio n. 7
0
/*
 * Remove an SRS's kstat.  For an Rx SRS, first fold its receive
 * counters into the owning client's defunct-lane totals so the
 * history is not lost with the SRS.
 */
void
mac_srs_stat_delete(mac_soft_ring_set_t *mac_srs)
{
	if ((mac_srs->srs_type & SRST_TX) == 0) {
		/*
		 * Rx ring has been taken away.  Before destroying the
		 * corresponding SRS, save the stats it recorded.
		 */
		mac_misc_stats_t *misc_stat =
		    &mac_srs->srs_mcip->mci_misc_stat;

		i_mac_add_stats(&misc_stat->mms_defunctrxlanestats,
		    &mac_srs->srs_rx.sr_stat,
		    &misc_stat->mms_defunctrxlanestats,
		    rx_srs_stats_list, RX_SRS_STAT_SIZE);
	}

	if (mac_srs->srs_ksp != NULL) {
		kstat_delete(mac_srs->srs_ksp);
		mac_srs->srs_ksp = NULL;
	}
}
Esempio n. 8
0
/*
 * Deactivate cap
 *   - Block its wait queue. This prevents any new threads from being
 *	enqueued there and moves all enqueued threads to the run queue.
 *   - Remove cap from list l.
 *   - Disable CPU caps globally if there are no capped projects or zones
 *
 * Should be called with caps_lock held.
 */
static void
cap_disable(list_t *l, cpucap_t *cap)
{
	ASSERT(MUTEX_HELD(&caps_lock));
	/*
	 * Cap should be currently active
	 */
	ASSERT(CPUCAPS_ON());
	ASSERT(list_link_active(&cap->cap_link));
	ASSERT(CAP_ENABLED(cap));

	/* Unblock waiters first; no new threads can enqueue afterwards. */
	waitq_block(&cap->cap_waitq);

	/*
	 * do this first to avoid race with cap_kstat_update
	 * (the kstat must be gone before the cap fields below are
	 * zeroed, otherwise the update callback could read torn state)
	 */
	if (cap->cap_kstat != NULL) {
		kstat_delete(cap->cap_kstat);
		cap->cap_kstat = NULL;
	}

	/* Detach from the caller's list and, if this was the last
	 * capped project/zone anywhere, turn the framework off. */
	list_remove(l, cap);
	if (list_is_empty(&capped_projects) && list_is_empty(&capped_zones)) {
		cpucaps_enabled = B_FALSE;
		cpucaps_clock_callout = NULL;
	}
	/* Scrub the cap so CAP_ENABLED() is false from here on. */
	cap->cap_value = cap->cap_chk_value = 0;
	cap->cap_project = NULL;
	cap->cap_zone = NULL;
}
Esempio n. 9
0
/*
 * Matched with pcmu_add_upstream_kstat(): delete the upstream kstat
 * (if one was created) and clear the handle so a repeat call is a
 * harmless no-op.
 */
void
pcmu_rem_upstream_kstat(pcmu_t *pcmu_p)
{
	kstat_t *uksp = pcmu_p->pcmu_uksp;

	if (uksp != NULL)
		kstat_delete(uksp);
	pcmu_p->pcmu_uksp = NULL;
}
Esempio n. 10
0
/*
 * Release a turbo-info record: delete its kstat (which is optional)
 * and then free the record itself (which is not).
 */
void
cpupm_turbo_fini(cpupm_mach_turbo_info_t *turbo_info)
{
	kstat_t *ksp = turbo_info->turbo_ksp;

	if (ksp != NULL)
		kstat_delete(ksp);
	kmem_free(turbo_info, sizeof (cpupm_mach_turbo_info_t));
}
Esempio n. 11
0
void kstat_osx_fini(void)
{
    if (osx_kstat_ksp != NULL) {
        kstat_delete(osx_kstat_ksp);
        osx_kstat_ksp = NULL;
    }
}
Esempio n. 12
0
/*
 * Detach all per-group kstat state for this iospc instance: run each
 * group's access_fini hook, delete its counter kstat, and free (and
 * clear) the per-group ksinfo record.
 */
void
iospc_kstat_detach(iospc_t *iospc_p)
{
	iospc_grp_t **grp_pp;
	iospc_grp_t *grp_p;
	int i;

	IOSPC_DBG2("iospc_kstat_detach called\n");

	for (i = 0, grp_pp = iospc_leaf_grps; *grp_pp != NULL; i++, grp_pp++) {

		/* Defensive bound in case the group list is oversized. */
		if (i >= IOSPC_MAX_NUM_GRPS)
			return;

		grp_p = *grp_pp;
		if (iospc_p->iospc_ksinfo_p[i] != NULL) {

			grp_p->access_fini(iospc_p, iospc_p->iospc_ksinfo_p[i]);

			if (iospc_p->iospc_ksinfo_p[i]->cntr_ksp != NULL)
				kstat_delete(
				    iospc_p->iospc_ksinfo_p[i]->cntr_ksp);
			kmem_free(iospc_p->iospc_ksinfo_p[i],
			    sizeof (iospc_ksinfo_t));
			/*
			 * Clear the slot so a later detach or lookup
			 * cannot dereference the freed record.
			 */
			iospc_p->iospc_ksinfo_p[i] = NULL;
		}

	}
}
Esempio n. 13
0
/*
 * Delete the flow entry's miscellaneous-stats kstat, if present,
 * and clear the handle so repeated calls are harmless.
 */
void
mac_misc_stat_delete(flow_entry_t *flent)
{
	if (flent->fe_misc_stat_ksp == NULL)
		return;

	kstat_delete(flent->fe_misc_stat_ksp);
	flent->fe_misc_stat_ksp = NULL;
}
Esempio n. 14
0
/*
 * Delete the ring's kstat, if one was published, and clear the
 * handle so repeated calls are harmless.
 */
void
mac_ring_stat_delete(mac_ring_t *ring)
{
	if (ring->mr_ksp == NULL)
		return;

	kstat_delete(ring->mr_ksp);
	ring->mr_ksp = NULL;
}
Esempio n. 15
0
/*
 * Delete the DRM softstate's kstat.  Clears the handle after the
 * delete so a repeat call cannot double-delete; a missing kstat is
 * reported (as before) rather than silently ignored.
 */
void
drm_fini_kstats(drm_device_t *sc)
{
	if (sc->asoft_ksp) {
		kstat_delete(sc->asoft_ksp);
		sc->asoft_ksp = NULL;
	} else {
		cmn_err(CE_WARN, "attempt to delete null kstat");
	}
}
Esempio n. 16
0
/*
 * Tear down the mirror vdev kstat.  Idempotent: once the global
 * handle is cleared, further calls do nothing.
 */
void
vdev_mirror_stat_fini(void)
{
	if (mirror_ksp == NULL)
		return;

	kstat_delete(mirror_ksp);
	mirror_ksp = NULL;
}
Esempio n. 17
0
/*
 * Tear down the zfetch kstat.  Idempotent: once the global handle
 * is cleared, further calls do nothing.
 */
void
zfetch_fini(void)
{
	if (zfetch_ksp == NULL)
		return;

	kstat_delete(zfetch_ksp);
	zfetch_ksp = NULL;
}
Esempio n. 18
0
/*
 * Delete a netstack kstat.  Kstats registered under the global
 * netstack id are also tracked in the shared list and must be
 * removed from there before the generic delete.
 */
void
kstat_delete_netstack(kstat_t *ks, netstackid_t ks_netstackid)
{
	if (ks_netstackid == GLOBAL_NETSTACKID)
		netstack_shared_kstat_remove(ks);

	kstat_delete(ks);
}
Esempio n. 19
0
/*
 * Tear down the vdev cache kstat.  Idempotent: once the global
 * handle is cleared, further calls do nothing.
 */
void
vdev_cache_stat_fini(void)
{
	if (vdc_ksp == NULL)
		return;

	kstat_delete(vdc_ksp);
	vdc_ksp = NULL;
}
Esempio n. 20
0
/*
 * Tear down the fletcher-4 benchmark kstat.  Idempotent: once the
 * global handle is cleared, further calls do nothing.
 */
void
fletcher_4_fini(void)
{
	if (fletcher_4_kstat == NULL)
		return;

	kstat_delete(fletcher_4_kstat);
	fletcher_4_kstat = NULL;
}
Esempio n. 21
0
/*
 * function to undo initialization done in oce_stat_init
 *
 * dev - software handle to the device
 *
 * return none
 */
void
oce_stat_fini(struct oce_dev *dev)
{
	oce_free_dma_buffer(dev, dev->stats_dbuf);
	dev->hw_stats = NULL;
	dev->stats_dbuf = NULL;
	/*
	 * Guard the delete (consistent with the driver's other fini
	 * paths) in case the kstat was never successfully created.
	 */
	if (dev->oce_kstats != NULL)
		kstat_delete(dev->oce_kstats);
	dev->oce_kstats = NULL;
} /* oce_stat_fini */
Esempio n. 22
0
/*ARGSUSED*/
/*
 * Remove the driver-level kstat for this MAC instance, if one was
 * published, and reset the associated bookkeeping.
 */
void
mac_driver_stat_delete(mac_impl_t *mip)
{
	if (mip->mi_ksp == NULL)
		return;

	kstat_delete(mip->mi_ksp);
	mip->mi_ksp = NULL;
	mip->mi_kstat_count = 0;
}
Esempio n. 23
0
static void
dadk_destroy_errstats(struct dadk *dadkp)
{
	if (!dadkp->dad_errstats)
		return;

	kstat_delete(dadkp->dad_errstats);
	dadkp->dad_errstats = NULL;
}
Esempio n. 24
0
/*
 * Free the tx-assign histogram buckets and delete the associated
 * kstat.  Pointers are cleared after release so a repeat call (or a
 * later stray access) cannot touch freed memory.
 */
static void
dsl_pool_tx_assign_destroy(dsl_pool_t *dp)
{
	if (dp->dp_tx_assign_buckets != NULL) {
		kmem_free(dp->dp_tx_assign_buckets,
		    dp->dp_tx_assign_size * sizeof (kstat_named_t));
		dp->dp_tx_assign_buckets = NULL;
	}

	if (dp->dp_tx_assign_kstat != NULL) {
		kstat_delete(dp->dp_tx_assign_kstat);
		dp->dp_tx_assign_kstat = NULL;
	}
}
Esempio n. 25
0
/*
 * Delete every kstat this bge instance created, walking the table
 * from the last slot back to the first (reverse of creation order).
 */
void
bge_fini_kstats(bge_t *bgep)
{
	int i;

	BGE_TRACE(("bge_fini_kstats($%p)", (void *)bgep));

	for (i = BGE_KSTAT_COUNT - 1; i >= 0; i--) {
		if (bgep->bge_kstats[i] != NULL)
			kstat_delete(bgep->bge_kstats[i]);
	}
}
Esempio n. 26
0
File: zfs_debug.c Progetto: LLNL/zfs
void
zfs_dbgmsg_fini(void)
{
	if (zfs_dbgmsg_kstat)
		kstat_delete(zfs_dbgmsg_kstat);

	mutex_enter(&zfs_dbgmsgs_lock);
	zfs_dbgmsg_purge(0);
	mutex_exit(&zfs_dbgmsgs_lock);
	mutex_destroy(&zfs_dbgmsgs_lock);
}
Esempio n. 27
0
/*
 * Remove the task's nprocs kstat and release its data area.
 * The ks_data pointer must be captured BEFORE kstat_delete(), which
 * tears down the kstat header; the data buffer is owned by us (it
 * was allocated separately at creation) and is freed afterwards.
 */
static void
task_kstat_delete(task_t *tk)
{
	void *data;

	if (tk->tk_nprocs_kstat != NULL) {
		/* Save the data pointer; the header is gone after delete. */
		data = tk->tk_nprocs_kstat->ks_data;
		kstat_delete(tk->tk_nprocs_kstat);
		kmem_free(data, sizeof (task_kstat_t));
		tk->tk_nprocs_kstat = NULL;
	}
}
Esempio n. 28
0
static void
dbuf_stats_hash_table_destroy(void)
{
	dbuf_stats_t *dsh = &dbuf_stats_hash_table;
	kstat_t *ksp;

	ksp = dsh->kstat;
	if (ksp)
		kstat_delete(ksp);

	mutex_destroy(&dsh->lock);
}
/*
 * Delete every per-PIC name kstat attached to this ksinfo record.
 * A NULL record is tolerated and treated as a no-op.
 */
static void
ni_delete_name_kstat(ni_ksinfo_t *pp)
{
	int pic;

	if (pp == NULL)
		return;

	for (pic = 0; pic < NUM_OF_PICS; pic++) {
		if (pp->pic_name_ksp[pic] != NULL)
			kstat_delete(pp->pic_name_ksp[pic]);
	}
}
Esempio n. 30
0
/*
 * Delete up to num_kstats name kstats from the given array.  A NULL
 * array pointer is tolerated and treated as a no-op; NULL slots are
 * skipped.
 */
static void
iospc_delete_name_kstats(kstat_t **name_kstats_pp, int num_kstats)
{
	int slot;

	if (name_kstats_pp == NULL)
		return;

	for (slot = 0; slot < num_kstats; slot++) {
		if (name_kstats_pp[slot] != NULL)
			kstat_delete(name_kstats_pp[slot]);
	}
}