/*ARGSUSED*/
static void
fbt_resume(void *arg, dtrace_id_t id, void *parg)
{
#pragma unused(arg,id)
	fbt_probe_t *fbt = parg;
	struct modctl *ctl = fbt->fbtp_ctl;

#if defined (__ppc__) || defined (__ppc64__)
	/* PPC also hooks the interrupt path; claim it before patching. */
	dtrace_casptr(&tempDTraceIntHook, NULL, fbt_perfIntCallback);
	if (tempDTraceIntHook != (perfCallback)fbt_perfIntCallback) {
		if (fbt_verbose) {
			cmn_err(CE_NOTE, "fbt_resume is failing for probe %s "
			    "in module %s: tempDTraceIntHook already occupied.",
			    fbt->fbtp_name, ctl->mod_modname);
		}
		return;
	}
#endif

	dtrace_casptr(&tempDTraceTrapHook, NULL, fbt_perfCallback);
	if (tempDTraceTrapHook != (perfCallback)fbt_perfCallback) {
		if (fbt_verbose) {
			cmn_err(CE_NOTE, "fbt_resume is failing for probe %s "
			    "in module %s: tempDTraceTrapHook already occupied.",
			    fbt->fbtp_name, ctl->mod_modname);
		}
		return;
	}

	/* Re-apply the patch instruction at every probe in the chain. */
	for (; fbt != NULL; fbt = fbt->fbtp_next)
		(void)ml_nofault_copy((vm_offset_t)&fbt->fbtp_patchval,
		    (vm_offset_t)fbt->fbtp_patchpoint,
		    sizeof(fbt->fbtp_patchval));

	dtrace_membar_consumer();
}
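/*
 * Claiming tempDTraceTrapHook only installs a handler; the handler is what
 * turns the trap taken at a patched instruction back into a probe firing.
 * The real fbt_perfCallback is platform-specific (it must decode and fix up
 * the saved register state), so the following is only a hypothetical sketch
 * of its probe-matching core, assuming the trapping pc is hashed into
 * fbt_probetab via FBT_ADDR2NDX() as in the xnu fbt provider.  The
 * fbt_perfCallback_sketch name and the flattened argument list are
 * inventions for illustration.
 */
static kern_return_t
fbt_perfCallback_sketch(int trapno, uintptr_t pc, uintptr_t arg0)
{
	fbt_probe_t *fbt;

	if (trapno != FBT_EXCEPTION_CODE)
		return KERN_FAILURE;		/* not our trap; decline it */

	for (fbt = fbt_probetab[FBT_ADDR2NDX(pc)]; fbt != NULL;
	    fbt = fbt->fbtp_hashnext) {
		if ((uintptr_t)fbt->fbtp_patchpoint == pc) {
			/*
			 * Fire the probe; the real callback must then
			 * emulate the displaced instruction and resume.
			 */
			dtrace_probe(fbt->fbtp_id, arg0, 0, 0, 0, 0);
			return KERN_SUCCESS;
		}
	}
	return KERN_FAILURE;
}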
/*ARGSUSED*/
static void
fbt_resume(void *arg, dtrace_id_t id, void *parg)
{
#pragma unused(arg,id)
	fbt_probe_t *fbt = parg;
	struct modctl *ctl = NULL;

	for (; fbt != NULL; fbt = fbt->fbtp_next) {
		ctl = fbt->fbtp_ctl;

		ASSERT(ctl->mod_nenabled > 0);
		if (!ctl->mod_loaded || (ctl->mod_loadcnt != fbt->fbtp_loadcnt))
			continue;

		dtrace_casptr(&tempDTraceTrapHook, NULL, fbt_perfCallback);
		if (tempDTraceTrapHook != (perfCallback)fbt_perfCallback) {
			if (fbt_verbose) {
				cmn_err(CE_NOTE, "fbt_resume is failing for probe %s "
				    "in module %s: tempDTraceTrapHook already occupied.",
				    fbt->fbtp_name, ctl->mod_modname);
			}
			return;
		}

		(void)ml_nofault_copy((vm_offset_t)&fbt->fbtp_patchval,
		    (vm_offset_t)fbt->fbtp_patchpoint,
		    sizeof(fbt->fbtp_patchval));

#if CONFIG_EMBEDDED
		/*
		 * Make the patched instruction visible via a data + instruction
		 * cache flush.
		 */
		flush_dcache((vm_offset_t)fbt->fbtp_patchpoint,
		    (vm_size_t)sizeof(fbt->fbtp_patchval), 0);
		invalidate_icache((vm_offset_t)fbt->fbtp_patchpoint,
		    (vm_size_t)sizeof(fbt->fbtp_patchval), 0);
#endif

		fbt->fbtp_currentval = fbt->fbtp_patchval;
	}

	dtrace_membar_consumer();
}
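/*
 * The inverse operation is not shown in this excerpt.  A minimal sketch of
 * fbt_suspend, assuming it mirrors fbt_resume exactly: write the saved
 * original instruction (fbtp_savedval) back over the patch point so probes
 * stop firing while the framework is quiescing, skipping modules that have
 * been unloaded or reloaded.
 */
/*ARGSUSED*/
static void
fbt_suspend(void *arg, dtrace_id_t id, void *parg)
{
#pragma unused(arg,id)
	fbt_probe_t *fbt = parg;
	struct modctl *ctl = NULL;

	for (; fbt != NULL; fbt = fbt->fbtp_next) {
		ctl = fbt->fbtp_ctl;

		ASSERT(ctl->mod_nenabled > 0);
		if (!ctl->mod_loaded || (ctl->mod_loadcnt != fbt->fbtp_loadcnt))
			continue;

		(void)ml_nofault_copy((vm_offset_t)&fbt->fbtp_savedval,
		    (vm_offset_t)fbt->fbtp_patchpoint,
		    sizeof(fbt->fbtp_savedval));

#if CONFIG_EMBEDDED
		/* Make the restored instruction visible, as in fbt_resume. */
		flush_dcache((vm_offset_t)fbt->fbtp_patchpoint,
		    (vm_size_t)sizeof(fbt->fbtp_savedval), 0);
		invalidate_icache((vm_offset_t)fbt->fbtp_patchpoint,
		    (vm_size_t)sizeof(fbt->fbtp_savedval), 0);
#endif

		fbt->fbtp_currentval = fbt->fbtp_savedval;
	}

	dtrace_membar_consumer();
}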
void
mutex_enter_common(mutex_t *mp, int dflag)
{
	unsigned long flags;
	unsigned int cnt;

	if (!mp->m_initted) {
		/***********************************************/
		/*   Special debug: detect a dynamic mutex     */
		/*   being used (one coming from a kmalloc     */
		/*   type block of memory), vs the statically  */
		/*   defined ones.                             */
		/***********************************************/
		if (mp->m_initted != 2) {
			dtrace_printf("initting a mutex\n");
			dump_stack();
		}
		dmutex_init(mp);
	}

	/***********************************************/
	/*   Check for a recursive mutex. There are a  */
	/*   number of scenarios.                      */
	/*                                             */
	/*   Non-intr followed by an intr: we have to  */
	/*   allow the intr.                           */
	/*                                             */
	/*   Non-intr followed by non-intr: a normal   */
	/*   recursive mutex.                          */
	/*                                             */
	/*   Intr followed by an intr: shouldn't       */
	/*   happen.                                   */
	/*                                             */
	/*   We mustn't allow ourselves to be moved    */
	/*   to another cpu, else we will lose track   */
	/*   of which cpu has the mutex.               */
	/*                                             */
	/*   Now that the mutex code is working, we    */
	/*   mustn't allow recursive mutexes. They     */
	/*   cause problems when two dtrace user       */
	/*   space apps run at the same time. Turned   */
	/*   off for now; later on, the code below     */
	/*   can be deleted.                           */
	/***********************************************/
	if (0 && mp->m_count && mp->m_cpu == smp_processor_id()) {
		static int x;
		if (x++ < 4 || (x < 1000000 && (x % 5000) == 0))
			dtrace_printf("%p mutex recursive, dflag=%d %d [%d]\n",
			    mp, dflag, mp->m_type, x);
		mp->m_level++;
		return;
	}

	if (disable_ints && dflag)
		flags = dtrace_interrupt_disable();
	else
		flags = dtrace_interrupt_get();

	for (cnt = 0; dtrace_casptr(&mp->m_count, 0, (void *) 1) == (void *) 1; ) {
		/***********************************************/
		/*   We are waiting for the lock. Someone      */
		/*   else has it. Someone else might be        */
		/*   waiting for us (xcall), so occasionally   */
		/*   empty the xcall queue for us.             */
		/***********************************************/
		if ((cnt++ % 100) == 0)
			xcall_slave2();

		/***********************************************/
		/*   If we are running in the upper half of    */
		/*   the kernel, periodically let the          */
		/*   scheduler run, to avoid deadlock when     */
		/*   running N+1 copies of dtrace on an N      */
		/*   CPU system.                               */
		/***********************************************/
		if (/*!dflag &&*/ (cnt % 2000) == 0)
			schedule();

		/***********************************************/
		/*   If we start locking up the kernel, let    */
		/*   the user know something bad is            */
		/*   happening. Probably pointless if the      */
		/*   mutex is working correctly.               */
		/***********************************************/
		if ((cnt % (500 * 1000 * 1000)) == 0) {
			dtrace_printf("mutex_enter: taking a long time to grab lock mtx3=%llu\n",
			    cnt_mtx3);
			cnt_mtx3++;
		}
	}

	//preempt_disable();
	mp->m_flags = flags;
	mp->m_cpu = smp_processor_id();
	mp->m_level = 1;
	mp->m_type = dflag;
}
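/*
 * The release path is not shown above.  A minimal sketch of a matching
 * mutex_exit_common(), assuming it simply undoes the bookkeeping done at
 * the bottom of mutex_enter_common(); the function name and exact field
 * ordering are assumptions, not quotes from the port.  The saved interrupt
 * flags and the intr/non-intr type must be read before m_count is cleared,
 * because another cpu can win the dtrace_casptr() race the instant the
 * count word drops back to zero.
 */
void
mutex_exit_common(mutex_t *mp)
{
	unsigned long flags = mp->m_flags;	/* capture before release */
	int dflag = mp->m_type;

	mp->m_level = 0;
	mp->m_cpu = -1;				/* no owning cpu */
	dtrace_membar_producer();		/* order stores before release */
	mp->m_count = 0;			/* release: casptr can now win */

	if (disable_ints && dflag)
		dtrace_interrupt_enable(flags);
}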
/*ARGSUSED*/
static int
sdt_enable(void *arg, dtrace_id_t id, void *parg)
{
#pragma unused(arg,id)
	sdt_probe_t *sdp = parg;
	struct modctl *ctl = sdp->sdp_ctl;

	ctl->mod_nenabled++;

	/*
	 * If this module has disappeared since we discovered its probes,
	 * refuse to enable it.
	 */
	if (!ctl->mod_loaded) {
		if (sdt_verbose) {
			cmn_err(CE_NOTE, "sdt is failing for probe %s "
			    "(module %s unloaded)",
			    sdp->sdp_name, ctl->mod_modname);
		}
		goto err;
	}

	/*
	 * Now check that our modctl has the expected load count. If it
	 * doesn't, this module must have been unloaded and reloaded -- and
	 * we're not going to touch it.
	 */
	if (ctl->mod_loadcnt != sdp->sdp_loadcnt) {
		if (sdt_verbose) {
			cmn_err(CE_NOTE, "sdt is failing for probe %s "
			    "(module %s reloaded)",
			    sdp->sdp_name, ctl->mod_modname);
		}
		goto err;
	}

	dtrace_casptr(&tempDTraceTrapHook, NULL, fbt_perfCallback);
	if (tempDTraceTrapHook != (perfCallback)fbt_perfCallback) {
		if (sdt_verbose) {
			cmn_err(CE_NOTE, "sdt_enable is failing for probe %s "
			    "in module %s: tempDTraceTrapHook already occupied.",
			    sdp->sdp_name, ctl->mod_modname);
		}
		return (0);
	}

	while (sdp != NULL) {
		(void)ml_nofault_copy((vm_offset_t)&sdp->sdp_patchval,
		    (vm_offset_t)sdp->sdp_patchpoint,
		    (vm_size_t)sizeof(sdp->sdp_patchval));

		/*
		 * Make the patched instruction visible via a data + instruction
		 * cache flush on platforms that need it.
		 */
		flush_dcache((vm_offset_t)sdp->sdp_patchpoint,
		    (vm_size_t)sizeof(sdp->sdp_patchval), 0);
		invalidate_icache((vm_offset_t)sdp->sdp_patchpoint,
		    (vm_size_t)sizeof(sdp->sdp_patchval), 0);

		sdp = sdp->sdp_next;
	}

err:
	return (0);
}
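/*
 * For symmetry, a minimal sketch of the disable path, assuming it mirrors
 * sdt_enable with the copy direction reversed: put the saved original
 * instruction (sdp_savedval) back and drop the enabled count.  The
 * structure follows the enable routine above; treat it as a sketch rather
 * than a quote of the real sdt_disable.
 */
/*ARGSUSED*/
static void
sdt_disable(void *arg, dtrace_id_t id, void *parg)
{
#pragma unused(arg,id)
	sdt_probe_t *sdp = parg;
	struct modctl *ctl = sdp->sdp_ctl;

	ctl->mod_nenabled--;

	/* As in sdt_enable: don't touch an unloaded or reloaded module. */
	if (!ctl->mod_loaded || (ctl->mod_loadcnt != sdp->sdp_loadcnt))
		return;

	while (sdp != NULL) {
		(void)ml_nofault_copy((vm_offset_t)&sdp->sdp_savedval,
		    (vm_offset_t)sdp->sdp_patchpoint,
		    (vm_size_t)sizeof(sdp->sdp_savedval));

		/* Flush caches so the restored instruction is fetched. */
		flush_dcache((vm_offset_t)sdp->sdp_patchpoint,
		    (vm_size_t)sizeof(sdp->sdp_savedval), 0);
		invalidate_icache((vm_offset_t)sdp->sdp_patchpoint,
		    (vm_size_t)sizeof(sdp->sdp_savedval), 0);

		sdp = sdp->sdp_next;
	}
}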
/*ARGSUSED*/
int
fbt_enable(void *arg, dtrace_id_t id, void *parg)
{
#pragma unused(arg,id)
	fbt_probe_t *fbt = parg;
	struct modctl *ctl = NULL;

	for (; fbt != NULL; fbt = fbt->fbtp_next) {
		ctl = fbt->fbtp_ctl;

		/*
		 * If this module has disappeared since we discovered its
		 * probes, refuse to enable it.
		 */
		if (!ctl->mod_loaded) {
			if (fbt_verbose) {
				cmn_err(CE_NOTE, "fbt is failing for probe %s "
				    "(module %s unloaded)",
				    fbt->fbtp_name, ctl->mod_modname);
			}
			continue;
		}

		/*
		 * Now check that our modctl has the expected load count. If it
		 * doesn't, this module must have been unloaded and reloaded --
		 * and we're not going to touch it.
		 */
		if (ctl->mod_loadcnt != fbt->fbtp_loadcnt) {
			if (fbt_verbose) {
				cmn_err(CE_NOTE, "fbt is failing for probe %s "
				    "(module %s reloaded)",
				    fbt->fbtp_name, ctl->mod_modname);
			}
			continue;
		}

		dtrace_casptr(&tempDTraceTrapHook, NULL, fbt_perfCallback);
		if (tempDTraceTrapHook != (perfCallback)fbt_perfCallback) {
			if (fbt_verbose) {
				cmn_err(CE_NOTE, "fbt_enable is failing for probe %s "
				    "in module %s: tempDTraceTrapHook already occupied.",
				    fbt->fbtp_name, ctl->mod_modname);
			}
			continue;
		}

		if (fbt->fbtp_currentval != fbt->fbtp_patchval) {
			(void)ml_nofault_copy((vm_offset_t)&fbt->fbtp_patchval,
			    (vm_offset_t)fbt->fbtp_patchpoint,
			    sizeof(fbt->fbtp_patchval));

			/*
			 * Make the patched instruction visible via a data +
			 * instruction cache flush for the platforms that
			 * need it.
			 */
			flush_dcache((vm_offset_t)fbt->fbtp_patchpoint,
			    (vm_size_t)sizeof(fbt->fbtp_patchval), 0);
			invalidate_icache((vm_offset_t)fbt->fbtp_patchpoint,
			    (vm_size_t)sizeof(fbt->fbtp_patchval), 0);

			fbt->fbtp_currentval = fbt->fbtp_patchval;
			ctl->mod_nenabled++;
		}
	}

	dtrace_membar_consumer();
	return (0);
}
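/*
 * How these entry points are reached: a DTrace provider registers a
 * dtrace_pops_t operations table, and the framework calls dtps_enable /
 * dtps_resume (the routines above) for each enabled probe.  A sketch of
 * fbt's registration using the standard dtrace_register() API; fbt_disable,
 * fbt_suspend, fbt_destroy, fbt_attr and the fbt_register_sketch wrapper
 * refer to provider pieces outside this excerpt.
 */
static dtrace_pops_t fbt_pops = {
	.dtps_provide		= NULL,
	.dtps_provide_module	= fbt_provide_module,
	.dtps_enable		= fbt_enable,
	.dtps_disable		= fbt_disable,
	.dtps_suspend		= fbt_suspend,
	.dtps_resume		= fbt_resume,
	.dtps_getargdesc	= NULL,
	.dtps_getargval		= NULL,
	.dtps_usermode		= NULL,
	.dtps_destroy		= fbt_destroy
};

static dtrace_provider_id_t fbt_id;

static void
fbt_register_sketch(void)
{
	if (dtrace_register("fbt", &fbt_attr, DTRACE_PRIV_KERNEL, NULL,
	    &fbt_pops, NULL, &fbt_id) != 0)
		cmn_err(CE_NOTE, "fbt: dtrace_register failed");
}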