/*ARGSUSED*/
/*
 * dtps_provide callback for the Mach trap (machtrace) provider.
 *
 * Creates an "entry" and a "return" probe for every Mach trap that has an
 * underlying implementation.  Probes are only provided wholesale: if a
 * specific probe description is requested (desc != NULL), we do nothing.
 */
static void
machtrace_provide(void *arg, const dtrace_probedesc_t *desc)
{
#pragma unused(arg) /* __APPLE__ */
	int i;

	/* We create all probes at once; ignore targeted requests. */
	if (desc != NULL)
		return;

	/* (Re)build the machtrace sysent shadow table from the trap table. */
	machtrace_init(mach_trap_table, &machtrace_sysent);

	for (i = 0; i < NSYSCALL; i++) {
		/* Skip traps with no underlying implementation. */
		if (machtrace_sysent[i].stsy_underlying == NULL)
			continue;

		/* If the entry probe already exists, both were created. */
		if (dtrace_probe_lookup(machtrace_id, NULL,
		    mach_syscall_name_table[i], "entry") != 0)
			continue;

		/* The probe argument encodes the syscall index. */
		(void) dtrace_probe_create(machtrace_id, NULL,
		    mach_syscall_name_table[i], "entry",
		    MACHTRACE_ARTIFICIAL_FRAMES,
		    (void *)((uintptr_t)SYSTRACE_ENTRY(i)));
		(void) dtrace_probe_create(machtrace_id, NULL,
		    mach_syscall_name_table[i], "return",
		    MACHTRACE_ARTIFICIAL_FRAMES,
		    (void *)((uintptr_t)SYSTRACE_RETURN(i)));

		/* Mark both probe ids as not-yet-enabled. */
		machtrace_sysent[i].stsy_entry = DTRACE_IDNONE;
		machtrace_sysent[i].stsy_return = DTRACE_IDNONE;
	}
}
/*
 * dtps_provide callback for the dt_perf test provider: create the
 * "invoke" and "result" probes exactly once, recording their ids in the
 * invoke_pid/result_pid globals.
 */
void
dt_perf_provide(void *arg, const dtrace_probedesc_t *desc)
{
	/* If "invoke" exists, both probes were created on an earlier call. */
	if (dtrace_probe_lookup(dt_perf_id, "dt_perf", NULL, "invoke") != 0)
		return;

	invoke_pid = dtrace_probe_create(dt_perf_id, "dt_perf", NULL,
	    "invoke", 0, NULL);
	result_pid = dtrace_probe_create(dt_perf_id, "dt_perf", NULL,
	    "result", 0, NULL);
}
/*
 * Create a profile/tick probe named `name` firing every `interval`
 * nanoseconds.  `kind` selects tick (one CPU) vs. profile (all CPUs)
 * behavior.  Silently does nothing if the interval is below the minimum,
 * the probe already exists, or the global probe cap is reached.
 */
static void
profile_create(hrtime_t interval, const char *name, int kind)
{
	profile_probe_t *prof;

	/* Refuse intervals shorter than the configured floor. */
	if (interval < profile_interval_min)
		return;

	/* The probe already exists; nothing to do. */
	if (dtrace_probe_lookup(profile_id, NULL, NULL, name) != 0)
		return;

	/* Reserve a slot against the global cap, backing out on overflow. */
	atomic_add_32(&profile_total, 1);
	if (profile_total > profile_max) {
		atomic_add_32(&profile_total, -1);
		return;
	}

	/*
	 * Tick probes fire on a single CPU; other kinds carry per-CPU
	 * state appended after the probe structure.
	 */
	if (PROF_TICK == kind)
		prof = kmem_zalloc(sizeof (profile_probe_t), KM_SLEEP);
	else
		prof = kmem_zalloc(sizeof (profile_probe_t) +
		    NCPU*sizeof(profile_probe_percpu_t), KM_SLEEP);

	(void) strlcpy(prof->prof_name, name, sizeof(prof->prof_name));
	prof->prof_interval = interval;
	prof->prof_cyclic = CYCLIC_NONE;
	prof->prof_kind = kind;
	prof->prof_id = dtrace_probe_create(profile_id,
	    NULL, NULL, name,
	    profile_aframes ? profile_aframes : PROF_ARTIFICIAL_FRAMES,
	    prof);
}
/*
 * Create a profile/tick probe named `name` firing every `interval`
 * nanoseconds (Linux variant).  Silently does nothing if the interval is
 * below the minimum, the probe already exists, or the global cap on
 * profile probes is reached.
 */
static void
profile_create(hrtime_t interval, const char *name, int kind)
{
	profile_probe_t *prof;
	int nr_frames = PROF_ARTIFICIAL_FRAMES + dtrace_mach_aframes();

	if (profile_aframes)
		nr_frames = profile_aframes;

	/* Refuse intervals shorter than the configured floor. */
	if (interval < profile_interval_min)
		return;

	/* The probe already exists; nothing to do. */
	if (dtrace_probe_lookup(profile_id, NULL, NULL, name) != 0)
		return;

	/* Reserve a slot against the global cap, backing out on overflow. */
	atomic_inc_32(&profile_total);
	if (profile_total > profile_max) {
		atomic_dec_32(&profile_total);
		return;
	}

	prof = kmem_zalloc(sizeof (profile_probe_t), KM_SLEEP);
	/*
	 * Bound the copy to the destination buffer: the caller-supplied
	 * name can exceed prof_name, and the previous unbounded strcpy()
	 * would overflow the probe structure.  This also matches the
	 * strlcpy() usage of the Solaris version of this function.
	 */
	(void) strlcpy(prof->prof_name, name, sizeof(prof->prof_name));
	prof->prof_interval = interval;
	prof->prof_cyclic = CYCLIC_NONE;
	prof->prof_kind = kind;
	prof->prof_id = dtrace_probe_create(profile_id,
	    NULL, NULL, name, nr_frames, prof);
}
/*
 * Register a statically-defined tracing (SDT) probe with the DTrace
 * framework.  Derives the module name (from the probe or the containing
 * linker file), normalizes the function name, and translates "__" in the
 * probe name to "-" per DTrace naming convention, then creates the probe
 * unless an identical one already exists.
 */
static void
sdt_create_probe(struct sdt_probe *probe)
{
	struct sdt_provider *prov;
	char mod[DTRACE_MODNAMELEN];
	char func[DTRACE_FUNCNAMELEN];
	char name[DTRACE_NAMELEN];
	const char *from;
	char *to;
	size_t len;

	/* Reject probes compiled against a different struct layout. */
	if (probe->version != (int)sizeof(*probe)) {
		printf("ignoring probe %p, version %u expected %u\n",
		    probe, probe->version, (int)sizeof(*probe));
		return;
	}

	/* Find the provider this probe belongs to. */
	TAILQ_FOREACH(prov, &sdt_prov_list, prov_entry)
		if (strcmp(prov->name, probe->prov->name) == 0)
			break;

	KASSERT(prov != NULL, ("probe defined without a provider"));

	/* If no module name was specified, use the module filename. */
	if (*probe->mod == 0) {
		len = strlcpy(mod, probe->sdtp_lf->filename, sizeof(mod));
		/* Strip a trailing ".ko" from the filename. */
		if (len > 3 && strcmp(mod + len - 3, ".ko") == 0)
			mod[len - 3] = '\0';
	} else
		strlcpy(mod, probe->mod, sizeof(mod));

	/*
	 * Unfortunately this is necessary because the Solaris DTrace
	 * code mixes consts and non-consts with casts to override
	 * the incompatibilies. On FreeBSD, we use strict warnings
	 * in the C compiler, so we have to respect const vs non-const.
	 */
	strlcpy(func, probe->func, sizeof(func));
	if (func[0] == '\0')
		strcpy(func, "none");

	/* Translate "__" in the probe name to "-" (bounded copy). */
	from = probe->name;
	to = name;
	for (len = 0; len < (sizeof(name) - 1) && *from != '\0';
	    len++, from++, to++) {
		if (from[0] == '_' && from[1] == '_') {
			*to = '-';
			from++;
		} else
			*to = *from;
	}
	*to = '\0';

	/* Don't create a duplicate. */
	if (dtrace_probe_lookup(prov->id, mod, func, name) != DTRACE_IDNONE)
		return;

	(void)dtrace_probe_create(prov->id, mod, func, name, 1, probe);
}
static void systrace_provide(void *arg, const dtrace_probedesc_t *desc) { int i; if (desc != NULL) return; for (i = 0; i < MAXSYSCALL; i++) { if (dtrace_probe_lookup(systrace_id, NULL, SYSCALLNAMES[i], "entry") != 0) continue; (void) dtrace_probe_create(systrace_id, NULL, SYSCALLNAMES[i], "entry", SYSTRACE_ARTIFICIAL_FRAMES, (void *)(intptr_t)SYSTRACE_ENTRY(i)); (void) dtrace_probe_create(systrace_id, NULL, SYSCALLNAMES[i], "return", SYSTRACE_ARTIFICIAL_FRAMES, (void *)(intptr_t)SYSTRACE_RETURN(i)); } }
/*
 * dtps_provide callback for the syscall provider: walk the syscall-name
 * table and create an "entry" and a "return" probe for each syscall that
 * does not already have them.  Targeted requests (desc != NULL) are
 * ignored; probes are only provided wholesale.
 */
static void
systrace_provide(void *arg, dtrace_probedesc_t *desc)
{
	int sysnum;

	if (desc != NULL)
		return;

	for (sysnum = 0; sysnum < MAXSYSCALL; sysnum++) {
		const char *sysname = uglyhack.pp_syscallnames[sysnum];

		/* An existing entry probe means both were created already. */
		if (dtrace_probe_lookup(systrace_id, MODNAME, sysname,
		    "entry") != 0)
			continue;

		/* The probe argument encodes the syscall number. */
		(void) dtrace_probe_create(systrace_id, MODNAME, sysname,
		    "entry", SYSTRACE_ARTIFICIAL_FRAMES,
		    (void *)((uintptr_t)SYSTRACE_ENTRY(sysnum)));
		(void) dtrace_probe_create(systrace_id, MODNAME, sysname,
		    "return", SYSTRACE_ARTIFICIAL_FRAMES,
		    (void *)((uintptr_t)SYSTRACE_RETURN(sysnum)));
	}
}
/*
 * Create an FBT "entry" probe at the first instruction of a function.
 * Always returns 1 (callers appear to treat the return as "keep going";
 * NOTE(review): confirm the return-value contract against the callers).
 */
static int
fbt_prov_entry(pf_info_t *infp, instr_t *instr, int size, int modrm)
{
	fbt_probe_t *fbt;

	/***********************************************/
	/*   Avoid patching a patched probe point.     */
	/***********************************************/
	if (*instr == 0xcc)
		return 1;

	/***********************************************/
	/*   This  is  temporary.  Because  of   the   */
	/*   issue  of %RIP relative addressing modes  */
	/*   potentially  not being addressable in our */
	/*   single-step  jump buffer, disable  those  */
	/*   probes which look like one of these.      */
	/***********************************************/
	if (modrm >= 0 && (instr[modrm] & 0xc7) == 0x05)
		return 1;

	fbt = kmem_zalloc(sizeof (fbt_probe_t), KM_SLEEP);
	fbt->fbtp_name = infp->name;

	fbt->fbtp_id = dtrace_probe_create(fbt_id, infp->modname,
		infp->name, FBT_ENTRY, 3, fbt);
	num_probes++;
	fbt->fbtp_patchpoint = instr;
	fbt->fbtp_ctl = infp->mp; // ctl;
	fbt->fbtp_loadcnt = get_refcount(infp->mp);
	fbt->fbtp_rval = DTRACE_INVOP_ANY;

	/***********************************************/
	/*   Save  potential overwrite of instruction  */
	/*   and  length,  because  we  will need the  */
	/*   entire  instruction  when we single step  */
	/*   over it.                                  */
	/***********************************************/
	fbt->fbtp_savedval = *instr;
	fbt->fbtp_inslen = size;
	fbt->fbtp_type = 0; /* entry */
//if (modrm >= 0 && (instr[modrm] & 0xc7) == 0x05) printk("modrm %s %p rm=%d\n", name, instr, modrm);
	fbt->fbtp_modrm = modrm;
	fbt->fbtp_patchval = FBT_PATCHVAL;

	/* Link into the patchpoint hash table for INT3 dispatch. */
	fbt->fbtp_hashnext = fbt_probetab[FBT_ADDR2NDX(instr)];
	fbt->fbtp_symndx = infp->symndx;
	fbt_probetab[FBT_ADDR2NDX(instr)] = fbt;

	infp->pmp->fbt_nentries++;

	/* Reset the return-probe daisy chain for this function. */
	infp->retptr = NULL;
	return 1;
}
/*
 * Create (or chain) an SDT "return" probe for an I/O provider at the
 * given instruction.  Multiple return sites of one function share a
 * single probe id via a daisy chain rooted at infp->retptr.
 * Always returns 1.
 *
 * NOTE(review): if no provider matches infp->modname, the loop leaves
 * prov at the table terminator — confirm sdt_providers always contains a
 * matching entry for callers of this function.
 */
static int
io_prov_return(pf_info_t *infp, uint8_t *instr, int size)
{
	sdt_probe_t *sdp;
	sdt_provider_t *prov;
	uint8_t *offset;
	char *name;
	sdt_probe_t *retsdt = infp->retptr;

	printk("io_prov_return called %s:%s %p sz=%x\n", infp->modname,
	    infp->name, instr, size);
	/* Locate the provider whose name matches the module name. */
	for (prov = sdt_providers; prov->sdtp_prefix != NULL; prov++) {
		if (strcmp(prov->sdtp_name, infp->modname) == 0)
			break;
	}

	name = kstrdup(infp->name2, KM_SLEEP);
	sdp = kmem_zalloc(sizeof (sdt_probe_t), KM_SLEEP);

	/***********************************************/
	/*   Daisy  chain the return exit points so we */
	/*   dont  end up firing all of them when we */
	/*   return from the probe.                    */
	/***********************************************/
	if (retsdt == NULL) {
		sdp->sdp_id = dtrace_probe_create(prov->sdtp_id,
			infp->func, NULL, name, 0, sdp);
		infp->retptr = sdp;
	} else {
		retsdt->sdp_next = sdp;
		sdp->sdp_id = retsdt->sdp_id;
	}
	sdp->sdp_name = name;
	sdp->sdp_namelen = strlen(name);
	sdp->sdp_inslen = size;
	sdp->sdp_provider = prov;
	sdp->sdp_flags = infp->flags;
	sdp->sdp_entry = FALSE;

	/***********************************************/
	/*   Add the entry to the hash table.          */
	/***********************************************/
	offset = instr;
	sdp->sdp_hashnext =
		sdt_probetab[SDT_ADDR2NDX(offset)];
	sdt_probetab[SDT_ADDR2NDX(offset)] = sdp;

	/* Record the byte to patch and its original value. */
	sdp->sdp_patchval = PATCHVAL;
	sdp->sdp_patchpoint = (uint8_t *)offset;
	sdp->sdp_savedval = *sdp->sdp_patchpoint;
	return 1;
}
/*ARGSUSED*/
/*
 * dtps_provide callback for the syscall provider (Apple variant).
 *
 * Initializes the systrace sysent shadow table(s) and creates an "entry"
 * and a "return" probe for every syscall with an underlying handler.
 * Probes are provided wholesale; targeted requests (desc != NULL) are
 * ignored.
 */
static void
systrace_provide(void *arg, const dtrace_probedesc_t *desc)
{
#pragma unused(arg) /* __APPLE__ */
	int i;

	if (desc != NULL)
		return;

	systrace_init(sysent, &systrace_sysent);
#ifdef _SYSCALL32_IMPL
	/* Mirror for the 32-bit compatibility syscall table. */
	systrace_init(sysent32, &systrace_sysent32);
#endif

	for (i = 0; i < NSYSCALL; i++) {
		/* Skip entries with no underlying implementation. */
		if (systrace_sysent[i].stsy_underlying == NULL)
			continue;

		/* If the entry probe already exists, both were created. */
		if (dtrace_probe_lookup(systrace_id, NULL,
		    syscallnames[i], "entry") != 0)
			continue;

		/* The probe argument encodes the syscall number. */
		(void) dtrace_probe_create(systrace_id, NULL,
		    syscallnames[i], "entry",
		    SYSTRACE_ARTIFICIAL_FRAMES,
		    (void *)((uintptr_t)SYSTRACE_ENTRY(i)));
		(void) dtrace_probe_create(systrace_id, NULL,
		    syscallnames[i], "return",
		    SYSTRACE_ARTIFICIAL_FRAMES,
		    (void *)((uintptr_t)SYSTRACE_RETURN(i)));

		/* Mark probe ids as not-yet-enabled. */
		systrace_sysent[i].stsy_entry = DTRACE_IDNONE;
		systrace_sysent[i].stsy_return = DTRACE_IDNONE;
#ifdef _SYSCALL32_IMPL
		systrace_sysent32[i].stsy_entry = DTRACE_IDNONE;
		systrace_sysent32[i].stsy_return = DTRACE_IDNONE;
#endif
	}
}
/*ARGSUSED*/
/*
 * dtps_provide callback for the lockstat provider: walk the static probe
 * table and create every probe that does not already exist.
 */
static void
lockstat_provide(void *arg, const dtrace_probedesc_t *desc)
{
	lockstat_probe_t *probe;
	int i = 0;

	for (i = 0; lockstat_probes[i].lsp_func != NULL; i++) {
		probe = &lockstat_probes[i];

		/* Skip probes created on a previous pass. */
		if (dtrace_probe_lookup(lockstat_id, "genunix",
		    probe->lsp_func, probe->lsp_name) != 0)
			continue;

		/* A fresh probe must not already carry an id. */
		ASSERT(!probe->lsp_id);
		probe->lsp_id = dtrace_probe_create(lockstat_id,
		    "genunix", probe->lsp_func, probe->lsp_name,
		    1, probe);
	}
}
static void sdt_provide(void *arg, const dtrace_probedesc_t *desc) { sdt_provider_t *sprov = arg; int res; int ind; int num_probes = 0; #ifdef SDT_DEBUG if (desc == NULL) { printf("sdt: provide null\n"); } else { printf("sdt: provide %d %02x:%02x:%02x:%02x\n", desc->dtpd_id, desc->dtpd_provider[0], desc->dtpd_mod[0], desc->dtpd_func[0], desc->dtpd_name[0]); } #endif for (ind = 0; sprov->probes[ind] != NULL; ind++) { if (sprov->probes[ind]->created == 0) { res = dtrace_probe_create(sprov->id, sprov->probes[ind]->module, sprov->probes[ind]->function, sprov->probes[ind]->name, 0, sprov->probes[ind]); sprov->probes[ind]->id = res; #ifdef SDT_DEBUG printf("%s: dtrace_probe_create[%d] res=%d\n", __func__, ind, res); #endif sprov->probes[ind]->created = 1; num_probes++; } } #ifdef SDT_DEBUG printf("sdt: %s num_probes %d\n", __func__, ind); #endif }
/*ARGSUSED*/ static void lockstat_provide(void *arg, const dtrace_probedesc_t *desc) { #pragma unused(arg, desc) /* __APPLE__ */ int i = 0; for (i = 0; lockstat_probes[i].lsp_func != NULL; i++) { lockstat_probe_t *probe = &lockstat_probes[i]; if (dtrace_probe_lookup(lockstat_id, "mach_kernel", probe->lsp_func, probe->lsp_name) != 0) continue; ASSERT(!probe->lsp_id); probe->lsp_id = dtrace_probe_create(lockstat_id, "mach_kernel", probe->lsp_func, probe->lsp_name, LOCKSTAT_AFRAMES, probe); } }
/*
 * Create an SDT "entry" probe for an I/O provider at the given
 * instruction.  Always returns 1.
 *
 * NOTE(review): if no provider matches infp->modname, the loop leaves
 * prov at the table terminator — confirm sdt_providers always contains a
 * matching entry for callers of this function.
 */
static int
io_prov_entry(pf_info_t *infp, uint8_t *instr, int size, int modrm)
{
	sdt_probe_t *sdp;
	sdt_provider_t *prov;
	uint8_t *offset;
	char *name;

	printk("io_prov_entry called %s:%s\n", infp->modname, infp->name);
	/* Locate the provider whose name matches the module name. */
	for (prov = sdt_providers; prov->sdtp_prefix != NULL; prov++) {
		if (strcmp(prov->sdtp_name, infp->modname) == 0)
			break;
	}

	name = kstrdup(infp->name, KM_SLEEP);
	sdp = kmem_zalloc(sizeof (sdt_probe_t), KM_SLEEP);
	sdp->sdp_id = dtrace_probe_create(prov->sdtp_id,
		infp->func, NULL, name, 0, sdp);
	sdp->sdp_name = name;
	sdp->sdp_namelen = strlen(name);
	sdp->sdp_inslen = size;
	sdp->sdp_modrm = modrm;
	sdp->sdp_provider = prov;
	sdp->sdp_flags = infp->flags;
	sdp->sdp_entry = TRUE;

	/***********************************************/
	/*   Add the entry to the hash table.          */
	/***********************************************/
	offset = instr;
	sdp->sdp_hashnext =
		sdt_probetab[SDT_ADDR2NDX(offset)];
	sdt_probetab[SDT_ADDR2NDX(offset)] = sdp;

	/* Record the byte to patch and its original value. */
	sdp->sdp_patchval = PATCHVAL;
	sdp->sdp_patchpoint = (uint8_t *)offset;
	sdp->sdp_savedval = *sdp->sdp_patchpoint;

	/* Reset the return-probe daisy chain for this function. */
	infp->retptr = NULL;
	return 1;
}
/*
 * Scan one kernel-module function (PowerPC) and create FBT probes.
 *
 * The entry probe is placed on the first `mflr r0` instruction; return
 * probes are placed on the `bctr`/`blr`/branch instruction that follows
 * an `mtlr r0` within a short window.  All return sites of a function
 * share one probe id via the retfbt daisy chain.  Returns 0 (the
 * linker-file iterator's "continue" value).
 */
int
fbt_provide_module_function(linker_file_t lf, int symindx,
    linker_symval_t *symval, void *opaque)
{
	char *modname = opaque;
	const char *name = symval->name;
	fbt_probe_t *fbt, *retfbt;
	int j;
	uint32_t *instr, *limit;

#ifdef __powerpc64__
	/*
	 * PowerPC64 uses '.' prefixes on symbol names, ignore it, but only
	 * allow symbols with the '.' prefix, so that we don't get the function
	 * descriptor instead.
	 */
	if (name[0] == '.')
		name++;
	else
		return (0);
#endif

	if (strncmp(name, "dtrace_", 7) == 0 &&
	    strncmp(name, "dtrace_safe_", 12) != 0) {
		/*
		 * Anything beginning with "dtrace_" may be called
		 * from probe context unless it explicitly indicates
		 * that it won't be called from probe context by
		 * using the prefix "dtrace_safe_".
		 */
		return (0);
	}

	/* Skip compiler-internal symbols. */
	if (name[0] == '_' && name[1] == '_')
		return (0);

	instr = (uint32_t *) symval->value;
	limit = (uint32_t *) (symval->value + symval->size);

	/* Find the first mflr r0 — the canonical prologue marker. */
	for (; instr < limit; instr++)
		if (*instr == FBT_MFLR_R0)
			break;

	if (*instr != FBT_MFLR_R0)
		return (0);

	fbt = malloc(sizeof (fbt_probe_t), M_FBT, M_WAITOK | M_ZERO);
	fbt->fbtp_name = name;
	fbt->fbtp_id = dtrace_probe_create(fbt_id, modname,
	    name, FBT_ENTRY, FBT_AFRAMES, fbt);
	fbt->fbtp_patchpoint = instr;
	fbt->fbtp_ctl = lf;
	fbt->fbtp_loadcnt = lf->loadcnt;
	fbt->fbtp_savedval = *instr;
	fbt->fbtp_patchval = FBT_PATCHVAL;
	fbt->fbtp_rval = DTRACE_INVOP_MFLR_R0;
	fbt->fbtp_symindx = symindx;

	fbt->fbtp_hashnext = fbt_probetab[FBT_ADDR2NDX(instr)];
	fbt_probetab[FBT_ADDR2NDX(instr)] = fbt;

	lf->fbt_nentries++;

	retfbt = NULL;
again:
	if (instr >= limit)
		return (0);

	/*
	 * We (desperately) want to avoid erroneously instrumenting a
	 * jump table. To determine if we're looking at a true instruction
	 * sequence or an inline jump table that happens to contain the same
	 * byte sequences, we resort to some heuristic sleeze: we treat this
	 * instruction as being contained within a pointer, and see if that
	 * pointer points to within the body of the function. If it does, we
	 * refuse to instrument it.
	 */
	{
		uint32_t *ptr;

		ptr = *(uint32_t **)instr;

		if (ptr >= (uint32_t *) symval->value && ptr < limit) {
			instr++;
			goto again;
		}
	}

	/* Return sites are announced by mtlr r0. */
	if (*instr != FBT_MTLR_R0) {
		instr++;
		goto again;
	}

	instr++;

	/* Look within a 12-instruction window for the actual branch. */
	for (j = 0; j < 12 && instr < limit; j++, instr++) {
		if ((*instr == FBT_BCTR) || (*instr == FBT_BLR) ||
		    FBT_IS_JUMP(*instr))
			break;
	}

	if (!(*instr == FBT_BCTR || *instr == FBT_BLR ||
	    FBT_IS_JUMP(*instr)))
		goto again;

	/*
	 * We have a winner!
	 */
	fbt = malloc(sizeof (fbt_probe_t), M_FBT, M_WAITOK | M_ZERO);
	fbt->fbtp_name = name;

	if (retfbt == NULL) {
		fbt->fbtp_id = dtrace_probe_create(fbt_id, modname,
		    name, FBT_RETURN, FBT_AFRAMES, fbt);
	} else {
		/* Later return sites share the first site's probe id. */
		retfbt->fbtp_next = fbt;
		fbt->fbtp_id = retfbt->fbtp_id;
	}
	retfbt = fbt;

	fbt->fbtp_patchpoint = instr;
	fbt->fbtp_ctl = lf;
	fbt->fbtp_loadcnt = lf->loadcnt;
	fbt->fbtp_symindx = symindx;
	if (*instr == FBT_BCTR)
		fbt->fbtp_rval = DTRACE_INVOP_BCTR;
	else if (*instr == FBT_BLR)
		fbt->fbtp_rval = DTRACE_INVOP_RET;
	else
		fbt->fbtp_rval = DTRACE_INVOP_JUMP;
	fbt->fbtp_roffset =
	    (uintptr_t)((uint8_t *)instr - (uint8_t *)symval->value);
	fbt->fbtp_savedval = *instr;
	fbt->fbtp_patchval = FBT_PATCHVAL;
	fbt->fbtp_hashnext = fbt_probetab[FBT_ADDR2NDX(instr)];
	fbt_probetab[FBT_ADDR2NDX(instr)] = fbt;

	lf->fbt_nentries++;

	/*
	 * NOTE(review): this advances 4 *words* (16 bytes), not 4 bytes —
	 * confirm this skip distance is intentional before changing it.
	 */
	instr += 4;
	goto again;
}
/*ARGSUSED*/
/*
 * Provide SDT probes for one kernel module.
 *
 * Walks the module's sdt_probes list, matches each descriptor to its
 * provider by name prefix, translates "__" to "-" in the probe name, and
 * either chains onto an existing probe (same provider/module/func/name)
 * or creates a new one.  Refuses to provide anything until every
 * provider has been registered.
 */
static void
__sdt_provide_module(void *arg, struct modctl *ctl)
{
#pragma unused(arg)
	struct module *mp = (struct module *)ctl->mod_address;
	char *modname = ctl->mod_modname;
	sdt_probedesc_t *sdpd;
	sdt_probe_t *sdp, *old;
	sdt_provider_t *prov;
	int len;

	/*
	 * One for all, and all for one: if we haven't yet registered all of
	 * our providers, we'll refuse to provide anything.
	 */
	for (prov = sdt_providers; prov->sdtp_name != NULL; prov++) {
		if (prov->sdtp_id == DTRACE_PROVNONE)
			return;
	}

	/* Nothing to do if already provided or the module has no probes. */
	if (!mp || mp->sdt_nprobes != 0 || (sdpd = mp->sdt_probes) == NULL)
		return;

	for (sdpd = mp->sdt_probes; sdpd != NULL; sdpd = sdpd->sdpd_next) {
		const char *name = sdpd->sdpd_name, *func;
		char *nname;
		int i, j;
		dtrace_id_t id;

		/* Match the probe to its provider by prefix substring. */
		for (prov = sdt_providers; prov->sdtp_prefix != NULL; prov++) {
			const char *prefpart, *prefix = prov->sdtp_prefix;

			if ((prefpart = strstr(name, prefix))) {
				name = prefpart + strlen(prefix);
				break;
			}
		}

		nname = kmem_alloc(len = strlen(name) + 1, KM_SLEEP);

		/* Translate "__" in the probe name to "-". */
		for (i = 0, j = 0; name[j] != '\0'; i++) {
			if (name[j] == '_' && name[j + 1] == '_') {
				nname[i] = '-';
				j += 2;
			} else {
				nname[i] = name[j++];
			}
		}

		nname[i] = '\0';

		sdp = kmem_zalloc(sizeof (sdt_probe_t), KM_SLEEP);
		sdp->sdp_loadcnt = ctl->mod_loadcnt;
		sdp->sdp_ctl = ctl;
		sdp->sdp_name = nname;
		sdp->sdp_namelen = len;
		sdp->sdp_provider = prov;

		func = sdpd->sdpd_func;

		if (func == NULL)
			func = "<unknown>";

		/*
		 * We have our provider.  Now create the probe.
		 */
		if ((id = dtrace_probe_lookup(prov->sdtp_id, modname,
		    func, nname)) != DTRACE_IDNONE) {
			/* Chain onto the existing probe's descriptor list. */
			old = dtrace_probe_arg(prov->sdtp_id, id);
			ASSERT(old != NULL);

			sdp->sdp_next = old->sdp_next;
			sdp->sdp_id = id;
			old->sdp_next = sdp;
		} else {
			sdp->sdp_id = dtrace_probe_create(prov->sdtp_id,
			    modname, func, nname, SDT_AFRAMES, sdp);

			mp->sdt_nprobes++;
		}

#if 0
		printf ("__sdt_provide_module: sdpd=0x%p sdp=0x%p name=%s, id=%d\n",
		    sdpd, sdp, nname, sdp->sdp_id);
#endif

		/* Hash by patchpoint address for breakpoint dispatch. */
		sdp->sdp_hashnext =
		    sdt_probetab[SDT_ADDR2NDX(sdpd->sdpd_offset)];
		sdt_probetab[SDT_ADDR2NDX(sdpd->sdpd_offset)] = sdp;

		sdp->sdp_patchval = SDT_PATCHVAL;
		sdp->sdp_patchpoint = (sdt_instr_t *)sdpd->sdpd_offset;
		sdp->sdp_savedval = *sdp->sdp_patchpoint;
	}
}
/** * @callback_method_impl{dtrace_pops_t,dtps_provide} */ static void vboxDtPOps_Provide(void *pvProv, const dtrace_probedesc_t *pDtProbeDesc) { PSUPDRVVDTPROVIDERCORE pProv = (PSUPDRVVDTPROVIDERCORE)pvProv; AssertPtrReturnVoid(pProv); LOG_DTRACE(("%s: %p / %p pDtProbeDesc=%p\n", __FUNCTION__, pProv, pProv->TracerData.DTrace.idProvider, pDtProbeDesc)); if (pDtProbeDesc) return; /* We don't generate probes, so never mind these requests. */ if (pProv->TracerData.DTrace.fZombie) return; dtrace_provider_id_t const idProvider = pProv->TracerData.DTrace.idProvider; AssertPtrReturnVoid(idProvider); AssertPtrReturnVoid(pProv->pHdr); AssertReturnVoid(pProv->pHdr->offProbeLocs != 0); uint32_t const cProbeLocs = pProv->pHdr->cbProbeLocs / sizeof(VTGPROBELOC); /* Need a buffer for extracting the function names and mangling them in case of collision. */ size_t const cbFnNmBuf = _4K + _1K; char *pszFnNmBuf = (char *)RTMemAlloc(cbFnNmBuf); if (!pszFnNmBuf) return; /* * Itereate the probe location list and register all probes related to * this provider. */ uint16_t const idxProv = (uint16_t)((PVTGDESCPROVIDER)((uintptr_t)pProv->pHdr + pProv->pHdr->offProviders) - pProv->pDesc); uint32_t idxProbeLoc; for (idxProbeLoc = 0; idxProbeLoc < cProbeLocs; idxProbeLoc++) { /* Skip probe location belonging to other providers or once that we've already reported. */ PCVTGPROBELOC pProbeLocRO = &pProv->paProbeLocsRO[idxProbeLoc]; PVTGDESCPROBE pProbeDesc = pProbeLocRO->pProbe; if (pProbeDesc->idxProvider != idxProv) continue; uint32_t *pidProbe; if (!pProv->fUmod) pidProbe = (uint32_t *)&pProbeLocRO->idProbe; else pidProbe = &pProv->paR0ProbeLocs[idxProbeLoc].idProbe; if (*pidProbe != 0) continue; /* The function name may need to be stripped since we're using C++ compilers for most of the code. ASSUMES nobody are brave/stupid enough to use function pointer returns without typedef'ing properly them (e.g. signal). 
*/ const char *pszPrbName = vboxDtVtgGetString(pProv->pHdr, pProbeDesc->offName); const char *pszFunc = pProbeLocRO->pszFunction; const char *psz = strchr(pProbeLocRO->pszFunction, '('); size_t cch; if (psz) { /* skip blanks preceeding the parameter parenthesis. */ while ( (uintptr_t)psz > (uintptr_t)pProbeLocRO->pszFunction && RT_C_IS_BLANK(psz[-1])) psz--; /* Find the start of the function name. */ pszFunc = psz - 1; while ((uintptr_t)pszFunc > (uintptr_t)pProbeLocRO->pszFunction) { char ch = pszFunc[-1]; if (!RT_C_IS_ALNUM(ch) && ch != '_' && ch != ':') break; pszFunc--; } cch = psz - pszFunc; } else cch = strlen(pszFunc); RTStrCopyEx(pszFnNmBuf, cbFnNmBuf, pszFunc, cch); /* Look up the probe, if we have one in the same function, mangle the function name a little to avoid having to deal with having multiple location entries with the same probe ID. (lazy bird) */ Assert(!*pidProbe); if (dtrace_probe_lookup(idProvider, pProv->pszModName, pszFnNmBuf, pszPrbName) != DTRACE_IDNONE) { RTStrPrintf(pszFnNmBuf+cch, cbFnNmBuf - cch, "-%u", pProbeLocRO->uLine); if (dtrace_probe_lookup(idProvider, pProv->pszModName, pszFnNmBuf, pszPrbName) != DTRACE_IDNONE) { unsigned iOrd = 2; while (iOrd < 128) { RTStrPrintf(pszFnNmBuf+cch, cbFnNmBuf - cch, "-%u-%u", pProbeLocRO->uLine, iOrd); if (dtrace_probe_lookup(idProvider, pProv->pszModName, pszFnNmBuf, pszPrbName) == DTRACE_IDNONE) break; iOrd++; } if (iOrd >= 128) { LogRel(("VBoxDrv: More than 128 duplicate probe location instances %s at line %u in function %s [%s], probe %s\n", pProbeLocRO->uLine, pProbeLocRO->pszFunction, pszFnNmBuf, pszPrbName)); continue; } } } /* Create the probe. */ AssertCompile(sizeof(*pidProbe) == sizeof(dtrace_id_t)); *pidProbe = dtrace_probe_create(idProvider, pProv->pszModName, pszFnNmBuf, pszPrbName, 1 /*aframes*/, (void *)(uintptr_t)idxProbeLoc); pProv->TracerData.DTrace.cProvidedProbes++; } RTMemFree(pszFnNmBuf); LOG_DTRACE(("%s: returns\n", __FUNCTION__)); }
/*
 * Scan one kernel-module function (ARM) and create FBT probes.
 *
 * The entry probe is placed on the prologue `push {.., lr}` (optionally
 * preceded by a `sub sp, sp, #n` in va_arg functions); return probes are
 * placed on the matching `pop` that restores pc, or on tail branches
 * leaving the function.  All return sites share one probe id via the
 * retfbt daisy chain.  Returns 0 (the iterator's "continue" value).
 */
int
fbt_provide_module_function(linker_file_t lf, int symindx,
    linker_symval_t *symval, void *opaque)
{
	char *modname = opaque;
	const char *name = symval->name;
	fbt_probe_t *fbt, *retfbt;
	uint32_t *instr, *limit;
	int popm;

	if (strncmp(name, "dtrace_", 7) == 0 &&
	    strncmp(name, "dtrace_safe_", 12) != 0) {
		/*
		 * Anything beginning with "dtrace_" may be called
		 * from probe context unless it explicitly indicates
		 * that it won't be called from probe context by
		 * using the prefix "dtrace_safe_".
		 */
		return (0);
	}

	/* Skip compiler-internal symbols. */
	if (name[0] == '_' && name[1] == '_')
		return (0);

	instr = (uint32_t *)symval->value;
	limit = (uint32_t *)(symval->value + symval->size);

	/*
	 * va_arg functions has first instruction of
	 * sub sp, sp, #?
	 */
	if ((*instr & 0xfffff000) == FBT_SUBSP)
		instr++;

	/*
	 * check if insn is a pushm with LR
	 */
	if ((*instr & 0xffff0000) != FBT_PUSHM ||
	    (*instr & (1 << LR)) == 0)
		return (0);

	fbt = malloc(sizeof (fbt_probe_t), M_FBT, M_WAITOK | M_ZERO);
	fbt->fbtp_name = name;
	fbt->fbtp_id = dtrace_probe_create(fbt_id, modname,
	    name, FBT_ENTRY, 2, fbt);
	fbt->fbtp_patchpoint = instr;
	fbt->fbtp_ctl = lf;
	fbt->fbtp_loadcnt = lf->loadcnt;
	fbt->fbtp_savedval = *instr;
	fbt->fbtp_patchval = FBT_BREAKPOINT;
	fbt->fbtp_rval = DTRACE_INVOP_PUSHM;
	fbt->fbtp_symindx = symindx;

	fbt->fbtp_hashnext = fbt_probetab[FBT_ADDR2NDX(instr)];
	fbt_probetab[FBT_ADDR2NDX(instr)] = fbt;

	lf->fbt_nentries++;

	/*
	 * Build the pop instruction that matches this push: same register
	 * list (low bits), with pc (bit 15) in place of lr.
	 */
	popm = FBT_POPM | ((*instr) & 0x3FFF) | 0x8000;

	retfbt = NULL;
again:
	for (; instr < limit; instr++) {
		if (*instr == popm)
			break;
		else if ((*instr & 0xff000000) == FBT_JUMP) {
			uint32_t *target, *start;
			int offset;

			/* Sign-extend the 24-bit branch offset (<<8, /64). */
			offset = (*instr & 0xffffff);
			offset <<= 8;
			offset /= 64;
			target = instr + (2 + offset);
			start = (uint32_t *)symval->value;
			/* A branch out of the function is a return site. */
			if (target >= limit || target < start)
				break;
		}
	}

	if (instr >= limit)
		return (0);

	/*
	 * We have a winner!
	 */
	fbt = malloc(sizeof (fbt_probe_t), M_FBT, M_WAITOK | M_ZERO);
	fbt->fbtp_name = name;

	if (retfbt == NULL) {
		fbt->fbtp_id = dtrace_probe_create(fbt_id, modname,
		    name, FBT_RETURN, 2, fbt);
	} else {
		/* Later return sites share the first site's probe id. */
		retfbt->fbtp_next = fbt;
		fbt->fbtp_id = retfbt->fbtp_id;
	}
	retfbt = fbt;

	fbt->fbtp_patchpoint = instr;
	fbt->fbtp_ctl = lf;
	fbt->fbtp_loadcnt = lf->loadcnt;
	fbt->fbtp_symindx = symindx;
	if ((*instr & 0xff000000) == FBT_JUMP)
		fbt->fbtp_rval = DTRACE_INVOP_B;
	else
		fbt->fbtp_rval = DTRACE_INVOP_POPM;
	fbt->fbtp_savedval = *instr;
	fbt->fbtp_patchval = FBT_BREAKPOINT;
	fbt->fbtp_hashnext = fbt_probetab[FBT_ADDR2NDX(instr)];
	fbt_probetab[FBT_ADDR2NDX(instr)] = fbt;

	lf->fbt_nentries++;

	instr++;
	goto again;
}
/*
 * Scan one kernel-module function (x86/amd64) and create FBT probes.
 *
 * The entry probe goes on the `push %ebp`/`push %rbp` prologue; return
 * probes go on `ret` (amd64) or on `pop %ebp`/`leave` followed by
 * `ret`/`ret imm16` (i386).  All return sites share one probe id via the
 * retfbt daisy chain.  Returns 0 (the iterator's "continue" value).
 */
static int
fbt_provide_module_function(linker_file_t lf, int symindx,
    linker_symval_t *symval, void *opaque)
{
	char *modname = opaque;
	const char *name = symval->name;
	fbt_probe_t *fbt, *retfbt;
	int j;
	int size;
	u_int8_t *instr, *limit;

	if (strncmp(name, "dtrace_", 7) == 0 &&
	    strncmp(name, "dtrace_safe_", 12) != 0) {
		/*
		 * Anything beginning with "dtrace_" may be called
		 * from probe context unless it explicitly indicates
		 * that it won't be called from probe context by
		 * using the prefix "dtrace_safe_".
		 */
		return (0);
	}

	/* Skip compiler-internal symbols. */
	if (name[0] == '_' && name[1] == '_')
		return (0);

	size = symval->size;

	instr = (u_int8_t *) symval->value;
	limit = (u_int8_t *) symval->value + symval->size;

#ifdef __amd64__
	/* Disassemble forward until the frame-pointer push is found. */
	while (instr < limit) {
		if (*instr == FBT_PUSHL_EBP)
			break;

		if ((size = dtrace_instr_size(instr)) <= 0)
			break;

		instr += size;
	}

	if (instr >= limit || *instr != FBT_PUSHL_EBP) {
		/*
		 * We either don't save the frame pointer in this
		 * function, or we ran into some disassembly
		 * screw-up.  Either way, we bail.
		 */
		return (0);
	}
#else
	/* i386: require the canonical push %ebp; mov %esp,%ebp prologue. */
	if (instr[0] != FBT_PUSHL_EBP)
		return (0);

	if (!(instr[1] == FBT_MOVL_ESP_EBP0_V0 &&
	    instr[2] == FBT_MOVL_ESP_EBP1_V0) &&
	    !(instr[1] == FBT_MOVL_ESP_EBP0_V1 &&
	    instr[2] == FBT_MOVL_ESP_EBP1_V1))
		return (0);
#endif

	fbt = malloc(sizeof (fbt_probe_t), M_FBT, M_WAITOK | M_ZERO);
	fbt->fbtp_name = name;
	fbt->fbtp_id = dtrace_probe_create(fbt_id, modname,
	    name, FBT_ENTRY, 3, fbt);
	fbt->fbtp_patchpoint = instr;
	fbt->fbtp_ctl = lf;
	fbt->fbtp_loadcnt = lf->loadcnt;
	fbt->fbtp_rval = DTRACE_INVOP_PUSHL_EBP;
	fbt->fbtp_savedval = *instr;
	fbt->fbtp_patchval = FBT_PATCHVAL;
	fbt->fbtp_symindx = symindx;

	fbt->fbtp_hashnext = fbt_probetab[FBT_ADDR2NDX(instr)];
	fbt_probetab[FBT_ADDR2NDX(instr)] = fbt;

	lf->fbt_nentries++;

	retfbt = NULL;
again:
	if (instr >= limit)
		return (0);

	/*
	 * If this disassembly fails, then we've likely walked off into
	 * a jump table or some other unsuitable area.  Bail out of the
	 * disassembly now.
	 */
	if ((size = dtrace_instr_size(instr)) <= 0)
		return (0);

#ifdef __amd64__
	/*
	 * We only instrument "ret" on amd64 -- we don't yet instrument
	 * ret imm16, largely because the compiler doesn't seem to
	 * (yet) emit them in the kernel...
	 */
	if (*instr != FBT_RET) {
		instr += size;
		goto again;
	}
#else
	if (!(size == 1 &&
	    (*instr == FBT_POPL_EBP || *instr == FBT_LEAVE) &&
	    (*(instr + 1) == FBT_RET ||
	    *(instr + 1) == FBT_RET_IMM16))) {
		instr += size;
		goto again;
	}
#endif

	/*
	 * We (desperately) want to avoid erroneously instrumenting a
	 * jump table, especially given that our markers are pretty
	 * short:  two bytes on x86, and just one byte on amd64.  To
	 * determine if we're looking at a true instruction sequence
	 * or an inline jump table that happens to contain the same
	 * byte sequences, we resort to some heuristic sleeze:  we
	 * treat this instruction as being contained within a pointer,
	 * and see if that pointer points to within the body of the
	 * function.  If it does, we refuse to instrument it.
	 */
	for (j = 0; j < sizeof (uintptr_t); j++) {
		caddr_t check = (caddr_t) instr - j;
		uint8_t *ptr;

		if (check < symval->value)
			break;

		if (check + sizeof (caddr_t) > (caddr_t)limit)
			continue;

		ptr = *(uint8_t **)check;

		if (ptr >= (uint8_t *) symval->value && ptr < limit) {
			instr += size;
			goto again;
		}
	}

	/*
	 * We have a winner!
	 */
	fbt = malloc(sizeof (fbt_probe_t), M_FBT, M_WAITOK | M_ZERO);
	fbt->fbtp_name = name;

	if (retfbt == NULL) {
		fbt->fbtp_id = dtrace_probe_create(fbt_id, modname,
		    name, FBT_RETURN, 3, fbt);
	} else {
		/* Later return sites share the first site's probe id. */
		retfbt->fbtp_next = fbt;
		fbt->fbtp_id = retfbt->fbtp_id;
	}

	retfbt = fbt;
	fbt->fbtp_patchpoint = instr;
	fbt->fbtp_ctl = lf;
	fbt->fbtp_loadcnt = lf->loadcnt;
	fbt->fbtp_symindx = symindx;

#ifndef __amd64__
	if (*instr == FBT_POPL_EBP) {
		fbt->fbtp_rval = DTRACE_INVOP_POPL_EBP;
	} else {
		ASSERT(*instr == FBT_LEAVE);
		fbt->fbtp_rval = DTRACE_INVOP_LEAVE;
	}
	fbt->fbtp_roffset =
	    (uintptr_t)(instr - (uint8_t *) symval->value) + 1;
#else
	ASSERT(*instr == FBT_RET);
	fbt->fbtp_rval = DTRACE_INVOP_RET;
	fbt->fbtp_roffset =
	    (uintptr_t)(instr - (uint8_t *) symval->value);
#endif

	fbt->fbtp_savedval = *instr;
	fbt->fbtp_patchval = FBT_PATCHVAL;
	fbt->fbtp_hashnext = fbt_probetab[FBT_ADDR2NDX(instr)];
	fbt_probetab[FBT_ADDR2NDX(instr)] = fbt;

	lf->fbt_nentries++;

	instr += size;
	goto again;
}
/*
 * Create (or chain) an FBT "return" probe at the given instruction.
 * Multiple return sites of one function share a single probe id via the
 * daisy chain rooted at infp->retptr.  Returns 0 if the site is already
 * patched, else 1.
 */
static int
fbt_prov_return(pf_info_t *infp, instr_t *instr, int size)
{
	fbt_probe_t *fbt;
	fbt_probe_t *retfbt = infp->retptr;

# if defined(__i386) || defined(__amd64)
	/* Don't re-patch a site that already carries a breakpoint. */
	if (*instr == 0xcc)
		return 1;
# endif

	/***********************************************/
	/*   Sanity check for bad things happening.    */
	/***********************************************/
	if (fbt_is_patched(infp->name, instr)) {
		return 0;
	}

	fbt = kmem_zalloc(sizeof (fbt_probe_t), KM_SLEEP);
	fbt->fbtp_name = infp->name;

	if (retfbt == NULL) {
		fbt->fbtp_id = dtrace_probe_create(fbt_id, infp->modname,
		    infp->name, FBT_RETURN, 3, fbt);
		num_probes++;
	} else {
		/* Later return sites share the first site's probe id. */
		retfbt->fbtp_next = fbt;
		fbt->fbtp_id = retfbt->fbtp_id;
	}
	infp->retptr = fbt;

	fbt->fbtp_patchpoint = instr;
	fbt->fbtp_ctl = infp->mp; //ctl;
	fbt->fbtp_loadcnt = get_refcount(infp->mp);

	/***********************************************/
	/*   Swapped  sense  of  the  following ifdef  */
	/*   around so we are consistent.              */
	/***********************************************/
	fbt->fbtp_rval = DTRACE_INVOP_ANY;
#if defined(__amd64)
	ASSERT(*instr == FBT_RET);
	fbt->fbtp_roffset =
	    (uintptr_t)(instr - (uint8_t *)infp->st_value);
#elif defined(__i386)
	fbt->fbtp_roffset =
	    (uintptr_t)(instr - (uint8_t *)infp->st_value) + 1;
#elif defined(__arm__)
	fbt->fbtp_roffset = 0;
#endif

	/***********************************************/
	/*   Save  potential overwrite of instruction  */
	/*   and  length,  because  we  will need the  */
	/*   entire  instruction  when we single step  */
	/*   over it.                                  */
	/***********************************************/
	fbt->fbtp_savedval = *instr;
	fbt->fbtp_inslen = size;
	fbt->fbtp_type = 1; /* return */
	fbt->fbtp_modrm = -1;
	fbt->fbtp_patchval = FBT_PATCHVAL;

	/* Link into the patchpoint hash table for breakpoint dispatch. */
	fbt->fbtp_hashnext = fbt_probetab[FBT_ADDR2NDX(instr)];
	fbt->fbtp_symndx = infp->symndx;
	fbt_probetab[FBT_ADDR2NDX(instr)] = fbt;

	infp->pmp->fbt_nentries++;
	return 1;
}
/*ARGSUSED*/
/*
 * Provide SDT probes for one kernel module (Solaris variant).
 *
 * Allocates a trampoline table for the module if needed, matches each
 * probe descriptor to its provider by name prefix, translates "__" to
 * "-" in probe names, and either chains onto an existing probe or
 * creates a new one.  Refuses to provide anything until every provider
 * has been registered.
 */
static void
sdt_provide_module(void *arg, struct modctl *ctl)
{
	struct module *mp = ctl->mod_mp;
	char *modname = ctl->mod_modname;
	int primary, nprobes = 0;
	sdt_probedesc_t *sdpd;
	sdt_probe_t *sdp, *old;
	uint32_t *tab;
	sdt_provider_t *prov;
	int len;

	/*
	 * One for all, and all for one:  if we haven't yet registered all of
	 * our providers, we'll refuse to provide anything.
	 */
	for (prov = sdt_providers; prov->sdtp_name != NULL; prov++) {
		if (prov->sdtp_id == DTRACE_PROVNONE)
			return;
	}

	/* Nothing to do if already provided or the module has no probes. */
	if (mp->sdt_nprobes != 0 || (sdpd = mp->sdt_probes) == NULL)
		return;

	kobj_textwin_alloc(mp);

	/*
	 * Hack to identify unix/genunix/krtld.
	 */
	primary = vmem_contains(heap_arena, (void *)ctl,
	    sizeof (struct modctl)) == 0;

	/*
	 * If there hasn't been an sdt table allocated, we'll do so now.
	 */
	if (mp->sdt_tab == NULL) {
		for (; sdpd != NULL; sdpd = sdpd->sdpd_next) {
			nprobes++;
		}

		/*
		 * We could (should?) determine precisely the size of the
		 * table -- but a reasonable maximum will suffice.
		 */
		mp->sdt_size = nprobes * SDT_ENTRY_SIZE;
		mp->sdt_tab = kobj_texthole_alloc(mp->text, mp->sdt_size);

		if (mp->sdt_tab == NULL) {
			cmn_err(CE_WARN, "couldn't allocate SDT table "
			    "for module %s", modname);
			return;
		}
	}

	tab = (uint32_t *)mp->sdt_tab;

	for (sdpd = mp->sdt_probes; sdpd != NULL; sdpd = sdpd->sdpd_next) {
		char *name = sdpd->sdpd_name, *func, *nname;
		int i, j;
		sdt_provider_t *prov;
		ulong_t offs;
		dtrace_id_t id;

		/* Match the probe to its provider by name prefix. */
		for (prov = sdt_providers; prov->sdtp_prefix != NULL; prov++) {
			char *prefix = prov->sdtp_prefix;

			if (strncmp(name, prefix, strlen(prefix)) == 0) {
				name += strlen(prefix);
				break;
			}
		}

		nname = kmem_alloc(len = strlen(name) + 1, KM_SLEEP);

		/* Translate "__" in the probe name to "-". */
		for (i = 0, j = 0; name[j] != '\0'; i++) {
			if (name[j] == '_' && name[j + 1] == '_') {
				nname[i] = '-';
				j += 2;
			} else {
				nname[i] = name[j++];
			}
		}

		nname[i] = '\0';

		sdp = kmem_zalloc(sizeof (sdt_probe_t), KM_SLEEP);
		sdp->sdp_loadcnt = ctl->mod_loadcnt;
		sdp->sdp_primary = primary;
		sdp->sdp_ctl = ctl;
		sdp->sdp_name = nname;
		sdp->sdp_namelen = len;
		sdp->sdp_provider = prov;

		/* Resolve the containing function from the symbol table. */
		func = kobj_searchsym(mp, sdpd->sdpd_offset +
		    (uintptr_t)mp->text, &offs);

		if (func == NULL)
			func = "<unknown>";

		/*
		 * We have our provider.  Now create the probe.
		 */
		if ((id = dtrace_probe_lookup(prov->sdtp_id, modname,
		    func, nname)) != DTRACE_IDNONE) {
			/* Chain onto the existing probe's descriptor list. */
			old = dtrace_probe_arg(prov->sdtp_id, id);
			ASSERT(old != NULL);

			sdp->sdp_next = old->sdp_next;
			sdp->sdp_id = id;
			old->sdp_next = sdp;
		} else {
			sdp->sdp_id = dtrace_probe_create(prov->sdtp_id,
			    modname, func, nname, 1, sdp);
			mp->sdt_nprobes++;
		}

		/* Patch site: a call into the trampoline table. */
		sdp->sdp_patchval = SDT_CALL((uintptr_t)mp->text +
		    sdpd->sdpd_offset, tab);
		sdp->sdp_patchpoint = (uint32_t *)((uintptr_t)mp->textwin +
		    sdpd->sdpd_offset);
		sdp->sdp_savedval = *sdp->sdp_patchpoint;
		sdt_initialize(sdp, &tab);
	}
}
/*
 * Scan a function's instruction stream and create "instr" provider probes
 * for a fixed set of interesting opcodes (conditional jumps, nop, loops,
 * call, lock/rep prefixes, cli/sti, ...).  Each probe is named
 * "<function>-<opcode>" with the instruction address as the probe name
 * component.  Stops scanning on undecodable bytes (likely data).
 *
 * Fix vs. original: the probe-address string was built with an unbounded
 * sprintf(); use snprintf() bounded by sizeof pred_buf, matching the
 * bounded formatting already used by the INSTR() macro.
 */
static void
instr_provide_function(struct modctl *mp, par_module_t *pmp,
	char *modname, char *name, uint8_t *st_value,
	uint8_t *instr, uint8_t *limit, int symndx)
{
	int	do_print = FALSE;
	instr_probe_t *fbt;
	int	size;
	int	modrm;
	char	*orig_name = name;
	char	name_buf[128];
	char	pred_buf[128];

# define UNHANDLED_FBT() if (do_print || dtrace_unhandled) { \
		printk("fbt:unhandled instr %s:%p %02x %02x %02x %02x\n", \
			name, instr, instr[0], instr[1], instr[2], instr[3]); \
		}

	/* If the opcode matches, synthesize "<func>-<opcode>" as the name. */
# define INSTR(val, opcode_name) do {if (instr[0] == val) { \
	snprintf(name_buf, sizeof name_buf, "%s-%s", orig_name, opcode_name); \
	name = name_buf; \
	}} while (0)

	for (; instr < limit; instr += size) {
		/***********************************************/
		/*   Make sure we dont try and handle data or  */
		/*   bad instructions.			       */
		/***********************************************/
		if ((size = dtrace_instr_size_modrm(instr, &modrm)) <= 0)
			return;

		name_buf[0] = '\0';
		INSTR(0x70, "jo");
		INSTR(0x71, "jno");
		INSTR(0x72, "jb");
		INSTR(0x73, "jae");
		INSTR(0x74, "je");
		INSTR(0x75, "jne");
		INSTR(0x76, "jbe");
		INSTR(0x77, "ja");
		INSTR(0x78, "js");
		INSTR(0x79, "jns");
		INSTR(0x7a, "jp");
		INSTR(0x7b, "jnp");
		INSTR(0x7c, "jl");
		INSTR(0x7d, "jge");
		INSTR(0x7e, "jle");
		INSTR(0x7f, "jg");
		INSTR(0x90, "nop");
		INSTR(0xa6, "scas");
		INSTR(0xe0, "loopne");
		INSTR(0xe1, "loope");
		INSTR(0xe2, "loop");
		INSTR(0xe8, "callr");
		/***********************************************/
		/*   I was debugging the fwd/back scenario -   */
		/*   dont need this now.		       */
		/***********************************************/
		if (0 && *instr == 0xe8) {
			if (instr[4] & 0x80)
				INSTR(0xe8, "callr-back");
			else
				INSTR(0xe8, "callr-fwd");
		}
		INSTR(0xf0, "lock");
		INSTR(0xf1, "icebp");
		INSTR(0xf2, "repz");
		INSTR(0xf3, "repz");
		INSTR(0xfa, "cli");
		INSTR(0xfb, "sti");

		/* Not an instruction we probe: keep scanning. */
		if (name_buf[0] == 0) {
			continue;
		}

		/* Probe name is the instruction's address. */
		snprintf(pred_buf, sizeof pred_buf, "0x%p", instr);

		/***********************************************/
		/*   Make  sure  this  doesnt overlap another  */
		/*   sym. We are in trouble when this happens  */
		/*   - eg we will mistaken what the emulation  */
		/*   is  for,  but  also,  it means something  */
		/*   strange  happens, like kernel is reusing  */
		/*   a  page  (eg  for init/exit  section  of  */
		/*   a module).				       */
		/***********************************************/
		if (instr_is_patched(name, instr))
			return;

		fbt = kmem_zalloc(sizeof (instr_probe_t), KM_SLEEP);
		/*
		 * NOTE(review): when an INSTR() matched, `name` points at the
		 * stack-local name_buf; dtrace_probe_create() presumably
		 * copies the strings it is given, but insp_name retains the
		 * raw pointer -- confirm insp_name is not dereferenced after
		 * this function returns.
		 */
		fbt->insp_name = name;
		fbt->insp_id = dtrace_probe_create(instr_id, modname,
		    name, pred_buf, 3, fbt);
		num_probes++;
		fbt->insp_patchpoint = instr;
		fbt->insp_ctl = mp; // ctl;
		fbt->insp_loadcnt = get_refcount(mp);
		/***********************************************/
		/*   Save  potential overwrite of instruction  */
		/*   and  length,  because  we  will need the  */
		/*   entire  instruction  when we single step  */
		/*   over it.				       */
		/***********************************************/
		fbt->insp_savedval = *instr;
		fbt->insp_inslen = size;
		//if (modrm >= 0 && (instr[modrm] & 0xc7) == 0x05) printk("modrm %s %p rm=%d\n", name, instr, modrm);
		fbt->insp_modrm = modrm;
		fbt->insp_patchval = INSTR_PATCHVAL;

		fbt->insp_hashnext = instr_probetab[INSTR_ADDR2NDX(instr)];
		fbt->insp_symndx = symndx;
		instr_probetab[INSTR_ADDR2NDX(instr)] = fbt;

		if (do_print)
			printk("%d:alloc entry-patchpoint: %s %p sz=%d %02x %02x %02x\n",
				__LINE__,
				name,
				fbt->insp_patchpoint,
				fbt->insp_inslen,
				instr[0], instr[1], instr[2]);

		pmp->fbt_nentries++;
	}
}
/*
 * DTrace provide callback for the NFS client provider.  Called with a NULL
 * desc to request that all probes be created; per DTrace convention we do
 * nothing when asked to provide a specific probe.  Registers the access and
 * attribute cache probes, then the per-procedure NFSv2/NFSv3 RPC start/done
 * probes.  Each creation is guarded by dtrace_probe_lookup() so repeated
 * provide callbacks do not create duplicates.
 */
static void
dtnfsclient_provide(void *arg, dtrace_probedesc_t *desc)
{
	int i;

	if (desc != NULL)
		return;

	/*
	 * Register access cache probes.
	 */
	if (dtrace_probe_lookup(dtnfsclient_id, dtnfsclient_accesscache_str,
	    dtnfsclient_flush_str, dtnfsclient_done_str) == 0) {
		nfsclient_accesscache_flush_done_id = dtrace_probe_create(
		    dtnfsclient_id, dtnfsclient_accesscache_str,
		    dtnfsclient_flush_str, dtnfsclient_done_str, 0, NULL);
	}
	if (dtrace_probe_lookup(dtnfsclient_id, dtnfsclient_accesscache_str,
	    dtnfsclient_get_str, dtnfsclient_hit_str) == 0) {
		nfsclient_accesscache_get_hit_id = dtrace_probe_create(
		    dtnfsclient_id, dtnfsclient_accesscache_str,
		    dtnfsclient_get_str, dtnfsclient_hit_str, 0, NULL);
	}
	if (dtrace_probe_lookup(dtnfsclient_id, dtnfsclient_accesscache_str,
	    dtnfsclient_get_str, dtnfsclient_miss_str) == 0) {
		nfsclient_accesscache_get_miss_id = dtrace_probe_create(
		    dtnfsclient_id, dtnfsclient_accesscache_str,
		    dtnfsclient_get_str, dtnfsclient_miss_str, 0, NULL);
	}
	if (dtrace_probe_lookup(dtnfsclient_id, dtnfsclient_accesscache_str,
	    dtnfsclient_load_str, dtnfsclient_done_str) == 0) {
		nfsclient_accesscache_load_done_id = dtrace_probe_create(
		    dtnfsclient_id, dtnfsclient_accesscache_str,
		    dtnfsclient_load_str, dtnfsclient_done_str, 0, NULL);
	}

	/*
	 * Register attribute cache probes.
	 */
	if (dtrace_probe_lookup(dtnfsclient_id, dtnfsclient_attrcache_str,
	    dtnfsclient_flush_str, dtnfsclient_done_str) == 0) {
		nfsclient_attrcache_flush_done_id = dtrace_probe_create(
		    dtnfsclient_id, dtnfsclient_attrcache_str,
		    dtnfsclient_flush_str, dtnfsclient_done_str, 0, NULL);
	}
	if (dtrace_probe_lookup(dtnfsclient_id, dtnfsclient_attrcache_str,
	    dtnfsclient_get_str, dtnfsclient_hit_str) == 0) {
		nfsclient_attrcache_get_hit_id = dtrace_probe_create(
		    dtnfsclient_id, dtnfsclient_attrcache_str,
		    dtnfsclient_get_str, dtnfsclient_hit_str, 0, NULL);
	}
	if (dtrace_probe_lookup(dtnfsclient_id, dtnfsclient_attrcache_str,
	    dtnfsclient_get_str, dtnfsclient_miss_str) == 0) {
		nfsclient_attrcache_get_miss_id = dtrace_probe_create(
		    dtnfsclient_id, dtnfsclient_attrcache_str,
		    dtnfsclient_get_str, dtnfsclient_miss_str, 0, NULL);
	}
	if (dtrace_probe_lookup(dtnfsclient_id, dtnfsclient_attrcache_str,
	    dtnfsclient_load_str, dtnfsclient_done_str) == 0) {
		nfsclient_attrcache_load_done_id = dtrace_probe_create(
		    dtnfsclient_id, dtnfsclient_attrcache_str,
		    dtnfsclient_load_str, dtnfsclient_done_str, 0, NULL);
	}

	/*
	 * Register NFSv2 RPC procedures; note sparseness check for each slot
	 * in the NFSv3 procnum-indexed array.
	 */
	for (i = 0; i < NFS_NPROCS; i++) {
		if (dtnfsclient_rpcs[i].nr_v2_name != NULL &&
		    dtrace_probe_lookup(dtnfsclient_id, dtnfsclient_nfs2_str,
		    dtnfsclient_rpcs[i].nr_v2_name, dtnfsclient_start_str) ==
		    0) {
			dtnfsclient_rpcs[i].nr_v2_id_start =
			    dtrace_probe_create(dtnfsclient_id,
			    dtnfsclient_nfs2_str,
			    dtnfsclient_rpcs[i].nr_v2_name,
			    dtnfsclient_start_str, 0,
			    &nfsclient_nfs2_start_probes[i]);
		}
		if (dtnfsclient_rpcs[i].nr_v2_name != NULL &&
		    dtrace_probe_lookup(dtnfsclient_id, dtnfsclient_nfs2_str,
		    dtnfsclient_rpcs[i].nr_v2_name, dtnfsclient_done_str) ==
		    0) {
			dtnfsclient_rpcs[i].nr_v2_id_done =
			    dtrace_probe_create(dtnfsclient_id,
			    dtnfsclient_nfs2_str,
			    dtnfsclient_rpcs[i].nr_v2_name,
			    dtnfsclient_done_str, 0,
			    &nfsclient_nfs2_done_probes[i]);
		}
	}

	/*
	 * Register NFSv3 RPC procedures.
	 */
	for (i = 0; i < NFS_NPROCS; i++) {
		if (dtrace_probe_lookup(dtnfsclient_id, dtnfsclient_nfs3_str,
		    dtnfsclient_rpcs[i].nr_v3_name, dtnfsclient_start_str) ==
		    0) {
			dtnfsclient_rpcs[i].nr_v3_id_start =
			    dtrace_probe_create(dtnfsclient_id,
			    dtnfsclient_nfs3_str,
			    dtnfsclient_rpcs[i].nr_v3_name,
			    dtnfsclient_start_str, 0,
			    &nfsclient_nfs3_start_probes[i]);
		}
		if (dtrace_probe_lookup(dtnfsclient_id, dtnfsclient_nfs3_str,
		    dtnfsclient_rpcs[i].nr_v3_name, dtnfsclient_done_str) ==
		    0) {
			dtnfsclient_rpcs[i].nr_v3_id_done =
			    dtrace_probe_create(dtnfsclient_id,
			    dtnfsclient_nfs3_str,
			    dtnfsclient_rpcs[i].nr_v3_name,
			    dtnfsclient_done_str, 0,
			    &nfsclient_nfs3_done_probes[i]);
		}
	}
}
/*
 * Provide FBT (function boundary tracing) probes for a kernel module on x86.
 * Walks the module's ELF symbol table; for each eligible STT_FUNC symbol it
 * creates an "entry" probe at the frame-pointer save sequence and one or more
 * "return" probes at each ret/leave/popl site found by linear disassembly.
 * Modules related to dtrace itself, kmdb, and weak/_init/_fini symbols are
 * excluded.
 */
/*ARGSUSED*/
static void
fbt_provide_module(void *arg, struct modctl *ctl)
{
	struct module *mp = ctl->mod_mp;
	char *str = mp->strings;
	int nsyms = mp->nsyms;
	Shdr *symhdr = mp->symhdr;
	char *modname = ctl->mod_modname;
	char *name;
	fbt_probe_t *fbt, *retfbt;
	size_t symsize;
	int i, size;

	/*
	 * Employees of dtrace and their families are ineligible.  Void
	 * where prohibited.
	 */
	if (strcmp(modname, "dtrace") == 0)
		return;

	/* Also skip any module that depends on dtrace. */
	if (ctl->mod_requisites != NULL) {
		struct modctl_list *list;

		list = (struct modctl_list *)ctl->mod_requisites;

		for (; list != NULL; list = list->modl_next) {
			if (strcmp(list->modl_modp->mod_modname,
			    "dtrace") == 0)
				return;
		}
	}

	/*
	 * KMDB is ineligible for instrumentation -- it may execute in
	 * any context, including probe context.
	 */
	if (strcmp(modname, "kmdbmod") == 0)
		return;

	if (str == NULL || symhdr == NULL || symhdr->sh_addr == NULL) {
		/*
		 * If this module doesn't (yet) have its string or symbol
		 * table allocated, clear out.
		 */
		return;
	}

	symsize = symhdr->sh_entsize;

	if (mp->fbt_nentries) {
		/*
		 * This module has some FBT entries allocated; we're afraid
		 * to screw with it.
		 */
		return;
	}

	for (i = 1; i < nsyms; i++) {
		uint8_t *instr, *limit;
		Sym *sym = (Sym *)(symhdr->sh_addr + i * symsize);
		int j;

		if (ELF_ST_TYPE(sym->st_info) != STT_FUNC)
			continue;

		/*
		 * Weak symbols are not candidates.  This could be made to
		 * work (where weak functions and their underlying function
		 * appear as two disjoint probes), but it's not simple.
		 */
		if (ELF_ST_BIND(sym->st_info) == STB_WEAK)
			continue;

		name = str + sym->st_name;

		if (strstr(name, "dtrace_") == name &&
		    strstr(name, "dtrace_safe_") != name) {
			/*
			 * Anything beginning with "dtrace_" may be called
			 * from probe context unless it explicitly indicates
			 * that it won't be called from probe context by
			 * using the prefix "dtrace_safe_".
			 */
			continue;
		}

		if (strstr(name, "kdi_") == name ||
		    strstr(name, "_kdi_") != NULL) {
			/*
			 * Any function name beginning with "kdi_" or
			 * containing the string "_kdi_" is a part of the
			 * kernel debugger interface and may be called in
			 * arbitrary context -- including probe context.
			 */
			continue;
		}

		/*
		 * Due to 4524008, _init and _fini may have a bloated st_size.
		 * While this bug was fixed quite some time ago, old drivers
		 * may be lurking.  We need to develop a better solution to
		 * this problem, such that correct _init and _fini functions
		 * (the vast majority) may be correctly traced.  One solution
		 * may be to scan through the entire symbol table to see if
		 * any symbol overlaps with _init.  If none does, set a bit in
		 * the module structure that this module has correct _init and
		 * _fini sizes.  This will cause some pain the first time a
		 * module is scanned, but at least it would be O(N) instead of
		 * O(N log N)...
		 */
		if (strcmp(name, "_init") == 0)
			continue;

		if (strcmp(name, "_fini") == 0)
			continue;

		/*
		 * In order to be eligible, the function must begin with the
		 * following sequence:
		 *
		 *	pushl	%esp
		 *	movl	%esp, %ebp
		 *
		 * Note that there are two variants of encodings that generate
		 * the movl; we must check for both.  For 64-bit, we would
		 * normally insist that a function begin with the following
		 * sequence:
		 *
		 *	pushq	%rbp
		 *	movq	%rsp, %rbp
		 *
		 * However, the compiler for 64-bit often splits these two
		 * instructions -- and the first instruction in the function
		 * is often not the pushq.  As a result, on 64-bit we look
		 * for any "pushq %rbp" in the function and we instrument
		 * this with a breakpoint instruction.
		 */
		instr = (uint8_t *)sym->st_value;
		limit = (uint8_t *)(sym->st_value + sym->st_size);

#ifdef __amd64
		while (instr < limit) {
			if (*instr == FBT_PUSHL_EBP)
				break;

			if ((size = dtrace_instr_size(instr)) <= 0)
				break;

			instr += size;
		}

		if (instr >= limit || *instr != FBT_PUSHL_EBP) {
			/*
			 * We either don't save the frame pointer in this
			 * function, or we ran into some disassembly
			 * screw-up.  Either way, we bail.
			 */
			continue;
		}
#else
		if (instr[0] != FBT_PUSHL_EBP)
			continue;

		if (!(instr[1] == FBT_MOVL_ESP_EBP0_V0 &&
		    instr[2] == FBT_MOVL_ESP_EBP1_V0) &&
		    !(instr[1] == FBT_MOVL_ESP_EBP0_V1 &&
		    instr[2] == FBT_MOVL_ESP_EBP1_V1))
			continue;
#endif
		/* Entry probe at the frame-pointer save instruction. */
		fbt = kmem_zalloc(sizeof (fbt_probe_t), KM_SLEEP);
		fbt->fbtp_name = name;
		fbt->fbtp_id = dtrace_probe_create(fbt_id, modname,
		    name, FBT_ENTRY, 3, fbt);
		fbt->fbtp_patchpoint = instr;
		fbt->fbtp_ctl = ctl;
		fbt->fbtp_loadcnt = ctl->mod_loadcnt;
		fbt->fbtp_rval = DTRACE_INVOP_PUSHL_EBP;
		fbt->fbtp_savedval = *instr;
		fbt->fbtp_patchval = FBT_PATCHVAL;

		fbt->fbtp_hashnext = fbt_probetab[FBT_ADDR2NDX(instr)];
		fbt->fbtp_symndx = i;
		fbt_probetab[FBT_ADDR2NDX(instr)] = fbt;

		mp->fbt_nentries++;

		retfbt = NULL;
again:
		if (instr >= limit)
			continue;

		/*
		 * If this disassembly fails, then we've likely walked off
		 * into a jump table or some other unsuitable area.  Bail out
		 * of the disassembly now.
		 */
		if ((size = dtrace_instr_size(instr)) <= 0)
			continue;

#ifdef __amd64
		/*
		 * We only instrument "ret" on amd64 -- we don't yet
		 * instrument ret imm16, largely because the compiler doesn't
		 * seem to (yet) emit them in the kernel...
		 */
		if (*instr != FBT_RET) {
			instr += size;
			goto again;
		}
#else
		if (!(size == 1 &&
		    (*instr == FBT_POPL_EBP || *instr == FBT_LEAVE) &&
		    (*(instr + 1) == FBT_RET ||
		    *(instr + 1) == FBT_RET_IMM16))) {
			instr += size;
			goto again;
		}
#endif

		/*
		 * We (desperately) want to avoid erroneously instrumenting a
		 * jump table, especially given that our markers are pretty
		 * short:  two bytes on x86, and just one byte on amd64.  To
		 * determine if we're looking at a true instruction sequence
		 * or an inline jump table that happens to contain the same
		 * byte sequences, we resort to some heuristic sleeze:  we
		 * treat this instruction as being contained within a pointer,
		 * and see if that pointer points to within the body of the
		 * function.  If it does, we refuse to instrument it.
		 */
		for (j = 0; j < sizeof (uintptr_t); j++) {
			uintptr_t check = (uintptr_t)instr - j;
			uint8_t *ptr;

			if (check < sym->st_value)
				break;

			if (check + sizeof (uintptr_t) > (uintptr_t)limit)
				continue;

			ptr = *(uint8_t **)check;

			if (ptr >= (uint8_t *)sym->st_value && ptr < limit) {
				instr += size;
				goto again;
			}
		}

		/*
		 * We have a winner!
		 */
		fbt = kmem_zalloc(sizeof (fbt_probe_t), KM_SLEEP);
		fbt->fbtp_name = name;

		if (retfbt == NULL) {
			fbt->fbtp_id = dtrace_probe_create(fbt_id, modname,
			    name, FBT_RETURN, 3, fbt);
		} else {
			/* Additional return sites share the first probe id. */
			retfbt->fbtp_next = fbt;
			fbt->fbtp_id = retfbt->fbtp_id;
		}

		retfbt = fbt;
		fbt->fbtp_patchpoint = instr;
		fbt->fbtp_ctl = ctl;
		fbt->fbtp_loadcnt = ctl->mod_loadcnt;

#ifndef __amd64
		if (*instr == FBT_POPL_EBP) {
			fbt->fbtp_rval = DTRACE_INVOP_POPL_EBP;
		} else {
			ASSERT(*instr == FBT_LEAVE);
			fbt->fbtp_rval = DTRACE_INVOP_LEAVE;
		}
		fbt->fbtp_roffset =
		    (uintptr_t)(instr - (uint8_t *)sym->st_value) + 1;
#else
		ASSERT(*instr == FBT_RET);
		fbt->fbtp_rval = DTRACE_INVOP_RET;
		fbt->fbtp_roffset =
		    (uintptr_t)(instr - (uint8_t *)sym->st_value);
#endif

		fbt->fbtp_savedval = *instr;
		fbt->fbtp_patchval = FBT_PATCHVAL;
		fbt->fbtp_hashnext = fbt_probetab[FBT_ADDR2NDX(instr)];
		fbt->fbtp_symndx = i;
		fbt_probetab[FBT_ADDR2NDX(instr)] = fbt;

		mp->fbt_nentries++;

		instr += size;
		goto again;
	}
}
void sdt_provide_module(void *arg, struct module *mp) { char *modname = mp->name; dtrace_mprovider_t *prov; sdt_probedesc_t *sdpd; sdt_probe_t *sdp, *prv; int idx, len; /* * Nothing to do if the module SDT probes were already created. */ if (PDATA(mp)->sdt_probe_cnt != 0) return; /* * Nothing to do if there are no SDT probes. */ if (mp->sdt_probec == 0) return; /* * Do not provide any probes unless all SDT providers have been created * for this meta-provider. */ for (prov = sdt_providers; prov->dtmp_name != NULL; prov++) { if (prov->dtmp_id == DTRACE_PROVNONE) return; } if (!sdt_provide_module_arch(arg, mp)) return; for (idx = 0, sdpd = mp->sdt_probes; idx < mp->sdt_probec; idx++, sdpd++) { char *name = sdpd->sdpd_name, *nname; int i, j; dtrace_mprovider_t *prov; dtrace_id_t id; for (prov = sdt_providers; prov->dtmp_pref != NULL; prov++) { char *prefix = prov->dtmp_pref; int len = strlen(prefix); if (strncmp(name, prefix, len) == 0) { name += len; break; } } nname = kmalloc(len = strlen(name) + 1, GFP_KERNEL); if (nname == NULL) { pr_warn("Unable to create probe %s: out-of-memory\n", name); continue; } for (i = j = 0; name[j] != '\0'; i++) { if (name[j] == '_' && name[j + 1] == '_') { nname[i] = '-'; j += 2; } else nname[i] = name[j++]; } nname[i] = '\0'; sdp = kzalloc(sizeof(sdt_probe_t), GFP_KERNEL); if (sdp == NULL) { pr_warn("Unable to create probe %s: out-of-memory\n", nname); continue; } sdp->sdp_loadcnt = 1; /* FIXME */ sdp->sdp_module = mp; sdp->sdp_name = nname; sdp->sdp_namelen = len; sdp->sdp_provider = prov; if ((id = dtrace_probe_lookup(prov->dtmp_id, modname, sdpd->sdpd_func, nname)) != DTRACE_IDNONE) { prv = dtrace_probe_arg(prov->dtmp_id, id); ASSERT(prv != NULL); sdp->sdp_next = prv->sdp_next; sdp->sdp_id = id; prv->sdp_next = sdp; } else { sdp->sdp_id = dtrace_probe_create(prov->dtmp_id, modname, sdpd->sdpd_func, nname, SDT_AFRAMES, sdp); PDATA(mp)->sdt_probe_cnt++; } sdp->sdp_hashnext = sdt_probetab[ SDT_ADDR2NDX(sdpd->sdpd_offset)]; 
sdt_probetab[SDT_ADDR2NDX(sdpd->sdpd_offset)] = sdp; sdp->sdp_patchpoint = (asm_instr_t *)sdpd->sdpd_offset; sdt_provide_probe_arch(sdp, mp, idx); } }
/*
 * Provide SDT probes for a module (FreeBSD port; the body is compiled only
 * for the Solaris build -- on other platforms this function is a no-op).
 * Mirrors the Solaris implementation: strips the provider prefix, demangles
 * "__" to "-", creates or chains probes, and records the breakpoint patch
 * value/point per site.
 */
/*ARGSUSED*/
static void
sdt_provide_module(void *arg, struct modctl *ctl)
{
# if defined(sun)
	struct module *mp = ctl->mod_mp;
	char *modname = ctl->mod_modname;
	sdt_probedesc_t *sdpd;
	sdt_probe_t *sdp, *old;
	sdt_provider_t *prov;

	/*
	 * One for all, and all for one: if we haven't yet registered all of
	 * our providers, we'll refuse to provide anything.
	 */
	for (prov = sdt_providers; prov->sdtp_name != NULL; prov++) {
		if (prov->sdtp_id == DTRACE_PROVNONE)
			return;
	}

	/* Already provided, or nothing to provide. */
	if (mp->sdt_nprobes != 0 || (sdpd = mp->sdt_probes) == NULL)
		return;

	for (sdpd = mp->sdt_probes; sdpd != NULL; sdpd = sdpd->sdpd_next) {
		char *name = sdpd->sdpd_name, *func, *nname;
		int i, j, len;
		sdt_provider_t *prov;
		ulong_t offs;
		dtrace_id_t id;

		/* Match the probe to its provider by name prefix. */
		for (prov = sdt_providers; prov->sdtp_prefix != NULL;
		    prov++) {
			char *prefix = prov->sdtp_prefix;

			if (strncmp(name, prefix, strlen(prefix)) == 0) {
				name += strlen(prefix);
				break;
			}
		}

		nname = kmem_alloc(len = strlen(name) + 1, KM_SLEEP);

		/* Demangle: each "__" in the C name is "-" in D. */
		for (i = 0, j = 0; name[j] != '\0'; i++) {
			if (name[j] == '_' && name[j + 1] == '_') {
				nname[i] = '-';
				j += 2;
			} else {
				nname[i] = name[j++];
			}
		}

		nname[i] = '\0';

		sdp = kmem_zalloc(sizeof (sdt_probe_t), KM_SLEEP);
		sdp->sdp_loadcnt = ctl->mod_loadcnt;
		sdp->sdp_ctl = ctl;
		sdp->sdp_name = nname;
		sdp->sdp_namelen = len;
		sdp->sdp_provider = prov;

		/* Resolve the enclosing function name for the probe site. */
		func = kobj_searchsym(mp, sdpd->sdpd_offset, &offs);

		if (func == NULL)
			func = "<unknown>";

		/*
		 * We have our provider.  Now create the probe.  Chain onto
		 * an existing probe if one with this identity already exists.
		 */
		if ((id = dtrace_probe_lookup(prov->sdtp_id, modname,
		    func, nname)) != DTRACE_IDNONE) {
			old = dtrace_probe_arg(prov->sdtp_id, id);
			ASSERT(old != NULL);

			sdp->sdp_next = old->sdp_next;
			sdp->sdp_id = id;
			old->sdp_next = sdp;
		} else {
			sdp->sdp_id = dtrace_probe_create(prov->sdtp_id,
			    modname, func, nname, 3, sdp);
			mp->sdt_nprobes++;
		}

		sdp->sdp_hashnext =
		    sdt_probetab[SDT_ADDR2NDX(sdpd->sdpd_offset)];
		sdt_probetab[SDT_ADDR2NDX(sdpd->sdpd_offset)] = sdp;

		sdp->sdp_patchval = SDT_PATCHVAL;
		sdp->sdp_patchpoint = (uint8_t *)sdpd->sdpd_offset;
		sdp->sdp_savedval = *sdp->sdp_patchpoint;
	}
# endif
}
/*
 * Framework load hook (NetBSD port).  Installs the trap/vtime/invop hooks,
 * initializes the global DTrace locks and allocators, creates the probe hash
 * tables, registers the "dtrace" pseudo-provider with its BEGIN/END/ERROR
 * probes, optionally allocates the helper trace buffer, and finally brings
 * each CPU online for DTrace.  The lock-acquisition order here
 * (provider -> dtrace -> cpu) matches the framework's lock hierarchy.
 */
static void
dtrace_load(void *dummy)
{
	dtrace_provider_id_t id;
	CPU_INFO_ITERATOR cpuind;
	struct cpu_info *cinfo;

	dtrace_debug_init(NULL);
	dtrace_gethrtime_init(NULL);

	/* Hook into the trap handler. */
	dtrace_trap_func = dtrace_trap;

	/* Hang our hook for thread switches. */
	dtrace_vtime_switch_func = dtrace_vtime_switch;

	/* Hang our hook for exceptions. */
	dtrace_invop_init();

	/*
	 * XXX This is a short term hack to avoid having to comment
	 * out lots and lots of lock/unlock calls.
	 */
	mutex_init(&mod_lock,"XXX mod_lock hack", MUTEX_DEFAULT, NULL);

	/*
	 * Initialise the mutexes without 'witness' because the dtrace
	 * code is mostly written to wait for memory. To have the
	 * witness code change a malloc() from M_WAITOK to M_NOWAIT
	 * because a lock is held would surely create a panic in a
	 * low memory situation. And that low memory situation might be
	 * the very problem we are trying to trace.
	 */
	mutex_init(&dtrace_lock,"dtrace probe state", MUTEX_DEFAULT, NULL);
	mutex_init(&dtrace_provider_lock,"dtrace provider state",
	    MUTEX_DEFAULT, NULL);
	mutex_init(&dtrace_meta_lock,"dtrace meta-provider state",
	    MUTEX_DEFAULT, NULL);
	mutex_init(&dtrace_errlock,"dtrace error lock", MUTEX_DEFAULT, NULL);

	mutex_enter(&dtrace_provider_lock);
	mutex_enter(&dtrace_lock);
	mutex_enter(&cpu_lock);

	ASSERT(MUTEX_HELD(&cpu_lock));

	/* Probe-id arena and per-CPU dynamic-state cache. */
	dtrace_arena = vmem_create("dtrace", 1, INT_MAX, 1,
			NULL, NULL, NULL, 0, VM_SLEEP, IPL_NONE);

	dtrace_state_cache = kmem_cache_create(__UNCONST("dtrace_state_cache"),
	    sizeof (dtrace_dstate_percpu_t) * NCPU, DTRACE_STATE_ALIGN,
	    NULL, NULL, NULL, NULL, NULL, 0);

	ASSERT(MUTEX_HELD(&cpu_lock));

	/* Probe lookup hashes keyed by module, function, and name. */
	dtrace_bymod = dtrace_hash_create(offsetof(dtrace_probe_t, dtpr_mod),
	    offsetof(dtrace_probe_t, dtpr_nextmod),
	    offsetof(dtrace_probe_t, dtpr_prevmod));

	dtrace_byfunc = dtrace_hash_create(offsetof(dtrace_probe_t, dtpr_func),
	    offsetof(dtrace_probe_t, dtpr_nextfunc),
	    offsetof(dtrace_probe_t, dtpr_prevfunc));

	dtrace_byname = dtrace_hash_create(offsetof(dtrace_probe_t, dtpr_name),
	    offsetof(dtrace_probe_t, dtpr_nextname),
	    offsetof(dtrace_probe_t, dtpr_prevname));

	if (dtrace_retain_max < 1) {
		cmn_err(CE_WARN, "illegal value (%lu) for dtrace_retain_max; "
		    "setting to 1", dtrace_retain_max);
		dtrace_retain_max = 1;
	}

	/*
	 * Now discover our toxic ranges.
	 */
	dtrace_toxic_ranges(dtrace_toxrange_add);

	/*
	 * Before we register ourselves as a provider to our own framework,
	 * we would like to assert that dtrace_provider is NULL -- but that's
	 * not true if we were loaded as a dependency of a DTrace provider.
	 * Once we've registered, we can assert that dtrace_provider is our
	 * pseudo provider.
	 */
	(void) dtrace_register("dtrace", &dtrace_provider_attr,
	    DTRACE_PRIV_NONE, 0, &dtrace_provider_ops, NULL, &id);

	ASSERT(dtrace_provider != NULL);
	ASSERT((dtrace_provider_id_t)dtrace_provider == id);

	dtrace_probeid_begin = dtrace_probe_create((dtrace_provider_id_t)
	    dtrace_provider, NULL, NULL, "BEGIN", 0, NULL);
	dtrace_probeid_end = dtrace_probe_create((dtrace_provider_id_t)
	    dtrace_provider, NULL, NULL, "END", 0, NULL);
	dtrace_probeid_error = dtrace_probe_create((dtrace_provider_id_t)
	    dtrace_provider, NULL, NULL, "ERROR", 1, NULL);

	mutex_exit(&cpu_lock);

	/*
	 * If DTrace helper tracing is enabled, we need to allocate the
	 * trace buffer and initialize the values.
	 */
	if (dtrace_helptrace_enabled) {
		ASSERT(dtrace_helptrace_buffer == NULL);
		dtrace_helptrace_buffer =
		    kmem_zalloc(dtrace_helptrace_bufsize, KM_SLEEP);
		dtrace_helptrace_next = 0;
		dtrace_helptrace_size = dtrace_helptrace_bufsize;
	}

	mutex_exit(&dtrace_lock);
	mutex_exit(&dtrace_provider_lock);

	mutex_enter(&cpu_lock);

	/* Setup the CPUs */
	for (CPU_INFO_FOREACH(cpuind, cinfo)) {
		(void) dtrace_cpu_setup(CPU_CONFIG, cpu_index(cinfo));
	}

	mutex_exit(&cpu_lock);

	dtrace_anon_init(NULL);
#if 0
	dtrace_dev = make_dev(&dtrace_cdevsw, 0, UID_ROOT, GID_WHEEL, 0600,
	    "dtrace/dtrace");
#endif

	return;
}
/*
 * FBT provide callback for one symbol of a linker file (RISC-V port with
 * compressed-instruction support).  Creates an "entry" probe at the first
 * store of ra to the stack (sd or c.sdsp) and a shared "return" probe for
 * every return site (jalr ra or c.ret) found by a word-wise scan.  Returns 0
 * so the linker-file symbol iteration continues.
 */
int
fbt_provide_module_function(linker_file_t lf, int symindx,
    linker_symval_t *symval, void *opaque)
{
	fbt_probe_t *fbt, *retfbt;
	uint32_t *instr, *limit;
	const char *name;
	char *modname;
	int patchval;
	int rval;

	modname = opaque;
	name = symval->name;

	/* Check if function is excluded from instrumentation */
	if (fbt_excluded(name))
		return (0);

	instr = (uint32_t *)(symval->value);
	limit = (uint32_t *)(symval->value + symval->size);

	/* Look for sd operation */
	for (; instr < limit; instr++) {
		/* Look for a non-compressed store of ra to sp */
		if (match_opcode(*instr, (MATCH_SD | RS2_RA | RS1_SP),
		    (MASK_SD | RS2_MASK | RS1_MASK))) {
			rval = DTRACE_INVOP_SD;
			patchval = FBT_PATCHVAL;
			break;
		}

		/* Look for a 'C'-compressed store of ra to sp. */
		if (check_c_sdsp(&instr)) {
			rval = DTRACE_INVOP_C_SDSP;
			patchval = FBT_C_PATCHVAL;
			break;
		}
	}

	/* No prologue store found: nothing to instrument. */
	if (instr >= limit)
		return (0);

	fbt = malloc(sizeof (fbt_probe_t), M_FBT, M_WAITOK | M_ZERO);
	fbt->fbtp_name = name;
	fbt->fbtp_id = dtrace_probe_create(fbt_id, modname,
	    name, FBT_ENTRY, 3, fbt);
	fbt->fbtp_patchpoint = instr;
	fbt->fbtp_ctl = lf;
	fbt->fbtp_loadcnt = lf->loadcnt;
	fbt->fbtp_savedval = *instr;
	fbt->fbtp_patchval = patchval;
	fbt->fbtp_rval = rval;
	fbt->fbtp_symindx = symindx;
	fbt->fbtp_hashnext = fbt_probetab[FBT_ADDR2NDX(instr)];
	fbt_probetab[FBT_ADDR2NDX(instr)] = fbt;
	lf->fbt_nentries++;

	retfbt = NULL;
again:
	for (; instr < limit; instr++) {
		/* Look for non-compressed return */
		if (match_opcode(*instr, (MATCH_JALR | (X_RA << RS1_SHIFT)),
		    (MASK_JALR | RD_MASK | RS1_MASK | IMM_MASK))) {
			rval = DTRACE_INVOP_RET;
			patchval = FBT_PATCHVAL;
			break;
		}

		/* Look for 'C'-compressed return */
		if (check_c_ret(&instr)) {
			rval = DTRACE_INVOP_C_RET;
			patchval = FBT_C_PATCHVAL;
			break;
		}
	}

	if (instr >= limit)
		return (0);

	/*
	 * We have a winner!
	 */
	fbt = malloc(sizeof (fbt_probe_t), M_FBT, M_WAITOK | M_ZERO);
	fbt->fbtp_name = name;
	if (retfbt == NULL) {
		fbt->fbtp_id = dtrace_probe_create(fbt_id, modname,
		    name, FBT_RETURN, 3, fbt);
	} else {
		/* Additional return sites share the first probe id. */
		retfbt->fbtp_probenext = fbt;
		fbt->fbtp_id = retfbt->fbtp_id;
	}
	retfbt = fbt;

	fbt->fbtp_patchpoint = instr;
	fbt->fbtp_ctl = lf;
	fbt->fbtp_loadcnt = lf->loadcnt;
	fbt->fbtp_symindx = symindx;
	fbt->fbtp_rval = rval;
	fbt->fbtp_savedval = *instr;
	fbt->fbtp_patchval = patchval;
	fbt->fbtp_hashnext = fbt_probetab[FBT_ADDR2NDX(instr)];
	fbt_probetab[FBT_ADDR2NDX(instr)] = fbt;

	lf->fbt_nentries++;

	instr++;
	goto again;
}
/*
 * FBT provide callback for one symbol of a linker file (RISC-V variant
 * without compressed-instruction handling).  Creates an "entry" probe at the
 * first sd-of-ra-to-sp instruction and a shared "return" probe at each
 * matching ld-of-ra-from-sp site.  Returns 0 so the symbol iteration
 * continues.
 */
int
fbt_provide_module_function(linker_file_t lf, int symindx,
    linker_symval_t *symval, void *opaque)
{
	fbt_probe_t *fbt, *retfbt;
	uint32_t *instr, *limit;
	const char *name;
	char *modname;

	modname = opaque;
	name = symval->name;

	/* Check if function is excluded from instrumentation */
	if (fbt_excluded(name))
		return (0);

	instr = (uint32_t *)(symval->value);
	limit = (uint32_t *)(symval->value + symval->size);

	/* Look for store double to ra register */
	for (; instr < limit; instr++) {
		if ((*instr & LDSD_RA_SP_MASK) == SD_RA_SP)
			break;
	}

	/* No prologue store found: nothing to instrument. */
	if (instr >= limit)
		return (0);

	fbt = malloc(sizeof (fbt_probe_t), M_FBT, M_WAITOK | M_ZERO);
	fbt->fbtp_name = name;
	fbt->fbtp_id = dtrace_probe_create(fbt_id, modname,
	    name, FBT_ENTRY, 3, fbt);
	fbt->fbtp_patchpoint = instr;
	fbt->fbtp_ctl = lf;
	fbt->fbtp_loadcnt = lf->loadcnt;
	fbt->fbtp_savedval = *instr;
	fbt->fbtp_patchval = FBT_PATCHVAL;
	fbt->fbtp_rval = DTRACE_INVOP_SD;
	fbt->fbtp_symindx = symindx;
	fbt->fbtp_hashnext = fbt_probetab[FBT_ADDR2NDX(instr)];
	fbt_probetab[FBT_ADDR2NDX(instr)] = fbt;
	lf->fbt_nentries++;

	retfbt = NULL;
again:
	/* Scan for the ra restore that precedes each return. */
	for (; instr < limit; instr++) {
		if ((*instr & LDSD_RA_SP_MASK) == LD_RA_SP) {
			break;
		}
	}

	if (instr >= limit)
		return (0);

	/*
	 * We have a winner!
	 */
	fbt = malloc(sizeof (fbt_probe_t), M_FBT, M_WAITOK | M_ZERO);
	fbt->fbtp_name = name;
	if (retfbt == NULL) {
		fbt->fbtp_id = dtrace_probe_create(fbt_id, modname,
		    name, FBT_RETURN, 3, fbt);
	} else {
		/* Additional return sites share the first probe id. */
		retfbt->fbtp_next = fbt;
		fbt->fbtp_id = retfbt->fbtp_id;
	}
	retfbt = fbt;

	fbt->fbtp_patchpoint = instr;
	fbt->fbtp_ctl = lf;
	fbt->fbtp_loadcnt = lf->loadcnt;
	fbt->fbtp_symindx = symindx;
	fbt->fbtp_rval = DTRACE_INVOP_LD;
	fbt->fbtp_savedval = *instr;
	fbt->fbtp_patchval = FBT_PATCHVAL;
	fbt->fbtp_hashnext = fbt_probetab[FBT_ADDR2NDX(instr)];
	fbt_probetab[FBT_ADDR2NDX(instr)] = fbt;

	lf->fbt_nentries++;

	instr++;
	goto again;
}
/*
 * Framework load hook (FreeBSD port).  Installs the trap/vtime/invop hooks,
 * registers kld load/unload event handlers, initializes the global DTrace
 * locks and allocators, creates the probe hash tables, registers the
 * "dtrace" pseudo-provider with its BEGIN/END/ERROR probes, optionally
 * allocates the helper trace buffer, sets up the boot CPU, and creates the
 * /dev/dtrace device nodes.  The lock-acquisition order here
 * (provider -> dtrace -> cpu) matches the framework's lock hierarchy.
 */
static void
dtrace_load(void *dummy)
{
	dtrace_provider_id_t id;

	/* Hook into the trap handler. */
	dtrace_trap_func = dtrace_trap;

	/* Hang our hook for thread switches. */
	dtrace_vtime_switch_func = dtrace_vtime_switch;

	/* Hang our hook for exceptions. */
	dtrace_invop_init();

	dtrace_taskq = taskq_create("dtrace_taskq", 1, maxclsyspri, 0, 0, 0);

	/* Unit-number allocator used for probe ids. */
	dtrace_arena = new_unrhdr(1, INT_MAX, &dtrace_unr_mtx);

	/* Register callbacks for linker file load and unload events. */
	dtrace_kld_load_tag = EVENTHANDLER_REGISTER(kld_load,
	    dtrace_kld_load, NULL, EVENTHANDLER_PRI_ANY);
	dtrace_kld_unload_try_tag = EVENTHANDLER_REGISTER(kld_unload_try,
	    dtrace_kld_unload_try, NULL, EVENTHANDLER_PRI_ANY);

	/*
	 * Initialise the mutexes without 'witness' because the dtrace
	 * code is mostly written to wait for memory. To have the
	 * witness code change a malloc() from M_WAITOK to M_NOWAIT
	 * because a lock is held would surely create a panic in a
	 * low memory situation. And that low memory situation might be
	 * the very problem we are trying to trace.
	 */
	mutex_init(&dtrace_lock,"dtrace probe state", MUTEX_DEFAULT, NULL);
	mutex_init(&dtrace_provider_lock,"dtrace provider state",
	    MUTEX_DEFAULT, NULL);
	mutex_init(&dtrace_meta_lock,"dtrace meta-provider state",
	    MUTEX_DEFAULT, NULL);
#ifdef DEBUG
	mutex_init(&dtrace_errlock,"dtrace error lock", MUTEX_DEFAULT, NULL);
#endif

	mutex_enter(&dtrace_provider_lock);
	mutex_enter(&dtrace_lock);
	mutex_enter(&cpu_lock);

	ASSERT(MUTEX_HELD(&cpu_lock));

	dtrace_state_cache = kmem_cache_create("dtrace_state_cache",
	    sizeof (dtrace_dstate_percpu_t) * NCPU, DTRACE_STATE_ALIGN,
	    NULL, NULL, NULL, NULL, NULL, 0);

	ASSERT(MUTEX_HELD(&cpu_lock));

	/* Probe lookup hashes keyed by module, function, and name. */
	dtrace_bymod = dtrace_hash_create(offsetof(dtrace_probe_t, dtpr_mod),
	    offsetof(dtrace_probe_t, dtpr_nextmod),
	    offsetof(dtrace_probe_t, dtpr_prevmod));

	dtrace_byfunc = dtrace_hash_create(offsetof(dtrace_probe_t, dtpr_func),
	    offsetof(dtrace_probe_t, dtpr_nextfunc),
	    offsetof(dtrace_probe_t, dtpr_prevfunc));

	dtrace_byname = dtrace_hash_create(offsetof(dtrace_probe_t, dtpr_name),
	    offsetof(dtrace_probe_t, dtpr_nextname),
	    offsetof(dtrace_probe_t, dtpr_prevname));

	if (dtrace_retain_max < 1) {
		cmn_err(CE_WARN, "illegal value (%lu) for dtrace_retain_max; "
		    "setting to 1", dtrace_retain_max);
		dtrace_retain_max = 1;
	}

	/*
	 * Now discover our toxic ranges.
	 */
	dtrace_toxic_ranges(dtrace_toxrange_add);

	/*
	 * Before we register ourselves as a provider to our own framework,
	 * we would like to assert that dtrace_provider is NULL -- but that's
	 * not true if we were loaded as a dependency of a DTrace provider.
	 * Once we've registered, we can assert that dtrace_provider is our
	 * pseudo provider.
	 */
	(void) dtrace_register("dtrace", &dtrace_provider_attr,
	    DTRACE_PRIV_NONE, 0, &dtrace_provider_ops, NULL, &id);

	ASSERT(dtrace_provider != NULL);
	ASSERT((dtrace_provider_id_t)dtrace_provider == id);

	dtrace_probeid_begin = dtrace_probe_create((dtrace_provider_id_t)
	    dtrace_provider, NULL, NULL, "BEGIN", 0, NULL);
	dtrace_probeid_end = dtrace_probe_create((dtrace_provider_id_t)
	    dtrace_provider, NULL, NULL, "END", 0, NULL);
	dtrace_probeid_error = dtrace_probe_create((dtrace_provider_id_t)
	    dtrace_provider, NULL, NULL, "ERROR", 1, NULL);

	mutex_exit(&cpu_lock);

	/*
	 * If DTrace helper tracing is enabled, we need to allocate the
	 * trace buffer and initialize the values.
	 */
	if (dtrace_helptrace_enabled) {
		ASSERT(dtrace_helptrace_buffer == NULL);
		dtrace_helptrace_buffer =
		    kmem_zalloc(dtrace_helptrace_bufsize, KM_SLEEP);
		dtrace_helptrace_next = 0;
	}

	mutex_exit(&dtrace_lock);
	mutex_exit(&dtrace_provider_lock);

	mutex_enter(&cpu_lock);

	/* Setup the boot CPU */
	(void) dtrace_cpu_setup(CPU_CONFIG, 0);

	mutex_exit(&cpu_lock);

#if __FreeBSD_version < 800039
	/* Enable device cloning. */
	clone_setup(&dtrace_clones);

	/* Setup device cloning events. */
	eh_tag = EVENTHANDLER_REGISTER(dev_clone, dtrace_clone, 0, 1000);
#else
	dtrace_dev = make_dev(&dtrace_cdevsw, 0, UID_ROOT, GID_WHEEL, 0600,
	    "dtrace/dtrace");
	helper_dev = make_dev(&helper_cdevsw, 0, UID_ROOT, GID_WHEEL, 0660,
	    "dtrace/helper");
#endif

	return;
}