void
osd_del(u_int type, struct osd *osd, u_int slot)
{
	struct rm_priotracker tracker;

	rm_rlock(&osd_object_lock[type], &tracker);
	do_osd_del(type, osd, slot, 0);
	rm_runlock(&osd_object_lock[type], &tracker);
}
int
osd_set(u_int type, struct osd *osd, u_int slot, void *value)
{
	struct rm_priotracker tracker;

	KASSERT(type >= OSD_FIRST && type <= OSD_LAST, ("Invalid type."));
	KASSERT(slot > 0, ("Invalid slot."));
	KASSERT(osd_destructors[type][slot - 1] != NULL, ("Unused slot."));

	rm_rlock(&osd_object_lock[type], &tracker);
	if (slot > osd->osd_nslots) {
		if (value == NULL) {
			OSD_DEBUG(
			    "Not allocating null slot (type=%u, slot=%u).",
			    type, slot);
			rm_runlock(&osd_object_lock[type], &tracker);
			return (0);
		} else if (osd->osd_nslots == 0) {
			/*
			 * First OSD for this object, so we need to allocate
			 * space and put it onto the list.
			 */
			osd->osd_slots = malloc(sizeof(void *) * slot, M_OSD,
			    M_NOWAIT | M_ZERO);
			if (osd->osd_slots == NULL) {
				rm_runlock(&osd_object_lock[type], &tracker);
				return (ENOMEM);
			}
			osd->osd_nslots = slot;
			mtx_lock(&osd_list_lock[type]);
			LIST_INSERT_HEAD(&osd_list[type], osd, osd_next);
			mtx_unlock(&osd_list_lock[type]);
			OSD_DEBUG("Setting first slot (type=%u).", type);
		} else {
			void *newptr;

			/*
			 * Too few slots allocated here, needs to extend
			 * the array.
			 */
			newptr = realloc(osd->osd_slots, sizeof(void *) * slot,
			    M_OSD, M_NOWAIT | M_ZERO);
			if (newptr == NULL) {
				rm_runlock(&osd_object_lock[type], &tracker);
				return (ENOMEM);
			}
			osd->osd_slots = newptr;
			osd->osd_nslots = slot;
			OSD_DEBUG("Growing slots array (type=%u).", type);
		}
	}
	OSD_DEBUG("Setting slot value (type=%u, slot=%u, value=%p).", type,
	    slot, value);
	osd->osd_slots[slot - 1] = value;
	rm_runlock(&osd_object_lock[type], &tracker);
	return (0);
}
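/*
 * Illustrative usage sketch, not part of the original source: one way a
 * hypothetical consumer might use the osd(9) routines above to hang private
 * data off an object that embeds a struct osd.  The "example_*" names, the
 * use of OSD_THREAD/td_osd and M_TEMP are assumptions made for the example;
 * the slot itself comes from osd_register(), which also installs the
 * destructor that do_osd_del() will eventually invoke.  Kept under #if 0
 * since it is a sketch, not code from this file.
 */
#if 0
static u_int example_slot;

static void
example_dtor(void *value)
{

	free(value, M_TEMP);
}

static int
example_attach(struct thread *td, int cookie)
{
	int *data;

	example_slot = osd_register(OSD_THREAD, example_dtor, NULL);
	data = malloc(sizeof(*data), M_TEMP, M_WAITOK);
	*data = cookie;
	/* osd_set() allocates with M_NOWAIT, so it may fail with ENOMEM. */
	return (osd_set(OSD_THREAD, &td->td_osd, example_slot, data));
}

static int
example_lookup(struct thread *td)
{
	int *data;

	data = osd_get(OSD_THREAD, &td->td_osd, example_slot);
	return (data != NULL ? *data : 0);
}
#endif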
static void
lock_rm(struct lock_object *lock, uintptr_t how)
{
	struct rmlock *rm;
	struct rm_priotracker *tracker;

	rm = (struct rmlock *)lock;
	if (how == 0)
		rm_wlock(rm);
	else {
		tracker = (struct rm_priotracker *)how;
		rm_rlock(rm, tracker);
	}
}
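/*
 * Illustrative sketch, not part of the original source: the rm(9) calling
 * convention that lock_rm() relies on.  Readers must supply a struct
 * rm_priotracker (which is why the read path above smuggles the tracker
 * through the opaque "how" argument), while writers do not.  The lock and
 * variable names here are placeholders.
 */
#if 0
static struct rmlock example_lock;
static int example_value;

static void
example_init(void)
{

	rm_init(&example_lock, "example");
}

static int
example_read(void)
{
	struct rm_priotracker tracker;
	int v;

	rm_rlock(&example_lock, &tracker);
	v = example_value;
	rm_runlock(&example_lock, &tracker);
	return (v);
}

static void
example_write(int v)
{

	rm_wlock(&example_lock);
	example_value = v;
	rm_wunlock(&example_lock);
}
#endif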
void *
osd_get(u_int type, struct osd *osd, u_int slot)
{
	struct rm_priotracker tracker;
	void *value;

	KASSERT(type >= OSD_FIRST && type <= OSD_LAST, ("Invalid type."));
	KASSERT(slot > 0, ("Invalid slot."));
	KASSERT(osd_destructors[type][slot - 1] != NULL, ("Unused slot."));

	rm_rlock(&osd_object_lock[type], &tracker);
	if (slot > osd->osd_nslots) {
		value = NULL;
		OSD_DEBUG("Slot doesn't exist (type=%u, slot=%u).", type, slot);
	} else {
		value = osd->osd_slots[slot - 1];
		OSD_DEBUG("Returning slot value (type=%u, slot=%u, value=%p).",
		    type, slot, value);
	}
	rm_runlock(&osd_object_lock[type], &tracker);
	return (value);
}
void
osd_exit(u_int type, struct osd *osd)
{
	struct rm_priotracker tracker;
	u_int i;

	KASSERT(type >= OSD_FIRST && type <= OSD_LAST, ("Invalid type."));

	if (osd->osd_nslots == 0) {
		KASSERT(osd->osd_slots == NULL, ("Non-null osd_slots."));
		/* No OSD attached, just leave. */
		return;
	}

	rm_rlock(&osd_object_lock[type], &tracker);
	for (i = 1; i <= osd->osd_nslots; i++) {
		if (osd_destructors[type][i - 1] != NULL)
			do_osd_del(type, osd, i, 0);
		else
			OSD_DEBUG("Unused slot (type=%u, slot=%u).", type, i);
	}
	rm_runlock(&osd_object_lock[type], &tracker);
	OSD_DEBUG("Object exit (type=%u).", type);
}
int
fasttrap_pid_probe(struct reg *rp)
{
	proc_t *p = curproc;
#ifndef illumos
	struct rm_priotracker tracker;
	proc_t *pp;
#endif
	uintptr_t pc = rp->r_rip - 1;
	uintptr_t new_pc = 0;
	fasttrap_bucket_t *bucket;
#ifdef illumos
	kmutex_t *pid_mtx;
#endif
	fasttrap_tracepoint_t *tp, tp_local;
	pid_t pid;
	dtrace_icookie_t cookie;
	uint_t is_enabled = 0;

	/*
	 * It's possible that a user (in a veritable orgy of bad planning)
	 * could redirect this thread's flow of control before it reached the
	 * return probe fasttrap.  In this case we need to kill the process
	 * since it's in an unrecoverable state.
	 */
	if (curthread->t_dtrace_step) {
		ASSERT(curthread->t_dtrace_on);
		fasttrap_sigtrap(p, curthread, pc);
		return (0);
	}

	/*
	 * Clear all user tracing flags.
	 */
	curthread->t_dtrace_ft = 0;
	curthread->t_dtrace_pc = 0;
	curthread->t_dtrace_npc = 0;
	curthread->t_dtrace_scrpc = 0;
	curthread->t_dtrace_astpc = 0;
#ifdef __amd64
	curthread->t_dtrace_regv = 0;
#endif

	/*
	 * Treat a child created by a call to vfork(2) as if it were its
	 * parent.  We know that there's only one thread of control in such a
	 * process: this one.
	 */
#ifdef illumos
	while (p->p_flag & SVFORK) {
		p = p->p_parent;
	}

	pid = p->p_pid;
	pid_mtx = &cpu_core[CPU->cpu_id].cpuc_pid_lock;
	mutex_enter(pid_mtx);
#else
	pp = p;
	sx_slock(&proctree_lock);
	while (pp->p_vmspace == pp->p_pptr->p_vmspace)
		pp = pp->p_pptr;
	pid = pp->p_pid;
	sx_sunlock(&proctree_lock);
	pp = NULL;

	rm_rlock(&fasttrap_tp_lock, &tracker);
#endif

	bucket = &fasttrap_tpoints.fth_table[FASTTRAP_TPOINTS_INDEX(pid, pc)];

	/*
	 * Lookup the tracepoint that the process just hit.
	 */
	for (tp = bucket->ftb_data; tp != NULL; tp = tp->ftt_next) {
		if (pid == tp->ftt_pid && pc == tp->ftt_pc &&
		    tp->ftt_proc->ftpc_acount != 0)
			break;
	}

	/*
	 * If we couldn't find a matching tracepoint, either a tracepoint has
	 * been inserted without using the pid<pid> ioctl interface (see
	 * fasttrap_ioctl), or somehow we have mislaid this tracepoint.
	 */
	if (tp == NULL) {
#ifdef illumos
		mutex_exit(pid_mtx);
#else
		rm_runlock(&fasttrap_tp_lock, &tracker);
#endif
		return (-1);
	}

	/*
	 * Set the program counter to the address of the traced instruction
	 * so that it looks right in ustack() output.
	 */
	rp->r_rip = pc;

	if (tp->ftt_ids != NULL) {
		fasttrap_id_t *id;

#ifdef __amd64
		if (p->p_model == DATAMODEL_LP64) {
			for (id = tp->ftt_ids; id != NULL; id = id->fti_next) {
				fasttrap_probe_t *probe = id->fti_probe;

				if (id->fti_ptype == DTFTP_ENTRY) {
					/*
					 * We note that this was an entry
					 * probe to help ustack() find the
					 * first caller.
					 */
					cookie = dtrace_interrupt_disable();
					DTRACE_CPUFLAG_SET(CPU_DTRACE_ENTRY);
					dtrace_probe(probe->ftp_id, rp->r_rdi,
					    rp->r_rsi, rp->r_rdx, rp->r_rcx,
					    rp->r_r8);
					DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_ENTRY);
					dtrace_interrupt_enable(cookie);
				} else if (id->fti_ptype == DTFTP_IS_ENABLED) {
					/*
					 * Note that in this case, we don't
					 * call dtrace_probe() since it's only
					 * an artificial probe meant to change
					 * the flow of control so that it
					 * encounters the true probe.
					 */
					is_enabled = 1;
				} else if (probe->ftp_argmap == NULL) {
					dtrace_probe(probe->ftp_id, rp->r_rdi,
					    rp->r_rsi, rp->r_rdx, rp->r_rcx,
					    rp->r_r8);
				} else {
					uintptr_t t[5];

					fasttrap_usdt_args64(probe, rp,
					    sizeof (t) / sizeof (t[0]), t);
					dtrace_probe(probe->ftp_id, t[0], t[1],
					    t[2], t[3], t[4]);
				}
			}
		} else {
#else /* __amd64 */
		uintptr_t s0, s1, s2, s3, s4, s5;
		uint32_t *stack = (uint32_t *)rp->r_esp;

		/*
		 * In 32-bit mode, all arguments are passed on the
		 * stack.
		 * If this is a function entry probe, we need
		 * to skip the first entry on the stack as it
		 * represents the return address rather than a
		 * parameter to the function.
		 */
		s0 = fasttrap_fuword32_noerr(&stack[0]);
		s1 = fasttrap_fuword32_noerr(&stack[1]);
		s2 = fasttrap_fuword32_noerr(&stack[2]);
		s3 = fasttrap_fuword32_noerr(&stack[3]);
		s4 = fasttrap_fuword32_noerr(&stack[4]);
		s5 = fasttrap_fuword32_noerr(&stack[5]);

		for (id = tp->ftt_ids; id != NULL; id = id->fti_next) {
			fasttrap_probe_t *probe = id->fti_probe;

			if (id->fti_ptype == DTFTP_ENTRY) {
				/*
				 * We note that this was an entry
				 * probe to help ustack() find the
				 * first caller.
				 */
				cookie = dtrace_interrupt_disable();
				DTRACE_CPUFLAG_SET(CPU_DTRACE_ENTRY);
				dtrace_probe(probe->ftp_id, s1, s2,
				    s3, s4, s5);
				DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_ENTRY);
				dtrace_interrupt_enable(cookie);
			} else if (id->fti_ptype == DTFTP_IS_ENABLED) {
				/*
				 * Note that in this case, we don't
				 * call dtrace_probe() since it's only
				 * an artificial probe meant to change
				 * the flow of control so that it
				 * encounters the true probe.
				 */
				is_enabled = 1;
			} else if (probe->ftp_argmap == NULL) {
				dtrace_probe(probe->ftp_id, s0, s1,
				    s2, s3, s4);
			} else {
				uint32_t t[5];

				fasttrap_usdt_args32(probe, rp,
				    sizeof (t) / sizeof (t[0]), t);
				dtrace_probe(probe->ftp_id, t[0], t[1],
				    t[2], t[3], t[4]);
			}
		}
#endif /* __amd64 */
#ifdef __amd64
		}
#endif
	}

	/*
	 * We're about to do a bunch of work so we cache a local copy of
	 * the tracepoint to emulate the instruction, and then find the
	 * tracepoint again later if we need to light up any return probes.
	 */
	tp_local = *tp;
#ifdef illumos
	mutex_exit(pid_mtx);
#else
	rm_runlock(&fasttrap_tp_lock, &tracker);
#endif
	tp = &tp_local;

	/*
	 * Set the program counter to appear as though the traced instruction
	 * had completely executed.  This ensures that fasttrap_getreg() will
	 * report the expected value for REG_RIP.
	 */
	rp->r_rip = pc + tp->ftt_size;

	/*
	 * If there's an is-enabled probe connected to this tracepoint it
	 * means that there was a 'xorl %eax, %eax' or 'xorq %rax, %rax'
	 * instruction that was placed there by DTrace when the binary was
	 * linked.  As this probe is, in fact, enabled, we need to stuff 1
	 * into %eax or %rax.  Accordingly, we can bypass all the instruction
	 * emulation logic since we know the inevitable result.  It's possible
	 * that a user could construct a scenario where the 'is-enabled'
	 * probe was on some other instruction, but that would be a rather
	 * exotic way to shoot oneself in the foot.
	 */
	if (is_enabled) {
		rp->r_rax = 1;
		new_pc = rp->r_rip;
		goto done;
	}

	/*
	 * We emulate certain types of instructions to ensure correctness
	 * (in the case of position dependent instructions) or optimize
	 * common cases.  The rest we have the thread execute back in user-
	 * land.
	 */
	switch (tp->ftt_type) {
	case FASTTRAP_T_RET:
	case FASTTRAP_T_RET16: {
		uintptr_t dst = 0;
		uintptr_t addr = 0;
		int ret = 0;

		/*
		 * We have to emulate _every_ facet of the behavior of a ret
		 * instruction including what happens if the load from %esp
		 * fails; in that case, we send a SIGSEGV.
		 */
#ifdef __amd64
		if (p->p_model == DATAMODEL_NATIVE) {
			ret = dst = fasttrap_fulword((void *)rp->r_rsp);
			addr = rp->r_rsp + sizeof (uintptr_t);
		} else {
#endif
#ifdef __i386__
			uint32_t dst32;

			ret = dst32 = fasttrap_fuword32((void *)rp->r_esp);
			dst = dst32;
			addr = rp->r_esp + sizeof (uint32_t);
#endif
#ifdef __amd64
		}
#endif

		if (ret == -1) {
			fasttrap_sigsegv(p, curthread, rp->r_rsp);
			new_pc = pc;
			break;
		}

		if (tp->ftt_type == FASTTRAP_T_RET16)
			addr += tp->ftt_dest;

		rp->r_rsp = addr;
		new_pc = dst;
		break;
	}

	case FASTTRAP_T_JCC: {
		uint_t taken = 0;

		switch (tp->ftt_code) {
		case FASTTRAP_JO:
			taken = (rp->r_rflags & FASTTRAP_EFLAGS_OF) != 0;
			break;
		case FASTTRAP_JNO:
			taken = (rp->r_rflags & FASTTRAP_EFLAGS_OF) == 0;
			break;
		case FASTTRAP_JB:
			taken = (rp->r_rflags & FASTTRAP_EFLAGS_CF) != 0;
			break;
		case FASTTRAP_JAE:
			taken = (rp->r_rflags & FASTTRAP_EFLAGS_CF) == 0;
			break;
		case FASTTRAP_JE:
			taken = (rp->r_rflags & FASTTRAP_EFLAGS_ZF) != 0;
			break;
		case FASTTRAP_JNE:
			taken = (rp->r_rflags & FASTTRAP_EFLAGS_ZF) == 0;
			break;
		case FASTTRAP_JBE:
			taken = (rp->r_rflags & FASTTRAP_EFLAGS_CF) != 0 ||
			    (rp->r_rflags & FASTTRAP_EFLAGS_ZF) != 0;
			break;
		case FASTTRAP_JA:
			taken = (rp->r_rflags & FASTTRAP_EFLAGS_CF) == 0 &&
			    (rp->r_rflags & FASTTRAP_EFLAGS_ZF) == 0;
			break;
		case FASTTRAP_JS:
			taken = (rp->r_rflags & FASTTRAP_EFLAGS_SF) != 0;
			break;
		case FASTTRAP_JNS:
			taken = (rp->r_rflags & FASTTRAP_EFLAGS_SF) == 0;
			break;
		case FASTTRAP_JP:
			taken = (rp->r_rflags & FASTTRAP_EFLAGS_PF) != 0;
			break;
		case FASTTRAP_JNP:
			taken = (rp->r_rflags & FASTTRAP_EFLAGS_PF) == 0;
			break;
		case FASTTRAP_JL:
			taken = ((rp->r_rflags & FASTTRAP_EFLAGS_SF) == 0) !=
			    ((rp->r_rflags & FASTTRAP_EFLAGS_OF) == 0);
			break;
		case FASTTRAP_JGE:
			taken = ((rp->r_rflags & FASTTRAP_EFLAGS_SF) == 0) ==
			    ((rp->r_rflags & FASTTRAP_EFLAGS_OF) == 0);
			break;
		case FASTTRAP_JLE:
			taken = (rp->r_rflags & FASTTRAP_EFLAGS_ZF) != 0 ||
			    ((rp->r_rflags & FASTTRAP_EFLAGS_SF) == 0) !=
			    ((rp->r_rflags & FASTTRAP_EFLAGS_OF) == 0);
			break;
		case FASTTRAP_JG:
			taken = (rp->r_rflags & FASTTRAP_EFLAGS_ZF) == 0 &&
			    ((rp->r_rflags & FASTTRAP_EFLAGS_SF) == 0) ==
			    ((rp->r_rflags & FASTTRAP_EFLAGS_OF) == 0);
			break;
		}

		if (taken)
			new_pc = tp->ftt_dest;
		else
			new_pc = pc + tp->ftt_size;
		break;
	}

	case FASTTRAP_T_LOOP: {
		uint_t taken = 0;
#ifdef __amd64
		greg_t cx = rp->r_rcx--;
#else
		greg_t cx = rp->r_ecx--;
#endif

		switch (tp->ftt_code) {
		case FASTTRAP_LOOPNZ:
			taken = (rp->r_rflags & FASTTRAP_EFLAGS_ZF) == 0 &&
			    cx != 0;
			break;
		case FASTTRAP_LOOPZ:
			taken = (rp->r_rflags & FASTTRAP_EFLAGS_ZF) != 0 &&
			    cx != 0;
			break;
		case FASTTRAP_LOOP:
			taken = (cx != 0);
			break;
		}

		if (taken)
			new_pc = tp->ftt_dest;
		else
			new_pc = pc + tp->ftt_size;
		break;
	}

	case FASTTRAP_T_JCXZ: {
#ifdef __amd64
		greg_t cx = rp->r_rcx;
#else
		greg_t cx = rp->r_ecx;
#endif

		if (cx == 0)
			new_pc = tp->ftt_dest;
		else
			new_pc = pc + tp->ftt_size;
		break;
	}

	case FASTTRAP_T_PUSHL_EBP: {
		int ret = 0;

#ifdef __amd64
		if (p->p_model == DATAMODEL_NATIVE) {
			rp->r_rsp -= sizeof (uintptr_t);
			ret = fasttrap_sulword((void *)rp->r_rsp, rp->r_rbp);
		} else {
#endif
#ifdef __i386__
			rp->r_rsp -= sizeof (uint32_t);
			ret = fasttrap_suword32((void *)rp->r_rsp, rp->r_rbp);
#endif
#ifdef __amd64
		}
#endif

		if (ret == -1) {
			fasttrap_sigsegv(p, curthread, rp->r_rsp);
			new_pc = pc;
			break;
		}

		new_pc = pc + tp->ftt_size;
		break;
	}

	case FASTTRAP_T_NOP:
		new_pc = pc + tp->ftt_size;
		break;

	case FASTTRAP_T_JMP:
	case FASTTRAP_T_CALL:
		if (tp->ftt_code == 0) {
			new_pc = tp->ftt_dest;
		} else {
			uintptr_t value, addr = tp->ftt_dest;

			if (tp->ftt_base != FASTTRAP_NOREG)
				addr +=
				    fasttrap_getreg(rp, tp->ftt_base);
			if (tp->ftt_index != FASTTRAP_NOREG)
				addr += fasttrap_getreg(rp, tp->ftt_index) <<
				    tp->ftt_scale;

			if (tp->ftt_code == 1) {
				/*
				 * If there's a segment prefix for this
				 * instruction, we'll need to check permissions
				 * and bounds on the given selector, and adjust
				 * the address accordingly.
				 */
				if (tp->ftt_segment != FASTTRAP_SEG_NONE &&
				    fasttrap_do_seg(tp, rp, &addr) != 0) {
					fasttrap_sigsegv(p, curthread, addr);
					new_pc = pc;
					break;
				}

#ifdef __amd64
				if (p->p_model == DATAMODEL_NATIVE) {
#endif
					if ((value = fasttrap_fulword(
					    (void *)addr)) == -1) {
						fasttrap_sigsegv(p, curthread,
						    addr);
						new_pc = pc;
						break;
					}
					new_pc = value;
#ifdef __amd64
				} else {
					uint32_t value32;

					addr = (uintptr_t)(uint32_t)addr;
					if ((value32 = fasttrap_fuword32(
					    (void *)addr)) == -1) {
						fasttrap_sigsegv(p, curthread,
						    addr);
						new_pc = pc;
						break;
					}
					new_pc = value32;
				}
#endif
			} else {
				new_pc = addr;
			}
		}

		/*
		 * If this is a call instruction, we need to push the return
		 * address onto the stack.  If this fails, we send the process
		 * a SIGSEGV and reset the pc to emulate what would happen if
		 * this instruction weren't traced.
		 */
		if (tp->ftt_type == FASTTRAP_T_CALL) {
			int ret = 0;
			uintptr_t addr = 0, pcps;
#ifdef __amd64
			if (p->p_model == DATAMODEL_NATIVE) {
				addr = rp->r_rsp - sizeof (uintptr_t);
				pcps = pc + tp->ftt_size;
				ret = fasttrap_sulword((void *)addr, pcps);
			} else {
#endif
				addr = rp->r_rsp - sizeof (uint32_t);
				pcps = (uint32_t)(pc + tp->ftt_size);
				ret = fasttrap_suword32((void *)addr, pcps);
#ifdef __amd64
			}
#endif

			if (ret == -1) {
				fasttrap_sigsegv(p, curthread, addr);
				new_pc = pc;
				break;
			}

			rp->r_rsp = addr;
		}
		break;

	case FASTTRAP_T_COMMON: {
		uintptr_t addr;
#if defined(__amd64)
		uint8_t scratch[2 * FASTTRAP_MAX_INSTR_SIZE + 22];
#else
		uint8_t scratch[2 * FASTTRAP_MAX_INSTR_SIZE + 7];
#endif
		uint_t i = 0;
#ifdef illumos
		klwp_t *lwp = ttolwp(curthread);

		/*
		 * Compute the address of the ulwp_t and step over the
		 * ul_self pointer.  The method used to store the user-land
		 * thread pointer is very different on 32- and 64-bit
		 * kernels.
		 */
#if defined(__amd64)
		if (p->p_model == DATAMODEL_LP64) {
			addr = lwp->lwp_pcb.pcb_fsbase;
			addr += sizeof (void *);
		} else {
			addr = lwp->lwp_pcb.pcb_gsbase;
			addr += sizeof (caddr32_t);
		}
#else
		addr = USD_GETBASE(&lwp->lwp_pcb.pcb_gsdesc);
		addr += sizeof (void *);
#endif
#else	/* !illumos */
		fasttrap_scrspace_t *scrspace;

		scrspace = fasttrap_scraddr(curthread, tp->ftt_proc);
		if (scrspace == NULL) {
			/*
			 * We failed to allocate scratch space for this thread.
			 * Try to write the original instruction back out and
			 * reset the pc.
			 */
			if (fasttrap_copyout(tp->ftt_instr, (void *)pc,
			    tp->ftt_size))
				fasttrap_sigtrap(p, curthread, pc);
			new_pc = pc;
			break;
		}
		addr = scrspace->ftss_addr;
#endif /* illumos */

		/*
		 * Generic Instruction Tracing
		 * ---------------------------
		 *
		 * This is the layout of the scratch space in the user-land
		 * thread structure for our generated instructions.
		 *
		 *	32-bit mode			bytes
		 *	------------------------	-----
		 * a:	<original instruction>		<= 15
		 *	jmp	<pc + tp->ftt_size>	    5
		 * b:	<original instruction>		<= 15
		 *	int	T_DTRACE_RET		    2
		 *					-----
		 *					<= 37
		 *
		 *	64-bit mode			bytes
		 *	------------------------	-----
		 * a:	<original instruction>		<= 15
		 *	jmp	0(%rip)			    6
		 *	<pc + tp->ftt_size>		    8
		 * b:	<original instruction>		<= 15
		 *	int	T_DTRACE_RET		    2
		 *					-----
		 *					<= 46
		 *
		 * The %pc is set to a, and curthread->t_dtrace_astpc is set
		 * to b.
		 * If we encounter a signal on the way out of the
		 * kernel, trap() will set %pc to curthread->t_dtrace_astpc
		 * so that we execute the original instruction and re-enter
		 * the kernel rather than redirecting to the next instruction.
		 *
		 * If there are return probes (so we know that we're going to
		 * need to reenter the kernel after executing the original
		 * instruction), the scratch space will just contain the
		 * original instruction followed by an interrupt -- the same
		 * data as at b.
		 *
		 * %rip-relative Addressing
		 * ------------------------
		 *
		 * There's a further complication in 64-bit mode due to %rip-
		 * relative addressing.  While this is clearly a beneficial
		 * architectural decision for position independent code, it's
		 * hard not to see it as a personal attack against the pid
		 * provider since before there was a relatively small set of
		 * instructions to emulate; with %rip-relative addressing,
		 * almost every instruction can potentially depend on the
		 * address at which it's executed.  Rather than emulating
		 * the broad spectrum of instructions that can now be
		 * position dependent, we emulate jumps and others as in
		 * 32-bit mode, and take a different tack for instructions
		 * using %rip-relative addressing.
		 *
		 * For every instruction that uses the ModRM byte, the
		 * in-kernel disassembler reports its location.  We use the
		 * ModRM byte to identify that an instruction uses
		 * %rip-relative addressing and to see what other registers
		 * the instruction uses.  To emulate those instructions,
		 * we modify the instruction to be %rax-relative rather than
		 * %rip-relative (or %rcx-relative if the instruction uses
		 * %rax; or %r8- or %r9-relative if the REX.B is present so
		 * we don't have to rewrite the REX prefix).  We then load
		 * the value that %rip would have been into the scratch
		 * register and generate an instruction to reset the scratch
		 * register back to its original value.  The instruction
		 * sequence looks like this:
		 *
		 *	64-bit mode %rip-relative	bytes
		 *	------------------------	-----
		 * a:	<modified instruction>		<= 15
		 *	movq	$<value>, %<scratch>	    6
		 *	jmp	0(%rip)			    6
		 *	<pc + tp->ftt_size>		    8
		 * b:	<modified instruction>		<= 15
		 *	int	T_DTRACE_RET		    2
		 *					-----
		 *					   52
		 *
		 * We set curthread->t_dtrace_regv so that upon receiving
		 * a signal we can reset the value of the scratch register.
		 */
		ASSERT(tp->ftt_size < FASTTRAP_MAX_INSTR_SIZE);

		curthread->t_dtrace_scrpc = addr;
		bcopy(tp->ftt_instr, &scratch[i], tp->ftt_size);
		i += tp->ftt_size;

#ifdef __amd64
		if (tp->ftt_ripmode != 0) {
			greg_t *reg = NULL;

			ASSERT(p->p_model == DATAMODEL_LP64);
			ASSERT(tp->ftt_ripmode &
			    (FASTTRAP_RIP_1 | FASTTRAP_RIP_2));

			/*
			 * If this was a %rip-relative instruction, we change
			 * it to be either a %rax- or %rcx-relative
			 * instruction (depending on whether those registers
			 * are used as another operand; or %r8- or %r9-
			 * relative depending on the value of REX.B).  We then
			 * set that register and generate a movq instruction
			 * to reset the value.
			 */
			if (tp->ftt_ripmode & FASTTRAP_RIP_X)
				scratch[i++] = FASTTRAP_REX(1, 0, 0, 1);
			else
				scratch[i++] = FASTTRAP_REX(1, 0, 0, 0);

			if (tp->ftt_ripmode & FASTTRAP_RIP_1)
				scratch[i++] = FASTTRAP_MOV_EAX;
			else
				scratch[i++] = FASTTRAP_MOV_ECX;

			switch (tp->ftt_ripmode) {
			case FASTTRAP_RIP_1:
				reg = &rp->r_rax;
				curthread->t_dtrace_reg = REG_RAX;
				break;
			case FASTTRAP_RIP_2:
				reg = &rp->r_rcx;
				curthread->t_dtrace_reg = REG_RCX;
				break;
			case FASTTRAP_RIP_1 | FASTTRAP_RIP_X:
				reg = &rp->r_r8;
				curthread->t_dtrace_reg = REG_R8;
				break;
			case FASTTRAP_RIP_2 | FASTTRAP_RIP_X:
				reg = &rp->r_r9;
				curthread->t_dtrace_reg = REG_R9;
				break;
			}

			/* LINTED - alignment */
			*(uint64_t *)&scratch[i] = *reg;
			curthread->t_dtrace_regv = *reg;
			*reg = pc + tp->ftt_size;
			i += sizeof (uint64_t);
		}
#endif

		/*
		 * Generate the branch instruction to what would have
		 * normally been the subsequent instruction.  In 32-bit mode,
		 * this is just a relative branch; in 64-bit mode this is a
		 * %rip-relative branch that loads the 64-bit pc value
		 * immediately after the jmp instruction.
		 */
#ifdef __amd64
		if (p->p_model == DATAMODEL_LP64) {
			scratch[i++] = FASTTRAP_GROUP5_OP;
			scratch[i++] = FASTTRAP_MODRM(0, 4, 5);
			/* LINTED - alignment */
			*(uint32_t *)&scratch[i] = 0;
			i += sizeof (uint32_t);
			/* LINTED - alignment */
			*(uint64_t *)&scratch[i] = pc + tp->ftt_size;
			i += sizeof (uint64_t);
		} else {
#endif
#ifdef __i386__
			/*
			 * Set up the jmp to the next instruction; note that
			 * the size of the traced instruction cancels out.
			 */
			scratch[i++] = FASTTRAP_JMP32;
			/* LINTED - alignment */
			*(uint32_t *)&scratch[i] = pc - addr - 5;
			i += sizeof (uint32_t);
#endif
#ifdef __amd64
		}
#endif

		curthread->t_dtrace_astpc = addr + i;
		bcopy(tp->ftt_instr, &scratch[i], tp->ftt_size);
		i += tp->ftt_size;
		scratch[i++] = FASTTRAP_INT;
		scratch[i++] = T_DTRACE_RET;

		ASSERT(i <= sizeof (scratch));

#ifdef illumos
		if (fasttrap_copyout(scratch, (char *)addr, i)) {
#else
		if (uwrite(p, scratch, i, addr)) {
#endif
			fasttrap_sigtrap(p, curthread, pc);
			new_pc = pc;
			break;
		}

		if (tp->ftt_retids != NULL) {
			curthread->t_dtrace_step = 1;
			curthread->t_dtrace_ret = 1;
			new_pc = curthread->t_dtrace_astpc;
		} else {
			new_pc = curthread->t_dtrace_scrpc;
		}

		curthread->t_dtrace_pc = pc;
		curthread->t_dtrace_npc = pc + tp->ftt_size;
		curthread->t_dtrace_on = 1;
		break;
	}

	default:
		panic("fasttrap: mishandled an instruction");
	}

done:
	/*
	 * If there were no return probes when we first found the tracepoint,
	 * we should feel no obligation to honor any return probes that were
	 * subsequently enabled -- they'll just have to wait until the next
	 * time around.
	 */
	if (tp->ftt_retids != NULL) {
		/*
		 * We need to wait until the results of the instruction are
		 * apparent before invoking any return probes.  If this
		 * instruction was emulated we can just call
		 * fasttrap_return_common(); if it needs to be executed, we
		 * need to wait until the user thread returns to the kernel.
		 */
		if (tp->ftt_type != FASTTRAP_T_COMMON) {
			/*
			 * Set the program counter to the address of the traced
			 * instruction so that it looks right in ustack()
			 * output.  We had previously set it to the end of the
			 * instruction to simplify %rip-relative addressing.
			 */
			rp->r_rip = pc;
			fasttrap_return_common(rp, pc, pid, new_pc);
		} else {
			ASSERT(curthread->t_dtrace_ret != 0);
			ASSERT(curthread->t_dtrace_pc == pc);
			ASSERT(curthread->t_dtrace_scrpc != 0);
			ASSERT(new_pc == curthread->t_dtrace_astpc);
		}
	}

	rp->r_rip = new_pc;

#ifndef illumos
	PROC_LOCK(p);
	proc_write_regs(curthread, rp);
	PROC_UNLOCK(p);
#endif

	return (0);
}

int
fasttrap_return_probe(struct reg *rp)
{
	proc_t *p = curproc;
	uintptr_t pc = curthread->t_dtrace_pc;
	uintptr_t npc = curthread->t_dtrace_npc;

	curthread->t_dtrace_pc = 0;
	curthread->t_dtrace_npc = 0;
	curthread->t_dtrace_scrpc = 0;
	curthread->t_dtrace_astpc = 0;

#ifdef illumos
	/*
	 * Treat a child created by a call to vfork(2) as if it were its
	 * parent.  We know that there's only one thread of control in such a
	 * process: this one.
	 */
	while (p->p_flag & SVFORK) {
		p = p->p_parent;
	}
#endif

	/*
	 * We set rp->r_rip to the address of the traced instruction so
	 * that it appears to dtrace_probe() that we're on the original
	 * instruction, and so that the user can't easily detect our
	 * complex web of lies.  dtrace_return_probe() (our caller)
	 * will correctly set %pc after we return.
	 */
	rp->r_rip = pc;

	fasttrap_return_common(rp, pc, p->p_pid, npc);

	return (0);
}
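/*
 * Illustrative sizing check, not part of the original source: the scratch[]
 * declarations in the FASTTRAP_T_COMMON case of fasttrap_pid_probe() above
 * follow directly from the worst-case layouts described in the "Generic
 * Instruction Tracing" comment.  Assuming FASTTRAP_MAX_INSTR_SIZE is 15 (the
 * x86 maximum instruction length), the amd64 buffer covers the %rip-relative
 * worst case (2 * 15 + 22 = 52 bytes) and the i386 buffer covers the simpler
 * 32-bit layout (2 * 15 + 7 = 37 bytes).
 */
#if 0
CTASSERT(2 * FASTTRAP_MAX_INSTR_SIZE + 22 == 52);	/* amd64 worst case */
CTASSERT(2 * FASTTRAP_MAX_INSTR_SIZE + 7 == 37);	/* i386 worst case */
#endif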
static void
fasttrap_return_common(struct reg *rp, uintptr_t pc, pid_t pid,
    uintptr_t new_pc)
{
	fasttrap_tracepoint_t *tp;
	fasttrap_bucket_t *bucket;
	fasttrap_id_t *id;
#ifdef illumos
	kmutex_t *pid_mtx;

	pid_mtx = &cpu_core[CPU->cpu_id].cpuc_pid_lock;
	mutex_enter(pid_mtx);
#else
	struct rm_priotracker tracker;

	rm_rlock(&fasttrap_tp_lock, &tracker);
#endif
	bucket = &fasttrap_tpoints.fth_table[FASTTRAP_TPOINTS_INDEX(pid, pc)];

	for (tp = bucket->ftb_data; tp != NULL; tp = tp->ftt_next) {
		if (pid == tp->ftt_pid && pc == tp->ftt_pc &&
		    tp->ftt_proc->ftpc_acount != 0)
			break;
	}

	/*
	 * Don't sweat it if we can't find the tracepoint again; unlike
	 * when we're in fasttrap_pid_probe(), finding the tracepoint here
	 * is not essential to the correct execution of the process.
	 */
	if (tp == NULL) {
#ifdef illumos
		mutex_exit(pid_mtx);
#else
		rm_runlock(&fasttrap_tp_lock, &tracker);
#endif
		return;
	}

	for (id = tp->ftt_retids; id != NULL; id = id->fti_next) {
		/*
		 * If there's a branch that could act as a return site, we
		 * need to trace it, and check here if the program counter is
		 * external to the function.
		 */
		if (tp->ftt_type != FASTTRAP_T_RET &&
		    tp->ftt_type != FASTTRAP_T_RET16 &&
		    new_pc - id->fti_probe->ftp_faddr <
		    id->fti_probe->ftp_fsize)
			continue;

		dtrace_probe(id->fti_probe->ftp_id,
		    pc - id->fti_probe->ftp_faddr,
		    rp->r_rax, rp->r_rbx, 0, 0);
	}

#ifdef illumos
	mutex_exit(pid_mtx);
#else
	rm_runlock(&fasttrap_tp_lock, &tracker);
#endif
}
int
osd_set_reserved(u_int type, struct osd *osd, u_int slot, void **rsv,
    void *value)
{
	struct rm_priotracker tracker;

	KASSERT(type >= OSD_FIRST && type <= OSD_LAST, ("Invalid type."));
	KASSERT(slot > 0, ("Invalid slot."));
	KASSERT(osdm[type].osd_destructors[slot - 1] != NULL, ("Unused slot."));

	rm_rlock(&osdm[type].osd_object_lock, &tracker);
	if (slot > osd->osd_nslots) {
		void **newptr;

		if (value == NULL) {
			OSD_DEBUG(
			    "Not allocating null slot (type=%u, slot=%u).",
			    type, slot);
			rm_runlock(&osdm[type].osd_object_lock, &tracker);
			if (rsv)
				osd_free_reserved(rsv);
			return (0);
		}

		/*
		 * Too few slots allocated here, so we need to extend or create
		 * the array.
		 */
		if (rsv) {
			/*
			 * Use the reserve passed in (assumed to be
			 * the right size).
			 */
			newptr = rsv;
			if (osd->osd_nslots != 0) {
				memcpy(newptr, osd->osd_slots,
				    sizeof(void *) * osd->osd_nslots);
				free(osd->osd_slots, M_OSD);
			}
		} else {
			newptr = realloc(osd->osd_slots, sizeof(void *) * slot,
			    M_OSD, M_NOWAIT | M_ZERO);
			if (newptr == NULL) {
				rm_runlock(&osdm[type].osd_object_lock,
				    &tracker);
				return (ENOMEM);
			}
		}
		if (osd->osd_nslots == 0) {
			/*
			 * First OSD for this object, so we need to put it
			 * onto the list.
			 */
			mtx_lock(&osdm[type].osd_list_lock);
			LIST_INSERT_HEAD(&osdm[type].osd_list, osd, osd_next);
			mtx_unlock(&osdm[type].osd_list_lock);
			OSD_DEBUG("Setting first slot (type=%u).", type);
		} else
			OSD_DEBUG("Growing slots array (type=%u).", type);
		osd->osd_slots = newptr;
		osd->osd_nslots = slot;
	} else if (rsv)
		osd_free_reserved(rsv);
	OSD_DEBUG("Setting slot value (type=%u, slot=%u, value=%p).", type,
	    slot, value);
	osd->osd_slots[slot - 1] = value;
	rm_runlock(&osdm[type].osd_object_lock, &tracker);
	return (0);
}
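/*
 * Illustrative sketch, not part of the original source: the reservation
 * pattern osd_set_reserved() supports.  osd_reserve() performs the sleeping
 * slot-array allocation up front, outside the rmlock, so the later set does
 * not have to fail with ENOMEM; an unused reservation is returned via
 * osd_free_reserved() (as done above when value is NULL or the array is
 * already large enough).  The OSD_JAIL type and the "pr" argument are
 * assumptions chosen for the example.
 */
#if 0
static int
example_set_waitok(struct prison *pr, u_int slot, void *value)
{
	void **rsv;

	rsv = osd_reserve(slot);	/* may sleep; never NULL */
	return (osd_set_reserved(OSD_JAIL, &pr->pr_osd, slot, rsv, value));
}
#endif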