/* Per-app-instruction instrumentation event.  Stress-tests drreg by reserving
 * a scratch register at each app store and unreserving it at the subsequent
 * instruction.  "reg" is a file-scope variable (not visible in this chunk)
 * holding the currently reserved register, DR_REG_NULL when none is live.
 * Returns DR_EMIT_DEFAULT unconditionally.
 *
 * Fix: the original text contained mojibake '®' (U+00AE) where the
 * address-of expression '&reg' belongs, which does not compile.
 */
static dr_emit_flags_t
event_app_instruction(void *drcontext, void *tag, instrlist_t *bb, instr_t *instr,
                      bool for_trace, bool translating, void *user_data)
{
    /* We test reserving across app instrs by reserving on each store and
     * unreserving on the subsequent instr.
     */
    drvector_t allowed;
    /* Release the register reserved at the previous instruction, if any. */
    if (reg != DR_REG_NULL) {
        if (drreg_unreserve_register(drcontext, bb, instr, reg) != DRREG_SUCCESS)
            CHECK(false, "failed to unreserve");
        reg = DR_REG_NULL;
    }
    if (!instr_is_app(instr))
        return DR_EMIT_DEFAULT;
    if (!instr_writes_memory(instr))
        return DR_EMIT_DEFAULT;
    /* Skip the final instr: there is no subsequent instr in this bb at which
     * the cross-instr reservation could be released.
     */
    if (!drmgr_is_last_instr(drcontext, instr)) {
        drreg_init_and_fill_vector(&allowed, true);
        /* Limit the registers for more of a stress test: */
        drreg_set_vector_entry(&allowed, IF_X86_ELSE(DR_REG_XCX, DR_REG_R0), false);
        drreg_set_vector_entry(&allowed, IF_X86_ELSE(DR_REG_XDX, DR_REG_R1), false);
        if (drreg_reserve_register(drcontext, bb, instr, &allowed, &reg) !=
            DRREG_SUCCESS)
            DR_ASSERT(false);
        drvector_delete(&allowed);
    }
    return DR_EMIT_DEFAULT;
}
/* Shuts down module tracking: deletes the global module table's vector and
 * tears down drmgr.  Always returns DRCOVLIB_SUCCESS (neither callee's
 * result is checked here).
 */
drcovlib_status_t
drmodtrack_exit(void)
{
    drvector_delete(&module_table.vector);
    drmgr_exit();
    return DRCOVLIB_SUCCESS;
}
/* instrument_instr is called whenever a memory reference is identified.
 * It inserts code before the memory reference to fill the memory buffer
 * and jump to our own code cache to call the clean_call when the buffer is full.
 *
 * Fixes: the original text contained mojibake '®2'/'®1' (U+00AE) where the
 * address-of expressions '&reg2'/'&reg1' belong (does not compile), and a
 * doubled "to to" in the header comment.
 */
static void
instrument_instr(void *drcontext, instrlist_t *ilist, instr_t *where)
{
    instr_t *instr, *call, *restore;
    opnd_t opnd1, opnd2;
    reg_id_t reg1, reg2;
    drvector_t allowed;
    per_thread_t *data;
    app_pc pc;

    /* NOTE(review): data is read here but not used below in this function;
     * kept as in the original (drmgr_get_tls_field is presumably a pure read).
     */
    data = drmgr_get_tls_field(drcontext, tls_index);

    /* Steal two scratch registers.
     * reg2 must be ECX or RCX for jecxz.
     */
    drreg_init_and_fill_vector(&allowed, false);
    drreg_set_vector_entry(&allowed, DR_REG_XCX, true);
    if (drreg_reserve_register(drcontext, ilist, where, &allowed, &reg2) !=
            DRREG_SUCCESS ||
        drreg_reserve_register(drcontext, ilist, where, NULL, &reg1) != DRREG_SUCCESS) {
        DR_ASSERT(false); /* cannot recover */
        drvector_delete(&allowed);
        return;
    }
    drvector_delete(&allowed);

    /* The following assembly performs the following instructions
     * buf_ptr->pc = pc;
     * buf_ptr->opcode = opcode;
     * buf_ptr++;
     * if (buf_ptr >= buf_end_ptr)
     *    clean_call();
     */
    drmgr_insert_read_tls_field(drcontext, tls_index, ilist, where, reg2);
    /* Load data->buf_ptr into reg2 */
    opnd1 = opnd_create_reg(reg2);
    opnd2 = OPND_CREATE_MEMPTR(reg2, offsetof(per_thread_t, buf_ptr));
    instr = INSTR_CREATE_mov_ld(drcontext, opnd1, opnd2);
    instrlist_meta_preinsert(ilist, where, instr);

    /* Store pc */
    pc = instr_get_app_pc(where);
    /* For 64-bit, we can't use a 64-bit immediate so we split pc into two halves.
     * We could alternatively load it into reg1 and then store reg1.
     * We use a convenience routine that does the two-step store for us.
     */
    opnd1 = OPND_CREATE_MEMPTR(reg2, offsetof(ins_ref_t, pc));
    instrlist_insert_mov_immed_ptrsz(drcontext, (ptr_int_t)pc, opnd1, ilist, where,
                                     NULL, NULL);

    /* Store opcode */
    opnd1 = OPND_CREATE_MEMPTR(reg2, offsetof(ins_ref_t, opcode));
    opnd2 = OPND_CREATE_INT32(instr_get_opcode(where));
    instr = INSTR_CREATE_mov_st(drcontext, opnd1, opnd2);
    instrlist_meta_preinsert(ilist, where, instr);

    /* Increment reg value by pointer size using lea instr */
    opnd1 = opnd_create_reg(reg2);
    opnd2 = opnd_create_base_disp(reg2, DR_REG_NULL, 0, sizeof(ins_ref_t), OPSZ_lea);
    instr = INSTR_CREATE_lea(drcontext, opnd1, opnd2);
    instrlist_meta_preinsert(ilist, where, instr);

    /* Update the data->buf_ptr */
    drmgr_insert_read_tls_field(drcontext, tls_index, ilist, where, reg1);
    opnd1 = OPND_CREATE_MEMPTR(reg1, offsetof(per_thread_t, buf_ptr));
    opnd2 = opnd_create_reg(reg2);
    instr = INSTR_CREATE_mov_st(drcontext, opnd1, opnd2);
    instrlist_meta_preinsert(ilist, where, instr);

    /* We use the lea + jecxz trick for better performance.
     * lea and jecxz won't disturb the eflags, so we won't need
     * code to save and restore the application's eflags.
     */
    /* lea [reg2 - buf_end] => reg2 */
    opnd1 = opnd_create_reg(reg1);
    opnd2 = OPND_CREATE_MEMPTR(reg1, offsetof(per_thread_t, buf_end));
    instr = INSTR_CREATE_mov_ld(drcontext, opnd1, opnd2);
    instrlist_meta_preinsert(ilist, where, instr);
    opnd1 = opnd_create_reg(reg2);
    opnd2 = opnd_create_base_disp(reg1, reg2, 1, 0, OPSZ_lea);
    instr = INSTR_CREATE_lea(drcontext, opnd1, opnd2);
    instrlist_meta_preinsert(ilist, where, instr);

    /* jecxz call */
    call = INSTR_CREATE_label(drcontext);
    opnd1 = opnd_create_instr(call);
    instr = INSTR_CREATE_jecxz(drcontext, opnd1);
    instrlist_meta_preinsert(ilist, where, instr);

    /* jump restore to skip clean call */
    restore = INSTR_CREATE_label(drcontext);
    opnd1 = opnd_create_instr(restore);
    instr = INSTR_CREATE_jmp(drcontext, opnd1);
    instrlist_meta_preinsert(ilist, where, instr);

    /* clean call */
    /* We jump to our generated lean procedure which performs a full context
     * switch and clean call invocation. This is to reduce the code cache size.
     */
    instrlist_meta_preinsert(ilist, where, call);
    /* mov restore DR_REG_XCX */
    opnd1 = opnd_create_reg(reg2);
    /* This is the return address for jumping back from the lean procedure. */
    opnd2 = opnd_create_instr(restore);
    /* We could use instrlist_insert_mov_instr_addr(), but with a register
     * destination we know we can use a 64-bit immediate.
     */
    instr = INSTR_CREATE_mov_imm(drcontext, opnd1, opnd2);
    instrlist_meta_preinsert(ilist, where, instr);
    /* jmp code_cache */
    opnd1 = opnd_create_pc(code_cache);
    instr = INSTR_CREATE_jmp(drcontext, opnd1);
    instrlist_meta_preinsert(ilist, where, instr);

    /* Restore scratch registers */
    instrlist_meta_preinsert(ilist, where, restore);
    if (drreg_unreserve_register(drcontext, ilist, where, reg1) != DRREG_SUCCESS ||
        drreg_unreserve_register(drcontext, ilist, where, reg2) != DRREG_SUCCESS)
        DR_ASSERT(false);
}
/* Destroys a module table: deletes the drvector it embeds, then frees the
 * table itself via DR's global allocator.  The order is required — the
 * vector lives inside the table being freed.  Caller must not use the
 * pointer afterward; ownership of the table is consumed here.
 */
void
module_table_destroy(module_table_t *table)
{
    drvector_delete(&table->vector);
    dr_global_free(table, sizeof(*table));
}