/* Replace all calls to the target functions with calls to the
 * replacement functions in the module. */
static void
replace_calls_in_module(struct module* mod)
{
    BUG_ON(mod == NULL);
    BUG_ON(mod->module_core == NULL);

    if (mod->module_init != NULL) {
        KEDR_MSG(COMPONENT_STRING
            "target module: \"%s\", processing \"init\" area\n",
            module_name(mod));

        do_process_area(mod->module_init,
            mod->module_init + mod->init_text_size,
            repl_table.orig_addrs,
            repl_table.repl_addrs,
            repl_table.num_addrs);
    }

    KEDR_MSG(COMPONENT_STRING
        "target module: \"%s\", processing \"core\" area\n",
        module_name(mod));

    do_process_area(mod->module_core,
        mod->module_core + mod->core_text_size,
        repl_table.orig_addrs,
        repl_table.repl_addrs,
        repl_table.num_addrs);
    return;
}
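/* [Sketch] replace_calls_in_module() above assumes a combined replacement
 * table of roughly the following shape. The field names and types are
 * inferred from their use here and in do_process_area(); the authoritative
 * definition of this structure lives in the KEDR headers. */
struct kedr_repl_table {
    void **orig_addrs;      /* addresses of the original (target) functions */
    void **repl_addrs;      /* addresses of the replacement functions */
    unsigned int num_addrs; /* number of entries in both arrays */
};
/* The file-scope instance used above would then be declared as:
 *   static struct kedr_repl_table repl_table; */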
int
kedr_target_detector_clear_target_name(void)
{
    int result;

    /*
     * Only this order of mutex locking is correct.
     *
     * Otherwise a deadlock is possible, because
     * detector_notifier_call() is called with module_mutex locked.
     */
    result = mutex_lock_killable(&module_mutex);
    if (result) {
        KEDR_MSG(COMPONENT_STRING
            "failed to lock module_mutex\n");
        return -EINTR;
    }
    result = mutex_lock_killable(&target_module_mutex);
    if (result) {
        KEDR_MSG(COMPONENT_STRING
            "failed to lock target_module_mutex\n");
        mutex_unlock(&module_mutex);
        return -EINTR;
    }

    result = set_target_name_internal(NULL);

    mutex_unlock(&target_module_mutex);
    mutex_unlock(&module_mutex);
    return result;
}
/*
 * on_module_load() should do the real work when the target module is
 * loaded: instrument it, etc.
 *
 * Note that this function is called with controller_mutex locked.
 */
static void
on_module_load(struct module *mod)
{
    int ret = 0;
    unsigned long flags;

    KEDR_MSG(COMPONENT_STRING
        "target module \"%s\" has just loaded.\n",
        module_name(mod));

    spin_lock_irqsave(&target_in_init_lock, flags);
    target_in_init = 1;
    spin_unlock_irqrestore(&target_in_init_lock, flags);

    trace_target_session_begins(target_name);
    /* Until this function finishes, no replacement function will be
     * called because the target module has not completed loading yet.
     * That means no tracepoint will be triggered in the target module
     * before the tracepoint above is triggered. The order of the
     * messages in the trace is still up to the tracing system. */

    /* Notify the base and request the combined replacement table */
    ret = kedr_impl_on_target_load(target_module, &repl_table);
    if (ret != 0) {
        KEDR_MSG(COMPONENT_STRING
            "failed to handle loading of the target module.\n");
        return;
    }

    replace_calls_in_module(mod);
    return;
}
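/* [Sketch] One way the session tracepoints used above could be declared
 * with the standard TRACE_EVENT machinery. This is an assumption for
 * illustration only: the actual KEDR trace facility may define
 * trace_target_session_begins()/_ends() differently. The usual trace
 * header scaffolding (TRACE_SYSTEM, TRACE_INCLUDE_FILE, the final
 * #include <trace/define_trace.h>) is omitted here for brevity. */
#include <linux/tracepoint.h>

TRACE_EVENT(target_session_begins,
    TP_PROTO(const char *target_name),
    TP_ARGS(target_name),
    TP_STRUCT__entry(
        __string(name, target_name)
    ),
    TP_fast_assign(
        __assign_str(name, target_name);
    ),
    TP_printk("target session begins: \"%s\"", __get_str(name))
);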
/*
 * on_module_unload() should do the real work when the target module is
 * about to unload.
 *
 * Note that this function is called with controller_mutex locked.
 *
 * [NB] This function is called even if initialization of the target
 * module fails.
 */
static void
on_module_unload(struct module *mod)
{
    int ret = 0;
    unsigned long flags;

    KEDR_MSG(COMPONENT_STRING
        "target module \"%s\" is going to unload.\n",
        module_name(mod));

    /* The replacement table must no longer be used.
     * The base will take care of releasing its contents when appropriate.
     * [NB] Access to repl_table is already synchronized because
     * on_module_unload() is called with controller_mutex locked. */
    repl_table.num_addrs = 0;
    repl_table.orig_addrs = NULL;
    repl_table.repl_addrs = NULL;

    spin_lock_irqsave(&target_in_init_lock, flags);
    target_in_init = 0;
    spin_unlock_irqrestore(&target_in_init_lock, flags);

    /* Notify the base */
    ret = kedr_impl_on_target_unload(target_module);
    if (ret != 0) {
        KEDR_MSG(COMPONENT_STRING
            "failed to handle unloading of the target module.\n");
        return;
    }

    trace_target_session_ends(target_name);
    return;
}
/* Should be executed with both module_mutex and target_module_mutex
 * locked. */
static int
set_target_name_internal(const char* name)
{
    if (target_module != NULL) {
        KEDR_MSG(COMPONENT_STRING
            "cannot change the name of the module to watch while "
            "the target module is loaded\n");
        return -EBUSY;
    }

    /* Check if the new target is already loaded */
    if ((name != NULL) && (find_module(name) != NULL)) {
        KEDR_MSG(COMPONENT_STRING
            "target module \"%s\" is already loaded\n", name);
        KEDR_MSG(COMPONENT_STRING
            "instrumenting already loaded target modules is not supported\n");
        return -EEXIST;
    }

    /*
     * Currently, the kernel delivers the "module state changed"
     * notifications under the SAME acquisition of 'module_mutex' that
     * loads the module (see the kernel code).
     *
     * Therefore, setting the name of the target module after verifying,
     * under that same 'module_mutex' lock, that the module is not loaded
     * yet is sufficient to guarantee that we will be correctly notified
     * when it does load.
     *
     * Note: without this property of the kernel code, such a guarantee
     * could not be enforced.
     */
    kfree(target_name);
    if (name != NULL) {
        target_name = kstrdup(name, GFP_KERNEL);
        if (target_name == NULL) {
            pr_err("Failed to allocate the target name string.\n");
            return -ENOMEM;
        }
    } else {
        target_name = NULL;
    }
    return 0;
}
/* A callback function to catch loading and unloading of a module.
 * Sets the target_module pointer among other things. */
static int
detector_notifier_call(struct notifier_block *nb,
    unsigned long mod_state, void *vmod)
{
    struct module* mod = (struct module *)vmod;
    BUG_ON(mod == NULL);

    /* Handle the module state change */
    switch (mod_state) {
    case MODULE_STATE_COMING: /* the module has just loaded */
        if (mutex_lock_killable(&target_module_mutex)) {
            KEDR_MSG(COMPONENT_STRING
                "failed to lock target_module_mutex\n");
            return 0;
        }
        if ((target_name != NULL)
            && (strcmp(target_name, module_name(mod)) == 0)) {
            BUG_ON(target_module != NULL);
            /* Pin the module that provides the notifier callbacks
             * before calling into it. */
            if ((notifier->mod == NULL) || try_module_get(notifier->mod)) {
                if (!notifier->on_target_load(notifier, mod)) {
                    target_module = mod;
                } else {
                    if (notifier->mod)
                        module_put(notifier->mod);
                }
            } else {
                pr_err("Failed to get a reference to the notifier's module.\n");
            }
        }
        mutex_unlock(&target_module_mutex);
        break;

    case MODULE_STATE_GOING: /* the module is going to unload */
        /* If the target module has already been unloaded,
         * target_module is NULL, so (mod != target_module) will
         * be true. */
        mutex_lock(&target_module_mutex);
        if (mod == target_module) {
            notifier->on_target_unload(notifier, mod);
            target_module = NULL;
            if (notifier->mod != NULL)
                module_put(notifier->mod);
        }
        mutex_unlock(&target_module_mutex);
        break;
    }
    return 0;
}
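/* [Sketch] How detector_notifier_call() is plugged into the kernel's
 * module notifier chain; it is registered via register_module_notifier()
 * in kedr_target_detector_init() below. The actual declaration lives
 * elsewhere in this file and the priority value here is an assumption. */
static struct notifier_block detector_nb = {
    .notifier_call = detector_notifier_call,
    .next = NULL,
    .priority = 3, /* assumed: run before typical notifiers */
};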
static int __init
payload_init_module(void)
{
    int ret = 0;
    BUILD_BUG_ON(ARRAY_SIZE(orig_addrs) != ARRAY_SIZE(repl_addrs));

    KEDR_MSG("[kedr_leak_check] Initializing\n");

    if (stack_depth == 0 || stack_depth > KEDR_MAX_FRAMES) {
        printk(KERN_ERR "[kedr_leak_check] "
            "Invalid value of 'stack_depth': %u (should be a positive "
            "integer not greater than %u)\n",
            stack_depth,
            KEDR_MAX_FRAMES);
        return -EINVAL;
    }

    ret = klc_output_init();
    if (ret != 0)
        return ret;

    ret = kedr_payload_register(&payload);
    if (ret != 0)
        goto fail_reg;

    return 0;

fail_reg:
    klc_output_fini();
    return ret;
}
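/* [Sketch] One plausible way 'stack_depth' is exposed as a module
 * parameter, validated in payload_init_module() above. The default value
 * of 12 is an assumption; the actual definition lives elsewhere in
 * kedr_leak_check. */
static unsigned int stack_depth = 12;
module_param(stack_depth, uint, S_IRUGO);
MODULE_PARM_DESC(stack_depth,
    "Maximum number of stack frames to store, 1..KEDR_MAX_FRAMES");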
static void
payload_cleanup_module(void)
{
    kedr_payload_unregister(&payload);
    klc_output_fini();

    KEDR_MSG("[kedr_leak_check] Cleanup complete\n");
    return;
}
void
kedr_payload_unregister(struct kedr_payload *payload)
{
    struct payload_elem *elem;
    BUG_ON(payload == NULL);

    if (mutex_lock_killable(&base_mutex)) {
        KEDR_MSG(COMPONENT_STRING
            "failed to lock base_mutex\n");
        return;
    }

    elem = payload_elem_find(payload, &payload_list);
    if (elem == NULL) {
        KEDR_MSG(COMPONENT_STRING
            "module \"%s\" attempts to unregister the payload "
            "that was never registered\n",
            module_name(payload->mod));
        goto out;
    }

    KEDR_MSG(COMPONENT_STRING
        "unregistering payload from module \"%s\"\n",
        module_name(payload->mod));

    list_del(&elem->list);
    kfree(elem);

    payload_functions_unuse(payload);
    function_replacements_remove_payload(&replaced_functions_map, payload);

out:
    mutex_unlock(&base_mutex);
    return;
}
/* ================================================================ */
int
kedr_target_detector_init(struct kedr_target_module_notifier* notifier_param)
{
    int result = 0;
    KEDR_MSG(COMPONENT_STRING "Initializing\n");

    notifier = notifier_param;

    target_name = NULL;
    target_module = NULL;

    result = register_module_notifier(&detector_nb);
    return result;
}
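/* [Sketch] The notifier interface as inferred from its use in
 * detector_notifier_call() above; the authoritative definition of
 * struct kedr_target_module_notifier is in the KEDR headers. */
struct kedr_target_module_notifier {
    struct module *mod; /* module providing the callbacks; pinned while in use */
    /* Returns 0 on success; nonzero means "do not watch this module". */
    int (*on_target_load)(struct kedr_target_module_notifier *notifier,
        struct module *m);
    void (*on_target_unload)(struct kedr_target_module_notifier *notifier,
        struct module *m);
};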
/* ================================================================ */
int
kedr_base_init(struct kedr_base_operations* ops)
{
    int result;

    KEDR_MSG(COMPONENT_STRING "initializing\n");

    kedr_base_ops = ops;

    /* Initialize the list of payloads */
    INIT_LIST_HEAD(&payload_list);
    payloads_are_used = 0;

    result = functions_map_init(&replaced_functions_map, 20);
    if (result)
        return result;

    return 0;
}
/* ================================================================ */
static void
controller_cleanup_module(void)
{
    int i; /* used to iterate over the payloads */

    unregister_module_notifier(&detector_nb);
    kedr_impl_controller_unregister(&controller);

    for (i = 0; i < payloads_n; i++)
        payloads_exit[i]();

    base_exit_module();

    /* TODO later: uninstrument the target if it is still loaded.
     * This makes sense only if there is a reasonably safe way of
     * instrumenting a live module ("hot patching") available. */

    KEDR_MSG(COMPONENT_STRING "cleanup successful\n");
    return;
}
int
kedr_payload_register(struct kedr_payload *payload)
{
    int result = 0;

    BUG_ON(payload == NULL);

    result = mutex_lock_killable(&base_mutex);
    if (result != 0) {
        KEDR_MSG(COMPONENT_STRING
            "failed to lock base_mutex\n");
        return result;
    }

    result = kedr_payload_register_internal(payload);

    mutex_unlock(&base_mutex);
    return result;
}
/* A callback function to catch loading and unloading of a module.
 * Sets the target_module pointer among other things. */
static int
detector_notifier_call(struct notifier_block *nb,
    unsigned long mod_state, void *vmod)
{
    struct module* mod = (struct module *)vmod;
    BUG_ON(mod == NULL);

    if (mutex_lock_interruptible(&controller_mutex) != 0) {
        KEDR_MSG(COMPONENT_STRING
            "failed to lock controller_mutex\n");
        return 0;
    }

    if (handle_module_notifications) {
        /* Handle the module state change */
        switch (mod_state) {
        case MODULE_STATE_COMING: /* the module has just loaded */
            if (!filter_module(module_name(mod)))
                break;

            BUG_ON(target_module != NULL);
            target_module = mod;
            on_module_load(mod);
            break;

        case MODULE_STATE_GOING: /* the module is going to unload */
            /* If the target module has already been unloaded,
             * target_module is NULL, so (mod != target_module) will
             * be true. */
            if (mod != target_module)
                break;

            on_module_unload(mod);
            target_module = NULL;
        }
    }

    mutex_unlock(&controller_mutex);
    return 0;
}
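/* [Sketch] The name-based filter assumed above: accept only the module
 * whose name matches 'target_name'. The signature is inferred from the
 * call site; the real implementation may support richer matching. */
static int
filter_module(const char *name)
{
    return (target_name != NULL) && (strcmp(name, target_name) == 0);
}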
/* ================================================================ */
static int __init
controller_init_module(void)
{
    int i; /* used to iterate over the payloads */
    int result = 0;

    KEDR_MSG(COMPONENT_STRING "initializing\n");

    result = base_init_module();
    if (result)
        return result;

    for (i = 0; i < payloads_n; i++) {
        result = payloads_init[i]();
        if (result) {
            /* Roll back only the payloads initialized so far, i.e.
             * indices 0..i-1; the one that just failed must not be
             * "exited". Not "goto fail" here: the failure path below
             * would call payloads_exit() for ALL payloads. */
            for (--i; i >= 0; i--)
                payloads_exit[i]();
            base_exit_module();
            pr_err(COMPONENT_STRING "failed to register all payloads\n");
            return result;
        }
    }

    /* Register with the base - must do this before the controller
     * begins to respond to module load/unload notifications. */
    result = kedr_impl_controller_register(&controller);
    if (result < 0)
        goto fail;

    /* When looking for the target module, module_mutex must be locked */
    result = mutex_lock_interruptible(&module_mutex);
    if (result != 0) {
        KEDR_MSG(COMPONENT_STRING
            "failed to lock module_mutex\n");
        goto fail;
    }

    result = register_module_notifier(&detector_nb);
    if (result < 0)
        goto unlock_and_fail;

    /* Check if the target is already loaded */
    if (find_module(target_name) != NULL) {
        KEDR_MSG(COMPONENT_STRING
            "target module \"%s\" is already loaded\n",
            target_name);
        KEDR_MSG(COMPONENT_STRING
            "instrumenting already loaded target modules is not supported\n");
        result = -EEXIST;
        goto unlock_and_fail;
    }

    result = mutex_lock_interruptible(&controller_mutex);
    if (result != 0) {
        KEDR_MSG(COMPONENT_STRING
            "failed to lock controller_mutex\n");
        /* Must go through unlock_and_fail: module_mutex is still held
         * here ("goto fail" would leave it locked). */
        goto unlock_and_fail;
    }
    handle_module_notifications = 1;
    mutex_unlock(&controller_mutex);

    mutex_unlock(&module_mutex);

    /* From now on, the controller will be notified when the target module
     * is loaded or has finished cleaning up and is just about to unload
     * from memory. */
    return 0; /* success */

unlock_and_fail:
    mutex_unlock(&module_mutex);
fail:
    controller_cleanup_module();
    return result;
}
/* Should be executed with the mutex locked */
static int
kedr_payload_register_internal(struct kedr_payload *payload)
{
    int result = 0;
    struct payload_elem *elem_new = NULL;

    BUG_ON(payload == NULL);

    /* If a target module is already being watched, do not allow
     * registering another payload. */
    if (payloads_are_used) {
        pr_err("Failed to register the new payload: KEDR functionality "
            "is currently in use.\n");
        return -EBUSY;
    }

    if (payload_elem_find(payload, &payload_list) != NULL) {
        KEDR_MSG(COMPONENT_STRING
            "module \"%s\" attempts to register the same payload twice\n",
            module_name(payload->mod));
        return -EINVAL;
    }

    result = function_replacements_add_payload(&replaced_functions_map,
        payload);
    if (result) {
        pr_err("Failed to register the payload: it replaces a function "
            "that has already been replaced by another payload.\n");
        goto err_replacements;
    }

    result = payload_functions_use(payload);
    if (result)
        goto err_functions_use;

    KEDR_MSG(COMPONENT_STRING
        "registering payload from module \"%s\"\n",
        module_name(payload->mod));

    elem_new = kzalloc(sizeof(*elem_new), GFP_KERNEL);
    if (elem_new == NULL) {
        result = -ENOMEM;
        goto err_alloc_new_elem;
    }

    INIT_LIST_HEAD(&elem_new->list);
    elem_new->payload = payload;
    elem_new->is_used = 0;

    list_add_tail(&elem_new->list, &payload_list);
    return 0;

err_alloc_new_elem:
    payload_functions_unuse(payload);
err_functions_use:
    function_replacements_remove_payload(&replaced_functions_map, payload);
err_replacements:
    return result;
}
/* Process the instructions in the [kbeg, kend) area.
 * Each 'call' instruction calling one of the target functions will be
 * changed so as to call the corresponding replacement function instead.
 * The addresses of the target and replacement functions are given in
 * 'from_funcs' and 'to_funcs', respectively, the number of elements
 * to process in these arrays being 'nfuncs'.
 * For each i = 0..nfuncs-1, from_funcs[i] corresponds to to_funcs[i]. */
static void
do_process_area(void* kbeg, void* kend,
    void** from_funcs, void** to_funcs, unsigned int nfuncs)
{
    struct insn c_insn; /* current instruction */
    void* pos = NULL;

    BUG_ON(kbeg == NULL);
    BUG_ON(kend == NULL);
    BUG_ON(kend < kbeg);

    for (pos = kbeg; pos + 4 < kend; ) {
        unsigned int len;
        unsigned int k;

        /* The 'pos + 4 < kend' check is based on a heuristic: the 'call'
         * and 'jmp' instructions we need to instrument are 5 bytes long
         * on x86 and x86-64 machines. So if no more than 4 bytes remain
         * before the end, they cannot contain an instruction of this
         * kind and we do not need to check them.
         * This allows us to avoid the "decoder stopped past the end of
         * the section" condition (see do_process_insn()). There, the
         * decoder tries to chew the trailing 1-2 zero bytes of the
         * section (padding) and gets past the end of the section.
         * The instruction consisting of zero bytes only appears to be
         * 3 bytes long (it is a flavour of 'add'), i.e. shorter than the
         * kind of 'call' we are instrumenting.
         *
         * [NB] The above check automatically handles the 'pos == kend'
         * case. */
        len = do_process_insn(&c_insn, pos, kend,
            from_funcs, to_funcs, nfuncs);
        if (len == 0) {
            KEDR_MSG(COMPONENT_STRING
                "do_process_insn() returned 0\n");
            WARN_ON(1);
            break;
        }

        if (pos + len > kend)
            break;

        /* If the decoded instruction contains only zero bytes (this is
         * the case, for example, for one flavour of 'add'), skip to the
         * first nonzero byte after it.
         * This avoids problems if there are two or more sections in the
         * area being analysed. Such a situation is very unlikely - we
         * have yet to find an example. Note that ctors and dtors seem to
         * be placed in the same '.text' section as the ordinary
         * functions ('.ctors' and '.dtors' sections probably contain
         * just the lists of their addresses or something similar).
         *
         * As we are not interested in instrumenting 'add' or the like,
         * we can skip to the next instruction that does not begin with a
         * zero byte. If we are actually past the last instruction in the
         * section, we get to the next section or to the end of the area
         * this way, which is what we want in this case. */
        for (k = 0; k < len; ++k) {
            if (*((unsigned char*)pos + k) != 0)
                break;
        }

        pos += len;

        if (k == len) {
            /* all bytes are zero, skip the following zeros as well */
            while (pos < kend && *(unsigned char*)pos == 0)
                ++pos;
        }
    }
    return;
}
/* Decode and process the instruction ('c_insn') at the address 'kaddr'
 * - see the description of do_process_area() for details.
 *
 * Check if we get past the end of the buffer [kaddr, end_kaddr).
 *
 * The function returns the length of the instruction in bytes.
 * 0 is returned in case of failure. */
static unsigned int
do_process_insn(struct insn* c_insn, void* kaddr, void* end_kaddr,
    void** from_funcs, void** to_funcs, unsigned int nfuncs)
{
    /* ptr to the 32-bit offset argument in the instruction */
    u32* offset = NULL;

    /* address of the function being called */
    void* addr = NULL;

    static const unsigned char op = 0xe8; /* 'call <offset>' */

    int i;

    BUG_ON(from_funcs == NULL || to_funcs == NULL);

    /* Decode the instruction and populate the 'insn' structure */
    kernel_insn_init(c_insn, kaddr);
    insn_get_length(c_insn);

    if (c_insn->length == 0)
        return 0;

    if (kaddr + c_insn->length > end_kaddr) {
        /* Note: it is OK to stop at 'end_kaddr' but no further */
        KEDR_MSG(COMPONENT_STRING
            "instruction decoder stopped past the end of the section.\n");
        insn_get_opcode(c_insn);
        printk(KERN_ALERT COMPONENT_STRING
            "kaddr=%p, end_kaddr=%p, c_insn->length=%d, opcode=0x%x\n",
            (void*)kaddr,
            (void*)end_kaddr,
            (int)c_insn->length,
            (unsigned int)c_insn->opcode.value);
        WARN_ON(1);
    }

    /* This call may be overkill as insn_get_length() probably has to
     * decode the instruction completely.
     * Still, to operate safely, we need insn_get_opcode() before we can
     * access c_insn->opcode.
     * The call is cheap anyway, no re-decoding is performed. */
    insn_get_opcode(c_insn);
    if (c_insn->opcode.value != op) {
        /* Not a 'call' instruction, nothing to do. */
        return c_insn->length;
    }

    /* [NB] For some reason, the decoder stores the argument of 'call'
     * and 'jmp' as 'immediate' rather than 'displacement' (as the Intel
     * manuals name it). Maybe it is a bug, maybe it is not.
     * Meanwhile, we call this value 'offset' to avoid confusion. */

    /* Call this before trying to access c_insn->immediate */
    insn_get_immediate(c_insn);
    if (c_insn->immediate.nbytes != 4) {
        KEDR_MSG(COMPONENT_STRING
            "at 0x%p: "
            "opcode: 0x%x, "
            "immediate field is %u rather than 32 bits in size; "
            "insn.length = %u, insn.imm = %u, off_immed = %d\n",
            kaddr,
            (unsigned int)c_insn->opcode.value,
            8 * (unsigned int)c_insn->immediate.nbytes,
            c_insn->length,
            (unsigned int)c_insn->immediate.value,
            insn_offset_immediate(c_insn));
        WARN_ON(1);
        return c_insn->length;
    }

    offset = (u32*)(kaddr + insn_offset_immediate(c_insn));
    addr = CALL_ADDR_FROM_OFFSET(kaddr, c_insn->length, *offset);

    /* Check if one of the functions of interest is called */
    for (i = 0; i < nfuncs; ++i) {
        if (addr == from_funcs[i]) {
            /* Change the address of the function to be called */
            BUG_ON(to_funcs[i] == NULL);

            KEDR_MSG(COMPONENT_STRING
                "at 0x%p: changing address 0x%p to 0x%p "
                "(displ: 0x%x to 0x%x)\n",
                kaddr,
                from_funcs[i],
                to_funcs[i],
                (unsigned int)(*offset),
                (unsigned int)CALL_OFFSET_FROM_ADDR(
                    kaddr, c_insn->length, to_funcs[i]));

            *offset = CALL_OFFSET_FROM_ADDR(
                kaddr, c_insn->length, to_funcs[i]);
            break;
        }
    }

    return c_insn->length;
}
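/* [Sketch] Plausible definitions of the conversion macros used above; the
 * actual ones are defined elsewhere in this file. For a near call/jmp with
 * a rel32 operand, the destination address is computed relative to the
 * address of the NEXT instruction:
 *   dest   = insn_addr + insn_len + (sign-extended) offset
 *   offset = dest - (insn_addr + insn_len)
 * Note the sign extension via (s32): rel32 is a signed displacement, so a
 * backward call must subtract from the address on x86-64 as well. */
#define CALL_ADDR_FROM_OFFSET(insn_addr, insn_len, offset) \
    ((void*)((unsigned long)(insn_addr) + (insn_len) + (long)(s32)(offset)))

#define CALL_OFFSET_FROM_ADDR(insn_addr, insn_len, dest_addr) \
    ((u32)((unsigned long)(dest_addr) - \
        ((unsigned long)(insn_addr) + (insn_len))))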