static int install_os_hooks(void) { int ret; /* register module state change notifier */ nb_init.notifier_call = module_init_notifier; ret = register_module_notifier(&nb_init); if (ret != 0) { printk(KERN_ERR "register_module_notifier() fails\n"); return -EFAULT; } /* hook necessary system call table */ px_original_sys_fork = (sys_fork_t) xchg(&system_call_table[__NR_fork - __NR_SYSCALL_BASE], px_sys_fork); px_original_sys_vfork = (sys_vfork_t) xchg(&system_call_table[__NR_vfork - __NR_SYSCALL_BASE], px_sys_vfork); px_original_sys_clone = (sys_clone_t) xchg(&system_call_table[__NR_clone - __NR_SYSCALL_BASE], px_sys_clone); px_original_sys_execve = (sys_execve_t) xchg(&system_call_table[__NR_execve - __NR_SYSCALL_BASE], px_sys_execve); px_original_sys_mmap = (sys_mmap_t) xchg(&system_call_table[__NR_mmap - __NR_SYSCALL_BASE], px_sys_mmap); px_original_sys_mmap2 = (sys_mmap2_t) xchg(&system_call_table[__NR_mmap2 - __NR_SYSCALL_BASE], px_sys_mmap2); px_original_sys_exit = (sys_exit_t) xchg(&system_call_table[__NR_exit - __NR_SYSCALL_BASE], px_sys_exit); px_original_sys_exit_group = (sys_exit_group_t) xchg(&system_call_table[__NR_exit_group - __NR_SYSCALL_BASE], px_sys_exit_group); px_original_sys_kill = (sys_kill_t) xchg(&system_call_table[__NR_kill - __NR_SYSCALL_BASE], px_sys_kill); px_original_sys_tkill = (sys_tkill_t) xchg(&system_call_table[__NR_tkill - __NR_SYSCALL_BASE], px_sys_tkill); px_original_sys_tgkill = (sys_tgkill_t) xchg(&system_call_table[__NR_tgkill - __NR_SYSCALL_BASE], px_sys_tgkill); gb_enable_os_hooks = true; return 0; }
/*
 * _stp_handle_start - handle the STP_START control message.
 *
 * Runs the module's startup sequence exactly once: the flag dance under
 * _stp_transport_mutex ensures only the first caller (and only if exit has
 * not already been requested) proceeds with initialization.  On that path it
 * runs systemtap_module_init(), registers the module and (transport v2)
 * panic notifiers, and acknowledges the start by sending STP_START back to
 * the reader.  st->res carries the init result back to the requester.
 */
static void _stp_handle_start(struct _stp_msg_start *st)
{
	int handle_startup;

	/* Atomically decide whether we are the one-and-only startup caller. */
	mutex_lock(&_stp_transport_mutex);
	handle_startup = (! _stp_start_called && ! _stp_exit_called);
	_stp_start_called = 1;
	mutex_unlock(&_stp_transport_mutex);

	if (handle_startup) {
		dbug_trans(1, "stp_handle_start\n");

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,23) // linux commit #5f4352fb
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,29) // linux commit #9be260a6
#ifdef STAPCONF_VM_AREA
		{ /* PR9740: workaround for kernel valloc bug. */
                  /* PR14611: not required except within above kernel range. */
			/* Allocate and immediately free a dummy vm area; the
			   side effect of doing so works around the kernel bug
			   referenced above. */
			void *dummy;
#ifdef STAPCONF_VM_AREA_PTE
			dummy = alloc_vm_area (PAGE_SIZE, NULL);
#else
			dummy = alloc_vm_area (PAGE_SIZE);
#endif
			free_vm_area (dummy);
		}
#endif
#endif
#endif

		_stp_target = st->target;
		st->res = systemtap_module_init();
		if (st->res == 0)
			_stp_probes_started = 1;

		/* Register the module notifier.  Registration failure is
		   non-fatal: we warn and continue without notifications. */
		if (!_stp_module_notifier_active) {
			int rc = register_module_notifier(& _stp_module_notifier_nb);
			if (rc == 0)
				_stp_module_notifier_active = 1;
			else
				_stp_warn ("Cannot register module notifier (%d)\n", rc);
		}

		/* Called from the user context in response to a proc file
		   write (in _stp_ctl_write_cmd), so may notify the reader
		   directly. */
		_stp_ctl_send_notify(STP_START, st, sizeof(*st));

		/* Register the panic notifier. */
#if STP_TRANSPORT_VERSION == 2
		atomic_notifier_chain_register(&panic_notifier_list, &_stp_module_panic_notifier_nb);
#endif
	}
}
/* ================================================================ */
/*
 * kedr_target_detector_init - initialize the target detector.
 *
 * Stores the caller-supplied notifier, resets the target bookkeeping,
 * and subscribes to module state-change notifications.
 *
 * Returns 0 on success or the error from register_module_notifier().
 */
int kedr_target_detector_init(struct kedr_target_module_notifier* notifier_param)
{
	KEDR_MSG(COMPONENT_STRING "Initializing\n");

	notifier = notifier_param;
	target_name = NULL;
	target_module = NULL;

	return register_module_notifier(&detector_nb);
}
/*
 * request_module_depend - load a dependency module, or defer the load.
 *
 * While this module is still COMING, modprobe cannot be invoked yet, so a
 * module notifier is (lazily) registered to pick the request up later and
 * *flag is set to mark the dependency as pending.  Once the module is LIVE
 * the dependency is requested directly.
 *
 * @name: name of the module to load.
 * @flag: set to 1 when the load has been deferred to the notifier.
 */
static void request_module_depend(char *name, int *flag)
{
	switch (THIS_MODULE->state) {
	case MODULE_STATE_COMING:
		if (!pending_registered) {
			/* Only remember a successful registration; on failure
			 * the next call will retry instead of silently losing
			 * the notifier forever. */
			if (register_module_notifier(&pending_notifier) == 0)
				pending_registered = 1;
		}
		*flag = 1;
		break;
	case MODULE_STATE_LIVE:
		request_module(name);
		break;
	default:
		/* nothing */;
		break;
	}
}
static int install_os_hooks(void) { int ret; /* register module state change notifier */ nb_init.notifier_call = module_init_notifier; ret = register_module_notifier(&nb_init); if (ret != 0) { printk(KERN_ERR "[CPA] register_module_notifier() fails\n"); return -EFAULT; } #ifdef CONFIG_PROFILING ret = profile_event_register(PROFILE_TASK_EXIT, &task_exit_nb); if (ret != 0) { printk(KERN_ERR "[CPA] profile_event_register() fails\n"); return -EFAULT; } #endif /* hook necessary system call table */ px_original_sys_fork = (sys_fork_t) xchg(&system_call_table[__NR_fork - __NR_SYSCALL_BASE], px_sys_fork); px_original_sys_vfork = (sys_vfork_t) xchg(&system_call_table[__NR_vfork - __NR_SYSCALL_BASE], px_sys_vfork); px_original_sys_clone = (sys_clone_t) xchg(&system_call_table[__NR_clone - __NR_SYSCALL_BASE], px_sys_clone); px_original_sys_execve = (sys_execve_t) xchg(&system_call_table[__NR_execve - __NR_SYSCALL_BASE], px_sys_execve); px_original_sys_mmap = (sys_mmap_t) xchg(&system_call_table[__NR_mmap - __NR_SYSCALL_BASE], px_sys_mmap); px_original_sys_mmap2 = (sys_mmap2_t) xchg(&system_call_table[__NR_mmap2 - __NR_SYSCALL_BASE], px_sys_mmap2); px_original_sys_exit = (sys_exit_t) xchg(&system_call_table[__NR_exit - __NR_SYSCALL_BASE], px_sys_exit); px_original_sys_exit_group = (sys_exit_group_t) xchg(&system_call_table[__NR_exit_group - __NR_SYSCALL_BASE], px_sys_exit_group); px_original_sys_kill = (sys_kill_t) xchg(&system_call_table[__NR_kill - __NR_SYSCALL_BASE], px_sys_kill); px_original_sys_tkill = (sys_tkill_t) xchg(&system_call_table[__NR_tkill - __NR_SYSCALL_BASE], px_sys_tkill); px_original_sys_tgkill = (sys_tgkill_t) xchg(&system_call_table[__NR_tgkill - __NR_SYSCALL_BASE], px_sys_tgkill); px_original_sys_prctl = (sys_prctl_t) xchg(&system_call_table[__NR_prctl - __NR_SYSCALL_BASE], px_sys_prctl); gb_enable_os_hooks = true; return 0; }
/*
 * strom_init_extra_symbols - solve the mandatory symbols and grab the module
 */
static int __init strom_init_extra_symbols(void)
{
	int		rc;

	/* Resolve one mandatory symbol: the address goes to p_<SYMBOL> and the
	 * owning module to mod_<SYMBOL> (token-pasted names); any failure
	 * aborts initialization via the cleanup path below. */
#define LOOKUP_MANDATORY_EXTRA_SYMBOL(SYMBOL)						\
	do {										\
		rc = __strom_lookup_extra_symbol(#SYMBOL,				\
										 (void **)&p_##SYMBOL,	\
										 &mod_##SYMBOL,			\
										 false);				\
		if (rc)									\
			goto cleanup;						\
	} while(0)

#ifdef EXTRA_KSYMS_NEEDS_NVIDIA
	/* nvidia.ko */
	LOOKUP_MANDATORY_EXTRA_SYMBOL(nvidia_p2p_get_pages);
	LOOKUP_MANDATORY_EXTRA_SYMBOL(nvidia_p2p_put_pages);
	LOOKUP_MANDATORY_EXTRA_SYMBOL(nvidia_p2p_free_page_table);
#endif /* EXTRA_KSYMS_NEEDS_NVIDIA */
	/* nvme.ko */
	LOOKUP_MANDATORY_EXTRA_SYMBOL(nvme_free_iod);
	LOOKUP_MANDATORY_EXTRA_SYMBOL(nvme_submit_io_cmd);
	LOOKUP_MANDATORY_EXTRA_SYMBOL(nvme_setup_prps);

	/* notifier to get optional extra symbols */
	rc = register_module_notifier(&nvme_strom_nb);
	if (rc)
		goto cleanup;

	return 0;

cleanup:
	/* Drop the references taken on any modules we already grabbed. */
	strom_put_all_extra_modules();
	return rc;
}
/* Subscribe to kernel module load/unload notifications. */
static int probe_kmodules(void)
{
	int rc;

	rc = register_module_notifier(&vtss_kmodules_nb);
	return rc;
}
/* Module init: hook gcov into module state-change notifications. */
static int __init gcov_init(void)
{
	int ret;

	ret = register_module_notifier(&gcov_nb);
	return ret;
}
/* Register the jump-label module notifier at init time. */
static __init int jump_label_init_module(void)
{
	int rc;

	rc = register_module_notifier(&jump_label_module_nb);
	return rc;
}
/* Subscribe the immediate-values subsystem to module notifications. */
static int init_imv(void)
{
	int rc;

	rc = register_module_notifier(&imv_module_nb);
	return rc;
}
/* ================================================================ */
/*
 * controller_init_module - module init for the KEDR controller.
 *
 * Sequence: init the base, init every payload, register the controller
 * with the base, then (under module_mutex) register the module-state
 * notifier and verify the target module is not already loaded.  Finally,
 * handle_module_notifications is flipped on under controller_mutex so the
 * notifier starts acting only once setup is complete.
 *
 * Returns 0 on success, a negative error otherwise (with everything set
 * up so far rolled back).
 */
static int __init
controller_init_module(void)
{
	int i;//for iterate payloads
	int result = 0;
	KEDR_MSG(COMPONENT_STRING
		"initializing\n");

	result = base_init_module();
	if(result) return result;
	for(i = 0; i < payloads_n; i++)
	{
		/* NOTE(review): inner 'result' shadows the outer one — the
		 * 'return result' below returns the inner value, which is what
		 * is intended here, but the shadowing is easy to misread. */
		int result = payloads_init[i]();
		if(result)
		{
			/* Unwind payloads [i..0] in reverse order. */
			for(;i>=0; i--)
				payloads_exit[i]();
			base_exit_module();
			pr_err(COMPONENT_STRING "Cannot register all payloads.");
			return result;
			//not "goto fail" because it call all payloads_exit()
		}
	}
	/* Register with the base - must do this before the controller
	 * begins to respond to module load/unload notifications. */
	result = kedr_impl_controller_register(&controller);
	if (result < 0)
	{
		goto fail;
	}

	/* When looking for the target module, module_mutex must be locked */
	result = mutex_lock_interruptible(&module_mutex);
	if (result != 0)
	{
		KEDR_MSG(COMPONENT_STRING
			"failed to lock module_mutex\n");
		goto fail;
	}

	result = register_module_notifier(&detector_nb);
	if (result < 0)
	{
		goto unlock_and_fail;
	}

	/* Check if the target is already loaded — instrumenting an
	 * already-loaded target is unsupported, so bail out. */
	if (find_module(target_name) != NULL)
	{
		KEDR_MSG(COMPONENT_STRING
			"target module \"%s\" is already loaded\n",
			target_name);

		KEDR_MSG(COMPONENT_STRING
			"instrumenting already loaded target modules is not supported\n");
		result = -EEXIST;
		goto unlock_and_fail;
	}

	result = mutex_lock_interruptible(&controller_mutex);
	if (result != 0)
	{
		KEDR_MSG(COMPONENT_STRING
			"failed to lock controller_mutex\n");
		/* NOTE(review): this goes to 'fail' rather than
		 * 'unlock_and_fail', so module_mutex stays locked on this
		 * path — looks like a lock leak; confirm and fix upstream. */
		goto fail;
	}
	handle_module_notifications = 1;
	mutex_unlock(&controller_mutex);

	mutex_unlock(&module_mutex);

/* From now on, the controller will be notified when the target module
 * is loaded or have finished cleaning-up and is just about to unload
 * from memory. */
	return 0; /* success */

unlock_and_fail:
	mutex_unlock(&module_mutex);
fail:
	controller_cleanup_module();
	return result;
}
void disable_module_loading ( void ) { register_module_notifier(&nb); }