/*
 * Memory-access (execute) event callback.
 *
 * Fires on any execute access to the monitored page (registered by
 * cr3_callback over the page of kernel32!WaitForMultipleObjects —
 * see the mm_event setup there).  If the faulting RIP is a userspace
 * address for the guest's paging mode, the currently running thread is
 * hijacked and an INT3 breakpoint is planted at the faulting physical
 * address; otherwise the event is lifted for one single-step so the
 * guest can make progress, and then re-armed.
 */
void mm_callback(vmi_instance_t vmi, vmi_event_t *event) {
    struct injector *injector = event->data;
    reg_t rip, cr3, rsp;
    vmi_get_vcpureg(vmi, &rip, RIP, event->vcpu_id);
    vmi_get_vcpureg(vmi, &cr3, CR3, event->vcpu_id);
    vmi_get_vcpureg(vmi, &rsp, RSP, event->vcpu_id);
    vmi_pid_t pid = vmi_dtb_to_pid(vmi, cr3);
    /* NOTE(review): the "executing RIP" printed here is the faulting
       physical address (gfn<<12 + offset), not the RIP register read above. */
    printf("----- Memevent: PID %u executing RIP 0x%lx. Target PID: %u. Target RIP: 0x%lx. My event count: %u\n", pid, ((event->mem_event.gfn<<12) + event->mem_event.offset), injector->target_pid, injector->target_rip, injector->mm_count);

    /* Commit to the hijack only when RIP is below the kernel split
       (i.e. the fault happened in userspace code). */
    if ((PM2BIT(injector->pm) == BIT32 && rip < KERNEL32) ||
        (PM2BIT(injector->pm) == BIT64 && rip < KERNEL64))
    {
        injector->target_pid = pid;
        /* Physical address of the instruction that triggered the event. */
        injector->target_rip = (event->mem_event.gfn << 12) + event->mem_event.offset;

        hijack_thread(injector, vmi, event->vcpu_id, pid);

        /* Target acquired: tear down both this memory watch and the CR3 watch. */
        vmi_clear_event(vmi, event);
        vmi_clear_event(vmi, &injector->cr3_event);
        injector->mm_count++;

        /* Plant an INT3 (0xCC) at the target PA, preserving the original byte
           in injector->backup so it can be restored later. */
        uint8_t trap = 0xCC;
        vmi_read_8_pa(vmi, injector->target_rip, &injector->backup);
        vmi_write_8_pa(vmi, injector->target_rip, &trap);
        return;
    }

    /* Not a userspace hit: lift the trap, single-step once, then re-arm
       the event so we keep watching the page. */
    vmi_clear_event(vmi, event);
    vmi_step_event(vmi, event, event->vcpu_id, 1, NULL);
}
/*
 * Single-step event callback.
 *
 * Observes each stepped instruction; once execution lands at a userspace
 * RIP (below the kernel split for the guest's paging mode), it hijacks the
 * running thread, drops the single-step and CR3 watches, and plants an
 * INT3 breakpoint at the physical address of that RIP, saving the
 * original byte for later restoration.
 */
void ss_callback(vmi_instance_t vmi, vmi_event_t *event)
{
    reg_t cur_rip = 0, cur_cr3 = 0;
    vmi_get_vcpureg(vmi, &cur_rip, RIP, event->vcpu_id);
    vmi_get_vcpureg(vmi, &cur_cr3, CR3, event->vcpu_id);

    page_mode_t mode = vmi_get_page_mode(vmi);
    vmi_pid_t cur_pid = vmi_dtb_to_pid(vmi, cur_cr3);

    printf("----- Singlestep: CR3 0x%lx PID %u executing RIP 0x%lx\n",
           cur_cr3, cur_pid, cur_rip);

    /* Still in kernel space? Keep stepping. */
    int userspace = (PM2BIT(mode) == BIT32 && cur_rip < KERNEL32) ||
                    (PM2BIT(mode) == BIT64 && cur_rip < KERNEL64);
    if (!userspace)
        return;

    printf("Good RIP: 0x%lx\n", cur_rip);

    struct injector *inj = event->data;
    inj->ss_enabled = 0;
    inj->target_pid = cur_pid;
    inj->target_rip = vmi_pagetable_lookup(vmi, cur_cr3, cur_rip);

    hijack_thread(inj, vmi, event->vcpu_id, cur_pid);

    /* Target acquired: drop this event and the CR3 watch. */
    vmi_clear_event(vmi, event);
    vmi_clear_event(vmi, &inj->cr3_event);
    inj->mm_count++;

    /* Write an INT3 at the target PA, backing up the original byte. */
    uint8_t int3 = 0xCC;
    vmi_read_8_pa(vmi, inj->target_rip, &inj->backup);
    vmi_write_8_pa(vmi, inj->target_rip, &int3);
}
/*
 * Translate a virtual address to a physical address by walking the guest
 * pagetables rooted at dtb, consulting the V2P cache first.
 *
 * Returns the physical address on success, 0 on translation failure.
 */
addr_t vmi_pagetable_lookup (vmi_instance_t vmi, addr_t dtb, addr_t vaddr)
{
    page_info_t info = {0};
    status_t ret = VMI_FAILURE;

    /* check if entry exists in the cache */
    if (VMI_SUCCESS == v2p_cache_get(vmi, vaddr, dtb, &info.paddr)) {
        /* verify that address is still valid */
        uint8_t value = 0;
        if (VMI_SUCCESS == vmi_read_8_pa(vmi, info.paddr, &value)) {
            return info.paddr;
        }
        /* stale mapping: evict and fall through to a fresh walk */
        v2p_cache_del(vmi, vaddr, dtb);
    }

    if (vmi->arch_interface && vmi->arch_interface->v2p) {
        /* BUGFIX: capture the walk's status instead of discarding it,
           matching vmi_pagetable_lookup_cache(). */
        ret = vmi->arch_interface->v2p(vmi, dtb, vaddr, &info);
    } else {
        errprint("Arch interface not initialized, can't use vmi_pagetable_lookup!\n");
    }

    /* add this to the cache — only when the walk actually succeeded,
       so a failed translation never poisons the cache */
    if (VMI_SUCCESS == ret && info.paddr) {
        v2p_cache_set(vmi, vaddr, dtb, info.paddr);
    }

    return info.paddr;
}
/*
 * Cached virtual-to-physical translation that reports the outcome through
 * a status code (with the physical address returned via *paddr), unlike
 * vmi_pagetable_lookup_extended() which bypasses the cache.
 *
 * TODO: Should this eventually replace vmi_pagetable_lookup() in the API?
 */
status_t vmi_pagetable_lookup_cache(
    vmi_instance_t vmi,
    addr_t dtb,
    addr_t vaddr,
    addr_t *paddr)
{
    if (!paddr)
        return VMI_FAILURE;

    *paddr = 0;

    /* Fast path: try the V2P cache first. */
    if (VMI_SUCCESS == v2p_cache_get(vmi, vaddr, dtb, paddr)) {
        /* Confirm the cached mapping is still readable. */
        uint8_t probe = 0;
        if (VMI_SUCCESS == vmi_read_8_pa(vmi, *paddr, &probe))
            return VMI_SUCCESS;

        /* Stale entry: evict it before re-walking the tables. */
        if (VMI_FAILURE == v2p_cache_del(vmi, vaddr, dtb))
            return VMI_FAILURE;
    }

    /* Slow path: walk the pagetables via the arch interface. */
    page_info_t info = { .vaddr = vaddr, .dtb = dtb };
    status_t status = VMI_FAILURE;

    if (vmi->arch_interface && vmi->arch_interface->v2p) {
        status = vmi->arch_interface->v2p(vmi, dtb, vaddr, &info);
    } else {
        errprint("Invalid paging mode during vmi_pagetable_lookup\n");
        status = VMI_FAILURE;
    }

    /* Cache successful translations for subsequent lookups. */
    if (VMI_SUCCESS == status) {
        *paddr = info.paddr;
        v2p_cache_set(vmi, vaddr, dtb, info.paddr);
    }

    return status;
}
/*
 * CR3-write event callback — the entry point of the injection state machine.
 *
 * When the target process is scheduled (its CR3 is loaded):
 *   - 32-bit guests: register an execute memory event on the page of
 *     kernel32!WaitForMultipleObjects (handled by mm_callback).
 *   - otherwise: read the return-to-userspace RIP out of the current
 *     thread's kernel trap frame (via the KPCR/KPRCB chain) and plant an
 *     INT3 there, saving the original byte.
 * When any other process is scheduled, tear down the memory/single-step
 * watches and restore the patched userspace-return byte.
 */
void cr3_callback(vmi_instance_t vmi, vmi_event_t *event) {
    //printf("CR3 changed to 0x%lx\n", event->reg_event.value);

    struct injector *injector = event->data;
    addr_t thread = 0, kpcrb_offset = 0, trapframe = 0;
    /* NOTE(review): userspace_return_pa is declared but never used. */
    addr_t userspace_return_va = 0, userspace_return_pa = 0;
    reg_t fsgs = 0;

    if (event->reg_event.value == injector->target_cr3) {
        /* Only act while no breakpoint target has been found yet. */
        if (!injector->target_rip) {

            /* TODO: Trap frame approach only works on 64-bit */
            if (PM2BIT(injector->pm) == BIT32) {
                if(!injector->mm_enabled) {
                    vmi_pid_t pid = vmi_dtb_to_pid(vmi, event->reg_event.value);
                    addr_t waitfor = sym2va(vmi, pid, "kernel32.dll", "WaitForMultipleObjects");
                    //printf("PID %u waitfor 0x%lx\n", pid, waitfor);
                    if (!waitfor) {
                        //injector->clone->interrupted = 1;
                        printf("Target process doesn't have kernel32.dll mapped!\n");
                        return;
                    }

                    /* Watch for execution of the WaitForMultipleObjects page;
                       mm_callback takes over from there. */
                    injector->mm_enabled=1;
                    memset(&injector->mm_event, 0, sizeof(vmi_event_t));
                    injector->mm_event.type = VMI_EVENT_MEMORY;
                    injector->mm_event.mem_event.physical_address = vmi_translate_uv2p(vmi, waitfor, pid);
                    injector->mm_event.mem_event.npages = 1;
                    injector->mm_event.mem_event.granularity=VMI_MEMEVENT_PAGE;
                    injector->mm_event.mem_event.in_access = VMI_MEMACCESS_X;
                    injector->mm_event.callback=mm_callback;
                    injector->mm_event.data = injector;
                    vmi_register_event(vmi, &injector->mm_event);
                }
                /* 32-bit path always returns here, so the trap-frame code
                   below only ever runs for 64-bit guests. */
                return;
            }

            /* Read return to userspace RIP out of the stack trap frame.
             */
            /* Locate the KPRCB via the FS (32-bit) / GS (64-bit) base.
               NOTE(review): the BIT32 branch here is unreachable — the
               BIT32 case above always returns before this point. */
            if (PM2BIT(injector->pm) == BIT32) {
                vmi_get_vcpureg(vmi, &fsgs, FS_BASE, event->vcpu_id);
                kpcrb_offset = offsets[KPCR_PRCBDATA];
            } else {
                vmi_get_vcpureg(vmi, &fsgs, GS_BASE, event->vcpu_id);
                kpcrb_offset = offsets[KPCR_PRCB];
            }

            /* KPCR -> KPRCB.CurrentThread -> KTHREAD.TrapFrame */
            vmi_read_addr_va(vmi, fsgs + kpcrb_offset + offsets[KPRCB_CURRENTTHREAD], 0, &thread);
            vmi_read_addr_va(vmi, thread + offsets[KTHREAD_TRAPFRAME], 0, &trapframe);
            //printf("Trap frame @ 0x%lx\n", trapframe);

            /* The saved userspace instruction pointer lives at different
               offsets in the 32- and 64-bit trap frames. */
            if (PM2BIT(injector->pm) == BIT32) {
                vmi_read_addr_va(vmi, trapframe + offsets[KTRAP_FRAME_EIP], 0, &userspace_return_va);
            } else {
                vmi_read_addr_va(vmi, trapframe + offsets[KTRAP_FRAME_RIP], 0, &userspace_return_va);
            }

            injector->userspace_return = vmi_pagetable_lookup(vmi, event->reg_event.value, userspace_return_va);
            //printf("Userspace return @ VA 0x%lx -> PA 0x%lx\n", userspace_return_va, injector->userspace_return);

            /* Plant an INT3 at the userspace return address, saving the
               original byte for restoration when another process runs. */
            uint8_t trap = 0xCC;
            vmi_read_8_pa(vmi, injector->userspace_return, &injector->userspace_return_backup);
            vmi_write_8_pa(vmi, injector->userspace_return, &trap);

            /*if (!injector->ss_enabled) {
                printf("My target process is executing, registering singlestep\n");
                injector->ss_enabled = 1;
                memset(&injector->ss_event, 0, sizeof(vmi_event_t));
                injector->ss_event.type = VMI_EVENT_SINGLESTEP;
                injector->ss_event.callback = ss_callback;
                injector->ss_event.data = injector;
                SET_VCPU_SINGLESTEP(injector->ss_event.ss_event, event->vcpu_id);
                vmi_register_event(vmi, &injector->ss_event);
            }*/
        }
    } else {
        //printf("CR3 0x%lx is executing, not my process!\n",
        //       event->reg_event.value);

        /* Some other process is being scheduled: stand down all watches
           and undo the breakpoint patch so the guest runs unmodified. */
        if (injector->mm_enabled) {
            injector->mm_enabled = 0;
            vmi_clear_event(vmi, &injector->mm_event);
        }
        if (injector->ss_enabled) {
            //printf("\tDisabling singlestep\n");
            injector->ss_enabled = 0;
            vmi_clear_event(vmi, &injector->ss_event);
        }
        if (injector->userspace_return) {
            /* Restore the original byte over the planted INT3. */
            vmi_write_8_pa(vmi, injector->userspace_return, &injector->userspace_return_backup);
            injector->userspace_return_backup = 0;
            injector->userspace_return = 0;
        }
    }
}