// Read/write from a Mach data segment. static int machsegrw(Map *map, Seg *seg, uvlong addr, void *v, uint n, int isr) { mach_port_t task; int r; task = idtotask(map->pid); if(task == -1) return -1; if(isr){ vm_size_t nn; nn = n; if(me(vm_read_overwrite(task, addr, n, (uintptr)v, &nn)) < 0) return -1; return nn; }else{ r = vm_write(task, addr, (uintptr)v, n); if(r == KERN_INVALID_ADDRESS){ // Happens when writing to text segment. // Change protections. if(me(vm_protect(task, addr, n, 0, VM_PROT_WRITE|VM_PROT_READ|VM_PROT_EXECUTE)) < 0){ fprint(2, "vm_protect: %s\n", r); return -1; } r = vm_write(task, addr, (uintptr)v, n); } if(r != 0){ me(r); return -1; } return n; } }
/* Copy len bytes from buf into the target task's address space at addr.
 * Returns true on success, false if the kernel rejects the write. */
static bool tsk_write(task_t task, vm_address_t addr, const ut8 *buf, int len) {
	kern_return_t rc = vm_write (task, addr, (vm_offset_t)buf,
		(mach_msg_type_number_t)len);
	return rc == KERN_SUCCESS;
}
// Write the value `data` into `target` at `address`.
// Returns 1 on success, 0 on any vm_write failure.
int Utils::WriteMemAndDeAllocate(task_t target, mach_vm_address_t address, type data) {
    kern_return_t kr = vm_write(target, address, (vm_offset_t)&data, sizeof(data));
    return (kr == KERN_SUCCESS) ? 1 : 0;
}
/* Write len bytes from buf into task memory at addr.
 * Returns false when the target memory is not mapped or not writable. */
static bool tsk_write(task_t task, vm_address_t addr, const ut8 *buf, int len) {
	if (vm_write (task, addr, (vm_offset_t)buf,
			(mach_msg_type_number_t)len) != KERN_SUCCESS) {
		/* the memory is not mapped */
		return false;
	}
	return true;
}
/* Write `len` bytes from `buff` into the traced task at `addr`.
 * Always returns len; vm_protect/vm_write failures are only reported
 * via eprintf, never propagated to the caller.
 * NOTE(review): page protections are widened but never restored
 * afterwards (see the XXX below) — verify whether callers depend on
 * the original protections. */
static int mach_write_at(RIOMach *riom, const void *buff, int len, ut64 addr) {
	task_t task = riom->task;
#if 0
	/* get paVM_PROT_EXECUTEge perms */
	kern_return_t err;
	int ret, _basic64[VM_REGION_BASIC_INFO_COUNT_64];
	vm_region_basic_info_64_t basic64 = (vm_region_basic_info_64_t)_basic64;
	mach_msg_type_number_t infocnt;
	const int pagesize = 4096;
	vm_offset_t addrbase;
	mach_port_t objname;
	vm_size_t size = pagesize;
	eprintf (" 0x%llx\n", addr);
	infocnt = VM_REGION_BASIC_INFO_COUNT_64;
	addrbase = addr;
	size = len;
	// intentionally use VM_REGION_BASIC_INFO and get up-converted
	ret = vm_region_64 (task, &addrbase, &size, VM_REGION_BASIC_INFO_64,
		(vm_region_info_t)basic64, &infocnt, &objname);
	eprintf ("+ PERMS (%x) %llx\n", basic64->protection, addr);
	if (ret == -1) {
		eprintf ("Cant get vm region info\n");
	}
#endif
	/* get page perms */
	// XXX SHOULD RESTORE PERMS LATER!!!
	// First try to gain copy/read/write/execute; only if that request
	// fails does the nested if fall back to asking for write-only.
	if (vm_protect (task, addr, len, 0, VM_PROT_COPY | VM_PROT_READ | VM_PROT_WRITE | VM_PROT_EXECUTE) != KERN_SUCCESS)
		//if (mach_vm_protect (task, addr, len, 0, VM_PROT_READ | VM_PROT_WRITE) != KERN_SUCCESS)
		if (vm_protect (task, addr, len, 0, VM_PROT_WRITE) != KERN_SUCCESS)
			eprintf ("cant change page perms to rw at 0x%"PFMT64x" with len= %d\n", addr, len);
	if (vm_write (task, (vm_address_t)addr, (vm_offset_t)buff,
			(mach_msg_type_number_t)len) != KERN_SUCCESS)
		eprintf ("cant write on memory\n");
	//if (vm_read_overwrite(task, addr, 4, buff, &sz)) { eprintf ("cannot overwrite\n"); }
#if 0
	eprintf ("addrbase: %x\n", addrbase);
	eprintf ("change prems to %x\n", basic64->protection);
	int prot = 0;
	if (basic64->protection & 1) prot |= VM_PROT_EXECUTE;
	if (basic64->protection & 2) prot |= VM_PROT_WRITE;
	if (basic64->protection & 4) prot |= VM_PROT_READ;
	printf ("%d vs %d\n", prot, basic64->protection);
	int prot = VM_PROT_READ | VM_PROT_EXECUTE;
	if (vm_protect (task, addr, len, 0, prot) != KERN_SUCCESS) {
		//basic64->protection) != KERN_SUCCESS) {
		eprintf ("Oops (0x%"PFMT64x") error (%s)\n", addr,
			MACH_ERROR_STRING (err));
		eprintf ("cant change page perms to rx\n");
	}
#endif
	return len;
}
// inefficient, hacky, works int main(int argc, char **argv) { vm_address_t addr = (vm_address_t) strtoll(argv[1], NULL, 16); assert(addr); mach_port_name_t kernel_task; assert(!task_for_pid(mach_task_self(), 0, &kernel_task)); uint32_t val = 0x00100008 | 0x100; assert(!vm_write(kernel_task, addr, (vm_offset_t) &val, sizeof(val))); // I don't want to leave the kernel in this state no matter what. int ret, fail = 0; fail |= ret = mkdir("/private/var", 0755); printf("mkdir 1: %d\n", ret); fail |= ret = mkdir("/private/var/db", 0755); printf("mkdir 2: %d\n", ret); fail |= ret = mkdir("/private/var/db/.launchd_use_gmalloc", 0755); printf("mkdir 3: %d\n", ret); val &= ~0x100; assert(!vm_write(kernel_task, (vm_address_t) addr, (vm_offset_t) &val, sizeof(val))); assert(!ret); return 0; }
/* VM_IDENTIFY -- Identify the current process to the VM cache server when
 * opening a new client connection.
 */
static void
vm_identify (void)
{
	char reply[SZ_CMDBUF];

	/* Send our client name to the daemon; a failed write tears the
	 * connection down. */
	if (vm_write (vm_server, vm_client, strlen(vm_client)) < 0)
	    vm_shutdown();

	/* Wait for the server's acknowledgement. */
	if (read (vm_server, reply, SZ_CMDBUF) <= 0) {
	    if (vm_debug)
		fprintf (stderr, "vmclient (%s): server not responding\n",
		    vm_client);
	    vm_shutdown();
	}
}
/* VM_SHUTDOWN -- Called at process exit to shutdown the VMcached server
 * connection.
 */
static void
vm_shutdown (void)
{
	XINT zstat;
	XINT chan = vm_server;
	extern int ZCLSND(XINT *fd, XINT *status);

	if (vm_server) {
	    if (vm_debug)
		fprintf (stderr, "vmclient (%s): shutdown server connection\n",
		    vm_client);
	    /* Tell the daemon we are going away, then close the channel. */
	    vm_write (vm_server, "bye\n", 4);
	    ZCLSND (&chan, &zstat);
	}
	vm_server = 0;
}
int main(int argc, char *argv[]) { mach_port_t process_to_write; kern_return_t error; if(getuid() && geteuid()) { printf("You need to be root to vm_write!\n"); } else { error = task_for_pid(mach_task_self(), PID, &process_to_write); if ((error != KERN_SUCCESS) || !MACH_PORT_VALID(process_to_write)) { printf("Error getting the process!\n"); } if(vm_write(process_to_write, (vm_address_t) ADDR, (vm_address_t)&value, sizeof(value))) { printf("Ooops!\n"); } printf("Done! :-)\n"); } return 0; }
/* MachTask.vm_write(address, buffer) -> None
 * Write the bytes of `buffer` (a str) into the task at `address`.
 * Raises MachError (with mach_error_string text) on vm_write failure.
 */
static PyObject *MachTask_vm_write(PyObject *self, PyObject *args) {
	kern_return_t err;
	unsigned int address;  /* NOTE(review): 32-bit — truncates 64-bit addresses; confirm intended */
	PyObject *buffer;

	if (!PyArg_ParseTuple(args, "IO", &address, &buffer))
		return NULL;
	if (!PyString_Check(buffer)) {
		PyErr_SetString(PyExc_Exception,
			"ERROR parsing args to vm_write for mach task (2nd arg MUST be string)");
		return NULL;
	}
	err = vm_write(((MachPort *)self)->port, address,
		(vm_offset_t)PyString_AS_STRING(buffer), PyString_GET_SIZE(buffer));
	if (err != KERN_SUCCESS) {
		/* BUG fix: removed a dead sprintf of err into a 10-byte buffer;
		 * the result was never used and a large negative err ("-2147483648")
		 * needs 12 bytes including the terminator. */
		PyErr_SetString(MachError, mach_error_string(err));
		return NULL;
	}
	Py_RETURN_NONE;
}
/* VM_RESERVESPACE -- Reserve VM space for file data. This directive is * useful if VM is being used but the VM space could not be preallocated * at file access time, e.g., when opening a new file. */ int vm_reservespace (long nbytes) { char buf[SZ_CMDBUF]; int status; if (!vm_initialized) vm_initialize(); if (!vm_enabled || vm_dioenabled) return (-1); if (vm_connect() < 0) return (-1); /* Format and send the file access directive to the VMcache daemon. * The status from the server is returned as an ascii integer value * on the same socket. */ sprintf (buf, "reservespace %ld\n", nbytes); if (vm_debug) fprintf (stderr, "vmclient (%s): %s", vm_client, buf); if (vm_write (vm_server, buf, strlen(buf)) < 0) { vm_shutdown(); return (-1); } if (read (vm_server, buf, SZ_CMDBUF) <= 0) { if (vm_debug) fprintf (stderr, "vmclient (%s): server not responding\n", vm_client); vm_shutdown(); return (-1); } status = atoi (buf); return (status); }
// Inject a dylib into a target task ("self" or a pid given in argv[2]) by
// hand-rolled x86_64 shellcode: allocate remote pages for the library path,
// a 64K stack, and a small code stub, then start a remote thread at
// _pthread_set_self with a pre-seeded stack of return addresses.
int main(int argc, const char *argv[]) {
	if (argc < 3)
		return -1;

	kern_return_t ret;          // NOTE(review): declared but never used
	vm_address_t r_libname;     // remote copy of the dylib path
	vm_address_t stack;         // remote stack region
	vm_address_t code;          // remote code stub
	thread_t thread;
	x86_thread_state64_t state;
	mach_port_t task;
	const char *libname = argv[1];
	unsigned long long stackContents[5], stack_size, i; // the stack contents has to be an odd number of ull's for some reason (some aligning issue) in dlopen
	unsigned char codeContents[38];

	bzero(codeContents, sizeof(codeContents));
	bzero(stackContents, sizeof(stackContents));

	codeContents[0] = 0x55; // push rbp
	codeContents[1] = 0x48;
	codeContents[2] = 0x89;
	codeContents[3] = 0xe5; // mov %rsp, %rbp
	codeContents[4] = 0x48;
	codeContents[5] = 0xbf; // mov r_libname, %rdi

	// Return-address chain consumed by the stub: dlopen, then
	// mach_thread_self, then thread_suspend.
	stackContents[1] = (unsigned long long)dlopen;
	stackContents[2] = (unsigned long long)mach_thread_self;
	stackContents[4] = (unsigned long long)thread_suspend;
	stack_size = 65536;

	if (strcmp(argv[2], "self") == 0)
		task = mach_task_self();
	else
		ENSURE_SUCCESS(task_for_pid(mach_task_self(), atoi(argv[2]), &task));

	ENSURE_SUCCESS(vm_allocate(task, &r_libname, strlen(libname) + 1, true));
	ENSURE_SUCCESS(vm_allocate(task, &stack, stack_size, true));
	ENSURE_SUCCESS(vm_allocate(task, &code, sizeof(codeContents), true));

	stackContents[0] = code;
	// +27 is the offset of the second stub written below (bytes 27..30).
	stackContents[3] = (unsigned long long)code + 27;

	ENSURE_SUCCESS(vm_write(task, r_libname, (vm_offset_t)libname, strlen(libname) + 1));
	ENSURE_SUCCESS(vm_write(task, stack + stack_size-sizeof(stackContents), (vm_offset_t)stackContents, sizeof(stackContents)));

	// Patch the 8-byte immediate of "mov $r_libname, %rdi" (bytes 6..13).
	memcpy(&codeContents[6], &r_libname, sizeof(unsigned long long));
	codeContents[14] = 0x48;
	codeContents[15] = 0xbe;
	codeContents[16] = 0x2; // mov 0x2, %rsi
	codeContents[24] = 0x5d; // pop %rbp
	codeContents[25] = 0x90; // nop / int 3 depending if im debugging
	codeContents[26] = 0xc3; // ret
	// 48 89 c7 c3: presumably mov %rax,%rdi; ret (forwards dlopen's
	// result) — TODO confirm against the intended call chain.
	codeContents[27] = 0x48;
	codeContents[28] = 0x89;
	codeContents[29] = 0xc7;
	codeContents[30] = 0xc3;

	ENSURE_SUCCESS(vm_write(task, code, (vm_offset_t)codeContents, sizeof(codeContents)));
	ENSURE_SUCCESS(vm_protect(task, code, sizeof(codeContents), false, VM_PROT_EXECUTE | VM_PROT_READ));

	printf("Created code region at %p:\n", (void *)code);
	for (i = 0; i < sizeof(codeContents); i++) {
		printf("0x%02x ", codeContents[i]);
	}
	puts("");

	printf("Created stack at %p with top of stack at %p\n", (void*)stack, (void*)(stack + stack_size));
	for (i = 0; i < sizeof(stackContents) / sizeof(stackContents[0]); i++) {
		printf("0x%02llx:\t0x%02llx\n", (stack + stack_size - sizeof(stackContents) + (i * sizeof(unsigned long long))), stackContents[i]);
	}

	bzero(&state, sizeof(state));
	state.__rip = (uint64_t)dlsym(RTLD_DEFAULT, "_pthread_set_self");
	state.__rdi = stack;
	state.__rsp = stack + stack_size-sizeof(stackContents); // end of stack minus returns
	state.__rbp = state.__rsp;
	printf("Found _pthread_set_self at %p\n", (void *)state.__rip);

	ENSURE_SUCCESS(thread_create_running(task, x86_THREAD_STATE64, (thread_state_t)(&state), x86_THREAD_STATE64_COUNT, &thread));

	if (strcmp(argv[2], "self") == 0) {
		int rv = pthread_join(*(pthread_t *)stack, NULL);
		if (rv) {
			fprintf(stderr, "pthread_join: (%d) %s\n", rv, strerror(rv));
			sleep(1); // let the dylib actually load in the other thread, it wouldn't appear that there exists mach thread waiting, and I'm too lazy to create a semaphore and using the value of `stack` for a pthread_t in pthread_join doesn't work
		}
	}
}
/* mach_inject -- copy a code image containing threadEntry (plus an optional
 * parameter block) into targetProcess and start a remote thread running it.
 * NOTE(review): this chunk is truncated — the non-PPC thread-setup branch
 * and the function's cleanup/return are outside this view. */
mach_error_t
mach_inject(
		const mach_inject_entry	threadEntry,
		const void				*paramBlock,
		size_t					paramSize,
		pid_t					targetProcess,
		vm_size_t				stackSize )
{
	assert( threadEntry );
	assert( targetProcess > 0 );
	assert( stackSize == 0 || stackSize > 1024 );

	// Find the image.
	const void		*image;
	unsigned long	imageSize;
	unsigned int	jumpTableOffset;
	unsigned int	jumpTableSize;
	mach_error_t	err = machImageForPointer( threadEntry, &image, &imageSize, &jumpTableOffset, &jumpTableSize );

	// Initialize stackSize to default if requested.
	if( stackSize == 0 )
		/** @bug We only want an 8K default, fix the plop-in-the-middle code below. */
		stackSize = 16 * 1024;

	// Convert PID to Mach Task ref.
	mach_port_t	remoteTask = 0;
	if( !err ) {
		err = task_for_pid( mach_task_self(), targetProcess, &remoteTask );
#if defined(__i386__)
		if (err == 5) fprintf(stderr, "Could not access task for pid %d. You probably need to add user to procmod group\n", targetProcess);
#endif
	}

	/** @todo Would be nice to just allocate one block for both the remote stack
		*and* the remoteCode (including the parameter data block once that's
		written. */

	// Allocate the remoteStack.
	vm_address_t remoteStack = (vm_address_t)NULL;
	if( !err )
		err = vm_allocate( remoteTask, &remoteStack, stackSize, 1 );

	// Allocate the code.
	vm_address_t remoteCode = (vm_address_t)NULL;
	if( !err )
		err = vm_allocate( remoteTask, &remoteCode, imageSize, 1 );
	if( !err ) {
		ASSERT_CAST( pointer_t, image );
#if defined (__ppc__) || defined (__ppc64__)
		err = vm_write( remoteTask, remoteCode, (pointer_t) image, imageSize );
#elif defined (__i386__)
		// on intel, jump table use relative jump instructions (jmp), which means
		// the offset needs to be corrected. We thus copy the image and fix the offset by hand.
		ptrdiff_t fixUpOffset = (ptrdiff_t) (image - remoteCode);
		void * fixedUpImage = fixedUpImageFromImage(image, imageSize, jumpTableOffset, jumpTableSize, fixUpOffset);
		err = vm_write( remoteTask, remoteCode, (pointer_t) fixedUpImage, imageSize );
		free(fixedUpImage);
#endif
	}

	// Allocate the paramBlock if specified.
	vm_address_t remoteParamBlock = (vm_address_t)NULL;
	if( !err && paramBlock != NULL && paramSize ) {
		err = vm_allocate( remoteTask, &remoteParamBlock, paramSize, 1 );
		if( !err ) {
			ASSERT_CAST( pointer_t, paramBlock );
			err = vm_write( remoteTask, remoteParamBlock, (pointer_t) paramBlock, paramSize );
		}
	}

	// Calculate offsets.
	ptrdiff_t	threadEntryOffset, imageOffset;
	if( !err ) {
		//assert( (void*)threadEntry >= image && (void*)threadEntry <= (image+imageSize) );
		ASSERT_CAST( void*, threadEntry );
		threadEntryOffset = ((void*) threadEntry) - image;
		ASSERT_CAST( void*, remoteCode );
		imageOffset = ((void*) remoteCode) - image;
	}

	// Allocate the thread.
	thread_act_t remoteThread;
#if defined (__ppc__) || defined (__ppc64__)
	if( !err ) {
		ppc_thread_state_t remoteThreadState;

		/** @bug Stack math should be more sophisticated than this (ala redzone). */
		remoteStack += stackSize / 2;

		bzero( &remoteThreadState, sizeof(remoteThreadState) );

		ASSERT_CAST( unsigned int, remoteCode );
		remoteThreadState.srr0 = (unsigned int) remoteCode;
		remoteThreadState.srr0 += threadEntryOffset;
		assert( remoteThreadState.srr0 < (remoteCode + imageSize) );

		ASSERT_CAST( unsigned int, remoteStack );
		remoteThreadState.r1 = (unsigned int) remoteStack;

		ASSERT_CAST( unsigned int, imageOffset );
		remoteThreadState.r3 = (unsigned int) imageOffset;

		ASSERT_CAST( unsigned int, remoteParamBlock );
		remoteThreadState.r4 = (unsigned int) remoteParamBlock;

		ASSERT_CAST( unsigned int, paramSize );
		remoteThreadState.r5 = (unsigned int) paramSize;

		ASSERT_CAST( unsigned int, 0xDEADBEEF );
		remoteThreadState.lr = (unsigned int) 0xDEADBEEF;

#if 0
		printf( "remoteCode start: %p\n", (void*) remoteCode );
		printf( "remoteCode size: %ld\n", imageSize );
		printf( "remoteCode pc: %p\n", (void*) remoteThreadState.srr0 );
		printf( "remoteCode end: %p\n", (void*) (((char*)remoteCode)+imageSize) );
		fflush(0);
#endif
		err = thread_create_running( remoteTask, PPC_THREAD_STATE, (thread_state_t) &remoteThreadState, PPC_THREAD_STATE_COUNT, &remoteThread );
	}
int main (int argc, char** argv) { // check if we have correct privileges struct group *procmodGroup = getgrnam("procmod"); if (getuid() != 0 && getegid() != procmodGroup->gr_gid) { fprintf(stderr, "Must be run as root or with procmod\n"); exit(1); } //find the info of the dock process kinfo_proc *procList = NULL; size_t procCount = 0; getBSDProcessList(&procList, &procCount); pid_t pid = -1; int i; for (i=0; i<procCount; i++) { if (strcmp(procList[i].kp_proc.p_comm, "Dock") == 0) { pid = procList[i].kp_proc.p_pid; break; } } assert(pid != -1); printf("Dock pid is %d\n", pid); cpu_type_t arch = getProcessArchitecture(pid); char* dockVersion = getBundleVersion("/System/Library/CoreServices/Dock.app"); printf("Dock version is %s\n", dockVersion); mach_vm_address_t hackOffset; if (arch == (CPU_TYPE_X86 | CPU_ARCH_ABI64)) { if (strcmp(dockVersion, "1040.10") == 0) { hackOffset = 0x593ed; } else if (strcmp(dockVersion, "1040.36") == 0) { hackOffset = 0x56afa; } else { fprintf(stderr, "Dock version not supported\n"); exit(1); } } else if (arch == CPU_TYPE_X86) { if (strcmp(dockVersion, "1040.10") == 0) { hackOffset = 0x583ab; } else { fprintf(stderr, "Dock version not supported\n"); exit(1); } } else { fprintf(stderr, "Dock architecture not supported\n"); exit(1); } free(dockVersion); mach_port_name_t port; if(task_for_pid(mach_task_self(), pid, &port)) { fprintf(stderr, "Can't open Dock task\n"); exit(1); } mach_vm_address_t baseAddress = getTaskBaseAddress(port); printf("Suspending Dock\n"); if (task_suspend(port)) { fprintf(stderr, "Can't suspend Dock\n"); exit(1); } mach_vm_address_t writeAddress = baseAddress + hackOffset; //make the region writeable printf("Making 0x%llx writable\n", writeAddress); if (vm_protect(port, writeAddress, 1, 0, VM_PROT_READ | VM_PROT_WRITE | VM_PROT_EXECUTE)) { fprintf(stderr, "Can't set Dock memory protection\n"); task_resume(port); exit(1); } pointer_t data = (pointer_t)"\6"; printf("Writing 0x6 to 0x%llx\n", writeAddress); 
if(vm_write(port, writeAddress, data, 1)) { fprintf(stderr, "Can't write to Dock memory\n"); task_resume(port); exit(1); } printf("Resuming Dock\n"); if (task_resume(port)) { fprintf(stderr, "Can't resume Dock\n"); exit(1); } printf("Done\n"); return 0; }
/* mach_inject (PPC variant) -- copy the code image containing threadEntry
 * (plus an optional parameter block) into targetProcess and start a remote
 * thread running it.
 * NOTE(review): this chunk is truncated — the function's tail (cleanup and
 * return) is outside this view. */
mach_error_t
mach_inject(
		const mach_inject_entry	threadEntry,
		const void				*paramBlock,
		size_t					paramSize,
		pid_t					targetProcess,
		vm_size_t				stackSize )
{
	;//assertCodePtr( threadEntry );
	;//assertPtrIfNotNull( paramBlock );
	;//assertPositive( targetProcess );
	;//assertIsTrue( stackSize == 0 || stackSize > 1024 );

	// Find the image.
	const void		*image;
	unsigned long	imageSize;
	mach_error_t	err = machImageForPointer( threadEntry, &image, &imageSize );

	// Initialize stackSize to default if requested.
	if( stackSize == 0 )
		/** @bug We only want an 8K default, fix the plop-in-the-middle code below. */
		stackSize = 16 * 1024;

	// Convert PID to Mach Task ref.
	mach_port_t	remoteTask = 0;
	if( !err )
		err = task_for_pid( mach_task_self(), targetProcess, &remoteTask );

	/** @todo Would be nice to just allocate one block for both the remote stack
		*and* the remoteCode (including the parameter data block once that's
		written. */

	// Allocate the remoteStack.
	vm_address_t remoteStack = 0;
	if( !err )
		err = vm_allocate( remoteTask, &remoteStack, stackSize, 1 );

	// Allocate the code.
	vm_address_t remoteCode = 0;
	if( !err )
		err = vm_allocate( remoteTask, &remoteCode, imageSize, 1 );
	if( !err ) {
		ASSERT_CAST( pointer_t, image );
		err = vm_write( remoteTask, remoteCode, (pointer_t) image, imageSize );
	}

	// Allocate the paramBlock if specified.
	vm_address_t remoteParamBlock = 0;
	if( !err && paramBlock != NULL && paramSize ) {
		err = vm_allocate( remoteTask, &remoteParamBlock, paramSize, 1 );
		if( !err ) {
			ASSERT_CAST( pointer_t, paramBlock );
			err = vm_write( remoteTask, remoteParamBlock, (pointer_t) paramBlock, paramSize );
		}
	}

	// Calculate offsets.
	ptrdiff_t	threadEntryOffset, imageOffset;
	if( !err ) {
		;//assertIsWithinRange( threadEntry, image, image+imageSize );
		ASSERT_CAST( void*, threadEntry );
		threadEntryOffset = ((void*) threadEntry) - image;
		ASSERT_CAST( void*, remoteCode );
		imageOffset = ((void*) remoteCode) - image;
	}

	// Allocate the thread.
	thread_act_t remoteThread;
	if( !err ) {
		ppc_thread_state_t remoteThreadState;

		/** @bug Stack math should be more sophisticated than this (ala redzone). */
		remoteStack += stackSize / 2;

		bzero( &remoteThreadState, sizeof(remoteThreadState) );

		ASSERT_CAST( unsigned int, remoteCode );
		remoteThreadState.srr0 = (unsigned int) remoteCode;
		remoteThreadState.srr0 += threadEntryOffset;
		assert( remoteThreadState.srr0 < (remoteCode + imageSize) );

		ASSERT_CAST( unsigned int, remoteStack );
		remoteThreadState.r1 = (unsigned int) remoteStack;

		ASSERT_CAST( unsigned int, imageOffset );
		remoteThreadState.r3 = (unsigned int) imageOffset;

		ASSERT_CAST( unsigned int, remoteParamBlock );
		remoteThreadState.r4 = (unsigned int) remoteParamBlock;

		ASSERT_CAST( unsigned int, paramSize );
		remoteThreadState.r5 = (unsigned int) paramSize;

		ASSERT_CAST( unsigned int, 0xDEADBEEF );
		remoteThreadState.lr = (unsigned int) 0xDEADBEEF;

		//printf( "remoteCode start: %p\n", (void*) remoteCode );
		//printf( "remoteCode size: %ld\n", imageSize );
		//printf( "remoteCode pc: %p\n", (void*) remoteThreadState.srr0 );
		//printf( "remoteCode end: %p\n", (void*) (((char*)remoteCode)+imageSize) );
		fflush(0);

		err = thread_create_running( remoteTask, PPC_THREAD_STATE, (thread_state_t) &remoteThreadState, PPC_THREAD_STATE_COUNT, &remoteThread );
	}
void InjectLibrary(pid_t pid) { const char *library(CY_LIBRARY); static const size_t Stack_(8 * 1024); size_t length(strlen(library) + 1), depth(sizeof(Baton) + length); depth = (depth + sizeof(uintptr_t) + 1) / sizeof(uintptr_t) * sizeof(uintptr_t); CYPool pool; uint8_t *local(reinterpret_cast<uint8_t *>(apr_palloc(pool, depth))); Baton *baton(reinterpret_cast<Baton *>(local)); baton->__pthread_set_self = &__pthread_set_self; baton->pthread_create = &pthread_create; baton->pthread_join = &pthread_join; baton->dlopen = &dlopen; baton->dlerror = &dlerror; baton->dlsym = &dlsym; baton->mach_thread_self = &mach_thread_self; baton->thread_terminate = &thread_terminate; baton->pid = getpid(); memcpy(baton->library, library, length); vm_size_t size(depth + Stack_); mach_port_t self(mach_task_self()), task; _krncall(task_for_pid(self, pid, &task)); vm_address_t stack; _krncall(vm_allocate(task, &stack, size, true)); vm_address_t data(stack + Stack_); vm_write(task, data, reinterpret_cast<vm_address_t>(baton), depth); thread_act_t thread; _krncall(thread_create(task, &thread)); thread_state_flavor_t flavor; mach_msg_type_number_t count; size_t push; Trampoline *trampoline; #if defined(__arm__) trampoline = &Trampoline_arm_; arm_thread_state_t state; flavor = ARM_THREAD_STATE; count = ARM_THREAD_STATE_COUNT; push = 0; #elif defined(__i386__) trampoline = &Trampoline_i386_; i386_thread_state_t state; flavor = i386_THREAD_STATE; count = i386_THREAD_STATE_COUNT; push = 5; #elif defined(__x86_64__) trampoline = &Trampoline_x86_64_; x86_thread_state64_t state; flavor = x86_THREAD_STATE64; count = x86_THREAD_STATE64_COUNT; push = 2; #else #error XXX: implement #endif vm_address_t code; _krncall(vm_allocate(task, &code, trampoline->size_, true)); vm_write(task, code, reinterpret_cast<vm_address_t>(trampoline->data_), trampoline->size_); _krncall(vm_protect(task, code, trampoline->size_, false, VM_PROT_READ | VM_PROT_EXECUTE)); /* printf("_ptss:%p\n", baton->__pthread_set_self); 
printf("dlsym:%p\n", baton->dlsym); printf("code:%zx\n", (size_t) code); */ uint32_t frame[push]; if (sizeof(frame) != 0) memset(frame, 0, sizeof(frame)); memset(&state, 0, sizeof(state)); mach_msg_type_number_t read(count); _krncall(thread_get_state(thread, flavor, reinterpret_cast<thread_state_t>(&state), &read)); _assert(count == count); #if defined(__arm__) state.r[0] = data; state.sp = stack + Stack_; state.pc = code + trampoline->entry_; if ((state.pc & 0x1) != 0) { state.pc &= ~0x1; state.cpsr |= 0x20; } #elif defined(__i386__) frame[1] = data; state.__eip = code + trampoline->entry_; state.__esp = stack + Stack_ - sizeof(frame); #elif defined(__x86_64__) frame[0] = 0xdeadbeef; state.__rdi = data; state.__rip = code + trampoline->entry_; state.__rsp = stack + Stack_ - sizeof(frame); #else #error XXX: implement #endif if (sizeof(frame) != 0) vm_write(task, stack + Stack_ - sizeof(frame), reinterpret_cast<vm_address_t>(frame), sizeof(frame)); _krncall(thread_set_state(thread, flavor, reinterpret_cast<thread_state_t>(&state), count)); _krncall(thread_resume(thread)); _krncall(mach_port_deallocate(self, task)); }
// Enable the glow flag for one glow object in the target process and write
// its color block. Glow objects are 0x38 bytes apart; +0x24 holds the
// enable flag, +0x4 the color data.
void applyGlowEffect(uint32_t glowStartAddress, int glowObjectIndex, struct Color * color){
	bool stat = 1;
	// BUG fix: printf referenced `states`, which is not declared in this
	// function; the flag being reported is the local `stat`.
	printf("glow state is %i, alpha is %f\n", stat, color->alpha);
	vm_write(csgo, glowStartAddress + 0x38 * glowObjectIndex + 0x24,
		(vm_offset_t) &stat, sizeof(bool));
	// Writes sizeof(struct Color) bytes starting at &color->red —
	// presumably red/green/blue/alpha are contiguous leading fields; confirm.
	vm_write(csgo, glowStartAddress + 0x38 * glowObjectIndex + 0x4,
		(vm_offset_t) &(color->red), sizeof(struct Color));
}
int main(int argv,char *args[]){ while(1){ int r=getinput(); if(r==R_CODE_EXIT) break; else if(r==R_CODE_INFO) printf("-help info:\n"HELP); else if(r==R_CODE_PS||r==R_CODE_AT){ GetBSDProcessList(); int i = 0; for (i = 0; i < gprocCount; i++) { kinfo_proc *pro = (gprocList + i); if(r==R_CODE_PS){ printf("%d pid:%d name:%s user_stack:%p\n", i, pro->kp_proc.p_pid, pro->kp_proc.p_comm, pro->kp_proc.user_stack); }else{ pid_t targetpid = pro->kp_proc.p_pid; int num=-1; MioGetArg2Num(1,&num); if(num==targetpid){ kern_return_t kr=task_for_pid(current_task(), targetpid, >ask); if(kr==KERN_SUCCESS){ printf("[attach proccess %s %d]\n",pro->kp_proc.p_comm,num); gproc=pro; }else{ printf("task_for_pid fail %d pid:%d\n",kr,num); gproc=NULL; } break; } } } }else if(r==R_CODE_SUS){ kern_return_t kr = task_suspend(gtask); if(kr==KERN_SUCCESS){ printf("[suspend]\n"); }else{ printf("task_suspend fail %d\n",kr); } }else if(r==R_CODE_RES){ kern_return_t kr = task_resume(gtask); if(kr==KERN_SUCCESS){ printf("[resume]\n"); }else{ printf("task_resume fail %d\n",kr); } }else if(r==R_CODE_SSI){ int num=-1; if(MioGetArg2Num(1,&num)!=0){ printf("arg error"); continue; } findmemoryspace(); int i=0; int index=0; for(i=0;i<gspace_count;i++){ space *target_space=gspaces+i; vm_address_t target_add=target_space->address; vm_address_t end_add=target_space->address+target_space->size; printf("start search %d from %p to %p of %dK space.\n",num,target_add,end_add,target_space->size/1024); do{ int *buf; uint32_t sz; kern_return_t kr=vm_read(gtask,target_add,sizeof(int),&buf,&sz); if(kr!=KERN_SUCCESS){ printf("error %d\n",kr); } if((*buf)==num){ if(index<MAX_ADDS){ printf("find the var at %p=%lu\n",target_add,target_add); gadds[index]=target_add; index++; }else{ printf("gadds over flow\n"); } } target_add=target_add+sizeof(int); }while(target_add<end_add); printf("there are %d vars\n",index); gadds[index]=0; } //end of start search int }else if(r==R_CODE_CSI){ int num=-1; if(MioGetArg2Num(1,&num)!=0){ 
printf("arg error"); continue; } char *add=NULL; int index=0; while((add=gadds[index])!=0){ int *buf; uint32_t sz; kern_return_t kr=vm_read(gtask,add,sizeof(int),&buf,&sz); if(kr!=KERN_SUCCESS){ printf("error %d\n",kr); break; } if((*buf)==num){ printf("still find the var at %p=%lu\n",add,add); int t=0; char *tadd=NULL; while(1){ tadd=gadds[t]; if(tadd=-1){ gadds[t]=add; break; }else{ continue; } } index++; }else{ gadds[index]=0; index++; } } gadds[index]=0; }else if(r==R_CODE_MOD){ char *add=-1; if(MioGetArg2Long(1,&add)!=0){ printf("address arg error"); continue; } int num=-1; if(MioGetArg2Num(2,&num)!=0){ printf("change to arg error"); continue; } printf("mod %p to %d\n",add,num); kern_return_t kr=vm_write(gtask,add,(vm_offset_t)&num,sizeof(int)); if(kr==KERN_SUCCESS){ printf("OK!\n"); }else{ printf("vm_write fail %d\n",kr); } } } return 0; }
static OSStatus DoSaveMemory( AuthorizationRef auth, const void * userData, CFDictionaryRef request, CFMutableDictionaryRef response, aslclient asl, aslmsg aslMsg ) // Implements the kLowNumberedPortsCommand. Opens three low-numbered ports // and adds them to the descriptor array in the response dictionary. { // Pre-conditions if(auth == NULL || request == NULL || response == NULL) return kMemToolBadParameter; // CFShow(request); // load in the WoW ProcessID pid_t wowPID = 0; CFNumberRef cfPID = CFDictionaryGetValue(request, CFSTR(kWarcraftPID)); if(!CFNumberGetValue(cfPID, kCFNumberIntType, &wowPID) || wowPID <= 0) { return kMemToolBadPID; } // load in the memory address unsigned int address = 0; CFNumberRef cfAddress = CFDictionaryGetValue(request, CFSTR(kMemoryAddress)); if(!CFNumberGetValue(cfAddress, kCFNumberIntType, &address) || address == 0) { return kMemToolBadAddress; } // load in memory length CFIndex length = 0; CFDataRef cfContents = CFDictionaryGetValue(request, CFSTR(kMemoryContents)); if( !(length = CFDataGetLength(cfContents))) { return kMemToolBadContents; } bool memSuccess = false; if(wowPID && address && cfContents && length) { asl_log(asl, aslMsg, ASL_LEVEL_DEBUG, "Writing to pid %d at address 0x%X with data length %ld", wowPID, address, length); // put our data into a local buffer Byte buffer[length]; CFDataGetBytes(cfContents, CFRangeMake(0, length), buffer); // get a handle on the WoW task mach_port_t wowTask; task_for_pid(current_task(), wowPID, &wowTask); memSuccess = (KERN_SUCCESS == vm_write(wowTask, address, (vm_offset_t)&buffer, length)); if(memSuccess) { asl_log(asl, aslMsg, ASL_LEVEL_DEBUG, "Write success!"); return kMemToolNoError; } } else { return kMemToolBadParameter; } return kMemToolUnknown; }
/* VM_ACCESS -- Access a file via the VM subsystem. A return value of 1 * indicates that the file is (or will be) "cached" in virtual memory, i.e., * that normal virtual memory file system (normal file i/o) should be used * to access the file. A return value of 0 indicates that direct i/o should * be used to access the file, bypassing the virtual memory file system. */ int vm_access (char *fname, int mode) { struct stat st; char *modestr = NULL, buf[SZ_COMMAND]; char pathname[SZ_PATHNAME]; int status; /* One-time process initialization. */ if (!vm_initialized) vm_initialize(); if (stat (fname, &st) < 0) { status = DEF_ACCESSVAL; goto done; } /* If directio is enabled and the file exceeds the directio threshold * use directio to access the file (access=0). If vmcache is * disabled use normal VM-based i/o to access the file (access=1). * If VMcache is enabled we still only use it if the file size * exceeds vm_threshold. */ if (vm_dioenabled) { status = (st.st_size >= dio_threshold) ? 0 : 1; goto done; } else if (!vm_enabled || st.st_size < vm_threshold) { status = DEF_ACCESSVAL; goto done; } /* Use of VMcache is enabled and the file equals or exceeds the * minimum size threshold. Initialization has already been performed. * Open a VMcache daemon server connection if we don't already have * one. If the server connection fails we are done, but we will try * to open a connection again in the next file access. */ if (!vm_server) if (vm_connect() < 0) { status = DEF_ACCESSVAL; goto done; } /* Compute the mode string for the server request. */ switch (mode) { case READ_ONLY: modestr = "ro"; break; case NEW_FILE: case READ_WRITE: case APPEND: modestr = "rw"; break; } /* Format and send the file access directive to the VMcache daemon. * The status from the server is returned as an ascii integer value * on the same socket. 
*/ sprintf (buf, "access %s %s\n", realpath(fname,pathname), modestr); if (vm_write (vm_server, buf, strlen(buf)) < 0) { vm_shutdown(); status = DEF_ACCESSVAL; goto done; } if (read (vm_server, buf, SZ_CMDBUF) <= 0) { if (vm_debug) fprintf (stderr, "vmclient (%s): server not responding\n", vm_client); vm_shutdown(); status = DEF_ACCESSVAL; goto done; } status = atoi (buf); done: if (vm_debug) fprintf (stderr, "vmclient (%s): access `%s' -> %d\n", vm_client, fname, status); return (status < 0 ? DEF_ACCESSVAL : status); }
/* VM_DELETE -- Delete any VM space used by a file, e.g., because the file * is being physically deleted. This should be called before the file is * actually deleted so that the cache can determine its device and inode * values. */ int vm_delete (char *fname, int force) { struct stat st; char buf[SZ_COMMAND]; char pathname[SZ_PATHNAME]; int status = 0; /* One-time process initialization. */ if (!vm_initialized) vm_initialize(); if (stat (fname, &st) < 0) { status = -1; goto done; } /* If VMcache is not being used we are done. */ if (vm_dioenabled && (st.st_size >= dio_threshold)) goto done; else if (!vm_enabled || st.st_size < vm_threshold) goto done; /* Don't delete the VM space used by the file if it has hard links * and only a link is being deleted (force flag will override). */ if (st.st_nlink > 1 && !force) goto done; /* Connect to the VMcache server if not already connected. */ if (!vm_server) if (vm_connect() < 0) { status = -1; goto done; } /* Format and send the delete directive to the VMcache daemon. * The status from the server is returned as an ascii integer value * on the same socket. */ sprintf (buf, "delete %s\n", realpath(fname,pathname)); if (vm_write (vm_server, buf, strlen(buf)) < 0) { vm_shutdown(); status = -1; goto done; } if (read (vm_server, buf, SZ_CMDBUF) <= 0) { if (vm_debug) fprintf (stderr, "vmclient (%s): server not responding\n", vm_client); vm_shutdown(); status = -1; goto done; } status = atoi (buf); done: if (vm_debug) fprintf (stderr, "vmclient (%s): delete `%s' -> %d\n", vm_client, fname, status); return (status < 0 ? -1 : status); }