Example #1
static void 
readmem(mach_vm_offset_t *buffer, mach_vm_address_t address, mach_vm_size_t size, pid_t pid, vm_region_basic_info_data_64_t *info)
{
    // get task for pid
    vm_map_t port;

    kern_return_t kr;

    if (task_for_pid(mach_task_self(), pid, &port))
    {
        fprintf(stderr, "[ERROR] Can't execute task_for_pid! Do you have the right permissions/entitlements?\n");
        exit(1);
    }
    
    mach_msg_type_number_t info_cnt = VM_REGION_BASIC_INFO_COUNT_64;
    mach_port_t object_name;
    mach_vm_size_t size_info;
    mach_vm_address_t address_info = address;
    kr = mach_vm_region(port, &address_info, &size_info, VM_REGION_BASIC_INFO_64, (vm_region_info_t)info, &info_cnt, &object_name);
    if (kr)
    {
        fprintf(stderr, "[ERROR] mach_vm_region failed with error %d\n", (int)kr);
        exit(1);
    }

    // read memory - vm_read_overwrite because we supply the buffer
    mach_vm_size_t nread;
    kr = mach_vm_read_overwrite(port, address, size, (mach_vm_address_t)buffer, &nread);

    if (kr || nread != size)
    {
        fprintf(stderr, "[ERROR] vm_read failed!\n");
        exit(1);
    }
}
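A minimal driver sketch for readmem() above, not part of the original example; the argument parsing, the 256-byte read size, and the hex dump are assumptions added only to show how the function might be called.

// Hypothetical driver for readmem() above (assumes <stdio.h>, <stdlib.h>,
// <stdint.h>, <mach/mach.h> and <mach/mach_vm.h> are included alongside it).
int main(int argc, char **argv)
{
    if (argc != 3) {
        fprintf(stderr, "usage: %s <pid> <hex address>\n", argv[0]);
        return 1;
    }

    pid_t pid = (pid_t)atoi(argv[1]);
    mach_vm_address_t address = strtoull(argv[2], NULL, 16);
    mach_vm_size_t size = 256;                    // placeholder read size

    vm_region_basic_info_data_64_t info;
    uint8_t *buf = calloc(1, size);
    readmem((mach_vm_offset_t *)buf, address, size, pid, &info);

    // dump the bytes copied out of the target process
    for (mach_vm_size_t i = 0; i < size; i++)
        printf("%02x%c", buf[i], (i % 16 == 15) ? '\n' : ' ');

    free(buf);
    return 0;
}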
Example #2
void read_mem(mach_vm_address_t addr, mach_vm_address_t buf, int len) {
    // Read the traced process's memory, starting at addr, up to addr+len.
    mach_vm_size_t count = len;
    kern_return_t ret = 0;

    ret = mach_vm_protect(task, addr, len, false, VM_PROT_READ|VM_PROT_WRITE);
    error(ret);
    ret = mach_vm_read_overwrite(task, addr, len, buf, &count);
    error(ret);
/*
    size_t read = 0;
    
    while (read < len) {
        // Read and copy a single word of data at a time from the traced
        // process's address space.
        errno = 0;
        int data = ptrace(PT_READ_D, pid, addr+read, 0);
        if (errno) {
            perror("Error: PT_READ_D");
        } 
        memcpy(buf+read, &data, std::min(sizeof(data), len-read));
        read += sizeof(data);
    }
*/
}
Example #3
/**
 * (Safely) read len bytes from @a source, storing in @a dest.
 *
 * @param task The task from which data from address @a source will be read.
 * @param source The address within @a task from which the data will be read.
 * @param dest The destination address to which copied data will be written.
 * @param len The number of bytes to be read.
 *
 * @return On success, returns KERN_SUCCESS. If the pages containing @a source + len are unmapped, KERN_INVALID_ADDRESS
 * will be returned. If the pages can not be read due to access restrictions, KERN_PROTECTION_FAILURE will be returned.
 *
 * @warning Unlike all other plcrash_* functions, plcrash_async_read_addr returns a kern_return_t value.
 * @todo Modify plcrash_async_read_addr and all API clients to use plcrash_error_t values.
 */
kern_return_t plcrash_async_read_addr (mach_port_t task, pl_vm_address_t source, void *dest, pl_vm_size_t len) {
#ifdef PL_HAVE_MACH_VM
    pl_vm_size_t read_size = len;
    return mach_vm_read_overwrite(task, source, len, (pointer_t) dest, &read_size);
#else
    vm_size_t read_size = len;
    return vm_read_overwrite(task, source, len, (pointer_t) dest, &read_size);
#endif
}
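A hedged usage sketch for plcrash_async_read_addr() above; the helper name and both of its arguments are placeholders, not anything taken from PLCrashReporter itself.

/* Hypothetical caller: copy an 8-byte value out of `task` at `remote_addr`.
 * Both arguments are stand-ins for values obtained elsewhere. */
static uint64_t read_u64_or_zero(mach_port_t task, pl_vm_address_t remote_addr) {
    uint64_t value = 0;
    kern_return_t kr = plcrash_async_read_addr(task, remote_addr, &value, sizeof(value));
    if (kr != KERN_SUCCESS) {
        /* KERN_INVALID_ADDRESS or KERN_PROTECTION_FAILURE, per the doc comment above */
        return 0;
    }
    return value;
}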
Example #4
__attribute__((always_inline)) void read_kernel_memory_in_buffer(task_t task, vm_offset_t addr, uint32_t size, void *buffer)
{
    if (!buffer) {
        __dbg("(!) read failed: no destination buffer.");
        return;
    }

    mach_vm_size_t sz = 0;
    kern_return_t kr = mach_vm_read_overwrite(task, addr, size, (mach_vm_address_t)buffer, &sz);
    if (kr != KERN_SUCCESS || sz != size) {
        __dbg("(!) read failed.");
        return;
    }
}
Example #5
void *read_kernel_memory(task_t task, vm_offset_t addr, uint32_t size)
{
    void *mem = malloc(size);
    if (!mem) {
        __dbg("(!) allocation failed.");
        return NULL;
    }

    mach_vm_size_t sz = 0;
    kern_return_t kr = mach_vm_read_overwrite(task, addr, size, (mach_vm_address_t)mem, &sz);
    if (kr != KERN_SUCCESS || sz != size) {
        __dbg("(!) read failed.");
        free(mem);
        return NULL;
    }

    return mem;
}
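A short usage sketch built on read_kernel_memory() above; the helper, its name, and its parameters are assumptions added for illustration only.

/* Hypothetical helper: dump `count` 64-bit words starting at kaddr.
 * `task` and `kaddr` come from the caller and are placeholders here. */
static void dump_kernel_words(task_t task, vm_offset_t kaddr, uint32_t count)
{
    uint64_t *words = (uint64_t *)read_kernel_memory(task, kaddr, count * sizeof(uint64_t));
    if (!words)
        return;
    for (uint32_t i = 0; i < count; i++)
        printf("0x%016llx: 0x%016llx\n",
               (unsigned long long)(kaddr + i * sizeof(uint64_t)),
               (unsigned long long)words[i]);
    free(words);
}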
Example #6
bool read_vm(vm_map_t task, mach_vm_address_t address, size_t size, unsigned char **read_value, mach_vm_size_t *read_size) {
	protection_backup *backup = backup_protection(task, address, size);
	KERN_TEST(mach_vm_protect(task, address, size, 0, VM_PROT_ALL), "Error setting protection");
	
	if(!read_size) {
		mach_vm_size_t dummy;
		read_size = &dummy;
	}
	
	// read into the caller-supplied buffer pointed to by *read_value
	KERN_TEST(mach_vm_read_overwrite(task, address, size, (mach_vm_address_t)*read_value, read_size),
			  "Error reading bytes");
	restore_protection(task, backup);
	
	return true;
}
Example #7
size_t kread(uint64_t where, void *p, size_t size) {
    int rv;
    size_t offset = 0;
    while (offset < size) {
        mach_vm_size_t sz, chunk = 2048;
        if (chunk > size - offset) {
            chunk = size - offset;
        }
        rv = mach_vm_read_overwrite(tfpzero, where + offset, chunk, (mach_vm_address_t)p + offset, &sz);
        if (rv || sz == 0) {
            fprintf(stderr, "[e] error reading kernel @%p\n", (void *)(offset + where));
            break;
        }
        offset += sz;
    }
    return offset;
}
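The loop above deliberately reads in 2048-byte chunks: mach_vm_read_overwrite does not do partial copies, so one oversized read fails outright if any page in the range is bad, while the chunked loop copies what it can and returns the byte count it managed. A hedged usage sketch follows; the helper and its kaddr parameter are assumptions, and tfpzero is taken from the snippet above.

/* Hypothetical usage of kread() above; kaddr is whatever kernel address the
 * caller wants to inspect, and tfpzero is assumed to be initialized already. */
static void dump_kernel_bytes(uint64_t kaddr)
{
    uint8_t buf[0x100];
    size_t got = kread(kaddr, buf, sizeof(buf));
    if (got != sizeof(buf))
        fprintf(stderr, "[e] short read: %zu of %zu bytes\n", got, sizeof(buf));
    for (size_t i = 0; i < got; i++)
        printf("%02x%c", buf[i], (i % 16 == 15) ? '\n' : ' ');
    if (got % 16)
        printf("\n");
}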
Example #8
int 
read_memory(int pid, mach_vm_address_t addr, mach_vm_size_t len, char *data)
{
    //		fprintf(stderr, "!read_memory %d %p %x\n", pid, (void *)addr, len);
    mach_vm_size_t nread;
    vm_map_t port = getport(pid);

    mach_vm_read_overwrite(port, addr, len, (mach_vm_address_t)data, &nread);
    if(nread != len){
        //fprintf(stderr, "Error reading memory, requested %d bytes, read %d\n", len, nread);
        //                return 0;  // bad
    }
    /*		if (data != NULL)
     printf("[DEBUG] read %d bytes data is: %x\n", nread, *data);
     */
    return 1;
}
Example #9
static int find_foreign_images(mach_port_t task,
                               struct foreign_image *images, size_t nimages,
                               char **error) {
    struct task_dyld_info tdi;
    mach_msg_type_number_t cnt = TASK_DYLD_INFO_COUNT;

    kern_return_t kr = task_info(task, TASK_DYLD_INFO, (void *) &tdi, &cnt);
    if (kr || cnt != TASK_DYLD_INFO_COUNT) {
        asprintf(error, "task_info(TASK_DYLD_INFO): kr=%d", kr);
        return SUBSTITUTE_ERR_MISC;
    }

    if (!tdi.all_image_info_addr || !tdi.all_image_info_size ||
        tdi.all_image_info_size > 1024 ||
        tdi.all_image_info_format > TASK_DYLD_ALL_IMAGE_INFO_64) {
        asprintf(error, "TASK_DYLD_INFO obviously malformed");
        return SUBSTITUTE_ERR_MISC;
    }

    char all_image_infos_buf[1024];

    cnt = tdi.all_image_info_size;
    mach_vm_size_t size;
    kr = mach_vm_read_overwrite(task, tdi.all_image_info_addr,
                                tdi.all_image_info_size,
                                (mach_vm_address_t) all_image_infos_buf, &size);
    if (kr || size != tdi.all_image_info_size) {
        asprintf(error, "mach_vm_read_overwrite(all_image_info): kr=%d", kr);
        return SUBSTITUTE_ERR_MISC;
    }

    bool is64 = tdi.all_image_info_format == TASK_DYLD_ALL_IMAGE_INFO_64;
    const struct dyld_all_image_infos_32 *aii32 = (void *) all_image_infos_buf;
    const struct dyld_all_image_infos_64 *aii64 = (void *) all_image_infos_buf;

    #define FIELD(f) (is64 ? aii64->f : aii32->f)

    if (FIELD(version) < 2) {
        /* apparently we're on Leopard or something */
        asprintf(error, "dyld_all_image_infos version too low");
        return SUBSTITUTE_ERR_MISC;
    }

    /* If we are on the same shared cache with the same slide, then we can just
     * look up the symbols locally and don't have to do the rest of the
     * syscalls... not sure if this is any faster, but whatever. */
    if (FIELD(version) >= 13) {
        //const struct dyld_all_image_infos *local_aii = _dyld_get_all_image_infos();
        
        void* handle = dlopen("libSystem.B.dylib", 1);
        void* _dyld_get_all_image_infos = dlsym(handle, "_dyld_get_all_image_infos");
        struct dyld_all_image_infos* local_aii = ((struct dyld_all_image_infos* (*)())_dyld_get_all_image_infos)();
        
        if (local_aii->version >= 13 &&
            FIELD(sharedCacheSlide) == local_aii->sharedCacheSlide &&
            !memcmp(FIELD(sharedCacheUUID), local_aii->sharedCacheUUID, 16)) {
            dlclose(handle);
            return FFI_SHORT_CIRCUIT;
        }
        dlclose(handle);
    }


    uint64_t info_array_addr = FIELD(infoArray);
    uint32_t info_array_count = FIELD(infoArrayCount);
    size_t info_array_elm_size = (is64 ? sizeof(uint64_t) : sizeof(uint32_t)) * 3;

    #undef FIELD

    if (info_array_count > 2000) {
        asprintf(error, "unreasonable number of loaded libraries: %u",
                 info_array_count);
        return SUBSTITUTE_ERR_MISC;
    }
    size_t info_array_size = info_array_count * info_array_elm_size;
    void *info_array = malloc(info_array_count * info_array_elm_size);
    if (!info_array)
        return SUBSTITUTE_ERR_OOM;

    kr = mach_vm_read_overwrite(task, info_array_addr, info_array_size,
                                (mach_vm_address_t) info_array, &size);
    if (kr || size != info_array_size) {
        asprintf(error, "mach_vm_read_overwrite(info_array): kr=%d", kr);
        free(info_array);
        return SUBSTITUTE_ERR_MISC;
    }

    /* yay, slow file path reads! */

    void *info_array_ptr = info_array;
    size_t images_left = nimages;
    for (uint32_t i = 0; i < info_array_count; i++) {
        uint64_t load_address;
        uint64_t file_path;
        if (is64) {
            uint64_t *e = info_array_ptr;
            load_address = e[0];
            file_path = e[1];
        } else {
            uint32_t *e = info_array_ptr;
            load_address = e[0];
            file_path = e[1];
        }

        /* mach_vm_read_overwrite won't do partial copies, so... */

        char path_buf[MAXPATHLEN+1];
        size_t toread = MIN(MAXPATHLEN, -file_path & 0xfff);
        path_buf[toread] = '\0';
        kr = mach_vm_read_overwrite(task, file_path, toread,
                                    (mach_vm_address_t) path_buf, &size);
        if (kr) {
            /* printf("kr=%d <%p %p>\n", kr, (void *) file_path, path_buf); */
            continue;
        }
        if (strlen(path_buf) == toread && toread < MAXPATHLEN) {
            /* get the rest... */
            kr = mach_vm_read_overwrite(task, file_path + toread,
                                        MAXPATHLEN - toread,
                                        (mach_vm_address_t) path_buf + toread,
                                        &size);
            if (kr) {
                continue;
            }
            path_buf[MAXPATHLEN] = '\0';
        }

        for (size_t j = 0; j < nimages; j++) {
            if (!images[j].address &&
                !strcmp(path_buf, images[j].name)) {
                images[j].address = load_address;
                if (--images_left == 0) {
                    free(info_array);
                    return SUBSTITUTE_OK;
                }
            }
        }

        info_array_ptr += info_array_elm_size;
    }

    free(info_array);
    asprintf(error, "couldn't find libdyld or libpthread");
    return SUBSTITUTE_ERR_MISC;
}
Example #10
int main(int argc, char** argv)
{
    // THIS IS BOILERPLATE TO PROPERLY GAIN TFP0 AND INITIALIZE INTERNALS
    offsets_init();
    task_t kernel_task;
    host_get_special_port(mach_host_self(), HOST_LOCAL_NODE, 4, &kernel_task);
    task_self_addr();
    kernel_task_port = kernel_task;
    tfp0 = kernel_task;
    // THIS IS BOILERPLATE TO PROPERLY GAIN TFP0 AND INITIALIZE INTERNALS

    if (argc != 1)
    {
        printf("Usage\n\t%s NO ARGUMENTS\n", argv[0]);
        return -1;
    }

    fprintf(stderr, "[NERFBAT]\tVersion 0.3b (tfp = 0x%x)\n", tfp0);
    fprintf(stderr, "[NERFBAT]\tpid = %d\n", getpid());
    fprintf(stderr, "[NERFBAT]\tWaiting on handle for MISVSACI to open up...\n");
    sleep(5);

    set_platform_attribs(get_proc_block(getpid()), tfp0);



    uint32_t amfid_pid = 0;
    kern_return_t kr;
    mach_port_name_t amfid_port = 0;
    int failure = 1;
    uint64_t old_amfid_MISVSACI_local = 0;

    if(!(access("/tmp/amfid.MISVSACI", F_OK) == -1))
    {
        char fdata[0x20];   
        sprintf(fdata, "0x%llx", old_amfid_MISVSACI);
        int fd = open("/tmp/amfid.MISVSACI", O_RDONLY);
        read(fd, fdata, 0x20);
        close(fd);
        old_amfid_MISVSACI_local = strtoull(fdata, 0, 0x10);
        old_amfid_MISVSACI = old_amfid_MISVSACI_local;
        fprintf(stderr, "[NERFBAT]\tLoading old jump table: 0x%llx\n", old_amfid_MISVSACI);
        fprintf(stderr, "[NERFBAT]\tabout to search for the binary load address\n");

        amfid_pid = get_pid_from_name("amfid");
        fprintf(stderr, "[NERFBAT]\tAMFID pid = %d\n", amfid_pid);
        fprintf(stderr, "[NERFBAT]\t[i]\ttask for pid 0 = 0x%x\n", tfp0);
        kr = task_for_pid(mach_task_self(), amfid_pid, &amfid_port);
        if (kr != KERN_SUCCESS)
            fprintf(stderr, "[NERFBAT]\t[-]\tTHERE WAS AN ERROR GETTING task_for_portfor AMFID\n");
        amfid_base = binary_load_address(amfid_port);
        fprintf(stderr, "[NERFBAT]\tamfid load address: 0x%llx\n", amfid_base);
    } else {
        fprintf(stderr, "[NERFBAT]\t[i]\tMASSIVE PROBLEM IN NERFBAT\n");
    }

    while (1)
    {
        if (failure || get_pid_from_name("amfid") != amfid_pid)
        {
            amfid_pid = get_pid_from_name("amfid");
            fprintf(stderr, "[NERFBAT]\t[i]\tAMFID pid == %d\n", amfid_pid);
            uint64_t amfid_proc = get_proc_block(amfid_pid);
            amfid_base = amfid_proc;
            fprintf(stderr, "[NERFBAT]\t[i]\tAMFID proc bloc == 0x%llx\n", amfid_proc);
            //We need to enable amfid to allow us to get a port to it
            fprintf(stderr, "[NERFBAT]\t[i]\tAMFID pid == %d\n", amfid_pid);
            uint64_t amfid_task = get_proc_block(amfid_pid);
            fprintf(stderr, "[NERFBAT]\t[i]\tGot amfid pid at 0x%llx\n", amfid_task);
            uint64_t vnode_info = rk64(amfid_task+0x248);
            fprintf(stderr, "[NERFBAT]\t[i]\tVNODE INFO : 0x%llx\n", vnode_info);
            uint64_t ubc_info = rk64(vnode_info+0xf*sizeof(uint64_t));
            fprintf(stderr, "[NERFBAT]\t[i]\tMy UBC INFO is 0x%llx\n", ubc_info);
            uint64_t blob = rk64(ubc_info+0xa*sizeof(uint64_t));
            char *csb = malloc(0xa8);
            mach_vm_size_t sz = 0;
            mach_vm_read_overwrite(tfp0, (mach_vm_address_t)blob, 0xa8, (mach_vm_address_t)csb, &sz);
            fprintf(stderr, "[NERFBAT]\t[i]\tCurrent 0xa4 = 0x%02x\n", (int)*(char *)((char *)csb + 0xA4));
            *(char *)((char *)csb + 0xA4) = (*((char *)csb + 0xA4) & 0xFE) | 1;
            fprintf(stderr, "[NERFBAT]\t[i]\tNew 0xa4 = 0x%02x\n", (int)*(char *)((char *)csb + 0xA4));
            fprintf(stderr, "[NERFBAT]\t[i]\tCurrent 0xc = 0x%04x\n", *(uint32_t *)((uint32_t *)csb + 0xc));
            *(uint32_t *)((uint32_t *)csb + 0xc) = *((uint32_t *)csb + 0xc) | htonl(0x22000005);
            fprintf(stderr, "[NERFBAT]\t[i]\tCurrent 0xc = 0x%04x\n", *(uint32_t *)((uint32_t *)csb + 0xc));
            mach_vm_write(tfp0, blob, (vm_offset_t)csb, 0xa8);
            free(csb);

            fprintf(stderr, "[NERFBAT]\t[i]\ttask for pid 0 = 0x%x\n", tfp0);
            kr = task_for_pid(mach_task_self(), amfid_pid, &amfid_port);
            if (kr != KERN_SUCCESS)
            {
                fprintf(stderr, "[NERFBAT]\t[-]\tTHERE WAS AN ERROR GETTING task_for_portfor AMFID\n");
                failure = 1;
            } else {
                failure = 0;
            }
            fprintf(stderr, "[NERFBAT]\t[i]\tPATCHING AMFID on port = 0x%x\n", amfid_port);
            unpatch_amfid(amfid_port, old_amfid_MISVSACI_local);
            patch_amfid(amfid_port);
        }
        fprintf(stderr, "[NERFBAT]\t[i]\tSleeping for 10 seconds...\n");
        sleep(10);
    }
}
Example #11
static tb_bool_t it_stuff(task_t task, cpu_type_t* cputype, it_addr_bundle_t* addrs)
{
    // make the optimizer happy
    *cputype = 0; 

    // init the task info
    task_dyld_info_data_t   info = {0};
    mach_msg_type_number_t  count = TASK_DYLD_INFO_COUNT;
    if (task_info(task, TASK_DYLD_INFO, (task_info_t) &info, &count)) return tb_false;

    // read all image info
    union 
    {
        it_dyld_all_image_infos_t       data;
        it_dyld_all_image_infos_64_t    data64;

    } u;
    mach_vm_size_t data_size = sizeof(u);
    if (info.all_image_info_size < data_size) data_size = info.all_image_info_size;
    if (mach_vm_read_overwrite(task, info.all_image_info_addr, data_size, it_address_cast(&u), &data_size)) return tb_false;
    if (u.data.version <= 1) return tb_false;

    // read mach header
#if defined(TB_ARCH_x86) || defined(TB_ARCH_x64) || defined(TB_ARCH_ARM64)
    tb_bool_t proc64 = u.data64.dyldImageLoadAddress > 0? tb_true : tb_false;
#else
    tb_bool_t proc64 = tb_false;
#endif 
    tb_trace_d("proc64: %p", proc64);   
    struct mach_header  mach_hdr = {0};
    mach_vm_address_t   dyldImageLoadAddress = proc64? u.data64.dyldImageLoadAddress : u.data.dyldImageLoadAddress;
    if (mach_vm_read_overwrite(task, dyldImageLoadAddress, (mach_vm_size_t)sizeof(mach_hdr), it_address_cast(&mach_hdr), &data_size)) return tb_false;

    // swap?
    tb_bool_t               swap = (mach_hdr.magic == MH_CIGAM || mach_hdr.magic == MH_CIGAM_64)? tb_true : tb_false;
    tb_trace_d("swap: %u", swap);

    // save cputype
    *cputype = it_swap_u32(mach_hdr.cputype);

    // read cmds
    mach_vm_size_t          sizeofcmds = it_swap_u32(mach_hdr.sizeofcmds);
    struct load_command*    cmds = malloc(sizeofcmds);
    tb_bool_t               mh64 = (mach_hdr.magic == MH_MAGIC_64 || mach_hdr.magic == MH_CIGAM_64)? tb_true : tb_false;
    tb_trace_d("mh64: %u", mh64);   
    if (mach_vm_read_overwrite(task, dyldImageLoadAddress + (mh64 ? sizeof(struct mach_header_64) : sizeof(struct mach_header)), (mach_vm_size_t)sizeofcmds, it_address_cast(cmds), &sizeofcmds)) return tb_false;

    // read symtab
    mach_vm_address_t       slide;
    it_symtab_bundle_t      symtab;
    tb_size_t               nlist_size = mh64 ? sizeof(struct nlist_64) : sizeof(struct nlist);
    if (!it_find_symtab_addrs(dyldImageLoadAddress, mach_hdr.ncmds, sizeofcmds, cmds, swap, nlist_size, &symtab, &slide)) return tb_false;

    // read strs & syms
    tb_char_t*      strs = malloc(symtab.strsize);
    tb_pointer_t    syms = malloc(symtab.nsyms * nlist_size);
    if (mach_vm_read_overwrite(task, symtab.straddr, (mach_vm_size_t)(symtab.strsize), it_address_cast(strs), &data_size)) return tb_false;
    if (mach_vm_read_overwrite(task, symtab.symaddr, (mach_vm_size_t)(symtab.nsyms * nlist_size), it_address_cast(syms), &data_size)) return tb_false;

    // read address
    memset(addrs, 0, sizeof(*addrs));
    if (mh64) 
    {
        struct nlist_64 const* nl = syms;
        while (symtab.nsyms--) 
        {
            tb_uint32_t strx = (tb_uint32_t) it_swap_u32(nl->n_un.n_strx);
            tb_assert(strx < symtab.strsize);
            it_handle_sym(strs + strx, symtab.strsize - strx, (mach_vm_address_t) it_swap_u64(nl->n_value) + slide, addrs);
            nl++;
        }
    } 
    else 
    {
        struct nlist const* nl = syms;
        while (symtab.nsyms--)
        {
            tb_uint32_t strx = it_swap_u32(nl->n_un.n_strx);
            tb_assert(strx < symtab.strsize);
            it_handle_sym(strs + strx, symtab.strsize - strx, (mach_vm_address_t) it_swap_u32(nl->n_value) + slide, addrs);
            nl++;
        }
    }
    tb_assert(addrs->dlopen);
    tb_assert(addrs->syscall);

    // free
    if (cmds) free(cmds);
    if (strs) free(strs);
    if (syms) free(syms);

    // ok
    return tb_true;
}