Example #1
void breakpoint(mach_vm_address_t addr) {
    // Truncate the target to a page boundary before patching. NOTE: the
    // original kept a hard-wired debug address here, overriding the caller's
    // argument; that behavior is preserved.
    addr = vm_map_trunc_page(0x7fff5e8a6c08);

    std::cerr << "Breaking at " << std::hex << (intptr_t)addr << std::dec << std::endl;

    mach_vm_address_t trap = 0;
    mach_vm_address_t backup = 0;

    // One scratch page for the trap bytes, one to preserve the original code.
    error(mach_vm_allocate(mach_task_self(), &trap, PAGE_SIZE, VM_FLAGS_ANYWHERE));
    error(mach_vm_allocate(mach_task_self(), &backup, PAGE_SIZE, VM_FLAGS_ANYWHERE));

    // Save the original page, then overwrite it with the trap page. Note that
    // the trap page is still zero-filled at this point; installing a real
    // breakpoint would first write an INT3 opcode (0xCC) into it.
    read_mem(addr, backup, PAGE_SIZE);
    write_mem(addr, trap, PAGE_SIZE);
}
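The helpers read_mem, write_mem, and error are not shown in the listing (error is evidently the example's own kern_return_t checker). A minimal sketch of what they might look like for the current task; the names, signatures, and protection handling here are assumptions, not the original implementation:

#include <mach/mach.h>
#include <mach/mach_vm.h>

static void read_mem(mach_vm_address_t src, mach_vm_address_t dst, mach_vm_size_t len) {
    mach_vm_size_t outsize = 0;
    // Copy len bytes from src into the local buffer at dst.
    error(mach_vm_read_overwrite(mach_task_self(), src, len, dst, &outsize));
}

static void write_mem(mach_vm_address_t dst, mach_vm_address_t src, mach_vm_size_t len) {
    // Code pages are normally r-x; request write (and copy-on-write) access first.
    error(mach_vm_protect(mach_task_self(), dst, len, FALSE,
                          VM_PROT_READ | VM_PROT_WRITE | VM_PROT_COPY));
    error(mach_vm_write(mach_task_self(), dst, (vm_offset_t)src, (mach_msg_type_number_t)len));
}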
Example #2
/*
 * If we try to allocate memory occupied by superpages as normal pages
 * - the call should fail
 */
boolean_t
test_reallocate() {
	mach_vm_address_t addr = 0, addr2;
	mach_vm_size_t	size = SUPERPAGE_SIZE;
	int kr, ret;
	int i;

	kr = mach_vm_allocate(mach_task_self(), &addr, size, VM_FLAGS_ANYWHERE | VM_FLAGS_SUPERPAGE_SIZE_2MB);
	if (!(ret = check_kr(kr, "mach_vm_allocate"))) return ret;

	/* attempt to allocate every sub-page of the superpage as a normal page;
	 * use a separate size variable so the superpage's own size stays intact
	 * for the final deallocation */
	for (i=0; i<SUPERPAGE_SIZE/PAGE_SIZE; i++) {
		mach_vm_size_t size2 = PAGE_SIZE;
		addr2 = addr + i*PAGE_SIZE;
		kr = mach_vm_allocate(mach_task_self(), &addr2, size2, 0);
		if ((ret = check_kr(kr, "mach_vm_allocate"))) {
			sprintf(error, "could allocate already allocated space, page %d", i);
			mach_vm_deallocate(mach_task_self(), addr, size);
			return FALSE;
		}
	}
	kr = mach_vm_deallocate(mach_task_self(), addr, size);
	if (!(ret = check_kr(kr, "mach_vm_deallocate"))) return ret;
	return TRUE;
}
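This and the other superpage tests below share a few helpers and constants the listings omit. A plausible reconstruction, modeled on xnu's superpage test suite (the exact definitions there may differ):

#define SUPERPAGE_SIZE (2*1024*1024)   /* 2 MB superpage */

char error[100];                        /* failure description for the harness */

/* TRUE if kr is KERN_SUCCESS; otherwise record which call failed. */
boolean_t
check_kr(int kr, char *fn) {
	if (kr) {
		sprintf(error, "%s() returned %d", fn, kr);
		return FALSE;
	}
	return TRUE;
}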
Example #3
kern_return_t
kext_alloc(vm_offset_t *_addr, vm_size_t size, boolean_t fixed)
{
    kern_return_t rval = 0;
    mach_vm_offset_t addr = (fixed) ? *_addr : kext_alloc_base;
    int flags = (fixed) ? VM_FLAGS_FIXED : VM_FLAGS_ANYWHERE;
 
    /* Allocate the kext virtual memory */
    rval = mach_vm_allocate(g_kext_map, &addr, size, flags);
    if (rval != KERN_SUCCESS) {
        printf("vm_allocate failed - %d\n", rval);
        goto finish;
    }

    /* Check that the memory is reachable by kernel text */
    if ((addr + size) > kext_alloc_max) {
        kext_free((vm_offset_t)addr, size);
        rval = KERN_INVALID_ADDRESS;  /* don't report success for a freed allocation */
        goto finish;
    }

    *_addr = (vm_offset_t)addr;
    rval = KERN_SUCCESS;

finish:
    return rval;
}
Example #4
/*
 * If we fork with active superpages
 * - the parent should still be able to access the superpages
 * - the child should not be able to access the superpages
 */
boolean_t
test_fork() {
	mach_vm_address_t addr = 0;
	mach_vm_size_t	size = SUPERPAGE_SIZE;
	int kr, ret;
	pid_t pid;
	
	kr = mach_vm_allocate(mach_task_self(), &addr, size, VM_FLAGS_ANYWHERE | VM_FLAGS_SUPERPAGE_SIZE_2MB);
	if (!(ret = check_kr(kr, "mach_vm_allocate"))) return ret;

	fflush(stdout);
	if ((pid=fork())) { /* parent */
		if (!(ret = check_rw(addr, size))) return ret;
		waitpid(pid, &ret, 0);
		if (!ret) {
			sprintf(error, "child could access superpage");
			return ret;
		}
	} else { /* child */
		if (!(ret = check_nr(addr, size, NULL))) exit(ret);
		exit(TRUE);
	}
	
	kr = mach_vm_deallocate(mach_task_self(), addr, size);
	if (!(ret = check_kr(kr, "mach_vm_deallocate"))) return ret;
	return TRUE;
}
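check_rw (and the negative probes check_nr, check_r, check_nw used in later examples) touch each page and must survive the access violation when a probe is expected to fail. A minimal sketch of the read-write probe, assuming a signal handler that longjmps out of the faulting access; this is a reconstruction, not the suite's actual helper:

#include <mach/mach.h>
#include <setjmp.h>
#include <signal.h>

static sigjmp_buf probe_env;

static void probe_fault(int sig) {
	siglongjmp(probe_env, 1);
}

/* TRUE if every page in [addr, addr+size) is readable and writable. */
boolean_t
check_rw(mach_vm_address_t addr, mach_vm_size_t size) {
	mach_vm_size_t off;
	signal(SIGBUS, probe_fault);
	signal(SIGSEGV, probe_fault);
	for (off = 0; off < size; off += PAGE_SIZE) {
		volatile char *p = (char *)(uintptr_t)(addr + off);
		if (sigsetjmp(probe_env, 1)) return FALSE;  /* access faulted */
		*p = *p;                                    /* read, then write back */
	}
	return TRUE;
}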
Example #5
PassRefPtr<SharedMemory> SharedMemory::create(size_t size)
{
    ASSERT(size);

    mach_vm_address_t address = 0;
    kern_return_t kr = mach_vm_allocate(mach_task_self(), &address, round_page(size), VM_FLAGS_ANYWHERE);
    if (kr != KERN_SUCCESS) {
        LOG_ERROR("Failed to allocate mach_vm_allocate shared memory (%zu bytes). %s (%x)", size, mach_error_string(kr), kr); 
        return 0;
    }

    // Create a Mach port that represents the shared memory.
    mach_port_t port;
    memory_object_size_t memoryObjectSize = round_page(size);
    kr = mach_make_memory_entry_64(mach_task_self(), &memoryObjectSize, address, VM_PROT_DEFAULT, &port, MACH_PORT_NULL);

    if (kr != KERN_SUCCESS) {
        LOG_ERROR("Failed to create a mach port for shared memory. %s (%x)", mach_error_string(kr), kr);
        mach_vm_deallocate(mach_task_self(), address, round_page(size));
        return 0;
    }

    ASSERT(memoryObjectSize >= round_page(size));

    RefPtr<SharedMemory> sharedMemory(adoptRef(new SharedMemory));
    sharedMemory->m_size = size;
    sharedMemory->m_data = toPointer(address);
    sharedMemory->m_port = port;

    return sharedMemory.release();
}
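toPointer is a small WebKit helper not reproduced here; something equivalent to the following sketch (not the WebKit source):

static inline void* toPointer(mach_vm_address_t address)
{
    return reinterpret_cast<void*>(static_cast<uintptr_t>(address));
}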
Example #6
uint64_t kalloc_wired(uint64_t size) {
    kern_return_t err;
    mach_vm_address_t addr = 0;
    mach_vm_size_t ksize = round_page_kernel(size);
    
    printf("vm_kernel_page_size: %lx\n", vm_kernel_page_size);
    
    err = mach_vm_allocate(tfpzero, &addr, ksize+0x4000, VM_FLAGS_ANYWHERE);
    if (err != KERN_SUCCESS) {
        printf("unable to allocate kernel memory via tfp0: %s %x\n", mach_error_string(err), err);
        sleep(3);
        return 0;
    }
    
    printf("allocated address: %llx\n", addr);
    
    // Round up to the next 16 KB (0x4000) boundary; the extra 0x4000 bytes
    // allocated above guarantee the aligned address still fits in the region.
    addr += 0x3fff;
    addr &= ~0x3fffull;
    
    printf("address to wire: %llx\n", addr);
    
    err = mach_vm_wire(fake_host_priv(), tfpzero, addr, ksize, VM_PROT_READ|VM_PROT_WRITE);
    if (err != KERN_SUCCESS) {
        printf("unable to wire kernel memory via tfp0: %s %x\n", mach_error_string(err), err);
        sleep(3);
        return 0;
    }
    return addr;
}
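tfpzero and fake_host_priv() come from the surrounding jailbreak framework (a kernel task port and a fabricated host-priv port) and are not defined in the example. A usage sketch under that assumption:

// Hypothetical caller: allocate a wired 16 KB kernel scratch buffer.
uint64_t kbuf = kalloc_wired(0x4000);
if (kbuf != 0) {
    printf("wired kernel buffer at 0x%llx\n", kbuf);
}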
Example #7
boolean_t
test_fileio() {
	mach_vm_address_t addr1 = 0;
	mach_vm_address_t addr2 = 0;
	mach_vm_size_t	size = SUPERPAGE_SIZE;
	int kr, ret;
	int fd;
	ssize_t bytes;	/* signed, so a failed read(2) returning -1 is caught below */
	
	/* allocate one superpage */
	kr = mach_vm_allocate(mach_task_self(), &addr1, size, VM_FLAGS_ANYWHERE | VM_FLAGS_SUPERPAGE_SIZE_2MB);
	if (!(ret = check_kr(kr, "mach_vm_allocate (1)"))) return ret;

	/* allocate base pages (superpage-sized) */
	kr = mach_vm_allocate(mach_task_self(), &addr2, size, VM_FLAGS_ANYWHERE);
	if (!(ret = check_kr(kr, "mach_vm_allocate (2)"))) return ret;

	if ((fd = open(FILENAME, O_RDONLY))<0) {
		sprintf(error, "couldn't open %s", FILENAME);
		return FALSE;
	}
	fcntl(fd, F_NOCACHE, 1);
	/* read kernel into superpage */
	if ((bytes = read(fd, (void*)(uintptr_t)addr1, SUPERPAGE_SIZE)) < SUPERPAGE_SIZE) {
		sprintf(error, "short read (1)");
		return FALSE;
	}
	lseek(fd, 0, SEEK_SET);
	/* read kernel into base pages */
	if ((bytes = read(fd, (void*)(uintptr_t)addr2, SUPERPAGE_SIZE)) < SUPERPAGE_SIZE) {
		sprintf(error, "short read (2)");
		return FALSE;
	}
	close(fd);
	
	/* compare */
	if (memcmp((void*)(uintptr_t)addr1, (void*)(uintptr_t)addr2, bytes)) {
		sprintf(error, "read data corrupt");
		return FALSE;
	}

	kr = mach_vm_deallocate(mach_task_self(), addr1, size);
	if (!(ret = check_kr(kr, "mach_vm_deallocate (1)"))) return ret;
	kr = mach_vm_deallocate(mach_task_self(), addr2, size);
	if (!(ret = check_kr(kr, "mach_vm_deallocate (2)"))) return ret;
	return TRUE;
}
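FILENAME is defined elsewhere in the test file; it just has to name a file at least SUPERPAGE_SIZE bytes long. The xnu test suite reads the kernel image itself (an assumption worth checking against your source tree):

#define FILENAME "/mach_kernel"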
Example #8
MachDebugger::MachDebugger() : scratch_(0) {
    // Creates a new debugger and starts accepting input from the user. 

    // Allocate the scratch area, which is used to read data from the traced
    // process.  This area must be aligned to a page boundary, so just alloc
    // two pages.  We need two pages to handle reading memory regions that span
    // a page boundary.
    int const flags = VM_FLAGS_ANYWHERE;
    VMCHECK(mach_vm_allocate(mach_task_self(), &scratch_, slen_, flags));
}
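The listing leaves out the scratch_/slen_ members and the VMCHECK macro. One plausible shape for them, consistent with the constructor above (assumptions, not the original class):

#include <mach/mach.h>
#include <mach/mach_vm.h>
#include <cstdio>
#include <cstdlib>

// Abort loudly if a Mach VM call fails.
#define VMCHECK(expr)                                                   \
    do {                                                                \
        kern_return_t vmcheck_kr = (expr);                              \
        if (vmcheck_kr != KERN_SUCCESS) {                               \
            std::fprintf(stderr, "%s failed: %s\n", #expr,              \
                         mach_error_string(vmcheck_kr));                \
            std::abort();                                               \
        }                                                               \
    } while (0)

class MachDebugger {
public:
    MachDebugger();
private:
    mach_vm_address_t scratch_;             // start of the scratch area
    mach_vm_size_t slen_ = 2 * PAGE_SIZE;   // two pages, per the comment above
};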
Example #9
kern_return_t
kext_alloc(vm_offset_t *_addr, vm_size_t size, boolean_t fixed)
{
    kern_return_t rval = 0;
#if CONFIG_KEXT_BASEMENT
    mach_vm_offset_t addr = (fixed) ? *_addr : kext_post_boot_base;
#else
    mach_vm_offset_t addr = (fixed) ? *_addr : kext_alloc_base;
#endif
    int flags = (fixed) ? VM_FLAGS_FIXED : VM_FLAGS_ANYWHERE;
 
#if CONFIG_KEXT_BASEMENT
    /* Allocate the kext virtual memory
     * 10608884 - use mach_vm_map since we want VM_FLAGS_ANYWHERE allocated past
     * kext_post_boot_base (when possible).  mach_vm_allocate will always 
     * start at 0 into the map no matter what you pass in addr.  We want non 
     * fixed (post boot) kext allocations to start looking for free space 
     * just past where prelinked kexts have loaded.  
     */
    rval = mach_vm_map(g_kext_map, 
                       &addr, 
                       size, 
                       0,
                       flags,
                       MACH_PORT_NULL,
                       0,
                       TRUE,
                       VM_PROT_DEFAULT,
                       VM_PROT_ALL,
                       VM_INHERIT_DEFAULT);
    if (rval != KERN_SUCCESS) {
        printf("mach_vm_map failed - %d\n", rval);
        goto finish;
    }
#else
    rval = mach_vm_allocate(g_kext_map, &addr, size, flags);
    if (rval != KERN_SUCCESS) {
        printf("vm_allocate failed - %d\n", rval);
        goto finish;
    }
#endif

    /* Check that the memory is reachable by kernel text */
    if ((addr + size) > kext_alloc_max) {
        kext_free((vm_offset_t)addr, size);
        rval = KERN_INVALID_ADDRESS;
        goto finish;
    }

    *_addr = (vm_offset_t)addr;
    rval = KERN_SUCCESS;

finish:
    return rval;
}
Example #10
int
main(void)
{
    kern_return_t  kr;
    int            status;
    mach_port_t    mytask = mach_task_self();
    mach_vm_size_t size = (mach_vm_size_t)vm_page_size;
   
    kr = mach_vm_allocate(mytask, &page_shared, size, VM_FLAGS_ANYWHERE);
    OUT_ON_MACH_ERROR("vm_allocate", kr);
   
    kr = mach_vm_allocate(mytask, &page_cow, size, VM_FLAGS_ANYWHERE);
    OUT_ON_MACH_ERROR("vm_allocate", kr);
   
    kr = mach_vm_inherit(mytask, page_shared, size, VM_INHERIT_SHARE);
    OUT_ON_MACH_ERROR("vm_inherit(VM_INHERIT_SHARE)", kr);
   
    kr = mach_vm_inherit(mytask, page_cow, size, VM_INHERIT_COPY);
    OUT_ON_MACH_ERROR("vm_inherit(VM_INHERIT_COPY)", kr);
   
    FIRST_UINT32(page_shared) = (unsigned int)0xAAAAAAAA;
    FIRST_UINT32(page_cow)    = (unsigned int)0xBBBBBBBB;
   
    printf("%-12s%-8s%-10s%-12s%-10s%s\n",
           "Process", "Page", "Contents", "VM Object", "Refcount", "Event");
   
    peek_at_some_memory("parent", "before forking");
   
    if (fork() == 0)
        child_process(); // this will also exit the child
   
    wait(&status);
   
    peek_at_some_memory("parent", "after child is done");
   
out:
    mach_vm_deallocate(mytask, page_shared, size);
    mach_vm_deallocate(mytask, page_cow, size);
   
    exit(0);
}
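This example (adapted from a classic Mac OS X copy-on-write demo) relies on globals and macros the excerpt omits. A reconstruction of the likely definitions (assumed, not copied from the original):

#include <mach/mach.h>
#include <mach/mach_vm.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/wait.h>
#include <unistd.h>

static mach_vm_address_t page_shared;  // will be inherited VM_INHERIT_SHARE
static mach_vm_address_t page_cow;     // will be inherited VM_INHERIT_COPY

// Treat the first four bytes of a page as an unsigned int lvalue.
#define FIRST_UINT32(addr) (*((unsigned int *)(uintptr_t)(addr)))

// Log a Mach error and bail out through the cleanup label.
#define OUT_ON_MACH_ERROR(msg, kr) \
    if ((kr) != KERN_SUCCESS) { mach_error(msg ":", kr); goto out; }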
Example #11
PassRefPtr<SharedMemory> SharedMemory::create(size_t size)
{
    mach_vm_address_t address = 0;
    kern_return_t kr = mach_vm_allocate(mach_task_self(), &address, round_page(size), VM_FLAGS_ANYWHERE);
    if (kr != KERN_SUCCESS)
        return 0;

    RefPtr<SharedMemory> sharedMemory(adoptRef(new SharedMemory));
    sharedMemory->m_size = size;
    sharedMemory->m_data = toPointer(address);

    return sharedMemory.release();
}
Example #12
/*
 * If we deallocate a sub-page of a superpage,
 * - the call should succeed
 * - make the complete memory inaccessible
 */
boolean_t
test_deallocatesubpage() {
	int kr;
	int ret;
	mach_vm_address_t addr = 0;
	mach_vm_size_t	size = SUPERPAGE_SIZE;
	
	kr = mach_vm_allocate(mach_task_self(), &addr, size, VM_FLAGS_ANYWHERE | VM_FLAGS_SUPERPAGE_SIZE_2MB);
	if (!(ret = check_kr(kr, "mach_vm_allocate"))) return ret;
	kr = mach_vm_deallocate(mach_task_self(), addr + PAGE_SIZE, size);
	if (!(ret = check_kr(kr, "mach_vm_deallocate"))) return ret;
	if (!(ret = check_nr(addr, size, NULL))) return ret;
	return TRUE;
}
Example #13
/*
 * If we allocate an amount of memory not divisible by 2 MB as a 2 MB superpage
 * - the call should fail
 */
boolean_t
test_allocateoddsize() {
	int kr;
	int ret;
	mach_vm_address_t addr = FIXED_ADDRESS1;
	mach_vm_size_t	size = PAGE_SIZE; /* != 2 MB */

	kr = mach_vm_allocate(mach_task_self(), &addr, size, VM_FLAGS_SUPERPAGE_SIZE_2MB);
	/* is supposed to fail */
	if ((ret = check_kr(kr, "mach_vm_allocate"))) {
		sprintf(error, "mach_vm_allocate() should have failed");
		return FALSE;
	}
	return TRUE;
}
Example #14
/*
 * If we allocate a 2 MB superpage read-write without specifying an address,
 * - the call should succeed
 * - not return 0
 * - return a 2 MB aligned address
 * - the memory should be readable and writable
 */
boolean_t
test_allocate() {
	int kr, ret;

	global_addr = 0;
	global_size = SUPERPAGE_SIZE;
	
	kr = mach_vm_allocate(mach_task_self(), &global_addr, global_size, VM_FLAGS_ANYWHERE | VM_FLAGS_SUPERPAGE_SIZE_2MB);
	if (!(ret = check_kr(kr, "mach_vm_allocate"))) return ret;
	if (!(ret = check_addr0(global_addr, "mach_vm_allocate"))) return ret;
	if (!(ret = check_align(global_addr))) return ret;
	if (!(ret = check_rw(global_addr, global_size))) return ret;

	return TRUE;
}
Example #15
/*
 * Tests one allocation/deallocaton cycle; used in a loop this tests for leaks
 */
boolean_t
test_alloc_dealloc() {
	mach_vm_address_t addr = 0;
	mach_vm_size_t	size = SUPERPAGE_SIZE;
	int kr, ret;
	
	kr = mach_vm_allocate(mach_task_self(), &addr, size, VM_FLAGS_ANYWHERE | VM_FLAGS_SUPERPAGE_SIZE_2MB);
	if (!(ret = check_kr(kr, "mach_vm_allocate"))) return ret;
	if (!(ret = check_addr0(addr, "mach_vm_allocate"))) return ret;
	if (!(ret = check_align(addr))) return ret;
	if (!(ret = check_rw(addr, size))) return ret;
	kr = mach_vm_deallocate(mach_task_self(), addr, size);
	if (!(ret = check_kr(kr, "mach_vm_deallocate"))) return ret;
	return TRUE;
}
Example #16
/*
 * If we allocate a 2 MB superpage read-write at a 2 MB aligned address,
 * - the call should succeed
 * - return the address we wished for
 * - the memory should be readable and writable
 * If we deallocate it,
 * - the call should succeed
 * - make the memory inaccessible
 */
boolean_t
test_allocatefixed() {
	int kr;
	int ret;
	mach_vm_address_t addr = FIXED_ADDRESS1;
	mach_vm_size_t	size = SUPERPAGE_SIZE;

	kr = mach_vm_allocate(mach_task_self(), &addr, size, VM_FLAGS_SUPERPAGE_SIZE_2MB);
	if (!(ret = check_kr(kr, "mach_vm_allocate"))) return ret;
	if (!(ret = check_addr(addr, FIXED_ADDRESS1, "mach_vm_allocate"))) return ret;
	if (!(ret = check_rw(addr, size))) return ret;
	kr = mach_vm_deallocate(mach_task_self(), addr, size);
	if (!(ret = check_kr(kr, "mach_vm_deallocate"))) return ret;
	if (!(ret = check_nr(addr, size, NULL))) return ret;
	return TRUE;
}
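FIXED_ADDRESS1 is defined elsewhere in the suite; any unused, 2 MB-aligned address in the process works. One plausible choice (a placeholder, not the suite's actual value):

#define FIXED_ADDRESS1 0x200000000ULL  /* 8 GB: 2 MB-aligned, clear of the image */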
Example #17
/*
 * If we allocate a superpage of any size read-write without specifying an address
 * - the call should succeed
 * - not return 0
 * - the memory should be readable and writable
 * If we deallocate it,
 * - the call should succeed
 * - make the memory inaccessible
 */
boolean_t
test_allocate_size_any() {
	int kr;
	int ret;
	mach_vm_address_t addr = 0;
	mach_vm_size_t	size = 2*PAGE_SIZE; /* will be rounded up to some superpage size */

	kr = mach_vm_allocate(mach_task_self(), &addr, size, VM_FLAGS_ANYWHERE | VM_FLAGS_SUPERPAGE_SIZE_ANY);
	if (!(ret = check_kr(kr, "mach_vm_allocate"))) return ret;
	if (!(ret = check_addr0(addr, "mach_vm_allocate"))) return ret;
	if (!(ret = check_rw(addr, size))) return ret;
	kr = mach_vm_deallocate(mach_task_self(), addr, size);
	if (!(ret = check_kr(kr, "mach_vm_deallocate"))) return ret;
	if (!(ret = check_nr(addr, size, NULL))) return ret;
	return TRUE;
}
Example #18
char *
allocate(int pid, mach_vm_address_t address, mach_vm_size_t size)
{
    vm_map_t port = getport(pid);

    /* Use a mach_vm_address_t out-parameter rather than casting a char**:
     * a char* is narrower than mach_vm_address_t on 32-bit builds. */
    mach_vm_address_t data = 0;
    kern_return_t err = mach_vm_allocate(port, &data, size, VM_FLAGS_ANYWHERE);

    if (err != KERN_SUCCESS)
    {
        fprintf(stderr, "[IMPLEMENTATION.C] allocate failed!\n");
        return NULL;
    }
    return (char *)(uintptr_t)data;
}
Example #19
RefPtr<SharedMemory> SharedMemory::allocate(size_t size)
{
    ASSERT(size);

    mach_vm_address_t address = 0;
    kern_return_t kr = mach_vm_allocate(mach_task_self(), &address, round_page(size), VM_FLAGS_ANYWHERE);
    if (kr != KERN_SUCCESS) {
        LOG_ERROR("Failed to allocate mach_vm_allocate shared memory (%zu bytes). %s (%x)", size, mach_error_string(kr), kr);
        return nullptr;
    }

    auto sharedMemory = adoptRef(*new SharedMemory);
    sharedMemory->m_size = size;
    sharedMemory->m_data = toPointer(address);
    sharedMemory->m_port = MACH_PORT_NULL;
    sharedMemory->m_protection = Protection::ReadWrite;

    return WTFMove(sharedMemory);
}
Example #20
PassRefPtr<SharedMemory> SharedMemory::create(size_t size)
{
    ASSERT(size);

    mach_vm_address_t address = 0;
    kern_return_t kr = mach_vm_allocate(mach_task_self(), &address, round_page(size), VM_FLAGS_ANYWHERE);
    if (kr != KERN_SUCCESS) {
        LOG_ERROR("Failed to allocate mach_vm_allocate shared memory (%zu bytes). %s (%x)", size, mach_error_string(kr), kr);
        return 0;
    }

    RefPtr<SharedMemory> sharedMemory = createFromVMBuffer(toPointer(address), size);
    if (!sharedMemory) {
        mach_vm_deallocate(mach_task_self(), address, round_page(size));
        return 0;
    }
    
    sharedMemory->m_shouldVMDeallocateData = true;
    return sharedMemory.release();
}
Example #21
kern_return_t
vm_allocate(
	mach_port_name_t task,
	vm_address_t *address,
	vm_size_t size,
	int flags)
{
	kern_return_t rv;
	mach_vm_address_t mach_addr;

	mach_addr = (mach_vm_address_t)*address;
	rv = mach_vm_allocate(task, &mach_addr, size, flags);
#if defined(__LP64__)
	*address = mach_addr;
#else
	*address = (vm_address_t)(mach_addr & ((vm_address_t)-1));
#endif

	return (rv);
}
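A short usage sketch: this compatibility shim lets callers keep using the narrow vm_address_t interface while the actual work happens in mach_vm_allocate, with the result truncated back on ILP32.

vm_address_t buffer = 0;
kern_return_t kr = vm_allocate(mach_task_self(), &buffer, vm_page_size, VM_FLAGS_ANYWHERE);
if (kr == KERN_SUCCESS) {
    /* ... use the page ... */
    vm_deallocate(mach_task_self(), buffer, vm_page_size);
}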
Example #22
/*
 * If we try to write-protect superpages
 * - the call should succeed
 * - the memory should remain readable
 * - the memory should not be writable
 */
boolean_t
test_readonly() {
	int kr;
	int ret;
	mach_vm_address_t addr = 0;
	mach_vm_size_t	size = SUPERPAGE_SIZE;

	kr = mach_vm_allocate(mach_task_self(), &addr, size, VM_FLAGS_ANYWHERE | VM_FLAGS_SUPERPAGE_SIZE_2MB);
	if (!(ret = check_kr(kr, "mach_vm_allocate"))) return ret;

	kr = mach_vm_protect(mach_task_self(), addr, size, 0, VM_PROT_READ);
	if (!(ret = check_kr(kr, "mach_vm_protect"))) return ret;

	if (!(ret = check_r(addr, size, NULL))) return ret;
	if (!(ret = check_nw(addr, size))) return ret;

	kr = mach_vm_deallocate(mach_task_self(), addr, size);
	if (!(ret = check_kr(kr, "mach_vm_deallocate"))) return ret;

	return TRUE;
}
Example #23
static bool
CreateThePort(mach_vm_address_t& child_address)
{
  mach_vm_address_t address = 0;
  mach_port_t port;
  size_t size = 8000;

  kern_return_t kr = mach_vm_allocate(mach_task_self(), &address, round_page(size), VM_FLAGS_ANYWHERE);
  if (kr != KERN_SUCCESS) {
    printf("Failed to allocate mach_vm_allocate shared memory (%zu bytes). %s (%x)", size, mach_error_string(kr), kr);
    return false;
  }

  memory_object_size_t memoryObjectSize = round_page(size);

  kr = mach_make_memory_entry_64(mach_task_self(), &memoryObjectSize, address, VM_PROT_DEFAULT, &port, MACH_PORT_NULL);
  if (kr != KERN_SUCCESS) {
    printf("Failed to make memory entry (%zu bytes). %s (%x)\n", size, mach_error_string(kr), kr);
    return false;
  }

  vm_prot_t vmProtection = VM_PROT_READ | VM_PROT_WRITE;

  // Choose an address that will be valid in the child process and point to our buffer.
  // child_address must not be dereferenced in the parent process.
  child_address = address + 0x10000;

  kr = mach_vm_map(child_task, &child_address, round_page(size), 0, 0,
                  port, 0, false, vmProtection, vmProtection, VM_INHERIT_NONE);
  if (kr != KERN_SUCCESS) {
    printf("Failed to mach_vm_map (%zu bytes). %s (%x)\n", size, mach_error_string(kr), kr);
    return false;
  }

  int* buf = reinterpret_cast<int*>(static_cast<uintptr_t>(address));

  buf[0] = 42;

  return true;
}
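child_task is assumed to be a send right to the child's task port, obtained out of band; the listing does not show how it is acquired.

// Assumed context, not part of the original listing:
static task_t child_task;  // e.g. filled in by task_for_pid(mach_task_self(), child_pid, &child_task)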
Example #24
/*
 * If we try to wire superpages
 * - the call should succeed
 * - the memory should remain readable and writable
 */
boolean_t
test_wire() {
	int kr;
	int ret;
	mach_vm_address_t addr = 0;
	mach_vm_size_t	size = SUPERPAGE_SIZE;

	kr = mach_vm_allocate(mach_task_self(), &addr, size, VM_FLAGS_ANYWHERE | VM_FLAGS_SUPERPAGE_SIZE_2MB);
	if (!(ret = check_kr(kr, "mach_vm_allocate"))) return ret;

	kr = mach_vm_wire(mach_host_self(), mach_task_self(), addr, size, VM_PROT_WRITE | VM_PROT_READ);

	if (!geteuid()) /* may fail as user */
		if (!(ret = check_kr(kr, "mach_vm_wire"))) return ret;

	if (!(ret = check_rw(addr, size))) return ret;

	kr = mach_vm_deallocate(mach_task_self(), addr, size);
	if (!(ret = check_kr(kr, "mach_vm_deallocate"))) return ret;

	return TRUE;
}
Example #25
int
_kernelrpc_mach_vm_allocate_trap(struct _kernelrpc_mach_vm_allocate_trap_args *args)
{
	mach_vm_offset_t addr;
	task_t task = port_name_to_task(args->target);
	int rv = MACH_SEND_INVALID_DEST;

	if (task != current_task())
		goto done;

	if (copyin(args->addr, (char *)&addr, sizeof (addr)))
		goto done;

	rv = mach_vm_allocate(task->map, &addr, args->size, args->flags);
	if (rv == KERN_SUCCESS)
		rv = copyout(&addr, args->addr, sizeof (addr));
	
done:
	if (task)
		task_deallocate(task);
	return (rv);
}
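The args structure is declared in xnu's syscall glue; its fields follow directly from the usage above. This sketch ignores the argument-padding macros the real header wraps each field in:

struct _kernelrpc_mach_vm_allocate_trap_args {
	mach_port_name_t target;   /* task port name; must name the current task */
	user_addr_t      addr;     /* user pointer to an in/out mach_vm_offset_t */
	mach_vm_size_t   size;
	int              flags;
};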
Example #26
/*
 * If we try to wire superpages
 * - the call should fail
 * - the memory should remain readable and writable
 * Currently, superpages are always wired.
 */
boolean_t
test_unwire() {
	int kr;
	int ret;
	mach_vm_address_t addr = 0;
	mach_vm_size_t	size = SUPERPAGE_SIZE;

	kr = mach_vm_allocate(mach_task_self(), &addr, size, VM_FLAGS_ANYWHERE | VM_FLAGS_SUPERPAGE_SIZE_2MB);
	if (!(ret = check_kr(kr, "mach_vm_allocate"))) return ret;

	kr = mach_vm_wire(mach_host_self(), mach_task_self(), addr, size, VM_PROT_NONE);
	if ((ret = check_kr(kr, "mach_vm_wire"))) {
		sprintf(error, "could unwire");
		return FALSE;
	}

	if (!(ret = check_rw(addr, size))) return ret;

	kr = mach_vm_deallocate(mach_task_self(), addr, size);
	if (!(ret = check_kr(kr, "mach_vm_deallocate"))) return ret;

	return TRUE;
}
Example #27
/******************************************************************************
 * Allocates huge pages
 ******************************************************************************/
void *
pixie_alloc_huge(size_t size, int *err)
{
    
#if defined(WIN32)
    void *result;
    
    /* On Windows, the user account needs privileges to use huge pages, which
     * it doesn't by default */
    Privilege(TEXT("SeLockMemoryPrivilege"), TRUE);
    

    /* Attempt the allocation */
    result = VirtualAlloc(
                          0, /* have the OS assign an address */
                          size,
                          MEM_LARGE_PAGES | MEM_COMMIT, 
                          PAGE_READWRITE);
    
    /* Handle permission error  */
    if (result == NULL && GetLastError() == ERROR_PRIVILEGE_NOT_HELD) {
        *err = HugeErr_NoPermissions;
        return result;
    }
    
    /* Handle fragmented error */
    if (result == NULL && GetLastError() == 3) {
        *err = HugeErr_MemoryFragmented;
        return result;
    }
    if (result == NULL && GetLastError() == ERROR_NO_SYSTEM_RESOURCES) {
        *err = HugeErr_MemoryFragmented;
        return result;
    }
    
    /* Handle any other error */
    if (result == NULL) {
        fprintf(stderr, "err = %u\n", GetLastError());
        win_perror("VirtualAlloc(MEM_LARGE_PAGES)");
        *err = HugeErr_Unknown;
        return result;
    }
    
    *err = 0;
    return result;
#elif defined(__APPLE__)
    kern_return_t kr;
    mach_vm_address_t pointer = 0;

    kr = mach_vm_allocate(
                          mach_task_self(), 
                          &pointer, 
                          size, 
                          VM_FLAGS_ANYWHERE | VM_FLAGS_SUPERPAGE_SIZE_ANY);
    
    /* Handle fragmented error: KERN_NO_SPACE (3). The original compared
     * against ESRCH, which happens to be the same numeric value. */
    if (kr == KERN_NO_SPACE) {
        *err = HugeErr_MemoryFragmented;
        return 0;
    }
    
    /* Handle any other error */
    if (kr != KERN_SUCCESS) {
        *err = HugeErr_Unknown;
        return 0;
    }

    *err = 0;
    return (void*)pointer;
#else
    void *result;
    
    result = mmap(NULL, /* no existing memory */
                  size,
                  PROT_READ | PROT_WRITE, /* normal read/write */
                  MAP_PRIVATE | MAP_ANONYMOUS | MAP_POPULATE | MAP_HUGETLB, 
                  -1,   /* no file descriptor */
                  0     /* no offset */
                  );
    if (result == (void*)-1 && errno == ENOMEM) {
        *err = HugeErr_MemoryFragmented;
        return 0;
    }
    if (result == (void*)-1) {
        printf("err = %d\n", errno);
        perror("mmap");
        *err = HugeErr_Unknown;
        return 0;
    }
    *err = 0;
    return result;
#endif
}
Example #28
bool ZGAllocateMemory(ZGMemoryMap processTask, ZGMemoryAddress *address, ZGMemorySize size)
{
	return (mach_vm_allocate(processTask, address, size, VM_FLAGS_ANYWHERE) == KERN_SUCCESS);
}
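A usage sketch, assuming Bit Slicer-style typedefs (ZGMemoryMap for the task port, ZGMemoryAddress for mach_vm_address_t) and a processTask acquired elsewhere, e.g. via task_for_pid:

ZGMemoryAddress address = 0;  // 0 plus VM_FLAGS_ANYWHERE: let the kernel pick
if (ZGAllocateMemory(processTask, &address, 0x1000)) {
	// ... use the region, then release it with the matching deallocate call ...
}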
Example #29
static
load_return_t
load_segment(
	struct load_command		*lcp,
	uint32_t			filetype,
	void *				control,
	off_t				pager_offset,
	off_t				macho_size,
	struct vnode			*vp,
	vm_map_t			map,
	int64_t				slide,
	load_result_t		*result
)
{
	struct segment_command_64 segment_command, *scp;
	kern_return_t		ret;
	vm_map_offset_t		map_addr, map_offset;
	vm_map_size_t		map_size, seg_size, delta_size;
	vm_prot_t 		initprot;
	vm_prot_t		maxprot;
	size_t			segment_command_size, total_section_size,
				single_section_size;
	boolean_t		prohibit_pagezero_mapping = FALSE;
	
	if (LC_SEGMENT_64 == lcp->cmd) {
		segment_command_size = sizeof(struct segment_command_64);
		single_section_size  = sizeof(struct section_64);
	} else {
		segment_command_size = sizeof(struct segment_command);
		single_section_size  = sizeof(struct section);
	}
	if (lcp->cmdsize < segment_command_size)
		return (LOAD_BADMACHO);
	total_section_size = lcp->cmdsize - segment_command_size;

	if (LC_SEGMENT_64 == lcp->cmd)
		scp = (struct segment_command_64 *)lcp;
	else {
		scp = &segment_command;
		widen_segment_command((struct segment_command *)lcp, scp);
	}

	/*
	 * Make sure what we get from the file is really ours (as specified
	 * by macho_size).
	 */
	if (scp->fileoff + scp->filesize < scp->fileoff ||
	    scp->fileoff + scp->filesize > (uint64_t)macho_size)
		return (LOAD_BADMACHO);
	/*
	 * Ensure that the number of sections specified would fit
	 * within the load command size.
	 */
	if (total_section_size / single_section_size < scp->nsects)
		return (LOAD_BADMACHO);
	/*
	 * Make sure the segment is page-aligned in the file.
	 */
	if ((scp->fileoff & PAGE_MASK_64) != 0)
		return (LOAD_BADMACHO);

	/*
	 *	Round sizes to page size.
	 */
	seg_size = round_page_64(scp->vmsize);
	map_size = round_page_64(scp->filesize);
	map_addr = trunc_page_64(scp->vmaddr); /* JVXXX note that in XNU TOT this is round instead of trunc for 64 bits */
	if (seg_size == 0)
		return (LOAD_SUCCESS);
	if (map_addr == 0 &&
	    map_size == 0 &&
	    seg_size != 0 &&
	    (scp->initprot & VM_PROT_ALL) == VM_PROT_NONE &&
	    (scp->maxprot & VM_PROT_ALL) == VM_PROT_NONE) {
		/*
		 * For PIE, extend page zero rather than moving it.  Extending
		 * page zero keeps early allocations from falling predictably
		 * between the end of page zero and the beginning of the first
		 * slid segment.
		 */
		seg_size += slide;
		slide = 0;
#if CONFIG_EMBEDDED
		prohibit_pagezero_mapping = TRUE;
#endif
		/* XXX (4596982) this interferes with Rosetta, so limit to 64-bit tasks */
		if (scp->cmd == LC_SEGMENT_64) {
		        prohibit_pagezero_mapping = TRUE;
		}
		
		if (prohibit_pagezero_mapping) {
			/*
			 * This is a "page zero" segment:  it starts at address 0,
			 * is not mapped from the binary file and is not accessible.
			 * User-space should never be able to access that memory, so
			 * make it completely off limits by raising the VM map's
			 * minimum offset.
			 */
			ret = vm_map_raise_min_offset(map, seg_size);
			if (ret != KERN_SUCCESS) {
				return (LOAD_FAILURE);
			}
			return (LOAD_SUCCESS);
		}
	}

	/* If a non-zero slide was specified by the caller, apply now */
	map_addr += slide;

	if (map_addr < result->min_vm_addr)
		result->min_vm_addr = map_addr;
	if (map_addr+seg_size > result->max_vm_addr)
		result->max_vm_addr = map_addr+seg_size;

	if (map == VM_MAP_NULL)
		return (LOAD_SUCCESS);

	map_offset = pager_offset + scp->fileoff;	/* limited to 32 bits */

	if (map_size > 0) {
		initprot = (scp->initprot) & VM_PROT_ALL;
		maxprot = (scp->maxprot) & VM_PROT_ALL;
		/*
		 *	Map a copy of the file into the address space.
		 */
		ret = vm_map_enter_mem_object_control(map,
				&map_addr, map_size, (mach_vm_offset_t)0,
			        VM_FLAGS_FIXED,	control, map_offset, TRUE,
				initprot, maxprot,
				VM_INHERIT_DEFAULT);
		if (ret != KERN_SUCCESS)
			return (LOAD_NOSPACE);
	
		/*
		 *	If the file didn't end on a page boundary,
		 *	we need to zero the leftover.
		 */
		delta_size = map_size - scp->filesize;
#if FIXME
		if (delta_size > 0) {
			mach_vm_offset_t	tmp;
	
			ret = mach_vm_allocate(kernel_map, &tmp, delta_size, VM_FLAGS_ANYWHERE);
			if (ret != KERN_SUCCESS)
				return(LOAD_RESOURCE);
	
			if (copyout(tmp, map_addr + scp->filesize,
								delta_size)) {
				(void) mach_vm_deallocate(
						kernel_map, tmp, delta_size);
				return (LOAD_FAILURE);
			}
	
			(void) mach_vm_deallocate(kernel_map, tmp, delta_size);
		}
#endif /* FIXME */
	}

	/*
	 *	If the virtual size of the segment is greater
	 *	than the size from the file, we need to allocate
	 *	zero fill memory for the rest.
	 */
	delta_size = seg_size - map_size;
	if (delta_size > 0) {
		mach_vm_offset_t tmp = map_addr + map_size;

		ret = mach_vm_map(map, &tmp, delta_size, 0, VM_FLAGS_FIXED,
				  NULL, 0, FALSE,
				  scp->initprot, scp->maxprot,
				  VM_INHERIT_DEFAULT);
		if (ret != KERN_SUCCESS)
			return(LOAD_NOSPACE);
	}

	if ( (scp->fileoff == 0) && (scp->filesize != 0) )
		result->mach_header = map_addr;

	if (scp->flags & SG_PROTECTED_VERSION_1) {
		ret = unprotect_segment(scp->fileoff,
					scp->filesize,
					vp,
					pager_offset,
					map,
					map_addr,
					map_size);
	} else {
		ret = LOAD_SUCCESS;
	}
	if (LOAD_SUCCESS == ret && filetype == MH_DYLINKER &&
	    result->all_image_info_addr == MACH_VM_MIN_ADDRESS)
		note_all_image_info_section(scp,
		    LC_SEGMENT_64 == lcp->cmd, single_section_size,
		    (const char *)lcp + segment_command_size, slide, result);

	if ((result->entry_point >= map_addr) && (result->entry_point < (map_addr + map_size)))
		result->validentry = 1;

	return ret;
}
Example #30
static tb_bool_t it_inject(pid_t pid, tb_char_t const* path) 
{
    // check
    tb_assert_and_check_return_val(pid && path, tb_false);

    // trace
    tb_trace_d("inject: pid: %lu, path: %s: ..", (tb_size_t)pid, path);

#ifdef TB_ARCH_ARM64
    // uses libsubstrate first?
    if (tb_file_info("/usr/lib/libsubstrate.dylib", tb_null))
    {
        // init library
        tb_bool_t   ok = tb_false;
        tb_handle_t library = tb_dynamic_init("/usr/lib/libsubstrate.dylib");
        if (library)
        {
            // trace
            tb_trace_d("library: %p", library);

            // the func
            it_MSHookProcess_t pMSHookProcess = tb_dynamic_func(library, "MSHookProcess");
            if (pMSHookProcess)
            {
                // trace
                tb_trace_d("MSHookProcess: %p", pMSHookProcess);

                // hook process
                ok = pMSHookProcess(pid, path)? tb_true : tb_false;
            }

            // exit library
            tb_dynamic_exit(library);

            // trace
            tb_trace_i("%s", ok? "ok" : "no");

            // ok?
            return ok;
        }
    }
#endif

    // pid => task
    task_t task = 0;
    if (task_for_pid(mach_task_self(), (tb_int_t)pid, &task)) 
    {
        tb_trace_i("task_for_pid: %lu failed, errno: %d", (tb_size_t)pid, errno);
        return tb_false;
    }

    // trace
    tb_trace_d("task: %u", task);

    // stuff
    cpu_type_t cputype; it_addr_bundle_t addrs;
    if (!it_stuff(task, &cputype, &addrs)) return tb_false;

    // trace
    tb_trace_d("dlopen: %p", addrs.dlopen);
    tb_trace_d("syscall: %p", addrs.syscall);

    // alloc stack 
    mach_vm_address_t stack_address = 0;
    if (mach_vm_allocate(task, &stack_address, it_stack_size, VM_FLAGS_ANYWHERE)) return tb_false;

    // write path
    mach_vm_address_t stack_end = stack_address + it_stack_size - 0x100;
    if (mach_vm_write(task, stack_address, (vm_offset_t)it_address_cast(path), strlen(path) + 1)) return tb_false;

    /* the first one is the return address
     *
     * syscall(SYS_bsdthread_create, 0xdeadbeef, 0xdeadbeef, 128 * 1024, 0, 0)
     */
    tb_uint32_t args_32[] = {0, 360, 0xdeadbeef, 0xdeadbeef, 128 * 1024, 0, 0};
    tb_uint64_t args_64[] = {0, 360, 0xdeadbeef, 0xdeadbeef, 128 * 1024, 0, 0};

    // init thread state 
    union
    {
        it_arm_thread_state_t       arm;
        it_arm_thread_state64_t     arm64;
        it_x86_thread_state32_t     x86;
        it_x86_thread_state64_t     x64;
        natural_t                   nat;

    }state;
    thread_state_flavor_t           state_flavor;
    mach_msg_type_number_t          state_count;
    memset(&state, 0, sizeof(state));   

    // init thread state for the cpu type
    switch (cputype)
    {
    case CPU_TYPE_ARM:
        {
            tb_trace_i("cputype: arm");
            memcpy(&state.arm.r[0], args_32 + 1, 4 * sizeof(tb_uint32_t));
            if (mach_vm_write(task, stack_end, (vm_offset_t)it_address_cast(args_32 + 5), 2 * sizeof(tb_uint32_t))) return tb_false;

            state.arm.sp    = (tb_uint32_t) stack_end;
            state.arm.pc    = (tb_uint32_t) addrs.syscall;
            state.arm.lr    = (tb_uint32_t) args_32[0];

            state_flavor    = ARM_THREAD_STATE;
            state_count     = sizeof(state.arm) / sizeof(state.nat);

            // trace
            tb_trace_d("init: pc: %x", state.arm.pc);
            tb_trace_d("init: lr: %x", state.arm.lr);
            tb_trace_d("init: sp: %x", state.arm.sp);
        }
        break;
    case CPU_TYPE_ARM64:
        {
            tb_trace_i("cputype: arm64");
            memcpy(&state.arm64.x[0], args_64 + 1, 6 * sizeof(tb_uint64_t));

            state.arm64.sp  = (tb_uint64_t) stack_end;
//          state.arm64.fp  = (tb_uint64_t) stack_end;
            state.arm64.pc  = (tb_uint64_t) addrs.syscall;
            state.arm64.lr  = (tb_uint64_t) args_64[0];

            state_flavor    = ARM_THREAD_STATE64;
            state_count     = sizeof(state.arm64) / sizeof(state.nat);

            // trace
            tb_trace_d("init: pc: %llx", state.arm64.pc);
            tb_trace_d("init: lr: %llx", state.arm64.lr);
            tb_trace_d("init: sp: %llx", state.arm64.sp);
        }
        break;
    case CPU_TYPE_X86:
        {
            tb_trace_i("cputype: x86");
            if (mach_vm_write(task, stack_end, (vm_offset_t)it_address_cast(args_32), 7 * 4)) return tb_false;

            state.x86.esp   = state.x86.ebp = (tb_uint32_t) stack_end;
            state.x86.eip   = (tb_uint32_t)addrs.syscall;

            state_flavor    = x86_THREAD_STATE32;
            state_count     = sizeof(state.x86) / sizeof(state.nat);
        }
        break;
    case CPU_TYPE_X86_64:
        {
            tb_trace_i("cputype: x64");
            state.x64.rdi   = args_64[1];
            state.x64.rsi   = args_64[2];
            state.x64.rdx   = args_64[3];
            state.x64.rcx   = args_64[4];
            state.x64.r8    = args_64[5];
            state.x64.r9    = args_64[6];

            state.x64.rsp   = state.x64.rbp = stack_end;
            state.x64.rip   = addrs.syscall;

            state_flavor    = x86_THREAD_STATE64;
            state_count     = sizeof(state.x64) / sizeof(state.nat);
        }
        break;
    default:
        tb_trace_i("cputype: unknown: %lx", (tb_size_t)cputype);
        return tb_false;
    }

    // init a remote thread
    thread_act_t thread = 0;
    if (thread_create(task, &thread)) return tb_false;

    // trace
    tb_trace_d("init: thread: %x", thread);

    // alloc port
    mach_port_t exc = 0;
    mach_port_allocate(mach_task_self(), MACH_PORT_RIGHT_RECEIVE, &exc);
    if (mach_port_insert_right(mach_task_self(), exc, exc, MACH_MSG_TYPE_MAKE_SEND)) return tb_false;

    // swap port
    exception_mask_t        em[2];
    exception_handler_t     eh[2];
    exception_behavior_t    eb[2];
    thread_state_flavor_t   ef[2];
    mach_msg_type_number_t  em_count = 2;
    if (task_swap_exception_ports(task, EXC_MASK_BAD_ACCESS, exc, EXCEPTION_STATE_IDENTITY, state_flavor, em, &em_count, eh, eb, ef)) return tb_false;
    tb_assert_and_check_return_val(em_count <= 1, tb_false);

    // resume thread, done: syscall(SYS_bsdthread_create, 0xdeadbeef, 0xdeadbeef, 128 * 1024, 0, 0)
    if (thread_set_state(thread, state_flavor, &state.nat, state_count)) return tb_false;
    if (thread_resume(thread)) return tb_false;

    // we expect three exceptions: one from thread when it returns, one from the new thread when it calls the fake handler, and one from the new thread when it returns from dlopen.
    tb_bool_t started_dlopen = tb_false;
    while (1) 
    {
        // recv msg
        it_exception_message_t msg;
        if (mach_msg_overwrite(tb_null, MACH_RCV_MSG, 0, sizeof(msg), exc, MACH_MSG_TIMEOUT_NONE, MACH_PORT_NULL, (tb_pointer_t) &msg, sizeof(msg))) return tb_false;

        // trace
        tb_trace_d("recv: msg: from thread: %x", msg.thread.name);

        // check
        tb_assert_and_check_return_val((msg.Head.msgh_bits & MACH_MSGH_BITS_COMPLEX), tb_false);
        tb_assert_and_check_return_val((msg.msgh_body.msgh_descriptor_count != 0), tb_false);
        tb_assert_and_check_return_val((msg.Head.msgh_size >= offsetof(it_exception_message_t, old_state)), tb_false);
        tb_assert_and_check_return_val((msg.old_stateCnt == state_count), tb_false);
        tb_assert_and_check_return_val((msg.Head.msgh_size >= offsetof(it_exception_message_t, old_state) + msg.old_stateCnt * sizeof(natural_t)), tb_false);

        // the msg state
        memcpy(&state, msg.old_state, sizeof(state));

        // dump
//      tb_dump_data((tb_byte_t const*)&state, sizeof(state));

        // done
        if (msg.thread.name == thread)
        {
            tb_trace_d("terminate");
            if (thread_terminate(thread)) return tb_false;
        } 
        else
        {
            // init cond
            tb_bool_t cond = tb_false;
            switch(cputype)
            {
            case CPU_TYPE_ARM:      
                {
                    // trace
                    tb_trace_d("recv: pc: %x", state.arm.pc);
                    tb_trace_d("recv: lr: %x", state.arm.lr);
                    tb_trace_d("recv: sp: %x", state.arm.sp);

                    // cond
                    cond = ((state.arm.pc & ~1) == 0xdeadbeee)? tb_true : tb_false;
                }
                break;
            case CPU_TYPE_ARM64:
                {
                    // trace
                    tb_trace_d("recv: pc: %llx", state.arm64.pc);
                    tb_trace_d("recv: lr: %llx", state.arm64.lr);
                    tb_trace_d("recv: sp: %llx", state.arm64.sp);

                    // cond
                    cond = ((state.arm64.pc & ~1) == 0xdeadbeee)? tb_true : tb_false;
                }
                break;
            case CPU_TYPE_X86:
                cond = (state.x86.eip == 0xdeadbeef)? tb_true : tb_false; 
                break;
            case CPU_TYPE_X86_64:
                cond = (state.x64.rip == 0xdeadbeef)? tb_true : tb_false;
                break;
            }

            tb_trace_d("cond: %d, started_dlopen: %d", cond, started_dlopen);
            if (!cond)
            {
                // let the normal crash mechanism handle it
                task_set_exception_ports(task, em[0], eh[0], eb[0], ef[0]);
                tb_assert_and_check_return_val(0, tb_false);
            }
            else if (started_dlopen)
            {
                tb_trace_d("terminate");
                if (thread_terminate(msg.thread.name)) return tb_false;
                break;
            }
            else 
            {
                // done: dlopen(path, RTLD_LAZY)
                switch(cputype)
                {
                case CPU_TYPE_ARM:
                    {
                        state.arm.r[0] = (tb_uint32_t) stack_address;
                        state.arm.r[1] = RTLD_LAZY;
                        state.arm.pc = (tb_uint32_t) addrs.dlopen;
                        state.arm.lr = 0xdeadbeef;
                    }
                    break;
                case CPU_TYPE_ARM64:
                    {
                        state.arm64.x[0] = (tb_uint64_t) stack_address;
                        state.arm64.x[1] = RTLD_LAZY;
                        state.arm64.pc = (tb_uint64_t) addrs.dlopen;
                        state.arm64.lr = 0xdeadbeef;
                    }
                    break;
                case CPU_TYPE_X86:
                    {
                        tb_uint32_t stack_stuff[3] = {0xdeadbeef, (tb_uint32_t)stack_address, RTLD_LAZY};
                        if (mach_vm_write(task, (mach_vm_address_t)state.x86.esp, (vm_offset_t)it_address_cast(&stack_stuff), sizeof(stack_stuff))) return tb_false;
                    }
                    state.x86.eip = (tb_uint32_t) addrs.dlopen;
                    break;
                case CPU_TYPE_X86_64:
                    {
                        tb_uint64_t stack_stuff = 0xdeadbeef;
                        if (mach_vm_write(task, (mach_vm_address_t)state.x64.rsp, (vm_offset_t)it_address_cast(&stack_stuff), sizeof(stack_stuff))) return tb_false;
                        state.x64.rip = addrs.dlopen;
                        state.x64.rdi = stack_address;
                        state.x64.rsi = RTLD_LAZY;
                    }
                    break;
                }

                it_exception_reply_t reply;
                memcpy(&reply.Head, &msg.Head, sizeof(mach_msg_header_t));
                reply.Head.msgh_bits &= ~MACH_MSGH_BITS_COMPLEX;
                reply.Head.msgh_size = offsetof(it_exception_reply_t, new_state) + state_count * sizeof(natural_t);
                reply.Head.msgh_id += 100;
                memcpy(&reply.NDR, &msg.NDR, sizeof(NDR_record_t));
                reply.RetCode = 0;
                reply.flavor = state_flavor;
                reply.new_stateCnt = state_count;
                memcpy(&reply.new_state, &state, sizeof(state));

                if (thread_set_state(msg.thread.name, state_flavor, &state.nat, state_count)) return tb_false;
                if (mach_msg(&reply.Head, MACH_SEND_MSG, reply.Head.msgh_size, 0, MACH_PORT_NULL, MACH_MSG_TIMEOUT_NONE, MACH_PORT_NULL)) return tb_false;
                started_dlopen = tb_true;
            }
        }
    }

    // exit
    if (stack_address) vm_deallocate(task, stack_address, it_stack_size);
    if (thread)
    {
        thread_terminate(thread);
        mach_port_deallocate(mach_task_self(), thread);
    }
    if (task) mach_port_deallocate(mach_task_self(), task);
    if (exc) mach_port_deallocate(mach_task_self(), exc);

    // ok
    tb_trace_i("ok");
    return tb_true;
}