Example No. 1
/* Returns NULL on failure; errno set */
static void *
my_mmap (void *addr, W_ size, int operation)
{
    void *ret;

#if darwin_HOST_OS
    // Without MAP_FIXED, Apple's mmap ignores addr.
    // With MAP_FIXED, it overwrites already mapped regions, which is no good either.
    // mmap(0, ... MAP_FIXED ...) is worst of all: It unmaps the program text
    // and replaces it with zeroes, causing instant death.
    // This behaviour seems to be conformant with IEEE Std 1003.1-2001.
    // Let's just use the underlying Mach Microkernel calls directly,
    // they're much nicer.

    kern_return_t err = 0;
    ret = addr;

    if(operation & MEM_RESERVE)
    {
        if(addr)    // try to allocate at address
            err = vm_allocate(mach_task_self(),(vm_address_t*) &ret,
                              size, false);
        if(!addr || err)    // try to allocate anywhere
            err = vm_allocate(mach_task_self(),(vm_address_t*) &ret,
                              size, true);
    }

    if(err) {
        // don't know what the error codes mean exactly, assume it's
        // not our problem though.
        errorBelch("memory allocation failed (requested %" FMT_Word " bytes)",
                   size);
        stg_exit(EXIT_FAILURE);
    }

    if(operation & MEM_COMMIT) {
        vm_protect(mach_task_self(), (vm_address_t)ret, size, false,
                   VM_PROT_READ|VM_PROT_WRITE);
    }

#else

    int prot, flags;
    if (operation & MEM_COMMIT)
        prot = PROT_READ | PROT_WRITE;
    else
        prot = PROT_NONE;
    if (operation == MEM_RESERVE)
# if defined(MAP_NORESERVE)
        flags = MAP_NORESERVE;
# else
#  ifdef USE_LARGE_ADDRESS_SPACE
#   error USE_LARGE_ADDRESS_SPACE needs MAP_NORESERVE
#  endif
        errorBelch("my_mmap(,,MEM_RESERVE) not supported on this platform");
# endif
    else if (operation == MEM_COMMIT)
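
Only part of the function is shown above, but the Darwin branch it does show is the interesting part: reserve zero-filled address space with vm_allocate, then grant read/write access with vm_protect when the caller asks to commit. A minimal, self-contained sketch of that reserve/commit pattern (a hypothetical standalone wrapper, not the code from the original runtime):

/* Hypothetical sketch of the reserve/commit pattern used above:
   vm_allocate reserves zero-filled address space, and vm_protect later
   makes the pages readable and writable when they are committed. */
#include <mach/mach.h>
#include <stddef.h>

static void *reserve_region(size_t size)
{
    vm_address_t addr = 0;
    /* anywhere = TRUE: let the kernel pick the address */
    if (vm_allocate(mach_task_self(), &addr, size, TRUE) != KERN_SUCCESS)
        return NULL;
    return (void *)addr;
}

static int commit_region(void *addr, size_t size)
{
    /* set_maximum = FALSE: change only the current protection */
    return vm_protect(mach_task_self(), (vm_address_t)addr, size, FALSE,
                      VM_PROT_READ | VM_PROT_WRITE) == KERN_SUCCESS ? 0 : -1;
}
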
Example No. 2
void debugInstallFreeHook(void)
{
#if defined(_WIN32) && defined(_DEBUG)
    lastCrtAllocHook = _CrtSetAllocHook(DebugAllocHook);
#endif

#ifdef __GLIBC__
    // __free_hook is not thread safe, so it is marked as deprecated.  Use here
    // is hopefully safe and should catch errors in a single threaded program
    // and only miss some in a multithreaded program
    lastFreeHook = __free_hook;
    __free_hook = DebugFreeHook;
#endif

#ifdef __APPLE__
    malloc_zone_t* zone = malloc_default_zone();
    assert(zone != NULL);
    //remove the write protection from the zone struct
    if (zone->version >= 8) {
      vm_protect(mach_task_self(), (uintptr_t)zone, sizeof(*zone), 0, VM_PROT_READ | VM_PROT_WRITE);
    }
    lastMallocZone = *zone;
    zone->free = DebugFreeHook;
    zone->free_definite_size = DebugFreeDefiniteSizeHook;
    if (zone->version >= 8) {
      vm_protect(mach_task_self(), (uintptr_t)zone, sizeof(*zone), 0, VM_PROT_READ);
    }
#endif
}
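
The DebugFreeHook and DebugFreeDefiniteSizeHook routines patched into the zone above are not shown. A hedged sketch of the signatures the malloc_zone_t slots expect, assuming the hooks simply check the pointer and forward to the originals saved in lastMallocZone:

// Hypothetical sketch: the replacements must match the malloc_zone_t
// free/free_definite_size function-pointer types and forward to the
// originals preserved in lastMallocZone.
static void DebugFreeHook(malloc_zone_t* zone, void* ptr)
{
    // ... debug checks on ptr would go here ...
    lastMallocZone.free(zone, ptr);
}

static void DebugFreeDefiniteSizeHook(malloc_zone_t* zone, void* ptr, size_t size)
{
    // ... debug checks on ptr would go here ...
    lastMallocZone.free_definite_size(zone, ptr, size);
}
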
Example No. 3
 bool memmgr::writeBytes(void *dst, std::vector<unsigned char>& bytes)
 {
     vm_prot_t prot;
     kern_return_t status = getMemoryProtection(dst, prot);
     if (status)
     {
         log().error(format("vm_region() failed at %X") % dst);
         return false;
     }
     
     status = vm_protect(mach_task_self(), (vm_address_t)dst, bytes.size(), 0, VM_PROT_ALL);
     
     if (status)
     {
         log().error(format("vm_protect() failed to set %X + %u bytes to VM_PROT_WRITE") % dst % bytes.size());
         return false;
     }
     
     unsigned char *byteDst = (unsigned char *)dst;
     
     for (size_t i = 0; i < bytes.size(); i++)
     {
         *(byteDst + i) = bytes[i];
     }
     
     // restore original protection
     vm_protect(mach_task_self(), (vm_address_t)dst, bytes.size(), 0, prot);
     
     return true;
 }
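
The getMemoryProtection helper is not part of the snippet. A plausible sketch of such a helper, assuming it simply reports the current protection of the region containing the address by querying vm_region_64:

// Hypothetical sketch of a getMemoryProtection-style helper: look up the
// VM region containing addr and report its current protection bits.
#include <mach/mach.h>

static kern_return_t getMemoryProtection(void* addr, vm_prot_t& prot)
{
    vm_address_t region = (vm_address_t)addr;
    vm_size_t regionSize = 0;
    vm_region_basic_info_data_64_t info;
    mach_msg_type_number_t count = VM_REGION_BASIC_INFO_COUNT_64;
    mach_port_t objectName = MACH_PORT_NULL;

    kern_return_t status = vm_region_64(mach_task_self(), &region, &regionSize,
                                        VM_REGION_BASIC_INFO_64,
                                        (vm_region_info_t)&info, &count,
                                        &objectName);
    if (status == KERN_SUCCESS)
        prot = info.protection;
    return status;
}
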
Example No. 4
static int mach_write_at(RIOMach *riom, const void *buff, int len, ut64 addr) {
	task_t task = riom->task;
#if 0
/* get page perms */
        kern_return_t err;
	int ret, _basic64[VM_REGION_BASIC_INFO_COUNT_64];
	vm_region_basic_info_64_t basic64 = (vm_region_basic_info_64_t)_basic64;
	mach_msg_type_number_t	infocnt;
const int pagesize = 4096;
vm_offset_t addrbase;
	mach_port_t	objname;
	vm_size_t size = pagesize;

eprintf ("   0x%llx\n", addr);
	infocnt = VM_REGION_BASIC_INFO_COUNT_64;
addrbase = addr;
size = len;
	// intentionally use VM_REGION_BASIC_INFO and get up-converted
	ret = vm_region_64 (task, &addrbase, &size, VM_REGION_BASIC_INFO_64,
					 (vm_region_info_t)basic64, &infocnt, &objname);
eprintf ("+ PERMS (%x) %llx\n", basic64->protection, addr);
	if (ret == -1) {
		eprintf ("Cant get vm region info\n");
	}

#endif
/* get page perms */

        // XXX SHOULD RESTORE PERMS LATER!!!
        if (vm_protect (task, addr, len, 0, VM_PROT_COPY | VM_PROT_READ | VM_PROT_WRITE | VM_PROT_EXECUTE) != KERN_SUCCESS)
		//if (mach_vm_protect (task, addr, len, 0, VM_PROT_READ | VM_PROT_WRITE) != KERN_SUCCESS)
			if (vm_protect (task, addr, len, 0, VM_PROT_WRITE) != KERN_SUCCESS)
				eprintf ("cant change page perms to rw at 0x%"PFMT64x" with len= %d\n", addr, len);
        if (vm_write (task, (vm_address_t)addr,
                	(vm_offset_t)buff, (mach_msg_type_number_t)len) != KERN_SUCCESS)
                eprintf ("cant write on memory\n");
	//if (vm_read_overwrite(task, addr, 4, buff, &sz)) { eprintf ("cannot overwrite\n"); }

#if 0
eprintf ("addrbase: %x\n", addrbase);
eprintf ("change prems to %x\n", basic64->protection);
int prot = 0;
if (basic64->protection & 1) prot |= VM_PROT_EXECUTE;
if (basic64->protection & 2) prot |= VM_PROT_WRITE;
if (basic64->protection & 4) prot |= VM_PROT_READ;
printf ("%d vs %d\n", prot, basic64->protection);
int prot = VM_PROT_READ | VM_PROT_EXECUTE;
        if (vm_protect (task, addr, len, 0, prot) != KERN_SUCCESS) { //basic64->protection) != KERN_SUCCESS) {
        	eprintf ("Oops (0x%"PFMT64x") error (%s)\n", addr,
			MACH_ERROR_STRING (err));
                eprintf ("cant change page perms to rx\n");
	}
#endif
	return len;
}
Example No. 5
/* Wire down all memory currently allocated at START for LEN bytes;
   host_priv is the privileged host port. */
static void
wire_segment_internal (vm_address_t start,
		       vm_size_t len,
		       host_priv_t host_priv)
{
  vm_address_t addr;
  vm_size_t size;
  vm_prot_t protection;
  vm_prot_t max_protection;
  vm_inherit_t inheritance;
  boolean_t shared;
  mach_port_t object_name;
  vm_offset_t offset;
  error_t err;
  volatile char *poke;

  do
    {
      addr = start;
      err = vm_region (mach_task_self (), &addr, &size, &protection,
		       &max_protection, &inheritance, &shared, &object_name,
		       &offset);
      if (err)
	return;

      /* The current region begins at ADDR and is SIZE long.  If it
      	 extends beyond the LEN, prune it. */
      if (addr + size > start + len)
	size = len - (addr - start);

      /* Set protection to allow all access possible */
      vm_protect (mach_task_self (), addr, size, 0, max_protection);

      /* Generate write faults */
      for (poke = (char *) addr;
	   (vm_address_t) poke < addr + size;
	   poke += vm_page_size)
	*poke = *poke;

      /* Wire pages */
      vm_wire (host_priv, mach_task_self (), addr, size, max_protection);

      /* Set protection back to what it was */
      vm_protect (mach_task_self (), addr, size, 0, protection);


      mach_port_deallocate (mach_task_self (), object_name);

      len -= (addr - start) + size;
      start = addr + size;
    }
  while (len);
}
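
A hedged usage sketch for the function above: the caller needs the privileged host port, which a Hurd program can obtain with get_privileged_ports, before wiring a buffer (error handling elided):

/* Hypothetical caller: fetch the privileged host port and wire one buffer.
   get_privileged_ports is the Hurd call that hands out host_priv. */
static void
wire_buffer (void *buf, size_t len)
{
  mach_port_t host_priv, device_master;

  if (get_privileged_ports (&host_priv, &device_master))
    return;			/* not privileged; nothing we can do */

  wire_segment_internal ((vm_address_t) buf, (vm_size_t) len, host_priv);

  mach_port_deallocate (mach_task_self (), host_priv);
  mach_port_deallocate (mach_task_self (), device_master);
}
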
Example No. 6
File: page.c Project: n13l/kbuild
mach_error_t
mach_page_copy(void *addr, int len)
{
	vm_address_t page = (vm_address_t)addr;
	vm_map_t self = mach_task_self();
	mach_error_t e;

	/* Note: the len argument is unused; only the first 8 bytes at addr are
	   re-protected, with VM_PROT_COPY requesting copy-on-write access. */
	if ((e = vm_protect(self, page, 8, false, (VM_PROT_ALL | VM_PROT_COPY) )))
		return e;

	return vm_protect(self, page, 8, false, (VM_PROT_DEFAULT | VM_PROT_COPY) );
}
Example No. 7
bool
basic_jit_cache::init_translation_cache(uint32 size)
{
	size *= 1024;

	// Round up translation cache size to 16 KB boundaries
	const uint32 roundup = 16 * 1024;
	cache_size = (size + JIT_CACHE_SIZE_GUARD + roundup - 1) & -roundup;
	assert(cache_size > 0);

	tcode_start = (uint8 *)vm_acquire(cache_size, VM_MAP_PRIVATE | VM_MAP_32BIT);
	if (tcode_start == VM_MAP_FAILED) {
		tcode_start = NULL;
		return false;
	}

	if (vm_protect(tcode_start, cache_size,
				   VM_PAGE_READ | VM_PAGE_WRITE | VM_PAGE_EXECUTE) < 0) {
		vm_release(tcode_start, cache_size);
		tcode_start = NULL;
		return false;
	}

  done:
	D(bug("basic_jit_cache: Translation cache: %d KB at %p\n", cache_size / 1024, tcode_start));
	code_start = tcode_start;
	code_p = code_start;
	code_end = code_p + size;
	return true;
}
Example No. 8
/*	These are my mach based versions, untested and probably bad ...
*/
caddr_t my_mmap(caddr_t addr, size_t len, int prot, int flags,
          int fildes, off_t off)
{
	kern_return_t ret_val;
	
	/*	First map ...
	*/
	ret_val = map_fd ( fildes, 					/* fd				*/
	                  (vm_offset_t) off,		/* offset			*/
					  (vm_offset_t*)&addr,		/* address			*/
					  TRUE, 					/* find_space		*/
					  (vm_size_t) len);			/* size				*/

	if (ret_val != KERN_SUCCESS) {
    	mach_error("Error calling map_fd() in mmap", ret_val );
		return (caddr_t)0;
	}
	
	/*	... then protect (this is probably bad)
	*/
	ret_val = vm_protect( task_self(),			/* target_task 		*/
						 (vm_address_t)addr,	/* address			*/
						 (vm_size_t) len,		/* size 			*/
						 FALSE,					/* set_maximum		*/
						 (vm_prot_t) prot);		/* new_protection	*/
	if (ret_val != KERN_SUCCESS) {
		mach_error("vm_protect in mmap()", ret_val );
		return (caddr_t)0;
	}
	
	return addr;
}
Example No. 9
int my_mprotect(caddr_t addr, size_t len, int prot)
{
	vm_prot_t mach_prot = VM_PROT_NONE;	/* default if prot has no single match */
	kern_return_t ret_val;
	
	switch (prot) {
		case PROT_READ:		mach_prot = VM_PROT_READ;		break;
		case PROT_WRITE:	mach_prot = VM_PROT_WRITE;		break;
		case PROT_EXEC:		mach_prot = VM_PROT_EXECUTE;	break;
		case PROT_NONE:		mach_prot = VM_PROT_NONE;		break;
	}
	
	ret_val = vm_protect(task_self(),			/* target_task 		*/
						 (vm_address_t)addr,	/* address			*/
						 (vm_size_t) len,		/* size 			*/
						 FALSE,					/* set_maximum		*/
						 mach_prot);			/* new_protection	*/

	if (ret_val != KERN_SUCCESS) {
		mach_error("vm_protect in mprotect()", ret_val);
		return -1;
	}
	
	return 0;
}
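
The switch above only recognizes single PROT_* values, so a combined request such as PROT_READ|PROT_WRITE is left untranslated. A sketch of a bitwise translation that does handle combinations (a hypothetical helper, not part of the original):

/* Sketch: translate a POSIX protection bitmask into a Mach vm_prot_t,
   handling combinations such as PROT_READ|PROT_WRITE. */
static vm_prot_t posix_prot_to_mach(int prot)
{
	vm_prot_t mach_prot = VM_PROT_NONE;

	if (prot & PROT_READ)	mach_prot |= VM_PROT_READ;
	if (prot & PROT_WRITE)	mach_prot |= VM_PROT_WRITE;
	if (prot & PROT_EXEC)	mach_prot |= VM_PROT_EXECUTE;
	return mach_prot;
}
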
Example No. 10
int BreakPoint::set(mach_port_t task, unsigned long address) {
	printf("Setting breakpoint at address: %lu\n", address);
	kern_return_t err;
	if((err = task_suspend(task)) != KERN_SUCCESS) {
		printf("Suspend: %s\n", mach_error_string(err));
		return -1;
	}
	vm_offset_t intermediary;
	mach_msg_type_number_t size = (size_t)1;
	//printf("%llu\n", size);
	//char orig = static_cast<char>(*address);
	if(KERN_SUCCESS != (err = vm_protect(task, (vm_offset_t)address, (size_t)1, false, VM_PROT_ALL))) {
		printf("Protect: %s\n", mach_error_string(err));
	}
	/*if (KERN_SUCCESS != (err = mach_vm_read(task, address, (mach_vm_size_t)1, (&intermediary), &size))) {
		printf("Read: %s\n", mach_error_string(err));
	}
	printf("Intermediary: %lu\n", intermediary);
	original_byte = intermediary;
	printf("Original Byte: %c\n", original_byte);
	char value = 0xCC;
	vm_offset_t break_trigger = (unsigned long)&value;
	*(char *)break_trigger = 0xCC;
	if(KERN_SUCCESS != (err = mach_vm_write(task, address, break_trigger, size))) {
		printf("Write: %s\n", mach_error_string(err));
	}*/
	if((err = task_resume(task)) != KERN_SUCCESS) {
		printf("Resume: %s\n", mach_error_string(err));
		return -1;
	}
	return 0;
}
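
The commented-out block sketches the intended read/patch sequence. A hedged, minimal version of the missing write step, usable once the page has been made writable with vm_protect as above (a hypothetical helper, not part of the original class):

/* Hypothetical sketch: write one INT3 (0xCC) byte into the target task.
   Assumes the containing page was already unprotected with vm_protect. */
static kern_return_t write_breakpoint_byte(mach_port_t task, unsigned long address)
{
	unsigned char trap = 0xCC;
	return vm_write(task, (vm_address_t)address,
	                (vm_offset_t)&trap, (mach_msg_type_number_t)1);
}
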
Example No. 11
CMemoryFunction::CMemoryFunction(const void* code, size_t size)
: m_code(nullptr)
{
#ifdef WIN32
	m_size = size;
	m_code = malloc(size);
	memcpy(m_code, code, size);
	
	DWORD oldProtect = 0;
	BOOL result = VirtualProtect(m_code, size, PAGE_EXECUTE_READWRITE, &oldProtect);
	assert(result == TRUE);
#elif defined(__APPLE__)
	vm_size_t page_size = 0;
	host_page_size(mach_task_self(), &page_size);
	unsigned int allocSize = ((size + page_size - 1) / page_size) * page_size;
	vm_allocate(mach_task_self(), reinterpret_cast<vm_address_t*>(&m_code), allocSize, TRUE); 
	memcpy(m_code, code, size);
	sys_icache_invalidate(m_code, size);
	kern_return_t result = vm_protect(mach_task_self(), reinterpret_cast<vm_address_t>(m_code), size, 0, VM_PROT_READ | VM_PROT_EXECUTE);
	assert(result == 0);
	m_size = allocSize;
#elif defined(__ANDROID__) || defined(__linux__) || defined(__FreeBSD__)
	m_size = size;
	m_code = mmap(nullptr, size, PROT_WRITE | PROT_EXEC, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	assert(m_code != MAP_FAILED);
	memcpy(m_code, code, size);
#if defined(__arm__) || defined(__aarch64__)
	__clear_cache(m_code, reinterpret_cast<uint8*>(m_code) + size);
#endif
#endif
}
Example No. 12
uint64_t Alloc(uint32_t addr, uint32_t sz) 
{
    mach_error_t	k_error;
    
    printf("Alloc: deallocating! \n");
    vm_deallocate(mach_task_self(), (vm_address_t) addr, sz);
    
    printf("Alloc: allocating 0x%x (0x%08x - 0x%08x) bytes\n", sz, addr, addr+sz);
    k_error = vm_allocate(mach_task_self(), (vm_address_t*)&addr, sz, 0);
    
    if (k_error != KERN_SUCCESS)
    {
         printf("Alloc: vm_allocate() - failed with message %s (error = %d)!\n", mach_error_string(k_error), k_error);
         exit(-1);
    }
    
    
    printf("Alloc: vm_allocate ok, now vm_protect ...\n");
 
    k_error = vm_protect(mach_task_self(), (vm_address_t)addr, sz, 0, VM_PROT_READ | VM_PROT_WRITE | VM_PROT_EXECUTE); // rwx
       
    if (k_error != KERN_SUCCESS)
    {
         printf("Alloc: vm_protect() - failed with message %s (error = %d)!\n", mach_error_string(k_error), k_error);
         exit(-1);        
    }
     
    printf("Alloc: vm_allocate returned = %d - addr = 0x%08x, vm_protect ok, filling\n", k_error, addr);
   
    while(sz--) *(char*)(addr+sz)=0;
    return addr;
}
Example No. 13
bool SheepMem::Init(void)
{
	// Size of a native page
	page_size = vm_get_page_size();

	// Allocate SheepShaver globals
	proc = base;
	if (vm_mac_acquire(base, size) < 0)
		return false;

	// Allocate page with all bits set to 0, right in the middle
	// This is also used to catch undesired overlaps between proc and data areas
	zero_page = proc + (size / 2);
	Mac_memset(zero_page, 0, page_size);
	if (vm_protect(Mac2HostAddr(zero_page), page_size, VM_PAGE_READ) < 0)
		return false;

	// Allocate alternate stack for PowerPC interrupt routine
	sig_stack = base + size;
	if (vm_mac_acquire(sig_stack, SIG_STACK_SIZE) < 0)
		return false;

	data = base + size;
	return true;
}
Example No. 14
File: darwin.c Project: 8l/go-learn
// Read/write from a Mach data segment.
static int
machsegrw(Map *map, Seg *seg, uvlong addr, void *v, uint n, int isr)
{
	mach_port_t task;
	int r;

	task = idtotask(map->pid);
	if(task == -1)
		return -1;

	if(isr){
		vm_size_t nn;
		nn = n;
		if(me(vm_read_overwrite(task, addr, n, (uintptr)v, &nn)) < 0)
			return -1;
		return nn;
	}else{
		r = vm_write(task, addr, (uintptr)v, n);
		if(r == KERN_INVALID_ADDRESS){
			// Happens when writing to text segment.
			// Change protections.
			if(me(vm_protect(task, addr, n, 0, VM_PROT_WRITE|VM_PROT_READ|VM_PROT_EXECUTE)) < 0){
				fprint(2, "vm_protect: %s\n", r);
				return -1;
			}
			r = vm_write(task, addr, (uintptr)v, n);
		}
		if(r != 0){
			me(r);
			return -1;
		}
		return n;
	}
}
Example No. 15
static void* zalloc(native_word_t** zone) {
    void* ret = NULL;
    pthread_mutex_lock(&zone_lck);
    if (!(*zone)) {
        
        if (ZONE_SIZE % 2 || ZONE_SIZE < sizeof(native_word_t)) {
            puts("zalloc error: zone size must be a multiple of 2 and bigger than sizeof(native_word_t)");
            exit(-1);
        }

        native_word_t* szfl = 0;
        
        vm_allocate(mach_task_self_, (vm_address_t*)&szfl, PAGE_SIZE, 1);
        if (!szfl) {
            goto out;
        }
        vm_protect(mach_task_self_, (vm_address_t)szfl, PAGE_SIZE, 0, VM_PROT_ALL);
        for (int i = 0; i < (PAGE_SIZE/ZONE_SIZE); i++) {
            zfree((void*)(1ULL | (native_word_t)&szfl[i*(ZONE_SIZE/sizeof(native_word_t))]), zone);
        }
    }
    if (!(*zone)) {
        goto out;
    }
    ret = (*zone);
    (*zone) = (native_word_t*) (*zone)[0];
    ((native_word_t*) ret)[0] = ZONE_ALLOCATOR_BEEF;
out:
    pthread_mutex_unlock(&zone_lck);
    return ret;
}
Example No. 16
void Page_DenyAccess(void *address, size_t size)
{
  kern_return_t kret;

  kret = vm_protect(mach_task_self(), (vm_address_t)address, size, 0,
                    VM_PROT_NONE);
  MACH_CHECK_ERROR(kret);
}
Example No. 17
void Page_AllowAccess(void *address, size_t size)
{
  kern_return_t kret;

  kret = vm_protect(mach_task_self(), (vm_address_t)address, size, 0,
                    (VM_PROT_READ | VM_PROT_WRITE));
  MACH_CHECK_ERROR(kret);
}
Example No. 18
static bool tsk_setperm(RIO *io, task_t task, vm_address_t addr, int len, int perm) {
	kern_return_t kr;
	kr = vm_protect (task, addr, len, 0, perm);
	if (kr != KERN_SUCCESS) {
		perror ("tsk_setperm");
		return false;
	}
	return true;
}
Example No. 19
void * vm_acquire(size_t size, int options)
{
	void * addr;

	errno = 0;

	// VM_MAP_FIXED is to be used with vm_acquire_fixed() only
	if (options & VM_MAP_FIXED)
		return VM_MAP_FAILED;

#ifndef HAVE_VM_WRITE_WATCH
	if (options & VM_MAP_WRITE_WATCH)
		return VM_MAP_FAILED;
#endif

#if defined(HAVE_MACH_VM)
	// vm_allocate() returns a zero-filled memory region
	kern_return_t ret_code = vm_allocate(mach_task_self(), (vm_address_t *)&addr, size, TRUE);
	if (ret_code != KERN_SUCCESS) {
		errno = vm_error(ret_code);
		return VM_MAP_FAILED;
	}
#elif defined(HAVE_MMAP_VM)
	int fd = zero_fd;
	int the_map_flags = translate_map_flags(options) | map_flags;

	if ((addr = mmap(NULL, size, VM_PAGE_DEFAULT, the_map_flags, fd, 0)) == (void *)MAP_FAILED)
		return VM_MAP_FAILED;

	// Sanity checks for 64-bit platforms
	if (sizeof(void *) == 8 && (options & VM_MAP_32BIT) && !((char *)addr <= (char *)0xffffffff))
		return VM_MAP_FAILED;

	next_address = (char *)addr + size;
#elif defined(HAVE_WIN32_VM)
	int alloc_type = MEM_RESERVE | MEM_COMMIT;
	if (options & VM_MAP_WRITE_WATCH)
	  alloc_type |= MEM_WRITE_WATCH;

	if ((addr = VirtualAlloc(NULL, size, alloc_type, PAGE_EXECUTE_READWRITE)) == NULL)
		return VM_MAP_FAILED;
#else
	if ((addr = calloc(size, 1)) == 0)
		return VM_MAP_FAILED;

	// Omit changes for protections because they are not supported in this mode
	return addr;
#endif

	// Explicitly protect the newly mapped region here because on some systems,
	// say MacOS X, mmap() doesn't honour the requested protection flags.
	if (vm_protect(addr, size, VM_PAGE_DEFAULT) != 0)
		return VM_MAP_FAILED;

	return addr;
}
Example No. 20
static bool tsk_setperm(RIO *io, task_t task, vm_address_t addr, int len, int perm) {
	kern_return_t kr;
	kr = vm_protect (task, addr, len, 0, perm);
	if (kr != KERN_SUCCESS) {
		eprintf ("failed to change perm %s:%d\n", __FILE__, __LINE__);
		perror ("tsk_setperm");
		return false;
	}
	return true;
}
Example No. 21
int vm_acquire_fixed(void * addr, size_t size, int options)
{
	errno = 0;

	// Fixed mappings are required to be private
	if (options & VM_MAP_SHARED)
		return -1;

#ifndef HAVE_VM_WRITE_WATCH
	if (options & VM_MAP_WRITE_WATCH)
		return -1;
#endif

#if defined(HAVE_MACH_VM)
	// vm_allocate() returns a zero-filled memory region
	kern_return_t ret_code = vm_allocate(mach_task_self(), (vm_address_t *)&addr, size, 0);
	if (ret_code != KERN_SUCCESS) {
		errno = vm_error(ret_code);
		return -1;
	}
#elif defined(HAVE_MMAP_VM)
	int fd = zero_fd;
	int the_map_flags = translate_map_flags(options) | map_flags | MAP_FIXED;

	if (mmap((caddr_t)addr, size, VM_PAGE_DEFAULT, the_map_flags, fd, 0) == (void *)MAP_FAILED)
		return -1;
#elif defined(HAVE_WIN32_VM)
	// Windows cannot allocate Low Memory
	if (addr == NULL)
		return -1;

	int alloc_type = MEM_RESERVE | MEM_COMMIT;
	if (options & VM_MAP_WRITE_WATCH)
	  alloc_type |= MEM_WRITE_WATCH;

	// Allocate a possibly offset region to align on 64K boundaries
	LPVOID req_addr = align_addr_segment(addr);
	DWORD  req_size = align_size_segment(addr, size);
	LPVOID ret_addr = VirtualAlloc(req_addr, req_size, alloc_type, PAGE_EXECUTE_READWRITE);
	if (ret_addr != req_addr)
		return -1;
#else
	// Unsupported
	return -1;
#endif

	// Explicitly protect the newly mapped region here because on some systems,
	// say MacOS X, mmap() doesn't honour the requested protection flags.
	if (vm_protect(addr, size, VM_PAGE_DEFAULT) != 0)
		return -1;

	return 0;
}
Example No. 22
void
unprotect_data_segment(
void)
{
    kern_return_t r;

	if((r = vm_protect(mach_task_self(), data_seg->vmaddr +
		dyld_image_vmaddr_slide, (vm_size_t)data_seg->vmsize,
		FALSE, data_seg->initprot)) != KERN_SUCCESS){
	    mach_error(r, "can't vm_(un)protect data segment of dyld");
	    link_edit_error(DYLD_MACH_RESOURCE, r, "dyld");
	}
}
Example No. 23
int xnu_map_protect (RDebug *dbg, ut64 addr, int size, int perms) {
	int ret;
	// TODO: align pointers
	ret = vm_protect (pid_to_task (dbg->tid),
			(vm_address_t)addr,
			(vm_size_t)size,
			(boolean_t)0, /* maximum protection */
			VM_PROT_COPY|perms); //unix_prot_to_darwin (perms));
	if (ret != KERN_SUCCESS) {
		printf("vm_protect failed\n");
		return R_FALSE;
	}
	return R_TRUE;
}
Example No. 24
void debugRemoveFreeHook(void)
{
#if defined(_WIN32) && defined(_DEBUG)
    _CrtSetAllocHook(lastCrtAllocHook);
#endif

#ifdef __GLIBC__
    __free_hook = lastFreeHook;
#endif

#ifdef __APPLE__
    malloc_zone_t* zone = malloc_default_zone();
    assert(zone != NULL);
    //remove the write protection from the zone struct
    if (zone->version >= 8) {
      vm_protect(mach_task_self(), (uintptr_t)zone, sizeof(*zone), 0, VM_PROT_READ | VM_PROT_WRITE);
    }
    zone->free = lastMallocZone.free;
    zone->free_definite_size = lastMallocZone.free_definite_size;
    if (zone->version >= 8) {
      vm_protect(mach_task_self(), (uintptr_t)zone, sizeof(*zone), 0, VM_PROT_READ);
    }
#endif
}
Example No. 25
static int
read_exec(void *handle, vm_offset_t file_ofs, vm_size_t file_size,
		     vm_offset_t mem_addr, vm_size_t mem_size,
		     exec_sectype_t sec_type)
{
  struct multiboot_module *mod = handle;

	vm_map_t user_map = current_task()->map;
	vm_offset_t start_page, end_page;
	vm_prot_t mem_prot = sec_type & EXEC_SECTYPE_PROT_MASK;
	int err;

	if (mod->mod_start + file_ofs + file_size > mod->mod_end)
	  return -1;

	if (!(sec_type & EXEC_SECTYPE_ALLOC))
		return 0;

	assert(mem_size > 0);
	assert(mem_size >= file_size);

	start_page = trunc_page(mem_addr);
	end_page = round_page(mem_addr + mem_size);

#if 0
	printf("reading bootstrap section %08x-%08x-%08x prot %d pages %08x-%08x\n",
		mem_addr, mem_addr+file_size, mem_addr+mem_size, mem_prot, start_page, end_page);
#endif

	err = vm_allocate(user_map, &start_page, end_page - start_page, FALSE);
	assert(err == 0);
	assert(start_page == trunc_page(mem_addr));

	if (file_size > 0)
	{
		err = copyout((char *)phystokv (mod->mod_start) + file_ofs,
			      (void *)mem_addr, file_size);
		assert(err == 0);
	}

	if (mem_prot != VM_PROT_ALL)
	{
		err = vm_protect(user_map, start_page, end_page - start_page, FALSE, mem_prot);
		assert(err == 0);
	}

	return 0;
}
Example No. 26
static void os_protect_pages(void *p, size_t len, int writeable)
{
  kern_return_t retval;

  if(len & (page_size - 1)) {
    len += page_size - (len & (page_size - 1));
  }

  retval = vm_protect(task_self, (vm_address_t)p, len, FALSE,
		      writeable ? VM_PROT_ALL 
		      : (VM_PROT_READ | VM_PROT_EXECUTE));
  if(retval != KERN_SUCCESS) {
    GCPRINT(GCOUTF, "WARNING: couldn't protect %li bytes of page %p%s\n",
	   len, p, mach_error_string(retval));
  }
}
Example No. 27
bool image_info_jump_table(uint32_t index, jump_table_t *table) {
  const struct mach_header *header;
  if (!image_info_ready()) {
    p0_logf(P0_ERR, "image info not currently ready");
    return false;
  }
  if (index >= all_image_infos->infoArrayCount) {
    p0_logf(P0_ERR, "index out of range");
    return false;
  }
  if (!table) {
    p0_logf(P0_ERR, "specified jump_table pointer is NULL");
    return false;
  }
  header = all_image_infos->infoArray[index].imageLoadAddress;
  p0_logf(P0_INFO, "loading image '%s'",
           all_image_infos->infoArray[index].imageFilePath);
  if (!header) {
    p0_logf(P0_ERR, "failed to acquire header for %d", index);
    return false;
  }
  table->addr = (intptr_t) getsectdatafromheader(header,
                                                 "__IMPORT",
                                                 "__jump_table",
                                                 (unsigned long *)
                                                 &table->size);
  p0_logf(P0_INFO, "header: %p addr %p", header, table->addr);
  if (table->addr == 0) {
    p0_logf(P0_ERR, "jump table mapped at 0x0: bailing");
    return false;
  }
  /* Make sure we can patch the table */
  if (vm_protect(mach_task_self(),
                 (vm_address_t)table->addr,
                 table->size,
                 false,
                 VM_PROT_ALL) != KERN_SUCCESS) {
    /* we will keep on truckin' though. just in case! */
    p0_logf(P0_WARN, "failed to change the protections on the jump table");
  }
  return true;
}
Example No. 28
int vm_protect(void * addr, size_t size, int prot)
{
#ifdef HAVE_MACH_VM
	int ret_code = vm_protect(mach_task_self(), (vm_address_t)addr, size, 0, prot);
	return ret_code == KERN_SUCCESS ? 0 : -1;
#else
#ifdef HAVE_MMAP_VM
	int ret_code = mprotect((caddr_t)addr, size, prot);
	return ret_code == 0 ? 0 : -1;
#else
#ifdef HAVE_WIN32_VM
	DWORD old_prot;
	int ret_code = VirtualProtect(addr, size, translate_prot_flags(prot), &old_prot);
	return ret_code != 0 ? 0 : -1;
#else
	// Unsupported
	return -1;
#endif
#endif
#endif
}
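
A hedged usage sketch for the portable wrapper above, following the same vm_acquire/vm_protect sequence as the JIT-cache example (Example No. 7); the VM_MAP_* and VM_PAGE_* constants are assumed to come from the same vm_alloc layer:

// Sketch: allocate a buffer through the portable layer and make it
// readable, writable and executable, releasing it again on failure.
static void *alloc_executable(size_t size)
{
	void *code = vm_acquire(size, VM_MAP_PRIVATE);
	if (code == VM_MAP_FAILED)
		return NULL;

	if (vm_protect(code, size, VM_PAGE_READ | VM_PAGE_WRITE | VM_PAGE_EXECUTE) < 0) {
		vm_release(code, size);
		return NULL;
	}
	return code;
}
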
Example No. 29
static int read_exec(void *handle, vm_offset_t file_ofs, vm_size_t file_size,
		     vm_offset_t mem_addr, vm_size_t mem_size,
		     exec_sectype_t sec_type)
{
	vm_map_t user_map = current_task()->map;
	vm_offset_t start_page, end_page;
	vm_prot_t mem_prot = sec_type & EXEC_SECTYPE_PROT_MASK;
	int err;

	if (!(sec_type & EXEC_SECTYPE_ALLOC))
		return 0;

	assert(mem_size > 0);
	assert(mem_size >= file_size);

	start_page = trunc_page(mem_addr);
	end_page = round_page(mem_addr + mem_size);

	printf("reading bootstrap section %08x-%08x-%08x prot %d pages %08x-%08x\n",
		mem_addr, mem_addr+file_size, mem_addr+mem_size, mem_prot, start_page, end_page);

	err = vm_allocate(user_map, &start_page, end_page - start_page, FALSE);
	assert(err == 0);
	assert(start_page == trunc_page(mem_addr));

	if (file_size > 0)
	{
		err = copyout(handle + file_ofs, mem_addr, file_size);
		assert(err == 0);
	}

	if (mem_prot != VM_PROT_ALL)
	{
		err = vm_protect(user_map, start_page, end_page - start_page, FALSE, mem_prot);
		assert(err == 0);
	}

	return 0;
}
Example No. 30
    mach_error_t
mach_override_ptr(
	void *originalFunctionAddress,
    const void *overrideFunctionAddress,
    void **originalFunctionReentryIsland )
{
	assert( originalFunctionAddress );
	assert( overrideFunctionAddress );
	
	// this addresses overriding such functions as AudioOutputUnitStart()
	// test with modified DefaultOutputUnit project
#if defined(__x86_64__)
    for(;;){
        if(*(uint16_t*)originalFunctionAddress==0x25FF)    // jmp qword near [rip+0x????????]
            originalFunctionAddress=*(void**)((char*)originalFunctionAddress+6+*(int32_t *)((uint16_t*)originalFunctionAddress+1));
        else break;
    }
#elif defined(__i386__)
    for(;;){
        if(*(uint16_t*)originalFunctionAddress==0x25FF)    // jmp *0x????????
            originalFunctionAddress=**(void***)((uint16_t*)originalFunctionAddress+1);
        else break;
    }
#endif

	long	*originalFunctionPtr = (long*) originalFunctionAddress;
	mach_error_t	err = err_none;
	
#if defined(__ppc__) || defined(__POWERPC__)
	//	Ensure first instruction isn't 'mfctr'.
	#define	kMFCTRMask			0xfc1fffff
	#define	kMFCTRInstruction	0x7c0903a6
	
	long	originalInstruction = *originalFunctionPtr;
	if( !err && ((originalInstruction & kMFCTRMask) == kMFCTRInstruction) )
		err = err_cannot_override;
#elif defined(__i386__) || defined(__x86_64__)
	int eatenCount = 0;
	int originalInstructionCount = 0;
	char originalInstructions[kOriginalInstructionsSize];
	uint8_t originalInstructionSizes[kOriginalInstructionsSize];
	uint64_t jumpRelativeInstruction = 0; // JMP

	Boolean overridePossible = eatKnownInstructions ((unsigned char *)originalFunctionPtr, 
										&jumpRelativeInstruction, &eatenCount, 
										originalInstructions, &originalInstructionCount, 
										originalInstructionSizes );
	if (eatenCount + kMaxFixupSizeIncrease > kOriginalInstructionsSize) {
		//printf ("Too many instructions eaten\n");
		overridePossible = false;
	}
	if (!overridePossible) err = err_cannot_override;
	if (err) fprintf(stderr, "err = %x %s:%d\n", err, __FILE__, __LINE__);
#endif
	
	//	Make the original function implementation writable.
	if( !err ) {
		err = vm_protect( mach_task_self(),
				(vm_address_t) originalFunctionPtr, 8, false,
				(VM_PROT_ALL | VM_PROT_COPY) );
		if( err )
			err = vm_protect( mach_task_self(),
					(vm_address_t) originalFunctionPtr, 8, false,
					(VM_PROT_DEFAULT | VM_PROT_COPY) );
	}
	if (err) fprintf(stderr, "err = %x %s:%d\n", err, __FILE__, __LINE__);
	
	//	Allocate and target the escape island to the overriding function.
	BranchIsland	*escapeIsland = NULL;
	if( !err )	
		err = allocateBranchIsland( &escapeIsland, originalFunctionAddress );
	if (err) fprintf(stderr, "err = %x %s:%d\n", err, __FILE__, __LINE__);

	
#if defined(__ppc__) || defined(__POWERPC__)
	if( !err )
		err = setBranchIslandTarget( escapeIsland, overrideFunctionAddress, 0 );
	
	//	Build the branch absolute instruction to the escape island.
	long	branchAbsoluteInstruction = 0; // Set to 0 just to silence warning.
	if( !err ) {
		long escapeIslandAddress = ((long) escapeIsland) & 0x3FFFFFF;
		branchAbsoluteInstruction = 0x48000002 | escapeIslandAddress;
	}
#elif defined(__i386__) || defined(__x86_64__)
        if (err) fprintf(stderr, "err = %x %s:%d\n", err, __FILE__, __LINE__);

	if( !err )
		err = setBranchIslandTarget_i386( escapeIsland, overrideFunctionAddress, 0 );
 
	if (err) fprintf(stderr, "err = %x %s:%d\n", err, __FILE__, __LINE__);
	// Build the jump relative instruction to the escape island
#endif


#if defined(__i386__) || defined(__x86_64__)
	if (!err) {
		uint32_t addressOffset = ((char*)escapeIsland - (char*)originalFunctionPtr - 5);
		addressOffset = OSSwapInt32(addressOffset);
		
		jumpRelativeInstruction |= 0xE900000000000000LL; 
		jumpRelativeInstruction |= ((uint64_t)addressOffset & 0xffffffff) << 24;
		jumpRelativeInstruction = OSSwapInt64(jumpRelativeInstruction);		
	}
#endif
	
	//	Optionally allocate & return the reentry island. This may contain relocated
	//  jmp instructions and so has all the same addressing reachability requirements
	//  the escape island has to the original function, except the escape island is
	//  technically our original function.
	BranchIsland	*reentryIsland = NULL;
	if( !err && originalFunctionReentryIsland ) {
		err = allocateBranchIsland( &reentryIsland, escapeIsland);
		if( !err )
			*originalFunctionReentryIsland = reentryIsland;
	}
	
#if defined(__ppc__) || defined(__POWERPC__)	
	//	Atomically:
	//	o If the reentry island was allocated:
	//		o Insert the original instruction into the reentry island.
	//		o Target the reentry island at the 2nd instruction of the
	//		  original function.
	//	o Replace the original instruction with the branch absolute.
	if( !err ) {
		int escapeIslandEngaged = false;
		do {
			if( reentryIsland )
				err = setBranchIslandTarget( reentryIsland,
						(void*) (originalFunctionPtr+1), originalInstruction );
			if( !err ) {
				escapeIslandEngaged = CompareAndSwap( originalInstruction,
										branchAbsoluteInstruction,
										(UInt32*)originalFunctionPtr );
				if( !escapeIslandEngaged ) {
					//	Someone replaced the instruction out from under us,
					//	re-read the instruction, make sure it's still not
					//	'mfctr' and try again.
					originalInstruction = *originalFunctionPtr;
					if( (originalInstruction & kMFCTRMask) == kMFCTRInstruction)
						err = err_cannot_override;
				}
			}
		} while( !err && !escapeIslandEngaged );
	}
#elif defined(__i386__) || defined(__x86_64__)
	// Atomically:
	//	o If the reentry island was allocated:
	//		o Insert the original instructions into the reentry island.
	//		o Target the reentry island at the first non-replaced 
	//        instruction of the original function.
	//	o Replace the original first instructions with the jump relative.
	//
	// Note that on i386, we do not support someone else changing the code under our feet
	if ( !err ) {
		uint32_t offset = (uintptr_t)originalFunctionPtr - (uintptr_t)reentryIsland;
		fixupInstructions(offset, originalInstructions,
					originalInstructionCount, originalInstructionSizes );
	
		if( reentryIsland )
			err = setBranchIslandTarget_i386( reentryIsland,
										 (void*) ((char *)originalFunctionPtr+eatenCount), originalInstructions );
		// try making islands executable before planting the jmp
#if defined(__x86_64__) || defined(__i386__)
        if( !err )
            err = makeIslandExecutable(escapeIsland);
        if( !err && reentryIsland )
            err = makeIslandExecutable(reentryIsland);
#endif
		if ( !err )
			atomic_mov64((uint64_t *)originalFunctionPtr, jumpRelativeInstruction);
	}
#endif
	
	//	Clean up on error.
	if( err ) {
		if( reentryIsland )
			freeBranchIsland( reentryIsland );
		if( escapeIsland )
			freeBranchIsland( escapeIsland );
	}

	return err;
}