/* Returns NULL on failure; errno set */
static void *
my_mmap (void *addr, W_ size, int operation)
{
    void *ret;

#if darwin_HOST_OS
    // Without MAP_FIXED, Apple's mmap ignores addr.
    // With MAP_FIXED, it overwrites already mapped regions, which is
    // dangerous. mmap(0, ... MAP_FIXED ...) is worst of all: It unmaps
    // the program text and replaces it with zeroes, causing instant death.
    // This behaviour seems to be conformant with IEEE Std 1003.1-2001.
    // Let's just use the underlying Mach Microkernel calls directly,
    // they're much nicer.

    kern_return_t err = 0;
    ret = addr;

    if(operation & MEM_RESERVE)
    {
        if(addr) // try to allocate at address
            err = vm_allocate(mach_task_self(), (vm_address_t*) &ret,
                              size, false);
        if(!addr || err) // try to allocate anywhere
            err = vm_allocate(mach_task_self(), (vm_address_t*) &ret,
                              size, true);
    }

    if(err) {
        // don't know what the error codes mean exactly, assume it's
        // not our problem though.
        errorBelch("memory allocation failed (requested %" FMT_Word " bytes)",
                   size);
        stg_exit(EXIT_FAILURE);
    }

    if(operation & MEM_COMMIT) {
        vm_protect(mach_task_self(), (vm_address_t)ret, size, false,
                   VM_PROT_READ|VM_PROT_WRITE);
    }

#else
    int prot, flags;
    if (operation & MEM_COMMIT)
        prot = PROT_READ | PROT_WRITE;
    else
        prot = PROT_NONE;
    if (operation == MEM_RESERVE)
# if defined(MAP_NORESERVE)
        flags = MAP_NORESERVE;
# else
#  ifdef USE_LARGE_ADDRESS_SPACE
#   error USE_LARGE_ADDRESS_SPACE needs MAP_NORESERVE
#  endif
        errorBelch("my_mmap(,,MEM_RESERVE) not supported on this platform");
# endif
    else if (operation == MEM_COMMIT)
        flags = MAP_FIXED;  /* assumed continuation (snippet was cut off
                               here): commit by re-mapping over the
                               previously reserved range */
    else
        flags = 0;

    ret = mmap(addr, size, prot, MAP_ANON | MAP_PRIVATE | flags, -1, 0);
    if (ret == MAP_FAILED)
        return NULL;
#endif

    return ret;
}
void
attack(void)
{
    object_t *objp = (object_t *)random();
    object_t obj = (object_t)random();
    char *name = (char *)random();
    void *msg = (void *)random();
    size_t size = (size_t)random();
    task_t self = task_self();
    void *addr = (void *)random();
    int attr = random() & 7;
    thread_t t = (thread_t)random();
    thread_t *tp = (thread_t *)random();

    /* Object interface: every mix of null and garbage arguments. */
    object_create(NULL, NULL);
    object_create(NULL, objp);
    object_create(name, NULL);
    object_create(name, objp);
    object_destroy(0);
    object_destroy(obj);
    object_lookup(NULL, objp);
    object_lookup(name, NULL);
    object_lookup(name, objp);

    /* IPC interface. */
    msg_send(0, msg, size);
    msg_send(obj, NULL, size);
    msg_send(obj, msg, 0);
    msg_send(0, msg, 0);
    msg_send(0, NULL, size);
    msg_send(obj, msg, size);
    msg_receive(0, msg, size);
    msg_receive(obj, NULL, size);
    msg_receive(obj, msg, 0);
    msg_receive(0, msg, 0);
    msg_receive(0, NULL, size);
    msg_receive(obj, msg, size);
    msg_reply(0, msg, size);
    msg_reply(obj, NULL, size);
    msg_reply(obj, msg, 0);
    msg_reply(0, msg, 0);
    msg_reply(0, NULL, size);
    msg_reply(obj, msg, size);

    /* VM and thread interfaces. */
    vm_allocate(self, addr, size, 1);
    vm_allocate(self, &addr, size, 1);
    vm_free(self, addr);
    vm_attribute(self, addr, attr);
    vm_map(self, addr, size, &addr);
    thread_create(self, tp);
    thread_suspend(t);
    thread_terminate(t);
}
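The snippet above is only the payload; a driver needs nothing more than a seeded loop, so a crashing argument combination can be replayed from the same seed. The sketch below is an assumption, not part of the original source:

/* Hypothetical driver for attack() above -- the name and seed handling
 * are assumptions, not the original source. Surviving indefinitely is
 * the pass condition; any crash indicates a kernel robustness bug. */
#include <stdio.h>
#include <stdlib.h>

int
main(int argc, char *argv[])
{
    long seed = (argc > 1) ? atol(argv[1]) : 1;

    printf("fuzzing kernel interfaces, seed=%ld\n", seed);
    srandom(seed);
    for (;;)
        attack();
}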
static void
rthreads_more_memory(int size, register free_list_t fl)
{
    register int amount;
    register int n;
    vm_address_t where;
    register header_t h;
    kern_return_t r;

    if (size <= vm_page_size) {
        amount = vm_page_size;
        n = vm_page_size / size;
        /*
         * We lose vm_page_size - n*size bytes here.
         */
    } else {
        amount = size;
        n = 1;
    }

    MACH_CALL(vm_allocate(mach_task_self(), &where,
                          (vm_size_t) amount, TRUE), r);

    /* We mustn't allocate at address 0, since programs will then see
     * what appears to be a null pointer for valid data. */
    if (r == KERN_SUCCESS && where == 0) {
        MACH_CALL(vm_allocate(mach_task_self(), &where,
                              (vm_size_t) amount, TRUE), r);
        if (r == KERN_SUCCESS) {
            MACH_CALL(vm_deallocate(mach_task_self(), (vm_address_t) 0,
                                    (vm_size_t) amount), r);
        }
    }

    h = (header_t) where;
    do {
        h->next = fl->head;
        fl->head = h;
        h = (header_t) ((char *) h + size);
    } while (--n != 0);
}
__private_extern__ kern_return_t _io_pm_hid_event_copy_history(
    mach_port_t             server,
    vm_offset_t             *array_data,
    mach_msg_type_number_t  *array_dataLen,
    int                     *return_val)
{
    CFDataRef sendData = NULL;

    sendData = CFPropertyListCreateData(0, gHIDEventHistory,
                                        kCFPropertyListXMLFormat_v1_0, 0, NULL);
    if (!sendData) {
        *return_val = kIOReturnError;
        goto exit;
    }

    /* vm_allocate picks the buffer address (anywhere == TRUE); the MIG
     * layer deallocates the out-of-line buffer after the reply is sent. */
    *array_data = 0;
    *array_dataLen = (mach_msg_type_number_t)CFDataGetLength(sendData);
    vm_allocate(mach_task_self(), (vm_address_t *)array_data,
                *array_dataLen, TRUE);
    if (*array_data) {
        memcpy((void *)*array_data, CFDataGetBytePtr(sendData),
               *array_dataLen);
        *return_val = kIOReturnSuccess;
    } else {
        *return_val = kIOReturnError;
    }

    CFRelease(sendData);

exit:
    return KERN_SUCCESS;
}
int main(int argc, char** argv){
    kern_return_t err;

    // re-map the null page read/write
    int var = 0;
    err = vm_deallocate(mach_task_self(), 0x0, 0x1000);
    if (err != KERN_SUCCESS){
        printf("%x\n", err);
    }

    vm_address_t addr = 0;
    // flags == 0 means VM_FLAGS_FIXED: allocate exactly at address 0
    err = vm_allocate(mach_task_self(), &addr, 0x1000, 0);
    if (err != KERN_SUCCESS){
        if (err == KERN_INVALID_ADDRESS){
            printf("invalid address\n");
        }
        if (err == KERN_NO_SPACE){
            printf("no space\n");
        }
        printf("%x\n", err);
    }

    // fill the null page to prove it is mapped and writable
    char* np = 0;
    for (int i = 0; i < 0x1000; i++){
        np[i] = '\x41';
    }

    for (;;) {
        poc();
    }
    return 0;
}
uint64_t Alloc(uint32_t addr, uint32_t sz)
{
    mach_error_t k_error;

    printf("Alloc: deallocating!\n");
    vm_deallocate(mach_task_self(), (vm_address_t) addr, sz);

    printf("Alloc: allocating 0x%x (0x%08x - 0x%08x) bytes\n", sz, addr, addr+sz);
    k_error = vm_allocate(mach_task_self(), (vm_address_t*)&addr, sz, 0);
    if (k_error != KERN_SUCCESS) {
        printf("Alloc: vm_allocate() - failed with message %s (error = %d)!\n",
               mach_error_string(k_error), k_error);
        exit(-1);
    }

    printf("Alloc: vm_allocate ok, now vm_protect ...\n");
    k_error = vm_protect(mach_task_self(), addr, sz, 0, 7); // rwx
    if (k_error != KERN_SUCCESS) {
        printf("Alloc: vm_protect() - failed with message %s (error = %d)!\n",
               mach_error_string(k_error), k_error);
        exit(-1);
    }

    printf("Alloc: vm_allocate returned = %d - addr = 0x%08x, vm_protect ok, filling\n",
           k_error, addr);
    while (sz--)
        *(char*)(addr+sz) = 0;

    return addr;
}
default_pager_thread_t *
start_default_pager_thread(
    int         id,
    boolean_t   internal)
{
    default_pager_thread_t  *dpt;
    kern_return_t           kr;
    static char             here[] = "start_default_pager_thread";

    dpt = (default_pager_thread_t *) kalloc(sizeof (default_pager_thread_t));
    if (dpt == 0)
        Panic("alloc pager thread");

    dpt->dpt_internal = internal;
    dpt->dpt_id = id;
    dpt->dpt_initialized_p = FALSE;

    kr = vm_allocate(default_pager_self, &dpt->dpt_buffer,
                     vm_page_size, TRUE);
    if (kr != KERN_SUCCESS)
        Panic("alloc thread buffer");
    wire_memory(dpt->dpt_buffer, vm_page_size,
                VM_PROT_READ | VM_PROT_WRITE);

    dpt->dpt_thread = cthread_fork((cthread_fn_t) default_pager_thread,
                                   (void *) dpt);
    return dpt;
}
void *VMemAlloc(const vm_size_t size)
{
    GLbyte        *pointer = NULL;
    kern_return_t  err     = KERN_SUCCESS;

    // In debug builds, check that we have
    // correct VM page alignment
    check(size != 0);
    check((size % 4096) == 0);

    // Allocate directly from VM
    err = vm_allocate((vm_map_t) mach_task_self(),
                      (vm_address_t *)&pointer,
                      size,
                      VM_FLAGS_ANYWHERE);

    // Check errors
    check(err == KERN_SUCCESS);

    if (err != KERN_SUCCESS) {
        NSLog(@">> ERROR: Failed to allocate vm memory of size = %lu", size);
        pointer = NULL;
    } // if

    return pointer;
} // VMemAlloc
static void trySO(const char* path)
{
    void* handle = dlopen(path, RTLD_LAZY);
    if ( handle == NULL ) {
        const char* msg = dlerror();
        FAIL("dlopen(\"%s\" RTLD_LAZY) failed but it should have worked: %s", path, msg);
        exit(0);
    }

    void* sym = dlsym(handle, "foo");
    if ( sym == NULL ) {
        const char* msg = dlerror();
        FAIL("dlsym(handle, \"foo\") failed but it should have worked: %s", msg);
        exit(0);
    }

    int result = dlclose(handle);
    if ( result != 0 ) {
        FAIL("dlclose(handle) returned %d", result);
        exit(0);
    }

    // now try to create a page where foo() was
    vm_address_t addr = ((uintptr_t)sym) & (-4096);
    kern_return_t r = vm_allocate(mach_task_self(), &addr, 4096, VM_FLAGS_FIXED);
    if ( r != KERN_SUCCESS ) {
        FAIL("dlclose-unmap: could not allocate page where SO was previously mapped (kern_return_t %d)", r);
        exit(0);
    }
}
static void *map_it( const char *path, int fd, void *map_at, size_t len )
{
    kern_return_t rc;
    vm_offset_t addr;

    addr = (vm_offset_t)map_at;
    rc = vm_allocate( task_self(), &addr, len, /* anywhere */ FALSE );
    if (rc != KERN_SUCCESS) {
        mach_error( "vm_allocate", rc );
        fprintf( stderr, "%s: could not map at %08lx\n",
                 path, (unsigned long)map_at );
        return NULL;
    }

    rc = map_fd( fd, 0, &addr, /*find_space*/ FALSE, len );
    if (rc != KERN_SUCCESS) {
        mach_error( "map_fd", rc );
        fprintf( stderr, "%s: could not map at %08lx\n",
                 path, (unsigned long)map_at );
        return NULL;
    }

    return (void *)addr;
}
static void* zalloc(native_word_t** zone)
{
    void* ret = NULL;

    pthread_mutex_lock(&zone_lck);
    if (!(*zone)) {
        if (ZONE_SIZE % 2 || ZONE_SIZE < sizeof(native_word_t)) {
            puts("zalloc error: zone size must be a multiple of 2 and at least sizeof(native_word_t)");
            exit(-1);
        }

        /* Grab a fresh page and carve it into ZONE_SIZE chunks, pushing
         * each chunk onto the free list via zfree(). */
        native_word_t* szfl = 0;
        vm_allocate(mach_task_self_, (vm_address_t*)&szfl, PAGE_SIZE, 1);
        if (!szfl) {
            goto out;
        }
        vm_protect(mach_task_self_, (vm_address_t)szfl, PAGE_SIZE, 0, VM_PROT_ALL);
        for (int i = 0; i < (PAGE_SIZE/ZONE_SIZE); i++) {
            zfree((void*)(1ULL | (native_word_t)&szfl[i*(ZONE_SIZE/sizeof(native_word_t))]), zone);
        }
    }
    if (!(*zone)) {
        goto out;
    }

    /* Pop the head of the free list and mark the chunk as allocated. */
    ret = (*zone);
    (*zone) = (native_word_t*) (*zone)[0];
    ((native_word_t*) ret)[0] = ZONE_ALLOCATOR_BEEF;

out:
    pthread_mutex_unlock(&zone_lck);
    return ret;
}
bool TPCircularBufferInit(TPCircularBuffer *buffer, int length) {

    // keep trying until we get our buffer, needed to handle race conditions
    while(1) {
        buffer->length = round_page(length);    // We need whole page sizes

        // Temporarily allocate twice the length, so we have the contiguous address space to
        // support a second instance of the buffer directly after
        vm_address_t bufferAddress;
        if ( !checkResult(vm_allocate(mach_task_self(),
                                      &bufferAddress,
                                      buffer->length * 2,
                                      VM_FLAGS_ANYWHERE), // allocate anywhere it'll fit
                          "Buffer allocation") ) {
            // try again if we fail
            continue;
        }

        // Now replace the second half of the allocation with a virtual copy of the first half. Deallocate the second half...
        if ( !checkResult(vm_deallocate(mach_task_self(),
                                        bufferAddress + buffer->length,
                                        buffer->length),
                          "Buffer deallocation") ) {
            // if this fails somehow, deallocate the whole region and try again
            vm_deallocate(mach_task_self(), bufferAddress, buffer->length);
            continue;
        }

        // Re-map the buffer to the address space immediately after the buffer
        vm_address_t virtualAddress = bufferAddress + buffer->length;
        vm_prot_t cur_prot, max_prot;
        if ( !checkResult(vm_remap(mach_task_self(),
                                   &virtualAddress,   // mirror target
                                   buffer->length,    // size of mirror
                                   0,                 // auto alignment
                                   0,                 // force remapping to virtualAddress
                                   mach_task_self(),  // same task
                                   bufferAddress,     // mirror source
                                   0,                 // MAP READ-WRITE, NOT COPY
                                   &cur_prot,         // unused protection struct
                                   &max_prot,         // unused protection struct
                                   VM_INHERIT_DEFAULT),
                          "Remap buffer memory") ) {
            // if this remap failed, we hit a race condition, so deallocate and try again
            vm_deallocate(mach_task_self(), bufferAddress, buffer->length);
            continue;
        }

        if ( virtualAddress != bufferAddress + buffer->length ) {
            // if the memory is not contiguous, clean up both allocated buffers and try again
            printf("Couldn't map buffer memory to end of buffer\n");
            vm_deallocate(mach_task_self(), virtualAddress, buffer->length);
            vm_deallocate(mach_task_self(), bufferAddress, buffer->length);
            continue;
        }

        buffer->buffer = (void*)bufferAddress;
        buffer->fillCount = 0;
        buffer->head = buffer->tail = 0;

        return true;
    }
}
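What the vm_remap dance buys is the mirroring invariant: both mappings alias the same physical pages, so I/O that runs past the end of the first mapping lands back at the start of the buffer with no split copies. A minimal sketch of a check for that invariant, assuming only the buffer and length fields used above (check_mirror is an illustrative name, not part of the original):

#include <assert.h>
#include <string.h>

/* Sketch: verify the mirroring invariant of a buffer initialized above.
 * A byte stored through the first mapping is readable through the second
 * mapping exactly `length` bytes later, so wrap-around I/O needs no split. */
static void check_mirror(TPCircularBuffer *buffer) {
    char *p = (char *)buffer->buffer;
    p[0] = 'A';                              // write via the first mapping
    assert(p[buffer->length] == 'A');        // visible through the mirror
    memcpy(p + buffer->length - 1, "BC", 2); // one memcpy across the seam
    assert(p[0] == 'C');                     // second byte wrapped to start
}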
struct trampoline *
mach_tramp_alloc(void *addr)
{
    __build_bug_on(sizeof (struct trampoline) > PAGE_SIZE);
    mach_error_t err = err_none;
    vm_address_t first = ASLR_FIRST(addr);
    vm_address_t last = ASLR_LAST(addr);
    vm_address_t page = first;
    int allocated = 0;
    vm_map_t self = mach_task_self();

    /* Probe fixed-address allocations, stepping down one page at a time
     * for as long as the kernel reports KERN_NO_SPACE. */
    while (!err && !allocated && page != last) {
        err = vm_allocate(self, &page, PAGE_SIZE, 0);
        if (err == err_none)
            allocated = 1;
        else if (err == KERN_NO_SPACE) {
            page -= PAGE_SIZE;
            err = err_none;
        }
    }

    if (!allocated || err)
        return NULL;

    return (struct trampoline *) page;
}
bool VolatileBuffer::Init(size_t aSize, size_t aAlignment) {
  MOZ_ASSERT(!mSize && !mBuf, "Init called twice");
  MOZ_ASSERT(!(aAlignment % sizeof(void *)),
             "Alignment must be multiple of pointer size");

  mSize = aSize;
  kern_return_t ret = 0;
  if (aSize < MIN_VOLATILE_ALLOC_SIZE) {
    goto heap_alloc;
  }

  ret = vm_allocate(mach_task_self(), (vm_address_t *)&mBuf, mSize,
                    VM_FLAGS_PURGABLE | VM_FLAGS_ANYWHERE);
  if (ret == KERN_SUCCESS) {
    return true;
  }

heap_alloc:
  (void)moz_posix_memalign(&mBuf, aAlignment, aSize);
  mHeap = true;
  return !!mBuf;
}
void
cpu_physwindow_init(int cpu)
{
    cpu_data_t          *cdp = cpu_data_ptr[cpu];
    cpu_desc_index_t    *cdi = &cdp->cpu_desc_index;
    vm_offset_t         phys_window;

    if (vm_allocate(kernel_map, &phys_window,
                    PAGE_SIZE, VM_FLAGS_ANYWHERE) != KERN_SUCCESS)
        panic("cpu_physwindow_init: couldn't allocate phys map window");

    /*
     * make sure the page that encompasses the
     * pte pointer we're interested in actually
     * exists in the page table
     */
    pmap_expand(kernel_pmap, phys_window);

    cdp->cpu_physwindow_base = phys_window;
    cdp->cpu_physwindow_ptep = vtopte(phys_window);

    cdi->cdi_gdt[sel_idx(PHYS_WINDOW_SEL)] = physwindow_desc_pattern;
    cdi->cdi_gdt[sel_idx(PHYS_WINDOW_SEL)].offset = phys_window;

    fix_desc(&cdi->cdi_gdt[sel_idx(PHYS_WINDOW_SEL)], 1);
}
bool VolatileBuffer::Init(size_t aSize, size_t aAlignment) {
  MOZ_ASSERT(!mSize && !mBuf, "Init called twice");
  MOZ_ASSERT(!(aAlignment % sizeof(void *)),
             "Alignment must be multiple of pointer size");

  mSize = aSize;
  kern_return_t ret = 0;
  if (aSize < MIN_VOLATILE_ALLOC_SIZE) {
    goto heap_alloc;
  }

  ret = vm_allocate(mach_task_self(), (vm_address_t *)&mBuf, mSize,
                    VM_FLAGS_PURGABLE | VM_FLAGS_ANYWHERE);
  if (ret == KERN_SUCCESS) {
    return true;
  }

heap_alloc:
#if(0)
  (void)moz_posix_memalign(&mBuf, aAlignment, aSize);
#else
  // 10.4 doesn't have memalign, but our malloc()s are always aligned to
  // 16 bytes anyway, and that's all we need to support right now.
  if (MOZ_UNLIKELY(aAlignment > 16))
    fprintf(stderr, "Warning: volatile alignment %zu.\n", aAlignment);
  mBuf = malloc(aSize);
#endif
  mHeap = true;
  return !!mBuf;
}
/* Truncate file */
static int
ramfs_truncate(vnode_t vp, off_t length)
{
    struct ramfs_node *np;
    void *new_buf;
    size_t new_size;

    DPRINTF(AFSDB_CORE, ("truncate %s length=%lld\n", vp->v_path,
                         (long long)length));

    np = vp->v_data;

    if (length == 0) {
        if (np->rn_buf != NULL) {
            vm_free(getpid(), np->rn_buf, np->rn_bufsize);
            np->rn_buf = NULL;
            np->rn_bufsize = 0;
        }
    } else if (length > (off_t)np->rn_bufsize) {
        new_size = round_page(length);
        if (vm_allocate(getpid(), &new_buf, new_size, 1))
            return -EIO;
        if (np->rn_size != 0) {
            memcpy(new_buf, np->rn_buf, vp->v_size);
            vm_free(getpid(), np->rn_buf, np->rn_bufsize);
        }
        np->rn_buf = new_buf;
        np->rn_bufsize = new_size;
    }
    np->rn_size = length;
    vn_lock_rw(vp)->v_size = length;
    return 0;
}
error_t
pager_read_page (struct user_pager_info *pager, vm_offset_t page,
                 vm_address_t *buf, int *write_lock)
{
  int pfn = page / vm_page_size;
  size_t nread;

  /* We never request write locks.  */
  *write_lock = 0;

  expand_map (pager, page);

  if (!pager->map[pfn])
    vm_allocate (mach_task_self (), buf, vm_page_size, 1);
  else
    {
      store_read (backing_store, pager->map[pfn], vm_page_size,
                  (void **)buf, &nread);
      if (nread != vm_page_size)
        {
          munmap ((caddr_t) *buf, nread);
          return EIO;
        }
    }
  return 0;
}
CMemoryFunction::CMemoryFunction(const void* code, size_t size)
: m_code(nullptr)
{
#ifdef WIN32
    m_size = size;
    m_code = malloc(size);
    memcpy(m_code, code, size);
    DWORD oldProtect = 0;
    BOOL result = VirtualProtect(m_code, size, PAGE_EXECUTE_READWRITE, &oldProtect);
    assert(result == TRUE);
#elif defined(__APPLE__)
    vm_size_t page_size = 0;
    // host_page_size takes a host port, not a task port
    host_page_size(mach_host_self(), &page_size);
    unsigned int allocSize = ((size + page_size - 1) / page_size) * page_size;
    vm_allocate(mach_task_self(), reinterpret_cast<vm_address_t*>(&m_code),
                allocSize, TRUE);
    memcpy(m_code, code, size);
    sys_icache_invalidate(m_code, size);
    kern_return_t result = vm_protect(mach_task_self(),
                                      reinterpret_cast<vm_address_t>(m_code),
                                      size, 0, VM_PROT_READ | VM_PROT_EXECUTE);
    assert(result == 0);
    m_size = allocSize;
#elif defined(__ANDROID__) || defined(__linux__) || defined(__FreeBSD__)
    m_size = size;
    m_code = mmap(nullptr, size, PROT_WRITE | PROT_EXEC,
                  MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    assert(m_code != MAP_FAILED);
    memcpy(m_code, code, size);
#if defined(__arm__) || defined(__aarch64__)
    __clear_cache(m_code, reinterpret_cast<uint8*>(m_code) + size);
#endif
#endif
}
static ffi_trampoline_table *
ffi_trampoline_table_alloc (void)
{
  ffi_trampoline_table *table;
  vm_address_t config_page;
  vm_address_t trampoline_page;
  vm_address_t trampoline_page_template;
  vm_prot_t cur_prot;
  vm_prot_t max_prot;
  kern_return_t kt;
  uint16_t i;

  /* Allocate two pages -- a config page and a placeholder page */
  config_page = 0x0;
  kt = vm_allocate (mach_task_self (), &config_page, PAGE_MAX_SIZE * 2,
                    VM_FLAGS_ANYWHERE);
  if (kt != KERN_SUCCESS)
    return NULL;

  /* Remap the trampoline table on top of the placeholder page */
  trampoline_page = config_page + PAGE_MAX_SIZE;
  trampoline_page_template = (vm_address_t)&ffi_closure_trampoline_table_page;
#ifdef __arm__
  /* ffi_closure_trampoline_table_page can be thumb-biased on some ARM archs */
  trampoline_page_template &= ~1UL;
#endif
  kt = vm_remap (mach_task_self (), &trampoline_page, PAGE_MAX_SIZE, 0x0,
                 VM_FLAGS_OVERWRITE, mach_task_self (),
                 trampoline_page_template, FALSE, &cur_prot, &max_prot,
                 VM_INHERIT_SHARE);
  if (kt != KERN_SUCCESS)
    {
      vm_deallocate (mach_task_self (), config_page, PAGE_MAX_SIZE * 2);
      return NULL;
    }

  /* We have valid trampoline and config pages */
  table = calloc (1, sizeof (ffi_trampoline_table));
  table->free_count = FFI_TRAMPOLINE_COUNT;
  table->config_page = config_page;
  table->trampoline_page = trampoline_page;

  /* Create and initialize the free list */
  table->free_list_pool =
    calloc (FFI_TRAMPOLINE_COUNT, sizeof (ffi_trampoline_table_entry));

  for (i = 0; i < table->free_count; i++)
    {
      ffi_trampoline_table_entry *entry = &table->free_list_pool[i];
      entry->trampoline =
        (void *) (table->trampoline_page + (i * FFI_TRAMPOLINE_SIZE));

      if (i < table->free_count - 1)
        entry->next = &table->free_list_pool[i + 1];
    }

  table->free_list = table->free_list_pool;

  return table;
}
void setup_arch(char **cmdline_p,
                unsigned long *memory_start_p,
                unsigned long *memory_end_p)
{
    kern_return_t   kr;
    unsigned long   initial_mem_size;
    char            c, *cp;

    set_rootdev();

    /*
     * Reserve a bunch of memory for subsystems initialization.
     * The extra memory will be freed in osfmach3_mem_init after
     * every subsystem had a chance to reserve some memory.
     */
    initial_mem_size = 64*1024*1024;    /* 64 MB */
    kr = vm_allocate(mach_task_self(),
                     (vm_offset_t *) &initial_start_mem,
                     initial_mem_size,
                     TRUE);
    if (kr != KERN_SUCCESS) {
        MACH3_DEBUG(0, kr, ("setup_arch: vm_allocate(&start_mem)"));
        panic("setup_arch: can't set memory_start");
    }

    /* Save unparsed command line copy for /proc/cmdline */
    memcpy(saved_command_line, *server_command_line_p, COMMAND_LINE_SIZE);
    saved_command_line[COMMAND_LINE_SIZE-1] = '\0';

    __mem_size = osfmach3_mem_size;
    for (c = ' ', cp = saved_command_line;;) {
        /*
         * "mem=XXX[kKmM]" overrides the Mach-reported memory size
         */
        if (c == ' ' && !strncmp(cp, "mem=", 4)) {
            __mem_size = simple_strtoul(cp+4, &cp, 0);
            if (*cp == 'K' || *cp == 'k') {
                __mem_size = __mem_size << 10;
                cp++;
            } else if (*cp == 'M' || *cp == 'm') {
                __mem_size = __mem_size << 20;
                cp++;
            }
        }
        c = *(cp++);
        if (!c)
            break;
    }

    *memory_start_p = initial_start_mem;
    *memory_end_p = initial_start_mem + initial_mem_size;
    *cmdline_p = *server_command_line_p;

#ifdef CONFIG_BLK_DEV_INITRD
    initrd_start = (unsigned long) &builtin_ramdisk_image;
    initrd_end = initrd_start + builtin_ramdisk_size;
    printk("Initrd at 0x%08lx to 0x%08lx (0x%08lx)\n",
           initrd_start, initrd_end, initrd_end - initrd_start);
#endif /* CONFIG_BLK_DEV_INITRD */
}
mach_error_t
allocateBranchIsland(
        BranchIsland    **island,
        int             allocateHigh,
        void            *originalFunctionAddress)
{
    assert( island );

    mach_error_t err = err_none;

    if( allocateHigh ) {
        vm_size_t pageSize;
        err = host_page_size( mach_host_self(), &pageSize );
        if( !err ) {
            assert( sizeof( BranchIsland ) <= pageSize );
#if defined(__x86_64__)
            vm_address_t first =
                ((uint64_t)originalFunctionAddress
                 & ~(uint64_t)(((uint64_t)1 << 31) - 1))
                | ((uint64_t)1 << 31); // start in the middle of the page?
            vm_address_t last = 0x0;
#else
            vm_address_t first = 0xfeffffff;
            vm_address_t last = 0xfe000000 + pageSize;
#endif

            vm_address_t page = first;
            int allocated = 0;
            vm_map_t task_self = mach_task_self();

            while( !err && !allocated && page != last ) {
                err = vm_allocate( task_self, &page, pageSize, 0 );
                if( err == err_none )
                    allocated = 1;
                else if( err == KERN_NO_SPACE ) {
#if defined(__x86_64__)
                    page -= pageSize;
#else
                    page += pageSize;
#endif
                    err = err_none;
                }
            }
            if( allocated )
                *island = (void*) page;
            else if( !allocated && !err )
                err = KERN_NO_SPACE;
        }
    } else {
        void *block = malloc( sizeof( BranchIsland ) );
        if( block )
            *island = block;
        else
            err = KERN_NO_SPACE;
    }

    if( !err )
        (**island).allocatedHigh = allocateHigh;

    return err;
}
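An allocator like this needs a matching release path. The sketch below is an assumed counterpart (the name, fields, and logic are guesses extrapolated from the allocation code above, not the original source): islands allocated high go back through vm_deallocate, low ones through free.

/* Hypothetical counterpart to allocateBranchIsland() above -- an
 * assumption for illustration, not the original source. */
mach_error_t
freeBranchIsland( BranchIsland *island )
{
    assert( island );

    if( island->allocatedHigh ) {
        vm_size_t pageSize;
        mach_error_t err = host_page_size( mach_host_self(), &pageSize );
        if( !err )
            err = vm_deallocate( mach_task_self(),
                                 (vm_address_t) island, pageSize );
        return err;
    }

    free( island );    // low islands came from malloc()
    return err_none;
}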
/* struct copy inout to out and in to inout */
kern_return_t
s_outline_fixed_structs(mach_port_t target,
                        OFstruct inStruct,
                        OFstruct *inoutStruct,
                        OFstruct *outStruct)
{
    if (inStruct == NULL || inoutStruct == NULL || *inoutStruct == NULL)
        return MIG_REMOTE_ERROR;

    vm_allocate(mach_task_self(), (vm_address_t *) outStruct,
                sizeof(*inStruct), 1);
    **outStruct = **inoutStruct;
    **inoutStruct = *inStruct;
    return KERN_SUCCESS;
}
/* copy inRight to outRight */
kern_return_t
s_inline_poly_arrays(mach_port_t target,
                     Imach_port_array_t inRight,
                     mach_msg_type_number_t inRightCnt,
                     Imach_port_array_t *outRight,
                     mach_msg_type_number_t *outRightCnt)
{
    int i;

    vm_allocate(mach_task_self(), (vm_address_t *) outRight,
                inRightCnt * sizeof(mach_port_t), 1);
    *outRightCnt = inRightCnt;
    for (i = 0; i < inRightCnt; i++)
        (*outRight)[i] = inRight[i];
    return KERN_SUCCESS;
}
void
cpu_userwindow_init(int cpu)
{
    cpu_data_t          *cdp = cpu_data_ptr[cpu];
    cpu_desc_index_t    *cdi = &cdp->cpu_desc_index;
    vm_offset_t         user_window;
    vm_offset_t         vaddr;
    int                 num_cpus;

    num_cpus = ml_get_max_cpus();

    if (cpu >= num_cpus)
        panic("cpu_userwindow_init: cpu > num_cpus");

    if (user_window_base == 0) {

        if (vm_allocate(kernel_map, &vaddr,
                        (NBPDE * NCOPY_WINDOWS * num_cpus) + NBPDE,
                        VM_FLAGS_ANYWHERE) != KERN_SUCCESS)
            panic("cpu_userwindow_init: "
                  "couldn't allocate user map window");

        /*
         * window must start on a page table boundary
         * in the virtual address space
         */
        user_window_base = (vaddr + (NBPDE - 1)) & ~(NBPDE - 1);

        /*
         * get rid of any allocation leading up to our
         * starting boundary
         */
        vm_deallocate(kernel_map, vaddr, user_window_base - vaddr);

        /*
         * get rid of tail that we don't need
         */
        user_window = user_window_base +
            (NBPDE * NCOPY_WINDOWS * num_cpus);

        vm_deallocate(kernel_map, user_window,
                      (vaddr +
                       ((NBPDE * NCOPY_WINDOWS * num_cpus) + NBPDE)) -
                      user_window);
    }

    user_window = user_window_base + (cpu * NCOPY_WINDOWS * NBPDE);

    cdp->cpu_copywindow_base = user_window;
    cdp->cpu_copywindow_pdp = pmap_pde(kernel_pmap, user_window);

    cdi->cdi_gdt[sel_idx(USER_WINDOW_SEL)] = userwindow_desc_pattern;
    cdi->cdi_gdt[sel_idx(USER_WINDOW_SEL)].offset = user_window;

    fix_desc(&cdi->cdi_gdt[sel_idx(USER_WINDOW_SEL)], 1);
}
/*
 * This message server catches server exceptions. It runs in a dedicated thread.
 */
void *
server_exception_catcher(
    void    *arg)
{
    struct server_thread_priv_data  priv_data;
    kern_return_t                   kr;
#define MSG_BUFFER_SIZE 8192
    union request_msg {
        mach_msg_header_t   hdr;
        mig_reply_error_t   death_pill;
        char                space[MSG_BUFFER_SIZE];
    } *msg_buffer_1, *msg_buffer_2;
    mach_msg_header_t       *request;
    mig_reply_error_t       *reply;

    cthread_set_name(cthread_self(), "server exc catcher");
    server_thread_set_priv_data(cthread_self(), &priv_data);

    kr = vm_allocate(mach_task_self(),
                     (vm_address_t *) &msg_buffer_1,
                     2 * sizeof *msg_buffer_1,
                     TRUE);
    if (kr != KERN_SUCCESS) {
        MACH3_DEBUG(0, kr, ("server_exception_catcher: vm_allocate"));
        panic("server_exception_catcher");
    }

    msg_buffer_2 = msg_buffer_1 + 1;
    request = &msg_buffer_1->hdr;
    reply = &msg_buffer_2->death_pill;

    do {
        kr = mach_msg(request, MACH_RCV_MSG, 0,
                      sizeof *msg_buffer_1,
                      server_exception_port,
                      MACH_MSG_TIMEOUT_NONE, MACH_PORT_NULL);
        if (kr != MACH_MSG_SUCCESS) {
            MACH3_DEBUG(1, kr, ("server_exception_catcher: mach_msg"));
            panic("server_exception_catcher: receive");
        }

        if (exc_server(request, &reply->Head)) {
        } else {
            printk("server_exception_catcher: invalid message "
                   "(id = %d = 0x%x)\n",
                   request->msgh_id, request->msgh_id);
        }

        panic("server_exception_catcher: what now ?");
    } while (1);

    cthread_detach(cthread_self());
    cthread_exit((void *) 0);
    /*NOTREACHED*/
    return (void *) 0;
}
int
main(int argc, char *argv[])
{
    struct timerinfo info;
    task_t task;
    char stack[16];
    u_long start, end;
    int i, pri, error;

    printf("Benchmark to create/terminate %d threads\n", NR_THREADS);

    sys_info(INFO_TIMER, &info);
    if (info.hz == 0)
        panic("can not get timer tick rate");

    thread_getpri(thread_self(), &pri);
    thread_setpri(thread_self(), pri - 1);

    task = task_self();
    error = vm_allocate(task, (void **)&thread,
                        sizeof(thread_t) * NR_THREADS, 1);
    if (error)
        panic("vm_allocate failed");

    sys_time(&start);

    /*
     * Create threads
     */
    for (i = 0; i < NR_THREADS; i++) {
        if (thread_create(task, &thread[i]) != 0)
            panic("thread_create failed");
        if (thread_load(thread[i], null_thread, &stack) != 0)
            panic("thread_load failed");
        if (thread_resume(thread[i]) != 0)
            panic("thread_resume failed");
    }

    /*
     * Terminate threads
     */
    for (i = 0; i < NR_THREADS; i++)
        thread_terminate(thread[i]);

    sys_time(&end);

    vm_free(task, thread);

    printf("Complete. The score is %d msec (%d ticks).\n",
           (int)((end - start) * 1000 / info.hz),
           (int)(end - start));
    return 0;
}
void * vm_acquire(size_t size, int options)
{
    void * addr;

    errno = 0;

    // VM_MAP_FIXED is to be used with vm_acquire_fixed() only
    if (options & VM_MAP_FIXED)
        return VM_MAP_FAILED;

#ifndef HAVE_VM_WRITE_WATCH
    if (options & VM_MAP_WRITE_WATCH)
        return VM_MAP_FAILED;
#endif

#if defined(HAVE_MACH_VM)
    // vm_allocate() returns a zero-filled memory region
    kern_return_t ret_code = vm_allocate(mach_task_self(),
                                         (vm_address_t *)&addr,
                                         size, TRUE);
    if (ret_code != KERN_SUCCESS) {
        errno = vm_error(ret_code);
        return VM_MAP_FAILED;
    }
#elif defined(HAVE_MMAP_VM)
    int fd = zero_fd;
    int the_map_flags = translate_map_flags(options) | map_flags;

    if ((addr = mmap(NULL, size, VM_PAGE_DEFAULT, the_map_flags, fd, 0)) == (void *)MAP_FAILED)
        return VM_MAP_FAILED;

    // Sanity checks for 64-bit platforms
    if (sizeof(void *) == 8 && (options & VM_MAP_32BIT) && !((char *)addr <= (char *)0xffffffff))
        return VM_MAP_FAILED;

    next_address = (char *)addr + size;
#elif defined(HAVE_WIN32_VM)
    int alloc_type = MEM_RESERVE | MEM_COMMIT;
    if (options & VM_MAP_WRITE_WATCH)
        alloc_type |= MEM_WRITE_WATCH;

    if ((addr = VirtualAlloc(NULL, size, alloc_type,
                             PAGE_EXECUTE_READWRITE)) == NULL)
        return VM_MAP_FAILED;
#else
    if ((addr = calloc(size, 1)) == 0)
        return VM_MAP_FAILED;

    // Omit changes for protections because they are not supported in this mode
    return addr;
#endif

    // Explicitly protect the newly mapped region here because on some systems,
    // say MacOS X, mmap() doesn't honour the requested protection flags.
    if (vm_protect(addr, size, VM_PAGE_DEFAULT) != 0)
        return VM_MAP_FAILED;

    return addr;
}
static int ramfs_write(vnode_t vp, file_t fp, struct uio *uio, size_t *result)
{
    struct ramfs_node *np;
    off_t file_pos, end_pos;
    void *new_buf;
    size_t new_size;

    *result = 0;

    if (vp->v_type == VDIR)
        return -EISDIR;
    if (vp->v_type != VREG)
        return -EINVAL;

    np = vp->v_data;

    /* Check if the file position exceeds the end of file. */
    end_pos = vp->v_size;
    file_pos = (fp->f_flags & O_APPEND) ? end_pos : fp->f_offset;

    size_t total = 0;
    const struct iovec *iov = uio->iov;
    for (int i = 0; i < uio->iovcnt; ++i) {
        size_t size = iov->iov_len;
        if (file_pos + size > (size_t)end_pos) {
            /* Expand the file size before writing to it */
            end_pos = file_pos + size;
            if (end_pos > (off_t)np->rn_bufsize) {
                /*
                 * We allocate the data buffer on page boundaries, so
                 * a reallocation is needed only when the file size
                 * crosses the next page boundary. This avoids the
                 * memory fragmentation that many small malloc/free
                 * calls would cause.
                 */
                new_size = round_page(end_pos);
                if (vm_allocate(getpid(), &new_buf, new_size, 1))
                    return -EIO;
                if (np->rn_size != 0) {
                    memcpy(new_buf, np->rn_buf, vp->v_size);
                    vm_free(getpid(), np->rn_buf, np->rn_bufsize);
                }
                np->rn_buf = new_buf;
                np->rn_bufsize = new_size;
            }
            np->rn_size = end_pos;
            vn_lock_rw(vp)->v_size = end_pos;
        }
        memcpy(np->rn_buf + file_pos, iov->iov_base, size);
        file_pos += size;
        total += size;
        ++iov;
    }
    fp->f_offset = file_pos;
    *result = total;
    return 0;
}
void *Page_Create(size_t size)
{
    kern_return_t kret;
    vm_address_t address = 0;

    kret = vm_allocate(mach_task_self(), &address, size, 1);
    MACH_CHECK_ERROR(kret);

    return ((void *)address);
}
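A release counterpart is symmetric over vm_deallocate. The sketch below is hypothetical (Page_Free is not shown in the original; it assumes the same MACH_CHECK_ERROR convention used above):

/* Hypothetical counterpart to Page_Create() above: return the pages
 * to the task's address space. Assumes the same MACH_CHECK_ERROR macro. */
void Page_Free(void *address, size_t size)
{
    kern_return_t kret;

    kret = vm_deallocate(mach_task_self(), (vm_address_t)address, size);
    MACH_CHECK_ERROR(kret);
}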