Example #1
File: vm.c Project: Kloniks/muk
static void test_vm_map(void)
{
  paddr_t paddr;
  vaddr_t vaddr;
  vm_as_t* as;

  as = vm_get_kernel_as();

  vaddr = (vaddr_t)0xb0000000;
  paddr = phys_alloc();

  vm_map(as, vaddr + PHYS_FRAME_SIZE * 0, paddr);
  *(volatile uint32_t*)(vaddr + PHYS_FRAME_SIZE * 0) = 0x2a;
  serial_printl("[?] --> %x\n",
		*(volatile uint32_t*)(vaddr + PHYS_FRAME_SIZE * 0));

  vm_map(as, vaddr + PHYS_FRAME_SIZE * 1, paddr);
  *(volatile uint32_t*)(vaddr + PHYS_FRAME_SIZE * 1) = 0x2a;
  serial_printl("[?] --> %x\n",
		*(volatile uint32_t*)(vaddr + PHYS_FRAME_SIZE * 1));

  vm_map(as, vaddr + PHYS_FRAME_SIZE * 2, paddr);
  *(volatile uint32_t*)(vaddr + PHYS_FRAME_SIZE * 2) = 0x2a;
  serial_printl("[?] --> %x\n",
		*(volatile uint32_t*)(vaddr + PHYS_FRAME_SIZE * 2));

  phys_free(paddr);
}
Example #2
int main(int argc, char** argv)
{
	// Declare and initialize the arguments for vm_map()
	kern_return_t kr;
	vm_task_t target_task=mach_task_self();
	vm_address_t address=0;// let the kernel choose the address
	vm_size_t size=vm_page_size;
	vm_address_t mask=0;// no alignment constraint
	boolean_t anywhere=TRUE;
	memory_object_t memory_object=MEMORY_OBJECT_NULL;
	vm_offset_t offset=0;
	boolean_t copy=FALSE;
	vm_prot_t cur_protection=(VM_PROT_READ|VM_PROT_WRITE);// current protection: read/write
	vm_prot_t max_protection=(VM_PROT_READ|VM_PROT_WRITE);// maximum protection: read/write
	vm_inherit_t inheritance=VM_INHERIT_SHARE;// share the mapping across fork

	kr=0;

	// Establish a new memory mapping with vm_map()
	kr=vm_map(target_task,&address,size,mask,anywhere,
				memory_object,offset,copy,cur_protection,
				max_protection, inheritance);
	if(kr)
		{
			mach_error("the value of error is",kr);
			printf("vm_map() is exiting:%d\n",kr);
			return kr;
		}
	printf("vm_map is ok\n");

	return 0;
}
Example #3
/* Implement io_map_cntl as described in <hurd/io.defs>. */
kern_return_t
diskfs_S_io_map_cntl (struct protid *cred,
		      memory_object_t *ctlobj,
		      mach_msg_type_name_t *ctlobj_type)
{
  if (!cred)
    return EOPNOTSUPP;

  assert (__vm_page_size >= sizeof (struct shared_io));
  mutex_lock (&cred->po->np->lock);
  if (!cred->mapped)
    {
      default_pager_object_create (diskfs_default_pager, &cred->shared_object,
				   __vm_page_size);
      vm_map (mach_task_self (), (vm_address_t *)&cred->mapped, vm_page_size,
	      0, 1, cred->shared_object, 0, 0,
	      VM_PROT_READ|VM_PROT_WRITE, VM_PROT_READ|VM_PROT_WRITE, 0);
      cred->mapped->shared_page_magic = SHARED_PAGE_MAGIC;
      cred->mapped->conch_status = USER_HAS_NOT_CONCH;
      spin_lock_init (&cred->mapped->lock);
      *ctlobj = cred->shared_object;
      *ctlobj_type = MACH_MSG_TYPE_COPY_SEND;
      mutex_unlock (&cred->po->np->lock);
      return 0;
    }
  else
    {
      mutex_unlock (&cred->po->np->lock);
      return EBUSY;
    }
}
Example #4
PageAllocationAligned PageAllocationAligned::allocate(size_t size, size_t alignment, OSAllocator::Usage usage, bool writable)
{
    ASSERT(isPageAligned(size));
    ASSERT(isPageAligned(alignment));
    ASSERT(isPowerOfTwo(alignment));
    ASSERT(size >= alignment);
    size_t alignmentMask = alignment - 1;

#if OS(DARWIN)
    int flags = VM_FLAGS_ANYWHERE;
    if (usage != OSAllocator::UnknownUsage)
        flags |= usage;
    int protection = PROT_READ;
    if (writable)
        protection |= PROT_WRITE;

    vm_address_t address = 0;
    vm_map(current_task(), &address, size, alignmentMask, flags, MEMORY_OBJECT_NULL, 0, FALSE, protection, PROT_READ | PROT_WRITE, VM_INHERIT_DEFAULT);
    return PageAllocationAligned(reinterpret_cast<void*>(address), size);
#else
    size_t alignmentDelta = alignment - pageSize();

    // Reserve with sufficient additional VM to correctly align.
    size_t reservationSize = size + alignmentDelta;
    void* reservationBase = OSAllocator::reserveUncommitted(reservationSize, usage, writable, false);

    // Select an aligned region within the reservation and commit.
    void* alignedBase = reinterpret_cast<uintptr_t>(reservationBase) & alignmentMask
                        ? reinterpret_cast<void*>((reinterpret_cast<uintptr_t>(reservationBase) & ~alignmentMask) + alignment)
                        : reservationBase;
    OSAllocator::commit(alignedBase, size, writable, false);

    return PageAllocationAligned(alignedBase, size, reservationBase, reservationSize);
#endif
}
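The non-Darwin path above over-reserves by alignment - pageSize() and then bumps the reservation base up to the next multiple of the alignment. A minimal standalone sketch of that bump, using hypothetical addresses that are not part of the original code:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uintptr_t reservationBase = 0x7f3a12345678;  /* hypothetical reservation start */
    uintptr_t alignment       = 0x10000;         /* 64 KiB alignment */
    uintptr_t alignmentMask   = alignment - 1;

    /* Same computation as the non-Darwin branch: if the base is not already
       aligned, round it down to an alignment boundary and step up one unit. */
    uintptr_t alignedBase = (reservationBase & alignmentMask)
        ? ((reservationBase & ~alignmentMask) + alignment)
        : reservationBase;

    printf("reserved at %#lx, aligned base %#lx\n",
           (unsigned long)reservationBase, (unsigned long)alignedBase);
    return 0;
}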
Example #5
void pm_free_page(addr_t addr)
{
	if(!paging_enabled)
		panic(PANIC_MEM | PANIC_NOSYNC, "Called free page without paging environment");
	if(addr < pm_location || (((addr > highest_page) || addr < lowest_page) 
			&& memory_has_been_mapped)) {
		panic(PANIC_MEM | PANIC_NOSYNC, "tried to free invalid physical address");
		return;
	}
	if(current_task) current_task->freed++;
	mutex_on(&pm_mutex);
	if(pm_stack_max <= pm_stack)
	{
		vm_map(pm_stack_max, addr, PAGE_PRESENT | PAGE_WRITE, MAP_CRIT);
		memset((void *)pm_stack_max, 0, PAGE_SIZE);
		pm_stack_max += PAGE_SIZE;
	} else {
		*(unsigned int *)(pm_stack) = addr;
		pm_stack += sizeof(unsigned int);
		--pm_used_pages;
	}
	if(current_task && current_task->num_pages)
		current_task->num_pages--;
	mutex_off(&pm_mutex);
}
Example #6
File: vm.c Project: Kloniks/muk
static void test_vm_unmap(void)
{
  paddr_t paddr;
  vaddr_t vaddr;
  vm_as_t* as;

  as = vm_get_kernel_as();

  serial_printl("[?] test unmap\n");

  vaddr = (vaddr_t)0xb0000000;
  paddr = phys_alloc();
  vm_map(as, vaddr, paddr);

  /* no page fault
   */
  *(uint32_t*)vaddr = 0x2a;

  vm_unmap(as, vaddr);
  phys_free(paddr);

  /* page fault
   */
  *(uint32_t*)vaddr = 0x2a;
}
Example #7
File: fork.c Project: AndrewD/prex
void
vfork_end(struct proc *p)
{
	void *stack;
	task_t self = task_self();

	DPRINTF(("vfork_end: org=%x saved=%x\n", p->p_stackbase,
		 p->p_stacksaved));
	/*
	 * Restore parent's stack
	 */
	if (vm_map(p->p_task, p->p_stackbase, USTACK_SIZE, &stack) != 0)
		return;

	memcpy(stack, p->p_stacksaved, USTACK_SIZE);

	vm_free(self, p->p_stacksaved);
	vm_free(self, stack);

	/*
	 * Resume parent
	 */
	p->p_vforked = 0;
	task_resume(p->p_task);
}
Example #8
void
attack(void)
{
	object_t *objp	= (object_t *)random();
	object_t obj	= (object_t)random();
	char *name	= (char *)random();
	void *msg	= (void *)random();
	size_t size	= (size_t)random();
	task_t self	= task_self();
	void *addr	= (void *)random();
	int attr	= random() & 7;
	thread_t t	= (thread_t)random();
	thread_t *tp	= (thread_t *)random();

	object_create(NULL, NULL);
	object_create(NULL, objp);
	object_create(name, NULL);
	object_create(name, objp);

	object_destroy(0);
	object_destroy(obj);

	object_lookup(NULL, objp);
	object_lookup(name, NULL);
	object_lookup(name, objp);

	msg_send(0, msg, size);
	msg_send(obj, NULL, size);
	msg_send(obj, msg, 0);
	msg_send(0, msg, 0);
	msg_send(0, NULL, size);
	msg_send(obj, msg, size);

	msg_receive(0, msg, size);
	msg_receive(obj, NULL, size);
	msg_receive(obj, msg, 0);
	msg_receive(0, msg, 0);
	msg_receive(0, NULL, size);
	msg_receive(obj, msg, size);

	msg_reply(0, msg, size);
	msg_reply(obj, NULL, size);
	msg_reply(obj, msg, 0);
	msg_reply(0, msg, 0);
	msg_reply(0, NULL, size);
	msg_reply(obj, msg, size);

	vm_allocate(self, addr, size, 1);
	vm_allocate(self, &addr, size, 1);

	vm_free(self, addr);
	vm_attribute(self, addr, attr);
	vm_map(self, addr, size, &addr);

	thread_create(self, tp);
	thread_suspend(t);
	thread_terminate(t);
}
Example #9
int 
xf86ReadBIOS(unsigned long Base,unsigned long Offset,unsigned char *Buf,int Len)
{
    mach_port_t device,iopl_dev;
    memory_object_t iopl_mem;
    vm_address_t addr = (vm_address_t)0; /* search starting address */
    kern_return_t err;


    err = get_privileged_ports (NULL, &device);
    if( err )
    {
	errno = err;
	FatalError("xf86ReadBIOS() can't get_privileged_ports. (%s)\n",strerror(errno));
    }
    err = device_open(device,D_READ|D_WRITE,"iopl",&iopl_dev);
    mach_port_deallocate (mach_task_self (), device);
    if( err )
    {
	errno = err;
	FatalError("xf86ReadBIOS() can't device_open. (%s)\n",strerror(errno));
    }
    err = device_map(iopl_dev,VM_PROT_READ|VM_PROT_WRITE, Base , BIOS_SIZE ,&iopl_mem,0);
    if( err )
    {
	errno = err;
	FatalError("xf86ReadBIOS() can't device_map. (%s)\n",strerror(errno));
    }
    err = vm_map(mach_task_self(),
		 &addr,
		 BIOS_SIZE,
		 0,
		 TRUE,
		 iopl_mem,
		 Base,
		 FALSE,
		 VM_PROT_READ|VM_PROT_WRITE,
		 VM_PROT_READ|VM_PROT_WRITE,
		 VM_INHERIT_SHARE);
    mach_port_deallocate(mach_task_self(),iopl_mem);
    if( err )
    {
	errno = err;
	FatalError("xf86ReadBIOS() can't vm_map. (%s)\n",strerror(errno));
    }

    memcpy(Buf,(void*)((int)addr + Offset), Len);
    
    err = vm_deallocate(mach_task_self(), addr, BIOS_SIZE);
    if( err )
    {
	errno = err;
	FatalError("xf86ReadBIOS() can't vm_deallocate. (%s)\n",strerror(errno));
    }
    
    return Len;
}
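A hypothetical call site, assuming the conventional PC video BIOS base of 0xC0000 (the helper itself exits via FatalError on any Mach error, so the return value is simply the requested length):

    unsigned char buf[256];

    /* Read the first 256 bytes of the video BIOS ROM; a valid option ROM
       begins with the 0x55 0xAA signature. */
    xf86ReadBIOS(0xC0000, 0, buf, sizeof(buf));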
Example #10
/*
 * Contiguous space allocator for items of less than a page size.
 */
static union header *
kget_space(vm_offset_t allocsize)
{
	vm_size_t	space_to_add = 0;
	vm_offset_t	new_space = 0;
	union header	*addr;

	while (kalloc_next_space + allocsize > kalloc_end_of_space) {
	    /*
	     * Add at least one page to allocation area.
	     */
	    space_to_add = round_page(allocsize);

	    if (new_space == 0) {
		/*
		 * Allocate memory.
		 * Try to make it contiguous with the last
		 * allocation area.
		 */
		new_space = kalloc_end_of_space;
		if (vm_map(mach_task_self(),
			   &new_space, space_to_add, (vm_offset_t) 0, TRUE,
			   MEMORY_OBJECT_NULL, (vm_offset_t) 0, FALSE,
			   VM_PROT_DEFAULT, VM_PROT_ALL, VM_INHERIT_DEFAULT)
			!= KERN_SUCCESS)
		    return 0;
		continue;
	    }

	    /*
	     * Memory was allocated in a previous iteration.
	     * Check whether the new region is contiguous with the
	     * old one.
	     */
	    if (new_space != kalloc_end_of_space) {
		/*
		 * Throw away the remainder of the old space,
		 * and start a new one.
		 */
		kalloc_wasted_space +=
			kalloc_end_of_space - kalloc_next_space;
		kalloc_next_space = new_space;
	    }
	    kalloc_end_of_space = new_space + space_to_add;

	    new_space = 0;
	}

	addr = (union header *)kalloc_next_space;
	kalloc_next_space += allocsize;

	if (new_space != 0)
	    (void) vm_deallocate(mach_task_self(), new_space, space_to_add);

	return addr;
}
Example #11
void _start() {
	vm_map(0x10000000, (0xAFFFF - 0xA0000), 0xA0000);

	busy_wait(0x5FFFFFF);
	uint8_t color = 0;
	do {
		fill_screen(color);
		busy_wait(0xFFFFF);
		color = (color + 1) % 255;
	} while(1);
}
Example #12
/**************************************************************************
 * Video Memory Mapping section                                            
 ***************************************************************************/
static pointer
mapVidMem(int ScreenNum, unsigned long Base, unsigned long Size, int Flags)
{
    mach_port_t device,iopl_dev;
    memory_object_t iopl_mem;
    kern_return_t err;
    vm_address_t addr=(vm_address_t)0;

    err = get_privileged_ports (NULL, &device);
    if( err )
    {
	errno = err;
	FatalError("xf86MapVidMem() can't get_privileged_ports. (%s)\n",strerror(errno));
    }
    err = device_open(device,D_READ|D_WRITE,"iopl",&iopl_dev);
    mach_port_deallocate (mach_task_self(), device);
    if( err )
    {
	errno = err;
	FatalError("xf86MapVidMem() can't device_open. (%s)\n",strerror(errno));
    }

    err = device_map(iopl_dev,VM_PROT_READ|VM_PROT_WRITE, Base , Size ,&iopl_mem,0);
    if( err )
    {
	errno = err;
	FatalError("xf86MapVidMem() can't device_map. (%s)\n",strerror(errno));
    }
    err = vm_map(mach_task_self(),
		 &addr,
		 Size,
		 0,     /* mask */
		 TRUE,  /* anywhere */
		 iopl_mem,
		 (vm_offset_t)Base,
		 FALSE, /* copy on write */
		 VM_PROT_READ|VM_PROT_WRITE,
		 VM_PROT_READ|VM_PROT_WRITE,
		 VM_INHERIT_SHARE);
    mach_port_deallocate(mach_task_self(),iopl_mem);
    if( err )
    {
	errno = err;
	FatalError("xf86MapVidMem() can't vm_map.(iopl_mem) (%s)\n",strerror(errno));
    }
    err = mach_port_deallocate(mach_task_self(),iopl_dev);
    if( err )
    {
	errno = err;
	FatalError("xf86MapVidMem() can't mach_port_deallocate.(iopl_dev) (%s)\n",strerror(errno));
    }
    return (pointer)addr;
}
Example #13
void setup_thread(thread_params_t *params)
{
    context_t user_context;
    uint32_t phys_page;
    int i;
    interrupt_status_t intr_status;
    thread_table_t *thread= thread_get_current_thread_entry();

    /* Copy thread parameters. */
    int arg = params->arg;
    void (*func)(int) = params->func;
    process_id_t pid = thread->process_id = params->pid;
    thread->pagetable = params->pagetable;
    params->done = 1; /* OK, we don't need params any more. */

    intr_status = _interrupt_disable();
    spinlock_acquire(&process_table_slock);

    /* Set up userspace environment. */
    memoryset(&user_context, 0, sizeof(user_context));

    user_context.cpu_regs[MIPS_REGISTER_A0] = arg;
    user_context.pc = (uint32_t)func;

    /* Allocate thread stack */
    if (process_table[pid].bot_free_stack != 0) {
        /* Reuse old thread stack. */
        user_context.cpu_regs[MIPS_REGISTER_SP] =
            process_table[pid].bot_free_stack
            + CONFIG_USERLAND_STACK_SIZE*PAGE_SIZE
            - 4; /* Space for the thread argument */
        process_table[pid].bot_free_stack =
            *(uint32_t*)process_table[pid].bot_free_stack;
    } else {
        /* Allocate physical pages (frames) for the stack. */
        for (i = 0; i < CONFIG_USERLAND_STACK_SIZE; i++) {
            phys_page = pagepool_get_phys_page();
            KERNEL_ASSERT(phys_page != 0);
            vm_map(thread->pagetable, phys_page,
                    process_table[pid].stack_end - (i+1)*PAGE_SIZE, 1);
        }
        user_context.cpu_regs[MIPS_REGISTER_SP] =
            process_table[pid].stack_end-4; /* Space for the thread argument */
        process_table[pid].stack_end -= PAGE_SIZE*CONFIG_USERLAND_STACK_SIZE;
    }

    tlb_fill(thread->pagetable);

    spinlock_release(&process_table_slock);
    _interrupt_set_state(intr_status);

    thread_goto_userland(&user_context);
}
Example #14
File: vm.c Project: Kloniks/muk
static void test_page_fault(void)
{
  volatile uint32_t* vaddr_0 = (uint32_t*)0xb0000000;
  volatile uint32_t* vaddr_1 = (uint32_t*)0xb0001000;
  volatile uint32_t* vaddr_2 = (uint32_t*)0xb0002000;
  paddr_t paddr = phys_alloc();
  vm_as_t* as;

  as = vm_get_kernel_as();

  vm_map(as, (vaddr_t)vaddr_0, paddr);
  vm_map(as, (vaddr_t)vaddr_1, paddr);
  vm_map(as, (vaddr_t)vaddr_2, paddr);

  *vaddr_0 = 0x2a;

  if ((*vaddr_0 != 0x2a) ||
      (*vaddr_0 != *vaddr_1) ||
      (*vaddr_0 != *vaddr_2))
    BUG();
}
Example #15
int main(int argc, char *argv[])
{
	kern_return_t kr;
	mach_port_t netmemory_server;

	mach_port_t netmemory_object;
	mach_port_t memory_object;
	
	netname_name_t netmemory_object_name;	/* name of the remote memory object; must be set before the lookup below */
	vm_size_t size=vm_page_size;
	vm_address_t address=0;
	boolean_t anywhere=TRUE;


// look up the network memory server
	kr=netname_look_up(name_server_port,
						"",
						"netmemoryserver",
						&netmemory_server);
	if (kr)
		return kr;

// look up the remote memory object by its registered name
	kr=netname_look_up(name_server_port,"",
						netmemory_object_name,
						&netmemory_object);
	if (kr)
	{
		printf("netmemory object was not found\n");
		return kr;
	}
	
// ask the netmemory server for a locally cached memory object
	kr=netmemory_cache(netmemory_server,
						netmemory_object,
						&memory_object);
	if (kr)
		return kr;

// map the cached memory object into our address space
	kr=vm_map(mach_task_self(),&address,size,0,anywhere,
					memory_object,0,FALSE,VM_PROT_DEFAULT,
					VM_PROT_DEFAULT,VM_INHERIT_SHARE);

	if (kr)
		return kr;

// print the address where the object was mapped
	printf("address is 0x%lx\n",(unsigned long)address);
	//address=100;
	
	return 0;
}
Example #16
File: pager.c Project: Larhard/hurd
/* Create a new pager in USER_PAGER with NPAGES pages, and return a
   mapping to the memory in *USER.  */
error_t
user_pager_create (struct user_pager *user_pager, unsigned int npages,
		   struct cons_display **user)
{
  error_t err;
  struct user_pager_info *upi;

  upi = calloc (1, sizeof (struct user_pager_info)
		+ sizeof (vm_address_t) * npages);
  if (!upi)
    return errno;

  upi->memobj_npages = npages;

  /* XXX Are the values 1 and MEMORY_OBJECT_COPY_DELAY correct? */
  user_pager->pager = pager_create (upi, pager_bucket,
				    1, MEMORY_OBJECT_COPY_DELAY, 0);
  if (!user_pager->pager)
    {
      free (upi);
      return errno;
    }
  user_pager->memobj = pager_get_port (user_pager->pager);
  ports_port_deref (user_pager->pager);

  mach_port_insert_right (mach_task_self (), user_pager->memobj,
			  user_pager->memobj, MACH_MSG_TYPE_MAKE_SEND);

  *user = 0;
  err = vm_map (mach_task_self (),
		(vm_address_t *) user,
		(vm_size_t) npages * vm_page_size,
		(vm_address_t) 0,
		1 /* ! (flags & MAP_FIXED) */,
		user_pager->memobj, 0 /* (vm_offset_t) offset */,
		0 /* ! (flags & MAP_SHARED) */,
                VM_PROT_READ | VM_PROT_WRITE,
                VM_PROT_READ | VM_PROT_WRITE,
                VM_INHERIT_NONE);
  if (err)
    {
      /* UPI will be cleaned up by libpager.  */
      mach_port_deallocate (mach_task_self (), user_pager->memobj);
      return err;
    }

  return 0;
}
Example #17
File: process.c Project: PtxDK/OSM
void* syscall_memlimit(void* new_end){
  uint32_t phys_page;
  interrupt_status_t intr_status;
  
  intr_status = _interrupt_disable();
  spinlock_acquire(&process_table_slock);
  
  process_control_block_t *curr_proc = process_get_current_process_entry();
  // If new_end is NULL, just return the current heap end.
  if (new_end == NULL){
    spinlock_release(&process_table_slock);
    _interrupt_set_state(intr_status);
    return (void*)curr_proc->heap_end;
  }
  // Check whether the caller is trying to shrink the heap; if so, fail
  // and return NULL.
  if ((uint32_t)new_end < curr_proc->heap_end){
    spinlock_release(&process_table_slock);
    _interrupt_set_state(intr_status);
    return NULL;
  }
  // Loop where we allocate the physical and virtual memory; we also check
  // that enough physical memory is available.
  for (uint32_t i = (curr_proc->heap_end / PAGE_SIZE +1);
       i <= ((uint32_t)new_end / PAGE_SIZE);i++){
    // we allocate some physical memory.
    phys_page = physmem_allocblock();
    // Check that we actually got a physical page; if not, fail and return NULL.
    if (phys_page == 0){
      spinlock_release(&process_table_slock);
      _interrupt_set_state(intr_status);
      return NULL;
    }
    // maps the virtual memory
    vm_map(thread_get_current_thread_entry()->pagetable,
           phys_page, i * PAGE_SIZE, 1);
  }
  // If nothing failed, finally set heap_end to new_end.
  curr_proc->heap_end = (uint32_t)new_end;
  
  spinlock_release(&process_table_slock);
  _interrupt_set_state(intr_status);
  return new_end;
}
Example #18
bool AUMCircularBufferInit(AUMCircularBuffer *buffer, int length) {
    
    buffer->length = round_page(length);    // We need whole page sizes
    
    // Temporarily allocate twice the length, so we have the contiguous address space to
    // support a second instance of the buffer directly after
    vm_address_t bufferAddress;
    if ( !checkResult(vm_allocate(mach_task_self(), &bufferAddress, buffer->length * 2, TRUE /* (don't use the current bufferAddress value) */),
                      "Buffer allocation") ) return false;
    
    // Now replace the second half of the allocation with a virtual copy of the first half. Deallocate the second half...
    if ( !checkResult(vm_deallocate(mach_task_self(), bufferAddress + buffer->length, buffer->length),
                      "Buffer deallocation") ) return false;
    
    // Then create a memory entry that refers to the buffer
    vm_size_t entry_length = buffer->length;
    mach_port_t memoryEntry;
    if ( !checkResult(mach_make_memory_entry(mach_task_self(), &entry_length, bufferAddress, VM_PROT_READ|VM_PROT_WRITE, &memoryEntry, 0),
                      "Create memory entry") ) {
        vm_deallocate(mach_task_self(), bufferAddress, buffer->length);
        return false;
    }
    
    // And map the memory entry to the address space immediately after the buffer
    vm_address_t virtualAddress = bufferAddress + buffer->length;
    if ( !checkResult(vm_map(mach_task_self(), &virtualAddress, buffer->length, 0, FALSE, memoryEntry, 0, FALSE, VM_PROT_READ | VM_PROT_WRITE, VM_PROT_READ | VM_PROT_WRITE, VM_INHERIT_DEFAULT),
                      "Map buffer memory") ) {
        vm_deallocate(mach_task_self(), bufferAddress, buffer->length);
        return false;
    }
    
    if ( virtualAddress != bufferAddress+buffer->length ) {
        printf("Couldn't map buffer memory to end of buffer\n");
        vm_deallocate(mach_task_self(), virtualAddress, buffer->length);
        vm_deallocate(mach_task_self(), bufferAddress, buffer->length);
        return false;
    }
    
    buffer->buffer = (void*)bufferAddress;
    buffer->fillCount = 0;
    buffer->head = buffer->tail = 0;
    
    return true;
}
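Because the second mapping mirrors the first, a reader can copy fillCount bytes starting at tail with a single memcpy, even when the range wraps past the end of the buffer. A minimal, hypothetical consumer helper (not part of the original API), assuming the AUMCircularBuffer fields used above and leaving thread-safety to the caller:

#include <string.h>

/* Copy `length` bytes out of the ring without a wrap check: the mirrored
   mapping makes buffer->buffer[tail .. tail+length) valid even when
   tail + length exceeds buffer->length. */
static void AUMCircularBufferRead(AUMCircularBuffer *buffer, void *dst, int length) {
    memcpy(dst, (char *)buffer->buffer + buffer->tail, length);
    buffer->tail = (buffer->tail + length) % buffer->length;
    buffer->fillCount -= length;
}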
Example #19
void* allocateJSBlock(unsigned size)
{
    // See: JavaScriptCore/runtime/Collector.cpp
    OLYMPIA_ASSERT(size == BLOCK_SIZE);
#if defined(OLYMPIA_LINUX) //|| defined(OLYMPIA_MAC)
#if ENABLE(JSC_MULTIPLE_THREADS)
#error Need to initialize pagesize safely.
#endif
    static size_t pagesize = pageSize();

    size_t extra = 0;
    if (BLOCK_SIZE > pagesize)
        extra = BLOCK_SIZE - pagesize;

    void* mmapResult = mmap(NULL, BLOCK_SIZE + extra, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON, -1, 0);
    uintptr_t address = reinterpret_cast<uintptr_t>(mmapResult);

    size_t adjust = 0;
    if ((address & BLOCK_OFFSET_MASK) != 0)
        adjust = BLOCK_SIZE - (address & BLOCK_OFFSET_MASK);

    if (adjust > 0)
        munmap(reinterpret_cast<char*>(address), adjust);

    if (adjust < extra)
        munmap(reinterpret_cast<char*>(address + adjust + BLOCK_SIZE), extra - adjust);

    address += adjust;
    return reinterpret_cast<void*>(address);
#elif defined(OLYMPIA_WINDOWS)
#if COMPILER(MINGW) && !COMPILER(MINGW64)
    void* address = __mingw_aligned_malloc(BLOCK_SIZE, BLOCK_SIZE);
#else
    void* address = _aligned_malloc(BLOCK_SIZE, BLOCK_SIZE);
#endif
    memset(address, 0, BLOCK_SIZE);
    return address;
#elif defined(OLYMPIA_MAC)
    vm_address_t address = 0;
    vm_map(current_task(), &address, BLOCK_SIZE, BLOCK_OFFSET_MASK, VM_FLAGS_ANYWHERE | VM_TAG_FOR_COLLECTOR_MEMORY, MEMORY_OBJECT_NULL, 0, FALSE, VM_PROT_DEFAULT, VM_PROT_DEFAULT, VM_INHERIT_DEFAULT);
    return reinterpret_cast<void*>(address);
#endif
}
Example #20
void
diskfs_start_disk_pager (struct user_pager_info *upi,
			 struct port_bucket *pager_bucket, int may_cache,
			 size_t size, void **image)
{
  error_t err;
  mach_port_t disk_pager_port;

  /* Make a thread to service paging requests.  */
  cthread_detach (cthread_fork ((cthread_fn_t) service_paging_requests,
				(any_t)pager_bucket));

  /* Create the pager.  */
  diskfs_disk_pager = pager_create (upi, pager_bucket,
				    may_cache, MEMORY_OBJECT_COPY_NONE);
  assert (diskfs_disk_pager);

  /* Get a port to the disk pager.  */
  disk_pager_port = pager_get_port (diskfs_disk_pager);
  mach_port_insert_right (mach_task_self (), disk_pager_port, disk_pager_port,
			  MACH_MSG_TYPE_MAKE_SEND);

  /* Now map the disk image.  */
  err = vm_map (mach_task_self (), (vm_address_t *)image, size,
		0, 1, disk_pager_port, 0, 0,
		VM_PROT_READ | (diskfs_readonly ? 0 : VM_PROT_WRITE),
		VM_PROT_READ | VM_PROT_WRITE,
		VM_INHERIT_NONE);
  if (err)
    error (2, err, "cannot vm_map whole disk");

  /* Set up the signal preemptor to catch faults on the disk image.  */
  preemptor.first = (vm_address_t) *image;
  preemptor.last = ((vm_address_t) *image + size);
  hurd_preempt_signals (&preemptor);

  /* We have the mapping; we no longer need the send right.  */
  mach_port_deallocate (mach_task_self (), disk_pager_port);
}
Example #21
static void
vproc_shmem_init(void)
{
	vm_address_t vm_addr = 0;
	mach_port_t shmem_port;
	kern_return_t kr;

	kr = vproc_mig_setup_shmem(bootstrap_port, &shmem_port);

	//assert(kr == 0);
	if (kr) {
		/* rdar://problem/6416724
		 * If we fail to set up a shared memory page, just allocate a local chunk
		 * of memory. This way, processes can still introspect their own transaction
		 * counts if they're being run under a debugger. Moral of the story: Debug
		 * from the environment you intend to run in.
		 */
		void *_vm_addr = calloc(1, sizeof(struct vproc_shmem_s));
		if( !_vm_addr ) {
			return;
		}

		vm_addr = (vm_address_t)_vm_addr;
	} else {
		kr = vm_map(mach_task_self(), &vm_addr, getpagesize(), 0, true, shmem_port, 0, false,
					VM_PROT_READ|VM_PROT_WRITE, VM_PROT_READ|VM_PROT_WRITE, VM_INHERIT_NONE);
		
		//assert(kr == 0);
		if (kr) return;
		
		kr = mach_port_deallocate(mach_task_self(), shmem_port);
		
		//assert(kr == 0);
	}

	vproc_shmem = (struct vproc_shmem_s *)vm_addr;
}
Example #22
File: fork.c Project: AndrewD/prex
static int
vfork_start(struct proc *p)
{
	void *stack;
	task_t self = task_self();

	/*
	 * Save parent's stack
	 */
	if (vm_map(p->p_task, p->p_stackbase, USTACK_SIZE, &stack) != 0)
		return ENOMEM;

	if (vm_allocate(self, &p->p_stacksaved, USTACK_SIZE, 1) != 0)
		return ENOMEM;

	memcpy(p->p_stacksaved, stack, USTACK_SIZE);

	vm_free(self, stack);

	p->p_vforked = 1;
	DPRINTF(("vfork_start: saved=%x org=%x\n", p->p_stacksaved,
		 p->p_stackbase));
	return 0;
}
Example #23
/* Implement the diskfs_lookup callback from the diskfs library.  See
   <hurd/diskfs.h> for the interface specification.  */
error_t
diskfs_lookup_hard (struct node *dp, const char *name, enum lookup_type type,
		    struct node **npp, struct dirstat *ds, struct protid *cred)
{
  error_t err;
  ino_t inum;
  int namelen;
  int spec_dotdot;
  struct node *np = 0;
  int retry_dotdot = 0;
  vm_prot_t prot =
    (type == LOOKUP) ? VM_PROT_READ : (VM_PROT_READ | VM_PROT_WRITE);
  memory_object_t memobj;
  vm_address_t buf = 0;
  vm_size_t buflen = 0;
  int blockaddr;
  int idx, lastidx;
  int looped;

  if ((type == REMOVE) || (type == RENAME))
    assert (npp);

  if (npp)
    *npp = 0;

  spec_dotdot = type & SPEC_DOTDOT;
  type &= ~SPEC_DOTDOT;

  namelen = strlen (name);

  if (namelen > FAT_NAME_MAX)
    return ENAMETOOLONG;
  
 try_again:
  if (ds)
    {
      ds->type = LOOKUP;
      ds->mapbuf = 0;
      ds->mapextent = 0;
    }
  if (buf)
    {
      munmap ((caddr_t) buf, buflen);
      buf = 0;
    }
  if (ds && (type == CREATE || type == RENAME))
    ds->stat = LOOKING;

  /* Map in the directory contents. */
  memobj = diskfs_get_filemap (dp, prot);

  if (memobj == MACH_PORT_NULL)
    return errno;

  buf = 0;
  /* We allow extra space in case we have to do an EXTEND.  */
  buflen = round_page (dp->dn_stat.st_size + DIRBLKSIZ);
  err = vm_map (mach_task_self (),
                &buf, buflen, 0, 1, memobj, 0, 0, prot, prot, 0);
  mach_port_deallocate (mach_task_self (), memobj);

  inum = 0;

  diskfs_set_node_atime (dp);

  /* Start the lookup at DP->dn->dir_idx.  */
  idx = dp->dn->dir_idx;
  if (idx << LOG2_DIRBLKSIZ > dp->dn_stat.st_size)
    idx = 0;                    /* just in case */
  blockaddr = buf + (idx << LOG2_DIRBLKSIZ);
  looped = (idx == 0);
  lastidx = idx;
  if (lastidx == 0)
    lastidx = dp->dn_stat.st_size >> LOG2_DIRBLKSIZ;

  while (!looped || idx < lastidx)
    {
      err = dirscanblock (blockaddr, dp, idx, name, namelen, type, ds, &inum);
      if (!err)
        {
          dp->dn->dir_idx = idx;
          break;
        }
      if (err != ENOENT)
        {
          munmap ((caddr_t) buf, buflen);
          return err;
        }

      blockaddr += DIRBLKSIZ;
      idx++;
      if (blockaddr - buf >= dp->dn_stat.st_size && !looped)
        {
          /* We've gotten to the end; start back at the beginning.  */
          looped = 1;
          blockaddr = buf;
          idx = 0;
        }
    }

  diskfs_set_node_atime (dp);
  if (diskfs_synchronous)
    diskfs_node_update (dp, 1);

  /* If err is set here, it's ENOENT, and we don't want to
     think about that as an error yet.  */
  err = 0;

  if (inum && npp)
    {
      if (namelen != 2 || name[0] != '.' || name[1] != '.')
        {
          if (inum == dp->cache_id)
            {
              np = dp;
              diskfs_nref (np);
            }
          else
            {
              err = diskfs_cached_lookup_in_dirbuf (inum, &np, buf);
              if (err)
                goto out;
            }
        }

      /* We are looking up "..".  */
      /* Check to see if this is the root of the filesystem.  */
      else if (dp == diskfs_root_node)
        {
          err = EAGAIN;
          goto out;
        }

      /* We can't just do diskfs_cached_lookup, because we would then
         deadlock.  So we do this.  Ick.  */
      else if (retry_dotdot)
        {
          /* Check to see that we got the same answer as last time.  */
          if (inum != retry_dotdot)
            {
              /* Drop what we *thought* was .. (but isn't any more) and
                 try *again*.  */
              diskfs_nput (np);
              mutex_unlock (&dp->lock);
              err = diskfs_cached_lookup_in_dirbuf (inum, &np, buf);
              mutex_lock (&dp->lock);
              if (err)
                goto out;
              retry_dotdot = inum;
              goto try_again;
            }
          /* Otherwise, we got it fine and np is already set properly.  */
        }
      else if (!spec_dotdot)
        {
          /* Lock them in the proper order, and then
             repeat the directory scan to see if this is still
             right.  */
          mutex_unlock (&dp->lock);
          err = diskfs_cached_lookup_in_dirbuf (inum, &np, buf);
          mutex_lock (&dp->lock);
          if (err)
            goto out;
          retry_dotdot = inum;
          goto try_again;
        }

      /* Here below are the spec dotdot cases.  */
      else if (type == RENAME || type == REMOVE)
        np = ifind (inum);

      else if (type == LOOKUP)
        {
          diskfs_nput (dp);
          err = diskfs_cached_lookup_in_dirbuf (inum, &np, buf);
          if (err)
            goto out;
        }
      else
        assert (0);
    }

  if ((type == CREATE || type == RENAME) && !inum && ds && ds->stat == LOOKING)
    {
      /* We didn't find any room, so mark ds to extend the dir.  */
      ds->type = CREATE;
      ds->stat = EXTEND;
      ds->idx = dp->dn_stat.st_size >> LOG2_DIRBLKSIZ;
    }
Example #24
static  void*
commpage_allocate( 
	vm_map_t	submap,			// commpage32_map or commpage_map64
	size_t		area_used,		// _COMM_PAGE32_AREA_USED or _COMM_PAGE64_AREA_USED
	vm_prot_t	uperm)
{
	vm_offset_t	kernel_addr = 0;	// address of commpage in kernel map
	vm_offset_t	zero = 0;
	vm_size_t	size = area_used;	// size actually populated
	vm_map_entry_t	entry;
	ipc_port_t	handle;
	kern_return_t	kr;

	if (submap == NULL)
		panic("commpage submap is null");

	if ((kr = vm_map(kernel_map,
			 &kernel_addr,
			 area_used,
			 0,
			 VM_FLAGS_ANYWHERE | VM_MAKE_TAG(VM_KERN_MEMORY_OSFMK),
			 NULL,
			 0,
			 FALSE,
			 VM_PROT_ALL,
			 VM_PROT_ALL,
			 VM_INHERIT_NONE)))
		panic("cannot allocate commpage %d", kr);

	if ((kr = vm_map_wire(kernel_map,
			      kernel_addr,
			      kernel_addr+area_used,
			      VM_PROT_DEFAULT|VM_PROT_MEMORY_TAG_MAKE(VM_KERN_MEMORY_OSFMK),
			      FALSE)))
		panic("cannot wire commpage: %d", kr);

	/* 
	 * Now that the object is created and wired into the kernel map, mark it so that no delay
	 * copy-on-write will ever be performed on it as a result of mapping it into user-space.
	 * If such a delayed copy ever occurred, we could remove the kernel's wired mapping - and
	 * that would be a real disaster.
	 *
	 * JMM - What we really need is a way to create it like this in the first place.
	 */
	if (!(kr = vm_map_lookup_entry( kernel_map, vm_map_trunc_page(kernel_addr, VM_MAP_PAGE_MASK(kernel_map)), &entry) || entry->is_sub_map))
		panic("cannot find commpage entry %d", kr);
	VME_OBJECT(entry)->copy_strategy = MEMORY_OBJECT_COPY_NONE;

	if ((kr = mach_make_memory_entry( kernel_map,		// target map
				    &size,		// size 
				    kernel_addr,	// offset (address in kernel map)
				    uperm,	// protections as specified
				    &handle,		// this is the object handle we get
				    NULL )))		// parent_entry (what is this?)
		panic("cannot make entry for commpage %d", kr);

	if ((kr = vm_map_64(	submap,				// target map (shared submap)
			&zero,				// address (map into 1st page in submap)
			area_used,			// size
			0,				// mask
			VM_FLAGS_FIXED,			// flags (it must be 1st page in submap)
			handle,				// port is the memory entry we just made
			0,                              // offset (map 1st page in memory entry)
			FALSE,                          // copy
			uperm,   // cur_protection (R-only in user map)
			uperm,   // max_protection
		        VM_INHERIT_SHARE )))             // inheritance
		panic("cannot map commpage %d", kr);

	ipc_port_release(handle);
	/* Make the kernel mapping non-executable. This cannot be done
	 * at the time of map entry creation as mach_make_memory_entry
	 * cannot handle disjoint permissions at this time.
	 */
	kr = vm_protect(kernel_map, kernel_addr, area_used, FALSE, VM_PROT_READ | VM_PROT_WRITE);
	assert (kr == KERN_SUCCESS);

	return (void*)(intptr_t)kernel_addr;                     // return address in kernel map
}
Example #25
/**
 * Starts one userland process. The thread calling this function will
 * be used to run the process and will therefore never return from
 * this function. This function asserts that no errors occur in
 * process startup (the executable file exists and is a valid ecoff
 * file, enough memory is available, file operations succeed...).
 * Therefore this function is not suitable to allow startup of
 * arbitrary processes.
 *
 * @executable The name of the executable to be run in the userland
 * process
 */
void process_start(uint32_t pid)
{
    thread_table_t *my_entry;
    pagetable_t *pagetable;
    uint32_t phys_page;
    context_t user_context;
    uint32_t stack_bottom;
    elf_info_t elf;
    openfile_t file;
    const char* executable;

    int i;

    interrupt_status_t intr_status;

    my_entry = thread_get_current_thread_entry();
    my_entry->process_id = pid;
    executable = process_table[pid].executable;

    /* If the pagetable of this thread is not NULL, we are trying to
       run a userland process for a second time in the same thread.
       This is not possible. */
    KERNEL_ASSERT(my_entry->pagetable == NULL);

    pagetable = vm_create_pagetable(thread_get_current_thread());
    KERNEL_ASSERT(pagetable != NULL);

    intr_status = _interrupt_disable();
    my_entry->pagetable = pagetable;
    _interrupt_set_state(intr_status);

    file = vfs_open((char *)executable);
    /* Make sure the file existed and was a valid ELF file */
    KERNEL_ASSERT(file >= 0);
    KERNEL_ASSERT(elf_parse_header(&elf, file));

    /* Trivial and naive sanity check for entry point: */
    KERNEL_ASSERT(elf.entry_point >= PAGE_SIZE);

    /* Calculate the number of pages needed by the whole process
       (including userland stack). Since we don't have proper tlb
       handling code, all these pages must fit into TLB. */
    KERNEL_ASSERT(elf.ro_pages + elf.rw_pages + CONFIG_USERLAND_STACK_SIZE
		  <= _tlb_get_maxindex() + 1);

    /* Allocate and map stack */
    for(i = 0; i < CONFIG_USERLAND_STACK_SIZE; i++) {
        phys_page = pagepool_get_phys_page();
        KERNEL_ASSERT(phys_page != 0);
        vm_map(my_entry->pagetable, phys_page, 
               (USERLAND_STACK_TOP & PAGE_SIZE_MASK) - i*PAGE_SIZE, 1);
    }

    /* Allocate and map pages for the segments. We assume that
       segments begin at page boundary. (The linker script in tests
       directory creates this kind of segments) */
    for(i = 0; i < (int)elf.ro_pages; i++) {
        phys_page = pagepool_get_phys_page();
        KERNEL_ASSERT(phys_page != 0);
        vm_map(my_entry->pagetable, phys_page, 
               elf.ro_vaddr + i*PAGE_SIZE, 1);
    }

    for(i = 0; i < (int)elf.rw_pages; i++) {
        phys_page = pagepool_get_phys_page();
        KERNEL_ASSERT(phys_page != 0);
        vm_map(my_entry->pagetable, phys_page, 
               elf.rw_vaddr + i*PAGE_SIZE, 1);
    }

    /* Put the mapped pages into TLB. Here we again assume that the
       pages fit into the TLB. After writing proper TLB exception
       handling this call should be skipped. */
    //intr_status = _interrupt_disable();
    //tlb_fill(my_entry->pagetable);
    //_interrupt_set_state(intr_status);
    
    /* Now we may use the virtual addresses of the segments. */

    /* Zero the pages. */
    memoryset((void *)elf.ro_vaddr, 0, elf.ro_pages*PAGE_SIZE);
    memoryset((void *)elf.rw_vaddr, 0, elf.rw_pages*PAGE_SIZE);

    stack_bottom = (USERLAND_STACK_TOP & PAGE_SIZE_MASK) - 
        (CONFIG_USERLAND_STACK_SIZE-1)*PAGE_SIZE;
    memoryset((void *)stack_bottom, 0, CONFIG_USERLAND_STACK_SIZE*PAGE_SIZE);

    /* Copy segments */

    if (elf.ro_size > 0) {
	/* Make sure that the segment is in proper place. */
        KERNEL_ASSERT(elf.ro_vaddr >= PAGE_SIZE);
        KERNEL_ASSERT(vfs_seek(file, elf.ro_location) == VFS_OK);
        KERNEL_ASSERT(vfs_read(file, (void *)elf.ro_vaddr, elf.ro_size)
		      == (int)elf.ro_size);
    }

    if (elf.rw_size > 0) {
	/* Make sure that the segment is in proper place. */
        KERNEL_ASSERT(elf.rw_vaddr >= PAGE_SIZE);
        KERNEL_ASSERT(vfs_seek(file, elf.rw_location) == VFS_OK);
        KERNEL_ASSERT(vfs_read(file, (void *)elf.rw_vaddr, elf.rw_size)
		      == (int)elf.rw_size);
    }


    /* Set the dirty bit to zero (read-only) on read-only pages. */
    for(i = 0; i < (int)elf.ro_pages; i++) {
        vm_set_dirty(my_entry->pagetable, elf.ro_vaddr + i*PAGE_SIZE, 0);
    }

    /* Insert page mappings again to TLB to take read-only bits into use */
    //intr_status = _interrupt_disable();
    //tlb_fill(my_entry->pagetable);
    //_interrupt_set_state(intr_status);

    /* Initialize the user context. (Status register is handled by
       thread_goto_userland) */
    memoryset(&user_context, 0, sizeof(user_context));
    user_context.cpu_regs[MIPS_REGISTER_SP] = USERLAND_STACK_TOP;
    user_context.pc = elf.entry_point;

    vfs_close(file);

    thread_goto_userland(&user_context);

    KERNEL_PANIC("thread_goto_userland failed.");
}
Example #26
static
load_return_t
load_segment(
    struct segment_command	*scp,
    void *			pager,
    off_t			pager_offset,
    off_t			macho_size,
    __unused off_t		end_of_file,
    vm_map_t		map,
    load_result_t		*result
)
{
    kern_return_t		ret;
    vm_offset_t		map_addr, map_offset;
    vm_size_t		map_size, seg_size, delta_size;
    vm_prot_t 		initprot;
    vm_prot_t		maxprot;

    /*
     * Make sure what we get from the file is really ours (as specified
     * by macho_size).
     */
    if (scp->fileoff + scp->filesize > macho_size)
        return (LOAD_BADMACHO);
    /*
     * Make sure the segment is page-aligned in the file.
     */
    if ((scp->fileoff & PAGE_MASK) != 0)
        return LOAD_BADMACHO;

    seg_size = round_page(scp->vmsize);
    if (seg_size == 0)
        return(KERN_SUCCESS);

    /*
     *	Round sizes to page size.
     */
    map_size = round_page(scp->filesize);
    map_addr = trunc_page(scp->vmaddr);

#if 0	/* XXX (4596982) this interferes with Rosetta */
    if (map_addr == 0 &&
            map_size == 0 &&
            seg_size != 0 &&
            (scp->initprot & VM_PROT_ALL) == VM_PROT_NONE &&
            (scp->maxprot & VM_PROT_ALL) == VM_PROT_NONE) {
        /*
         * This is a "page zero" segment:  it starts at address 0,
         * is not mapped from the binary file and is not accessible.
         * User-space should never be able to access that memory, so
         * make it completely off limits by raising the VM map's
         * minimum offset.
         */
        ret = vm_map_raise_min_offset(map, (vm_map_offset_t) seg_size);
        if (ret != KERN_SUCCESS) {
            return LOAD_FAILURE;
        }
        return LOAD_SUCCESS;
    }
#endif

    map_offset = pager_offset + scp->fileoff;

    if (map_size > 0) {
        initprot = (scp->initprot) & VM_PROT_ALL;
        maxprot = (scp->maxprot) & VM_PROT_ALL;
        /*
         *	Map a copy of the file into the address space.
         */
        ret = vm_map(map,
                     &map_addr, map_size, (vm_offset_t)0,
                     VM_FLAGS_FIXED,	pager, map_offset, TRUE,
                     initprot, maxprot,
                     VM_INHERIT_DEFAULT);
        if (ret != KERN_SUCCESS)
            return(LOAD_NOSPACE);

        /*
         *	If the file didn't end on a page boundary,
         *	we need to zero the leftover.
         */
        delta_size = map_size - scp->filesize;
#if FIXME
        if (delta_size > 0) {
            vm_offset_t	tmp;

            ret = vm_allocate(kernel_map, &tmp, delta_size, VM_FLAGS_ANYWHERE);
            if (ret != KERN_SUCCESS)
                return(LOAD_RESOURCE);

            if (copyout(tmp, map_addr + scp->filesize,
                        delta_size)) {
                (void) vm_deallocate(
                    kernel_map, tmp, delta_size);
                return(LOAD_FAILURE);
            }

            (void) vm_deallocate(kernel_map, tmp, delta_size);
        }
#endif /* FIXME */
    }

    /*
     *	If the virtual size of the segment is greater
     *	than the size from the file, we need to allocate
     *	zero fill memory for the rest.
     */
    delta_size = seg_size - map_size;
    if (delta_size > 0) {
        vm_offset_t	tmp = map_addr + map_size;

        ret = vm_map(map, &tmp, delta_size, 0, VM_FLAGS_FIXED,
                     NULL, 0, FALSE,
                     scp->initprot, scp->maxprot,
                     VM_INHERIT_DEFAULT);
        if (ret != KERN_SUCCESS)
            return(LOAD_NOSPACE);
    }

    if ( (scp->fileoff == 0) && (scp->filesize != 0) )
        result->mach_header = map_addr;

    if (scp->flags & SG_PROTECTED_VERSION_1) {
        ret = unprotect_segment_64((uint64_t) scp->fileoff,
                                   (uint64_t) scp->filesize,
                                   map,
                                   (vm_map_offset_t) map_addr,
                                   (vm_map_size_t) map_size);
    } else {
        ret = LOAD_SUCCESS;
    }

    return ret;
}
Example #27
void
init_mapped_time(void)
{
	kern_return_t	kr;
	mach_port_t	pager;
	int 		new_res;
	tvalspec_t	rtc_time;

	kr = host_get_clock_service(host_port,
				    REALTIME_CLOCK,
				    &rt_clock);
	if (kr != KERN_SUCCESS) {
		MACH3_DEBUG(1, kr,
			    ("init_mapped_time: "
			     "host_get_clock_service(REALTIME_CLOCK)"));
		panic("unable to get real time clock");
	}

	kr = host_get_clock_control(privileged_host_port,
				    REALTIME_CLOCK,
				    &rt_clock_ctrl);
	if (kr != KERN_SUCCESS) {
		MACH3_DEBUG(1, kr,
			    ("init_mapped_time: "
			     "host_get_clock_control(REALTIME_CLOCK)"));
	} else {
		/* ask for 500 microsecond resolution */
		new_res = 500000;
#if 0
		kr = clock_set_attributes(rt_clock_ctrl,
					  CLOCK_ALARM_CURRES,
					  (clock_attr_t) &new_res,
					  1);
		if (kr != KERN_SUCCESS) {
			MACH3_DEBUG(2, kr,
				    ("init_mapped_time: "
				     "clock_set_attributes(%d nsec)",
				     new_res));
		}
#endif
	}

	kr = clock_map_time(rt_clock, &pager);
	if (kr != KERN_SUCCESS) {
		MACH3_DEBUG(1, kr, ("init_mapped_time: clock_map_time"));
		panic("unable to map real time clock");
	}

	kr = vm_map(mach_task_self(),
		    (vm_address_t *)&serv_mtime,
		    sizeof(mapped_tvalspec_t),
		    0,
		    TRUE,
		    pager,
		    0,
		    0,
		    VM_PROT_READ,
		    VM_PROT_READ,
		    VM_INHERIT_NONE);
	if (kr != D_SUCCESS) {
		MACH3_DEBUG(1, kr, ("init_mapped_time: vm_map"));
		panic("unable to vm_map real time clock");
	}

	kr = mach_port_deallocate(mach_task_self(), pager);
	if (kr != KERN_SUCCESS) {
		MACH3_DEBUG(1, kr, ("init_mapped_time: mach_port_deallocate"));
		panic("unable to deallocate pager");
	}

	/* calculate origin of rtclock (ie. time of boot) so that we
	 * can use rtclock to generate the current time
	 */
	kr = host_get_clock_service(host_port,
				    BATTERY_CLOCK,
				    &bb_clock);
	if (kr != KERN_SUCCESS) {
		MACH3_DEBUG(1, kr,
			    ("init_mapped_time: "
			     "host_get_clock_service(BATTERY_CLOCK)"));
		panic("unable to get battery backed clock");
	}

	kr = host_get_clock_control(privileged_host_port,
				    BATTERY_CLOCK,
				    &bb_clock_ctrl);
	if (kr != KERN_SUCCESS) {
		MACH3_DEBUG(1, kr,
			    ("init_mapped_time: "
			     "host_get_clock_control(BATTERY_CLOCK)"));
		return;
	}

	kr = clock_get_time(bb_clock, &base_time);

	if (kr != KERN_SUCCESS) {
		MACH3_DEBUG(1, kr, ("init_mapped_time: clock_get_time"));
	}

	MTS_TO_TS(serv_mtime, &rtc_time);

	SUB_TVALSPEC(&base_time, &rtc_time);

}
Example #28
void
osfmach3_insert_vm_struct(
	struct mm_struct 	*mm,
	struct vm_area_struct	*vmp)
{
	memory_object_t		mem_obj;
	vm_offset_t		mem_obj_offset;
	kern_return_t		kr;
	unsigned short		vm_flags;
	boolean_t		is_shared;
	vm_prot_t		cur_prot, max_prot;
	vm_address_t		user_addr, wanted_addr;
	vm_size_t		size;
	unsigned int		id;
	struct shmid_ds		*shp;
	struct osfmach3_mach_task_struct *mach_task;
	extern struct shmid_ds	*shm_segs[SHMMNI];

	if (vmp->vm_flags & VM_REMAPPING) {
		/* don't mess with Mach VM: it's only Linux remappings */
		return;
	}

#ifdef	VMA_DEBUG
	if (vma_debug) {
		printk("VMA:osfmach3_insert_vm_struct: mm=0x%p, vmp=0x%p\n",
		       mm, vmp);
	}
#endif	/* VMA_DEBUG */

	mach_task = mm->mm_mach_task;
	if (vmp->vm_inode == NULL) {
		if (vmp->vm_pte != 0) {
			/* shared memory */
			id = SWP_OFFSET(vmp->vm_pte) & SHM_ID_MASK;
			shp = shm_segs[id];
			if (shp != IPC_UNUSED) {
				mem_obj = (mach_port_t) shp->shm_pages;
				mem_obj_offset = 0;
			} else {
				mem_obj = MEMORY_OBJECT_NULL;
				mem_obj_offset = 0;
			}
		} else {
			mem_obj = MEMORY_OBJECT_NULL;
			mem_obj_offset = 0;
		}
	} else if (S_ISREG(vmp->vm_inode->i_mode)) {
		mem_obj = inode_pager_setup(vmp->vm_inode);
		if (mem_obj == MEMORY_OBJECT_NULL) {
			panic("osfmach3_insert_vm_struct: can't setup pager");
		}
		mem_obj_offset = (vm_offset_t) vmp->vm_offset;
	} else if (vmp->vm_inode->i_mem_object != NULL) {
		/* special file, but with a pager already setup */
		mem_obj = vmp->vm_inode->i_mem_object->imo_mem_obj;
		mem_obj_offset = (vm_offset_t) vmp->vm_offset;
	} else {
		panic("osfmach3_insert_vm_struct: non-regular file");
	}

	vm_flags = vmp->vm_flags;
	cur_prot = VM_PROT_NONE;
	if (vm_flags & VM_READ)
		cur_prot |= VM_PROT_READ;
	if (vm_flags & VM_WRITE)
		cur_prot |= VM_PROT_WRITE;
	if (vm_flags & VM_EXEC)
		cur_prot |= VM_PROT_EXECUTE;
	max_prot = VM_PROT_ALL;
	is_shared = (vmp->vm_flags & VM_SHARED) != 0;
	user_addr = vmp->vm_start;
	wanted_addr = user_addr;
	size = vmp->vm_end - vmp->vm_start;

#ifdef	VMA_DEBUG
	if (vma_debug) {
		printk("VMA: vm_map(task=0x%x, user_addr=0x%x, size=0x%x, "
		       "mem_obj=0x%x, offset=0x%x, %sCOPY, cur_prot=0x%x, "
		       "max_prot=0x%x, %s)\n",
		       mach_task->mach_task_port,
		       user_addr,
		       size,
		       mem_obj,
		       mem_obj_offset,
		       is_shared ? "!" : "",
		       cur_prot,
		       max_prot,
		       is_shared ? "INHERIT_SHARE" : "INHERIT_COPY");
	}
#endif	/* VMA_DEBUG */
	
	server_thread_blocking(FALSE);
	kr = vm_map(mach_task->mach_task_port,
		    &user_addr,
		    size,
		    0,		/* no mask */
		    FALSE,	/* not anywhere */
		    mem_obj,
		    mem_obj_offset,
		    !is_shared,
		    cur_prot,
		    max_prot,
		    is_shared ? VM_INHERIT_SHARE : VM_INHERIT_COPY);
	server_thread_unblocking(FALSE);

	if (kr != KERN_SUCCESS) {
		printk("Failure: vm_map(task=0x%x, user_addr=0x%x, size=0x%x, "
		       "mem_obj=0x%x, offset=0x%x, %sCOPY, cur_prot=0x%x, "
		       "max_prot=0x%x, %s)\n",
		       mach_task->mach_task_port,
		       user_addr,
		       size,
		       mem_obj,
		       mem_obj_offset,
		       is_shared ? "!" : "",
		       cur_prot,
		       max_prot,
		       is_shared ? "INHERIT_SHARE" : "INHERIT_COPY");
		MACH3_DEBUG(1, kr, ("osfmach3_insert_vm_struct: vm_map"));
		printk("osfmach3_insert_vm_struct: can't map\n");
	}
	if (user_addr != wanted_addr) {
		printk("vm_map: mapped at 0x%x instead of 0x%x\n",
		       user_addr, wanted_addr);
		printk("osfmach3_insert_vm_struct: mapping at wrong address\n");
	}

	if (vmp->vm_flags & VM_LOCKED) {
		extern mach_port_t privileged_host_port;

		server_thread_blocking(FALSE);
		kr = vm_wire(privileged_host_port,
			     mach_task->mach_task_port,
			     user_addr,
			     size,
			     cur_prot);
		server_thread_unblocking(FALSE);
		if (kr != KERN_SUCCESS) {
			MACH3_DEBUG(2, kr,
				    ("osfmach3_insert_vm_struct: "
				     "vm_wire(task=0x%x, addr=0x%x, size=0x%x, "
				     "prot=0x%x)",
				     mach_task->mach_task_port,
				     user_addr,
				     size,
				     cur_prot));
			printk("osfmach3_insert_vm_struct: vm_wire failed\n");
		}
	}
#if 0
	if (vmp->vm_inode != NULL) {
		/*
		 * If mem_obj was already cached in the kernel, we got an
		 * extra reference on its i_mem_object structure (inode_pager).
		 * If it was the first time we mapped the inode, the memory
		 * object has just been initialized by the kernel and we
		 * got a reference in memory_object_init(). In both cases,
		 * we have to release a reference.
		 */
		ASSERT(mem_obj != MEMORY_OBJECT_NULL);
		ASSERT(vmp->vm_inode->i_mem_object);
		ASSERT(vmp->vm_inode->i_mem_object->imo_mem_obj_control);
		inode_pager_release(vmp->vm_inode);
	}
#endif
}
Example #29
/**
 * Map pages starting at @a task_addr from @a task into the current process. The mapping
 * will be copy-on-write, and will be checked to ensure a minimum protection value of
 * VM_PROT_READ.
 *
 * @param task The task from which the memory will be mapped.
 * @param task_addr The task-relative address of the memory to be mapped. This is not required to fall on a page boundary.
 * @param length The total size of the mapping to create.
 * @param require_full If false, short mappings will be permitted in the case where a memory object of the requested length
 * does not exist at the target address. It is the caller's responsibility to validate the resulting length of the
 * mapping, eg, using plcrash_async_mobject_remap_address() and similar. If true, and the entire requested page range is
 * not valid, the mapping request will fail.
 * @param[out] result The in-process address at which the pages were mapped.
 * @param[out] result_length The total size, in bytes, of the mapped pages.
 *
 * @return On success, returns PLCRASH_ESUCCESS. On failure, one of the plcrash_error_t error values will be returned, and no
 * mapping will be performed.
 *
 * @note
 * This code previously used vm_remap() to perform atomic remapping of process memory. However, this appeared
 * to trigger a kernel bug (and resulting panic) on iOS 6.0 through 6.1.2, possibly fixed in 6.1.3. Note that
 * no stable release of PLCrashReporter shipped with the vm_remap() code.
 *
 * Investigation of the failure seems to show an over-release of the target vm_map and backing vm_object, leading to
 * NULL dereference, invalid memory references, and in some cases, deadlocks that result in watchdog timeouts.
 *
 * In one example case, the crash occurs in update_first_free_ll() as a NULL dereference of the vm_map_entry_t parameter.
 * Analysis of the limited reports shows that this is called via vm_map_store_update_first_free(). No backtrace is
 * available from the kernel panics, but analyzing the register state demonstrates:
 * - A reference to vm_map_store_update_first_free() remains in the link register.
 * - Of the following callers, one can be eliminated by register state:
 *     - vm_map_enter - not possible, r3 should be equal to r0
 *     - vm_map_clip_start - possible
 *     - vm_map_clip_unnest - possible
 *     - vm_map_clip_end - possible
 *
 * In the other panic seen in vm_object_reap_pages(), a value of 0x8008 is loaded and dereferenced from the next pointer
 * of an element within the vm_object's resident page queue (object->memq).
 *
 * Unfortunately, our ability to investigate has been extremely constrained by the following issues:
 * - The panic is not easily or reliably reproducible
 * - Apple does not support iOS kernel debugging
 * - There is no support for jailbreak kernel debugging against iOS 6.x devices at the time of writing.
 *
 * The work-around used here is to split the vm_remap() into distinct calls to mach_make_memory_entry_64() and
 * vm_map(); this follows a largely distinct code path from vm_remap(). In testing by a large-scale user of PLCrashReporter,
 * they were no longer able to reproduce the issue with this fix in place. Additionally, they've not been able to reproduce
 * the issue on 6.1.3 devices, or had any reports of the issue occurring on 6.1.3 devices.
 *
 * The mach_make_memory_entry_64() API may not actually return an entry for the full requested length; this requires
 * that we loop through the full range, requesting an entry for the remaining unallocated pages, and then mapping
 * the pages in question. Since this requires multiple calls to vm_map(), we pre-allocate a contiguous range of pages
 * for the target mappings into which we'll insert (via overwrite) our own mappings.
 *
 * @note
 * As a work-around for bugs in Apple's Mach-O/dyld implementation, we provide the @a require_full flag; if false,
 * a successful mapping that is smaller than the requested range may be made, and will not return an error. This is necessary
 * to allow our callers to work around bugs in update_dyld_shared_cache(1), which writes out a larger Mach-O VM segment
 * size value than is actually available and mappable. See the plcrash_async_macho_map_segment() API documentation for
 * more details. This bug has been reported to Apple as rdar://13707406.
 */
static plcrash_error_t plcrash_async_mobject_remap_pages_workaround (mach_port_t task,
                                                                     pl_vm_address_t task_addr,
                                                                     pl_vm_size_t length,
                                                                     bool require_full,
                                                                     pl_vm_address_t *result,
                                                                     pl_vm_size_t *result_length)
{
    kern_return_t kt;

    /* Compute the total required page size. */
    pl_vm_address_t base_addr = mach_vm_trunc_page(task_addr);
    pl_vm_size_t total_size = mach_vm_round_page(length + (task_addr - base_addr));
    
    /*
     * If short mappings are permitted, determine the actual mappable size of the target range. Due
     * to rdar://13707406 (update_dyld_shared_cache appears to write invalid LINKEDIT vmsize), an
     * LC_SEGMENT-reported VM size may be far larger than the actual mapped pages. This would result
     * in us making large (eg, 36MB) allocations in cases where the mappable range is actually much
     * smaller, which can trigger out-of-memory conditions on smaller devices.
     */
    if (!require_full) {
        pl_vm_size_t verified_size = 0;
        
        while (verified_size < total_size) {            
            memory_object_size_t entry_length = total_size - verified_size;
            mach_port_t mem_handle;
            
            /* Fetch an entry reference */
            kt = mach_make_memory_entry_64(task, &entry_length, base_addr + verified_size, VM_PROT_READ, &mem_handle, MACH_PORT_NULL);
            if (kt != KERN_SUCCESS) {
                /* Once we hit an unmappable page, break */
                break;
            }
            
            /* Drop the reference */
            kt = mach_port_mod_refs(mach_task_self(), mem_handle, MACH_PORT_RIGHT_SEND, -1);
            if (kt != KERN_SUCCESS) {
                PLCF_DEBUG("mach_port_mod_refs(-1) failed: %d", kt);
            }

            /* Note the size */
            verified_size += entry_length;
        }

        /* No valid page found at the task_addr */
        if (verified_size == 0) {
            PLCF_DEBUG("No mappable pages found at 0x%" PRIx64, (uint64_t) task_addr);
            return PLCRASH_ENOMEM;
        }

        /* Reduce the total size to the verified size */
        if (verified_size < total_size)
            total_size = verified_size;
    }

    /*
     * Set aside a memory range large enough for the total requested number of pages. Ideally the kernel
     * will lazy-allocate the backing physical pages so that we don't waste actual memory on this
     * pre-emptive page range reservation.
     */
    pl_vm_address_t mapping_addr = 0x0;
    pl_vm_size_t mapped_size = 0;
#ifdef PL_HAVE_MACH_VM
    kt = mach_vm_allocate(mach_task_self(), &mapping_addr, total_size, VM_FLAGS_ANYWHERE);
#else
    kt = vm_allocate(mach_task_self(), &mapping_addr, total_size, VM_FLAGS_ANYWHERE);
#endif

    if (kt != KERN_SUCCESS) {
        PLCF_DEBUG("Failed to allocate a target page range for the page remapping: %d", kt);
        return PLCRASH_EINTERNAL;
    }

    /* Map the source pages into the allocated region, overwriting the existing page mappings */
    while (mapped_size < total_size) {
        /* Create a reference to the target pages. The returned entry may be smaller than the total length. */
        memory_object_size_t entry_length = total_size - mapped_size;
        mach_port_t mem_handle;
        kt = mach_make_memory_entry_64(task, &entry_length, base_addr + mapped_size, VM_PROT_READ, &mem_handle, MACH_PORT_NULL);
        if (kt != KERN_SUCCESS) {            
            /* No pages were found at the target. When validating the total length above, we already verified the
             * availability of the requested pages; if they have since disappeared, we can treat it as an error,
             * even if !require_full was specified. */
            PLCF_DEBUG("mach_make_memory_entry_64() failed: %d", kt);
            
            /* Clean up the reserved pages */
            kt = vm_deallocate(mach_task_self(), mapping_addr, total_size);
            if (kt != KERN_SUCCESS) {
                PLCF_DEBUG("vm_deallocate() failed: %d", kt);
            }
            
            /* Return error */
            return PLCRASH_ENOMEM;
        }
        
        /* Map the pages into our local task, overwriting the allocation used to reserve the target space above. */
        pl_vm_address_t target_address = mapping_addr + mapped_size;
#ifdef PL_HAVE_MACH_VM
        kt = mach_vm_map(mach_task_self(), &target_address, entry_length, 0x0, VM_FLAGS_FIXED|VM_FLAGS_OVERWRITE, mem_handle, 0x0, TRUE, VM_PROT_READ, VM_PROT_READ, VM_INHERIT_COPY);
#else
        kt = vm_map(mach_task_self(), &target_address, entry_length, 0x0, VM_FLAGS_FIXED|VM_FLAGS_OVERWRITE, mem_handle, 0x0, TRUE, VM_PROT_READ, VM_PROT_READ, VM_INHERIT_COPY);
#endif /* !PL_HAVE_MACH_VM */
        
        if (kt != KERN_SUCCESS) {
            PLCF_DEBUG("vm_map() failure: %d", kt);

            /* Clean up the reserved pages */
            kt = vm_deallocate(mach_task_self(), mapping_addr, total_size);
            if (kt != KERN_SUCCESS) {
                PLCF_DEBUG("vm_deallocate() failed: %d", kt);
            }

            /* Drop the memory handle */
            kt = mach_port_mod_refs(mach_task_self(), mem_handle, MACH_PORT_RIGHT_SEND, -1);
            if (kt != KERN_SUCCESS) {
                PLCF_DEBUG("mach_port_mod_refs(-1) failed: %d", kt);
            }
            
            return PLCRASH_ENOMEM;
        }

        /* Drop the memory handle */
        kt = mach_port_mod_refs(mach_task_self(), mem_handle, MACH_PORT_RIGHT_SEND, -1);
        if (kt != KERN_SUCCESS) {
            PLCF_DEBUG("mach_port_mod_refs(-1) failed: %d", kt);
        }
        
        /* Adjust the total mapping size */
        mapped_size += entry_length;
    }
    
    *result = mapping_addr;
    *result_length = mapped_size;

    return PLCRASH_ESUCCESS;
}
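/*
 * Hedged usage sketch (not part of the original example): how a caller might use
 * plcrash_async_mobject_remap_pages_workaround() to obtain a read-only local view of a
 * remote range and release it afterwards. The wrapper name map_remote_range() is an
 * illustrative assumption; only the workaround function itself comes from the code above.
 */
static plcrash_error_t map_remote_range (mach_port_t task, pl_vm_address_t addr, pl_vm_size_t length) {
    pl_vm_address_t mapped_addr;
    pl_vm_size_t mapped_len;

    /* Permit a short mapping (require_full = false) to tolerate over-reported segment sizes (rdar://13707406). */
    plcrash_error_t err = plcrash_async_mobject_remap_pages_workaround(task, addr, length, false, &mapped_addr, &mapped_len);
    if (err != PLCRASH_ESUCCESS)
        return err;

    /* ... read the remote data via mapped_addr; note that addr corresponds to mapped_addr plus its page-internal offset ... */

    /* Drop the local mapping when finished. */
    kern_return_t kt = vm_deallocate(mach_task_self(), mapped_addr, mapped_len);
    if (kt != KERN_SUCCESS)
        PLCF_DEBUG("vm_deallocate() failed: %d", kt);

    return PLCRASH_ESUCCESS;
}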
Exemplo n.º 30
0
/* Return non-zero on error. */
int setup_new_process(TID_t thread,
                      const char *executable, const char **argv_src,
                      virtaddr_t *entry_point, virtaddr_t *stack_top)
{
  pagetable_t *pagetable;
  elf_info_t elf;
  openfile_t file;
  uintptr_t phys_page;
  int i, res;
  thread_table_t *thread_entry = thread_get_thread_entry(thread);

  int argc = 1;
  virtaddr_t argv_begin;
  virtaddr_t argv_dst;
  int argv_elem_size;
  virtaddr_t argv_elem_dst;

  file = vfs_open((char *)executable);

  /* Make sure the file existed and was a valid ELF file */
  if (file < 0) {
    return -1;
  }

  res = elf_parse_header(&elf, file);
  if (res < 0) {
    vfs_close(file);
    return -1;
  }

  /* Trivial and naive sanity check for the entry point: */
  if (elf.entry_point < PAGE_SIZE) {
    vfs_close(file);
    return -1;
  }

  *entry_point = elf.entry_point;

  pagetable = vm_create_pagetable(thread);

  thread_entry->pagetable = pagetable;

  /* Allocate and map stack */
  for(i = 0; i < CONFIG_USERLAND_STACK_SIZE; i++) {
    phys_page = physmem_allocblock();
    KERNEL_ASSERT(phys_page != 0);
    /* Zero the page */
    memoryset((void*)ADDR_PHYS_TO_KERNEL(phys_page), 0, PAGE_SIZE);
    vm_map(pagetable, phys_page,
           (USERLAND_STACK_TOP & PAGE_SIZE_MASK) - i*PAGE_SIZE, 1);
  }

  /* Allocate and map pages for the segments. We assume that
     segments begin at a page boundary. (The linker script in the tests
     directory creates segments of this kind.) */
  for(i = 0; i < (int)elf.ro_pages; i++) {
    int left_to_read = elf.ro_size - i*PAGE_SIZE;
    phys_page = physmem_allocblock();
    KERNEL_ASSERT(phys_page != 0);
    /* Zero the page */
    memoryset((void*)ADDR_PHYS_TO_KERNEL(phys_page), 0, PAGE_SIZE);
    /* Fill the page from ro segment */
    if (left_to_read > 0) {
      KERNEL_ASSERT(vfs_seek(file, elf.ro_location + i*PAGE_SIZE) == VFS_OK);
      KERNEL_ASSERT(vfs_read(file, (void*)ADDR_PHYS_TO_KERNEL(phys_page),
                             MIN(PAGE_SIZE, left_to_read))
                    == (int) MIN(PAGE_SIZE, left_to_read));
    }
    vm_map(pagetable, phys_page,
           elf.ro_vaddr + i*PAGE_SIZE, 0);
  }

  for(i = 0; i < (int)elf.rw_pages; i++) {
    int left_to_read = elf.rw_size - i*PAGE_SIZE;
    phys_page = physmem_allocblock();
    KERNEL_ASSERT(phys_page != 0);
    /* Zero the page */
    memoryset((void*)ADDR_PHYS_TO_KERNEL(phys_page), 0, PAGE_SIZE);
    /* Fill the page from rw segment */
    if (left_to_read > 0) {
      KERNEL_ASSERT(vfs_seek(file, elf.rw_location + i*PAGE_SIZE) == VFS_OK);
      KERNEL_ASSERT(vfs_read(file, (void*)ADDR_PHYS_TO_KERNEL(phys_page),
                             MIN(PAGE_SIZE, left_to_read))
                    == (int) MIN(PAGE_SIZE, left_to_read));
    }
    vm_map(pagetable, phys_page,
           elf.rw_vaddr + i*PAGE_SIZE, 1);
  }

  /* All segments have been loaded; close the executable to avoid leaking the file handle. */
  vfs_close(file);

  /* Set up argc and argv on the stack. */

  /* Start by preparing ancillary information for the new process argv. */
  if (argv_src != NULL)
    for (i = 0; argv_src[i] != NULL; i++) {
      argc++;
    }

  argv_begin = USERLAND_STACK_TOP - (argc * sizeof(virtaddr_t));
  argv_dst = argv_begin;

  /* Prepare for copying executable. */
  argv_elem_size = strlen(executable) + 1;
  argv_elem_dst = argv_dst - wordpad(argv_elem_size);
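  /* wordpad() is assumed to round the string size up to the machine word size, so each
     copied string keeps argv_elem_dst word-aligned as it moves down the stack. */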

  /* Copy executable to argv[0] location. */
  vm_memwrite(pagetable,
              argv_elem_size,
              argv_elem_dst,
              executable);
  /* Set argv[i] */
  vm_memwrite(pagetable,
              sizeof(virtaddr_t),
              argv_dst,
              &argv_elem_dst);

  /* Move argv_dst to &argv[1]. */
  argv_dst += sizeof(virtaddr_t);

  if (argv_src != NULL) {
    for (i = 0; argv_src[i] != NULL; i++) {
      /* Compute the size of argv[i+1] */
      argv_elem_size = strlen(argv_src[i]) + 1;
      argv_elem_dst -= wordpad(argv_elem_size);

      /* Write the 'i+1'th element of argv */
      vm_memwrite(pagetable,
                  argv_elem_size,
                  argv_elem_dst,
                  argv_src[i]);

      /* Write argv[i+1] */
      vm_memwrite(pagetable,
                  sizeof(virtaddr_t),
                  argv_dst,
                  &argv_elem_dst);

      /* Move argv_dst to next element of argv. */
      argv_dst += sizeof(virtaddr_t);
    }
  }

  /* Write argc to the stack. */
  vm_memwrite(pagetable,
              sizeof(int),
              argv_elem_dst - sizeof(int),
              &argc);
  /* Write argv to the stack. */
  vm_memwrite(pagetable,
              sizeof(virtaddr_t),
              argv_elem_dst - sizeof(int) - sizeof(virtaddr_t),
              &argv_begin);

  /* Stack pointer points at argv. */
  *stack_top = argv_elem_dst - sizeof(int) - sizeof(virtaddr_t);
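  /* Resulting user stack layout (addresses decreasing downwards):
   *
   *   USERLAND_STACK_TOP
   *   argv[0..argc-1]     pointer array written at argv_begin
   *   argument strings    word-padded, each placed below the previous one
   *   argc                at argv_elem_dst - sizeof(int)
   *   argv                pointer to the array above; *stack_top points here
   */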

  return 0;
}
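/*
 * Hedged usage sketch (not part of the original example): how a caller might invoke
 * setup_new_process() when spawning a user program. The helper name start_userland(),
 * the path "[disk]halt" and the argument list are illustrative assumptions; argv[0] is
 * filled in from the executable name by setup_new_process() itself.
 */
static int start_userland(TID_t my_tid)
{
  virtaddr_t entry_point, stack_top;
  const char *argv[] = { "hello", NULL };

  /* Build the page table, load the ELF segments and lay out argc/argv on the stack. */
  if (setup_new_process(my_tid, "[disk]halt", argv, &entry_point, &stack_top) != 0)
    return -1; /* open, parse or load failure */

  /* ... the caller would now enter userland at entry_point with the stack pointer set to stack_top ... */
  return 0;
}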