Example #1
File: hurdmsg.c Project: angelhunt/SNP
kern_return_t
_S_get_init_ports (mach_port_t msgport, mach_port_t auth,
		   mach_port_t **ports,
		   mach_msg_type_name_t *ports_type,
		   unsigned int *nports)
{
  unsigned int i;
  error_t err;

  AUTHCHECK;

  if (err = __vm_allocate (__mach_task_self (), (vm_address_t *) ports,
			   _hurd_nports * sizeof (mach_port_t), 1))
    return err;
  *nports = _hurd_nports;

  for (i = 0; i < _hurd_nports; ++i)
    /* This function adds a new user ref for the *RESULT it gives back.
       Our reply message uses move-send rights that consume this ref.  */
    if (err = _hurd_ports_get (i, &(*ports)[i]))
      {
	/* Died part way through.  Deallocate the ports already fetched.  */
	while (i-- > 0)
	  __mach_port_deallocate (__mach_task_self (), (*ports)[i]);
	__vm_deallocate (__mach_task_self (),
			 (vm_address_t) *ports,
			 *nports * sizeof (mach_port_t));
	return err;
      }

  *ports_type = MACH_MSG_TYPE_MOVE_SEND;
  return 0;
}
Example #2
File: hurdmsg.c Project: angelhunt/SNP
kern_return_t
_S_get_environment (mach_port_t msgport,
		    char **data, unsigned int *datalen)
{
  /* Pack the environment into an array with nulls separating elements.  */
  if (__environ != NULL)
    {
      char *ap, **p;
      size_t envlen = 0;

      for (p = __environ; *p != NULL; ++p)
	envlen += strlen (*p) + 1;

      if (envlen > *datalen)
	{
	  if (__vm_allocate (__mach_task_self (),
			     (vm_address_t *) data, envlen, 1))
	    return ENOMEM;
	}

      ap = *data;
      for (p = __environ; *p != NULL; ++p)
	ap = __memccpy (ap, *p, '\0', ULONG_MAX);

      *datalen = envlen;
    }
  else
    *datalen = 0;

  return 0;
}
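
The reply from _S_get_environment is one flat buffer of NUL-separated
strings (DATALEN bytes total), not a NULL-terminated vector like
__environ.  A minimal client-side sketch of walking such a buffer; the
dump_env name is hypothetical:

#include <stdio.h>
#include <string.h>

/* Print each element of a NUL-separated environment blob.  */
static void
dump_env (const char *data, unsigned int datalen)
{
  const char *p = data;

  while (p < data + datalen)
    {
      puts (p);			/* Each element is NUL-terminated...  */
      p += strlen (p) + 1;	/* ...and the next starts right after.  */
    }
}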
Example #3
File: hurdmsg.c Project: angelhunt/SNP
kern_return_t
_S_get_init_ints (mach_port_t msgport, mach_port_t auth,
		  int **values, unsigned int *nvalues)
{
  error_t err;
  unsigned int i;

  AUTHCHECK;

  if (err = __vm_allocate (__mach_task_self (), (vm_address_t *) values,
			   INIT_INT_MAX * sizeof (int), 1))
    return err;
  *nvalues = INIT_INT_MAX;

  for (i = 0; i < INIT_INT_MAX; ++i)
    switch (err = get_int (i, &(*values)[i]))
      {
      case 0:			/* Success.  */
	break;
      case EINVAL:		/* Unknown index.  */
	(*values)[i] = 0;
	break;
      default:			/* Lossage.  */
	__vm_deallocate (__mach_task_self (),
			 (vm_address_t) *values, INIT_INT_MAX * sizeof (int));
	return err;
      }

  return 0;
}
Example #4
File: mig-alloc.c Project: bminor/glibc
/* Called by MiG to allocate space.  */
void
__mig_allocate (vm_address_t *addr,
		vm_size_t size)
{
  if (__vm_allocate (__mach_task_self (), addr, size, 1) != KERN_SUCCESS)
    *addr = 0;
}
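
glibc pairs this allocation hook with a matching deallocation hook that
MiG calls to release out-of-line memory.  A sketch of that counterpart
(as in mach/mig-dealloc.c, quoted from memory, so treat as approximate):

/* Called by MiG to deallocate space.  */
void
__mig_deallocate (vm_address_t addr,
		  vm_size_t size)
{
  (void) __vm_deallocate (__mach_task_self (), addr, size);
}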
Example #5
File: hurdmsg.c Project: AubrCool/glibc
kern_return_t
_S_msg_get_env_variable (mach_port_t msgport,
			 char *variable,
			 char **data, mach_msg_type_number_t *datalen)
{
  error_t err;
  mach_msg_type_number_t valuelen;
  const char *value = getenv (variable);

  if (value == NULL)
    return ENOENT;

  valuelen = strlen (value);
  if (valuelen > *datalen)
    {
      if (err = __vm_allocate (__mach_task_self (),
			       (vm_address_t *) data, valuelen, 1))
	return err;
    }

  memcpy (*data, value, valuelen);
  *datalen = valuelen;

  return 0;
}
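
Note the reply convention visible above: the caller passes a buffer and
its length; when the value fits, it is copied in place, otherwise the
server vm_allocates fresh out-of-line memory, and DATALEN comes back as
strlen (value) with no trailing NUL.  A hedged caller-side sketch
(assumes __msg_get_env_variable is the MiG user stub generated for this
server routine; print_remote_env is a hypothetical wrapper):

#include <stdio.h>
#include <hurd.h>

static error_t
print_remote_env (mach_port_t msgport, char *name)
{
  char buf[64];			/* Inline buffer the server may reuse.  */
  char *data = buf;
  mach_msg_type_number_t datalen = sizeof buf;
  error_t err;

  if (err = __msg_get_env_variable (msgport, name, &data, &datalen))
    return err;

  /* DATA holds DATALEN bytes and is *not* NUL-terminated.  */
  printf ("%.*s\n", (int) datalen, data);

  if (data != buf)	/* The server allocated a new buffer; release it.  */
    __vm_deallocate (__mach_task_self (), (vm_address_t) data, datalen);
  return 0;
}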
Example #6
kern_return_t
__mach_setup_thread (task_t task, thread_t thread, void *pc,
		     vm_address_t *stack_base, vm_size_t *stack_size)
{
  kern_return_t error;
  struct machine_thread_state ts;
  mach_msg_type_number_t tssize = MACHINE_THREAD_STATE_COUNT;
  vm_address_t stack;
  vm_size_t size;
  int anywhere;

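  /* GNU "a ? : b" evaluates to A when A is nonzero, else to B; so these
     use a caller-supplied nonzero size/base when given, else defaults.  */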
  size = stack_size ? *stack_size ? : STACK_SIZE : STACK_SIZE;
  stack = stack_base ? *stack_base ? : 0 : 0;
  anywhere = !stack_base || !*stack_base;

  error = __vm_allocate (task, &stack, size + __vm_page_size, anywhere);
  if (error)
    return error;

  if (stack_size)
    *stack_size = size;

  memset (&ts, 0, sizeof (ts));
  MACHINE_THREAD_STATE_SET_PC (&ts, pc);
#ifdef STACK_GROWTH_DOWN
  if (stack_base)
    *stack_base = stack + __vm_page_size;
  ts.SP = stack + __vm_page_size + size;
#elif defined (STACK_GROWTH_UP)
  if (stack_base)
    *stack_base = stack;
  ts.SP = stack;
  stack += size;
#else
  #error stack direction unknown
#endif

  /* Create the red zone.  */
  if (error = __vm_protect (task, stack, __vm_page_size, 0, VM_PROT_NONE))
    return error;

  return __thread_set_state (thread, MACHINE_THREAD_STATE_FLAVOR,
			     (natural_t *) &ts, tssize);
}
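
A hedged usage sketch for the function above (assumes the Mach
thread_create/thread_resume RPCs under their glibc-internal __ names;
entry and start_thread_sketch are hypothetical):

static void
entry (void)
{
  /* The new thread starts executing here.  */
}

static kern_return_t
start_thread_sketch (void)
{
  thread_t th;
  vm_address_t base = 0;	/* 0: let __mach_setup_thread choose.  */
  vm_size_t size = 0;		/* 0: use the default STACK_SIZE.  */
  kern_return_t err;

  if (err = __thread_create (__mach_task_self (), &th))
    return err;
  if (err = __mach_setup_thread (__mach_task_self (), th, (void *) &entry,
				 &base, &size))
    return err;
  return __thread_resume (th);
}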
Example #7
File: mmap.c Project: siddhesh/glibc
__ptr_t
__mmap (__ptr_t addr, size_t len, int prot, int flags, int fd, off_t offset)
{
  error_t err;
  vm_prot_t vmprot;
  memory_object_t memobj;
  vm_address_t mapaddr;

  mapaddr = (vm_address_t) addr;

  /* ADDR and OFFSET must be page-aligned.  */
  if ((mapaddr & (__vm_page_size - 1)) || (offset & (__vm_page_size - 1)))
    return (__ptr_t) (long int) __hurd_fail (EINVAL);

  if ((flags & (MAP_TYPE|MAP_INHERIT)) == MAP_ANON
      && prot == (PROT_READ|PROT_WRITE)) /* cf VM_PROT_DEFAULT */
    {
      /* vm_allocate has (a little) less overhead in the kernel too.  */
      err = __vm_allocate (__mach_task_self (), &mapaddr, len, mapaddr == 0);

      if (err == KERN_NO_SPACE)
	{
	  if (flags & MAP_FIXED)
	    {
	      /* XXX this is not atomic as it is in unix! */
	      /* The region is already allocated; deallocate it first.  */
	      err = __vm_deallocate (__mach_task_self (), mapaddr, len);
	      if (!err)
		err = __vm_allocate (__mach_task_self (), &mapaddr, len, 0);
	    }
	  else if (mapaddr != 0)
	    err = __vm_allocate (__mach_task_self (), &mapaddr, len, 1);
	}

      return err ? (__ptr_t) (long int) __hurd_fail (err) : (__ptr_t) mapaddr;
    }

  vmprot = VM_PROT_NONE;
  if (prot & PROT_READ)
    vmprot |= VM_PROT_READ;
  if (prot & PROT_WRITE)
    vmprot |= VM_PROT_WRITE;
  if (prot & PROT_EXEC)
    vmprot |= VM_PROT_EXECUTE;

  switch (flags & MAP_TYPE)
    {
    default:
      return (__ptr_t) (long int) __hurd_fail (EINVAL);

    case MAP_ANON:
      memobj = MACH_PORT_NULL;
      break;

    case MAP_FILE:
    case 0:			/* Allow, e.g., just MAP_SHARED.  */
      {
	mach_port_t robj, wobj;
	if (err = HURD_DPORT_USE (fd, __io_map (port, &robj, &wobj)))
	  {
	    if (err == MIG_BAD_ID || err == EOPNOTSUPP || err == ENOSYS)
	      err = ENODEV;	/* File descriptor doesn't support mmap.  */
	    return (__ptr_t) (long int) __hurd_dfail (fd, err);
	  }
	switch (prot & (PROT_READ|PROT_WRITE))
	  {
	  /* Although it apparently doesn't make sense to map a file with
	     protection set to PROT_NONE, it is actually sometimes done.
	     In particular, that's how localedef reserves some space for
	     the locale archive file, the rationale being that some
	     implementations take into account whether the mapping is
	     anonymous or not when selecting addresses.  */
	  case PROT_NONE:
	  case PROT_READ:
	    memobj = robj;
	    if (wobj != MACH_PORT_NULL)
	      __mach_port_deallocate (__mach_task_self (), wobj);
	    break;
	  case PROT_WRITE:
	    memobj = wobj;
	    if (robj != MACH_PORT_NULL)
	      __mach_port_deallocate (__mach_task_self (), robj);
	    break;
	  case PROT_READ|PROT_WRITE:
	    if (robj == wobj)
	      {
		memobj = wobj;
		/* Remove extra reference.  */
		__mach_port_deallocate (__mach_task_self (), memobj);
	      }
	    else if (wobj == MACH_PORT_NULL && /* Not writable by mapping.  */
		     !(flags & MAP_SHARED))
	      /* The file can only be mapped for reading.  Since we are
		 making a private mapping, we will never try to write the
		 object anyway, so we don't care.  */
	      memobj = robj;
	    else
	      {
		__mach_port_deallocate (__mach_task_self (), wobj);
		return (__ptr_t) (long int) __hurd_fail (EACCES);
	      }
	    break;
	  default:
	    __builtin_unreachable ();
	  }
	break;
	/* XXX handle MAP_NOEXTEND */
      }
    }

  /* XXX handle MAP_INHERIT */

  err = __vm_map (__mach_task_self (),
		  &mapaddr, (vm_size_t) len, (vm_address_t) 0,
		  mapaddr == 0,
		  memobj, (vm_offset_t) offset,
		  ! (flags & MAP_SHARED),
		  vmprot, VM_PROT_ALL,
		  (flags & MAP_SHARED) ? VM_INHERIT_SHARE : VM_INHERIT_COPY);

  if (err == KERN_NO_SPACE)
    {
      if (flags & MAP_FIXED)
	{
	  /* XXX this is not atomic as it is in unix! */
	  /* The region is already allocated; deallocate it first.  */
	  err = __vm_deallocate (__mach_task_self (), mapaddr, len);
	  if (! err)
	    err = __vm_map (__mach_task_self (),
			    &mapaddr, (vm_size_t) len, (vm_address_t) 0,
			    0, memobj, (vm_offset_t) offset,
			    ! (flags & MAP_SHARED),
			    vmprot, VM_PROT_ALL,
			    (flags & MAP_SHARED) ? VM_INHERIT_SHARE
			    : VM_INHERIT_COPY);
	}
      else if (mapaddr != 0)
	err = __vm_map (__mach_task_self (),
			&mapaddr, (vm_size_t) len, (vm_address_t) 0,
			1, memobj, (vm_offset_t) offset,
			! (flags & MAP_SHARED),
			vmprot, VM_PROT_ALL,
			(flags & MAP_SHARED) ? VM_INHERIT_SHARE
			: VM_INHERIT_COPY);
    }

  if (memobj != MACH_PORT_NULL)
    __mach_port_deallocate (__mach_task_self (), memobj);

  if (err)
    return (__ptr_t) (long int) __hurd_fail (err);

  return (__ptr_t) mapaddr;
}
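
A minimal caller-side sketch of the anonymous fast path at the top of
__mmap (assumption: on the Hurd, MAP_ANON | MAP_PRIVATE with
PROT_READ|PROT_WRITE satisfies the shortcut test, so this becomes a
plain vm_allocate):

#include <string.h>
#include <sys/mman.h>

/* Map one private, zero-filled page; the caller munmaps it.  */
static void *
page_sketch (size_t pagesize)
{
  void *p = mmap (NULL, pagesize, PROT_READ | PROT_WRITE,
		  MAP_ANON | MAP_PRIVATE, -1, 0);

  if (p == MAP_FAILED)
    return NULL;
  memset (p, 0, pagesize);	/* Touch it; pages come back zero-filled.  */
  return p;
}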
Example #8
kern_return_t
__mach_setup_thread (task_t task, thread_t thread, void *pc,
                     vm_address_t *stack_base, vm_size_t *stack_size)
{
    kern_return_t error;
    struct machine_thread_state ts;
    mach_msg_type_number_t tssize = MACHINE_THREAD_STATE_COUNT;
    vm_address_t stack;
    vm_size_t size;
    int anywhere = 0;

    size = stack_size ? *stack_size ? : STACK_SIZE : STACK_SIZE;

    if (stack_base && *stack_base)
        stack = *stack_base;
    else if (size == STACK_SIZE)
    {
        /* Cthreads has a bug that makes its stack-probing code fail if
           the stack is too low in memory.  It's bad to try and fix it there
           until cthreads is integrated into libc, so we'll just do it here
           by requesting a high address.  When the cthreads bug is fixed,
           this assignment to STACK should be changed to 0, and the ANYWHERE
           argument to vm_allocate should be changed to 1.  This comment
           should be left, however, in order to confuse people who wonder
           why it's here.  (Though perhaps that last sentence (and this one)
           should be deleted to maximize the effect.)  */
#ifdef STACK_GROWTH_DOWN
        stack = VM_MAX_ADDRESS - size - __vm_page_size;
#else
        stack = VM_MIN_ADDRESS;
#endif
    }
    else
        anywhere = 1;

    if (error = __vm_allocate (task, &stack, size + __vm_page_size, anywhere))
        return error;

    if (stack_size)
        *stack_size = size;

    memset (&ts, 0, sizeof (ts));
    MACHINE_THREAD_STATE_SET_PC (&ts, pc);
#ifdef STACK_GROWTH_DOWN
    if (stack_base)
        *stack_base = stack + __vm_page_size;
    ts.SP = stack + __vm_page_size + size;
#elif defined (STACK_GROWTH_UP)
    if (stack_base)
        *stack_base = stack;
    ts.SP = stack;
    stack += size;
#else
#error stack direction unknown
#endif

    /* Create the red zone.  */
    if (error = __vm_protect (task, stack, __vm_page_size, 0, VM_PROT_NONE))
        return error;

    return __thread_set_state (thread, MACHINE_THREAD_STATE_FLAVOR,
                               (int *) &ts, tssize);
}
Example #9
__ptr_t
__mmap (__ptr_t addr, size_t len, int prot, int flags, int fd, off_t offset)
{
  error_t err;
  vm_prot_t vmprot;
  memory_object_t memobj;
  vm_address_t mapaddr;
  vm_size_t pageoff;

  mapaddr = (vm_address_t) addr;

  if ((flags & (MAP_TYPE|MAP_INHERIT)) == MAP_ANON
      && prot == (PROT_READ|PROT_WRITE)) /* cf VM_PROT_DEFAULT */
    {
      /* vm_allocate has (a little) less overhead in the kernel too.  */
      err = __vm_allocate (__mach_task_self (), &mapaddr, len,
			   !(flags & MAP_FIXED));

      if (err == KERN_NO_SPACE && (flags & MAP_FIXED))
	{
	  /* XXX this is not atomic as it is in unix! */
	  /* The region is already allocated; deallocate it first.  */
	  err = __vm_deallocate (__mach_task_self (), mapaddr, len);
	  if (!err)
	    err = __vm_allocate (__mach_task_self (), &mapaddr, len, 0);
	}

      return err ? (__ptr_t) (long int) __hurd_fail (err) : (__ptr_t) mapaddr;
    }

  pageoff = offset & (vm_page_size - 1);
  offset &= ~(vm_page_size - 1);

  if (flags & MAP_FIXED)
    {
      /* A specific address is requested.  It need not be page-aligned;
	 it just needs to be congruent with the object offset.  */
      if ((mapaddr & (vm_page_size - 1)) != pageoff)
	return (__ptr_t) (long int) __hurd_fail (EINVAL);
      else
	/* We will add back PAGEOFF after mapping.  */
	mapaddr -= pageoff;
    }

  vmprot = VM_PROT_NONE;
  if (prot & PROT_READ)
    vmprot |= VM_PROT_READ;
  if (prot & PROT_WRITE)
    vmprot |= VM_PROT_WRITE;
  if (prot & PROT_EXEC)
    vmprot |= VM_PROT_EXECUTE;

  switch (flags & MAP_TYPE)
    {
    default:
      return (__ptr_t) (long int) __hurd_fail (EINVAL);

    case MAP_ANON:
      memobj = MACH_PORT_NULL;
      break;

    case MAP_FILE:
    case 0:			/* Allow, e.g., just MAP_SHARED.  */
      {
	mach_port_t robj, wobj;
	if (err = HURD_DPORT_USE (fd, __io_map (port, &robj, &wobj)))
	  return (__ptr_t) (long int) __hurd_dfail (fd, err);
	switch (prot & (PROT_READ|PROT_WRITE))
	  {
	  case PROT_READ:
	    memobj = robj;
	    if (wobj != MACH_PORT_NULL)
	      __mach_port_deallocate (__mach_task_self (), wobj);
	    break;
	  case PROT_WRITE:
	    memobj = wobj;
	    if (robj != MACH_PORT_NULL)
	      __mach_port_deallocate (__mach_task_self (), robj);
	    break;
	  case PROT_READ|PROT_WRITE:
	    if (robj == wobj)
	      {
		memobj = wobj;
		/* Remove extra reference.  */
		__mach_port_deallocate (__mach_task_self (), memobj);
	      }
	    else if (wobj == MACH_PORT_NULL && /* Not writable by mapping.  */
		     !(flags & MAP_SHARED))
	      /* The file can only be mapped for reading.  Since we are
		 making a private mapping, we will never try to write the
		 object anyway, so we don't care.  */
	      memobj = robj;
	    else
	      {
		__mach_port_deallocate (__mach_task_self (), wobj);
		return (__ptr_t) (long int) __hurd_fail (EACCES);
	      }
	    break;
	  }
	break;
	/* XXX handle MAP_NOEXTEND */
      }
    }

  /* XXX handle MAP_INHERIT */

  err = __vm_map (__mach_task_self (),
		  &mapaddr, (vm_size_t) len, (vm_address_t) 0,
		  ! (flags & MAP_FIXED),
		  memobj, (vm_offset_t) offset,
		  ! (flags & MAP_SHARED),
		  vmprot, VM_PROT_ALL,
		  (flags & MAP_SHARED) ? VM_INHERIT_SHARE : VM_INHERIT_COPY);

  if (err == KERN_NO_SPACE && (flags & MAP_FIXED))
    {
      /* XXX this is not atomic as it is in unix! */
      /* The region is already allocated; deallocate it first.  */
      err = __vm_deallocate (__mach_task_self (), mapaddr, len);
      if (! err)
	err = __vm_map (__mach_task_self (),
			&mapaddr, (vm_size_t) len, (vm_address_t) 0,
			0, memobj, (vm_offset_t) offset,
			! (flags & MAP_SHARED),
			vmprot, VM_PROT_ALL,
			(flags & MAP_SHARED) ? VM_INHERIT_SHARE
			: VM_INHERIT_COPY);
    }

  if (memobj != MACH_PORT_NULL)
    __mach_port_deallocate (__mach_task_self (), memobj);

  if (err)
    return (__ptr_t) (long int) __hurd_fail (err);

  /* Adjust the mapping address for the offset-within-page.  */
  mapaddr += pageoff;

  return (__ptr_t) mapaddr;
}