コード例 #1
0
ファイル: hurdstartup.c プロジェクト: AubrCool/glibc
/* Initial program startup on the GNU Hurd.

   Collects the argument block, environment block, and startup data
   (descriptor table, port array, int array) from the exec server via
   the task's bootstrap port, or — if there is no bootstrap port, the
   RPC fails, or EXEC_STACK_ARGS is set — reads them off the stack in
   the canonical Mach fashion.  The vectors are arranged in memory as
   [argc][argv...NULL][envp...NULL][struct hurd_startup_data], the
   layout the ELF ABI startup code expects, and MAIN is then invoked
   with a pointer to the argc word.  This function never returns.  */
void
_hurd_startup (void **argptr, void (*main) (intptr_t *data))
{
  error_t err;
  mach_port_t in_bootstrap;
  char *args, *env;
  mach_msg_type_number_t argslen, envlen;
  struct hurd_startup_data data;
  char **argv, **envp;
  int argc, envc;
  intptr_t *argcptr;
  vm_address_t addr;

  /* Attempt to map page zero redzoned before we receive any RPC
     data that might get allocated there.  We can ignore errors.  */
  addr = 0;
  __vm_map (__mach_task_self (),
	    &addr, __vm_page_size, 0, 0, MACH_PORT_NULL, 0, 1,
	    VM_PROT_NONE, VM_PROT_NONE, VM_INHERIT_COPY);

  /* Assignment inside the condition is intentional: bail out via LOSE
     if the RPC fails.  */
  if (err = __task_get_special_port (__mach_task_self (), TASK_BOOTSTRAP_PORT,
				     &in_bootstrap))
    LOSE;

  if (in_bootstrap != MACH_PORT_NULL)
    {
      /* Call the exec server on our bootstrap port and
	 get all our standard information from it.  */

      argslen = envlen = 0;
      data.dtablesize = data.portarraysize = data.intarraysize = 0;

      err = __exec_startup_get_info (in_bootstrap,
				     &data.user_entry,
				     &data.phdr, &data.phdrsz,
				     &data.stack_base, &data.stack_size,
				     &data.flags,
				     &args, &argslen,
				     &env, &envlen,
				     &data.dtable, &data.dtablesize,
				     &data.portarray, &data.portarraysize,
				     &data.intarray, &data.intarraysize);
      /* Drop our reference on the bootstrap port now that we are done
	 with it.  */
      __mach_port_deallocate (__mach_task_self (), in_bootstrap);
    }

  if (err || in_bootstrap == MACH_PORT_NULL || (data.flags & EXEC_STACK_ARGS))
    {
      /* Either we have no bootstrap port, or the RPC to the exec server
	 failed, or whoever started us up passed the flag saying args are
	 on the stack.  Try to snarf the args in the canonical Mach way.
	 Hopefully either they will be on the stack as expected, or the
	 stack will be zeros so we don't crash.  */

      /* ARGPTR points at the argc word; argv follows it, and envp
	 follows the NULL that terminates argv.  */
      argcptr = (intptr_t *) argptr;
      argc = argcptr[0];
      argv = (char **) &argcptr[1];
      envp = &argv[argc + 1];
      envc = 0;
      while (envp[envc])
	++envc;
    }
  else
    {
      /* Turn the block of null-separated strings we were passed for the
	 arguments and environment into vectors of pointers to strings.  */

      /* Count up the arguments so we can allocate ARGV.  */
      argc = __argz_count (args, argslen);
      /* Count up the environment variables so we can allocate ENVP.  */
      envc = __argz_count (env, envlen);

      /* There were some arguments.  Allocate space for the vectors of
	 pointers and fill them in.  We allocate the space for the
	 environment pointers immediately after the argv pointers because
	 the ELF ABI will expect it.  */
      argcptr = __alloca (sizeof (intptr_t) +
			  (argc + 1 + envc + 1) * sizeof (char *) +
			  sizeof (struct hurd_startup_data));
      *argcptr = argc;
      argv = (void *) (argcptr + 1);
      __argz_extract (args, argslen, argv);

      /* There was some environment.  */
      envp = &argv[argc + 1];
      __argz_extract (env, envlen, envp);
    }

  if (err || in_bootstrap == MACH_PORT_NULL)
    {
      /* Either we have no bootstrap port, or the RPC to the exec server
	 failed.  Set all our other variables to have empty information.  */

      data.flags = 0;
      args = env = NULL;
      argslen = envlen = 0;
      data.dtable = NULL;
      data.dtablesize = 0;
      data.portarray = NULL;
      data.portarraysize = 0;
      data.intarray = NULL;
      data.intarraysize = 0;
    }
  else if ((void *) &envp[envc + 1] == argv[0])
    {
      /* The arguments arrived on the stack from the kernel, but our
	 protocol requires some space after them for a `struct
	 hurd_startup_data'.  Move them.  */
      /* NOTE: variably sized struct members (argv[argc + 1]) are a GNU C
	 extension used here to describe the exact on-stack layout.  */
      struct
	{
	  intptr_t count;
	  char *argv[argc + 1];
	  char *envp[envc + 1];
	  struct hurd_startup_data data;
	} *args = alloca (sizeof *args);
      /* If alloca placed the new block directly abutting the old one,
	 extend the allocation so the copy below cannot overlap.  */
      if ((void *) &args[1] == (void *) argcptr)
	args = alloca (-((char *) &args->data - (char *) args));
      memmove (args, argcptr, (char *) &args->data - (char *) args);
      argcptr = (void *) args;
      argv = args->argv;
      envp = args->envp;
    }

  {
    /* The startup data block lives right after the envp NULL
       terminator.  */
    struct hurd_startup_data *d = (void *) &envp[envc + 1];

    if ((void *) d != argv[0])
      {
	*d = data;
	_hurd_init_dtable = d->dtable;
	_hurd_init_dtablesize = d->dtablesize;
      }

    (*main) (argcptr);
  }

  /* Should never get here.  */
  LOSE;
  abort ();
}
コード例 #2
0
ファイル: mmap.c プロジェクト: siddhesh/glibc
/* Map LEN bytes at ADDR with protection PROT, implemented over the
   Mach VM interface.  Anonymous read/write mappings take a fast
   vm_allocate path; file mappings obtain read/write memory objects
   from the file server via io_map and hand the appropriate one to
   vm_map.  Returns the mapped address, or MAP_FAILED with errno set
   via __hurd_fail/__hurd_dfail.  */
__ptr_t
__mmap (__ptr_t addr, size_t len, int prot, int flags, int fd, off_t offset)
{
  error_t err;
  vm_prot_t vmprot;
  memory_object_t memobj;
  vm_address_t mapaddr;

  mapaddr = (vm_address_t) addr;

  /* ADDR and OFFSET must be page-aligned.  */
  if ((mapaddr & (__vm_page_size - 1)) || (offset & (__vm_page_size - 1)))
    return (__ptr_t) (long int) __hurd_fail (EINVAL);

  if ((flags & (MAP_TYPE|MAP_INHERIT)) == MAP_ANON
      && prot == (PROT_READ|PROT_WRITE)) /* cf VM_PROT_DEFAULT */
    {
      /* vm_allocate has (a little) less overhead in the kernel too.  */
      err = __vm_allocate (__mach_task_self (), &mapaddr, len, mapaddr == 0);

      if (err == KERN_NO_SPACE)
	{
	  if (flags & MAP_FIXED)
	    {
	      /* XXX this is not atomic as it is in unix! */
	      /* The region is already allocated; deallocate it first.  */
	      err = __vm_deallocate (__mach_task_self (), mapaddr, len);
	      if (!err)
		err = __vm_allocate (__mach_task_self (), &mapaddr, len, 0);
	    }
	  else if (mapaddr != 0)
	    /* A hint address was given but is unavailable; retry
	       anywhere.  */
	    err = __vm_allocate (__mach_task_self (), &mapaddr, len, 1);
	}

      return err ? (__ptr_t) (long int) __hurd_fail (err) : (__ptr_t) mapaddr;
    }

  /* Translate POSIX protection bits into Mach VM protection bits.  */
  vmprot = VM_PROT_NONE;
  if (prot & PROT_READ)
    vmprot |= VM_PROT_READ;
  if (prot & PROT_WRITE)
    vmprot |= VM_PROT_WRITE;
  if (prot & PROT_EXEC)
    vmprot |= VM_PROT_EXECUTE;

  switch (flags & MAP_TYPE)
    {
    default:
      return (__ptr_t) (long int) __hurd_fail (EINVAL);

    case MAP_ANON:
      memobj = MACH_PORT_NULL;
      break;

    case MAP_FILE:
    case 0:			/* Allow, e.g., just MAP_SHARED.  */
      {
	mach_port_t robj, wobj;
	/* Assignment inside the condition is intentional: io_map gives
	   us separate read and write memory objects for the file.  */
	if (err = HURD_DPORT_USE (fd, __io_map (port, &robj, &wobj)))
	  {
	    if (err == MIG_BAD_ID || err == EOPNOTSUPP || err == ENOSYS)
	      err = ENODEV;	/* File descriptor doesn't support mmap.  */
	    return (__ptr_t) (long int) __hurd_dfail (fd, err);
	  }
	/* Pick which memory object to map and drop the reference on the
	   one we do not use.  */
	switch (prot & (PROT_READ|PROT_WRITE))
	  {
	  /* Although it apparently doesn't make sense to map a file with
	     protection set to PROT_NONE, it is actually sometimes done.
	     In particular, that's how localedef reserves some space for
	     the locale archive file, the rationale being that some
	     implementations take into account whether the mapping is
	     anonymous or not when selecting addresses.  */
	  case PROT_NONE:
	  case PROT_READ:
	    memobj = robj;
	    if (wobj != MACH_PORT_NULL)
	      __mach_port_deallocate (__mach_task_self (), wobj);
	    break;
	  case PROT_WRITE:
	    memobj = wobj;
	    if (robj != MACH_PORT_NULL)
	      __mach_port_deallocate (__mach_task_self (), robj);
	    break;
	  case PROT_READ|PROT_WRITE:
	    if (robj == wobj)
	      {
		memobj = wobj;
		/* Remove extra reference.  */
		__mach_port_deallocate (__mach_task_self (), memobj);
	      }
	    else if (wobj == MACH_PORT_NULL && /* Not writable by mapping.  */
		     !(flags & MAP_SHARED))
	      /* The file can only be mapped for reading.  Since we are
		 making a private mapping, we will never try to write the
		 object anyway, so we don't care.  */
	      memobj = robj;
	    else
	      {
		__mach_port_deallocate (__mach_task_self (), wobj);
		return (__ptr_t) (long int) __hurd_fail (EACCES);
	      }
	    break;
	  default:
	    /* The mask above limits the switch value to the four cases
	       handled, so this cannot be reached.  */
	    __builtin_unreachable ();
	  }
	break;
	/* XXX handle MAP_NOEXTEND */
      }
    }

  /* XXX handle MAP_INHERIT */

  /* The copy flag (!MAP_SHARED) makes the mapping private
     (copy-on-write); the anywhere flag is set only when no address was
     requested.  */
  err = __vm_map (__mach_task_self (),
		  &mapaddr, (vm_size_t) len, (vm_address_t) 0,
		  mapaddr == 0,
		  memobj, (vm_offset_t) offset,
		  ! (flags & MAP_SHARED),
		  vmprot, VM_PROT_ALL,
		  (flags & MAP_SHARED) ? VM_INHERIT_SHARE : VM_INHERIT_COPY);

  if (err == KERN_NO_SPACE)
    {
      if (flags & MAP_FIXED)
	{
	  /* XXX this is not atomic as it is in unix! */
	  /* The region is already allocated; deallocate it first.  */
	  err = __vm_deallocate (__mach_task_self (), mapaddr, len);
	  if (! err)
	    err = __vm_map (__mach_task_self (),
			    &mapaddr, (vm_size_t) len, (vm_address_t) 0,
			    0, memobj, (vm_offset_t) offset,
			    ! (flags & MAP_SHARED),
			    vmprot, VM_PROT_ALL,
			    (flags & MAP_SHARED) ? VM_INHERIT_SHARE
			    : VM_INHERIT_COPY);
	}
      else if (mapaddr != 0)
	/* A hint address was given but is unavailable; retry
	   anywhere.  */
	err = __vm_map (__mach_task_self (),
			&mapaddr, (vm_size_t) len, (vm_address_t) 0,
			1, memobj, (vm_offset_t) offset,
			! (flags & MAP_SHARED),
			vmprot, VM_PROT_ALL,
			(flags & MAP_SHARED) ? VM_INHERIT_SHARE
			: VM_INHERIT_COPY);
    }

  /* vm_map took its own reference; drop ours.  */
  if (memobj != MACH_PORT_NULL)
    __mach_port_deallocate (__mach_task_self (), memobj);

  if (err)
    return (__ptr_t) (long int) __hurd_fail (err);

  return (__ptr_t) mapaddr;
}
コード例 #3
0
ファイル: mmap.c プロジェクト: BackupTheBerlios/wl530g-svn
/* Map LEN bytes at ADDR with protection PROT, implemented over the
   Mach VM interface.  Anonymous read/write mappings take a fast
   vm_allocate path; file mappings obtain read/write memory objects
   from the file server via io_map and hand the appropriate one to
   vm_map.  ADDR need not be page-aligned, but for MAP_FIXED it must be
   congruent with OFFSET modulo the page size.  Returns the mapped
   address, or MAP_FAILED with errno set via __hurd_fail/__hurd_dfail.

   FIX: the switch over (prot & (PROT_READ|PROT_WRITE)) previously had
   no case for PROT_NONE (value 0) and no default, so mapping a file
   with PROT_NONE left MEMOBJ uninitialized (undefined behavior when it
   was later passed to vm_map and mach_port_deallocate) and leaked the
   robj/wobj port references.  PROT_NONE file mappings are legitimately
   made (e.g. to reserve address space); they now share the read-object
   path, matching the corrected upstream implementation.  */
__ptr_t
__mmap (__ptr_t addr, size_t len, int prot, int flags, int fd, off_t offset)
{
  error_t err;
  vm_prot_t vmprot;
  memory_object_t memobj;
  vm_address_t mapaddr;
  vm_size_t pageoff;

  mapaddr = (vm_address_t) addr;

  if ((flags & (MAP_TYPE|MAP_INHERIT)) == MAP_ANON
      && prot == (PROT_READ|PROT_WRITE)) /* cf VM_PROT_DEFAULT */
    {
      /* vm_allocate has (a little) less overhead in the kernel too.  */
      err = __vm_allocate (__mach_task_self (), &mapaddr, len,
			   !(flags & MAP_FIXED));

      if (err == KERN_NO_SPACE && (flags & MAP_FIXED))
	{
	  /* XXX this is not atomic as it is in unix! */
	  /* The region is already allocated; deallocate it first.  */
	  err = __vm_deallocate (__mach_task_self (), mapaddr, len);
	  if (!err)
	    err = __vm_allocate (__mach_task_self (), &mapaddr, len, 0);
	}

      return err ? (__ptr_t) (long int) __hurd_fail (err) : (__ptr_t) mapaddr;
    }

  /* Split OFFSET into a page-aligned part passed to vm_map and the
     residue within the page, which we add back to the result.  */
  pageoff = offset & (vm_page_size - 1);
  offset &= ~(vm_page_size - 1);

  if (flags & MAP_FIXED)
    {
      /* A specific address is requested.  It need not be page-aligned;
	 it just needs to be congruent with the object offset.  */
      if ((mapaddr & (vm_page_size - 1)) != pageoff)
	return (__ptr_t) (long int) __hurd_fail (EINVAL);
      else
	/* We will add back PAGEOFF after mapping.  */
	mapaddr -= pageoff;
    }

  /* Translate POSIX protection bits into Mach VM protection bits.  */
  vmprot = VM_PROT_NONE;
  if (prot & PROT_READ)
    vmprot |= VM_PROT_READ;
  if (prot & PROT_WRITE)
    vmprot |= VM_PROT_WRITE;
  if (prot & PROT_EXEC)
    vmprot |= VM_PROT_EXECUTE;

  switch (flags & MAP_TYPE)
    {
    default:
      return (__ptr_t) (long int) __hurd_fail (EINVAL);

    case MAP_ANON:
      memobj = MACH_PORT_NULL;
      break;

    case MAP_FILE:
    case 0:			/* Allow, e.g., just MAP_SHARED.  */
      {
	mach_port_t robj, wobj;
	/* Assignment inside the condition is intentional: io_map gives
	   us separate read and write memory objects for the file.  */
	if (err = HURD_DPORT_USE (fd, __io_map (port, &robj, &wobj)))
	  return (__ptr_t) (long int) __hurd_dfail (fd, err);
	/* Pick which memory object to map and drop the reference on the
	   one we do not use.  All four values of the masked PROT are
	   now covered, so MEMOBJ is always initialized.  */
	switch (prot & (PROT_READ|PROT_WRITE))
	  {
	  /* Although it apparently doesn't make sense to map a file with
	     protection set to PROT_NONE, it is actually sometimes done.
	     In particular, that's how localedef reserves some space for
	     the locale archive file, the rationale being that some
	     implementations take into account whether the mapping is
	     anonymous or not when selecting addresses.  */
	  case PROT_NONE:
	  case PROT_READ:
	    memobj = robj;
	    if (wobj != MACH_PORT_NULL)
	      __mach_port_deallocate (__mach_task_self (), wobj);
	    break;
	  case PROT_WRITE:
	    memobj = wobj;
	    if (robj != MACH_PORT_NULL)
	      __mach_port_deallocate (__mach_task_self (), robj);
	    break;
	  case PROT_READ|PROT_WRITE:
	    if (robj == wobj)
	      {
		memobj = wobj;
		/* Remove extra reference.  */
		__mach_port_deallocate (__mach_task_self (), memobj);
	      }
	    else if (wobj == MACH_PORT_NULL && /* Not writable by mapping.  */
		     !(flags & MAP_SHARED))
	      /* The file can only be mapped for reading.  Since we are
		 making a private mapping, we will never try to write the
		 object anyway, so we don't care.  */
	      memobj = robj;
	    else
	      {
		__mach_port_deallocate (__mach_task_self (), wobj);
		return (__ptr_t) (long int) __hurd_fail (EACCES);
	      }
	    break;
	  }
	break;
	/* XXX handle MAP_NOEXTEND */
      }
    }

  /* XXX handle MAP_INHERIT */

  /* The copy flag (!MAP_SHARED) makes the mapping private
     (copy-on-write); the anywhere flag is set only for non-fixed
     mappings.  */
  err = __vm_map (__mach_task_self (),
		  &mapaddr, (vm_size_t) len, (vm_address_t) 0,
		  ! (flags & MAP_FIXED),
		  memobj, (vm_offset_t) offset,
		  ! (flags & MAP_SHARED),
		  vmprot, VM_PROT_ALL,
		  (flags & MAP_SHARED) ? VM_INHERIT_SHARE : VM_INHERIT_COPY);

  if (err == KERN_NO_SPACE && (flags & MAP_FIXED))
    {
      /* XXX this is not atomic as it is in unix! */
      /* The region is already allocated; deallocate it first.  */
      err = __vm_deallocate (__mach_task_self (), mapaddr, len);
      if (! err)
	err = __vm_map (__mach_task_self (),
			&mapaddr, (vm_size_t) len, (vm_address_t) 0,
			0, memobj, (vm_offset_t) offset,
			! (flags & MAP_SHARED),
			vmprot, VM_PROT_ALL,
			(flags & MAP_SHARED) ? VM_INHERIT_SHARE
			: VM_INHERIT_COPY);
    }

  /* vm_map took its own reference; drop ours.  */
  if (memobj != MACH_PORT_NULL)
    __mach_port_deallocate (__mach_task_self (), memobj);

  if (err)
    return (__ptr_t) (long int) __hurd_fail (err);

  /* Adjust the mapping address for the offset-within-page.  */
  mapaddr += pageoff;

  return (__ptr_t) mapaddr;
}