/* Send signal SIG to process number PID.  If PID is zero,
   send SIG to all processes in the current process's process group.
   If PID is < -1, send SIG to all processes in process group - PID.  */
int
__kill (pid_t pid, int sig)
{
  int delivered = 0;		/* Set when we deliver any signal.  */
  error_t err;
  mach_port_t proc;
  struct hurd_userlink ulink;

  void kill_pid (pid_t pid) /* Kill one PID.  */
    {
      /* SIGKILL is not delivered as a normal signal.
	 Sending SIGKILL to a process means to terminate its task.  */
      if (sig == SIGKILL)
	/* Fetch the process's task port and terminate the task.  We
	   loop in case the process execs and changes its task port.
	   If the old task port dies after we fetch it but before we
	   send the RPC, we get MACH_SEND_INVALID_DEST; if it dies
	   after we send the RPC request but before it is serviced, we
	   get MIG_SERVER_DIED.  */
	do
	  {
	    task_t refport;
	    err = __proc_pid2task (proc, pid, &refport);
	    /* Ignore zombies.  */
	    if (!err && refport != MACH_PORT_NULL)
	      {
		err = __task_terminate (refport);
		__mach_port_deallocate (__mach_task_self (), refport);
	      }
	  } while (err == MACH_SEND_INVALID_DEST ||
		   err == MIG_SERVER_DIED);
      else
	{
	  error_t taskerr;
	  error_t kill_port (mach_port_t msgport, mach_port_t refport)
	    {
	      if (msgport != MACH_PORT_NULL)
		/* Send a signal message to its message port.  */
		return __msg_sig_post (msgport, sig, 0, refport);

	      /* The process has no message port.  Perhaps try direct
		 frobnication of the task.  */

	      if (taskerr)
		/* If we could not get the task port, we can do nothing.  */
		return taskerr;

	      if (refport == MACH_PORT_NULL)
		/* proc_pid2task returned success with a null task port.
		   That means the process is a zombie.  Signals
		   to zombies should return success and do nothing.  */
		return 0;

	      /* For user convenience in the case of a task that has
		 not registered any message port with the proc server,
		 translate a few signals to direct task operations.  */
	      switch (sig)
		{
		  /* The only signals that really make sense for an
		     unregistered task are kill, suspend, and continue.  */
		case SIGSTOP:
		case SIGTSTP:
		  return __task_suspend (refport);
		case SIGCONT:
		  return __task_resume (refport);
		case SIGTERM:
		case SIGQUIT:
		case SIGINT:
		  return __task_terminate (refport);
		default:
		  /* We have full permission to send signals, but there is
		     no meaningful way to express this signal.  */
		  return EPERM;
		}
	    }
	  err = HURD_MSGPORT_RPC (__proc_getmsgport (proc, pid, &msgport),
				  (taskerr = __proc_pid2task (proc, pid,
							      &refport)) ?
				  __proc_getsidport (proc, &refport) : 0, 1,
				  kill_port (msgport, refport));
	}
      if (! err)
	delivered = 1;
    }

  proc = _hurd_port_get (&_hurd_ports[INIT_PORT_PROC], &ulink);

  if (pid <= 0)
    {
      /* Send SIG to each process in pgrp (- PID).  */
      pid_t pidbuf[10], *pids = pidbuf;
      mach_msg_type_number_t i, npids = sizeof (pidbuf) / sizeof (pidbuf[0]);

      err = __proc_getpgrppids (proc, - pid, &pids, &npids);
      if (!err)
	{
	  for (i = 0; i < npids; ++i)
	    {
	      kill_pid (pids[i]);
	      if (err == ESRCH)
		/* The process died already.  Ignore it.  */
		err = 0;
	    }
	  if (pids != pidbuf)
	    __vm_deallocate (__mach_task_self (),
			     (vm_address_t) pids, npids * sizeof (pids[0]));
	}
    }
  else
    kill_pid (pid);

  _hurd_port_free (&_hurd_ports[INIT_PORT_PROC], &ulink, proc);

  /* If we delivered no signals, but ERR is clear, this must mean that
     every kill_pid call failed with ESRCH, meaning all the processes in
     the pgrp died between proc_getpgrppids and kill_pid; in that case we
     fail with ESRCH.  */
  return delivered ? 0 : __hurd_fail (err ?: ESRCH);
}
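
A minimal caller-side sketch (not part of the glibc source above) of the PID conventions that __kill documents, using the public kill() interface; the target PID here is hypothetical.

/* Exercise the three PID forms: a single process, our own process
   group (PID == 0), and an explicit process group (PID < -1).  */
#include <signal.h>
#include <stdio.h>
#include <sys/types.h>

int
main (void)
{
  pid_t target = 1234;		/* Hypothetical process ID.  */

  /* PID > 0: signal one process.  */
  if (kill (target, SIGTERM) < 0)
    perror ("kill (pid)");

  /* PID == 0: signal every process in our own process group.  */
  if (kill (0, SIGCONT) < 0)
    perror ("kill (0)");

  /* PID < -1: signal every process in process group -PID.  */
  if (kill (-target, SIGHUP) < 0)
    perror ("kill (-pgrp)");

  return 0;
}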
Example #2
/* Check the first NFDS descriptors either in POLLFDS (if nonnull) or in
   each of READFDS, WRITEFDS, EXCEPTFDS that is nonnull.  If TIMEOUT is not
   NULL, time out after waiting the interval specified therein.  Returns
   the number of ready descriptors, or -1 for errors.  */
int
_hurd_select (int nfds,
	      struct pollfd *pollfds,
	      fd_set *readfds, fd_set *writefds, fd_set *exceptfds,
	      const struct timespec *timeout, const sigset_t *sigmask)
{
  int i;
  mach_port_t portset;
  int got;
  error_t err;
  fd_set rfds, wfds, xfds;
  int firstfd, lastfd;
  mach_msg_timeout_t to = 0;
  struct
    {
      struct hurd_userlink ulink;
      struct hurd_fd *cell;
      mach_port_t io_port;
      int type;
      mach_port_t reply_port;
    } d[nfds];
  sigset_t oset;

  union typeword		/* Use this to avoid unkosher casts.  */
    {
      mach_msg_type_t type;
      uint32_t word;
    };
  assert (sizeof (union typeword) == sizeof (mach_msg_type_t));
  assert (sizeof (uint32_t) == sizeof (mach_msg_type_t));

  if (nfds < 0 || (pollfds == NULL && nfds > FD_SETSIZE))
    {
      errno = EINVAL;
      return -1;
    }

  if (timeout != NULL)
    {
      if (timeout->tv_sec < 0 || timeout->tv_nsec < 0)
	{
	  errno = EINVAL;
	  return -1;
	}

      to = (timeout->tv_sec * 1000 +
            (timeout->tv_nsec + 999999) / 1000000);
    }

  if (sigmask && __sigprocmask (SIG_SETMASK, sigmask, &oset))
    return -1;

  if (pollfds)
    {
      /* Collect interesting descriptors from the user's `pollfd' array.
	 We do a first pass that reads the user's array before taking
	 any locks.  The second pass then only touches our own stack,
	 and gets the port references.  */

      for (i = 0; i < nfds; ++i)
	if (pollfds[i].fd >= 0)
	  {
	    int type = 0;
	    if (pollfds[i].events & POLLIN)
	      type |= SELECT_READ;
	    if (pollfds[i].events & POLLOUT)
	      type |= SELECT_WRITE;
	    if (pollfds[i].events & POLLPRI)
	      type |= SELECT_URG;

	    d[i].io_port = pollfds[i].fd;
	    d[i].type = type;
	  }
	else
	  d[i].type = 0;

      HURD_CRITICAL_BEGIN;
      __mutex_lock (&_hurd_dtable_lock);

      for (i = 0; i < nfds; ++i)
	if (d[i].type != 0)
	  {
	    const int fd = (int) d[i].io_port;

	    if (fd < _hurd_dtablesize)
	      {
		d[i].cell = _hurd_dtable[fd];
		d[i].io_port = _hurd_port_get (&d[i].cell->port, &d[i].ulink);
		if (d[i].io_port != MACH_PORT_NULL)
		  continue;
	      }

	    /* If one descriptor is bogus, we fail completely.  */
	    while (i-- > 0)
	      if (d[i].type != 0)
		_hurd_port_free (&d[i].cell->port,
				 &d[i].ulink, d[i].io_port);
	    break;
	  }

      __mutex_unlock (&_hurd_dtable_lock);
      HURD_CRITICAL_END;

      if (i < nfds)
	{
	  if (sigmask)
	    __sigprocmask (SIG_SETMASK, &oset, NULL);
	  errno = EBADF;
	  return -1;
	}

      lastfd = i - 1;
      firstfd = i == 0 ? lastfd : 0;
    }
  else
    {
      /* Collect interesting descriptors from the user's fd_set arguments.
	 Use local copies so we can't crash from user bogosity.  */

      if (readfds == NULL)
	FD_ZERO (&rfds);
      else
	rfds = *readfds;
      if (writefds == NULL)
	FD_ZERO (&wfds);
      else
	wfds = *writefds;
      if (exceptfds == NULL)
	FD_ZERO (&xfds);
      else
	xfds = *exceptfds;

      HURD_CRITICAL_BEGIN;
      __mutex_lock (&_hurd_dtable_lock);

      if (nfds > _hurd_dtablesize)
	nfds = _hurd_dtablesize;

      /* Collect the ports for interesting FDs.  */
      firstfd = lastfd = -1;
      for (i = 0; i < nfds; ++i)
	{
	  int type = 0;
	  if (readfds != NULL && FD_ISSET (i, &rfds))
	    type |= SELECT_READ;
	  if (writefds != NULL && FD_ISSET (i, &wfds))
	    type |= SELECT_WRITE;
	  if (exceptfds != NULL && FD_ISSET (i, &xfds))
	    type |= SELECT_URG;
	  d[i].type = type;
	  if (type)
	    {
	      d[i].cell = _hurd_dtable[i];
	      d[i].io_port = _hurd_port_get (&d[i].cell->port, &d[i].ulink);
	      if (d[i].io_port == MACH_PORT_NULL)
		{
		  /* If one descriptor is bogus, we fail completely.  */
		  while (i-- > 0)
		    if (d[i].type != 0)
		      _hurd_port_free (&d[i].cell->port, &d[i].ulink,
				       d[i].io_port);
		  break;
		}
	      lastfd = i;
	      if (firstfd == -1)
		firstfd = i;
	    }
	}

      __mutex_unlock (&_hurd_dtable_lock);
      HURD_CRITICAL_END;

      if (i < nfds)
	{
	  if (sigmask)
	    __sigprocmask (SIG_SETMASK, &oset, NULL);
	  errno = EBADF;
	  return -1;
	}
    }


  err = 0;
  got = 0;

  /* Send them all io_select request messages.  */

  if (firstfd == -1)
    /* But not if there were no ports to deal with at all.
       We are just a pure timeout.  */
    portset = __mach_reply_port ();
  else
    {
      portset = MACH_PORT_NULL;

      for (i = firstfd; i <= lastfd; ++i)
	if (d[i].type)
	  {
	    int type = d[i].type;
	    d[i].reply_port = __mach_reply_port ();
	    err = __io_select (d[i].io_port, d[i].reply_port,
			       /* Poll only if there's a single descriptor.  */
			       (firstfd == lastfd) ? to : 0,
			       &type);
	    switch (err)
	      {
	      case MACH_RCV_TIMED_OUT:
		/* No immediate response.  This is normal.  */
		err = 0;
		if (firstfd == lastfd)
		  /* When there's a single descriptor, we don't need a
		     portset, so just pretend we have one, but really
		     use the single reply port.  */
		  portset = d[i].reply_port;
		else if (got == 0)
		  /* We've got multiple reply ports, so we need a port set to
		     multiplex them.  */
		  {
		    /* We will wait again for a reply later.  */
		    if (portset == MACH_PORT_NULL)
		      /* Create the portset to receive all the replies on.  */
		      err = __mach_port_allocate (__mach_task_self (),
						  MACH_PORT_RIGHT_PORT_SET,
						  &portset);
		    if (! err)
		      /* Put this reply port in the port set.  */
		      __mach_port_move_member (__mach_task_self (),
					       d[i].reply_port, portset);
		  }
		break;

	      default:
		/* No other error should happen.  Callers of select
		   don't expect to see errors, so we simulate
		   readiness of the erring object and the next call
		   hopefully will get the error again.  */
		type = SELECT_ALL;
		/* FALLTHROUGH */

	      case 0:
		/* We got an answer.  */
		if ((type & SELECT_ALL) == 0)
		  /* Bogus answer; treat like an error, as a fake positive.  */
		  type = SELECT_ALL;

		/* This port is ready already.  */
		d[i].type &= type;
		d[i].type |= SELECT_RETURNED;
		++got;
		break;
	      }
	    _hurd_port_free (&d[i].cell->port, &d[i].ulink, d[i].io_port);
	  }
    }

  /* Now wait for reply messages.  */
  if (!err && got == 0)
    {
      /* Now wait for io_select_reply messages on PORT,
	 timing out as appropriate.  */

      union
	{
	  mach_msg_header_t head;
#ifdef MACH_MSG_TRAILER_MINIMUM_SIZE
	  struct
	    {
	      mach_msg_header_t head;
	      NDR_record_t ndr;
	      error_t err;
	    } error;
	  struct
	    {
	      mach_msg_header_t head;
	      NDR_record_t ndr;
	      error_t err;
	      int result;
	      mach_msg_trailer_t trailer;
	    } success;
#else
	  struct
	    {
	      mach_msg_header_t head;
	      union typeword err_type;
	      error_t err;
	    } error;
	  struct
	    {
	      mach_msg_header_t head;
	      union typeword err_type;
	      error_t err;
	      union typeword result_type;
	      int result;
	    } success;
#endif
	} msg;
      mach_msg_option_t options = (timeout == NULL ? 0 : MACH_RCV_TIMEOUT);
      error_t msgerr;
      while ((msgerr = __mach_msg (&msg.head,
				   MACH_RCV_MSG | MACH_RCV_INTERRUPT | options,
				   0, sizeof msg, portset, to,
				   MACH_PORT_NULL)) == MACH_MSG_SUCCESS)
	{
	  /* We got a message.  Decode it.  */
#define IO_SELECT_REPLY_MSGID (21012 + 100) /* XXX */
#ifdef MACH_MSG_TYPE_BIT
	  const union typeword inttype =
	  { type:
	    { MACH_MSG_TYPE_INTEGER_T, sizeof (integer_t) * 8, 1, 1, 0, 0 }
	  };
#endif
	  if (msg.head.msgh_id == IO_SELECT_REPLY_MSGID &&
	      msg.head.msgh_size >= sizeof msg.error &&
	      !(msg.head.msgh_bits & MACH_MSGH_BITS_COMPLEX)
#ifdef MACH_MSG_TYPE_BIT
	      && msg.error.err_type.word == inttype.word
#endif
	      )
	    {
	      /* This is a properly formatted message so far.
		 See if it is a success or a failure.  */
	      if (msg.error.err == EINTR &&
		  msg.head.msgh_size == sizeof msg.error)
		{
		  /* EINTR response; poll for further responses
		     and then return quickly.  */
		  err = EINTR;
		  goto poll;
		}
	      if (msg.error.err ||
		  msg.head.msgh_size != sizeof msg.success ||
#ifdef MACH_MSG_TYPE_BIT
		  msg.success.result_type.word != inttype.word ||
#endif
		  (msg.success.result & SELECT_ALL) == 0)
		{
		  /* Error or bogus reply.  Simulate readiness.  */
		  __mach_msg_destroy (&msg.head);
		  msg.success.result = SELECT_ALL;
		}

	      /* Look up the respondent's reply port and record its
		 readiness.  */
	      {
		int had = got;
		if (firstfd != -1)
		  for (i = firstfd; i <= lastfd; ++i)
		    if (d[i].type
			&& d[i].reply_port == msg.head.msgh_local_port)
		      {
			d[i].type &= msg.success.result;
			d[i].type |= SELECT_RETURNED;
			++got;
		      }
		assert (got > had);
	      }
	    }

	  if (msg.head.msgh_remote_port != MACH_PORT_NULL)
	    __mach_port_deallocate (__mach_task_self (),
				    msg.head.msgh_remote_port);

	  if (got)
	  poll:
	    {
	      /* Poll for another message.  */
	      to = 0;
	      options |= MACH_RCV_TIMEOUT;
	    }
	}

      if (msgerr == MACH_RCV_INTERRUPTED)
	/* Interruption on our side (e.g. signal reception).  */
	err = EINTR;

      if (got)
	/* At least one descriptor is known to be ready now, so we will
	   return success.  */
	err = 0;
    }
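
As a caller-side sketch of what _hurd_select backs: on the Hurd both select() and poll() are implemented in terms of it. The snippet below (separate from the glibc source above) simply waits for stdin to become readable with a timeout, once in each style.

/* Wait up to 2.5 seconds for stdin to become readable, first with
   poll() and then with the equivalent select() call.  */
#include <poll.h>
#include <stdio.h>
#include <sys/select.h>
#include <unistd.h>

int
main (void)
{
  /* poll-style: one descriptor, millisecond timeout.  */
  struct pollfd pfd = { .fd = STDIN_FILENO, .events = POLLIN };
  int n = poll (&pfd, 1, 2500);
  if (n < 0)
    perror ("poll");
  else if (n == 0)
    puts ("poll: timed out");
  else if (pfd.revents & POLLIN)
    puts ("poll: stdin readable");

  /* select-style: the same wait expressed with an fd_set and a timeval.  */
  fd_set rfds;
  struct timeval tv = { .tv_sec = 2, .tv_usec = 500000 };
  FD_ZERO (&rfds);
  FD_SET (STDIN_FILENO, &rfds);
  n = select (STDIN_FILENO + 1, &rfds, NULL, NULL, &tv);
  if (n < 0)
    perror ("select");
  else if (n == 0)
    puts ("select: timed out");
  else
    puts ("select: stdin readable");

  return 0;
}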
Example #3
  error_t err;
  mach_port_t proc;
  struct hurd_userlink ulink;

  inline void kill_pid (pid_t pid) /* Kill one PID.  */
    {
      err = HURD_MSGPORT_RPC (__proc_getmsgport (proc, pid, &msgport),
			      (refport = arg_refport, 0), 0,
			      /* If no message port we cannot send signals.  */
			      msgport == MACH_PORT_NULL ? EPERM :
			      __msg_sig_post (msgport, sig, 0, refport));
      if (! err)
	delivered = 1;
    }

  proc = _hurd_port_get (&_hurd_ports[INIT_PORT_PROC], &ulink);

  if (pid <= 0)
    {
      /* Send SIG to each process in pgrp (- PID).  */
      mach_msg_type_number_t npids = 10, i;
      pid_t pidsbuf[10], *pids = pidsbuf;

      err = __proc_getpgrppids (proc, - pid, &pids, &npids);
      if (!err)
	{
	  int self = 0;
	  for (i = 0; i < npids; ++i)
	    if (pids[i] == _hurd_pid)
	      /* We must do ourselves last so we are not suspended
		 and fail to suspend the other processes in the pgrp.  */
Example #4
/* Duplicate FD to FD2, closing the old FD2 and making FD2 refer to the
   same file as FD, setting FD2's flags according to FLAGS.
   Return FD2, or -1 on error.  */
int
__dup3 (int fd, int fd2, int flags)
{
  struct hurd_fd *d;

  /* Passing flags other than O_CLOEXEC is invalid, and so is FD2 being the
     same as FD...  */
  if ((flags & ~O_CLOEXEC
       || fd2 == fd)
      /* ... except when dup2 behavior is requested (FLAGS == -1): if FD is
	 valid and FD2 is already the same, just return it.  */
      && ! (flags == -1
	    && fd2 == fd))
    return __hurd_fail (EINVAL);

  /* Extract the ports and flags from FD.  */
  d = _hurd_fd_get (fd);
  if (d == NULL)
    return __hurd_fail (EBADF);

  HURD_CRITICAL_BEGIN;

  __spin_lock (&d->port.lock);
  if (d->port.port == MACH_PORT_NULL)
    {
      __spin_unlock (&d->port.lock);
      fd2 = __hurd_fail (EBADF);
    }
  else if (fd2 == fd)
    __spin_unlock (&d->port.lock);
  else
    {
      struct hurd_userlink ulink, ctty_ulink;
      int d_flags = d->flags;
      io_t ctty = _hurd_port_get (&d->ctty, &ctty_ulink);
      io_t port = _hurd_port_locked_get (&d->port, &ulink); /* Unlocks D.  */

      if (fd2 < 0)
	fd2 = __hurd_fail (EBADF);
      else
	{
	  /* Get a hold of the destination descriptor.  */
	  struct hurd_fd *d2;

	  __mutex_lock (&_hurd_dtable_lock);

	  if (fd2 >= _hurd_dtablesize)
	    {
	      /* The table is not large enough to hold the destination
		 descriptor.  Enlarge it as necessary to allocate this
		 descriptor.  */
	      __mutex_unlock (&_hurd_dtable_lock);
	      d2 = _hurd_alloc_fd (NULL, fd2);
	      if (d2)
		__spin_unlock (&d2->port.lock);
	      __mutex_lock (&_hurd_dtable_lock);
	    }
	  else
	    {
	      d2 = _hurd_dtable[fd2];
	      if (d2 == NULL)
		{
		  /* Must allocate a new one.  We don't initialize the port
		     cells with this call so that if it fails (out of
		     memory), we will not have already added user
		     references for the ports, which we would then have to
		     deallocate.  */
		  d2 = _hurd_dtable[fd2] = _hurd_new_fd (MACH_PORT_NULL,
							 MACH_PORT_NULL);
		}
	    }
	  __mutex_unlock (&_hurd_dtable_lock);

	  if (d2 == NULL)
	    {
	      fd2 = -1;
	      if (errno == EINVAL)
		errno = EBADF;	/* POSIX.1-1990 6.2.1.2 ll 54-55.  */
	    }
	  else
	    {
	      /* Give the ports each a user ref for the new descriptor.  */
	      __mach_port_mod_refs (__mach_task_self (), port,
				    MACH_PORT_RIGHT_SEND, 1);
	      if (ctty != MACH_PORT_NULL)
		__mach_port_mod_refs (__mach_task_self (), ctty,
				      MACH_PORT_RIGHT_SEND, 1);

	      /* Install the ports and flags in the new descriptor slot.  */
	      __spin_lock (&d2->port.lock);
	      if (flags & O_CLOEXEC)
		d2->flags = d_flags | FD_CLOEXEC;
	      else
		/* dup clears FD_CLOEXEC.  */
		d2->flags = d_flags & ~FD_CLOEXEC;
	      _hurd_port_set (&d2->ctty, ctty);
	      _hurd_port_locked_set (&d2->port, port); /* Unlocks D2.  */
	    }
	}

      _hurd_port_free (&d->port, &ulink, port);
      if (ctty != MACH_PORT_NULL)
	_hurd_port_free (&d->ctty, &ctty_ulink, ctty);
    }

  HURD_CRITICAL_END;

  return fd2;
}
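
A caller-side sketch of the dup3/dup2 semantics implemented above: duplicate a descriptor onto a chosen slot, optionally setting close-on-exec atomically. The file name used here is hypothetical.

/* Duplicate an open descriptor with dup2 (clears FD_CLOEXEC) and with
   dup3 + O_CLOEXEC (sets it), and show that dup3 rejects fd2 == fd.  */
#define _GNU_SOURCE		/* For dup3.  */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int
main (void)
{
  int fd = open ("/tmp/example.log", O_WRONLY | O_CREAT | O_APPEND, 0644);
  if (fd < 0)
    {
      perror ("open");
      return 1;
    }

  /* dup2: make descriptor 10 another name for FD; FD_CLOEXEC is cleared.  */
  if (dup2 (fd, 10) < 0)
    perror ("dup2");

  /* dup3 with O_CLOEXEC: same, but the new descriptor is close-on-exec.  */
  if (dup3 (fd, 11, O_CLOEXEC) < 0)
    perror ("dup3");

  /* dup3 rejects FD2 == FD with EINVAL, unlike dup2.  */
  if (dup3 (fd, fd, 0) < 0)
    perror ("dup3 (fd, fd, 0)");	/* Expected: Invalid argument.  */

  close (fd);
  return 0;
}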
Example #5
kern_return_t
diskfs_S_file_exec (struct protid *cred,
                    task_t task,
                    int flags,
                    char *argv,
                    size_t argvlen,
                    char *envp,
                    size_t envplen,
                    mach_port_t *fds,
                    size_t fdslen,
                    mach_port_t *portarray,
                    size_t portarraylen,
                    int *intarray,
                    size_t intarraylen,
                    mach_port_t *deallocnames,
                    size_t deallocnameslen,
                    mach_port_t *destroynames,
                    size_t destroynameslen)
{
    struct node *np;
    uid_t uid;
    gid_t gid;
    mode_t mode;
    int suid, sgid;
    struct protid *newpi;
    struct peropen *newpo;
    error_t err = 0;
    mach_port_t execserver;
    int cached_exec;
    struct hurd_userlink ulink;
    mach_port_t right;

#define RETURN(code) do { err = (code); goto out; } while (0)

    if (!cred)
        return EOPNOTSUPP;

    /* Get a light reference to the cached exec server port.  */
    execserver = _hurd_port_get (&_diskfs_exec_portcell, &ulink);
    cached_exec = (execserver != MACH_PORT_NULL);
    if (execserver == MACH_PORT_NULL)
    {
        /* No cached port.  Look up the canonical naming point.  */
        execserver = file_name_lookup (_SERVERS_EXEC, 0, 0);
        if (execserver == MACH_PORT_NULL)
            return EOPNOTSUPP;	/* No exec server, no exec.  */
        else
        {
            /* Install the newly-gotten exec server port for other
               threads to use, then get a light reference for this call.  */
            _hurd_port_set (&_diskfs_exec_portcell, execserver);
            execserver = _hurd_port_get (&_diskfs_exec_portcell, &ulink);
        }
    }

    np = cred->po->np;

    mutex_lock (&np->lock);
    mode = np->dn_stat.st_mode;
    uid = np->dn_stat.st_uid;
    gid = np->dn_stat.st_gid;
    mutex_unlock (&np->lock);

    if (_diskfs_noexec)
        RETURN (EACCES);

    if ((cred->po->openstat & O_EXEC) == 0)
        RETURN (EBADF);

    if (!((mode & (S_IXUSR|S_IXGRP|S_IXOTH))
            || ((mode & S_IUSEUNK) && (mode & (S_IEXEC << S_IUNKSHIFT)))))
        RETURN (EACCES);

    if ((mode & S_IFMT) == S_IFDIR)
        RETURN (EACCES);

    suid = mode & S_ISUID;
    sgid = mode & S_ISGID;
    if (!_diskfs_nosuid && (suid || sgid))
    {
        int secure = 0;
        error_t get_file_ids (struct idvec *uids, struct idvec *gids)
        {
            error_t err = idvec_merge (uids, cred->user->uids);
            if (! err)
                err = idvec_merge (gids, cred->user->gids);
            return err;
        }
Example #6
/* Overlay TASK, executing FILE with arguments ARGV and environment ENVP.
   If TASK == mach_task_self (), some ports are dealloc'd by the exec server.
   ARGV and ENVP are terminated by NULL pointers.  */
error_t
_hurd_exec (task_t task, file_t file,
	    char *const argv[], char *const envp[])
{
  error_t err;
  char *args, *env;
  size_t argslen, envlen;
  int ints[INIT_INT_MAX];
  mach_port_t ports[_hurd_nports];
  struct hurd_userlink ulink_ports[_hurd_nports];
  file_t *dtable;
  unsigned int dtablesize, i;
  struct hurd_port **dtable_cells;
  struct hurd_userlink *ulink_dtable;
  struct hurd_sigstate *ss;
  mach_port_t *please_dealloc, *pdp;

  /* XXX needs to be hurdmalloc XXX */
  if (err = __argz_create (argv, &args, &argslen))
    return err;
  if (err = __argz_create (envp, &env, &envlen))
    goto outargs;

  /* Load up the ports to give to the new program.  */
  for (i = 0; i < _hurd_nports; ++i)
    if (i == INIT_PORT_PROC && task != __mach_task_self ())
      {
	/* This is another task, so we need to ask the proc server
	   for the right proc server port for it.  */
	if (err = __USEPORT (PROC, __proc_task2proc (port, task, &ports[i])))
	  {
	    while (--i > 0)
	      _hurd_port_free (&_hurd_ports[i], &ulink_ports[i], ports[i]);
	    goto outenv;
	  }
      }
    else
      ports[i] = _hurd_port_get (&_hurd_ports[i], &ulink_ports[i]);


  /* Load up the ints to give the new program.  */
  for (i = 0; i < INIT_INT_MAX; ++i)
    switch (i)
      {
      case INIT_UMASK:
	ints[i] = _hurd_umask;
	break;

      case INIT_SIGMASK:
      case INIT_SIGIGN:
      case INIT_SIGPENDING:
	/* We will set these all below.  */
	break;

      case INIT_TRACEMASK:
	ints[i] = _hurdsig_traced;
	break;

      default:
	ints[i] = 0;
      }

  ss = _hurd_self_sigstate ();

  assert (! __spin_lock_locked (&ss->critical_section_lock));
  __spin_lock (&ss->critical_section_lock);

  __spin_lock (&ss->lock);
  ints[INIT_SIGMASK] = ss->blocked;
  ints[INIT_SIGPENDING] = ss->pending;
  ints[INIT_SIGIGN] = 0;
  for (i = 1; i < NSIG; ++i)
    if (ss->actions[i].sa_handler == SIG_IGN)
      ints[INIT_SIGIGN] |= __sigmask (i);

  /* We hold the sigstate lock until the exec has failed so that no signal
     can arrive between when we pack the blocked and ignored signals, and
     when the exec actually happens.  A signal handler could change what
     signals are blocked and ignored.  Either the change will be reflected
     in the exec, or the signal will never be delivered.  Setting the
     critical section flag avoids anything we call trying to acquire the
     sigstate lock.  */

  __spin_unlock (&ss->lock);

  /* Pack up the descriptor table to give the new program.  */
  __mutex_lock (&_hurd_dtable_lock);

  dtablesize = _hurd_dtable ? _hurd_dtablesize : _hurd_init_dtablesize;

  if (task == __mach_task_self ())
    /* Request the exec server to deallocate some ports from us if the exec
       succeeds.  The init ports and descriptor ports will arrive in the
       new program's exec_startup message.  If we failed to deallocate
       them, the new program would have duplicate user references for them.
       But we cannot deallocate them ourselves, because we must still have
       them after a failed exec call.  */
    please_dealloc = __alloca ((_hurd_nports + (2 * dtablesize))
				* sizeof (mach_port_t));
  else
    please_dealloc = NULL;
  pdp = please_dealloc;

  if (_hurd_dtable != NULL)
    {
      dtable = __alloca (dtablesize * sizeof (dtable[0]));
      ulink_dtable = __alloca (dtablesize * sizeof (ulink_dtable[0]));
      dtable_cells = __alloca (dtablesize * sizeof (dtable_cells[0]));
      for (i = 0; i < dtablesize; ++i)
	{
	  struct hurd_fd *const d = _hurd_dtable[i];
	  if (d == NULL)
	    {
	      dtable[i] = MACH_PORT_NULL;
	      continue;
	    }
	  __spin_lock (&d->port.lock);
	  if (d->flags & FD_CLOEXEC)
	    {
	      /* This descriptor is marked to be closed on exec.
		 So don't pass it to the new program.  */
	      dtable[i] = MACH_PORT_NULL;
	      if (pdp && d->port.port != MACH_PORT_NULL)
		{
		  /* We still need to deallocate the ports.  */
		  *pdp++ = d->port.port;
		  if (d->ctty.port != MACH_PORT_NULL)
		    *pdp++ = d->ctty.port;
		}
	      __spin_unlock (&d->port.lock);
	    }
	  else
	    {
	      if (pdp && d->ctty.port != MACH_PORT_NULL)
		/* All the elements of DTABLE are added to PLEASE_DEALLOC
		   below, so we needn't add the port itself.
		   But we must deallocate the ctty port as well as
		   the normal port that got installed in DTABLE[I].  */
		*pdp++ = d->ctty.port;
	      dtable[i] = _hurd_port_locked_get (&d->port, &ulink_dtable[i]);
	      dtable_cells[i] = &d->port;
	    }
	}
    }
  else
    {
      dtable = _hurd_init_dtable;
      ulink_dtable = NULL;
      dtable_cells = NULL;
    }

  /* The information is all set up now.  Try to exec the file.  */

  {
    if (pdp)
      {
	/* Request the exec server to deallocate some ports from us if the exec
	   succeeds.  The init ports and descriptor ports will arrive in the
	   new program's exec_startup message.  If we failed to deallocate
	   them, the new program would have duplicate user references for them.
	   But we cannot deallocate them ourselves, because we must still have
	   them after a failed exec call.  */

	for (i = 0; i < _hurd_nports; ++i)
	  *pdp++ = ports[i];
	for (i = 0; i < dtablesize; ++i)
	  *pdp++ = dtable[i];
      }

    err = __file_exec (file, task, 0,
		       args, argslen, env, envlen,
		       dtable, MACH_MSG_TYPE_COPY_SEND, dtablesize,
		       ports, MACH_MSG_TYPE_COPY_SEND, _hurd_nports,
		       ints, INIT_INT_MAX,
		       please_dealloc, pdp - please_dealloc,
		       &_hurd_msgport, task == __mach_task_self () ? 1 : 0);
  }

  /* Release references to the standard ports.  */
  for (i = 0; i < _hurd_nports; ++i)
    if (i == INIT_PORT_PROC && task != __mach_task_self ())
      __mach_port_deallocate (__mach_task_self (), ports[i]);
    else
      _hurd_port_free (&_hurd_ports[i], &ulink_ports[i], ports[i]);

  if (ulink_dtable != NULL)
    /* Release references to the file descriptor ports.  */
    for (i = 0; i < dtablesize; ++i)
      if (dtable[i] != MACH_PORT_NULL)
	_hurd_port_free (dtable_cells[i], &ulink_dtable[i], dtable[i]);

  /* Release lock on the file descriptor table. */
  __mutex_unlock (&_hurd_dtable_lock);

  /* Safe to let signals happen now.  */
  _hurd_critical_section_unlock (ss);

 outargs:
  free (args);
 outenv:
  free (env);
  return err;
}
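
A caller-side sketch to close the loop: in the glibc version shown, the exec family of calls (execve and friends) on the Hurd ends up in _hurd_exec with TASK == mach_task_self () and FILE looked up from the path. The program below just execs /bin/echo with a small argv and envp.

/* Replace the current program image via execve; on success this never
   returns, so reaching perror means the exec failed.  */
#include <stdio.h>
#include <unistd.h>

int
main (void)
{
  char *const argv[] = { "echo", "hello", "from", "exec", NULL };
  char *const envp[] = { "GREETING=hi", NULL };

  execve ("/bin/echo", argv, envp);

  /* Only reached if the exec failed.  */
  perror ("execve");
  return 1;
}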