Code example #1
File: hurdsig.c  Project: bminor/glibc
/* Call the crash dump server to mummify us before we die.
   Returns nonzero if a core file was written.  */
static int
write_corefile (int signo, const struct hurd_signal_detail *detail)
{
  error_t err;
  mach_port_t coreserver;
  file_t file, coredir;
  const char *name;

  /* Don't bother locking since we just read the one word.  */
  rlim_t corelimit = _hurd_rlimits[RLIMIT_CORE].rlim_cur;

  if (corelimit == 0)
    /* No core dumping, thank you very much.  Note that this makes
       `ulimit -c 0' prevent crash-suspension too, which is probably
       what the user wanted.  */
    return 0;

  /* XXX RLIMIT_CORE:
     When we have a protocol to make the server return an error
     for RLIMIT_FSIZE, then tell the corefile fs server the RLIMIT_CORE
     value in place of the RLIMIT_FSIZE value.  */

  /* First get a port to the core dumping server.  */
  coreserver = MACH_PORT_NULL;
  name = _hurdsig_getenv ("CRASHSERVER");
  if (name != NULL)
    coreserver = __file_name_lookup (name, 0, 0);
  if (coreserver == MACH_PORT_NULL)
    coreserver = __file_name_lookup (_SERVERS_CRASH, 0, 0);
  if (coreserver == MACH_PORT_NULL)
    return 0;

  /* Get a port to the directory where the new core file will reside.  */
  file = MACH_PORT_NULL;
  name = _hurdsig_getenv ("COREFILE");
  if (name == NULL)
    name = "core";
  coredir = __file_name_split (name, (char **) &name);
  if (coredir != MACH_PORT_NULL)
    /* Create the new file, but don't link it into the directory yet.  */
    __dir_mkfile (coredir, O_WRONLY|O_CREAT,
		  0600 & ~_hurd_umask, /* XXX ? */
		  &file);

  /* Call the core dumping server to write the core file.  */
  err = __crash_dump_task (coreserver,
			   __mach_task_self (),
			   file,
			   signo, detail->code, detail->error,
			   detail->exc, detail->exc_code, detail->exc_subcode,
			   _hurd_ports[INIT_PORT_CTTYID].port,
			   MACH_MSG_TYPE_COPY_SEND);
  __mach_port_deallocate (__mach_task_self (), coreserver);

  if (! err && file != MACH_PORT_NULL)
    /* The core dump into FILE succeeded, so now link it into the
       directory.  */
    err = __dir_link (coredir, file, name, 1);
  __mach_port_deallocate (__mach_task_self (), file);
  __mach_port_deallocate (__mach_task_self (), coredir);
  return !err && file != MACH_PORT_NULL;
}
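
The RLIMIT_CORE check above means a zero core limit suppresses the whole crash-dump path, which is what `ulimit -c 0' relies on. A minimal caller-side sketch (standard getrlimit/setrlimit, not part of the Hurd code above) of disabling core files the same way:

#include <stdio.h>
#include <sys/resource.h>

int
main (void)
{
  struct rlimit rl;

  /* Clear only the soft limit; with RLIMIT_CORE at 0, write_corefile
     returns immediately and no core file is written.  */
  if (getrlimit (RLIMIT_CORE, &rl) == 0)
    {
      rl.rlim_cur = 0;
      if (setrlimit (RLIMIT_CORE, &rl) != 0)
	perror ("setrlimit");
    }
  return 0;
}
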
Code example #2
void
_hurd_port_cleanup (void *cleanup_data, jmp_buf env, int val)
{
  __mach_port_deallocate (__mach_task_self (), (mach_port_t) cleanup_data);
}
Code example #3
File: kill.c  Project: Drakey83/steamlink-sdk
/* Send signal SIG to process number PID.  If PID is zero,
   send SIG to all processes in the current process's process group.
   If PID is < -1, send SIG to all processes in process group - PID.  */
int
__kill (pid_t pid, int sig)
{
  int delivered = 0;		/* Set when we deliver any signal.  */
  error_t err;
  mach_port_t proc;
  struct hurd_userlink ulink;

  void kill_pid (pid_t pid) /* Kill one PID.  */
    {
      /* SIGKILL is not delivered as a normal signal.
	 Sending SIGKILL to a process means to terminate its task.  */
      if (sig == SIGKILL)
	/* Fetch the process's task port and terminate the task.  We
	   loop in case the process execs and changes its task port.
	   If the old task port dies after we fetch it but before we
	   send the RPC, we get MACH_SEND_INVALID_DEST; if it dies
	   after we send the RPC request but before it is serviced, we
	   get MIG_SERVER_DIED.  */
	do
	  {
	    task_t refport;
	    err = __proc_pid2task (proc, pid, &refport);
	    /* Ignore zombies.  */
	    if (!err && refport != MACH_PORT_NULL)
	      {
		err = __task_terminate (refport);
		__mach_port_deallocate (__mach_task_self (), refport);
	      }
	  } while (err == MACH_SEND_INVALID_DEST ||
		   err == MIG_SERVER_DIED);
      else
	{
	  error_t taskerr;
	  error_t kill_port (mach_port_t msgport, mach_port_t refport)
	    {
	      if (msgport != MACH_PORT_NULL)
		/* Send a signal message to his message port.  */
		return __msg_sig_post (msgport, sig, 0, refport);

	      /* The process has no message port.  Perhaps try direct
		 frobnication of the task.  */

	      if (taskerr)
		/* If we could not get the task port, we can do nothing.  */
		return taskerr;

	      if (refport == MACH_PORT_NULL)
		/* proc_pid2task returned success with a null task port.
		   That means the process is a zombie.  Signals
		   to zombies should return success and do nothing.  */
		return 0;

	      /* For user convenience in the case of a task that has
		 not registered any message port with the proc server,
		 translate a few signals to direct task operations.  */
	      switch (sig)
		{
		  /* The only signals that really make sense for an
		     unregistered task are kill, suspend, and continue.  */
		case SIGSTOP:
		case SIGTSTP:
		  return __task_suspend (refport);
		case SIGCONT:
		  return __task_resume (refport);
		case SIGTERM:
		case SIGQUIT:
		case SIGINT:
		  return __task_terminate (refport);
		default:
		  /* We have full permission to send signals, but there is
		     no meaningful way to express this signal.  */
		  return EPERM;
		}
	    }
	  err = HURD_MSGPORT_RPC (__proc_getmsgport (proc, pid, &msgport),
				  (taskerr = __proc_pid2task (proc, pid,
							      &refport)) ?
				  __proc_getsidport (proc, &refport) : 0, 1,
				  kill_port (msgport, refport));
	}
      if (! err)
	delivered = 1;
    }

  proc = _hurd_port_get (&_hurd_ports[INIT_PORT_PROC], &ulink);

  if (pid <= 0)
    {
      /* Send SIG to each process in pgrp (- PID).  */
      pid_t pidbuf[10], *pids = pidbuf;
      mach_msg_type_number_t i, npids = sizeof (pidbuf) / sizeof (pidbuf[0]);

      err = __proc_getpgrppids (proc, - pid, &pids, &npids);
      if (!err)
	{
	  for (i = 0; i < npids; ++i)
	    {
	      kill_pid (pids[i]);
	      if (err == ESRCH)
		/* The process died already.  Ignore it.  */
		err = 0;
	    }
	  if (pids != pidbuf)
	    __vm_deallocate (__mach_task_self (),
			     (vm_address_t) pids, npids * sizeof (pids[0]));
	}
    }
  else
    kill_pid (pid);

  _hurd_port_free (&_hurd_ports[INIT_PORT_PROC], &ulink, proc);

  /* If we delivered no signals, but ERR is clear, this must mean that
     every kill_pid call failed with ESRCH, meaning all the processes in
     the pgrp died between proc_getpgrppids and kill_pid; in that case we
     fail with ESRCH.  */
  return delivered ? 0 : __hurd_fail (err ?: ESRCH);
}
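
A usage sketch of the kill interface implemented above: a positive PID signals one process, 0 signals the caller's process group, and a value below -1 signals process group -PID. This is plain POSIX usage, not Hurd-specific code; the fork/pause arrangement is only illustrative.

#include <signal.h>
#include <stdio.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>

int
main (void)
{
  pid_t child = fork ();

  if (child == 0)
    {
      /* Child: wait until a signal arrives.  */
      pause ();
      _exit (0);
    }
  else if (child > 0)
    {
      /* Parent: signal the single child PID.  Passing 0 or -getpgrp ()
	 instead would target the whole process group, as the pgrp loop
	 in __kill above shows.  */
      if (kill (child, SIGTERM) != 0)
	perror ("kill");
      waitpid (child, NULL, 0);
    }
  return 0;
}
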
Code example #4
File: setresgid.c  Project: Drakey83/steamlink-sdk
/* Set the real group ID, effective group ID, and saved-set group ID,
   of the calling process to RGID, EGID, and SGID, respectively.  */
int
__setresgid (gid_t rgid, gid_t egid, gid_t sgid)
{
  auth_t newauth;
  error_t err;

  HURD_CRITICAL_BEGIN;
  __mutex_lock (&_hurd_id.lock);
  err = _hurd_check_ids ();

  if (!err)
    {
      /* Make a new auth handle which has EGID as the first element in the
	 list of effective gids.  */

      uid_t *newgen, *newaux;
      uid_t auxs[2] = { rgid, sgid };
      size_t ngen, naux;

      newgen = _hurd_id.gen.gids;
      ngen = _hurd_id.gen.ngids;
      if (egid != -1)
	{
	  if (_hurd_id.gen.ngids == 0)
	    {
	      /* No effective gids now.  The new set will be just EGID.  */
	      newgen = &egid;
	      ngen = 1;
	    }
	  else
	    {
	      _hurd_id.gen.gids[0] = egid;
	      _hurd_id.valid = 0;
	    }
	}

      newaux = _hurd_id.aux.gids;
      naux = _hurd_id.aux.ngids;
      if (rgid != -1)
	{
	  if (_hurd_id.aux.ngids == 0)
	    {
	      newaux = &rgid;
	      naux = 1;
	    }
	  else
	    {
	      _hurd_id.aux.gids[0] = rgid;
	      _hurd_id.valid = 0;
	    }
	}

      if (sgid != -1)
	{
	  if (rgid == -1)
	    {
	      if (_hurd_id.aux.ngids >= 1)
		auxs[0] = _hurd_id.aux.gids[0];
	      else if (_hurd_id.gen.ngids >= 1)
		auxs[0] = _hurd_id.gen.gids[0];
	      else
		/* Not even an effective GID.
                   Fall back to the only GID we have. */
		auxs[0] = sgid;
	    }
	  if (_hurd_id.aux.ngids <= 1)
	    {
	      /* No saved gids now.  The new set will be RGID and SGID.  */
	      newaux = auxs;
	      naux = 2;
	    }
	  else
	    {
	      _hurd_id.aux.gids[1] = sgid;
	      _hurd_id.valid = 0;
	    }
	}

      err = __USEPORT (AUTH, __auth_makeauth
		       (port, NULL, MACH_MSG_TYPE_COPY_SEND, 0,
			_hurd_id.gen.uids, _hurd_id.gen.nuids,
			_hurd_id.aux.uids, _hurd_id.aux.nuids,
			newgen, ngen, newaux, naux,
			&newauth));
    }
  __mutex_unlock (&_hurd_id.lock);
  HURD_CRITICAL_END;

  if (err)
    return __hurd_fail (err);

  /* Install the new handle and reauthenticate everything.  */
  err = __setauth (newauth);
  __mach_port_deallocate (__mach_task_self (), newauth);
  return err;
}
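
A usage sketch of the setresgid call above. Passing -1 leaves the corresponding ID unchanged, which is exactly the convention the -1 checks in the code implement; dropping the effective and saved GIDs back to the real GID is the common privilege-drop pattern.

#define _GNU_SOURCE
#include <stdio.h>
#include <unistd.h>

int
main (void)
{
  gid_t rgid = getgid ();

  /* Keep the real GID (-1 means "leave unchanged") and set the
     effective and saved GIDs to it.  */
  if (setresgid (-1, rgid, rgid) != 0)
    perror ("setresgid");
  return 0;
}
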
Code example #5
File: xmknod.c  Project: BackupTheBerlios/wl530g-svn
/* Create a device file named FILE_NAME, with permission and special bits MODE
   and device number DEV (which can be constructed from major and minor
   device numbers with the `makedev' macro above).  */
int
__xmknod (int vers, const char *file_name, mode_t mode, dev_t *dev)
{
    error_t err;
    file_t dir, node;
    char *name;
    char buf[100], *bp;
    const char *translator;
    size_t len;

    if (vers != _MKNOD_VER)
        return __hurd_fail (EINVAL);

    if (S_ISCHR (mode))
    {
        translator = _HURD_CHRDEV;
        len = sizeof (_HURD_CHRDEV);
    }
    else if (S_ISBLK (mode))
    {
        translator = _HURD_BLKDEV;
        len = sizeof (_HURD_BLKDEV);
    }
    else if (S_ISFIFO (mode))
    {
        translator = _HURD_FIFO;
        len = sizeof (_HURD_FIFO);
    }
    else
    {
        errno = EINVAL;
        return -1;
    }

    if (! S_ISFIFO (mode))
    {
        /* We set the translator to "ifmt\0major\0minor\0", where IFMT
           depends on the S_IFMT bits of our MODE argument, and MAJOR and
           MINOR are ASCII decimal (octal or hex would do as well)
           representations of our arguments.  Thus the convention is that
           CHRDEV and BLKDEV translators are invoked with two non-switch
           arguments, giving the major and minor device numbers in %i format. */

        bp = buf + sizeof (buf);
        *--bp = '\0';
        bp = _itoa (minor (*dev), bp, 10, 0);
        *--bp = '\0';
        bp = _itoa (major (*dev), bp, 10, 0);
        memcpy (bp - len, translator, len);
        translator = bp - len;
        len = buf + sizeof (buf) - translator;
    }

    dir = __file_name_split (file_name, &name);
    if (dir == MACH_PORT_NULL)
        return -1;

    /* Create a new, unlinked node in the target directory.  */
    err = __dir_mkfile (dir, O_WRITE, (mode & ~S_IFMT) & ~_hurd_umask, &node);

    if (! err)
        /* Set the node's translator to make it a device.  */
        err = __file_set_translator (node,
                                     FS_TRANS_EXCL | FS_TRANS_SET,
                                     FS_TRANS_EXCL | FS_TRANS_SET, 0,
                                     translator, len,
                                     MACH_PORT_NULL, MACH_MSG_TYPE_COPY_SEND);

    if (! err)
        /* Link the node, now a valid device, into the target directory.  */
        err = __dir_link (dir, node, name, 1);

    __mach_port_deallocate (__mach_task_self (), dir);
    __mach_port_deallocate (__mach_task_self (), node);

    if (err)
        return __hurd_fail (err);
    return 0;
}
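
A usage sketch of the public interface behind __xmknod: mknod with a device number assembled by makedev. The 1,3 major/minor pair is purely illustrative, creating device nodes normally requires privilege, and on the Hurd the call ends up setting a passive translator that encodes the device numbers, as the code above shows.

#include <stdio.h>
#include <sys/stat.h>
#include <sys/sysmacros.h>	/* makedev */
#include <sys/types.h>

int
main (void)
{
  /* Create a character-device node; on the Hurd this stores a
     translator string encoding the major and minor numbers, exactly
     as __xmknod above constructs it.  */
  if (mknod ("example-node", S_IFCHR | 0600, makedev (1, 3)) != 0)
    perror ("mknod");
  return 0;
}
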
Code example #6
/* Read a directory entry from DIRP.  */
int
__readdir64_r (DIR *dirp, struct dirent64 *entry, struct dirent64 **result)
{
  struct dirent64 *dp;
  error_t err = 0;

  if (dirp == NULL)
    {
      errno = EINVAL;
      return errno;
    }

  __libc_lock_lock (dirp->__lock);

  do
    {
      if (dirp->__ptr - dirp->__data >= dirp->__size)
	{
	  /* We've emptied out our buffer.  Refill it.  */

	  char *data = dirp->__data;
	  int nentries;

	  if (err = HURD_FD_PORT_USE (dirp->__fd,
				      __dir_readdir (port,
						     &data, &dirp->__size,
						     dirp->__entry_ptr,
						     -1, 0, &nentries)))
	    {
	      __hurd_fail (err);
	      dp = NULL;
	      break;
	    }

	  /* DATA now corresponds to entry index DIRP->__entry_ptr.  */
	  dirp->__entry_data = dirp->__entry_ptr;

	  if (data != dirp->__data)
	    {
	      /* The data was passed out of line, so our old buffer is no
		 longer useful.  Deallocate the old buffer and reset our
		 information for the new buffer.  */
	      __vm_deallocate (__mach_task_self (),
			       (vm_address_t) dirp->__data,
			       dirp->__allocation);
	      dirp->__data = data;
	      dirp->__allocation = round_page (dirp->__size);
	    }

	  /* Reset the pointer into the buffer.  */
	  dirp->__ptr = dirp->__data;

	  if (nentries == 0)
	    {
	      /* End of file.  */
	      dp = NULL;
	      break;
	    }

	  /* We trust the filesystem to return correct data and so we
	     ignore NENTRIES.  */
	}

      dp = (struct dirent64 *) dirp->__ptr;
      dirp->__ptr += dp->d_reclen;
      ++dirp->__entry_ptr;

      /* Loop to ignore deleted files.  */
    } while (dp->d_fileno == 0);

  if (dp)
    {
      *entry = *dp;
      memcpy (entry->d_name, dp->d_name, dp->d_namlen + 1);
      *result = entry;
    }
  else
    *result = NULL;

  __libc_lock_unlock (dirp->__lock);

  return dp ? 0 : err ? errno : 0;
}
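
A sketch of the calling convention __readdir64_r implements: the caller supplies the dirent buffer, a zero return with *RESULT set to NULL marks end of directory, and a nonzero return is an errno value. (readdir_r is deprecated in current glibc; this only illustrates the contract.)

#define _GNU_SOURCE
#include <dirent.h>
#include <stdio.h>

int
main (void)
{
  DIR *d = opendir (".");
  struct dirent64 entry, *result;

  if (d == NULL)
    {
      perror ("opendir");
      return 1;
    }

  /* Zero return with RESULT == NULL means end of directory.  */
  while (readdir64_r (d, &entry, &result) == 0 && result != NULL)
    printf ("%s\n", result->d_name);

  closedir (d);
  return 0;
}
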
Code example #7
void
_hurdsig_fault_init (void)
{
  error_t err;
  struct machine_thread_state state;
  mach_port_t sigexc;

  /* Allocate a port to receive signal thread exceptions.
     We will move this receive right to the proc server.  */
  err = __mach_port_allocate (__mach_task_self (),
			      MACH_PORT_RIGHT_RECEIVE, &sigexc);
  assert_perror (err);
  err = __mach_port_allocate (__mach_task_self (),
			      MACH_PORT_RIGHT_RECEIVE, &forward_sigexc);
  assert_perror (err);

  /* Allocate a port to receive the exception msgs forwarded
     from the proc server.  */
  err = __mach_port_insert_right (__mach_task_self (), sigexc,
				  sigexc, MACH_MSG_TYPE_MAKE_SEND);
  assert_perror (err);

  /* Set the queue limit for this port to just one.  The proc server will
     notice if we ever get a second exception while one remains queued and
     unreceived, and decide we are hopelessly buggy.  */
#ifdef MACH_PORT_RECEIVE_STATUS_COUNT
  {
    const mach_port_limits_t lim = { mpl_qlimit: 1 };
    assert (MACH_PORT_RECEIVE_STATUS_COUNT == sizeof lim / sizeof (natural_t));
    err = __mach_port_set_attributes (__mach_task_self (), forward_sigexc,
				      MACH_PORT_RECEIVE_STATUS,
				      (mach_port_info_t) &lim,
				      MACH_PORT_RECEIVE_STATUS_COUNT);
  }
#else
  err = __mach_port_set_qlimit (__mach_task_self (), forward_sigexc, 1);
#endif
  assert_perror (err);

  /* This state will be restored when we fault.
     It runs the function above.  */
  memset (&state, 0, sizeof state);
  MACHINE_THREAD_STATE_SET_PC (&state, faulted);
  MACHINE_THREAD_STATE_SET_SP (&state, faultstack, sizeof faultstack);

  err = __USEPORT
    (PROC,
     __proc_handle_exceptions (port,
			       sigexc,
			       forward_sigexc, MACH_MSG_TYPE_MAKE_SEND,
			       MACHINE_THREAD_STATE_FLAVOR,
			       (natural_t *) &state,
			       MACHINE_THREAD_STATE_COUNT));
  assert_perror (err);

  /* Direct signal thread exceptions to the proc server.  */
#ifdef THREAD_EXCEPTION_PORT
  err = __thread_set_special_port (_hurd_msgport_thread,
				   THREAD_EXCEPTION_PORT, sigexc);
#elif defined (EXC_MASK_ALL)
  __thread_set_exception_ports (_hurd_msgport_thread,
				EXC_MASK_ALL & ~(EXC_MASK_SYSCALL
						 | EXC_MASK_MACH_SYSCALL
						 | EXC_MASK_RPC_ALERT),
				sigexc,
				EXCEPTION_STATE_IDENTITY,
				MACHINE_THREAD_STATE);
#else
# error thread_set_exception_ports?
#endif
  __mach_port_deallocate (__mach_task_self (), sigexc);
  assert_perror (err);
}
Code example #8
File: hurdsig.c  Project: AubrCool/glibc
mach_port_t
_hurdsig_abort_rpcs (struct hurd_sigstate *ss, int signo, int sigthread,
		     struct machine_thread_all_state *state, int *state_change,
		     void (*reply) (void))
{
  extern const void _hurd_intr_rpc_msg_in_trap;
  mach_port_t rcv_port = MACH_PORT_NULL;
  mach_port_t intr_port;

  *state_change = 0;

  intr_port = ss->intr_port;
  if (intr_port == MACH_PORT_NULL)
    /* No interruption needs to be done.  */
    return MACH_PORT_NULL;

  /* Abort the thread's kernel context, so any pending message send or
     receive completes immediately or aborts.  */
  abort_thread (ss, state, reply);

  if (state->basic.PC < (natural_t) &_hurd_intr_rpc_msg_in_trap)
    {
      /* The thread is about to do the RPC, but hasn't yet entered
	 mach_msg.  Mutate the thread's state so it knows not to try
	 the RPC.  */
      INTR_MSG_BACK_OUT (&state->basic);
      MACHINE_THREAD_STATE_SET_PC (&state->basic,
				   &_hurd_intr_rpc_msg_in_trap);
      state->basic.SYSRETURN = MACH_SEND_INTERRUPTED;
      *state_change = 1;
    }
  else if (state->basic.PC == (natural_t) &_hurd_intr_rpc_msg_in_trap &&
	   /* The thread was blocked in the system call.  After thread_abort,
	      the return value register indicates what state the RPC was in
	      when interrupted.  */
	   state->basic.SYSRETURN == MACH_RCV_INTERRUPTED)
      {
	/* The RPC request message was sent and the thread was waiting for
	   the reply message; now the message receive has been aborted, so
	   the mach_msg call will return MACH_RCV_INTERRUPTED.  We must tell
	   the server to interrupt the pending operation.  The thread must
	   wait for the reply message before running the signal handler (to
	   guarantee that the operation has finished being interrupted), so
	   our nonzero return tells the trampoline code to finish the message
	   receive operation before running the handler.  */

	mach_port_t *reply = interrupted_reply_port_location (state,
							      sigthread);
	error_t err = __interrupt_operation (intr_port, _hurdsig_interrupt_timeout);

	if (err)
	  {
	    if (reply)
	      {
		/* The interrupt didn't work.
		   Destroy the receive right the thread is blocked on.  */
		__mach_port_destroy (__mach_task_self (), *reply);
		*reply = MACH_PORT_NULL;
	      }

	    /* The system call return value register now contains
	       MACH_RCV_INTERRUPTED; when mach_msg resumes, it will retry the
	       call.  Since we have just destroyed the receive right, the
	       retry will fail with MACH_RCV_INVALID_NAME.  Instead, just
	       change the return value here to EINTR so mach_msg will not
	       retry and the EINTR error code will propagate up.  */
	    state->basic.SYSRETURN = EINTR;
	    *state_change = 1;
	  }
	else if (reply)
	  rcv_port = *reply;

	/* All threads whose RPCs were interrupted by the interrupt_operation
	   call above will retry their RPCs unless we clear SS->intr_port.
	   So we clear it for the thread taking a signal when SA_RESTART is
	   clear, so that its call returns EINTR.  */
	if (! signo || !(ss->actions[signo].sa_flags & SA_RESTART))
	  ss->intr_port = MACH_PORT_NULL;
      }

  return rcv_port;
}
Code example #9
File: if_index.c  Project: siddhesh/glibc
/* Return an array of if_nameindex structures, one for each network
   interface present, plus one indicating the end of the array.  On
   error, return NULL.  */
struct if_nameindex *
__if_nameindex (void)
{
  error_t err = 0;
  char data[2048];
  file_t server;
  int fd = __opensock ();
  struct ifconf ifc;
  unsigned int nifs, i;
  struct if_nameindex *idx = NULL;

  ifc.ifc_buf = data;

  if (fd < 0)
    return NULL;

  server = _hurd_socket_server (PF_INET, 0);
  if (server == MACH_PORT_NULL)
    nifs = 0;
  else
    {
      size_t len = sizeof data;
      err = __pfinet_siocgifconf (server, -1, &ifc.ifc_buf, &len);
      if (err == MACH_SEND_INVALID_DEST || err == MIG_SERVER_DIED)
	{
	  /* On the first use of the socket server during the operation,
	     allow for the old server port dying.  */
	  server = _hurd_socket_server (PF_INET, 1);
	  if (server == MACH_PORT_NULL)
	    goto out;
	  err = __pfinet_siocgifconf (server, -1, &ifc.ifc_buf, &len);
	}
      if (err)
	goto out;

      ifc.ifc_len = len;
      nifs = len / sizeof (struct ifreq);
    }

  idx = malloc ((nifs + 1) * sizeof (struct if_nameindex));
  if (idx == NULL)
    {
      err = ENOBUFS;
      goto out;
    }

  for (i = 0; i < nifs; ++i)
    {
      struct ifreq *ifr = &ifc.ifc_req[i];
      idx[i].if_name = __strdup (ifr->ifr_name);
      if (idx[i].if_name == NULL
          || __ioctl (fd, SIOCGIFINDEX, ifr) < 0)
        {
          unsigned int j;
          err = errno;

          for (j =  0; j < i; ++j)
            free (idx[j].if_name);
          free (idx);
	  idx = NULL;

          if (err == EINVAL)
            err = ENOSYS;
          else if (err == ENOMEM)
            err = ENOBUFS;
          goto out;
        }
      idx[i].if_index = ifr->ifr_ifindex;
    }

  idx[i].if_index = 0;
  idx[i].if_name = NULL;

 out:
  __close (fd);
  if (data != ifc.ifc_buf)
    __vm_deallocate (__mach_task_self (), (vm_address_t) ifc.ifc_buf,
		     ifc.ifc_len);
  __set_errno (err);
  return idx;
}
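
A usage sketch of the public if_nameindex API implemented above; the terminating entry has if_index 0 and a NULL if_name, and the array is released with if_freenameindex.

#include <net/if.h>
#include <stdio.h>

int
main (void)
{
  struct if_nameindex *idx = if_nameindex ();
  struct if_nameindex *p;

  if (idx == NULL)
    {
      perror ("if_nameindex");
      return 1;
    }

  /* The array ends with an entry whose if_index is 0 and if_name is
     NULL, exactly as the implementation above terminates it.  */
  for (p = idx; p->if_index != 0 || p->if_name != NULL; ++p)
    printf ("%u: %s\n", p->if_index, p->if_name);

  if_freenameindex (idx);
  return 0;
}
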
Code example #10
/* Check the first NFDS descriptors either in POLLFDS (if nonnull) or in
   each of READFDS, WRITEFDS, EXCEPTFDS that is nonnull.  If TIMEOUT is not
   NULL, time out after waiting the interval specified therein.  Returns
   the number of ready descriptors, or -1 for errors.  */
int
_hurd_select (int nfds,
	      struct pollfd *pollfds,
	      fd_set *readfds, fd_set *writefds, fd_set *exceptfds,
	      const struct timespec *timeout, const sigset_t *sigmask)
{
  int i;
  mach_port_t portset;
  int got;
  error_t err;
  fd_set rfds, wfds, xfds;
  int firstfd, lastfd;
  mach_msg_timeout_t to = (timeout != NULL ?
			   (timeout->tv_sec * 1000 +
			    (timeout->tv_nsec + 999999) / 1000000) :
			   0);
  struct
    {
      struct hurd_userlink ulink;
      struct hurd_fd *cell;
      mach_port_t io_port;
      int type;
      mach_port_t reply_port;
    } d[nfds];
  sigset_t oset;

  union typeword		/* Use this to avoid unkosher casts.  */
    {
      mach_msg_type_t type;
      uint32_t word;
    };
  assert (sizeof (union typeword) == sizeof (mach_msg_type_t));
  assert (sizeof (uint32_t) == sizeof (mach_msg_type_t));

  if (sigmask && __sigprocmask (SIG_SETMASK, sigmask, &oset))
    return -1;

  if (pollfds)
    {
      /* Collect interesting descriptors from the user's `pollfd' array.
	 We do a first pass that reads the user's array before taking
	 any locks.  The second pass then only touches our own stack,
	 and gets the port references.  */

      for (i = 0; i < nfds; ++i)
	if (pollfds[i].fd >= 0)
	  {
	    int type = 0;
	    if (pollfds[i].events & POLLIN)
	      type |= SELECT_READ;
	    if (pollfds[i].events & POLLOUT)
	      type |= SELECT_WRITE;
	    if (pollfds[i].events & POLLPRI)
	      type |= SELECT_URG;

	    d[i].io_port = pollfds[i].fd;
	    d[i].type = type;
	  }
	else
	  d[i].type = 0;

      HURD_CRITICAL_BEGIN;
      __mutex_lock (&_hurd_dtable_lock);

      for (i = 0; i < nfds; ++i)
	if (d[i].type != 0)
	  {
	    const int fd = (int) d[i].io_port;

	    if (fd < _hurd_dtablesize)
	      {
		d[i].cell = _hurd_dtable[fd];
		d[i].io_port = _hurd_port_get (&d[i].cell->port, &d[i].ulink);
		if (d[i].io_port != MACH_PORT_NULL)
		  continue;
	      }

	    /* If one descriptor is bogus, we fail completely.  */
	    while (i-- > 0)
	      if (d[i].type != 0)
		_hurd_port_free (&d[i].cell->port,
				 &d[i].ulink, d[i].io_port);
	    break;
	  }

      __mutex_unlock (&_hurd_dtable_lock);
      HURD_CRITICAL_END;

      if (i < nfds)
	{
	  if (sigmask)
	    __sigprocmask (SIG_SETMASK, &oset, NULL);
	  errno = EBADF;
	  return -1;
	}

      lastfd = i - 1;
      firstfd = i == 0 ? lastfd : 0;
    }
  else
    {
      /* Collect interested descriptors from the user's fd_set arguments.
	 Use local copies so we can't crash from user bogosity.  */

      if (readfds == NULL)
	FD_ZERO (&rfds);
      else
	rfds = *readfds;
      if (writefds == NULL)
	FD_ZERO (&wfds);
      else
	wfds = *writefds;
      if (exceptfds == NULL)
	FD_ZERO (&xfds);
      else
	xfds = *exceptfds;

      HURD_CRITICAL_BEGIN;
      __mutex_lock (&_hurd_dtable_lock);

      if (nfds > _hurd_dtablesize)
	nfds = _hurd_dtablesize;

      /* Collect the ports for interesting FDs.  */
      firstfd = lastfd = -1;
      for (i = 0; i < nfds; ++i)
	{
	  int type = 0;
	  if (readfds != NULL && FD_ISSET (i, &rfds))
	    type |= SELECT_READ;
	  if (writefds != NULL && FD_ISSET (i, &wfds))
	    type |= SELECT_WRITE;
	  if (exceptfds != NULL && FD_ISSET (i, &xfds))
	    type |= SELECT_URG;
	  d[i].type = type;
	  if (type)
	    {
	      d[i].cell = _hurd_dtable[i];
	      d[i].io_port = _hurd_port_get (&d[i].cell->port, &d[i].ulink);
	      if (d[i].io_port == MACH_PORT_NULL)
		{
		  /* If one descriptor is bogus, we fail completely.  */
		  while (i-- > 0)
		    if (d[i].type != 0)
		      _hurd_port_free (&d[i].cell->port, &d[i].ulink,
				       d[i].io_port);
		  break;
		}
	      lastfd = i;
	      if (firstfd == -1)
		firstfd = i;
	    }
	}

      __mutex_unlock (&_hurd_dtable_lock);
      HURD_CRITICAL_END;

      if (i < nfds)
	{
	  if (sigmask)
	    __sigprocmask (SIG_SETMASK, &oset, NULL);
	  errno = EBADF;
	  return -1;
	}
    }


  err = 0;
  got = 0;

  /* Send them all io_select request messages.  */

  if (firstfd == -1)
    /* But not if there were no ports to deal with at all.
       We are just a pure timeout.  */
    portset = __mach_reply_port ();
  else
    {
      portset = MACH_PORT_NULL;

      for (i = firstfd; i <= lastfd; ++i)
	if (d[i].type)
	  {
	    int type = d[i].type;
	    d[i].reply_port = __mach_reply_port ();
	    err = __io_select (d[i].io_port, d[i].reply_port,
			       /* Poll only if there's a single descriptor.  */
			       (firstfd == lastfd) ? to : 0,
			       &type);
	    switch (err)
	      {
	      case MACH_RCV_TIMED_OUT:
		/* No immediate response.  This is normal.  */
		err = 0;
		if (firstfd == lastfd)
		  /* When there's a single descriptor, we don't need a
		     portset, so just pretend we have one, but really
		     use the single reply port.  */
		  portset = d[i].reply_port;
		else if (got == 0)
		  /* We've got multiple reply ports, so we need a port set to
		     multiplex them.  */
		  {
		    /* We will wait again for a reply later.  */
		    if (portset == MACH_PORT_NULL)
		      /* Create the portset to receive all the replies on.  */
		      err = __mach_port_allocate (__mach_task_self (),
						  MACH_PORT_RIGHT_PORT_SET,
						  &portset);
		    if (! err)
		      /* Put this reply port in the port set.  */
		      __mach_port_move_member (__mach_task_self (),
					       d[i].reply_port, portset);
		  }
		break;

	      default:
		/* No other error should happen.  Callers of select
		   don't expect to see errors, so we simulate
		   readiness of the erring object and the next call
		   hopefully will get the error again.  */
		type = SELECT_ALL;
		/* FALLTHROUGH */

	      case 0:
		/* We got an answer.  */
		if ((type & SELECT_ALL) == 0)
		  /* Bogus answer; treat like an error, as a fake positive.  */
		  type = SELECT_ALL;

		/* This port is ready already.  */
		d[i].type &= type;
		d[i].type |= SELECT_RETURNED;
		++got;
		break;
	      }
	    _hurd_port_free (&d[i].cell->port, &d[i].ulink, d[i].io_port);
	  }
    }

  /* Now wait for reply messages.  */
  if (!err && got == 0)
    {
      /* Now wait for io_select_reply messages on PORT,
	 timing out as appropriate.  */

      union
	{
	  mach_msg_header_t head;
#ifdef MACH_MSG_TRAILER_MINIMUM_SIZE
	  struct
	    {
	      mach_msg_header_t head;
	      NDR_record_t ndr;
	      error_t err;
	    } error;
	  struct
	    {
	      mach_msg_header_t head;
	      NDR_record_t ndr;
	      error_t err;
	      int result;
	      mach_msg_trailer_t trailer;
	    } success;
#else
	  struct
	    {
	      mach_msg_header_t head;
	      union typeword err_type;
	      error_t err;
	    } error;
	  struct
	    {
	      mach_msg_header_t head;
	      union typeword err_type;
	      error_t err;
	      union typeword result_type;
	      int result;
	    } success;
#endif
	} msg;
      mach_msg_option_t options = (timeout == NULL ? 0 : MACH_RCV_TIMEOUT);
      error_t msgerr;
      while ((msgerr = __mach_msg (&msg.head,
				   MACH_RCV_MSG | options,
				   0, sizeof msg, portset, to,
				   MACH_PORT_NULL)) == MACH_MSG_SUCCESS)
	{
	  /* We got a message.  Decode it.  */
#define IO_SELECT_REPLY_MSGID (21012 + 100) /* XXX */
#ifdef MACH_MSG_TYPE_BIT
	  const union typeword inttype =
	  { type:
	    { MACH_MSG_TYPE_INTEGER_T, sizeof (integer_t) * 8, 1, 1, 0, 0 }
	  };
#endif
	  if (msg.head.msgh_id == IO_SELECT_REPLY_MSGID &&
	      msg.head.msgh_size >= sizeof msg.error &&
	      !(msg.head.msgh_bits & MACH_MSGH_BITS_COMPLEX) &&
#ifdef MACH_MSG_TYPE_BIT
	      msg.error.err_type.word == inttype.word
#endif
	      )
	    {
	      /* This is a properly formatted message so far.
		 See if it is a success or a failure.  */
	      if (msg.error.err == EINTR &&
		  msg.head.msgh_size == sizeof msg.error)
		{
		  /* EINTR response; poll for further responses
		     and then return quickly.  */
		  err = EINTR;
		  goto poll;
		}
	      if (msg.error.err ||
		  msg.head.msgh_size != sizeof msg.success ||
#ifdef MACH_MSG_TYPE_BIT
		  msg.success.result_type.word != inttype.word ||
#endif
		  (msg.success.result & SELECT_ALL) == 0)
		{
		  /* Error or bogus reply.  Simulate readiness.  */
		  __mach_msg_destroy (&msg.head);
		  msg.success.result = SELECT_ALL;
		}

	      /* Look up the respondent's reply port and record its
                 readiness.  */
	      {
		int had = got;
		if (firstfd != -1)
		  for (i = firstfd; i <= lastfd; ++i)
		    if (d[i].type
			&& d[i].reply_port == msg.head.msgh_local_port)
		      {
			d[i].type &= msg.success.result;
			d[i].type |= SELECT_RETURNED;
			++got;
		      }
		assert (got > had);
	      }
	    }

	  if (msg.head.msgh_remote_port != MACH_PORT_NULL)
	    __mach_port_deallocate (__mach_task_self (),
				    msg.head.msgh_remote_port);

	  if (got)
	  poll:
	    {
	      /* Poll for another message.  */
	      to = 0;
	      options |= MACH_RCV_TIMEOUT;
	    }
	}

      if (err == MACH_RCV_TIMED_OUT)
	/* This is the normal value for ERR.  We might have timed out and
	   read no messages.  Otherwise, after receiving the first message,
	   we poll for more messages.  We receive with a timeout of 0 to
	   effect a poll, so ERR is MACH_RCV_TIMED_OUT when the poll finds no
	   message waiting.  */
	err = 0;

      if (got)
	/* At least one descriptor is known to be ready now, so we will
	   return success.  */
	err = 0;
    }
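
_hurd_select is the common backend for both select and poll; the first pass over the pollfd array above maps POLLIN, POLLOUT, and POLLPRI onto SELECT_READ, SELECT_WRITE, and SELECT_URG. A usage sketch of the poll side of that interface (standard POSIX, nothing Hurd-specific):

#include <poll.h>
#include <stdio.h>
#include <unistd.h>

int
main (void)
{
  /* Watch stdin for readability with a 1-second timeout.  POLLIN is
     the bit that _hurd_select translates to SELECT_READ.  */
  struct pollfd pfd = { .fd = STDIN_FILENO, .events = POLLIN };
  int n = poll (&pfd, 1, 1000);

  if (n < 0)
    perror ("poll");
  else if (n == 0)
    printf ("timed out\n");
  else if (pfd.revents & POLLIN)
    printf ("stdin is readable\n");
  return 0;
}
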
Code example #11
File: mmap.c  Project: BackupTheBerlios/wl530g-svn
__ptr_t
__mmap (__ptr_t addr, size_t len, int prot, int flags, int fd, off_t offset)
{
  error_t err;
  vm_prot_t vmprot;
  memory_object_t memobj;
  vm_address_t mapaddr;
  vm_size_t pageoff;

  mapaddr = (vm_address_t) addr;

  if ((flags & (MAP_TYPE|MAP_INHERIT)) == MAP_ANON
      && prot == (PROT_READ|PROT_WRITE)) /* cf VM_PROT_DEFAULT */
    {
      /* vm_allocate has (a little) less overhead in the kernel too.  */
      err = __vm_allocate (__mach_task_self (), &mapaddr, len,
			   !(flags & MAP_FIXED));

      if (err == KERN_NO_SPACE && (flags & MAP_FIXED))
	{
	  /* XXX this is not atomic as it is in unix! */
	  /* The region is already allocated; deallocate it first.  */
	  err = __vm_deallocate (__mach_task_self (), mapaddr, len);
	  if (!err)
	    err = __vm_allocate (__mach_task_self (), &mapaddr, len, 0);
	}

      return err ? (__ptr_t) (long int) __hurd_fail (err) : (__ptr_t) mapaddr;
    }

  pageoff = offset & (vm_page_size - 1);
  offset &= ~(vm_page_size - 1);

  if (flags & MAP_FIXED)
    {
      /* A specific address is requested.  It need not be page-aligned;
	 it just needs to be congruent with the object offset.  */
      if ((mapaddr & (vm_page_size - 1)) != pageoff)
	return (__ptr_t) (long int) __hurd_fail (EINVAL);
      else
	/* We will add back PAGEOFF after mapping.  */
	mapaddr -= pageoff;
    }

  vmprot = VM_PROT_NONE;
  if (prot & PROT_READ)
    vmprot |= VM_PROT_READ;
  if (prot & PROT_WRITE)
    vmprot |= VM_PROT_WRITE;
  if (prot & PROT_EXEC)
    vmprot |= VM_PROT_EXECUTE;

  switch (flags & MAP_TYPE)
    {
    default:
      return (__ptr_t) (long int) __hurd_fail (EINVAL);

    case MAP_ANON:
      memobj = MACH_PORT_NULL;
      break;

    case MAP_FILE:
    case 0:			/* Allow, e.g., just MAP_SHARED.  */
      {
	mach_port_t robj, wobj;
	if (err = HURD_DPORT_USE (fd, __io_map (port, &robj, &wobj)))
	  return (__ptr_t) (long int) __hurd_dfail (fd, err);
	switch (prot & (PROT_READ|PROT_WRITE))
	  {
	  case PROT_READ:
	    memobj = robj;
	    if (wobj != MACH_PORT_NULL)
	      __mach_port_deallocate (__mach_task_self (), wobj);
	    break;
	  case PROT_WRITE:
	    memobj = wobj;
	    if (robj != MACH_PORT_NULL)
	      __mach_port_deallocate (__mach_task_self (), robj);
	    break;
	  case PROT_READ|PROT_WRITE:
	    if (robj == wobj)
	      {
		memobj = wobj;
		/* Remove extra reference.  */
		__mach_port_deallocate (__mach_task_self (), memobj);
	      }
	    else if (wobj == MACH_PORT_NULL && /* Not writable by mapping.  */
		     !(flags & MAP_SHARED))
	      /* The file can only be mapped for reading.  Since we are
		 making a private mapping, we will never try to write the
		 object anyway, so we don't care.  */
	      memobj = robj;
	    else
	      {
		__mach_port_deallocate (__mach_task_self (), wobj);
		return (__ptr_t) (long int) __hurd_fail (EACCES);
	      }
	    break;
	  }
	break;
	/* XXX handle MAP_NOEXTEND */
      }
    }

  /* XXX handle MAP_INHERIT */

  err = __vm_map (__mach_task_self (),
		  &mapaddr, (vm_size_t) len, (vm_address_t) 0,
		  ! (flags & MAP_FIXED),
		  memobj, (vm_offset_t) offset,
		  ! (flags & MAP_SHARED),
		  vmprot, VM_PROT_ALL,
		  (flags & MAP_SHARED) ? VM_INHERIT_SHARE : VM_INHERIT_COPY);

  if (err == KERN_NO_SPACE && (flags & MAP_FIXED))
    {
      /* XXX this is not atomic as it is in unix! */
      /* The region is already allocated; deallocate it first.  */
      err = __vm_deallocate (__mach_task_self (), mapaddr, len);
      if (! err)
	err = __vm_map (__mach_task_self (),
			&mapaddr, (vm_size_t) len, (vm_address_t) 0,
			0, memobj, (vm_offset_t) offset,
			! (flags & MAP_SHARED),
			vmprot, VM_PROT_ALL,
			(flags & MAP_SHARED) ? VM_INHERIT_SHARE
			: VM_INHERIT_COPY);
    }

  if (memobj != MACH_PORT_NULL)
    __mach_port_deallocate (__mach_task_self (), memobj);

  if (err)
    return (__ptr_t) (long int) __hurd_fail (err);

  /* Adjust the mapping address for the offset-within-page.  */
  mapaddr += pageoff;

  return (__ptr_t) mapaddr;
}
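
A usage sketch of the mmap interface implemented above, creating an anonymous read/write mapping; depending on the exact flag bits, such a request may be satisfied by the vm_allocate shortcut at the top of __mmap.

#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

int
main (void)
{
  size_t len = 4096;

  /* Anonymous, private, readable and writable mapping not backed by
     any file descriptor.  */
  void *p = mmap (NULL, len, PROT_READ | PROT_WRITE,
		  MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  if (p == MAP_FAILED)
    {
      perror ("mmap");
      return 1;
    }
  strcpy (p, "hello");
  printf ("%s\n", (char *) p);
  munmap (p, len);
  return 0;
}
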
Code example #12
File: hurdexec.c  Project: ljegege/linux-source
/* Overlay TASK, executing FILE with arguments ARGV and environment ENVP.
   If TASK == mach_task_self (), some ports are dealloc'd by the exec server.
   ARGV and ENVP are terminated by NULL pointers.  */
error_t
_hurd_exec (task_t task, file_t file,
	    char *const argv[], char *const envp[])
{
  error_t err;
  char *args, *env;
  size_t argslen, envlen;
  int ints[INIT_INT_MAX];
  mach_port_t ports[_hurd_nports];
  struct hurd_userlink ulink_ports[_hurd_nports];
  file_t *dtable;
  unsigned int dtablesize, i;
  struct hurd_port **dtable_cells;
  struct hurd_userlink *ulink_dtable;
  struct hurd_sigstate *ss;
  mach_port_t *please_dealloc, *pdp;

  /* XXX needs to be hurdmalloc XXX */
  if (err = __argz_create (argv, &args, &argslen))
    return err;
  if (err = __argz_create (envp, &env, &envlen))
    goto outargs;

  /* Load up the ports to give to the new program.  */
  for (i = 0; i < _hurd_nports; ++i)
    if (i == INIT_PORT_PROC && task != __mach_task_self ())
      {
	/* This is another task, so we need to ask the proc server
	   for the right proc server port for it.  */
	if (err = __USEPORT (PROC, __proc_task2proc (port, task, &ports[i])))
	  {
	    while (--i > 0)
	      _hurd_port_free (&_hurd_ports[i], &ulink_ports[i], ports[i]);
	    goto outenv;
	  }
      }
    else
      ports[i] = _hurd_port_get (&_hurd_ports[i], &ulink_ports[i]);


  /* Load up the ints to give the new program.  */
  for (i = 0; i < INIT_INT_MAX; ++i)
    switch (i)
      {
      case INIT_UMASK:
	ints[i] = _hurd_umask;
	break;

      case INIT_SIGMASK:
      case INIT_SIGIGN:
      case INIT_SIGPENDING:
	/* We will set these all below.  */
	break;

      case INIT_TRACEMASK:
	ints[i] = _hurdsig_traced;
	break;

      default:
	ints[i] = 0;
      }

  ss = _hurd_self_sigstate ();

  assert (! __spin_lock_locked (&ss->critical_section_lock));
  __spin_lock (&ss->critical_section_lock);

  __spin_lock (&ss->lock);
  ints[INIT_SIGMASK] = ss->blocked;
  ints[INIT_SIGPENDING] = ss->pending;
  ints[INIT_SIGIGN] = 0;
  for (i = 1; i < NSIG; ++i)
    if (ss->actions[i].sa_handler == SIG_IGN)
      ints[INIT_SIGIGN] |= __sigmask (i);

  /* We hold the sigstate lock until the exec has failed so that no signal
     can arrive between when we pack the blocked and ignored signals, and
     when the exec actually happens.  A signal handler could change what
     signals are blocked and ignored.  Either the change will be reflected
     in the exec, or the signal will never be delivered.  Setting the
     critical section flag avoids anything we call trying to acquire the
     sigstate lock.  */

  __spin_unlock (&ss->lock);

  /* Pack up the descriptor table to give the new program.  */
  __mutex_lock (&_hurd_dtable_lock);

  dtablesize = _hurd_dtable ? _hurd_dtablesize : _hurd_init_dtablesize;

  if (task == __mach_task_self ())
    /* Request the exec server to deallocate some ports from us if the exec
       succeeds.  The init ports and descriptor ports will arrive in the
       new program's exec_startup message.  If we failed to deallocate
       them, the new program would have duplicate user references for them.
       But we cannot deallocate them ourselves, because we must still have
       them after a failed exec call.  */
    please_dealloc = __alloca ((_hurd_nports + (2 * dtablesize))
				* sizeof (mach_port_t));
  else
    please_dealloc = NULL;
  pdp = please_dealloc;

  if (_hurd_dtable != NULL)
    {
      dtable = __alloca (dtablesize * sizeof (dtable[0]));
      ulink_dtable = __alloca (dtablesize * sizeof (ulink_dtable[0]));
      dtable_cells = __alloca (dtablesize * sizeof (dtable_cells[0]));
      for (i = 0; i < dtablesize; ++i)
	{
	  struct hurd_fd *const d = _hurd_dtable[i];
	  if (d == NULL)
	    {
	      dtable[i] = MACH_PORT_NULL;
	      continue;
	    }
	  __spin_lock (&d->port.lock);
	  if (d->flags & FD_CLOEXEC)
	    {
	      /* This descriptor is marked to be closed on exec.
		 So don't pass it to the new program.  */
	      dtable[i] = MACH_PORT_NULL;
	      if (pdp && d->port.port != MACH_PORT_NULL)
		{
		  /* We still need to deallocate the ports.  */
		  *pdp++ = d->port.port;
		  if (d->ctty.port != MACH_PORT_NULL)
		    *pdp++ = d->ctty.port;
		}
	      __spin_unlock (&d->port.lock);
	    }
	  else
	    {
	      if (pdp && d->ctty.port != MACH_PORT_NULL)
		/* All the elements of DTABLE are added to PLEASE_DEALLOC
		   below, so we needn't add the port itself.
		   But we must deallocate the ctty port as well as
		   the normal port that got installed in DTABLE[I].  */
		*pdp++ = d->ctty.port;
	      dtable[i] = _hurd_port_locked_get (&d->port, &ulink_dtable[i]);
	      dtable_cells[i] = &d->port;
	    }
	}
    }
  else
    {
      dtable = _hurd_init_dtable;
      ulink_dtable = NULL;
      dtable_cells = NULL;
    }

  /* The information is all set up now.  Try to exec the file.  */

  {
    if (pdp)
      {
	/* Request the exec server to deallocate some ports from us if the exec
	   succeeds.  The init ports and descriptor ports will arrive in the
	   new program's exec_startup message.  If we failed to deallocate
	   them, the new program would have duplicate user references for them.
	   But we cannot deallocate them ourselves, because we must still have
	   them after a failed exec call.  */

	for (i = 0; i < _hurd_nports; ++i)
	  *pdp++ = ports[i];
	for (i = 0; i < dtablesize; ++i)
	  *pdp++ = dtable[i];
      }

    err = __file_exec (file, task, 0,
		       args, argslen, env, envlen,
		       dtable, MACH_MSG_TYPE_COPY_SEND, dtablesize,
		       ports, MACH_MSG_TYPE_COPY_SEND, _hurd_nports,
		       ints, INIT_INT_MAX,
		       please_dealloc, pdp - please_dealloc,
		       &_hurd_msgport, task == __mach_task_self () ? 1 : 0);
  }

  /* Release references to the standard ports.  */
  for (i = 0; i < _hurd_nports; ++i)
    if (i == INIT_PORT_PROC && task != __mach_task_self ())
      __mach_port_deallocate (__mach_task_self (), ports[i]);
    else
      _hurd_port_free (&_hurd_ports[i], &ulink_ports[i], ports[i]);

  if (ulink_dtable != NULL)
    /* Release references to the file descriptor ports.  */
    for (i = 0; i < dtablesize; ++i)
      if (dtable[i] != MACH_PORT_NULL)
	_hurd_port_free (dtable_cells[i], &ulink_dtable[i], dtable[i]);

  /* Release lock on the file descriptor table. */
  __mutex_unlock (&_hurd_dtable_lock);

  /* Safe to let signals happen now.  */
  _hurd_critical_section_unlock (ss);

 outargs:
  free (args);
 outenv:
  free (env);
  return err;
}
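
_hurd_exec is the machinery behind the exec family on the Hurd. A sketch of the user-level call it ultimately serves; /bin/ls is only an illustrative path.

#include <stdio.h>
#include <unistd.h>

int
main (void)
{
  char *const argv[] = { "ls", "-l", NULL };

  /* On success execv does not return: the current program image is
     replaced, which on the Hurd goes through code like _hurd_exec and
     __file_exec above.  */
  execv ("/bin/ls", argv);
  perror ("execv");
  return 1;
}
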
Code example #13
File: socketpair.c  Project: JamesLinus/glibc-mips
/* Create two new sockets, of type TYPE in domain DOMAIN and using
   protocol PROTOCOL, which are connected to each other, and put file
   descriptors for them in FDS[0] and FDS[1].  If PROTOCOL is zero,
   one will be chosen automatically.  Returns 0 on success, -1 for errors.  */
int
__socketpair (int domain, int type, int protocol, int fds[2])
{
  error_t err;
  socket_t server, sock1, sock2;
  int d1, d2;

  if (fds == NULL)
    return __hurd_fail (EINVAL);

  /* Find the domain's socket server.  */
  server = _hurd_socket_server (domain, 0);
  if (server == MACH_PORT_NULL)
    return -1;

  /* Create two sockets and connect them together.  */

  err = __socket_create (server, type, protocol, &sock1);
  if (err == MACH_SEND_INVALID_DEST || err == MIG_SERVER_DIED
      || err == MIG_BAD_ID || err == EOPNOTSUPP)
    {
      /* On the first use of the socket server during the operation,
	 allow for the old server port dying.  */
      server = _hurd_socket_server (domain, 1);
      if (server == MACH_PORT_NULL)
	return -1;
      err = __socket_create (server, type, protocol, &sock1);
    }
  if (err)
    return __hurd_fail (err);
  if (err = __socket_create (server, type, protocol, &sock2))
    {
      __mach_port_deallocate (__mach_task_self (), sock1);
      return __hurd_fail (err);
    }
  if (err = __socket_connect2 (sock1, sock2))
    {
      __mach_port_deallocate (__mach_task_self (), sock1);
      __mach_port_deallocate (__mach_task_self (), sock2);
      return __hurd_fail (err);
    }

  /* Put the sockets into file descriptors.  */

  d1 = _hurd_intern_fd (sock1, O_IGNORE_CTTY, 1);
  if (d1 < 0)
    {
      __mach_port_deallocate (__mach_task_self (), sock2);
      return -1;
    }
  d2 = _hurd_intern_fd (sock2, O_IGNORE_CTTY, 1);
  if (d2 < 0)
    {
      err = errno;
      (void) close (d1);
      return __hurd_fail (err);
    }

  fds[0] = d1;
  fds[1] = d2;
  return 0;
}
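
A usage sketch of the socketpair call implemented above, using a Unix-domain stream pair as the illustrative case; the two descriptors are connected to each other, so bytes written on one end are read on the other.

#include <stdio.h>
#include <sys/socket.h>
#include <unistd.h>

int
main (void)
{
  int fds[2];
  char buf[16];
  ssize_t n;

  /* Two connected sockets in FDS[0] and FDS[1].  */
  if (socketpair (AF_UNIX, SOCK_STREAM, 0, fds) != 0)
    {
      perror ("socketpair");
      return 1;
    }
  write (fds[0], "ping", 4);
  n = read (fds[1], buf, sizeof buf);
  printf ("%.*s\n", (int) n, buf);
  close (fds[0]);
  close (fds[1]);
  return 0;
}
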
Code example #14
File: recvmsg.c  Project: Dinesh-Ramakrishnan/glibc
/* Receive a message as described by MESSAGE from socket FD.
   Returns the number of bytes read or -1 for errors.  */
ssize_t
__libc_recvmsg (int fd, struct msghdr *message, int flags)
{
  error_t err;
  addr_port_t aport;
  char *data = NULL;
  mach_msg_type_number_t len = 0;
  mach_port_t *ports;
  mach_msg_type_number_t nports = 0;
  char *cdata = NULL;
  mach_msg_type_number_t clen = 0;
  size_t amount;
  char *buf;
  int i;

  /* Find the total number of bytes to be read.  */
  amount = 0;
  for (i = 0; i < message->msg_iovlen; i++)
    {
      amount += message->msg_iov[i].iov_len;

      /* As an optimization, we set the initial values of DATA and LEN
         from the first non-empty iovec.  This kicks in when the whole
         packet fits into that iovec buffer.  */
      if (data == NULL && message->msg_iov[i].iov_len > 0)
	{
	  data = message->msg_iov[i].iov_base;
	  len = message->msg_iov[i].iov_len;
	}
    }

  buf = data;
  if (err = HURD_DPORT_USE (fd, __socket_recv (port, &aport,
					       flags, &data, &len,
					       &ports, &nports,
					       &cdata, &clen,
					       &message->msg_flags, amount)))
    return __hurd_sockfail (fd, flags, err);

  if (message->msg_name != NULL)
    {
      char *buf = message->msg_name;
      mach_msg_type_number_t buflen = message->msg_namelen;
      int type;

      err = __socket_whatis_address (aport, &type, &buf, &buflen);
      if (err == EOPNOTSUPP)
	/* If the protocol server can't tell us the address, just return a
	   zero-length one.  */
	{
	  buf = message->msg_name;
	  buflen = 0;
	  err = 0;
	}

      if (err)
	{
	  __mach_port_deallocate (__mach_task_self (), aport);
	  return __hurd_sockfail (fd, flags, err);
	}

      if (message->msg_namelen > buflen)
	message->msg_namelen = buflen;

      if (buf != message->msg_name)
	{
	  memcpy (message->msg_name, buf, message->msg_namelen);
	  __vm_deallocate (__mach_task_self (), (vm_address_t) buf, buflen);
	}

      if (buflen > 0)
	((struct sockaddr *) message->msg_name)->sa_family = type;
    }

  __mach_port_deallocate (__mach_task_self (), aport);

  if (buf == data)
    buf += len;
  else
    {
      /* Copy the data into MSG.  */
      if (len > amount)
	message->msg_flags |= MSG_TRUNC;
      else
	amount = len;

      buf = data;
      for (i = 0; i < message->msg_iovlen; i++)
	{
#define min(a, b)	((a) > (b) ? (b) : (a))
	  size_t copy = min (message->msg_iov[i].iov_len, amount);

	  memcpy (message->msg_iov[i].iov_base, buf, copy);

	  buf += copy;
	  amount -= copy;
	  if (len == 0)
	    break;
	}

      __vm_deallocate (__mach_task_self (), (vm_address_t) data, len);
    }

  /* Copy the control message into MSG.  */
  if (clen > message->msg_controllen)
    message->msg_flags |= MSG_CTRUNC;
  else
    message->msg_controllen = clen;
  memcpy (message->msg_control, cdata, message->msg_controllen);

  __vm_deallocate (__mach_task_self (), (vm_address_t) cdata, clen);

  return (buf - data);
}
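
A usage sketch of the recvmsg interface implemented above, with a single iovec in the msghdr; scatter buffers and any ancillary data follow the same msg_iov/msg_control layout the code copies into. The datagram socketpair here is only illustrative.

#include <stdio.h>
#include <sys/socket.h>
#include <sys/uio.h>
#include <unistd.h>

int
main (void)
{
  int fds[2];
  char buf[64];
  struct iovec iov = { .iov_base = buf, .iov_len = sizeof buf };
  struct msghdr msg = { .msg_iov = &iov, .msg_iovlen = 1 };
  ssize_t n;

  if (socketpair (AF_UNIX, SOCK_DGRAM, 0, fds) != 0)
    {
      perror ("socketpair");
      return 1;
    }
  send (fds[0], "hello", 5, 0);

  /* recvmsg scatters the received bytes across msg_iov, which is the
     copy loop __libc_recvmsg performs when the data arrives out of line.  */
  n = recvmsg (fds[1], &msg, 0);
  if (n < 0)
    perror ("recvmsg");
  else
    printf ("%.*s\n", (int) n, buf);

  close (fds[0]);
  close (fds[1]);
  return 0;
}
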