Example #1
File: fs_read.c Project: a1ien/nuttx
ssize_t read(int fd, FAR void *buf, size_t nbytes)
{
  ssize_t ret;

  /* read() is a cancellation point */

  (void)enter_cancellation_point();

  /* Did we get a valid file descriptor? */

#if CONFIG_NFILE_DESCRIPTORS > 0
  if ((unsigned int)fd >= CONFIG_NFILE_DESCRIPTORS)
#endif
    {
#if defined(CONFIG_NET) && CONFIG_NSOCKET_DESCRIPTORS > 0
      /* No.. If networking is enabled, read() is the same as recv() with
       * the flags parameter set to zero.  Note that recv() sets
       * the errno variable.
       */

      ret = recv(fd, buf, nbytes, 0);
#else
      /* No networking... it is a bad descriptor in any event */

      set_errno(EBADF);
      ret = ERROR;
#endif
    }

#if CONFIG_NFILE_DESCRIPTORS > 0
  else
    {
      FAR struct file *filep;

      /* The descriptor is in a valid range for a file descriptor... do the
       * read.  First, get the file structure.  Note that on failure,
       * fs_getfilep() will set the errno variable.
       */

      filep = fs_getfilep(fd);
      if (filep == NULL)
        {
          /* The errno value has already been set */

          ret = ERROR;
        }
      else
        {
          /* Then let file_read do all of the work.  Note that file_read()
           * sets the errno variable.
           */

          ret = file_read(filep, buf, nbytes);
        }
    }
#endif

  leave_cancellation_point();
  return ret;
}
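Note: the enter/leave_cancellation_point() bracket above is what allows a thread blocked in read() to be terminated by pthread_cancel(). The following caller-side sketch is not project code; it is a minimal, hypothetical illustration using only standard POSIX calls (reader(), cancel_blocked_reader() and the one-second delay are assumptions).

/* Hypothetical caller sketch (not project code): read() as a cancellation
 * point.  A thread blocked in read() can be cancelled from another thread.
 */

#include <pthread.h>
#include <unistd.h>

static void *reader(void *arg)
{
  int fd = *(int *)arg;
  char buf[64];

  /* If a cancellation request arrives while we are blocked here, the
   * cancellation-point bracket inside read() lets the thread unwind
   * instead of blocking forever.
   */

  (void)read(fd, buf, sizeof(buf));
  return NULL;
}

int cancel_blocked_reader(int fd)
{
  pthread_t tid;

  if (pthread_create(&tid, NULL, reader, &fd) != 0)
    {
      return -1;
    }

  sleep(1);                        /* Assume the reader has blocked by now */
  (void)pthread_cancel(tid);       /* Wakes it at the cancellation point */
  return pthread_join(tid, NULL);  /* Exit value would be PTHREAD_CANCELED */
}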
Example #2
File: sendto.c Project: dagar/NuttX
ssize_t sendto(int sockfd, FAR const void *buf, size_t len, int flags,
               FAR const struct sockaddr *to, socklen_t tolen)
{
  FAR struct socket *psock;
  ssize_t ret;

  /* sendto() is a cancellation point */

  (void)enter_cancellation_point();

  /* Get the underlying socket structure */

  psock = sockfd_socket(sockfd);

  /* And let psock_sendto do all of the work */

  ret = psock_sendto(psock, buf, len, flags, to, tolen);
  if (ret < 0)
    {
      set_errno((int)-ret);
      ret = ERROR;
    }

  leave_cancellation_point();
  return ret;
}
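psock_sendto() reports failures as a negated errno value; the wrapper above converts that into the POSIX convention of setting errno and returning ERROR (-1). A minimal, hypothetical caller sketch showing how that surfaces to application code (send_hello(), the address, and the payload are assumptions, not project code):

#include <sys/socket.h>
#include <netinet/in.h>
#include <arpa/inet.h>
#include <string.h>
#include <stdint.h>
#include <errno.h>
#include <stdio.h>

int send_hello(int sockfd, const char *ip, uint16_t port)
{
  struct sockaddr_in to;
  const char msg[] = "hello";

  memset(&to, 0, sizeof(to));
  to.sin_family = AF_INET;
  to.sin_port   = htons(port);
  (void)inet_pton(AF_INET, ip, &to.sin_addr);

  if (sendto(sockfd, msg, sizeof(msg), 0,
             (struct sockaddr *)&to, sizeof(to)) < 0)
    {
      /* sendto() has already mapped the negated return value into errno */

      printf("sendto failed: %d\n", errno);
      return -1;
    }

  return 0;
}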
Example #3
int fcntl(int fd, int cmd, ...)
{
	FAR struct file *filep;
	va_list ap;
	int ret;

	/* fcntl() is a cancellation point */
	(void)enter_cancellation_point();

	/* Setup to access the variable argument list */

	va_start(ap, cmd);

	/* Did we get a valid file descriptor? */

#if CONFIG_NFILE_DESCRIPTORS > 0
	if ((unsigned int)fd < CONFIG_NFILE_DESCRIPTORS) {
		/* Get the file structure corresponding to the file descriptor. */

		filep = fs_getfilep(fd);
		if (!filep) {
			/* The errno value has already been set */

			va_end(ap);
			leave_cancellation_point();
			return ERROR;
		}

		/* Let file_vfcntl() do the real work */

		ret = file_vfcntl(filep, cmd, ap);
	} else
#endif
	{
		/* No... check for operations on a socket descriptor */

#if defined(CONFIG_NET) && CONFIG_NSOCKET_DESCRIPTORS > 0
		if ((unsigned int)fd < (CONFIG_NFILE_DESCRIPTORS + CONFIG_NSOCKET_DESCRIPTORS)) {
			/* Yes.. defer socket descriptor operations to net_vfcntl() */
			ret = net_vfcntl(fd, cmd, ap);
		} else
#endif
		{
			/* No.. this descriptor number is out of range */

			set_errno(EBADF);
			ret = ERROR;
		}
	}

	va_end(ap);
	leave_cancellation_point();
	return ret;
}
Example #4
File: sig_waitinfo.c Project: a1ien/nuttx
int sigwaitinfo(FAR const sigset_t *set, FAR struct siginfo *info)
{
  int ret;

  /* sigwaitinfo() is a cancellation point */

  (void)enter_cancellation_point();

  /* Just a wrapper around sigtimedwait() */

  ret = sigtimedwait(set, info, NULL);
  leave_cancellation_point();
  return ret;
}
Example #5
File: send.c Project: a1ien/nuttx
ssize_t send(int sockfd, FAR const void *buf, size_t len, int flags)
{
  FAR struct socket *psock;
  ssize_t ret;

  /* send() is a cancellation point */

  (void)enter_cancellation_point();

  /* Get the underlying socket structure */

  psock = sockfd_socket(sockfd);

  /* And let psock_send do all of the work */

  ret = psock_send(psock, buf, len, flags);
  leave_cancellation_point();
  return ret;
}
Example #6
File: recvfrom.c Project: dagar/NuttX
ssize_t recvfrom(int sockfd, FAR void *buf, size_t len, int flags,
                 FAR struct sockaddr *from, FAR socklen_t *fromlen)
{
  ssize_t ret;

  /* recvfrom() is a cancellation point */

  (void)enter_cancellation_point();

  /* Let nx_recvfrom() and psock_recvfrom() do all of the work */

  ret = nx_recvfrom(sockfd, buf, len, flags, from, fromlen);
  if (ret < 0)
    {
      set_errno(-ret);
      ret = ERROR;
    }

  leave_cancellation_point();
  return ret;
}
Example #7
File: send.c Project: a1ien/nuttx
ssize_t psock_send(FAR struct socket *psock, FAR const void *buf, size_t len,
                   int flags)
{
  int ret;

  /* Treat as a cancellation point */

  (void)enter_cancellation_point();

  switch (psock->s_type)
    {
#if defined(CONFIG_NET_PKT)
      case SOCK_RAW:
        {
          ret = psock_pkt_send(psock, buf, len);
        }
        break;
#endif

#if defined(CONFIG_NET_TCP) || defined(CONFIG_NET_LOCAL_STREAM)
      case SOCK_STREAM:
        {
#ifdef CONFIG_NET_LOCAL_STREAM
#ifdef CONFIG_NET_TCP
          if (psock->s_domain == PF_LOCAL)
#endif
            {
              ret = psock_local_send(psock, buf, len, flags);
            }
#endif /* CONFIG_NET_LOCAL_STREAM */

#ifdef CONFIG_NET_TCP
#ifdef CONFIG_NET_LOCAL_STREAM
          else
#endif
            {
              ret = psock_tcp_send(psock, buf, len);
            }
#endif /* CONFIG_NET_TCP */
        }
        break;
#endif /* CONFIG_NET_TCP || CONFIG_NET_LOCAL_STREAM */

#ifdef CONFIG_NET_UDP
      case SOCK_DGRAM:
        {
#ifdef CONFIG_NET_LOCAL_DGRAM
#ifdef CONFIG_NET_UDP
          if (psock->s_domain == PF_LOCAL)
#endif
            {
#warning Missing logic
              ret = -ENOSYS;
            }
#endif /* CONFIG_NET_LOCAL_DGRAM */

#ifdef CONFIG_NET_UDP
#ifdef CONFIG_NET_LOCAL_DGRAM
          else
#endif
            {
              ret = psock_udp_send(psock, buf, len);
            }
#endif /* CONFIG_NET_UDP */
        }
        break;
#endif /* CONFIG_NET_UDP */

      default:
        {
          /* EDESTADDRREQ.  Signifies that the socket is not connection-mode
           * and no peer address is set.
           */

          ret = -EDESTADDRREQ;
        }
        break;
    }

  leave_cancellation_point();
  return ret;
}
Example #8
File: fs_close.c Project: a1ien/nuttx
int close(int fd)
{
  int errcode;
#if CONFIG_NFILE_DESCRIPTORS > 0
  int ret;
#endif

  /* close() is a cancellation point */

  (void)enter_cancellation_point();

#if CONFIG_NFILE_DESCRIPTORS > 0
  /* Did we get a valid file descriptor? */

  if ((unsigned int)fd >= CONFIG_NFILE_DESCRIPTORS)
#endif
    {
      /* Close a socket descriptor */

#if defined(CONFIG_NET) && CONFIG_NSOCKET_DESCRIPTORS > 0
      if ((unsigned int)fd < (CONFIG_NFILE_DESCRIPTORS+CONFIG_NSOCKET_DESCRIPTORS))
        {
          ret = net_close(fd);
          leave_cancellation_point();
          return ret;
        }
      else
#endif
        {
          errcode = EBADF;
          goto errout;
        }
    }

#if CONFIG_NFILE_DESCRIPTORS > 0
  /* Close the driver or mountpoint.  NOTES: (1) there is no
   * exclusion mechanism here, the driver or mountpoint must be
   * able to handle concurrent operations internally, (2) The driver
   * may have been opened numerous times (for different file
   * descriptors) and must also handle being closed numerous times.
   * (3) for the case of the mountpoint, we depend on the close
   * methods being identical in signature and position in the operations
   * vtable.
   */

  ret = files_close(fd);
  if (ret < 0)
    {
      /* An error occurred while closing the driver */

      errcode = -ret;
      goto errout;
    }

  leave_cancellation_point();
  return OK;

#endif

errout:
  set_errno(errcode);
  leave_cancellation_point();
  return ERROR;
}
Example #9
int fcntl(int fd, int cmd, ...)
{
  FAR struct file *filep;
  va_list ap;
  int ret;

  /* fcntl() is a cancellation point */

  (void)enter_cancellation_point();

  /* Setup to access the variable argument list */

  va_start(ap, cmd);

  /* Did we get a valid file descriptor? */

#if CONFIG_NFILE_DESCRIPTORS > 0
  if ((unsigned int)fd < CONFIG_NFILE_DESCRIPTORS)
    {
      /* Get the file structure corresponding to the file descriptor. */

      ret = fs_getfilep(fd, &filep);
      if (ret >= 0)
        {
          DEBUGASSERT(filep != NULL);

          /* Let file_vfcntl() do the real work.  The errno is not set on
           * failures.
           */

          ret = file_vfcntl(filep, cmd, ap);
        }
    }
  else
#endif
    {
      /* No... check for operations on a socket descriptor */

#if defined(CONFIG_NET) && CONFIG_NSOCKET_DESCRIPTORS > 0
      if ((unsigned int)fd < (CONFIG_NFILE_DESCRIPTORS+CONFIG_NSOCKET_DESCRIPTORS))
        {
          /* Yes.. defer socket descriptor operations to net_vfcntl(). The
           * errno is not set on failures.
           */

          ret = net_vfcntl(fd, cmd, ap);
        }
      else
#endif
        {
          /* No.. this descriptor number is out of range */

          ret = -EBADF;
        }
    }

  va_end(ap);

  if (ret < 0)
    {
      set_errno(-ret);
      ret = ERROR;
    }

  leave_cancellation_point();
  return ret;
}
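Both branches above (file descriptors via file_vfcntl(), socket descriptors via net_vfcntl()) funnel through the same errno/ERROR mapping at the end. A minimal, hypothetical caller sketch using fcntl() in the usual POSIX way (set_nonblocking() is an assumption, not project code):

#include <fcntl.h>

int set_nonblocking(int fd)
{
  int flags = fcntl(fd, F_GETFL, 0);   /* Read the current status flags */
  if (flags < 0)
    {
      return -1;                       /* errno was set by fcntl() */
    }

  return fcntl(fd, F_SETFL, flags | O_NONBLOCK);
}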
Example #10
File: sig_nanosleep.c Project: a1ien/nuttx
int nanosleep(FAR const struct timespec *rqtp, FAR struct timespec *rmtp)
{
  irqstate_t flags;
  systime_t starttick;
  sigset_t set;
  struct siginfo value;
  int errval;
#ifdef CONFIG_DEBUG_ASSERTIONS /* Warning avoidance */
  int ret;
#endif

  /* nanosleep() is a cancellation point */

  (void)enter_cancellation_point();

  if (!rqtp || rqtp->tv_nsec < 0 || rqtp->tv_nsec >= 1000000000)
    {
      errval = EINVAL;
      goto errout;
    }

  /* Get the start time of the wait.  Interrupts are disabled to prevent
   * timer interrupts while we do tick-related calculations before and
   * after the wait.
   */

  flags     = enter_critical_section();
  starttick = clock_systimer();

  /* Set up for the sleep.  Using the empty set means that we are not
   * waiting for any particular signal.  However, any unmasked signal can
   * still awaken sigtimedwait().
   */

  (void)sigemptyset(&set);

  /* nanosleep is a simple application of sigtimedwait. */

#ifdef CONFIG_DEBUG_ASSERTIONS /* Warning avoidance */
  ret = sigtimedwait(&set, &value, rqtp);
#else
  (void)sigtimedwait(&set, &value, rqtp);
#endif

  /* sigtimedwait() cannot succeed.  It should always return error with
   * either (1) EAGAIN meaning that the timeout occurred, or (2) EINTR
   * meaning that some other unblocked signal was caught.
   */

  errval = get_errno();
  DEBUGASSERT(ret < 0 && (errval == EAGAIN || errval == EINTR));

  if (errval == EAGAIN)
    {
      /* The timeout "error" is the normal, successful result */

      leave_critical_section(flags);
      leave_cancellation_point();
      return OK;
    }

  /* If we get here, the wait has failed because we were awakened by a
   * signal.  Return the amount of "unwaited" time if rmtp is non-NULL.
   */

  if (rmtp)
    {
      systime_t elapsed;
      systime_t remaining;
      int ticks;

      /* REVISIT: The conversion from time to ticks and back could
       * be avoided.  clock_timespec_subtract() would be used instead
       * to get the time difference.
       */

      /* First get the number of clock ticks that we were requested to
       * wait.
       */

      (void)clock_time2ticks(rqtp, &ticks);

      /* Get the number of ticks that we actually waited */

      elapsed = clock_systimer() - starttick;

      /* The difference between the number of ticks that we were requested
       * to wait and the number of ticks that we actually waited is the
       * amount of time that we failed to wait.
       */

      if (elapsed >= (uint32_t)ticks)
        {
          remaining = 0;
        }
      else
        {
          remaining = (uint32_t)ticks - elapsed;
        }

      (void)clock_ticks2time((int)remaining, rmtp);
    }

  leave_critical_section(flags);

errout:
  set_errno(errval);
  leave_cancellation_point();
  return ERROR;
}
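The implementation above returns OK when the timeout (EAGAIN from sigtimedwait()) fires, and ERROR with errno set to EINTR plus a populated rmtp when a signal interrupts the sleep. A minimal, hypothetical caller sketch that relies only on that POSIX contract to sleep through interruptions (sleep_full() is an assumption, not project code):

#include <time.h>
#include <errno.h>

int sleep_full(struct timespec req)
{
  struct timespec rem;

  while (nanosleep(&req, &rem) < 0)
    {
      if (errno != EINTR)
        {
          return -1;        /* EINVAL or some other real failure */
        }

      req = rem;            /* Resume with the unslept remainder */
    }

  return 0;
}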
Example #11
File: sched_waitpid.c Project: a1ien/nuttx
pid_t waitpid(pid_t pid, int *stat_loc, int options)
{
  FAR struct tcb_s *rtcb = this_task();
  FAR struct tcb_s *ctcb;
#ifdef CONFIG_SCHED_CHILD_STATUS
  FAR struct child_status_s *child;
  bool retains;
#endif
  FAR struct siginfo info;
  sigset_t set;
  int errcode;
  int ret;

  DEBUGASSERT(stat_loc);

  /* waitpid() is a cancellation point */

  (void)enter_cancellation_point();

  /* None of the options are supported */

#ifdef CONFIG_DEBUG_FEATURES
  if (options != 0)
    {
      set_errno(ENOSYS);
      leave_cancellation_point();
      return ERROR;
    }
#endif

  /* Create a signal set that contains only SIGCHLD */

  (void)sigemptyset(&set);
  (void)sigaddset(&set, SIGCHLD);

  /* Disable pre-emption so that nothing changes while the loop executes */

  sched_lock();

  /* Verify that this task actually has children and that the requested PID
   * is actually a child of this task.
   */

#ifdef CONFIG_SCHED_CHILD_STATUS
  /* Does this task retain child status? */

  retains = ((rtcb->group->tg_flags & GROUP_FLAG_NOCLDWAIT) == 0);

  if (rtcb->group->tg_children == NULL && retains)
    {
      errcode = ECHILD;
      goto errout_with_errno;
    }
  else if (pid != (pid_t)-1)
    {
      /* Get the TCB corresponding to this PID and make sure that the
       * thread is our child.
       */

      ctcb = sched_gettcb(pid);

#ifdef HAVE_GROUP_MEMBERS
      if (ctcb == NULL || ctcb->group->tg_pgid != rtcb->group->tg_gid)
#else
      if (ctcb == NULL || ctcb->group->tg_ppid != rtcb->pid)
#endif
        {
          errcode = ECHILD;
          goto errout_with_errno;
        }

      /* Does this task retain child status? */

      if (retains)
        {
          /* Check if this specific pid has allocated child status? */

          if (group_findchild(rtcb->group, pid) == NULL)
            {
              errcode = ECHILD;
              goto errout_with_errno;
            }
        }
    }

#else /* CONFIG_SCHED_CHILD_STATUS */

  if (rtcb->group->tg_nchildren == 0)
    {
      /* There are no children */

      errcode = ECHILD;
      goto errout_with_errno;
    }
  else if (pid != (pid_t)-1)
    {
      /* Get the TCB corresponding to this PID and make sure that the
       * thread is our child.
       */

      ctcb = sched_gettcb(pid);

#ifdef HAVE_GROUP_MEMBERS
      if (ctcb == NULL || ctcb->group->tg_pgid != rtcb->group->tg_gid)
#else
      if (ctcb == NULL || ctcb->group->tg_ppid != rtcb->pid)
#endif
        {
          errcode = ECHILD;
          goto errout_with_errno;
        }
    }

#endif /* CONFIG_SCHED_CHILD_STATUS */

  /* Loop until the child that we are waiting for dies */

  for (; ; )
    {
#ifdef CONFIG_SCHED_CHILD_STATUS
      /* Check if the task has already died. Signals are not queued in
       * NuttX.  So a possibility is that the child has died and we
       * missed the death of child signal (we got some other signal
       * instead).
       */

      if (pid == (pid_t)-1)
        {
          /* We are waiting for any child, check if there are still
           * children.
           */

          DEBUGASSERT(!retains || rtcb->group->tg_children);
          if (retains && (child = group_exitchild(rtcb->group)) != NULL)
            {
              /* A child has exited.  Apparently we missed the signal.
               * Return the saved exit status.
               */

              *stat_loc = child->ch_status << 8;

              /* Discard the child entry and break out of the loop */

              (void)group_removechild(rtcb->group, child->ch_pid);
              group_freechild(child);
              break;
            }
        }

      /* We are waiting for a specific PID. Does this task retain child status? */

      else if (retains)
        {
          /* Get the current status of the child task. */

          child = group_findchild(rtcb->group, pid);
          DEBUGASSERT(child);

          /* Did the child exit? */

          if ((child->ch_flags & CHILD_FLAG_EXITED) != 0)
            {
              /* The child has exited. Return the saved exit status */

              *stat_loc = child->ch_status << 8;

              /* Discard the child entry and break out of the loop */

              (void)group_removechild(rtcb->group, pid);
              group_freechild(child);
              break;
            }
        }
      else
        {
          /* We can use kill() with signal number 0 to determine if that
           * task is still alive.
           */

          ret = kill(pid, 0);
          if (ret < 0)
            {
              /* It is no longer running.  We know that the child task
               * was running okay when we started, so we must have lost
               * the signal.  In this case, we know that the task exit'ed,
               * but we do not know its exit status.  It would be better
               * to report ECHILD than bogus status.
               */

              errcode = ECHILD;
              goto errout_with_errno;
            }
        }

#else  /* CONFIG_SCHED_CHILD_STATUS */

      /* Check if the task has already died. Signals are not queued in
       * NuttX.  So a possibility is that the child has died and we
       * missed the death of child signal (we got some other signal
       * instead).
       */

      if (rtcb->group->tg_nchildren == 0 ||
          (pid != (pid_t)-1 && (ret = kill(pid, 0)) < 0))
        {
          /* We know that the child task was running okay when we started,
           * so we must have lost the signal.  What can we do?
           * Let's return ECHILD.. that is at least informative.
           */

          errcode = ECHILD;
          goto errout_with_errno;
        }

#endif /* CONFIG_SCHED_CHILD_STATUS */

      /* Wait for any death-of-child signal */

      ret = sigwaitinfo(&set, &info);
      if (ret < 0)
        {
          goto errout_with_lock;
        }

      /* Was this the death of the thread we were waiting for? In the case
       * of pid == (pid_t)-1, we are waiting for any child thread.
       */

      if (info.si_signo == SIGCHLD &&
         (pid == (pid_t)-1 || info.si_pid == pid))
        {
          /* Yes... return the status and PID (in the event it was -1) */

          *stat_loc = info.si_status << 8;
          pid = info.si_pid;
          break;
        }
    }

  leave_cancellation_point();
  sched_unlock();
  return (int)pid;

errout_with_errno:
  set_errno(errcode);

errout_with_lock:
  leave_cancellation_point();
  sched_unlock();
  return ERROR;
}
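The status word stored above is the child exit code shifted left by eight bits, which is exactly what the standard WEXITSTATUS() macro extracts. A minimal, hypothetical caller sketch (reap_child() is an assumption, not project code):

#include <sys/wait.h>
#include <errno.h>
#include <stdio.h>

int reap_child(pid_t child)
{
  int status;

  if (waitpid(child, &status, 0) < 0)
    {
      printf("waitpid failed: %d\n", errno);   /* e.g. ECHILD */
      return -1;
    }

  return WEXITSTATUS(status);    /* Recovers the value stored as status << 8 */
}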
Example #12
File: sched_waitpid.c Project: a1ien/nuttx
pid_t waitpid(pid_t pid, int *stat_loc, int options)
{
  FAR struct tcb_s *ctcb;
  FAR struct task_group_s *group;
  bool mystat = false;
  int errcode;
  int ret;

  DEBUGASSERT(stat_loc);

  /* waitpid() is a cancellation point */

  (void)enter_cancellation_point();

  /* None of the options are supported */

#ifdef CONFIG_DEBUG_FEATURES
  if (options != 0)
    {
      set_errno(ENOSYS);
      leave_cancellation_point();
      return ERROR;
    }
#endif

  /* Disable pre-emption so that nothing changes in the following tests */

  sched_lock();

  /* Get the TCB corresponding to this PID */

  ctcb = sched_gettcb(pid);
  if (ctcb == NULL)
    {
      errcode = ECHILD;
      goto errout_with_errno;
    }

  /* Then the task group corresponding to this PID */

  group = ctcb->group;
  DEBUGASSERT(group);

  /* Lock this group so that it cannot be deleted until the wait completes */

  group_addwaiter(group);

  /* "If more than one thread is suspended in waitpid() awaiting termination of
   * the same process, exactly one thread will return the process status at the
   * time of the target process termination."  Hmmm.. what do we return to the
   * others?
   */

  if (stat_loc != NULL && group->tg_statloc == NULL)
    {
      group->tg_statloc = stat_loc;
      mystat = true;
    }

  /* Then wait for the task to exit */

  if (options & WNOHANG)
    {
      /* Don't wait if status is not available */

      ret = sem_trywait(&group->tg_exitsem);
      group_delwaiter(group);

      if (ret < 0)
        {
          pid = 0;
        }
    }
  else
    {
      /* Wait if necessary for status to become available */

      ret = sem_wait(&group->tg_exitsem);
      group_delwaiter(group);

      if (ret < 0)
        {
          /* Unlock pre-emption and return the ERROR (sem_wait has already set
           * the errno).  Handle the awkward case of whether or not we need to
           * nullify the stat_loc value.
           */

          if (mystat)
            {
              group->tg_statloc = NULL;
            }

          goto errout;
        }
    }

  /* On success, return the PID */

  leave_cancellation_point();
  sched_unlock();
  return pid;

errout_with_errno:
  set_errno(errcode);
errout:
  leave_cancellation_point();
  sched_unlock();
  return ERROR;
}
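This variant also handles WNOHANG via sem_trywait(): when status is not yet available, waitpid() returns 0 instead of blocking. A minimal, hypothetical polling sketch built on that behavior (child_done() is an assumption, not project code):

#include <sys/wait.h>

/* Returns 1 when the child has exited (status filled in), 0 if it is still
 * running, -1 on error.
 */

int child_done(pid_t child, int *status)
{
  pid_t ret = waitpid(child, status, WNOHANG);

  if (ret == 0)
    {
      return 0;        /* Still running; the sem_trywait() path above failed */
    }

  return (ret < 0) ? -1 : 1;
}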
Example #13
File: fs_select.c Project: a1ien/nuttx
int select(int nfds, FAR fd_set *readfds, FAR fd_set *writefds,
           FAR fd_set *exceptfds, FAR struct timeval *timeout)
{
  struct pollfd *pollset;
  int errcode = OK;
  int fd;
  int npfds;
  int msec;
  int ndx;
  int ret;

  /* select() is a cancellation point */

  (void)enter_cancellation_point();

  /* How many pollfd structures do we need to allocate? */

  for (fd = 0, npfds = 0; fd < nfds; fd++)
    {
      /* Check if any monitor operation is requested on this fd */

      if ((readfds   && FD_ISSET(fd, readfds))  ||
          (writefds  && FD_ISSET(fd, writefds)) ||
          (exceptfds && FD_ISSET(fd, exceptfds)))
        {
          /* Yes.. increment the count of pollfds structures needed */

          npfds++;
        }
    }

  /* Allocate the descriptor list for poll() */

  pollset = (struct pollfd *)kmm_zalloc(npfds * sizeof(struct pollfd));
  if (!pollset)
    {
      set_errno(ENOMEM);
      leave_cancellation_point();
      return ERROR;
    }

  /* Initialize the descriptor list for poll() */

  for (fd = 0, ndx = 0; fd < nfds; fd++)
    {
      int incr = 0;

      /* The readfds set holds the set of FDs that the caller can be assured
       * of reading from without blocking.  Note that POLLHUP is included as
       * a read-able condition.  POLLHUP will be reported at the end-of-file
       * or when a connection is lost.  In either case, the read() can then
       * be performed without blocking.
       */

      if (readfds && FD_ISSET(fd, readfds))
        {
          pollset[ndx].fd      = fd;
          pollset[ndx].events |= POLLIN;
          incr                 = 1;
        }

      /* The writefds set holds the set of FDs that the caller can be assured
       * of writing to without blocking.
       */

      if (writefds && FD_ISSET(fd, writefds))
        {
          pollset[ndx].fd      = fd;
          pollset[ndx].events |= POLLOUT;
          incr                 = 1;
        }

      /* The exceptfds set holds the set of FDs that are watched for exceptions */

      if (exceptfds && FD_ISSET(fd, exceptfds))
        {
          pollset[ndx].fd      = fd;
          incr                 = 1;
        }

      ndx += incr;
    }

  DEBUGASSERT(ndx == npfds);

  /* Convert the timeout to milliseconds */

  if (timeout)
    {
      /* Calculate the timeout in milliseconds */

      msec = timeout->tv_sec * 1000 + timeout->tv_usec / 1000;
    }
  else
    {
      /* Any negative value of msec means no timeout */

      msec = -1;
    }

  /* Then let poll do all of the real work. */

  ret = poll(pollset, npfds, msec);
  if (ret < 0)
    {
      /* poll() failed! Save the errno value */

      errcode = get_errno();
    }

  /* Now set up the return values */

  if (readfds)
    {
      memset(readfds, 0, sizeof(fd_set));
    }

  if (writefds)
    {
      memset(writefds, 0, sizeof(fd_set));
    }

  if (exceptfds)
    {
      memset(exceptfds, 0, sizeof(fd_set));
    }

  /* Convert the poll descriptor list back into selects 3 bitsets */

  if (ret > 0)
    {
      ret = 0;
      for (ndx = 0; ndx < npfds; ndx++)
        {
          /* Check for read conditions.  Note that POLLHUP is included as a
           * read condition.  POLLHUP will be reported when no more data will
           * be available (such as when a connection is lost).  In either
           * case, the read() can then be performed without blocking.
           */

          if (readfds)
            {
              if (pollset[ndx].revents & (POLLIN | POLLHUP))
                {
                  FD_SET(pollset[ndx].fd, readfds);
                  ret++;
                }
            }

          /* Check for write conditions */

          if (writefds)
            {
              if (pollset[ndx].revents & POLLOUT)
                {
                  FD_SET(pollset[ndx].fd, writefds);
                  ret++;
                }
            }

          /* Check for exceptions */

          if (exceptfds)
            {
              if (pollset[ndx].revents & POLLERR)
                {
                  FD_SET(pollset[ndx].fd, exceptfds);
                  ret++;
                }
            }
        }
    }

  kmm_free(pollset);

  /* Did poll() fail above? */

  if (ret < 0)
    {
      /* Yes.. restore the errno value */

      set_errno(errcode);
    }

  leave_cancellation_point();
  return ret;
}
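select() above is a thin adapter over poll(): the fd_sets are counted, converted to a pollfd array, the timeval is turned into milliseconds, and the revents are mapped back into the three bitsets. A minimal, hypothetical caller sketch (wait_readable() and the two-second timeout are assumptions, not project code):

#include <sys/select.h>
#include <errno.h>
#include <stdio.h>

int wait_readable(int fd)
{
  fd_set rfds;
  struct timeval tv;
  int ret;

  FD_ZERO(&rfds);
  FD_SET(fd, &rfds);

  tv.tv_sec  = 2;              /* Converted to 2000 ms for poll() above */
  tv.tv_usec = 0;

  ret = select(fd + 1, &rfds, NULL, NULL, &tv);
  if (ret < 0)
    {
      printf("select failed: %d\n", errno);
      return -1;
    }

  return ret;                  /* 0 on timeout, 1 if fd is readable */
}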
Example #14
int mq_waitsend(mqd_t mqdes)
{
  FAR struct tcb_s *rtcb;
  FAR struct mqueue_inode_s *msgq;

  /* mq_waitsend() is not a cancellation point, but it is always called from
   * a cancellation point.
   */

  if (enter_cancellation_point())
    {
#ifndef CONFIG_CANCELLATION_POINTS /* Not reachable in this case */
      /* If there is a pending cancellation, then do not perform
       * the wait.  Exit now with ECANCELED.
       */

      set_errno(ECANCELED);
      leave_cancellation_point();
      return ERROR;
#endif
    }

  /* Get a pointer to the message queue */

  msgq = mqdes->msgq;

  /* Verify that the queue is indeed full as the caller thinks */

  if (msgq->nmsgs >= msgq->maxmsgs)
    {
      /* Should we block until there is sufficient space in the
       * message queue?
       */

      if ((mqdes->oflags & O_NONBLOCK) != 0)
        {
          /* No... We will return an error to the caller. */

          set_errno(EAGAIN);
          leave_cancellation_point();
          return ERROR;
        }

      /* Yes... We will not return control until the message queue is
       * available or we receive a signal or a timeout occurs.
       */

      else
        {
          /* Loop until there are fewer than max allowable messages in the
           * receiving message queue
           */

          while (msgq->nmsgs >= msgq->maxmsgs)
            {
              /* Block until the message queue is no longer full.
               * When we are unblocked, we will try again
               */

              rtcb = this_task();
              rtcb->msgwaitq = msgq;
              msgq->nwaitnotfull++;

              set_errno(OK);
              up_block_task(rtcb, TSTATE_WAIT_MQNOTFULL);

              /* When we resume at this point, either (1) the message queue
               * is no longer empty, or (2) the wait has been interrupted by
               * a signal.  We can detect the latter case by examining the
               * errno value (should be EINTR or ETIMEDOUT).
               */

              if (get_errno() != OK)
                {
                  leave_cancellation_point();
                  return ERROR;
                }
            }
        }
    }

  leave_cancellation_point();
  return OK;
}
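With O_NONBLOCK set, the full-queue path above fails immediately with EAGAIN instead of blocking. A minimal, hypothetical caller sketch that treats EAGAIN as "try again later" (try_send() is an assumption, not project code; the queue is assumed to have been opened with O_NONBLOCK):

#include <mqueue.h>
#include <stddef.h>
#include <errno.h>
#include <stdio.h>

int try_send(mqd_t mq, const char *msg, size_t len)
{
  if (mq_send(mq, msg, len, 0) < 0)
    {
      if (errno == EAGAIN)
        {
          printf("queue full, try again later\n");
          return 0;
        }

      return -1;               /* Some other failure */
    }

  return 1;                    /* Message queued */
}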
Example #15
File: pthread_join.c Project: a1ien/nuttx
int pthread_join(pthread_t thread, FAR pthread_addr_t *pexit_value)
{
  FAR struct tcb_s *rtcb = this_task();
  FAR struct task_group_s *group = rtcb->group;
  FAR struct join_s *pjoin;
  int ret;

  sinfo("thread=%d group=%p\n", thread, group);
  DEBUGASSERT(group);

  /* pthread_join() is a cancellation point */

  (void)enter_cancellation_point();

  /* First make sure that this is not an attempt to join to
   * ourself.
   */

  if ((pid_t)thread == getpid())
    {
      leave_cancellation_point();
      return EDEADLK;
    }

  /* Make sure no other task is mucking with the data structures
   * while we are performing the following operations.  NOTE:
   * we can be also sure that pthread_exit() will not execute
   * because it will also attempt to get this semaphore.
   */

  (void)pthread_takesemaphore(&group->tg_joinsem);

  /* Find the join information associated with this thread.
   * This can fail for one of three reasons:  (1) There is no
   * thread associated with 'thread,' (2) the thread is a task
   * and does not have join information, or (3) the thread
   * was detached and has exited.
   */

  pjoin = pthread_findjoininfo(group, (pid_t)thread);
  if (!pjoin)
    {
      /* Determine what kind of error to return */

      FAR struct tcb_s *tcb = sched_gettcb((pthread_t)thread);

      serr("ERROR: Could not find thread data\n");

      /* Case (1) or (3) -- we can't tell which.  Assume (3) */

      if (!tcb)
        {
          ret = ESRCH;
        }

      /* The thread is still active but has no join info.  In that
       * case, it must be a task and not a pthread.
       */

      else
        {
          ret = EINVAL;
        }

      (void)pthread_givesemaphore(&group->tg_joinsem);
    }
  else
    {
      /* We found the join info structure.  Increment the reference count
       * on the join structure.  This will keep things stable while we do
       * the operations that follow.
       */

      sched_lock();
      pjoin->crefs++;

      /* Check if the thread is still running.  If not, then things are
       * simpler.  There are still race conditions to be concerned with.
       * For example, there could be multiple threads executing in the
       * 'else' block below when we enter!
       */

      if (pjoin->terminated)
        {
          sinfo("Thread has terminated\n");

          /* Get the thread exit value from the terminated thread. */

          if (pexit_value)
            {
              sinfo("exit_value=0x%p\n", pjoin->exit_value);
              *pexit_value = pjoin->exit_value;
            }
        }
      else
        {
          sinfo("Thread is still running\n");

          /* Relinquish the data set semaphore.  Since pre-emption is
           * disabled, we can be certain that no task has the
           * opportunity to run between the time we relinquish the
           * join semaphore and the time that we wait on the thread exit
           * semaphore.
           */

          (void)pthread_givesemaphore(&group->tg_joinsem);

          /* Take the thread's thread exit semaphore.  We will sleep here
           * until the thread exits.  We need to exercise caution because
           * there could be multiple threads waiting here for the same
           * pthread to exit.
           */

          (void)pthread_takesemaphore(&pjoin->exit_sem);

          /* The thread has exited! Get the thread exit value */

          if (pexit_value)
            {
             *pexit_value = pjoin->exit_value;
              sinfo("exit_value=0x%p\n", pjoin->exit_value);
            }

          /* Post the thread's data semaphore so that the exiting thread
           * will know that we have received the data.
           */

          (void)pthread_givesemaphore(&pjoin->data_sem);

          /* Retake the join semaphore, we need to hold this when
           * pthread_destroyjoin is called.
           */

          (void)pthread_takesemaphore(&group->tg_joinsem);
        }

      /* Pre-emption is okay now. The logic still cannot be re-entered
       * because we hold the join semaphore
       */

      sched_unlock();

      /* Release our reference to the join structure and, if the reference
       * count decrements to zero, deallocate the join structure.
       */

      if (--pjoin->crefs <= 0)
        {
          (void)pthread_destroyjoin(group, pjoin);
        }

      (void)pthread_givesemaphore(&group->tg_joinsem);
      ret = OK;
    }

  leave_cancellation_point();
  sinfo("Returning %d\n", ret);
  return ret;
}
Example #16
int pthread_cond_timedwait(FAR pthread_cond_t *cond, FAR pthread_mutex_t *mutex, FAR const struct timespec *abstime)
{
	FAR struct tcb_s *rtcb = this_task();
	int ticks;
	int mypid = (int)getpid();
	irqstate_t int_state;
	uint16_t oldstate;
	int ret = OK;
	int status;

	trace_begin(TTRACE_TAG_TASK, "pthread_cond_timedwait");
	svdbg("cond=0x%p mutex=0x%p abstime=0x%p\n", cond, mutex, abstime);

	DEBUGASSERT(rtcb->waitdog == NULL);

	/* pthread_cond_timedwait() is a cancellation point */
	(void)enter_cancellation_point();

	/* Make sure that non-NULL references were provided. */

	if (!cond || !mutex) {
		ret = EINVAL;
	}

	/* Make sure that the caller holds the mutex */

	else if (mutex->pid != mypid) {
		ret = EPERM;
	}

	/* If no wait time is provided, this function degenerates to
	 * the same behavior as pthread_cond_wait().
	 */

	else if (!abstime) {
		ret = pthread_cond_wait(cond, mutex);
	}

	else {
		/* Create a watchdog */

		rtcb->waitdog = wd_create();
		if (!rtcb->waitdog) {
			ret = EINVAL;
		} else {
			svdbg("Give up mutex...\n");

			/* We must disable pre-emption and interrupts here so that
			 * the time stays valid until the wait begins.  This adds
			 * complexity because we must assure that interrupts and
			 * pre-emption are re-enabled correctly.
			 */

			sched_lock();
			int_state = irqsave();

			/* Convert the timespec to clock ticks.  We must disable pre-emption
			 * here so that this time stays valid until the wait begins.
			 */

			ret = clock_abstime2ticks(CLOCK_REALTIME, abstime, &ticks);
			if (ret) {
				/* Restore interrupts  (pre-emption will be enabled when
				 * we fall through the if/then/else)
				 */

				irqrestore(int_state);
			} else {
				/* Check the absolute time to wait.  If it is now or in the past, then
				 * just return with the timedout condition.
				 */

				if (ticks <= 0) {
					/* Restore interrupts and indicate that we have already timed out.
					 * (pre-emption will be enabled when we fall through the
					 * if/then/else)
					 */

					irqrestore(int_state);
					ret = ETIMEDOUT;
				} else {
					/* Give up the mutex */

					mutex->pid = -1;
					ret = pthread_mutex_give(mutex);
					if (ret != 0) {
						/* Restore interrupts  (pre-emption will be enabled when
						 * we fall through the if/then/else)
						 */

						irqrestore(int_state);
					} else {
						/* Start the watchdog */

						wd_start(rtcb->waitdog, ticks, (wdentry_t)pthread_condtimedout, 2, (uint32_t)mypid, (uint32_t)SIGCONDTIMEDOUT);

						/* Take the condition semaphore.  Do not restore interrupts
						 * until we return from the wait.  This is necessary to
						 * make sure that the watchdog timer and the condition wait
						 * are started atomically.
						 */

						status = sem_wait((sem_t *)&cond->sem);

						/* Did we get the condition semaphore? */

						if (status != OK) {
							/* NO.. Handle the special case where the semaphore wait was
							 * awakened by the receipt of a signal -- presumably the
							 * signal posted by pthread_condtimedout().
							 */

							if (get_errno() == EINTR) {
								sdbg("Timedout!\n");
								ret = ETIMEDOUT;
							} else {
								ret = EINVAL;
							}
						}

						/* The interrupts stay disabled until after we sample the errno.
						 * This is because when debug is enabled and the console is used
						 * for debug output, then the errno can be altered by interrupt
						 * handling! (bad)
						 */

						irqrestore(int_state);
					}

					/* Reacquire the mutex (retaining the ret). */

					svdbg("Re-locking...\n");

					oldstate = pthread_disable_cancel();
					status = pthread_mutex_take(mutex, false);
					pthread_enable_cancel(oldstate);

					if (status == OK) {
						mutex->pid = mypid;
					} else if (ret == 0) {
						ret = status;
					}
				}

				/* Re-enable pre-emption (It is expected that interrupts
				 * have already been re-enabled in the above logic)
				 */

				sched_unlock();
			}

			/* We no longer need the watchdog */

			wd_delete(rtcb->waitdog);
			rtcb->waitdog = NULL;
		}
	}

	svdbg("Returning %d\n", ret);
	leave_cancellation_point();
	trace_end(TTRACE_TAG_TASK);
	return ret;
}
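The abstime argument above is an absolute CLOCK_REALTIME deadline that the implementation converts to ticks for the watchdog. A minimal, hypothetical caller sketch using the usual predicate loop (wait_ready() and the five-second deadline are assumptions, not project code):

#include <pthread.h>
#include <time.h>

int wait_ready(pthread_mutex_t *lock, pthread_cond_t *cond, int *ready)
{
  struct timespec abstime;
  int ret = 0;

  (void)clock_gettime(CLOCK_REALTIME, &abstime);
  abstime.tv_sec += 5;                 /* Deadline five seconds from now */

  (void)pthread_mutex_lock(lock);
  while (!*ready && ret == 0)
    {
      ret = pthread_cond_timedwait(cond, lock, &abstime);
    }

  (void)pthread_mutex_unlock(lock);
  return ret;                          /* 0 on success, ETIMEDOUT on timeout */
}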