Example #1
FAR char *getcwd(FAR char *buf, size_t size)
{
  char *pwd;

  /* Verify input parameters */

#ifdef CONFIG_DEBUG
  if (!buf || !size)
    {
      set_errno(EINVAL);
      return NULL;
    }
#endif

  /* If no working directory is defined, then default to the home directory.
   * Lock the scheduler while we access and copy the environment; getenv()
   * returns a pointer into the environment that another thread could
   * otherwise modify.
   */

  sched_lock();
  pwd = getenv("PWD");
  if (!pwd)
    {
      pwd = CONFIG_LIB_HOMEDIR;
    }

  /* Verify that the cwd will fit into the user-provided buffer */

  if (strlen(pwd) + 1 > size)
    {
      set_errno(ERANGE);
      sched_unlock();
      return NULL;
    }

  /* Copy the cwd to the user buffer */

  strcpy(buf, pwd);
  sched_unlock();
  return buf;
}
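A minimal caller sketch (the helper name is hypothetical; PATH_MAX comes from limits.h): getcwd() returns NULL with errno set to EINVAL or ERANGE on failure, so the result must be checked.

#include <limits.h>
#include <stdio.h>
#include <unistd.h>

int show_cwd(void)
{
  char buf[PATH_MAX];

  /* getcwd() fails with NULL/errno if buf is NULL or the cwd does not fit */

  if (getcwd(buf, sizeof(buf)) == NULL)
    {
      perror("getcwd");
      return -1;
    }

  printf("cwd: %s\n", buf);
  return 0;
}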
Example #2
int px4_task_spawn_cmd(const char *name, int scheduler, int priority, int stack_size, main_t entry, char *const argv[])
{
	int pid;

	sched_lock();

	/* create the task */
	pid = task_create(name, priority, stack_size, entry, argv);

	if (pid > 0) {

		/* configure the scheduler */
		struct sched_param param;

		param.sched_priority = priority;
		sched_setscheduler(pid, scheduler, &param);

		/* XXX do any other private task accounting here before the task starts */
	}

	sched_unlock();

	return pid;
}
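A hedged usage sketch: the entry point, task name, priority, and stack size below are illustrative, not taken from the original source. A positive return value is the new task's pid; a negative value means task_create() failed.

static int hello_main(int argc, char *argv[])
{
  printf("Hello from the spawned task\n");
  return 0;
}

int spawn_hello(void)
{
  return px4_task_spawn_cmd("hello",        /* Task name (illustrative) */
                            SCHED_DEFAULT,  /* Scheduling policy */
                            100,            /* Priority (illustrative) */
                            2048,           /* Stack size in bytes */
                            hello_main,
                            NULL);          /* No argv[] */
}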
Example #3
static int load_absmodule(FAR struct binary_s *bin)
{
  FAR struct binfmt_s *binfmt;
  int ret = -ENOENT;

  bdbg("Loading %s\n", bin->filename);

  /* Disabling pre-emption should be sufficient protection while accessing
   * the list of registered binary format handlers.
   */

  sched_lock();

  /* Traverse the list of registered binary format handlers.  Stop
   * when either (1) a handler recognizes and loads the format, or
   * (2) no handler recognizes the format.
   */

  for (binfmt = g_binfmts; binfmt; binfmt = binfmt->next)
    {
      /* Use this handler to try to load the format */

      ret = binfmt->load(bin);
      if (ret == OK)
        {
          /* Successfully loaded -- break out with ret == 0 */

          bvdbg("Successfully loaded module %s\n", bin->filename);
          dump_module(bin);
          break;
        }
    }

  sched_unlock();
  return ret;
}
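For context, a sketch of what one handler in that list might look like. Only the next and load members are visible in the snippet above; register_binfmt() is the usual NuttX registration entry point, but treat the call and the initializer layout as assumptions.

static int myfmt_load(FAR struct binary_s *bin)
{
  /* Inspect bin->filename (e.g. read its magic bytes).  Return OK if this
   * handler recognizes and loads the format, or a negated errno such as
   * -ENOEXEC so that the loop above moves on to the next handler.
   */

  return -ENOEXEC;
}

static struct binfmt_s g_myfmt =
{
  .next = NULL,       /* Linked into g_binfmts when registered */
  .load = myfmt_load,
};

/* Registered during bring-up, e.g.: register_binfmt(&g_myfmt); */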
Example #4
static void group_assigngid(FAR struct task_group_s *group)
{
	irqstate_t flags;
	gid_t gid;

	/* Pre-emption should already be enabled, but let's be paranoid and careful */

	sched_lock();

	/* Loop until we create a unique ID */

	for (;;) {
		/* Increment the ID counter.  This is global data so be extra paranoid. */

		flags = irqsave();
		gid = ++g_gidcounter;

		/* Check for overflow */

		if (gid <= 0) {
			g_gidcounter = 1;
			irqrestore(flags);
		} else {
			/* Does a task group with this ID already exist? */

			irqrestore(flags);
			if (group_findbygid(gid) == NULL) {
				/* Now assign this ID to the group and return */

				group->tg_gid = gid;
				sched_unlock();
				return;
			}
		}
	}
}
Example #5
File: vm.c Project: AndrewD/prex
/*
 * Deallocate memory region for specified address.
 *
 * The "addr" argument points to a memory region previously
 * allocated through a call to vm_allocate() or vm_map(). The
 * number of bytes freed is the number of bytes of the
 * allocated region. If the previous or the next region is free,
 * it is combined with them, and a larger free region is
 * created.
 */
int
vm_free(task_t task, void *addr)
{
	int err;

	sched_lock();
	if (!task_valid(task)) {
		err = ESRCH;
		goto out;
	}
	if (task != cur_task() && !task_capable(CAP_MEMORY)) {
		err = EPERM;
		goto out;
	}
	if (!user_area(addr)) {
		err = EFAULT;
		goto out;
	}

	err = do_free(task->map, addr);
 out:
	sched_unlock();
	return err;
}
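A hedged usage sketch pairing vm_free() with vm_allocate(); the four-argument vm_allocate(task, addr, size, anywhere) signature is assumed from the Prex API mentioned in the comment above.

int demo_alloc_free(void)
{
	void *buf;
	int err;

	err = vm_allocate(cur_task(), &buf, 4096, 1);	/* anywhere = 1 */
	if (err)
		return err;

	/* ... use the 4096-byte region ... */

	return vm_free(cur_task(), buf);	/* frees the whole region */
}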
Example #6
int syslog_flush_intbuffer(FAR const struct syslog_channel_s *channel,
                           bool force)
{
  syslog_putc_t putfunc;
  int ch;
  int ret = OK;

  /* Select which putc function to use for this flush */

  putfunc = force ? channel->sc_force : channel->sc_putc;

  /* This logic is performed with the scheduler disabled to protect from
   * concurrent modification by other tasks.
   */

  sched_lock();
  do
    {
      /* Transfer one character at a time.  This is inefficient, but is
       * done in this way to: (1) deal with concurrent modification of
       * the interrupt buffer from interrupt activity, (2) avoid keeping
       * interrupts disabled for a long time, and (3) handle wraparound
       * of the circular buffer indices.
       */

      ch = syslog_remove_intbuffer();
      if (ch != EOF)
        {
          ret = putfunc(ch);
        }
    }
  while (ch != EOF && ret >= 0);

  sched_unlock();
  return ret;
}
Example #7
int up_wdginitialize(void)
{
#if (defined(CONFIG_SAM34_WDT) && !defined(CONFIG_WDT_DISABLE_ON_RESET))
  int fd;
  int ret;

  /* Initialize and register the watchdog timer device */

  wdgvdbg("Initializing Watchdog driver...\n");
  sam_wdtinitialize(CONFIG_WATCHDOG_DEVPATH);

  /* Open the watchdog device */

  wdgvdbg("Opening.\n");
  fd = open(CONFIG_WATCHDOG_DEVPATH, O_RDONLY);
  if (fd < 0)
    {
      wdgdbg("open %s failed: %d\n", CONFIG_WATCHDOG_DEVPATH, errno);
      goto errout;
    }

  /* Set the watchdog timeout */

  wdgvdbg("Timeout = %d.\n", CONFIG_WDT_TIMEOUT);
  ret = ioctl(fd, WDIOC_SETTIMEOUT, (unsigned long)CONFIG_WDT_TIMEOUT);
  if (ret < 0)
    {
      wdgdbg("ioctl(WDIOC_SETTIMEOUT) failed: %d\n", errno);
      goto errout_with_dev;
    }

  /* Set the watchdog minimum time */

  wdgvdbg("MinTime = %d.\n", CONFIG_WDT_MINTIME);
  ret = ioctl(fd, WDIOC_MINTIME, (unsigned long)CONFIG_WDT_MINTIME);
  if (ret < 0)
    {
      wdgdbg("ioctl(WDIOC_MINTIME) failed: %d\n", errno);
      goto errout_with_dev;
    }

  /* Start Kicker task */

#if defined(CONFIG_WDT_THREAD)
  sched_lock();

  int taskid = KERNEL_THREAD(CONFIG_WDT_THREAD_NAME,
                             CONFIG_WDT_THREAD_PRIORITY,
                             CONFIG_WDT_THREAD_STACKSIZE,
                             (main_t)wdog_daemon, (FAR char * const *)NULL);

  ASSERT(taskid > 0);
  sched_unlock();
#endif
  return OK;
errout_with_dev:
  close(fd);
errout:
  return ERROR;
#else
  return -ENODEV;
#endif
}
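A sketch of what the wdog_daemon kicker might look like: WDIOC_KEEPALIVE is the usual NuttX "kick" ioctl, and kicking at half the timeout period is an illustrative choice, not taken from the original source.

static int wdog_daemon(int argc, char *argv[])
{
  int fd = open(CONFIG_WATCHDOG_DEVPATH, O_RDONLY);
  if (fd < 0)
    {
      return ERROR;
    }

  for (; ; )
    {
      /* Kick the watchdog, then sleep for half the timeout period
       * (CONFIG_WDT_TIMEOUT is in milliseconds; usleep() takes microseconds).
       */

      (void)ioctl(fd, WDIOC_KEEPALIVE, 0);
      usleep(CONFIG_WDT_TIMEOUT * 1000 / 2);
    }

  return OK;  /* Not reached */
}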
Example #8
int aio_cancel(int fildes, FAR struct aiocb *aiocbp)
{
	FAR struct aio_container_s *aioc;
	FAR struct aio_container_s *next;
	int status;
	int ret;

	/* Check if a non-NULL aiocbp was provided */

	/* Lock the scheduler so that no I/O events can complete on the worker
	 * thread until we complete this operation.
	 */

	ret = AIO_ALLDONE;
	sched_lock();
	aio_lock();

	if (aiocbp) {
		/* Check if the I/O has completed */

		if (aiocbp->aio_result == -EINPROGRESS) {
			/* No.. Find the container for this AIO control block */

			for (aioc = (FAR struct aio_container_s *)g_aio_pending.head; aioc && aioc->aioc_aiocbp != aiocbp; aioc = (FAR struct aio_container_s *)aioc->aioc_link.flink) ;

			/* Did we find the container for this aiocbp?  We should; the aio_result says
			 * that the transfer is pending.  If not we return AIO_ALLDONE.
			 */

			if (aioc) {
				/* Yes... attempt to cancel the I/O.  There are two
				 * possibilities: (1) the work has already been started and
				 * is no longer queued, or (2) the work has not been started
				 * and is still in the work queue.  Only the second case can
				 * be cancelled.  work_cancel() will return -ENOENT in the
				 * first case.
				 */

				status = work_cancel(LPWORK, &aioc->aioc_work);
				if (status >= 0) {
					aiocbp->aio_result = -ECANCELED;
					ret = AIO_CANCELED;
				} else {
					ret = AIO_NOTCANCELED;
				}

				/* Remove the container from the list of pending transfers */

				(void)aioc_decant(aioc);
			}
		}
	} else {
		/* No aiocbp.. cancel all outstanding I/O for the fildes */

		next = (FAR struct aio_container_s *)g_aio_pending.head;
		do {
			/* Find the next container with this AIO control block */

			for (aioc = next; aioc && aioc->aioc_aiocbp->aio_fildes != fildes; aioc = (FAR struct aio_container_s *)aioc->aioc_link.flink) ;

			/* Did we find the container?  We should; the aio_result says
			 * that the transfer is pending.  If not we return AIO_ALLDONE.
			 */

			if (aioc) {
				/* Yes... attempt to cancel the I/O.  There are two
				 * possibilities: (1) the work has already been started and
				 * is no longer queued, or (2) the work has not been started
				 * and is still in the work queue.  Only the second case can
				 * be cancelled.  work_cancel() will return -ENOENT in the
				 * first case.
				 */

				status = work_cancel(LPWORK, &aioc->aioc_work);

				/* Remove the container from the list of pending transfers */

				next = (FAR struct aio_container_s *)aioc->aioc_link.flink;
				aiocbp = aioc_decant(aioc);
				DEBUGASSERT(aiocbp);

				if (status >= 0) {
					aiocbp->aio_result = -ECANCELED;
					if (ret != AIO_NOTCANCELED) {
						ret = AIO_CANCELED;
					}
				} else {
					ret = AIO_NOTCANCELED;
				}
			}
		} while (aioc);
	}

	aio_unlock();
	sched_unlock();
	return ret;
}
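A hedged caller sketch (the helper is hypothetical): queue one asynchronous read, then try to cancel it. The return value distinguishes AIO_CANCELED, AIO_NOTCANCELED, and AIO_ALLDONE as in the logic above.

#include <aio.h>
#include <string.h>

static char g_buffer[64];

int cancel_demo(int fd)
{
	struct aiocb cb;

	memset(&cb, 0, sizeof(struct aiocb));
	cb.aio_fildes = fd;
	cb.aio_buf    = g_buffer;
	cb.aio_nbytes = sizeof(g_buffer);

	if (aio_read(&cb) < 0) {
		return ERROR;	/* Could not queue the read */
	}

	/* AIO_CANCELED, AIO_NOTCANCELED, or AIO_ALLDONE */

	return aio_cancel(fd, &cb);
}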
Example #9
void uipdriver_loop(void)
{
  /* netdev_read will return 0 on a timeout event and >0 on a data received event */

  g_sim_dev.d_len = netdev_read((unsigned char*)g_sim_dev.d_buf, CONFIG_NET_BUFSIZE);

  /* Disable preemption through the following so that it behaves a little
   * more like an interrupt (otherwise, the following logic gets pre-empted
   * and behaves oddly).
   */

  sched_lock();
  if (g_sim_dev.d_len > 0)
    {
      /* Data received event.  Check for valid Ethernet header with destination == our
       * MAC address
       */

      if (g_sim_dev.d_len > UIP_LLH_LEN && up_comparemac(BUF->ether_dhost, &g_sim_dev.d_mac) == 0)
        {
          /* We only accept IP packets of the configured type and ARP packets */

#ifdef CONFIG_NET_IPv6
          if (BUF->ether_type == htons(UIP_ETHTYPE_IP6))
#else
          if (BUF->ether_type == htons(UIP_ETHTYPE_IP))
#endif
            {
              uip_arp_ipin(&g_sim_dev);
              uip_input(&g_sim_dev);

             /* If the above function invocation resulted in data that
              * should be sent out on the network, the global variable
              * d_len is set to a value > 0.
              */

              if (g_sim_dev.d_len > 0)
                {
                  uip_arp_out(&g_sim_dev);
                  netdev_send(g_sim_dev.d_buf, g_sim_dev.d_len);
                }
            }
          else if (BUF->ether_type == htons(UIP_ETHTYPE_ARP))
            {
              uip_arp_arpin(&g_sim_dev);

              /* If the above function invocation resulted in data that
               * should be sent out on the network, the global variable
               * d_len is set to a value > 0.
               */

              if (g_sim_dev.d_len > 0)
                {
                  netdev_send(g_sim_dev.d_buf, g_sim_dev.d_len);
                }
            }
        }
    }

  /* Otherwise, it must be a timeout event */

  else if (timer_expired(&g_periodic_timer))
    {
      timer_reset(&g_periodic_timer);
      uip_timer(&g_sim_dev, sim_uiptxpoll, 1);
    }
  sched_unlock();
}
Example #10
int sigsuspend(FAR const sigset_t *set)
{
  FAR struct tcb_s *rtcb = (FAR struct tcb_s *)g_readytorun.head;
  sigset_t intersection;
  sigset_t saved_sigprocmask;
  FAR sigpendq_t *sigpend;
  irqstate_t saved_state;
  int unblocksigno;

  /* Several operations must be performed below:  We must determine if any
   * signal is pending and, if not, wait for the signal.  Since signals can
   * be posted from the interrupt level, there is a race condition that
   * can only be eliminated by disabling interrupts!
   */

  sched_lock();  /* Not necessary */
  saved_state = irqsave();

  /* Check if there is a pending signal corresponding to one of the
   * signals that will be unblocked by the new sigprocmask.
   */

  intersection = ~(*set) & sig_pendingset(rtcb);
  if (intersection != NULL_SIGNAL_SET)
    {
      /* One or more of the signals in intersections is sufficient to cause
       * us to not wait.  Pick the lowest numbered signal and mark it not
       * pending.
       */

      unblocksigno = sig_lowest(&intersection);
      sigpend = sig_removependingsignal(rtcb, unblocksigno);
      ASSERT(sigpend);

      sig_releasependingsignal(sigpend);
      irqrestore(saved_state);
    }
  else
    {
      /* It's time to wait. Save a copy of the old sigprocmask and install
       * the new (temporary) sigprocmask
       */

      saved_sigprocmask = rtcb->sigprocmask;
      rtcb->sigprocmask = *set;
      rtcb->sigwaitmask = NULL_SIGNAL_SET;

      /* And wait until one of the unblocked signals is posted */

      up_block_task(rtcb, TSTATE_WAIT_SIG);

      /* We are running again, restore the original sigprocmask */

      rtcb->sigprocmask = saved_sigprocmask;
      irqrestore(saved_state);

      /* Now, handle the (rare?) case where (a) a blocked signal was received
       * while the task was suspended but (b) restoring the original
       * sigprocmask will unblock the signal.
       */

      sig_unmaskpendingsignal();
    }

  sched_unlock();
  return ERROR;
}
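A usage sketch (the helper is hypothetical): block SIGUSR1, then atomically unblock it and wait. As the implementation shows, sigsuspend() always returns ERROR after the task resumes, so the return value carries no success indication.

#include <signal.h>

void wait_for_usr1(void)
{
  sigset_t blocked;
  sigset_t waitmask;

  /* Block SIGUSR1 while we do work that must not be interrupted */

  sigemptyset(&blocked);
  sigaddset(&blocked, SIGUSR1);
  sigprocmask(SIG_BLOCK, &blocked, NULL);

  /* ... non-interruptible work ... */

  /* Atomically unblock all signals and wait for one to be delivered */

  sigemptyset(&waitmask);
  (void)sigsuspend(&waitmask);
}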
Example #11
static void stm32_idlepm(void)
{
  static enum pm_state_e oldstate = PM_NORMAL;
  enum pm_state_e newstate;
  int ret;

  /* The following is logic that is done after the wake-up from PM_STANDBY
   * state.  It decides whether to go back to the PM_NORMAL or to the deeper
   * power-saving mode PM_SLEEP:  If the alarm expired with no "normal"
   * wake-up event, then PM_SLEEP is entered.
   *
   * Logically, this code belongs at the end of the PM_STANDBY case below,
   * but it does not work in that position for some unknown reason.
   */

  if (oldstate == PM_STANDBY)
    {
      /* Were we awakened by the alarm? */

#ifdef CONFIG_RTC_ALARM
      if (g_alarmwakeup)
        {
          /* Yes.. Go to SLEEP mode */

          newstate = PM_SLEEP;
        }
      else
#endif
        {
          /* Resume normal operation */

          newstate = PM_NORMAL;
        }
    }
  else
    {
      /* Let the PM system decide which power-saving level can be obtained */

      newstate = pm_checkstate();
    }

  /* Check for state changes */

  if (newstate != oldstate)
    {
      llvdbg("newstate= %d oldstate=%d\n", newstate, oldstate);

      sched_lock();

      /* Force the global state change */

      ret = pm_changestate(newstate);
      if (ret < 0)
        {
          /* The new state change failed, revert to the preceding state */

          (void)pm_changestate(oldstate);

          /* No state change... */

          goto errout;
        }

      /* Then perform board-specific, state-dependent logic here */

      switch (newstate)
        {
        case PM_NORMAL:
          {
            /* If we just awakened from PM_STANDBY mode, then reconfigure
             * clocking.
             */

            if (oldstate == PM_STANDBY)
              {
                /* Re-enable clocking */

                stm32_clockenable();

                /* The system timer was disabled while in PM_STANDBY or
                 * PM_SLEEP modes.  But the RTC is still running:  Reset
                 * the system time to the current RTC time.
                 */

#ifdef CONFIG_RTC
                clock_synchronize();
#endif
              }
          }
          break;

        case PM_IDLE:
          {
          }
          break;

        case PM_STANDBY:
          {
            /* Set the alarm as an EXTI Line */

#ifdef CONFIG_RTC_ALARM
            stm32_rtc_alarm(CONFIG_PM_ALARM_SEC, CONFIG_PM_ALARM_NSEC, true);
#endif
            /* Wait 10ms */

            up_mdelay(10);

            /* Enter the STM32 stop mode */

            (void)stm32_pmstop(false);

            /* We have been re-awakened by some event:  A button press?
             * An alarm?  Cancel any pending alarm and resume normal
             * operation.
             */

#ifdef CONFIG_RTC_ALARM
            stm32_exti_cancel();
            ret = stm32_rtc_cancelalarm();
            if (ret < 0)
              {
                lldbg("Warning: Cancel alarm failed\n");
              }
#endif
            /* Note:  See the additional PM_STANDBY related logic at the
             * beginning of this function.  That logic is executed after
             * this point.
             */
          }
          break;

        case PM_SLEEP:
          {
            /* We should not return from standby mode.  The only way out
             * of standby is via the reset path.
             */

            /* Configure the RTC alarm to Auto Reset the system */

#ifdef CONFIG_PM_SLEEP_WAKEUP
            stm32_rtc_alarm(CONFIG_PM_SLEEP_WAKEUP_SEC, CONFIG_PM_SLEEP_WAKEUP_NSEC, false);
#endif
            /* Wait 10ms */

            up_mdelay(10);

            /* Enter the STM32 standby mode */

            (void)stm32_pmstandby();
          }
          break;

        default:
          break;
        }

      /* Save the new state */

      oldstate = newstate;

errout:
      sched_unlock();
    }
}
Example #12
int pthread_mutex_unlock(FAR pthread_mutex_t *mutex)
{
  int ret = OK;

  sdbg("mutex=0x%p\n", mutex);

  if (!mutex)
    {
      ret = EINVAL;
    }
  else
    {
      /* Make sure the semaphore is stable while we make the following
       * checks.  This all needs to be one atomic action.
       */

      sched_lock();

      /* Does the calling thread own the semaphore? */

      if (mutex->pid != (int)getpid())
        {
          /* No... return an error (default behavior is like PTHREAD_MUTEX_ERRORCHECK) */

          sdbg("Holder=%d returning EPERM\n", mutex->pid);
          ret = EPERM;
        }


      /* Yes, the caller owns the semaphore.. Is this a recursive mutex? */

#ifdef CONFIG_MUTEX_TYPES
      else if (mutex->type == PTHREAD_MUTEX_RECURSIVE && mutex->nlocks > 1)
        {
          /* This is a recursive mutex and there are multiple locks held. Retain
           * the mutex lock, just decrement the count of locks held, and return
           * success.
           */
          mutex->nlocks--;
        }
#endif

      /* This is either a non-recursive mutex or is the outermost unlock of
       * a recursive mutex.
       */

      else
        {
          /* Nullify the pid and lock count then post the semaphore */

          mutex->pid    = -1;
#ifdef CONFIG_MUTEX_TYPES
          mutex->nlocks = 0;
#endif
          ret = pthread_givesemaphore((sem_t*)&mutex->sem);
        }
      sched_unlock();
    }

  sdbg("Returning %d\n", ret);
  return ret;
}
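A minimal caller sketch (the helper is hypothetical) showing the ownership rule enforced above: only the thread that holds the mutex may unlock it; any other thread gets EPERM.

#include <pthread.h>

static pthread_mutex_t g_lock = PTHREAD_MUTEX_INITIALIZER;

void locked_region(void)
{
  pthread_mutex_lock(&g_lock);

  /* ... critical section ... */

  pthread_mutex_unlock(&g_lock);  /* OK: we are the holder */
}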
Example #13
int task_restart(pid_t pid)
{
  FAR _TCB  *rtcb;
  FAR _TCB  *tcb;
  int        status;
  irqstate_t state;

  /* Make sure this task does not become ready-to-run while
   * we are futzing with its TCB
   */

  sched_lock();

  /* Check if the task to restart is the calling task */

  rtcb = (FAR _TCB*)g_readytorun.head;
  if ((pid == 0) || (pid == rtcb->pid))
    {
      /* Not implemented.  Re-enable pre-emption and return an error */

      sched_unlock();
      return ERROR;
    }

  /* We are restarting some other task than ourselves */

  else
    {
      /* Find the TCB associated with the matching pid */

      tcb = sched_gettcb(pid);
      if (!tcb)
        {
          /* There is no TCB with this pid.  Re-enable pre-emption and fail */

          sched_unlock();
          return ERROR;
        }

      /* Remove the TCB from whatever list it is in.  At this point, the
       * TCB should no longer be accessible to the system 
       */

      state = irqsave();
      dq_rem((FAR dq_entry_t*)tcb, (dq_queue_t*)g_tasklisttable[tcb->task_state].list);
      tcb->task_state = TSTATE_TASK_INVALID;
      irqrestore(state);

      /* Deallocate anything left in the TCB's queues */

      sig_cleanup(tcb); /* Deallocate Signal lists */

      /* Reset the current task priority  */

      tcb->sched_priority = tcb->init_priority;

      /* Reset the base task priority and the number of pending reprioritizations */

#ifdef CONFIG_PRIORITY_INHERITANCE
      tcb->base_priority  = tcb->init_priority;
#  if CONFIG_SEM_NNESTPRIO > 0
      tcb->npend_reprio   = 0;
#  endif
#endif

      /* Re-initialize the processor-specific portion of the TCB
       * This will reset the entry point and the start-up parameters
       */

      up_initial_state(tcb);

      /* Add the task to the inactive task list */

      dq_addfirst((FAR dq_entry_t*)tcb, (dq_queue_t*)&g_inactivetasks);
      tcb->task_state = TSTATE_TASK_INACTIVE;

      /* Activate the task */

      status = task_activate(tcb);
      if (status != OK)
        {
          dq_rem((FAR dq_entry_t*)tcb, (dq_queue_t*)&g_inactivetasks);
          sched_releasetcb(tcb);
          sched_unlock();
          return ERROR;
        }
    }

  sched_unlock();
  return OK;
}
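A hedged usage sketch (worker_main, the priority, and the stack size are illustrative): restart a previously created task by pid. Restarting the calling task (pid == 0 or our own pid) is not implemented above.

int restart_worker(void)
{
  int pid = task_create("worker", 100, 2048, worker_main, NULL);
  if (pid <= 0)
    {
      return ERROR;
    }

  /* ... later, e.g. after the worker is detected to be hung ... */

  return task_restart(pid);  /* ERROR on unknown pid or self-restart */
}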
Example #14
void nxcon_kbdin(NXCONSOLE handle, FAR const uint8_t *buffer, uint8_t buflen)
{
  FAR struct nxcon_state_s *priv;
  ssize_t nwritten;
  int nexthead;
  char ch;
  int ret;

  gvdbg("buflen=%d\n", buflen);
  DEBUGASSERT(handle);

  /* Get the reference to the driver structure from the handle */

  priv = (FAR struct nxcon_state_s *)handle;

  /* Get exclusive access to the driver structure */

  ret = nxcon_semwait(priv);
  if (ret < 0)
    {
      gdbg("ERROR: nxcon_semwait failed\n");
      return;
    }

 /* Loop until all of the bytes have been written.  This function may be
  * called from an interrupt handler!  Semaphores cannot be used!
  *
  * The write logic only needs to modify the head index.  Therefore,
  * there is a difference in the way that head and tail are protected:
  * tail is protected with a semaphore; head is protected by disabling
  * interrupts.
  */

  for (nwritten = 0; nwritten < buflen; nwritten++)
    {
      /* Add the next character */

      ch = buffer[nwritten];

      /* Calculate the write index AFTER the next byte is added to the ring
       * buffer
       */

      nexthead = priv->head + 1;
      if (nexthead >= CONFIG_NXCONSOLE_KBDBUFSIZE)
        {
          nexthead = 0;
        }

      /* Would the next write overflow the circular buffer? */

      if (nexthead == priv->tail)
        {
          /* Yes... Return an indication that nothing was saved in the buffer. */

          gdbg("ERROR: Keyboard data overrun\n");
          break;
        }

      /* No... copy the byte */

      priv->rxbuffer[priv->head] = ch;
      priv->head = nexthead;
    }

  /* Was anything written? */

  if (nwritten > 0)
    {
      int i;

      /* Are there threads waiting for read data? */

      sched_lock();
      for (i = 0; i < priv->nwaiters; i++)
        {
          /* Yes.. Notify all of the waiting readers that more data is available */

          sem_post(&priv->waitsem);
        }

      /* Notify all poll/select waiters that there is data to be read */

#ifndef CONFIG_DISABLE_POLL
      nxcon_pollnotify(priv, POLLIN);
#endif
      sched_unlock();
    }

  nxcon_sempost(priv);
}
Example #15
ssize_t nxcon_read(FAR struct file *filep, FAR char *buffer, size_t len)
{
  FAR struct nxcon_state_s *priv;
  ssize_t nread;
  char ch;
  int ret;

  /* Recover our private state structure */

  DEBUGASSERT(filep && filep->f_priv);
  priv = (FAR struct nxcon_state_s *)filep->f_priv;

  /* Get exclusive access to the driver structure */

  ret = nxcon_semwait(priv);
  if (ret < 0)
    {
      gdbg("ERROR: nxcon_semwait failed\n");
      return ret;
    }

  /* Loop until something is read */

  for (nread = 0; nread < len; )
    {
      /* Get the next byte from the buffer */

      if (priv->head == priv->tail)
        {
          /* The circular buffer is empty. Did we read anything? */

          if (nread > 0)
            {
              /* Yes.. break out to return what we have.  */

              break;
            }

          /* If the driver was opened with O_NONBLOCK option, then don't wait.
           * Just return EAGAIN.
           */

          if (filep->f_oflags & O_NONBLOCK)
            {
              nread = -EAGAIN;
              break;
            }

          /* Otherwise, wait for something to be written to the circular
           * buffer. Increment the number of waiters so that nxcon_write()
           * will note that it needs to post the semaphore to wake us up.
           */

          sched_lock();
          priv->nwaiters++;
          nxcon_sempost(priv);

          /* We may now be pre-empted!  But that should be okay because we
           * have already incremented nwaiters.  Pre-emption is disabled
           * but will be re-enabled while we are waiting.
           */

          ret = sem_wait(&priv->waitsem);

          /* Pre-emption will still be disabled when we return.  So decrementing
           * nwaiters here is safe.
           */

          priv->nwaiters--;
          sched_unlock();

          /* Did we successfully get the waitsem? */

          if (ret >= 0)
            {
              /* Yes... then retake the mutual exclusion semaphore */

              ret = nxcon_semwait(priv);
            }

          /* Was the semaphore wait successful? Did we successfully re-take the
           * mutual exclusion semaphore?
           */

          if (ret < 0)
            {
              /* No.. One of the two sem_wait's failed. */

              int errval = errno;

              gdbg("ERROR: nxcon_semwait failed\n");

              /* Were we awakened by a signal?  If we were not interrupted
               * by a signal, or if we have read nothing yet, then return
               * the error.
               */

              if (errval != EINTR || nread == 0)
                {
                  /* Return the error */

                  nread = -errval;
                }

              /* Break out to return what we have.  Note, we can't exactly
               * "break" out because whichever error occurred, we do not hold
               * the exclusion semaphore.
               */

              goto errout_without_sem;
            }
        }
      else
        {
          /* The circular buffer is not empty, get the next byte from the
           * tail index.
           */

          ch = priv->rxbuffer[priv->tail];

          /* Increment the tail index, wrapping around if necessary */

          if (++priv->tail >= CONFIG_NXCONSOLE_KBDBUFSIZE)
            {
              priv->tail = 0;
            }

          /* Add the character to the user buffer */

          buffer[nread] = ch;
          nread++;
        }
    }

  /* Relinquish the mutual exclusion semaphore */

  nxcon_sempost(priv);

  /* Notify all poll/select waiters that they can write to the FIFO */

errout_without_sem:

#ifndef CONFIG_DISABLE_POLL
  if (nread > 0)
    {
      nxcon_pollnotify(priv, POLLOUT);
    }
#endif

  /* Return the number of characters actually read */

  return nread;
}
Example #16
int lio_listio(int mode, FAR struct aiocb *const list[], int nent,
               FAR struct sigevent *sig)
{
  FAR struct aiocb *aiocbp;
  int nqueued;
  int errcode;
  int retcode;
  int status;
  int ret;
  int i;

  DEBUGASSERT(mode == LIO_WAIT || mode == LIO_NOWAIT);
  DEBUGASSERT(list);

  nqueued = 0;    /* No I/O operations yet queued */
  ret     = OK;   /* Assume success */

  /* Lock the scheduler so that no I/O events can complete on the worker
   * thread until we set our wait set up.  Pre-emption will, of course, be
   * re-enabled while we are waiting for the signal.
   */

  sched_lock();

  /* Submit each asynchronous I/O operation in the list, skipping over NULL
   * entries.
   */

  for (i = 0; i < nent; i++)
    {
      /* Skip over NULL entries */

      aiocbp = list[i];
      if (aiocbp)
        {
          /* Submit the operation according to its opcode */

          status = OK;
          switch (aiocbp->aio_lio_opcode)
            {
            case LIO_NOP:
              {
                /* Mark the do-nothing operation complete */

                aiocbp->aio_result = OK;
              }
              break;

            case LIO_READ:
            case LIO_WRITE:
              {
                if (aiocbp->aio_lio_opcode == LIO_READ)
                  {
                    /* Submit the asynchronous read operation */

                    status = aio_read(aiocbp);
                  }
                else
                  {
                    /* Submit the asynchronous write operation */

                    status = aio_write(aiocbp);
                  }

                if (status < 0)
                  {
                    /* Failed to queue the I/O.  Set up the error return. */

                    errcode = get_errno();
                    ferr("ERROR: aio_read/write failed: %d\n", errcode);
                    DEBUGASSERT(errcode > 0);
                    aiocbp->aio_result = -errcode;
                    ret = ERROR;
                  }
                else
                  {
                    /* Increment the count of successfully queued operations */

                    nqueued++;
                  }
              }
              break;

            default:
              {
                /* Make the invalid operation complete with an error */

                ferr("ERROR: Unrecognized opcode: %d\n", aiocbp->aio_lio_opcode);
                aiocbp->aio_result = -EINVAL;
                ret = ERROR;
              }
              break;
            }
        }
    }

  /* If there was any failure in queuing the I/O, EIO will be returned */

  retcode = EIO;

  /* Now what? Three possibilities:
   *
   * Case 1: mode == LIO_WAIT
   *
   *   Ignore the sig argument; do not return until all I/O completes.
   */

  if (mode == LIO_WAIT)
    {
      /* Don't wait if no I/O was queued */

      if (nqueued > 0)
        {
          /* Wait until all I/O completes.  The scheduler will be unlocked
           * while we are waiting.
           */

          status = lio_waitall(list, nent);
          if (status < 0 && ret == OK)
            {
              /* Something bad happened while waiting and this is the first
               * error to be reported.
               */

              retcode = -status;
              ret     = ERROR;
            }
        }
    }

  /* Case 2: mode == LIO_NOWAIT and sig != NULL
   *
   *   If any I/O was queued, then setup to signal the caller when all of
   *   the transfers complete.
   *
   *   If no I/O was queued, then we need to signal the
   *   caller ourselves.
   */

  else if (sig && sig->sigev_notify == SIGEV_SIGNAL)
    {
      if (nqueued > 0)
        {
          /* Set up a signal handler to detect when all I/O completes. */

          status = lio_sigsetup(list, nent, sig);
          if (status < 0 && ret == OK)
            {
              /* Something bad happened while setting up the signal and this
               * is the first error to be reported.
               */

              retcode = -status;
              ret     = ERROR;
            }
        }
      else
        {
#ifdef CONFIG_CAN_PASS_STRUCTS
          status = sigqueue(getpid(), sig->sigev_signo,
                            sig->sigev_value);
#else
          status = sigqueue(getpid(), sig->sigev_signo,
                            sig->sigev_value.sival_ptr);
#endif
          if (status < 0 && ret == OK)
            {
              /* Something bad happened while signalling ourself and this is
               * the first error to be reported.
               */

              retcode = get_errno();
              ret     = ERROR;
            }
        }
    }

#ifdef CONFIG_SIG_EVTHREAD
  /* Notify the client via a function call */

  else if (sig && sig->sigev_notify == SIGEV_THREAD)
    {
      status = sig_notification(getpid(), sig);
      if (status < 0 && ret == OK)
        {
         /* Something bad happened while performing the notification
          * and this is the first error to be reported.
          */

          retcode = -status;
          ret     = ERROR;
        }
    }
#endif

  /* Case 3: mode == LIO_NOWAIT and sig == NULL
   *
   *   Just return now.
   */

  sched_unlock();
  if (ret < 0)
    {
      set_errno(retcode);
      return ERROR;
    }

  return OK;
}
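A hedged caller sketch (the helper is hypothetical): submit two reads and block until both complete. Per the code above, a LIO_WAIT failure reports EIO through errno if any queued transfer failed.

int read_both(int fd0, int fd1, FAR char *buf0, FAR char *buf1, size_t len)
{
  struct aiocb cb0;
  struct aiocb cb1;
  FAR struct aiocb *list[2] = { &cb0, &cb1 };

  memset(&cb0, 0, sizeof(struct aiocb));
  cb0.aio_fildes     = fd0;
  cb0.aio_buf        = buf0;
  cb0.aio_nbytes     = len;
  cb0.aio_lio_opcode = LIO_READ;

  memset(&cb1, 0, sizeof(struct aiocb));
  cb1.aio_fildes     = fd1;
  cb1.aio_buf        = buf1;
  cb1.aio_nbytes     = len;
  cb1.aio_lio_opcode = LIO_READ;

  /* Blocks until both transfers complete; OK or ERROR with errno set */

  return lio_listio(LIO_WAIT, list, 2, NULL);
}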
Example #17
int sched_getparam(pid_t pid, FAR struct sched_param *param)
{
  FAR struct tcb_s *rtcb;
  FAR struct tcb_s *tcb;
  int ret = OK;

  if (!param)
    {
      return ERROR;
    }

  /* Check if the task to restart is the calling task */

  rtcb = (FAR struct tcb_s *)g_readytorun.head;
  if ((pid == 0) || (pid == rtcb->pid))
    {
      /* Return the priority of the calling task. */

      param->sched_priority = (int)rtcb->sched_priority;
    }

  /* This pid is not for the calling task; we will have to look it up */

  else
    {
      /* Get the TCB associated with this pid */

      sched_lock();
      tcb = sched_gettcb(pid);
      if (!tcb)
        {
          /* This pid does not correspond to any known task */

          ret = ERROR;
        }
      else
        {
          /* Return the priority of the task */

          param->sched_priority = (int)tcb->sched_priority;

#ifdef CONFIG_SCHED_SPORADIC
          if ((tcb->flags & TCB_FLAG_POLICY_MASK) == TCB_FLAG_SCHED_SPORADIC)
            {
              FAR struct sporadic_s *sporadic = tcb->sporadic;
              DEBUGASSERT(sporadic != NULL);

              /* Return parameters associated with SCHED_SPORADIC */

              param->sched_ss_low_priority = (int)sporadic->low_priority;
              param->sched_ss_max_repl     = (int)sporadic->max_repl;

              clock_ticks2time((int)sporadic->repl_period, &param->sched_ss_repl_period);
              clock_ticks2time((int)sporadic->budget, &param->sched_ss_init_budget);
            }
          else
            {
              param->sched_ss_low_priority        = 0;
              param->sched_ss_max_repl            = 0;
              param->sched_ss_repl_period.tv_sec  = 0;
              param->sched_ss_repl_period.tv_nsec = 0;
              param->sched_ss_init_budget.tv_sec  = 0;
              param->sched_ss_init_budget.tv_nsec = 0;
            }
#endif
        }

      sched_unlock();
    }

  return ret;
}
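Usage sketch: per the check above, pid 0 means "the calling task".

void show_my_priority(void)
{
  struct sched_param param;

  if (sched_getparam(0, &param) == OK)
    {
      printf("My priority: %d\n", param.sched_priority);
    }
}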
Example #18
int mq_dosend(mqd_t mqdes, FAR struct mqueue_msg_s *mqmsg, FAR const char *msg,
              size_t msglen, int prio)
{
  FAR struct tcb_s *btcb;
  FAR struct mqueue_inode_s *msgq;
  FAR struct mqueue_msg_s *next;
  FAR struct mqueue_msg_s *prev;
  irqstate_t saved_state;

  /* Get a pointer to the message queue */

  sched_lock();
  msgq = mqdes->msgq;

  /* Construct the message header info */

  mqmsg->priority = prio;
  mqmsg->msglen   = msglen;

  /* Copy the message data into the message */

  memcpy((FAR void *)mqmsg->mail, (FAR const void *)msg, msglen);

  /* Insert the new message in the message queue */

  saved_state = irqsave();

  /* Search the message list to find the location to insert the new
   * message. The list is maintained in ascending priority order.
   */

  for (prev = NULL, next = (FAR struct mqueue_msg_s *)msgq->msglist.head;
       next && prio <= next->priority;
       prev = next, next = next->next);

  /* Add the message at the right place */

  if (prev)
    {
      sq_addafter((FAR sq_entry_t *)prev, (FAR sq_entry_t *)mqmsg,
                  &msgq->msglist);
    }
  else
    {
      sq_addfirst((FAR sq_entry_t *)mqmsg, &msgq->msglist);
    }

  /* Increment the count of messages in the queue */

  msgq->nmsgs++;
  irqrestore(saved_state);

  /* Check if we need to notify any tasks that are attached to the
   * message queue
   */

#ifndef CONFIG_DISABLE_SIGNALS
  if (msgq->ntmqdes)
    {
      struct sigevent event;
      pid_t pid;

      /* Remove the message notification data from the message queue. */

      memcpy(&event, &msgq->ntevent, sizeof(struct sigevent));
      pid = msgq->ntpid;

      /* Detach the notification */

      memset(&msgq->ntevent, 0, sizeof(struct sigevent));
      msgq->ntpid   = INVALID_PROCESS_ID;
      msgq->ntmqdes = NULL;

      /* Notify the client via a signal? */

      if (event.sigev_notify == SIGEV_SIGNAL)
        {
          /* Yes... Queue the signal -- What if this returns an error? */

#ifdef CONFIG_CAN_PASS_STRUCTS
          DEBUGVERIFY(sig_mqnotempty(pid, event.sigev_signo,
                      event.sigev_value));
#else
          DEBUGVERIFY(sig_mqnotempty(pid, event.sigev_signo,
                      event.sigev_value.sival_ptr));
#endif
        }

#ifdef CONFIG_SIG_EVTHREAD
      /* Notify the client via a function call */

      else if (event.sigev_notify == SIGEV_THREAD)
        {
          DEBUGVERIFY(sig_notification(pid, &event));
        }
#endif

    }
#endif

  /* Check if any tasks are waiting for the MQ not empty event. */

  saved_state = irqsave();
  if (msgq->nwaitnotempty > 0)
    {
      /* Find the highest priority task that is waiting for
       * this queue to be non-empty in g_waitingformqnotempty
       * list. sched_lock() should give us sufficient protection since
       * interrupts should never cause a change in this list
       */

      for (btcb = (FAR struct tcb_s *)g_waitingformqnotempty.head;
           btcb && btcb->msgwaitq != msgq;
           btcb = btcb->flink);

      /* If one was found, unblock it */

      ASSERT(btcb);

      btcb->msgwaitq = NULL;
      msgq->nwaitnotempty--;
      up_unblock_task(btcb);
    }

  irqrestore(saved_state);
  sched_unlock();
  return OK;
}
Example #19
int mq_close(mqd_t mqdes)
{
  FAR _TCB    *rtcb = (FAR _TCB*)g_readytorun.head;
  FAR msgq_t *msgq;
  irqstate_t  saved_state;
  int         ret = ERROR;

  /* Verify the inputs */

   if (mqdes)
     {
       sched_lock();

       /* Remove the message descriptor from the current task's
        * list of message descriptors.
        */

       sq_rem((FAR sq_entry_t*)mqdes, &rtcb->msgdesq);

       /* Find the message queue associated with the message descriptor */

       msgq = mqdes->msgq;

       /* Check if the calling task has a notification attached to
        * the message queue via this mqdes.
        */

#ifndef CONFIG_DISABLE_SIGNALS
       if (msgq->ntmqdes == mqdes)
         {
           msgq->ntpid   = INVALID_PROCESS_ID;
           msgq->ntsigno = 0;
           msgq->ntvalue.sival_int = 0;
           msgq->ntmqdes = NULL;
         }
#endif

       /* Decrement the connection count on the message queue. */

       if (msgq->nconnect)
         {
           msgq->nconnect--;
         }

       /* If it is no longer connected to any message descriptor and if the
        * message queue has already been unlinked, then we can discard the
        * message queue.
        */

       if (!msgq->nconnect && msgq->unlinked)
         {
           /* Remove the message queue from the list of all
            * message queues
            */

           saved_state = irqsave();
           (void)sq_rem((FAR sq_entry_t*)msgq, &g_msgqueues);
           irqrestore(saved_state);

           /* Then deallocate it (and any messages left in it) */

           mq_msgqfree(msgq);
         }

       /* Deallocate the message descriptor */

       mq_desfree(mqdes);

       sched_unlock();
       ret = OK;
     }

   return ret;
}
Example #20
int waitid(idtype_t idtype, id_t id, siginfo_t *info, int options)
{
  FAR _TCB *rtcb = (FAR _TCB *)g_readytorun.head;
  sigset_t sigset;
  int err;
  int ret;

  /* MISSING LOGIC:   If WNOHANG is provided in the options, then this function
   * should return immediately.  However, there is no mechanism available now
   * to know if the thread has children:  The children remember their parents (if
   * CONFIG_SCHED_HAVE_PARENT) but the parents do not remember their children.
   */

  /* None of the options are supported except for WEXITED (which must be
   * provided).  Currently SIGCHLD always reports CLD_EXITED so we cannot
   * distinguish any other events.
   */

#ifdef CONFIG_DEBUG
  if (options != WEXITED)
    {
      set_errno(ENOSYS);
      return ERROR;
    }
#endif

  /* Create a signal set that contains only SIGCHLD */

  (void)sigemptyset(&sigset);
  (void)sigaddset(&sigset, SIGCHLD);

  /* Disable pre-emption so that nothing changes while the loop executes */

  sched_lock();

  /* Verify that this task actually has children and that the requested
   * TCB is actually a child of this task.
   */

  if (rtcb->nchildren == 0)
    {
      err = ECHILD;
      goto errout_with_errno;
    }
  else if (idtype == P_PID)
    {
     /* Get the TCB corresponding to this PID and make sure it is our child. */

      FAR _TCB *ctcb = sched_gettcb((pid_t)id);
      if (!ctcb || ctcb->parent != rtcb->pid)
        {
          err = ECHILD;
          goto errout_with_errno;
        }
    }

  /* Loop until the child that we are waiting for dies */

  for (;;)
    {
      /* Check if the task has already died. Signals are not queued in
       * NuttX.  So a possibility is that the child has died and we
       * missed the death of child signal (we got some other signal
       * instead).
       */

      if (rtcb->nchildren == 0 ||
          (idtype == P_PID && (ret = kill((pid_t)id, 0)) < 0))
        {
          /* We know that the child task was running okay when we started,
           * so we must have lost the signal.  What can we do?
           * Let's claim we were interrupted by a signal.
           */

          err = EINTR;
          goto errout_with_errno;
        }

      /* Wait for any death-of-child signal */

      ret = sigwaitinfo(&sigset, info);
      if (ret < 0)
        {
          goto errout;
        }

      /* Make sure this was SIGCHLD */

      if (info->si_signo == SIGCHLD)
        {
          /* Yes.. Are we waiting for the death of a specific child? */

          if (idtype == P_PID)
            {
              /* Was this the death of the thread we were waiting for? */

              if (info->si_pid == (pid_t)id)
                {
                   /* Yes... return success */

                   break;
                }
            }

          /* Are we waiting for any child to change state? */

          else if (idtype == P_ALL)
            {
              /* Return success */

              break;
            }

          /* Other ID types are not supported */

          else /* if (idtype == P_PGID) */
            {
              set_errno(ENOSYS);
              goto errout;
            }
        }
    }

  sched_unlock();
  return OK;

errout_with_errno:
  set_errno(err);
errout:
  sched_unlock();
  return ERROR;
}
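A hedged usage sketch (child_pid is assumed to come from an earlier task_create() or similar): only WEXITED is supported by the implementation above.

int wait_for_child(pid_t child_pid)
{
  siginfo_t info;

  if (waitid(P_PID, (id_t)child_pid, &info, WEXITED) != OK)
    {
      return ERROR;  /* errno: ECHILD, EINTR, ENOSYS, ... */
    }

  /* info.si_pid identifies the child; si_status is its exit status */

  printf("Child %d exited with status %d\n",
         (int)info.si_pid, info.si_status);
  return OK;
}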
Example #21
mqd_t mq_open(FAR const char *mq_name, int oflags, ...)
{
  FAR struct inode *inode;
  FAR const char *relpath = NULL;
  FAR struct mqueue_inode_s *msgq;
  char fullpath[MAX_MQUEUE_PATH];
  va_list ap;
  struct mq_attr *attr;
  mqd_t mqdes;
  mode_t mode;
  int errcode;
  int ret;

  /* Make sure that a non-NULL name is supplied */

  if (!mq_name)
    {
      errcode = EINVAL;
      goto errout;
    }

  /* Get the full path to the message queue */

  snprintf(fullpath, MAX_MQUEUE_PATH, CONFIG_FS_MQUEUE_MPATH "/%s", mq_name);

  /* Make sure that the check for the existence of the message queue
   * and the creation of the message queue are atomic with respect to
   * other processes executing mq_open().  A simple sched_lock() should
   * be sufficient.
   */

  sched_lock();

  /* Get the inode for this mqueue.  This should succeed if the message
   * queue has already been created.
   */

  inode = inode_find(fullpath, &relpath);
  if (inode)
    {
      /* It exists.  Verify that the inode is a message queue */

      if (!INODE_IS_MQUEUE(inode))
        {
          errcode = ENXIO;
          goto errout_with_inode;
        }

      /* It exists and is a message queue.  Check if the caller wanted to
       * create a new mqueue with this name.
       */

      if ((oflags & (O_CREAT|O_EXCL)) == (O_CREAT|O_EXCL))
        {
          errcode = EEXIST;
          goto errout_with_inode;
        }

      /* Create a message queue descriptor for the current thread */

      msgq  = inode->u.i_mqueue;
      mqdes = mq_descreate(NULL, msgq, oflags);
      if (!mqdes)
        {
          errcode = ENOMEM;
          goto errout_with_inode;
        }
    }
  else
    {
      /* The mqueue does not exist.  Were we asked to create it? */

      if ((oflags & O_CREAT) == 0)
        {
          /* The mqueue does not exist and O_CREAT is not set */

          errcode = ENOENT;
          goto errout_with_lock;
        }

     /* Create the mqueue.  First we have to extract the additional
      * parameters from the variable argument list.
      */

      va_start(ap, oflags);
      mode = va_arg(ap, mode_t);
      attr = va_arg(ap, FAR struct mq_attr*);
      va_end(ap);

      /* Create an inode in the pseudo-filesystem at this path */

      inode_semtake();
      ret = inode_reserve(fullpath, &inode);
      inode_semgive();

      if (ret < 0)
        {
          errcode = -ret;
          goto errout_with_lock;
        }

      /* Allocate memory for the new message queue. */

      msgq = (FAR struct mqueue_inode_s*)mq_msgqalloc(mode, attr);
      if (!msgq)
        {
          errcode = ENOSPC;
          goto errout_with_inode;
        }

      /* Create a message queue descriptor for the TCB */

      mqdes = mq_descreate(NULL, msgq, oflags);
      if (!mqdes)
        {
          errcode = ENOMEM;
          goto errout_with_msgq;
        }

      /* Bind the message queue and the inode structure */

      INODE_SET_MQUEUE(inode);
      inode->u.i_mqueue = msgq;
      msgq->inode       = inode;

    }

  sched_unlock();
  return mqdes;

errout_with_msgq:
  mq_msgqfree(msgq);
  inode->u.i_mqueue = NULL;
errout_with_inode:
  inode_release(inode);
errout_with_lock:
  sched_unlock();
errout:
  set_errno(errcode);
  return (mqd_t)ERROR;
}
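Usage sketch (queue name and attributes are illustrative): the mode and attr arguments are consumed from the variable argument list only on the O_CREAT path, matching the logic above.

mqd_t open_or_create(void)
{
  struct mq_attr attr;

  attr.mq_maxmsg  = 8;    /* Up to 8 queued messages */
  attr.mq_msgsize = 64;   /* Of up to 64 bytes each */
  attr.mq_flags   = 0;

  /* On failure returns (mqd_t)ERROR with errno set
   * (EEXIST, ENOENT, ENOMEM, ...)
   */

  return mq_open("myqueue", O_RDWR | O_CREAT, 0666, &attr);
}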
Example #22
static ssize_t ramlog_read(FAR struct file *filep, FAR char *buffer, size_t len)
{
  struct inode *inode  = filep->f_inode;
  struct ramlog_dev_s *priv;
  ssize_t nread;
  char ch;
  int ret;

  /* Some sanity checking */

  DEBUGASSERT(inode && inode->i_private);
  priv = inode->i_private;

  /* If the circular buffer is empty, then wait for something to be written
   * to it.  This function may NOT be called from an interrupt handler.
   */

  DEBUGASSERT(!up_interrupt_context());

  /* Get exclusive access to the rl_tail index */

  ret = sem_wait(&priv->rl_exclsem);
  if (ret < 0)
    {
      return ret;
    }

  /* Loop until something is read */

  for (nread = 0; nread < len; )
    {
      /* Get the next byte from the buffer */

      if (priv->rl_head == priv->rl_tail)
        {
          /* The circular buffer is empty. */

#ifdef CONFIG_RAMLOG_NONBLOCKING
          /* Return what we have (zero means end-of-file) */

          break;
#else
          /* Did we read anything? */

          if (nread > 0)
            {
              /* Yes.. break out to return what we have.
               */

              break;
            }

          /* If the driver was opened with O_NONBLOCK option, then don't wait.
           * Just return EAGAIN.
           */

          if (filep->f_oflags & O_NONBLOCK)
            {
              nread = -EAGAIN;
              break;
            }

          /* Otherwise, wait for something to be written to the circular
           * buffer. Increment the number of waiters so that ramlog_write()
           * will note that it needs to post the semaphore to wake us up.
           */

          sched_lock();
          priv->rl_nwaiters++;
          sem_post(&priv->rl_exclsem);

          /* We may now be pre-empted!  But that should be okay because we
           * have already incremented nwaiters.  Pre-emption is disabled
           * but will be re-enabled while we are waiting.
           */

          ret = sem_wait(&priv->rl_waitsem);

          /* Pre-emption will still be disabled when we return, so
           * decrementing rl_nwaiters here is safe.
           */

          priv->rl_nwaiters--;
          sched_unlock();

          /* Did we successfully get the rl_waitsem? */

          if (ret >= 0)
            {
              /* Yes... then retake the mutual exclusion semaphore */

              ret = sem_wait(&priv->rl_exclsem);
            }

          /* Was the semaphore wait successful? Did we successful re-take the
           * mutual exclusion semaphore?
           */

          if (ret < 0)
            {
              /* No.. One of the two sem_wait's failed. */

              int errval = errno;

              /* Were we awakened by a signal?  If we were not interrupted
               * by a signal, or if we have read nothing yet, then return
               * the error.
               */

              if (errval != EINTR || nread == 0)
                {
                  /* Return the error */

                  nread = -errval;
                }

              /* Break out to return what we have.  Note, we can't exactly
               * "break" out because whichever error occurred, we do not hold
               * the exclusion semaphore.
               */

              goto errout_without_sem;
            }
#endif /* CONFIG_RAMLOG_NONBLOCKING */
        }
      else
        {
          /* The circular buffer is not empty, get the next byte from the
           * tail index.
           */

          ch = priv->rl_buffer[priv->rl_tail];

          /* Increment the tail index, wrapping around if necessary */

          if (++priv->rl_tail >= priv->rl_bufsize)
            {
              priv->rl_tail = 0;
            }

          /* Add the character to the user buffer */

          buffer[nread] = ch;
          nread++;
        }
    }

  /* Relinquish the mutual exclusion semaphore */

  sem_post(&priv->rl_exclsem);

  /* Notify all poll/select waiters that they can write to the FIFO */

#ifndef CONFIG_RAMLOG_NONBLOCKING
errout_without_sem:
#endif

#ifndef CONFIG_DISABLE_POLL
  if (nread > 0)
    {
      ramlog_pollnotify(priv, POLLOUT);
    }
#endif

  /* Return the number of characters actually read */

  return nread;
}
Example #23
int chdir(FAR const char *path)
{
  struct stat buf;
  char *oldpwd;
  char *alloc;
  int errcode;
  int ret;

  /* Verify the input parameters */

  if (!path)
    {
      errcode = ENOENT;
      goto errout;
    }

  /* Verify that 'path' refers to a directory */

  ret = stat(path, &buf);
  if (ret != 0)
    {
      errcode = ENOENT;
      goto errout;
    }

  /* Something exists here... is it a directory? */

  if (!S_ISDIR(buf.st_mode))
    {
      errcode = ENOTDIR;
      goto errout;
    }

  /* Yes, it is a directory. Remove any trailing '/' characters from the path */

  _trimdir(path);

  /* Replace any preceding OLDPWD with the current PWD (this is to
   * support 'cd -' in NSH)
   */

  sched_lock();
  oldpwd = getenv("PWD");
  if (!oldpwd)
    {
      oldpwd = CONFIG_LIB_HOMEDIR;
    }

  alloc = strdup(oldpwd);  /* kludge needed because environment is realloc'ed */
  setenv("OLDPWD", alloc, TRUE);
  lib_free(alloc);

  /* Set the cwd to the input 'path' */

  setenv("PWD", path, TRUE);
  sched_unlock();
  return OK;

errout:
  set_errno(errcode);
  return ERROR;
}
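Usage sketch (the "/tmp" path is illustrative): chdir() updates the PWD and OLDPWD environment variables, and getcwd() reads PWD back.

int goto_tmp(void)
{
  char cwd[PATH_MAX];

  if (chdir("/tmp") != OK)
    {
      return ERROR;  /* errno: ENOENT or ENOTDIR */
    }

  if (getcwd(cwd, sizeof(cwd)) != NULL)
    {
      printf("Now in %s\n", cwd);
    }

  return OK;
}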
Example #24
static void *simuart_thread(void *arg)
{
  unsigned char ch;
  ssize_t nread;
  int next;
  int prev;

  /* Now loop forever, buffering data from stdin */

  for (;;)
    {
      /* Read one character from stdin */

      nread = read(0, &ch, 1);

      /* Check for failures (but don't do anything) */

      if (nread == 1)
        {
#ifdef CONFIG_SIM_UART_DATAPOST
          sched_lock();
#endif

          /* Get the index to the next slot in the UART buffer */

          prev = g_uarthead;
          next = prev + 1;
          if (next >= SIMUART_BUFSIZE)
            {
              next = 0;
            }

          /* Would adding this character cause an overflow? */

          if (next != g_uarttail)
            {
              /* No.. Add the character to the UART buffer */

              g_uartbuffer[prev] = ch;

              /* Update the head index (BEFORE posting) */

              g_uarthead = next;

              /* Was the buffer previously empty? */

              if (prev == g_uarttail)
                {
                  /* Yes.. signal any (NuttX) threads waiting for serial
                   * input.
                   */

#ifdef CONFIG_SIM_UART_DATAPOST
                  simuart_post();
#else
                  g_uart_data_available = 1;
#endif
                }
            }

#ifdef CONFIG_SIM_UART_DATAPOST
          /* REVISIT:  This is very weird and scary here.  When sched_unlock()
           * is called, we may do a longjmp() style context switch, meaning
           * that other logic will run on this thread (but with a
           * different stack).  So we do not get back here until the task
           * sleeps again.  I can't help but believe that that is going to
           * be a problem someday.
           */

          sched_unlock();
#endif
        }
    }

  return NULL;
}
Example #25
int exec(FAR const char *filename, FAR char * const *argv,
         FAR const struct symtab_s *exports, int nexports)
{
  FAR struct binary_s *bin;
  int pid;
  int errcode;
  int ret;

  /* Allocate the load information */

  bin = (FAR struct binary_s *)kmm_zalloc(sizeof(struct binary_s));
  if (!bin)
    {
      berr("ERROR: Failed to allocate binary_s\n");
      errcode = ENOMEM;
      goto errout;
    }

  /* Initialize the binary structure */

  bin->filename = filename;
  bin->exports  = exports;
  bin->nexports = nexports;

  /* Copy the argv[] list */

  ret = binfmt_copyargv(bin, argv);
  if (ret < 0)
    {
      errcode = -ret;
      berr("ERROR: Failed to copy argv[]: %d\n", errcode);
      goto errout_with_bin;
    }

  /* Load the module into memory */

  ret = load_module(bin);
  if (ret < 0)
    {
      errcode = -ret;
      berr("ERROR: Failed to load program '%s': %d\n", filename, errcode);
      goto errout_with_argv;
    }

  /* Disable pre-emption so that the executed module does
   * not return until we get a chance to connect the on_exit
   * handler.
   */

  sched_lock();

  /* Then start the module */

  pid = exec_module(bin);
  if (pid < 0)
    {
      errcode = -pid;
      berr("ERROR: Failed to execute program '%s': %d\n",
           filename, errcode);
      goto errout_with_lock;
    }

#ifdef CONFIG_BINFMT_LOADABLE
  /* Set up to unload the module (and free the binary_s structure)
   * when the task exits.
   */

  ret = group_exitinfo(pid, bin);
  if (ret < 0)
    {
      berr("ERROR: Failed to schedule unload '%s': %d\n", filename, ret);
    }

#else
  /* Free the binary_s structure here */

  binfmt_freeargv(bin);
  kmm_free(bin);

  /* TODO: How does the module get unloaded in this case? */
#endif

  sched_unlock();
  return pid;

errout_with_lock:
  sched_unlock();
  (void)unload_module(bin);
errout_with_argv:
  binfmt_freeargv(bin);
errout_with_bin:
  kmm_free(bin);
errout:
  set_errno(errcode);
  return ERROR;

}
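A hedged usage sketch (the binary path and argument list are illustrative): on success exec() returns the pid of the new task; on failure it returns ERROR with errno set as shown above.

int launch_hello(void)
{
  FAR char *args[] = { "hello", NULL };
  int pid;

  pid = exec("/mnt/apps/hello", args, NULL, 0);  /* No exported symbols */
  if (pid < 0)
    {
      return ERROR;  /* errno: ENOMEM, ENOENT, ENOEXEC, ... */
    }

  return pid;
}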
Example #26
int sem_unlink(FAR const char *name)
{
  FAR struct inode *inode;
  FAR const char *relpath = NULL;
  char fullpath[MAX_SEMPATH];
  int errcode;
  int ret;

  /* Get the full path to the semaphore */

  snprintf(fullpath, MAX_SEMPATH, CONFIG_FS_NAMED_SEMPATH "/%s", name);

  /* Get the inode for this semaphore. */

  sched_lock();
  inode = inode_find(fullpath, &relpath);
  if (!inode)
    {
      /* There is no inode at this path */

      errcode = ENOENT;
      goto errout;
    }

  /* Verify that what we found is, indeed, a semaphore */

  if (!INODE_IS_NAMEDSEM(inode))
    {
      errcode = ENXIO;
      goto errout_with_inode;
    }

  /* Refuse to unlink the inode if it has children.  I.e., if it is
   * functioning as a directory and the directory is not empty.
   */

  inode_semtake();
  if (inode->i_child != NULL)
    {
      errcode = ENOTEMPTY;
      goto errout_with_semaphore;
    }

  /* Remove the old inode from the tree.  Because we hold a reference count
   * on the inode, it will not be deleted now.  This will set the
   * FSNODEFLAG_DELETED bit in the inode flags.
   */

  ret = inode_remove(fullpath);

  /* inode_remove() should always fail with -EBUSY because we have a reference
   * on the inode.  -EBUSY means that the inode was, indeed, unlinked but
   * that it could not be freed because there are references.
   */

  DEBUGASSERT(ret >= 0 || ret == -EBUSY);
  UNUSED(ret);

  /* Now we do not release the reference count in the normal way (by calling
   * inode_release()).  Rather, we call sem_close().  sem_close() will decrement
   * the reference count on the inode.  But it will also free the semaphore
   * if that reference count decrements to zero.  Since we hold one reference,
   * that can only occur if the semaphore is not in-use.
   */

  inode_semgive();
  ret = sem_close((FAR sem_t *)inode->u.i_nsem);
  sched_unlock();
  return ret;

errout_with_semaphore:
  inode_semgive();
errout_with_inode:
  inode_release(inode);
errout:
  set_errno(errcode);
  sched_unlock();
  return ERROR;
}
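
For reference, a minimal sketch of the named-semaphore lifecycle that sem_unlink() tears down, using only standard POSIX calls (the /mysem name is hypothetical):

#include <fcntl.h>
#include <semaphore.h>

int named_sem_demo(void)
{
  FAR sem_t *sem;

  /* Create (or open) the named semaphore with an initial count of 1 */

  sem = sem_open("/mysem", O_CREAT, 0644, 1);
  if (sem == SEM_FAILED)
    {
      return ERROR;
    }

  sem_wait(sem);                /* ... critical section ... */
  sem_post(sem);

  /* Drop our reference, then remove the name.  As the code above shows,
   * the underlying semaphore is freed only when the last reference is
   * closed.
   */

  sem_close(sem);
  return sem_unlink("/mysem");
}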
Example No. 27
static int max11802_waitsample(FAR struct max11802_dev_s *priv,
                               FAR struct max11802_sample_s *sample)
{
  irqstate_t flags;
  int ret;

  /* Interrupts must be disabled when this is called to (1) prevent posting
   * of semaphores from interrupt handlers, and (2) to prevent sampled data
   * from changing until it has been reported.
   *
   * In addition, we will also disable pre-emption to prevent other threads
   * from getting control while we muck with the semaphores.
   */

  sched_lock();
  flags = enter_critical_section();

  /* Now release the semaphore that manages mutually exclusive access to
   * the device structure.  This may cause other tasks to become ready to
   * run, but they cannot run yet because pre-emption is disabled.
   */

  sem_post(&priv->devsem);

  /* Try to get a sample... if we cannot, then wait on the semaphore
   * that is posted when new sample data is available.
   */

  while (max11802_sample(priv, sample) < 0)
    {
      /* Wait for a change in the MAX11802 state */

      iinfo("Waiting..\n");
      priv->nwaiters++;
      ret = sem_wait(&priv->waitsem);
      priv->nwaiters--;

      if (ret < 0)
        {
          /* If we are awakened by a signal, then we need to return
           * the failure now.
           */

          ierr("ERROR: sem_wait: %d\n", errno);
          DEBUGASSERT(errno == EINTR);
          ret = -EINTR;
          goto errout;
        }
    }

  iinfo("Sampled\n");

  /* Re-acquire the semaphore that manages mutually exclusive access to
   * the device structure.  We may have to wait here.  But we have our
   * sample.  Interrupts and pre-emption will be re-enabled while we wait.
   */

  ret = sem_wait(&priv->devsem);

errout:
  /* Then re-enable interrupts.  We might get an interrupt here and there
   * could be a new sample.  But no new threads will run because we still
   * have pre-emption disabled.
   */

  leave_critical_section(flags);

  /* Restore pre-emption.  We might get suspended here but that is okay
   * because we already have our sample.  Note:  this means that if there
   * were two threads reading from the MAX11802 for some reason, the data
   * might be read out of order.
   */

  sched_unlock();
  return ret;
}
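
The other half of this handshake is not shown here. A sketch of what the notification path presumably looks like, assuming the nwaiters and waitsem fields used above (the function name is hypothetical):

static void max11802_notify(FAR struct max11802_dev_s *priv)
{
  /* New sample data is available.  If a thread incremented nwaiters and
   * blocked in max11802_waitsample(), post waitsem to wake it; the
   * waiter re-checks max11802_sample() in its loop, so a spurious post
   * is harmless.
   */

  if (priv->nwaiters > 0)
    {
      sem_post(&priv->waitsem);
    }
}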
Example No. 28
int pthread_mutex_trylock(FAR pthread_mutex_t *mutex)
{
  int ret = OK;

  sdbg("mutex=0x%p\n", mutex);

  if (!mutex)
    {
      ret = EINVAL;
    }
  else
    {
      int mypid = (int)getpid();

      /* Make sure the semaphore is stable while we make the following
       * checks.  This all needs to be one atomic action.
       */

      sched_lock();

      /* Try to get the semaphore. */

      if (sem_trywait((FAR sem_t *)&mutex->sem) == OK)
        {
          /* If we successfully obtained the semaphore, then indicate
           * that we own it.
           */

          mutex->pid = mypid;

#ifdef CONFIG_MUTEX_TYPES
          if (mutex->type == PTHREAD_MUTEX_RECURSIVE)
            {
              mutex->nlocks = 1;
            }
#endif
        }

      /* Was it not available? */

      else if (get_errno() == EAGAIN)
        {
#ifdef CONFIG_MUTEX_TYPES

          /* Check if the recursive mutex was already locked by this thread. */

          if (mutex->type == PTHREAD_MUTEX_RECURSIVE && mutex->pid == mypid)
            {
              /* Increment the number of locks held and return successfully. */

              mutex->nlocks++;
            }
          else
            {
              ret = EBUSY;
            }
#else
          ret = EBUSY;
#endif
        }
      else
        {
          ret = EINVAL;
        }

      sched_unlock();
    }

  sdbg("Returning %d\n", ret);
  return ret;
}
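
Unlike most POSIX calls, pthread_mutex_trylock() returns the error code directly rather than setting errno, which the implementation above honors. A minimal usage sketch (try_update and its arguments are hypothetical):

#include <pthread.h>
#include <errno.h>

int try_update(FAR pthread_mutex_t *lock, FAR int *counter)
{
  int status = pthread_mutex_trylock(lock);

  if (status == EBUSY)
    {
      return 0;             /* Another thread holds it; skip this pass */
    }
  else if (status != 0)
    {
      return -status;       /* EINVAL, etc. */
    }

  (*counter)++;             /* Brief critical section */
  pthread_mutex_unlock(lock);
  return 1;
}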
Example No. 29
static int thread_schedsetup(FAR struct tcb_s *tcb, int priority,
                             start_t start, CODE void *entry, uint8_t ttype)
{
  int ret;

  /* Assign a unique task ID to the task. */

  ret = task_assignpid(tcb);
  if (ret == OK)
    {
      /* Save task priority and entry point in the TCB */

      tcb->sched_priority = (uint8_t)priority;
#ifdef CONFIG_PRIORITY_INHERITANCE
      tcb->base_priority  = (uint8_t)priority;
#endif
      tcb->start          = start;
      tcb->entry.main     = (main_t)entry;

      /* Save the thread type.  This setting will be needed when
       * up_initial_state() is called.
       */

      ttype              &= TCB_FLAG_TTYPE_MASK;
      tcb->flags         &= ~TCB_FLAG_TTYPE_MASK;
      tcb->flags         |= ttype;

      /* Save the task ID of the parent task in the TCB and allocate
       * a child status structure.
       */

      task_saveparent(tcb, ttype);

      /* exec(), pthread_create(), task_create(), and vfork() all
       * inherit the signal mask of the parent thread.
       */

#ifndef CONFIG_DISABLE_SIGNALS
      (void)sigprocmask(SIG_SETMASK, NULL, &tcb->sigprocmask);
#endif

      /* Initialize the task state.  It does not get a valid state
       * until it is activated.
       */

      tcb->task_state = TSTATE_TASK_INVALID;

      /* Clone the parent task's D-Space (if it was running PIC).  This
       * must be done before calling up_initial_state() so that the
       * state setup will take the PIC address base into account.
       */

      task_dupdspace(tcb);

      /* Initialize the processor-specific portion of the TCB */

      up_initial_state(tcb);

      /* Add the task to the inactive task list */

      sched_lock();
      dq_addfirst((FAR dq_entry_t*)tcb, (dq_queue_t*)&g_inactivetasks);
      tcb->task_state = TSTATE_TASK_INACTIVE;
      sched_unlock();
    }

  return ret;
}
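
The sched_lock()/sched_unlock() bracket at the end is the interesting part: on a single CPU, disabling pre-emption makes the queue update atomic with respect to other threads (though not to interrupt handlers). The same pattern in isolation, assuming NuttX's dq_addfirst() helper and a hypothetical g_pending queue:

#include <queue.h>
#include <sched.h>

static dq_queue_t g_pending;            /* Hypothetical work queue */

void pending_push(FAR dq_entry_t *node)
{
  sched_lock();                         /* No other thread may run... */
  dq_addfirst(node, &g_pending);        /* ...while the links change */
  sched_unlock();
}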
Example No. 30
static void lio_sighandler(int signo, siginfo_t *info, void *ucontext)
{
  FAR struct aiocb *aiocbp;
  FAR struct lio_sighand_s *sighand;
  int ret;

  DEBUGASSERT(signo == SIGPOLL && info);

  /* The info structure should contain a pointer to the AIO control block */

  aiocbp = (FAR struct aiocb *)info->si_value.sival_ptr;
  DEBUGASSERT(aiocbp && aiocbp->aio_result != -EINPROGRESS);

  /* Recover our private data from the AIO control block */

  sighand = (FAR struct lio_sighand_s *)aiocbp->aio_priv;
  DEBUGASSERT(sighand && sighand->list);
  aiocbp->aio_priv = NULL;

  /* Prevent any asynchronous I/O completions while the signal handler runs */

  sched_lock();

  /* Check if all of the pending I/O has completed */

  ret = lio_checkio(sighand->list, sighand->nent);
  if (ret != -EINPROGRESS)
    {
      /* All pending I/O has completed */
      /* Restore the signal handler */

      (void)sigaction(SIGPOLL, &sighand->oact, NULL);

      /* Restore the sigprocmask */

      (void)sigprocmask(SIG_SETMASK, &sighand->oprocmask, NULL);

      /* Signal the client */

      if (sighand->sig->sigev_notify == SIGEV_SIGNAL)
        {
          /* Queue the signal to the client.  sigqueue() returns zero on
           * success, so the call must not be buried inside DEBUGASSERT()
           * where it could be compiled out of the build.
           */

#ifdef CONFIG_CAN_PASS_STRUCTS
          ret = sigqueue(sighand->pid, sighand->sig->sigev_signo,
                         sighand->sig->sigev_value);
#else
          ret = sigqueue(sighand->pid, sighand->sig->sigev_signo,
                         sighand->sig->sigev_value.sival_ptr);
#endif
          DEBUGASSERT(ret >= 0);
        }

#ifdef CONFIG_SIG_EVTHREAD
      /* Notify the client via a function call */

      else if (sighand->sig->sigev_notify == SIGEV_THREAD)
        {
          ret = sig_notification(sighand->pid, sighand->sig);
          DEBUGASSERT(ret >= 0);
        }
#endif

      /* And free the container */

      lib_free(sighand);
    }

  sched_unlock();
}
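
This handler is installed by lio_listio() when the caller asks for asynchronous completion notification. A minimal sketch of what such a call might look like (the file descriptor, buffers, and the SIGUSR1 choice are hypothetical):

#include <aio.h>
#include <string.h>
#include <signal.h>

int start_reads(int fd, FAR char *buf0, FAR char *buf1, size_t len)
{
  static struct aiocb cb[2];
  FAR struct aiocb *list[2] = { &cb[0], &cb[1] };
  struct sigevent sig;

  memset(cb, 0, sizeof(cb));
  cb[0].aio_fildes     = fd;
  cb[0].aio_buf        = buf0;
  cb[0].aio_nbytes     = len;
  cb[0].aio_lio_opcode = LIO_READ;

  cb[1]            = cb[0];       /* Second read, next chunk of the file */
  cb[1].aio_buf    = buf1;
  cb[1].aio_offset = len;

  /* Ask for SIGUSR1 when *all* of the queued I/O has completed.
   * Internally, lio_listio() fields SIGPOLL via lio_sighandler() above
   * and raises this sigevent once lio_checkio() reports completion.
   */

  memset(&sig, 0, sizeof(sig));
  sig.sigev_notify = SIGEV_SIGNAL;
  sig.sigev_signo  = SIGUSR1;

  return lio_listio(LIO_NOWAIT, list, 2, &sig);
}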