Example #1
int
SPI::transfer(uint8_t *send, uint8_t *recv, unsigned len)
{

	if ((send == nullptr) && (recv == nullptr))
		return -EINVAL;

	/* do common setup */
	if (!up_interrupt_context())
		SPI_LOCK(_dev, true);

	SPI_SETFREQUENCY(_dev, _frequency);
	SPI_SETMODE(_dev, _mode);
	SPI_SETBITS(_dev, 8);
	SPI_SELECT(_dev, _device, true);

	/* do the transfer */
	SPI_EXCHANGE(_dev, send, recv, len);

	/* and clean up */
	SPI_SELECT(_dev, _device, false);

	if (!up_interrupt_context())
		SPI_LOCK(_dev, false);

	return OK;
}
Example #2
void sched_ufree(FAR void *address)
{
#ifdef CONFIG_BUILD_KERNEL
    /* REVISIT:  It is not safe to defer user allocation in the kernel mode
     * build.  Why?  Because the correct user context is in place now but
     * will not be in place when the deferred de-allocation is performed.  In
     * order to make this work, we would need to do something like:  (1) move
     * g_delayed_kufree into the group structure, then traverse the groups to
     * collect garbage on a group-by-group basis.
     */

    ASSERT(!up_interrupt_context());
    kumm_free(address);

#else
    /* Check if this is an attempt to deallocate memory from an exception
     * handler.  If this function is called from the IDLE task, then we
     * must have exclusive access to the memory manager to do this.
     */

    if (up_interrupt_context() || kumm_trysemaphore() != 0)
    {
        irqstate_t flags;

        /* Yes.. Make sure that this is not an attempt to free kernel memory
         * using the user deallocator.
         */

        flags = irqsave();
#if (defined(CONFIG_BUILD_PROTECTED) || defined(CONFIG_BUILD_KERNEL)) && \
     defined(CONFIG_MM_KERNEL_HEAP)
        DEBUGASSERT(!kmm_heapmember(address));
#endif

        /* Delay the deallocation until a more appropriate time. */

        sq_addlast((FAR sq_entry_t *)address,
                   (FAR sq_queue_t *)&g_delayed_kufree);

        /* Signal the worker thread that it has some clean up to do */

#ifdef CONFIG_SCHED_WORKQUEUE
        work_signal(LPWORK);
#endif
        irqrestore(flags);
    }
    else
    {
        /* No.. just deallocate the memory now. */

        kumm_free(address);
        kumm_givesemaphore();
    }
#endif
}
/****************************************************************************
 * Name: up_unblock_task
 *
 * Description:
 *   A task is currently in an inactive task list
 *   but has been prepped to execute.  Move the TCB to the
 *   ready-to-run list, restore its context, and start execution.
 *
 * Inputs:
 *   tcb: Refers to the tcb to be unblocked.  This tcb is
 *     in one of the waiting tasks lists.  It must be moved to
 *     the ready-to-run list and, if it is the highest priority
 *     ready-to-run task, executed.
 *
 ****************************************************************************/
void up_unblock_task(struct tcb_s *tcb)
{
    /* Verify that the context switch can be performed */
    if ((tcb->task_state < FIRST_BLOCKED_STATE) ||
        (tcb->task_state > LAST_BLOCKED_STATE)) {
        warn("%s: task sched error\n", __func__);
        return;
    }
    else {
        struct tcb_s *rtcb = current_task;

        /* Remove the task from the blocked task list */
        sched_removeblocked(tcb);

        /* Reset its timeslice.  This is only meaningful for round
         * robin tasks but it doesn't hurt to do it for everything.
         */
#if CONFIG_RR_INTERVAL > 0
        tcb->timeslice = CONFIG_RR_INTERVAL / MSEC_PER_TICK;
#endif
    
        // Add the task in the correct location in the prioritized
        // g_readytorun task list.
        if (sched_addreadytorun(tcb) && !up_interrupt_context()) {
            /* The currently active task has changed! */
            struct tcb_s *nexttcb = (struct tcb_s*)g_readytorun.head;
            // context switch
            up_switchcontext(rtcb, nexttcb);
        }
    }
}
Example #4
irqstate_t enter_critical_section(void)
{
  FAR struct tcb_s *rtcb;

  /* Do nothing if called from an interrupt handler */

  if (up_interrupt_context())
    {
      /* The value returned does not matter.  We assume only that it is a
       * scalar here.
       */

      return (irqstate_t)0;
    }

  /* Do we already have interrupts disabled? */

  rtcb = this_task();
  DEBUGASSERT(rtcb != NULL);

  if (rtcb->irqcount > 0)
    {
      /* Yes... make sure that the spinlock is set and increment the IRQ
       * lock count.
       */

      DEBUGASSERT(g_cpu_irqlock == SP_LOCKED && rtcb->irqcount < INT16_MAX);
      rtcb->irqcount++;
    }
  else
    {
      /* NO.. Take the spinlock to get exclusive access and set the lock
       * count to 1.
       *
       * We must avoid the case where a context switch occurs between
       * taking the g_cpu_irqlock and disabling interrupts.  Interrupt
       * disables must also follow a stacked order:  we cannot allow other
       * context switches to re-order the enabling/disabling of interrupts.
       *
       * The scheduler accomplishes this by treating the irqcount like
       * lockcount:  Both will disable pre-emption.
       */

      spin_setbit(&g_cpu_irqset, this_cpu(), &g_cpu_irqsetlock,
                  &g_cpu_irqlock);
      rtcb->irqcount = 1;

#ifdef CONFIG_SCHED_INSTRUMENTATION_CSECTION
      /* Note that we have entered the critical section */

      sched_note_csection(rtcb, true);
#endif
    }

  /* Then disable interrupts (they may already be disabled, but we need to
   * return valid interrupt status in any event).
   */

  return up_irq_save();
}
Example #5
int
SPI::transfer(uint8_t *send, uint8_t *recv, unsigned len)
{
	int result;

	if ((send == nullptr) && (recv == nullptr))
		return -EINVAL;

	LockMode mode = up_interrupt_context() ? LOCK_NONE : locking_mode;

	/* lock the bus as required */
	switch (mode) {
	default:
	case LOCK_PREEMPTION:
		{
			irqstate_t state = irqsave();
			result = _transfer(send, recv, len);
			irqrestore(state);
		}
		break;
	case LOCK_THREADS:
		SPI_LOCK(_dev, true);
		result = _transfer(send, recv, len);
		SPI_LOCK(_dev, false);
		break;
	case LOCK_NONE:
		result = _transfer(send, recv, len);
		break;
	}
	return result;
}
Example #6
void board_go_to_standby(void)
{
  /* We cannot power off the display from an interrupt. */

  if (!up_interrupt_context())
    {
      sched_lock();

      /* Power-off display. */
#ifdef CONFIG_THINGSEE_DISPLAY_MODULE
      board_lcdoff();
#endif
    }

  DEBUGASSERT((getreg32(STM32_PWR_CR) & PWR_CR_DBP) == 0);
  stm32_pwr_enablebkp(true);

  /* Setup bootloader to jump directly to firmware. */

  putreg32(BOARD_FIRMWARE_BASE_ADDR, CONFIG_BOOTLOADER_ADDR_BKREG);

  /* Setup backup register for standby mode (checked after reset in boot-up
   * routine).
   */

  putreg32(CONFIG_STANDBYMODE_MAGIC, CONFIG_STANDBYMODE_MAGIC_BKREG);

  stm32_pwr_enablebkp(false);

  lldbg("Driving MCU to standby mode (with wake-up by power-button)...\n");
  up_mdelay(250);

  board_systemreset();
}
Example #7
/*
 * Ensure that snapshots of timer values account for rollover of the clock.
 *
 * When not in interrupt context:
 *   If rollover occurs after reading clock_value then tick_count may or may
 *   not be valid. Reading the values again, just after the rollover, will
 *   ensure they are correct.
 *
 * When in interrupt context, detecting an accurate tick_count becomes more
 * difficult:
 *   - once in interrupt context the global tickcount will not change until
 *     processed by the systick ISR
 *   - rollover can happen any time before or during the current ISR
 *   - once rollover is detected the tickcount value must be corrected
 *   - hrt_rollover must track if rollover had previously occurred
 *
 * timer_snapshot() is interrupt context safe.
 */
static inline void timer_snapshot(uint32_t *tick_count, uint32_t *clock_value)
{
    uint32_t systick_ctrl, ticks, clock;

    if (up_interrupt_context()) {
        systick_ctrl = getreg32(NVIC_SYSTICK_CTRL);
        if (systick_ctrl & NVIC_SYSTICK_CTRL_COUNTFLAG) {
            hrt_rollover = true;
        }

        clock = getreg32(NVIC_SYSTICK_CURRENT);
        ticks = clock_systimer();
        if (clock < getreg32(NVIC_SYSTICK_CURRENT)) {
            clock = getreg32(NVIC_SYSTICK_CURRENT);
            ticks++;
        } else if (hrt_rollover) {
            ticks++;
        }
    } else {
        clock = getreg32(NVIC_SYSTICK_CURRENT);
        ticks = clock_systimer();

        if (clock < getreg32(NVIC_SYSTICK_CURRENT)) {
            clock = getreg32(NVIC_SYSTICK_CURRENT);
            ticks = clock_systimer();
        }
    }

    *tick_count = ticks;
    *clock_value = clock;
}
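A hedged sketch of how a caller in the same compilation unit might combine the two snapshot values into a single monotonic count. SYSTICK_RELOAD, the tick period, and the hrt_absolute_ticks() name are assumptions for illustration; the real conversion constants are board specific.

#include <stdint.h>

#define SYSTICK_RELOAD  167999U   /* Hypothetical: 168 MHz core, 1 ms tick */

static uint64_t hrt_absolute_ticks(void)
{
    uint32_t ticks, clock;

    timer_snapshot(&ticks, &clock);

    /* SysTick counts down from the reload value, so the portion of the
     * current tick that has elapsed is (reload - current).
     */
    return (uint64_t)ticks * (SYSTICK_RELOAD + 1) + (SYSTICK_RELOAD - clock);
}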
Example #8
static inline FAR struct usbhost_state_s *usbhost_allocclass(void)
{
  FAR struct usbhost_state_s *priv;

  DEBUGASSERT(!up_interrupt_context());
  priv = (FAR struct usbhost_state_s *)kmalloc(sizeof(struct usbhost_state_s));
  uvdbg("Allocated: %p\n", priv);;
  return priv;
}
Example #9
static FAR sigpendq_t *sig_allocatependingsignal(void)
{
  FAR sigpendq_t *sigpend;
  irqstate_t      flags;

  /* Check if we were called from an interrupt handler. */

  if (up_interrupt_context())
    {
      /* Try to get the pending signal structure from the free list */

      sigpend = (FAR sigpendq_t *)sq_remfirst(&g_sigpendingsignal);
      if (!sigpend)
        {
          /* If no pending signal structure is available in the free list,
           * then try the special list of structures reserved for
           * interrupt handlers
           */

          sigpend = (FAR sigpendq_t *)sq_remfirst(&g_sigpendingirqsignal);
        }
    }

  /* If we were not called from an interrupt handler, then we are
   * free to allocate pending action structures if necessary.
   */

  else
    {
      /* Try to get the pending signal structure from the free list */

      flags = enter_critical_section();
      sigpend = (FAR sigpendq_t *)sq_remfirst(&g_sigpendingsignal);
      leave_critical_section(flags);

      /* Check if we got one. */

      if (!sigpend)
        {
          /* No... Allocate the pending signal */

          sigpend = (FAR sigpendq_t *)kmm_malloc((sizeof (sigpendq_t)));

          /* Check if we got an allocated message */

          if (sigpend)
            {
              sigpend->type = SIG_ALLOC_DYN;
            }
        }
    }

  return sigpend;
}
Example #10
FAR struct mqueue_msg_s *mq_msgalloc(void)
{
  FAR struct mqueue_msg_s *mqmsg;
  irqstate_t flags;

  /* If we were called from an interrupt handler, then try to get the message
   * from the generally available list of messages.  If this fails, then try
   * the list of messages reserved for interrupt handlers.
   */

  if (up_interrupt_context())
    {
      /* Try the general free list */

      mqmsg = (FAR struct mqueue_msg_s *)sq_remfirst(&g_msgfree);
      if (mqmsg == NULL)
        {
          /* Try the free list reserved for interrupt handlers */

          mqmsg = (FAR struct mqueue_msg_s *)sq_remfirst(&g_msgfreeirq);
        }
    }

  /* We were not called from an interrupt handler. */

  else
    {
      /* Try to get the message from the generally available free list.
       * Disable interrupts -- an interrupt handler may also be allocating
       * from this list.
       */

      flags = enter_critical_section();
      mqmsg = (FAR struct mqueue_msg_s *)sq_remfirst(&g_msgfree);
      leave_critical_section(flags);

      /* If we cannot get a message from the free list, then we will have to
       * allocate one.
       */

      if (mqmsg == NULL)
        {
          mqmsg = (FAR struct mqueue_msg_s *)
            kmm_malloc((sizeof (struct mqueue_msg_s)));

          /* Check if we allocated the message */

          if (mqmsg != NULL)
            {
              /* Yes... remember that this message was dynamically allocated */

              mqmsg->type = MQ_ALLOC_DYN;
            }
        }
    }

  return mqmsg;
}
void up_reprioritize_rtr(struct tcb_s *tcb, uint8_t priority)
{
    /* Verify that the caller is sane */

    if (tcb->task_state < FIRST_READY_TO_RUN_STATE ||
        tcb->task_state > LAST_READY_TO_RUN_STATE
#if SCHED_PRIORITY_MIN > 0
        || priority < SCHED_PRIORITY_MIN
#endif
#if SCHED_PRIORITY_MAX < UINT8_MAX
        || priority > SCHED_PRIORITY_MAX
#endif
        ) {
        warn("%s: task sched error\n", __func__);
        return;
    }
    else {
        struct tcb_s *rtcb = current_task;
        bool switch_needed;

        /* Remove the tcb task from the ready-to-run list.
         * sched_removereadytorun will return true if we just
         * remove the head of the ready to run list.
         */
        switch_needed = sched_removereadytorun(tcb);

        /* Set up the new task priority */
        tcb->sched_priority = (uint8_t)priority;

        /* Return the task to the ready-to-run task list.
         * sched_addreadytorun will return true if the task was added
         * at the head of the list.  We will need to perform a context
         * switch only if the EXCLUSIVE OR of the two calls is non-zero
         * (i.e., one and only one of the calls changes the head of the
         * ready-to-run list).
         */
        switch_needed ^= sched_addreadytorun(tcb);

        /* Now, perform the context switch if one is needed */
        if (switch_needed && !up_interrupt_context()) {
            struct tcb_s *nexttcb;
            // If there are any pending tasks, then add them to the g_readytorun
            // task list now. Normally up_release_pending(), called from
            // sched_unlock(), would do this once preemption is re-enabled,
            // but since this task is switching out anyway it is OK to merge
            // them here.
            if (g_pendingtasks.head) {
                warn("Disable preemption failed for reprioritize task\n");
                sched_mergepending();
            }

            nexttcb = (struct tcb_s*)g_readytorun.head;
            // context switch
            up_switchcontext(rtcb, nexttcb);
        }
    }
}
FAR sigq_t *sig_allocatependingsigaction(void)
{
    FAR sigq_t    *sigq;
    irqstate_t saved_state;

    /* Check if we were called from an interrupt handler. */

    if (up_interrupt_context())
    {
        /* Try to get the pending signal action structure from the free list */

        sigq = (FAR sigq_t*)sq_remfirst(&g_sigpendingaction);

        /* If that failed, then try the special list of structures reserved for
         * interrupt handlers
         */

        if (!sigq)
        {
            sigq = (FAR sigq_t*)sq_remfirst(&g_sigpendingirqaction);
        }
    }

    /* If we were not called from an interrupt handler, then we are
     * free to allocate pending signal action structures if necessary. */

    else
    {
        /* Try to get the pending signal action structure from the free list */

        saved_state = irqsave();
        sigq = (FAR sigq_t*)sq_remfirst(&g_sigpendingaction);
        irqrestore(saved_state);

        /* Check if we got one. */

        if (!sigq)
        {
            /* No...Try the resource pool */

            sigq = (FAR sigq_t *)kmalloc((sizeof (sigq_t)));

            /* Check if we got an allocated message */

            if (sigq)
            {
                sigq->type = SIG_ALLOC_DYN;
            }
        }
    }

    return sigq;
}
ssize_t
uORB::DeviceNode::write(struct file *filp, const char *buffer, size_t buflen)
{
    /*
     * Writes are legal from interrupt context as long as the
     * object has already been initialised from thread context.
     *
     * Writes outside interrupt context will allocate the object
     * if it has not yet been allocated.
     *
     * Note that filp will usually be NULL.
     */
    if (nullptr == _data) {
        if (!up_interrupt_context()) {

            lock();

            /* re-check now that we hold the lock */
            if (nullptr == _data) {
                _data = new uint8_t[_meta->o_size * _queue_size];
            }

            unlock();
        }

        /* failed or could not allocate */
        if (nullptr == _data) {
            return -ENOMEM;
        }
    }

    /* If write size does not match, that is an error */
    if (_meta->o_size != buflen) {
        return -EIO;
    }

    /* Perform an atomic copy. */
    irqstate_t flags = px4_enter_critical_section();
    memcpy(_data + (_meta->o_size * (_generation % _queue_size)), buffer, _meta->o_size);

    /* update the timestamp and generation count */
    _last_update = hrt_absolute_time();
    /* wrap-around happens after ~49 days, assuming a publisher rate of 1 kHz */
    _generation++;

    _published = true;

    px4_leave_critical_section(flags);

    /* notify any poll waiters */
    poll_notify(POLLIN);

    return _meta->o_size;
}
Example #14
ssize_t mq_receive(mqd_t mqdes, FAR char *msg, size_t msglen,
                   FAR int *prio)
{
  FAR struct mqueue_msg_s *mqmsg;
  irqstate_t saved_state;
  ssize_t ret = ERROR;

  DEBUGASSERT(up_interrupt_context() == false);

  /* Verify the input parameters and, in case of an error, set
   * errno appropriately.
   */

  if (mq_verifyreceive(mqdes, msg, msglen) != OK)
    {
      return ERROR;
    }

  /* Get the next message from the message queue.  We will disable
   * pre-emption until we have completed the message receipt.  This
   * is not too bad because if the receipt takes a long time, it will
   * be because we are blocked waiting for a message and pre-emption
   * will be re-enabled while we are blocked.
   */

  sched_lock();

  /* Furthermore, mq_waitreceive() expects to have interrupts disabled
   * because messages can be sent from interrupt level.
   */

  saved_state = irqsave();

  /* Get the message from the message queue */

  mqmsg = mq_waitreceive(mqdes);
  irqrestore(saved_state);

  /* Check if we got a message from the message queue.  We might
   * not have a message if:
   *
   * - The message queue is empty and O_NONBLOCK is set in the mqdes
   * - The wait was interrupted by a signal
   */

  if (mqmsg)
    {
      ret = mq_doreceive(mqdes, mqmsg, msg, prio);
    }

  sched_unlock();
  return ret;
}
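A minimal receiver-side sketch using the front end shown above; the queue name, attributes, and buffer size are illustrative assumptions, and prio is declared int to match the prototype used in this tree.

#include <sys/types.h>
#include <mqueue.h>
#include <fcntl.h>
#include <stdio.h>

#define MY_QUEUE_NAME  "/hypothetical_q"   /* Illustrative queue name */
#define MY_MSG_SIZE    32

static void receiver_task(void)
{
  struct mq_attr attr;
  char buffer[MY_MSG_SIZE];
  int prio;

  attr.mq_maxmsg  = 8;
  attr.mq_msgsize = MY_MSG_SIZE;
  attr.mq_flags   = 0;

  /* Open (or create) the queue; without O_NONBLOCK, mq_receive() blocks
   * until a message arrives or the wait is interrupted by a signal.
   */

  mqd_t mq = mq_open(MY_QUEUE_NAME, O_RDONLY | O_CREAT, 0666, &attr);

  ssize_t nbytes = mq_receive(mq, buffer, sizeof(buffer), &prio);
  if (nbytes >= 0)
    {
      printf("Received %zd bytes at priority %d\n", nbytes, prio);
    }

  mq_close(mq);
}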
Example #15
FAR struct igmp_group_s *igmp_grpalloc(FAR struct net_driver_s *dev,
                                       FAR const in_addr_t *addr)
{
  FAR struct igmp_group_s *group;
  net_lock_t flags;

  nllvdbg("addr: %08x dev: %p\n", *addr, dev);
  if (up_interrupt_context())
    {
#if CONFIG_PREALLOC_IGMPGROUPS > 0
      grplldbg("Use a pre-allocated group entry\n");
      group = igmp_grpprealloc();
#else
      grplldbg("Cannot allocate from interrupt handler\n");
      group = NULL;
#endif
    }
  else
    {
      grplldbg("Allocate from the heap\n");
      group = igmp_grpheapalloc();
    }

  grplldbg("group: %p\n", group);

  /* Check if we successfully allocated a group structure */

  if (group)
    {
      /* Initialize the non-zero elements of the group structure */

      net_ipv4addr_copy(group->grpaddr, *addr);
      sem_init(&group->sem, 0, 0);

      /* Initialize the group timer (but don't start it yet) */

      group->wdog = wd_create();
      DEBUGASSERT(group->wdog);

      /* Interrupts must be disabled in order to modify the group list */

      flags = net_lock();

      /* Add the group structure to the list in the device structure */

      sq_addfirst((FAR sq_entry_t *)group, &dev->grplist);
      net_unlock(flags);
    }

  return group;
}
Example #16
void up_unblock_task(struct tcb_s *tcb)
{
  /* Verify that the context switch can be performed */

  if ((tcb->task_state < FIRST_BLOCKED_STATE) ||
      (tcb->task_state > LAST_BLOCKED_STATE))
    {
      warn("%s: task sched error\n", __func__);
      return;
    }
  else
    {
      struct tcb_s *rtcb = current_task;

      /* Remove the task from the blocked task list */

      sched_removeblocked(tcb);

      /* Add the task in the correct location in the prioritized
       * ready-to-run task list.
       */

      if (sched_addreadytorun(tcb) && !up_interrupt_context())
        {
          /* The currently active task has changed! */
          /* Update scheduler parameters */

          sched_suspend_scheduler(rtcb);

          /* Get the TCB of the task that will run next */

          struct tcb_s *nexttcb = this_task();

#ifdef CONFIG_ARCH_ADDRENV
          /* Make sure that the address environment for the previously
           * running task is closed down gracefully (data caches dump,
           * MMU flushed) and set up the address environment for the new
           * thread at the head of the ready-to-run list.

          (void)group_addrenv(nexttcb);
#endif
          /* Update scheduler parameters */

          sched_resume_scheduler(nexttcb);

          /* context switch */

          up_switchcontext(rtcb, nexttcb);
        }
    }
}
Example #17
FAR struct mqueue_msg_s *mq_msgalloc(void)
{
  FAR struct mqueue_msg_s *mqmsg;
  irqstate_t saved_state;

  /* If we were called from an interrupt handler, then try to get the message
   * from the generally available list of messages.  If this fails, then try
   * the list of messages reserved for interrupt handlers.
   */

  if (up_interrupt_context())
    {
      /* Try the general free list */

      mqmsg = (FAR struct mqueue_msg_s *)sq_remfirst(&g_msgfree);
      if (!mqmsg)
        {
          /* Try the free list reserved for interrupt handlers */

          mqmsg = (FAR struct mqueue_msg_s *)sq_remfirst(&g_msgfreeirq);
        }
    }

  /* We were not called from an interrupt handler. */

  else
    {
      /* Try to get the message from the generally available free list.
       * Disable interrupts -- an interrupt handler may also be allocating
       * from this list.
       */

      saved_state = irqsave();
      mqmsg = (FAR struct mqueue_msg_s *)sq_remfirst(&g_msgfree);
      irqrestore(saved_state);

      /* If we cannot get a message from the free list, then we will have to allocate one. */

      if (!mqmsg)
        {
          mqmsg = (FAR struct mqueue_msg_s *)kmm_malloc((sizeof (struct mqueue_msg_s)));

          /* Check if we got an allocated message */

          ASSERT(mqmsg);
          mqmsg->type = MQ_ALLOC_DYN;
        }
    }

  return mqmsg;
}
void up_assert(const uint8_t *filename, int line)
{
    fprintf(stderr, "Assertion failed at file:%s line: %d\n", filename, line);

    // An assertion in interrupt context or in the IDLE task is a kernel
    // error that stops the OS; otherwise just terminate the failing task.
    if (up_interrupt_context() || current_task->pid == 0) {
        panic("%s: %d\n", __func__, __LINE__);
    }
    else {
        exit(EXIT_FAILURE);
    }
}
/**
 * This function is called in non-interrupt context
 * to switch tasks.
 * Assumption: global interrupt is disabled.
 */
static inline void up_switchcontext(struct tcb_s *ctcb, struct tcb_s *ntcb)
{
    // do nothing if two tasks are the same
    if (ctcb == ntcb)
        return;

    // this function cannot be called in interrupt context
    if (up_interrupt_context()) {
        panic("%s: try to switch context in interrupt\n", __func__);
    }

    // start switch
    current_task = ntcb;
    rgmp_context_switch(ctcb ? &ctcb->xcp.ctx : NULL, &ntcb->xcp.ctx);
}
static int usbhost_disconnected(struct usbhost_class_s *usbclass)
{
  FAR struct usbhost_state_s *priv = (FAR struct usbhost_state_s *)usbclass;
  irqstate_t flags;

  DEBUGASSERT(priv != NULL);

  /* Set an indication to any users of the device that the device is no
   * longer available.
   */

  flags              = irqsave();
  priv->disconnected = true;

  /* Now check the number of references on the class instance.  If it is one,
   * then we can free the class instance now.  Otherwise, we will have to
   * wait until the holders of the references free them by closing the
   * block driver.
   */

  ullvdbg("crefs: %d\n", priv->crefs);
  if (priv->crefs == 1)
    {
      /* Destroy the class instance.  If we are executing from an interrupt
       * handler, then defer the destruction to the worker thread.
       * Otherwise, destroy the instance now.
       */

      if (up_interrupt_context())
        {
          /* Destroy the instance on the worker thread. */

          uvdbg("Queuing destruction: worker %p->%p\n", priv->work.worker, usbhost_destroy);
          DEBUGASSERT(priv->work.worker == NULL);
          (void)work_queue(HPWORK, &priv->work, usbhost_destroy, priv, 0);
        }
      else
        {
          /* Do the work now */

          usbhost_destroy(priv);
        }
    }

  irqrestore(flags);
  return OK;
}
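The defer-to-worker pattern above generalizes to any interrupt-driven driver. A brief sketch under the assumption that CONFIG_SCHED_HPWORK is enabled; the work item and handler names are illustrative.

#include <assert.h>
#include <nuttx/arch.h>
#include <nuttx/wqueue.h>

static struct work_s g_cleanup_work;   /* Illustrative work item */

static void cleanup_worker(FAR void *arg)
{
  /* Runs on the high-priority worker thread, outside interrupt context */
}

static void request_cleanup(FAR void *arg)
{
  if (up_interrupt_context())
    {
      /* Defer to the worker thread, as usbhost_disconnected() does */

      DEBUGASSERT(g_cleanup_work.worker == NULL);
      (void)work_queue(HPWORK, &g_cleanup_work, cleanup_worker, arg, 0);
    }
  else
    {
      /* Safe to do the work immediately */

      cleanup_worker(arg);
    }
}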
Example #21
ssize_t
ORBDevNode::write(struct file *filp, const char *buffer, size_t buflen)
{
	/*
	 * Writes are legal from interrupt context as long as the
	 * object has already been initialised from thread context.
	 *
	 * Writes outside interrupt context will allocate the object
	 * if it has not yet been allocated.
	 *
	 * Note that filp will usually be NULL.
	 */
	if (nullptr == _data) {
		if (!up_interrupt_context()) {

			lock();

			/* re-check now that we hold the lock */
			if (nullptr == _data)
				_data = new uint8_t[_meta->o_size];

			unlock();
		}

		/* failed or could not allocate */
		if (nullptr == _data)
			return -ENOMEM;
	}

	/* If write size does not match, that is an error */
	if (_meta->o_size != buflen)
		return -EIO;

	/* Perform an atomic copy. */
	irqstate_t flags = irqsave();
	memcpy(_data, buffer, _meta->o_size);
	irqrestore(flags);

	/* update the timestamp and generation count */
	_last_update = hrt_absolute_time();
	_generation++;

	/* notify any poll waiters */
	poll_notify(POLLIN);

	return _meta->o_size;
}
Example #22
int sem_trywait(FAR sem_t *sem)
{
  FAR _TCB  *rtcb = (FAR _TCB*)g_readytorun.head;
  irqstate_t saved_state;
  int        ret = ERROR;

  /* This API should not be called from interrupt handlers */

  DEBUGASSERT(up_interrupt_context() == false);

  /* Assume any errors reported are due to invalid arguments. */

  *get_errno_ptr() = EINVAL;

  if (sem)
    {
      /* The following operations must be performed with interrupts
       * disabled because sem_post() may be called from an interrupt
       * handler.
       */

      saved_state = irqsave();

      /* Any further errors could only occur because the semaphore
       * is not available.
       */

      *get_errno_ptr() = EAGAIN;

      /* If the semaphore is available, give it to the requesting task */

      if (sem->semcount > 0)
        {
          /* It is, let the task take the semaphore */

          sem->semcount--;
          rtcb->waitsem = NULL;
          ret = OK;
        }

      /* Interrupts may now be enabled. */

      irqrestore(saved_state);
    }

  return ret;
}
Example #23
FAR struct iob_qentry_s *iob_alloc_qentry(void)
{
  /* Were we called from the interrupt level? */

  if (up_interrupt_context())
    {
      /* Yes, then try to allocate an I/O buffer without waiting */

      return iob_tryalloc_qentry();
    }
  else
    {
      /* Then allocate an I/O buffer, waiting as necessary */

      return iob_allocwait_qentry();
    }
}
Example #24
int sem_trywait(FAR sem_t *sem)
{
  FAR struct tcb_s *rtcb = this_task();
  irqstate_t flags;
  int ret = ERROR;

  /* This API should not be called from interrupt handlers */

  DEBUGASSERT(up_interrupt_context() == false);

  /* Assume any errors reported are due to invalid arguments. */

  set_errno(EINVAL);

  if (sem)
    {
      /* The following operations must be performed with interrupts disabled
       * because sem_post() may be called from an interrupt handler.
       */

      flags = enter_critical_section();

      /* Any further errors could only occur because the semaphore is not
       * available.
       */

      set_errno(EAGAIN);

      /* If the semaphore is available, give it to the requesting task */

      if (sem->semcount > 0)
        {
          /* It is, let the task take the semaphore */

          sem->semcount--;
          rtcb->waitsem = NULL;
          ret = OK;
        }

      /* Interrupts may now be enabled. */

      leave_critical_section(flags);
    }

  return ret;
}
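A brief task-context usage sketch of the function above; the semaphore and helper names are illustrative, and the semaphore is assumed to have been initialized elsewhere with sem_init().

#include <semaphore.h>
#include <errno.h>

static sem_t g_resource_sem;   /* Assumed initialized elsewhere */

static int try_use_resource(void)
{
  /* Non-blocking attempt; never call this from an interrupt handler,
   * as the DEBUGASSERT above enforces.
   */

  if (sem_trywait(&g_resource_sem) != 0)
    {
      /* errno is EAGAIN if the semaphore was simply unavailable */

      return (errno == EAGAIN) ? -EAGAIN : -EINVAL;
    }

  /* ... use the protected resource ... */

  sem_post(&g_resource_sem);
  return 0;
}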
Example #25
int sched_lock(void)
{
  struct tcb_s *rtcb = (struct tcb_s*)g_readytorun.head;

  /* Check for some special cases:  (1) rtcb may be NULL only during
   * early boot-up phases, and (2) sched_lock() should have no
   * effect if called from the interrupt level.
   */

  if (rtcb && !up_interrupt_context())
    {
      ASSERT(rtcb->lockcount < MAX_LOCK_COUNT);
      rtcb->lockcount++;
    }

  return OK;
}
Example #26
int sched_unlock(void)
{
  struct tcb_s *rtcb = (struct tcb_s*)g_readytorun.head;

  /* Check for some special cases:  (1) rtcb may be NULL only during
   * early boot-up phases, and (2) sched_unlock() should have no
   * effect if called from the interrupt level.
   */

  if (rtcb && !up_interrupt_context())
    {
      /* Prevent context switches throughout the following */

      irqstate_t flags = irqsave();

      /* Decrement the preemption lock counter */

      if (rtcb->lockcount)
        {
          rtcb->lockcount--;
        }

      /* Check if the lock counter has decremented to zero.  If so,
       * then pre-emption has been re-enabled.
       */

      if (rtcb->lockcount <= 0)
        {
          rtcb->lockcount = 0;

          /* Release any ready-to-run tasks that have collected in
           * g_pendingtasks.
           */

          if (g_pendingtasks.head)
            {
              up_release_pending();
            }
        }

      irqrestore(flags);
    }

  return OK;
}
Example #27
      /* Restore the previous interrupt state, which may still have interrupts
       * disabled (but we don't have a mechanism to verify that now)
       */

      up_irq_restore(flags);
    }
}
#else /* defined(CONFIG_SCHED_INSTRUMENTATION_CSECTION) */
void leave_critical_section(irqstate_t flags)
{
  /* Check if we were called from an interrupt handler */

  if (!up_interrupt_context())
    {
      FAR struct tcb_s *rtcb = this_task();
      DEBUGASSERT(rtcb != NULL);

      /* Note that we have left the critical section */

      sched_note_csection(rtcb, false);
    }

  /* Restore the previous interrupt state. */

  up_irq_restore(flags);
}
Example #28
  /* Then disable interrupts (they may already be disabled, but we need to
   * return valid interrupt status in any event).
   */

  return up_irq_save();
}
#else /* defined(CONFIG_SCHED_INSTRUMENTATION_CSECTION) */
irqstate_t enter_critical_section(void)
{
  /* Check if we were called from an interrupt handler */

  if (!up_interrupt_context())
    {
      FAR struct tcb_s *rtcb = this_task();
      DEBUGASSERT(rtcb != NULL);

      /* No.. note that we have entered the critical section */

      sched_note_csection(rtcb, true);
    }

  /* And disable interrupts */

  return up_irq_save();
}
Example #29
void up_assert(const uint8_t *filename, int line)
{
    fprintf(stderr, "Assertion failed at file:%s line: %d\n", filename, line);

#ifdef CONFIG_BOARD_CRASHDUMP
    board_crashdump(up_getsp(), this_task(), filename, line);
#endif

    // An assertion in interrupt context or in the IDLE task is a kernel
    // error that stops the OS; otherwise just terminate the failing task.
    if (up_interrupt_context() || current_task->pid == 0) {
        panic("%s: %d\n", __func__, __LINE__);
    }
    else {
        exit(EXIT_FAILURE);
    }
}
/****************************************************************************
 * Name: up_block_task
 *
 * Description:
 *   The currently executing task at the head of
 *   the ready-to-run list must be stopped.  Save its context
 *   and move it to the inactive list specified by task_state.
 *
 *   This function is called only from the NuttX scheduling
 *   logic.  Interrupts will always be disabled when this
 *   function is called.
 *
 * Inputs:
 *   tcb: Refers to a task in the ready-to-run list (normally
 *     the task at the head of the list).  It must be
 *     stopped, its context saved, and moved into one of the
 *     waiting task lists.  If it was the task at the head
 *     of the ready-to-run list, then a context switch to the
 *     new ready-to-run task must be performed.
 *   task_state: Specifies which waiting task list should
 *     hold the blocked task's TCB.
 *
 ****************************************************************************/
void up_block_task(struct tcb_s *tcb, tstate_t task_state)
{
    /* Verify that the context switch can be performed */
    if ((tcb->task_state < FIRST_READY_TO_RUN_STATE) ||
        (tcb->task_state > LAST_READY_TO_RUN_STATE)) {
        warn("%s: task sched error\n", __func__);
        return;
    }
    else {
        struct tcb_s *rtcb = current_task;
        bool switch_needed;

        /* Remove the tcb task from the ready-to-run list.  If we
         * are blocking the task at the head of the task list (the
         * most likely case), then a context switch to the next
         * ready-to-run task is needed. In this case, it should
         * also be true that rtcb == tcb.
         */
        switch_needed = sched_removereadytorun(tcb);

        /* Add the task to the specified blocked task list */
        sched_addblocked(tcb, (tstate_t)task_state);

        /* Now, perform the context switch if one is needed */
        if (switch_needed) {
            struct tcb_s *nexttcb;
            // this part should not be executed in interrupt context
            if (up_interrupt_context()) {
                panic("%s: %d\n", __func__, __LINE__);
            }
            // If there are any pending tasks, then add them to the g_readytorun
            // task list now. Normally up_release_pending(), called from
            // sched_unlock(), would do this once preemption is re-enabled,
            // but this task is blocking itself anyway, so it is OK to merge
            // them here.
            if (g_pendingtasks.head) {
                warn("Disable preemption failed for task block itself\n");
                sched_mergepending();
            }
            nexttcb = (struct tcb_s*)g_readytorun.head;
            // context switch
            up_switchcontext(rtcb, nexttcb);
        }
    }
}