Example #1
/**
 * Function: timer callback registered on a queue. Returns the EASYRTOS_TIMEOUT
 * flag to the task whose timeout has expired, removes that task from the
 * queue's suspend list and adds it to the Ready list.
 *
 * Parameters:
 * Input:                                               Output:
 * POINTER cb_data  callback data (TCB to wake, etc.)   POINTER cb_data  callback data (TCB to wake, etc.)
 *
 * Returns: void
 *
 * Functions called:
 * (void)tcb_dequeue_entry (timer_data_ptr->suspQ, timer_data_ptr->tcb_ptr);
 * (void)tcbEnqueuePriority (&tcb_readyQ, timer_data_ptr->tcb_ptr);
 */
static void eQueueTimerCallback (POINTER cb_data)
{
    QUEUE_TIMER *timer_data_ptr;
    CRITICAL_STORE;

    /* Get the QUEUE_TIMER callback data pointer */
    timer_data_ptr = (QUEUE_TIMER *)cb_data;

    /* Check the parameter is valid */
    if (timer_data_ptr)
    {
        /* Enter critical region */
        CRITICAL_ENTER ();

        /* Set the status flag to show the task was woken by the timer callback (timeout) */
        timer_data_ptr->tcb_ptr->pendedWakeStatus = EASYRTOS_TIMEOUT;

        /* Flag as no timeout timer registered */
        timer_data_ptr->tcb_ptr->pended_timo_cb = NULL;

        /* Remove the task from the suspend list (the queue's receive or send list) */
        (void)tcb_dequeue_entry (timer_data_ptr->suspQ, timer_data_ptr->tcb_ptr);

        /* Put the task on the Ready queue */
        if (tcbEnqueuePriority (&tcb_readyQ, timer_data_ptr->tcb_ptr) == EASYRTOS_OK)
        {
          timer_data_ptr->tcb_ptr->state = TASK_READY;
        }

        /* Exit critical region */
        CRITICAL_EXIT ();
    }
}
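
For reference, a minimal sketch of the QUEUE_TIMER callback-data layout this routine relies on. The field names are taken from the caller shown in the eQueueGive() example further down (which fills in tcb_ptr, queue_ptr and suspQ); the exact types and definition in the EasyRTOS headers may differ.

typedef struct
{
    EASYRTOS_TCB   *tcb_ptr;    /* Task to wake when the timeout expires */
    EASYRTOS_QUEUE *queue_ptr;  /* Queue the task is blocked on */
    EASYRTOS_TCB  **suspQ;      /* Suspend list (send or receive) the task sits on */
} QUEUE_TIMER;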
Example #2
/**
 * \b atomSemTimerCallback
 *
 * This is an internal function not for use by application code.
 *
 * Timeouts on suspended threads are notified by the timer system through
 * this generic callback. The timer system calls us back with a pointer to
 * the relevant \c SEM_TIMER object which is used to retrieve the
 * semaphore details.
 *
 * @param[in] cb_data Pointer to a SEM_TIMER object
 */
static void atomSemTimerCallback (POINTER cb_data)
{
    SEM_TIMER *timer_data_ptr;
    CRITICAL_STORE;

    /* Get the SEM_TIMER structure pointer */
    timer_data_ptr = (SEM_TIMER *)cb_data;

    /* Check parameter is valid */
    if (timer_data_ptr)
    {
        /* Enter critical region */
        CRITICAL_START ();

        /* Set status to indicate to the waiting thread that it timed out */
        timer_data_ptr->tcb_ptr->suspend_wake_status = ATOM_TIMEOUT;

        /* Flag as no timeout registered */
        timer_data_ptr->tcb_ptr->suspend_timo_cb = NULL;

        /* Remove this thread from the semaphore's suspend list */
        (void)tcbDequeueEntry (&timer_data_ptr->sem_ptr->suspQ, timer_data_ptr->tcb_ptr);

        /* Put the thread on the ready queue */
        (void)tcbEnqueuePriority (&tcbReadyQ, timer_data_ptr->tcb_ptr);

        /* Exit critical region */
        CRITICAL_END ();

        /**
         * Note that we don't call the scheduler now as it will be called
         * when we exit the ISR by atomIntExit().
         */
    }
}
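
For reference, the SEM_TIMER callback data is essentially the two pointers the callback dereferences. A sketch based on the fields populated by atomSemGet() below; the actual definition in the atomthreads sources may differ.

typedef struct
{
    ATOM_TCB *tcb_ptr;  /* Thread to wake when the timeout expires */
    ATOM_SEM *sem_ptr;  /* Semaphore the thread is suspended on */
} SEM_TIMER;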
Example #3
/**
 * \b atomTimerDelayCallback
 *
 * This is an internal function not for use by application code.
 *
 * Callback for atomTimerDelay() calls. Wakes up the sleeping threads.
 *
 * @param[in] cb_data Callback parameter (DELAY_TIMER ptr for sleeping thread)
 *
 * @return None
 */
static void atomTimerDelayCallback (POINTER cb_data)
{
    DELAY_TIMER *timer_data_ptr;
    CRITICAL_STORE;

    /* Get the DELAY_TIMER structure pointer */
    timer_data_ptr = (DELAY_TIMER *)cb_data;

    /* Check parameter is valid */
    if (timer_data_ptr)
    {
        /* Enter critical region */
        CRITICAL_START ();

        /* Put the thread on the ready queue */
        (void)tcbEnqueuePriority (&tcbReadyQ, timer_data_ptr->tcb_ptr);

        /* Exit critical region */
        CRITICAL_END ();

        /**
         * Don't call the scheduler yet. The ISR exit routine will do this
         * in case there are other callbacks to be made, which may also make
         * threads ready.
         */
    }
}
Example #4
/**
 * Function: timer callback registered for a semaphore. Returns the
 * EASYRTOS_TIMEOUT flag to the task whose timeout has expired, removes that
 * task from the semaphore's suspend list and adds it to the Ready list.
 *
 * Parameters:
 * Input:                                               Output:
 * POINTER cb_data  callback data (TCB to wake, etc.)   POINTER cb_data  callback data (TCB to wake, etc.)
 *
 * Returns: void
 *
 * Functions called:
 * (void)tcb_dequeue_entry (&timer_data_ptr->sem_ptr->suspQ, timer_data_ptr->tcb_ptr);
 * (void)tcbEnqueuePriority (&tcb_readyQ, timer_data_ptr->tcb_ptr);
 */
static void eSemTimerCallback (POINTER cb_data)
{
    SEM_TIMER *timer_data_ptr;
    CRITICAL_STORE;

    /* Get the SEM_TIMER structure pointer */
    timer_data_ptr = (SEM_TIMER *)cb_data;

    /* Check the parameter is valid */
    if (timer_data_ptr)
    {
      /* Enter critical region */
      CRITICAL_ENTER ();

      /* Set the status flag to show the task was woken because its timeout expired */
      timer_data_ptr->tcb_ptr->pendedWakeStatus = EASYRTOS_TIMEOUT;

      /* Flag as no timeout timer registered */
      timer_data_ptr->tcb_ptr->pended_timo_cb = NULL;

      /* Remove the task from the semaphore's suspend list */
      (void)tcb_dequeue_entry (&timer_data_ptr->sem_ptr->suspQ, timer_data_ptr->tcb_ptr);

      /* Put the task on the Ready queue */
      if (tcbEnqueuePriority (&tcb_readyQ, timer_data_ptr->tcb_ptr) == EASYRTOS_OK)
      {
        timer_data_ptr->tcb_ptr->state = TASK_READY;
      }

      /* Exit critical region */
      CRITICAL_EXIT ();

      /* The scheduler is not started here; it will be started via atomIntExit() when the timer ISR exits */
    }
}
Example #5
/**
 * Function: set the count of a counting semaphore. If the count is 0 and a
 * task is suspended on the semaphore, the head of the suspend list is moved
 * onto the Ready list first.
 *
 * Parameters:
 * Input:                                        Output:
 * EASYRTOS_SEM *sem  semaphore pointer          EASYRTOS_SEM *sem  semaphore pointer
 * uint8_t count      count value to set
 *
 * Returns:
 * EASYRTOS_OK         success
 * EASYRTOS_ERR_PARAM  bad parameter
 * EASYRTOS_ERR_QUEUE  failed to put a woken task on the Ready queue
 *
 * Functions called:
 * tcb_dequeue_head (&sem->suspQ);
 * tcbEnqueuePriority (&tcb_readyQ, tcb_ptr);
 */
ERESULT eSemResetCount (EASYRTOS_SEM *sem, uint8_t count)
{
  ERESULT status;
  CRITICAL_STORE;
  EASYRTOS_TCB *tcb_ptr;
  

  
  /* Check parameters */
  if (sem == NULL || sem->type != SEM_COUNTY)
  {
    status = EASYRTOS_ERR_PARAM;
  }
  else
  {
    if (sem->suspQ && sem->count == 0)
    {
      /* Enter critical region */
      CRITICAL_ENTER ();
      tcb_ptr = tcb_dequeue_head (&sem->suspQ);
      if (tcbEnqueuePriority (&tcb_readyQ, tcb_ptr) != EASYRTOS_OK)
      {
        /* Failed to put the task on the Ready list, exit critical region */
        CRITICAL_EXIT ();

        status = EASYRTOS_ERR_QUEUE;
      }
      else
        CRITICAL_EXIT ();
    }

    /* Set the count value */
    sem->count = count;

    /* Success */
    status = EASYRTOS_OK;
  }
  return (status);  
}
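
A short usage sketch: resetting a counting semaphore to a known value and checking the result. event_sem is a placeholder for a semaphore assumed to have been created elsewhere as a counting (SEM_COUNTY) semaphore.

/* Illustrative only */
extern EASYRTOS_SEM event_sem;

void reset_event_counter (void)
{
    if (eSemResetCount (&event_sem, 0) != EASYRTOS_OK)
    {
        /* EASYRTOS_ERR_PARAM: NULL pointer or not a counting semaphore */
    }
}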
Example #6
/**
 * Function: give (post) a semaphore. Behaviour depends on the semaphore type:
 * 1. Binary semaphore
 *    If the count is 0 it is set to 1; if it is already 1 an overflow error
 *    is returned.
 * 2. Counting semaphore
 *    The count is incremented; if it is already at the maximum (32767) an
 *    overflow error is returned.
 * 3. Mutex
 *    When called by the owner with count <= 0, the count is incremented;
 *    once the count reaches 1 the owning task is cleared.
 *    When called by a non-owner, EASYRTOS_ERR_OWNERSHIP is returned.
 * If a suspended task is woken, the scheduler is called.
 *
 * Parameters:
 * Input:                                      Output:
 * EASYRTOS_SEM *sem  semaphore pointer        EASYRTOS_SEM *sem  semaphore pointer
 *
 * Returns:
 * EASYRTOS_OK             success
 * EASYRTOS_ERR_OVF        counting semaphore count would overflow (>32767)
 * EASYRTOS_ERR_PARAM      bad parameter
 * EASYRTOS_ERR_QUEUE      failed to put a woken task on the Ready queue
 * EASYRTOS_ERR_TIMER      failed to cancel a registered timer
 * EASYRTOS_ERR_BIN_OVF    binary semaphore count is already 1
 * EASYRTOS_SEM_UINIT      semaphore has not been initialised
 * EASYRTOS_ERR_OWNERSHIP  task trying to unlock the mutex is not its owner
 *
 * Functions called:
 * eCurrentContext();
 * tcb_dequeue_head (&sem->suspQ);
 * tcbEnqueuePriority (&tcb_readyQ, tcb_ptr);
 * eTimerCancel (tcb_ptr->pended_timo_cb);
 * easyRTOSSched (FALSE);
 */
ERESULT eSemGive (EASYRTOS_SEM * sem)
{
  ERESULT status;
  CRITICAL_STORE;
  EASYRTOS_TCB *tcb_ptr;
  EASYRTOS_TCB *curr_tcb_ptr;
  
  /* Check parameters */
  if (sem == NULL)
  {
    status = EASYRTOS_ERR_PARAM;
  }
  else if (sem->type == NULL)
  {
    status = EASYRTOS_SEM_UINIT;
  }
  else
  {
    
    /* Get the TCB of the currently running task */
    curr_tcb_ptr = eCurrentContext();

    /* Enter critical region */
    CRITICAL_ENTER ();

    if (sem->type == SEM_MUTEX && sem->owner != curr_tcb_ptr)
    {
        /* Exit critical region */
        CRITICAL_EXIT ();

        status = EASYRTOS_ERR_OWNERSHIP;
    }

    /* Otherwise move a task suspended on the semaphore onto the Ready list */
    else
    {
      
      if (sem->suspQ && sem->count == 0)
      {
        sem->owner = NULL;
        //if ( sem->type == SEM_MUTEX )sem->count++;
        tcb_ptr = tcb_dequeue_head (&sem->suspQ);
        if (tcbEnqueuePriority (&tcb_readyQ, tcb_ptr) != EASYRTOS_OK)
        {
          
          /* Failed to put the task on the Ready list, exit critical region */
          CRITICAL_EXIT ();

          status = EASYRTOS_ERR_QUEUE;
        }
        else
        {
          /* Set EASYRTOS_OK status to be returned to the waiting task */
          tcb_ptr->pendedWakeStatus = EASYRTOS_OK;
          tcb_ptr->state = TASK_READY;

          /* Set this task as the new owner of the mutex */
          sem->owner = tcb_ptr;

          /* If a timeout is registered on this suspension, cancel it */
          if ((tcb_ptr->pended_timo_cb != NULL)
              && (eTimerCancel (tcb_ptr->pended_timo_cb) != EASYRTOS_OK))
          {
              /* Failed to cancel the timer */
              status = EASYRTOS_ERR_TIMER;
          }
          else
          {
              /* Flag as no timeout timer registered */
              tcb_ptr->pended_timo_cb = NULL;

              /* Success */
              status = EASYRTOS_OK;
          }

          /* Exit critical region */
          CRITICAL_EXIT ();

          if (eCurrentContext())
              easyRTOSSched (FALSE);
        }
      }
    

      /* If no task is suspended on the semaphore, just adjust the count and return */
      else
      {
        switch (sem->type)
        {
          case SEM_COUNTY:

            /* Check for count overflow */
            if (sem->count == 32767)
            {
              /* Don't increment, just return error status */
              status = EASYRTOS_ERR_OVF;
            }
            else
            {
              /* Increment the count and return */
              sem->count++;
              status = EASYRTOS_OK;
            }
          break;

          case SEM_BINARY:

            /* Check whether the count is already 1 */
            if (sem->count == 1)
            {
              /* Don't set it again, just return error status */
              status = EASYRTOS_ERR_OVF;
            }
            else
            {
              /* Set the count to 1 and return */
              sem->count = 1;
              status = EASYRTOS_OK;
            }
          break;

          case SEM_MUTEX:
            if (sem->count > 1)
            {
              /* Return error status */
              status = EASYRTOS_ERR_OVF;
            }
            else
            {
              sem->count++;
              /* If the count is back up to 1, the lock is fully released: clear the owner */
              if (sem->count >= 1) sem->owner = NULL;
              status = EASYRTOS_OK;
            }
          break;
        }

        /* Exit critical region */
        CRITICAL_EXIT ();
      }
    }
  }

  return (status);
}
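
Typical use of eSemGive() is to signal a task waiting on the semaphore. A hedged sketch; rx_sem is a placeholder for a binary semaphore assumed to have been created and initialised elsewhere.

/* Illustrative only */
extern EASYRTOS_SEM rx_sem;

void signal_rx_ready (void)
{
    /* Wake one task pended on the semaphore, or set the count to 1 */
    ERESULT res = eSemGive (&rx_sem);

    if (res != EASYRTOS_OK)
    {
        /* e.g. EASYRTOS_ERR_OVF if the binary semaphore was already 1 */
    }
}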
Example #7
/**
 * Function: take a semaphore. If it cannot be taken immediately, the handling
 * depends on the timeout value and on the semaphore type.
 *
 * 1. Binary and counting semaphores (count is 0):
 *    1. timeout > 0  suspend the calling task; when the timeout expires the
 *       task is woken and the timeout flag is returned.
 *    2. timeout = 0  suspend the calling task forever, until the semaphore
 *       is obtained.
 *    3. timeout = -1 do not suspend; if the count is 0, the would-block flag
 *       is returned.
 *
 * 2. Mutex:
 *    If the caller already owns the mutex, the lock is taken recursively
 *    (the count goes negative) and the task is not suspended.
 *    If the caller is not the owner, the timeout is handled as above:
 *    1. timeout > 0  suspend and wake with the timeout flag on expiry.
 *    2. timeout = 0  suspend forever, until the mutex is obtained.
 *    3. timeout = -1 do not suspend; return the would-block flag.
 *
 * If the task is suspended, the scheduler is called.
 *
 * Parameters:
 * Input:                                        Output:
 * EASYRTOS_SEM *sem  semaphore pointer          EASYRTOS_SEM *sem  semaphore pointer
 * int32_t timeout    timeout in system ticks
 *
 * Returns:
 * EASYRTOS_OK           success
 * EASYRTOS_TIMEOUT      the timeout expired
 * EASYRTOS_WOULDBLOCK   count was 0 and timeout was -1
 * EASYRTOS_ERR_DELETED  the semaphore was deleted while the task was suspended
 * EASYRTOS_ERR_CONTEXT  called from an invalid context
 * EASYRTOS_ERR_PARAM    bad parameter
 * EASYRTOS_ERR_QUEUE    failed to put the task on the suspend queue
 * EASYRTOS_ERR_TIMER    failed to register the timeout timer
 * EASYRTOS_SEM_UINIT    semaphore has not been initialised
 *
 * Functions called:
 * eCurrentContext();
 * tcbEnqueuePriority (&sem->suspQ, curr_tcb_ptr);
 * eTimerRegister (&timerCb);
 * (void)tcb_dequeue_entry (&sem->suspQ, curr_tcb_ptr);
 * easyRTOSSched (FALSE);
 */
ERESULT eSemTake (EASYRTOS_SEM *sem, int32_t timeout)
{
  CRITICAL_STORE;
  ERESULT status;
  SEM_TIMER timerData;
  EASYRTOS_TIMER timerCb;
  EASYRTOS_TCB *curr_tcb_ptr;

  /* Check parameters */
  if (sem == NULL)
  {
    status = EASYRTOS_ERR_PARAM;
  }
  else if (sem->type == NULL)
  {
    status = EASYRTOS_SEM_UINIT;
  }
  else
  {
    /* Enter critical region to protect the semaphore from changing underneath us */
    CRITICAL_ENTER ();

    /* Get the TCB of the currently running task */
    curr_tcb_ptr = eCurrentContext();

    /**
     * Check we are in task context (not an ISR). Because a mutex needs an
     * owner, it can never be taken from an ISR.
     */
    if (curr_tcb_ptr == NULL)
    {
        /* Exit critical region */
        CRITICAL_EXIT ();

        /* Not in task context, cannot suspend */
        status = EASYRTOS_ERR_CONTEXT;
    }

    /**
     * For a binary or counting semaphore, check whether the count is 0.
     * For a mutex, check whether it is owned by a different task.
     * If either holds, the calling task must be suspended.
     */
    else if (((sem->type != SEM_MUTEX) && (sem->count == 0)) ||
             ((sem->type == SEM_MUTEX) && (sem->owner != curr_tcb_ptr) && (sem->owner != NULL)))
    {
      /* If timeout >= 0 the task should block */
      if (timeout >= 0)
      {
        /* Count is zero, suspend the calling task */

        /* Check we are actually in task context */
        if (curr_tcb_ptr)
        {
          /* Add the current task to the semaphore's suspend list */
          if (tcbEnqueuePriority (&sem->suspQ, curr_tcb_ptr) != EASYRTOS_OK)
          {
            /* On failure, exit critical region */
            CRITICAL_EXIT ();

            /* Return an error */
            status = EASYRTOS_ERR_QUEUE;
          }
          else
          {
            /* Mark the task as pended */
            curr_tcb_ptr->state = TASK_PENDED;

            status = EASYRTOS_OK;

            /* Register a timer callback if a timeout was requested */
            if (timeout)
            {
              /* Fill out the data needed by the callback to wake us up */
              timerData.tcb_ptr = curr_tcb_ptr;
              timerData.sem_ptr = sem;

              /* Fill out the timer callback request structure */
              timerCb.cb_func = eSemTimerCallback;
              timerCb.cb_data = (POINTER)&timerData;
              timerCb.cb_ticks = timeout;

              /**
               * Store the timer callback in the TCB so it can be cancelled
               * if the semaphore is given before the timeout expires.
               */
              curr_tcb_ptr->pended_timo_cb = &timerCb;

              /* Register the timeout timer */
              if (eTimerRegister (&timerCb) != EASYRTOS_OK)
              {
                /* Registration failed, return an error */
                status = EASYRTOS_ERR_TIMER;

                /* Clean up: remove the task from the suspend list */
                (void)tcb_dequeue_entry (&sem->suspQ, curr_tcb_ptr);
                curr_tcb_ptr->state = TASK_READY;
                curr_tcb_ptr->pended_timo_cb = NULL;
              }
            }

            /* No timeout requested */
            else
            {
              curr_tcb_ptr->pended_timo_cb = NULL;
            }

            /* Exit critical region */
            CRITICAL_EXIT ();

            /* Check no errors have occurred */
            if (status == EASYRTOS_OK)
            {
              /* The task is now pended; call the scheduler to run another task */
              easyRTOSSched (FALSE);

              /**
               * A wake-up via eSemGive() returns EASYRTOS_OK, an expired
               * timeout returns EASYRTOS_TIMEOUT, and deletion of the
               * semaphore returns EASYRTOS_ERR_DELETED.
               */
              status = curr_tcb_ptr->pendedWakeStatus;

              /**
               * If the task was woken with EASYRTOS_OK, another task
               * incremented the count and handed control to this task.
               * In theory the giving task increments the count and this task
               * decrements it again here, but to let higher-priority tasks
               * preempt cleanly the count adjustment is handled in
               * eSemGive() instead.
               */
            }
          }
        }
        else
        {
          /* Exit critical region */
          CRITICAL_EXIT ();

          /* Not in task context, cannot suspend */
          status = EASYRTOS_ERR_CONTEXT;
        }
      }
      else
      {
        /* timeout == -1, do not block */
        CRITICAL_EXIT();
        status = EASYRTOS_WOULDBLOCK;
      }
    }
    else
    {
      switch (sem->type)
      {
        case SEM_BINARY:
        case SEM_COUNTY:
            sem->count--;
            status = EASYRTOS_OK;
          break;
        case SEM_MUTEX:
          
          /* If the mutex currently has no owner, make the calling task the owner */
          if (sem->owner == NULL)
          {
            sem->owner = curr_tcb_ptr;
          }

          /* Check for recursion underflow, then decrement the count and return */
          if (sem->count > -32768)
          {
            sem->count--;

            /* Success */
            status = EASYRTOS_OK;
          }
          else {
            status = EASYRTOS_ERR_OVF;
          }
          break;
        default:
          status = EASYRTOS_SEM_UINIT;
      }

      /* Exit critical region */
      CRITICAL_EXIT ();
  
    }
  }

  return (status);
}
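
A hedged usage sketch for eSemTake(), showing how the three timeout modes map onto the return codes documented above. rx_sem is a placeholder for an initialised binary or counting semaphore.

/* Illustrative only */
extern EASYRTOS_SEM rx_sem;

void wait_for_rx_example (void)
{
    ERESULT res;

    /* Block for at most 100 ticks */
    res = eSemTake (&rx_sem, 100);
    if (res == EASYRTOS_OK)
    {
        /* Semaphore obtained */
    }
    else if (res == EASYRTOS_TIMEOUT)
    {
        /* 100 ticks elapsed without the semaphore being given */
    }

    /* Poll without blocking */
    if (eSemTake (&rx_sem, -1) == EASYRTOS_WOULDBLOCK)
    {
        /* Count was zero, nothing to take right now */
    }

    /* Block forever until the semaphore is given (timeout = 0) */
    (void)eSemTake (&rx_sem, 0);
}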
Example #8
/**
 * Function: delete a semaphore. All tasks suspended on the semaphore are
 * woken and placed on the Ready list, and any timeout timers they registered
 * are cancelled. If any task was woken, the scheduler is called.
 *
 * Parameters:
 * Input:                                   Output:
 * EASYRTOS_SEM *sem  semaphore pointer     EASYRTOS_SEM *sem  semaphore pointer
 *
 * Returns:
 * EASYRTOS_OK           success
 * EASYRTOS_ERR_QUEUE    failed to put a woken task on the Ready queue
 * EASYRTOS_ERR_TIMER    failed to cancel a timer
 * EASYRTOS_ERR_PARAM    bad parameter
 * EASYRTOS_ERR_DELETED  wake status returned to the tasks that were suspended
 *
 * Functions called:
 * tcb_dequeue_head (&sem->suspQ);
 * tcbEnqueuePriority (&tcb_readyQ, tcb_ptr);
 * eTimerCancel (tcb_ptr->pended_timo_cb);
 */
ERESULT eSemDelete (EASYRTOS_SEM *sem)
{
  ERESULT status;
  CRITICAL_STORE;
  EASYRTOS_TCB *tcb_ptr;
  uint8_t woken_threads = FALSE;

  /* Check parameters */
  if (sem == NULL)
  {
    status = EASYRTOS_ERR_PARAM;
  }
  else
  {
    status = EASYRTOS_OK;

    /* Wake up all suspended tasks */
    while (1)
    {
      /* Enter critical region */
      CRITICAL_ENTER ();

      /* Check whether any task is suspended; several may be suspended on this semaphore. &sem->suspQ is the suspend list */
      tcb_ptr = tcb_dequeue_head (&sem->suspQ);

      /* A task is suspended on the semaphore */
      if (tcb_ptr)
      {
        /* Return an error status to the suspended task */
        tcb_ptr->pendedWakeStatus = EASYRTOS_ERR_DELETED;

        /* Put the task's TCB on the Ready list */
        if (tcbEnqueuePriority (&tcb_readyQ, tcb_ptr) != EASYRTOS_OK)
        {
          /* On failure, exit critical region */
          CRITICAL_EXIT ();

          /* Quit the loop, returning a queueing error */
          status = EASYRTOS_ERR_QUEUE;
          break;
        }

        /* On success, mark the task as READY */
        else tcb_ptr->state = TASK_READY;

        /* If there is a timeout on this suspension, cancel it */
        if (tcb_ptr->pended_timo_cb)
        {
          if (eTimerCancel (tcb_ptr->pended_timo_cb) != EASYRTOS_OK)
          {
            /* Failed to cancel the timer, exit critical region */
            CRITICAL_EXIT ();

            /* Quit the loop, returning an error */
            status = EASYRTOS_ERR_TIMER;
            break;
          }

          /* Flag as no timeout timer registered */
          tcb_ptr->pended_timo_cb = NULL;
        }

        /* Exit critical region */
        CRITICAL_EXIT ();

        /* Request a reschedule */
        woken_threads = TRUE;
      }

      /* No more suspended tasks */
      else
      {
        /* Exit critical region and quit the loop */
        CRITICAL_EXIT ();
        break;
      }
    }

    /* Call the scheduler if any tasks were woken */
    if (woken_threads == TRUE)
    {
      /**
       * Only call the scheduler from task context; in interrupt context it
       * will be called by eIntExit() on exit from the ISR.
       */
      if (eCurrentContext())
          easyRTOSSched (FALSE);
    }
  }

  return (status);
}
Example #9
/**
 * \b atomSemPut
 *
 * Perform a put operation on a semaphore.
 *
 * This increments the current count value for the semaphore and returns.
 *
 * If the count value was previously zero and there are threads blocking on the
 * semaphore, the call will wake up the highest priority thread suspended. Only
 * one thread is woken per call to atomSemPut(). If multiple threads of the
 * same priority are suspended, they are woken in order of suspension (FIFO).
 *
 * This function can be called from interrupt context.
 *
 * @param[in] sem Pointer to semaphore object
 *
 * @retval ATOM_OK Success
 * @retval ATOM_ERR_OVF The semaphore count would have overflowed (>255)
 * @retval ATOM_ERR_PARAM Bad parameter
 * @retval ATOM_ERR_QUEUE Problem putting a woken thread on the ready queue
 * @retval ATOM_ERR_TIMER Problem cancelling a timeout for a woken thread
 */
uint8_t atomSemPut (ATOM_SEM * sem)
{
    uint8_t status;
    CRITICAL_STORE;
    ATOM_TCB *tcb_ptr;

    /* Check parameters */
    if (sem == NULL)
    {
        /* Bad semaphore pointer */
        status = ATOM_ERR_PARAM;
    }
    else
    {
        /* Protect access to the semaphore object and OS queues */
        CRITICAL_START ();

        /* If any threads are blocking on the semaphore, wake up one */
        if (sem->suspQ)
        {
            /**
             * Threads are woken up in priority order, with a FIFO system
             * used on same priority threads. We always take the head,
             * ordering is taken care of by an ordered list enqueue.
             */
            tcb_ptr = tcbDequeueHead (&sem->suspQ);
            if (tcbEnqueuePriority (&tcbReadyQ, tcb_ptr) != ATOM_OK)
            {
                /* Exit critical region */
                CRITICAL_END ();

                /* There was a problem putting the thread on the ready queue */
                status = ATOM_ERR_QUEUE;
            }
            else
            {
                /* Set OK status to be returned to the waiting thread */
                tcb_ptr->suspend_wake_status = ATOM_OK;

                /* If there's a timeout on this suspension, cancel it */
                if ((tcb_ptr->suspend_timo_cb != NULL)
                    && (atomTimerCancel (tcb_ptr->suspend_timo_cb) != ATOM_OK))
                {
                    /* There was a problem cancelling a timeout on this semaphore */
                    status = ATOM_ERR_TIMER;
                }
                else
                {
                    /* Flag as no timeout registered */
                    tcb_ptr->suspend_timo_cb = NULL;

                    /* Successful */
                    status = ATOM_OK;
                }

                /* Exit critical region */
                CRITICAL_END ();

                /**
                 * The scheduler may now make a policy decision to thread
                 * switch if we are currently in thread context. If we are
                 * in interrupt context it will be handled by atomIntExit().
                 */
                if (atomCurrentContext())
                    atomSched (FALSE);
            }
        }

        /* If no threads waiting, just increment the count and return */
        else
        {
            /* Check for count overflow */
            if (sem->count == 255)
            {
                /* Don't increment, just return error status */
                status = ATOM_ERR_OVF;
            }
            else
            {
                /* Increment the count and return success */
                sem->count++;
                status = ATOM_OK;
            }

            /* Exit critical region */
            CRITICAL_END ();
        }
    }

    return (status);
}
Example #10
/**
 * \b atomSemGet
 *
 * Perform a get operation on a semaphore.
 *
 * This decrements the current count value for the semaphore and returns.
 * If the count value is already zero then the call will block until the
 * count is incremented by another thread, or until the specified \c timeout
 * is reached. Blocking threads will also be woken if the semaphore is
 * deleted by another thread while blocking.
 *
 * Depending on the \c timeout value specified the call will do one of
 * the following if the count value is zero:
 *
 * \c timeout == 0 : Call will block until the count is non-zero \n
 * \c timeout > 0 : Call will block until non-zero up to the specified timeout \n
 * \c timeout == -1 : Return immediately if the count is zero \n
 *
 * If the call needs to block and \c timeout is zero, it will block
 * indefinitely until atomSemPut() or atomSemDelete() is called on the
 * semaphore.
 *
 * If the call needs to block and \c timeout is non-zero, the call will only
 * block for the specified number of system ticks after which time, if the
 * thread was not already woken, the call will return with \c ATOM_TIMEOUT.
 *
 * If the call would normally block and \c timeout is -1, the call will
 * return immediately with \c ATOM_WOULDBLOCK.
 *
 * This function can only be called from interrupt context if the \c timeout
 * parameter is -1 (in which case it does not block).
 *
 * @param[in] sem Pointer to semaphore object
 * @param[in] timeout Max system ticks to block (0 = forever)
 *
 * @retval ATOM_OK Success
 * @retval ATOM_TIMEOUT Semaphore timed out before being woken
 * @retval ATOM_WOULDBLOCK Called with timeout == -1 but count is zero
 * @retval ATOM_ERR_DELETED Semaphore was deleted while suspended
 * @retval ATOM_ERR_CONTEXT Not called in thread context and attempted to block
 * @retval ATOM_ERR_PARAM Bad parameter
 * @retval ATOM_ERR_QUEUE Problem putting the thread on the suspend queue
 * @retval ATOM_ERR_TIMER Problem registering the timeout
 */
uint8_t atomSemGet (ATOM_SEM *sem, int32_t timeout)
{
    CRITICAL_STORE;
    uint8_t status;
    SEM_TIMER timer_data;
    ATOM_TIMER timer_cb;
    ATOM_TCB *curr_tcb_ptr;

    /* Check parameters */
    if (sem == NULL)
    {
        /* Bad semaphore pointer */
        status = ATOM_ERR_PARAM;
    }
    else
    {
        /* Protect access to the semaphore object and OS queues */
        CRITICAL_START ();

        /* If count is zero, block the calling thread */
        if (sem->count == 0)
        {
            /* If called with timeout >= 0, we should block */
            if (timeout >= 0)
            {
                /* Count is zero, block the calling thread */

                /* Get the current TCB */
                curr_tcb_ptr = atomCurrentContext();

                /* Check we are actually in thread context */
                if (curr_tcb_ptr)
                {
                    /* Add current thread to the suspend list on this semaphore */
                    if (tcbEnqueuePriority (&sem->suspQ, curr_tcb_ptr) != ATOM_OK)
                    {
                        /* Exit critical region */
                        CRITICAL_END ();

                        /* There was an error putting this thread on the suspend list */
                        status = ATOM_ERR_QUEUE;
                    }
                    else
                    {
                        /* Set suspended status for the current thread */
                        curr_tcb_ptr->suspended = TRUE;

                        /* Track errors */
                        status = ATOM_OK;

                        /* Register a timer callback if requested */
                        if (timeout)
                        {
                            /* Fill out the data needed by the callback to wake us up */
                            timer_data.tcb_ptr = curr_tcb_ptr;
                            timer_data.sem_ptr = sem;

                            /* Fill out the timer callback request structure */
                            timer_cb.cb_func = atomSemTimerCallback;
                            timer_cb.cb_data = (POINTER)&timer_data;
                            timer_cb.cb_ticks = timeout;

                            /**
                             * Store the timer details in the TCB so that we can
                             * cancel the timer callback if the semaphore is put
                             * before the timeout occurs.
                             */
                            curr_tcb_ptr->suspend_timo_cb = &timer_cb;

                            /* Register a callback on timeout */
                            if (atomTimerRegister (&timer_cb) != ATOM_OK)
                            {
                                /* Timer registration failed */
                                status = ATOM_ERR_TIMER;

                                /* Clean up and return to the caller */
                                (void)tcbDequeueEntry (&sem->suspQ, curr_tcb_ptr);
                                curr_tcb_ptr->suspended = FALSE;
                                curr_tcb_ptr->suspend_timo_cb = NULL;
                            }
                        }

                        /* Set no timeout requested */
                        else
                        {
                            /* No need to cancel timeouts on this one */
                            curr_tcb_ptr->suspend_timo_cb = NULL;
                        }

                        /* Exit critical region */
                        CRITICAL_END ();

                        /* Check no errors have occurred */
                        if (status == ATOM_OK)
                        {
                            /**
                             * Current thread now blocking, schedule in a new
                             * one. We already know we are in thread context
                             * so can call the scheduler from here.
                             */
                            atomSched (FALSE);

                            /**
                             * Normal atomSemPut() wakeups will set ATOM_OK status,
                             * while timeouts will set ATOM_TIMEOUT and semaphore
                             * deletions will set ATOM_ERR_DELETED.
                             */
                            status = curr_tcb_ptr->suspend_wake_status;

                            /**
                             * If we have been woken up with ATOM_OK then
                             * another thread incremented the semaphore and
                             * handed control to this thread. In theory the
                             * the posting thread increments the counter and
                             * as soon as this thread wakes up we decrement
                             * the counter here, but to prevent another
                             * thread preempting this thread and decrementing
                             * the semaphore before this section was
                             * scheduled back in, we emulate the increment
                             * and decrement by not incrementing in the
                             * atomSemPut() and not decrementing here. The
                             * count remains zero throughout preventing other
                             * threads preempting before we decrement the
                             * count again.
                             */

                        }
                    }
                }
                else
                {
                    /* Exit critical region */
                    CRITICAL_END ();

                    /* Not currently in thread context, can't suspend */
                    status = ATOM_ERR_CONTEXT;
                }
            }
            else
            {
                /* timeout == -1, requested not to block and count is zero */
                CRITICAL_END();
                status = ATOM_WOULDBLOCK;
            }
        }
        else
        {
            /* Count is non-zero, just decrement it and return to calling thread */
            sem->count--;

            /* Exit critical region */
            CRITICAL_END ();

            /* Successful */
            status = ATOM_OK;
        }
    }

    return (status);
}
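
A minimal producer/consumer sketch built on the two calls above. The semaphore is assumed to be created during startup with an initial count of zero (atomSemCreate() in the atomthreads API); thread creation and error handling are omitted.

/* Illustrative only: assumed to be created at startup, e.g. atomSemCreate (&data_sem, 0) */
static ATOM_SEM data_sem;

static void consumer_func (uint32_t param)
{
    while (1)
    {
        /* Block for up to 50 ticks waiting for the producer to post */
        if (atomSemGet (&data_sem, 50) == ATOM_OK)
        {
            /* ... consume one item ... */
        }
    }
}

static void producer_func (uint32_t param)
{
    while (1)
    {
        /* ... produce one item ... */
        (void)atomSemPut (&data_sem);  /* wake the consumer */
    }
}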
Example #11
/**
 * \b atomMutexPut
 *
 * Give back the lock on a mutex.
 *
 * This checks that the mutex is owned by the calling thread, and decrements
 * the recursive lock count. Once the lock count reaches zero, the lock is
 * considered relinquished and no longer owned by this thread.
 *
 * If the lock is relinquished and there are threads blocking on the mutex, the
 * call will wake up the highest priority thread suspended. Only one thread is
 * woken per call to atomMutexPut(). If multiple threads of the same priority
 * are suspended, they are woken in order of suspension (FIFO).
 *
 * This function can only be called from thread context. A mutex has the
 * concept of an owner thread, so it is never valid to make a mutex call
 * from interrupt context when there is no thread to associate with.
 *
 * @param[in] mutex Pointer to mutex object
 *
 * @retval ATOM_OK Success
 * @retval ATOM_ERR_PARAM Bad parameter
 * @retval ATOM_ERR_QUEUE Problem putting a woken thread on the ready queue
 * @retval ATOM_ERR_TIMER Problem cancelling a timeout for a woken thread
 * @retval ATOM_ERR_OWNERSHIP Attempt to unlock mutex not owned by this thread
 */
uint8_t atomMutexPut (ATOM_MUTEX * mutex)
{
    uint8_t status;
    CRITICAL_STORE;
    ATOM_TCB *tcb_ptr, *curr_tcb_ptr;

    /* Check parameters */
    if (mutex == NULL)
    {
        /* Bad mutex pointer */
        status = ATOM_ERR_PARAM;
    }
    else
    {
        /* Get the current TCB */
        curr_tcb_ptr = atomCurrentContext();

        /* Protect access to the mutex object and OS queues */
        CRITICAL_START ();

        /* Check if the calling thread owns this mutex */
        if (mutex->owner != curr_tcb_ptr)
        {
            /* Exit critical region */
            CRITICAL_END ();

            /* Attempt to unlock by non-owning thread */
            status = ATOM_ERR_OWNERSHIP;
        }
        else
        {
            /* Lock is owned by this thread, decrement the recursive lock count */
            mutex->count--;

            /* Once recursive lock count reaches zero, we relinquish ownership */
            if (mutex->count == 0)
            {
                /* Relinquish ownership */
                mutex->owner = NULL;

                /* If any threads are blocking on this mutex, wake them now */
                if (mutex->suspQ)
                {
                    /**
                     * Threads are woken up in priority order, with a FIFO system
                     * used on same priority threads. We always take the head,
                     * ordering is taken care of by an ordered list enqueue.
                     */
                    tcb_ptr = tcbDequeueHead (&mutex->suspQ);
                    if (tcbEnqueuePriority (&tcbReadyQ, tcb_ptr) != ATOM_OK)
                    {
                        /* Exit critical region */
                        CRITICAL_END ();

                        /* There was a problem putting the thread on the ready queue */
                        status = ATOM_ERR_QUEUE;
                    }
                    else
                    {
                        /* Set OK status to be returned to the waiting thread */
                        tcb_ptr->suspend_wake_status = ATOM_OK;

                        /* Set this thread as the new owner of the mutex */
                        mutex->owner = tcb_ptr;

                        /* If there's a timeout on this suspension, cancel it */
                        if ((tcb_ptr->suspend_timo_cb != NULL)
                            && (atomTimerCancel (tcb_ptr->suspend_timo_cb) != ATOM_OK))
                        {
                            /* There was a problem cancelling a timeout on this mutex */
                            status = ATOM_ERR_TIMER;
                        }
                        else
                        {
                            /* Flag as no timeout registered */
                            tcb_ptr->suspend_timo_cb = NULL;

                            /* Successful */
                            status = ATOM_OK;
                        }

                        /* Exit critical region */
                        CRITICAL_END ();

                        /**
                         * The scheduler may now make a policy decision to
                         * thread switch. We already know we are in thread
                         * context so can call the scheduler from here.
                         */
                        atomSched (FALSE);
                    }
                }
                else
                {
                    /**
                     * Relinquished ownership and no threads waiting.
                     * Nothing to do.
                     */

                    /* Exit critical region */
                    CRITICAL_END ();

                    /* Successful */
                    status = ATOM_OK;
                }
            }
            else
            {
                /**
                 * Decremented lock but still retain ownership due to
                 * recursion. Nothing to do.
                 */

                /* Exit critical region */
                CRITICAL_END ();

                /* Successful */
                status = ATOM_OK;
            }
        }
    }

    return (status);
}
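
A short sketch of the matching lock/unlock pattern from thread context. console_mutex and shared_counter are placeholders; the mutex is assumed to have been created elsewhere (atomMutexCreate() in the atomthreads API).

/* Illustrative only */
extern ATOM_MUTEX console_mutex;
static int shared_counter;

static void bump_counter (void)
{
    /* Block for up to 20 ticks waiting for the lock */
    if (atomMutexGet (&console_mutex, 20) == ATOM_OK)
    {
        shared_counter++;                      /* protected section */
        (void)atomMutexPut (&console_mutex);  /* release, waking any waiter */
    }
}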
Example #12
/**
 * \b atomThreadCreate
 *
 * Creates and starts a new thread.
 *
 * Callers provide the ATOM_TCB structure storage, these are not obtained
 * from an internal TCB free list.
 *
 * The function puts the new thread on the ready queue and calls the
 * scheduler. If the priority is higher than the current priority, then the
 * new thread may be scheduled in before the function returns.
 *
 * Optionally prefills the thread stack with a known value to enable stack
 * usage checking (if the ATOM_STACK_CHECKING macro is defined).
 *
 * @param[in] tcb_ptr Pointer to the thread's TCB storage
 * @param[in] priority Priority of the thread (0 to 255)
 * @param[in] entry_point Thread entry point
 * @param[in] entry_param Parameter passed to thread entry point
 * @param[in] stack_top Top of the stack area
 * @param[in] stack_size Size of the stack area in bytes
 *
 * @retval ATOM_OK Success
 * @retval ATOM_ERR_PARAM Bad parameters
 * @retval ATOM_ERR_QUEUE Error putting the thread on the ready queue
 */
uint8_t atomThreadCreate (ATOM_TCB *tcb_ptr, uint8_t priority, void (*entry_point)(uint32_t), uint32_t entry_param, void *stack_top, uint32_t stack_size)
{
    CRITICAL_STORE;
    uint8_t status;

    if ((tcb_ptr == NULL) || (entry_point == NULL) || (stack_top == NULL)
        || (stack_size == 0))
    {
        /* Bad parameters */
        status = ATOM_ERR_PARAM;
    }
    else
    {

        /* Set up the TCB initial values */
        tcb_ptr->suspended = FALSE;
        tcb_ptr->priority = priority;
        tcb_ptr->prev_tcb = NULL;
        tcb_ptr->next_tcb = NULL;
        tcb_ptr->suspend_timo_cb = NULL;

        /**
         * Store the thread entry point and parameter in the TCB. This may
         * not be necessary for all architecture ports if they put all of
         * this information in the initial thread stack.
         */
        tcb_ptr->entry_point = entry_point;
        tcb_ptr->entry_param = entry_param;

        /**
         * Additional processing only required if stack-checking is
         * enabled. Incurs a slight overhead on each thread creation
         * and uses some additional storage in the TCB, but can be
         * compiled out if not desired.
         */
#ifdef ATOM_STACK_CHECKING

        /* Store the stack details for use by the stack-check function */
        tcb_ptr->stack_top = stack_top;
        tcb_ptr->stack_size = stack_size;

        /**
         * Prefill the stack with a known value. This is used later in
         * calls to atomThreadStackCheck() to get an indication of how
         * much stack has been used during runtime.
         */
        while (stack_size > 0)
        {
            /* Initialise all stack bytes from bottom up to 0x5A */
            *((uint8_t *)stack_top - (stack_size - 1)) = STACK_CHECK_BYTE;
            stack_size--;
        }
#else
        /* Avoid compiler warnings due to unused stack_size variable */
        stack_size = stack_size;
#endif

        /**
         * Call the arch-specific routine to set up the stack. This routine
         * is responsible for creating the context save area necessary for
         * allowing atomThreadSwitch() to schedule it in. The initial
         * archContextSwitch() call when this thread gets scheduled in the
         * first time will then restore the program counter to the thread
         * entry point, and any other necessary register values ready for
         * it to start running.
         */
        archThreadContextInit (tcb_ptr, stack_top, entry_point, entry_param);

        /* Protect access to the OS queue */
        CRITICAL_START ();

        /* Put this thread on the ready queue */
        if (tcbEnqueuePriority (&tcbReadyQ, tcb_ptr) != ATOM_OK)
        {
            /* Exit critical region */
            CRITICAL_END ();

            /* Queue-related error */
            status = ATOM_ERR_QUEUE;
        }
        else
        {
            /* Exit critical region */
            CRITICAL_END ();

            /**
             * If the OS is started and we're in thread context, check if we
             * should be scheduled in now.
             */
            if ((atomOSStarted == TRUE) && atomCurrentContext())
                atomSched (FALSE);

            /* Success */
            status = ATOM_OK;
        }
    }

    return (status);
}
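
A typical creation call, matching the stack handling shown above: the stack-check prefill loop writes downwards from stack_top, so stack_top should point at the last byte of the stack array (the usual arrangement for descending-stack ports). Names, priority and size are illustrative.

/* Illustrative only */
#define MY_STACK_SIZE 256

static ATOM_TCB my_tcb;
static uint8_t my_stack[MY_STACK_SIZE];

static void my_thread_func (uint32_t param)
{
    while (1)
    {
        /* ... thread work ... */
    }
}

/* Called once during system initialisation */
void create_my_thread (void)
{
    if (atomThreadCreate (&my_tcb, 16, my_thread_func, 0,
                          &my_stack[MY_STACK_SIZE - 1], MY_STACK_SIZE) != ATOM_OK)
    {
        /* ATOM_ERR_PARAM or ATOM_ERR_QUEUE */
    }
}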
Example #13
/**
 * \b atomSched
 *
 * This is an internal function not for use by application code.
 *
 * This is the main scheduler routine. It is called by the various OS
 * library routines to check if any threads should be scheduled in now.
 * If so, the context will be switched from the current thread to the
 * new one.
 *
 * The scheduler is priority-based with round-robin performed on threads
 * with the same priority. Round-robin is only performed on timer ticks
 * however. During reschedules caused by an OS operation (e.g. after
 * giving or taking a semaphore) we only allow the scheduling in of
 * threads with higher priority than current priority. On timer ticks we
 * also allow the scheduling of same-priority threads - in that case we
 * schedule in the head of the ready list for that priority and put the
 * current thread at the tail.
 *
 * @param[in] timer_tick Should be TRUE when called from the system tick
 *
 * @return None
 */
void atomSched (uint8_t timer_tick)
{
    CRITICAL_STORE;
    ATOM_TCB *new_tcb = NULL;
    int16_t lowest_pri;

    /**
     * Check the OS has actually started. As long as the proper initialisation
     * sequence is followed there should be no calls here until the OS is
     * started, but we check to handle badly-behaved ports.
     */
    if (atomOSStarted == FALSE)
    {
        /* Don't schedule anything in until the OS is started */
        return;
    }

    /* Enter critical section */
    CRITICAL_START ();

    /**
     * If the current thread is going into suspension, then
     * unconditionally dequeue the next thread for execution.
     */
    if (curr_tcb->suspended == TRUE)
    {
        /**
         * Dequeue the next ready to run thread. There will always be
         * at least the idle thread waiting. Note that this could
         * actually be the suspending thread if it was unsuspended
         * before the scheduler was called.
         */
        new_tcb = tcbDequeueHead (&tcbReadyQ);

        /**
         * Don't need to add the current thread to any queue because
         * it was suspended by another OS mechanism and will be
         * sitting on a suspend queue or similar within one of the OS
         * primitive libraries (e.g. semaphore).
         */

        /* Switch to the new thread */
        atomThreadSwitch (curr_tcb, new_tcb);
    }

    /**
     * Otherwise the current thread is still ready, but check
     * if any other threads are ready.
     */
    else
    {
        /* Calculate which priority is allowed to be scheduled in */
        if (timer_tick == TRUE)
        {
            /* Same priority or higher threads can preempt */
            lowest_pri = (int16_t)curr_tcb->priority;
        }
        else if (curr_tcb->priority > 0)
        {
            /* Only higher priority threads can preempt, invalid for 0 (highest) */
            lowest_pri = (int16_t)(curr_tcb->priority - 1);
        }
        else
        {
            /**
             * Current priority is already highest (0), don't allow preempt by
             * threads of any priority because this is not a time-slice.
             */
            lowest_pri = -1;
        }

        /* Check if a reschedule is allowed */
        if (lowest_pri >= 0)
        {
            /* Check for a thread at the given minimum priority level or higher */
            new_tcb = tcbDequeuePriority (&tcbReadyQ, (uint8_t)lowest_pri);

            /* If a thread was found, schedule it in */
            if (new_tcb)
            {
                /* Add the current thread to the ready queue */
                (void)tcbEnqueuePriority (&tcbReadyQ, curr_tcb);

                /* Switch to the new thread */
                atomThreadSwitch (curr_tcb, new_tcb);
            }
        }
    }

    /* Exit critical section */
    CRITICAL_END ();
}
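
The preemption rule above can be summarised in a small helper. This is a restatement for illustration only, not part of the kernel: priority 0 is the highest priority and a result of -1 means no reschedule is permitted.

/* Illustration only: mirrors the lowest_pri calculation in atomSched() */
static int16_t sched_allowed_priority (uint8_t curr_priority, uint8_t timer_tick)
{
    if (timer_tick == TRUE)
        return (int16_t)curr_priority;        /* time-slice: same priority may run */
    else if (curr_priority > 0)
        return (int16_t)(curr_priority - 1);  /* OS call: only strictly higher priority */
    else
        return -1;                            /* already at the highest priority (0) */
}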
Example #14
/**
 * Function: delete a queue. All tasks suspended on the queue are woken and
 * added to the Ready list, and any timeout timers they registered are
 * cancelled. If any task was woken, the scheduler is called.
 *
 * Parameters:
 * Input:                                   Output:
 * EASYRTOS_QUEUE *qptr  queue pointer      EASYRTOS_QUEUE *qptr  queue pointer
 *
 * Returns:
 * EASYRTOS_OK         success
 * EASYRTOS_ERR_PARAM  bad parameter
 * EASYRTOS_ERR_QUEUE  failed to put a woken task on the Ready queue
 * EASYRTOS_ERR_TIMER  failed to cancel a timer
 *
 * Functions called:
 * tcb_dequeue_head (&qptr->getSuspQ);
 * tcb_dequeue_head (&qptr->putSuspQ);
 * tcbEnqueuePriority (&tcb_readyQ, tcb_ptr);
 * eTimerCancel (tcb_ptr->pended_timo_cb);
 * eCurrentContext();
 * easyRTOSSched (FALSE);
 */
ERESULT eQueueDelete (EASYRTOS_QUEUE *qptr)
{
  ERESULT status;
  CRITICAL_STORE;
  EASYRTOS_TCB *tcb_ptr;
  uint8_t wokenTasks = FALSE;

  /* Check parameters */
  if (qptr == NULL)
  {
    status = EASYRTOS_ERR_PARAM;
  }
  else
  {
    /* Default to success unless an error occurs while waking tasks */
    status = EASYRTOS_OK;

    /* Wake up all suspended tasks (move them onto the Ready queue) */
    while (1)
    {
      /* Enter critical region */
      CRITICAL_ENTER ();

      /* Check whether any task is suspended (waiting to send or to receive) */
      if (((tcb_ptr = tcb_dequeue_head (&qptr->getSuspQ)) != NULL)
          || ((tcb_ptr = tcb_dequeue_head (&qptr->putSuspQ)) != NULL))
      {

        /* Return an error status to the woken task */
        tcb_ptr->pendedWakeStatus = EASYRTOS_ERR_DELETED;

        /* Put the task on the Ready queue */
        if (tcbEnqueuePriority (&tcb_readyQ, tcb_ptr) != EASYRTOS_OK)
        {
          /* Exit critical region */
          CRITICAL_EXIT ();

          /* Quit the loop, returning an error */
          status = EASYRTOS_ERR_QUEUE;
          break;
        }
        else tcb_ptr->state = TASK_READY;

        /* Cancel any timer registered by the blocked task */
        if (tcb_ptr->pended_timo_cb)
        {
          /* Cancel the callback */
          if (eTimerCancel (tcb_ptr->pended_timo_cb) != EASYRTOS_OK)
          {
              /* Exit critical region */
              CRITICAL_EXIT ();

              /* Quit the loop, returning an error */
              status = EASYRTOS_ERR_TIMER;
              break;
          }

          /* Flag the task as having no timer callback */
          tcb_ptr->pended_timo_cb = NULL;
        }

        /* Exit critical region */
        CRITICAL_EXIT ();

        /* Request a reschedule */
        wokenTasks = TRUE;
      }

      /* No more suspended tasks */
      else
      {
        /* Exit critical region */
        CRITICAL_EXIT ();
        break;
      }
    }

    /* Call the scheduler if any tasks were woken */
    if (wokenTasks == TRUE)
    {
      /**
       * Only call the scheduler from task context; in interrupt context it
       * will be called by eIntExit() on exit from the ISR.
       */
      if (eCurrentContext())
        easyRTOSSched (FALSE);
    }
  }

  return (status);
}
Example #15
/**
 * Function: insert a message into the queue. If a task is waiting to receive
 * (and can now get the message), it is woken and any timer it registered is
 * cancelled.
 *
 * Parameters:
 * Input:                                Output:
 * EASYRTOS_QUEUE *qptr  queue pointer   EASYRTOS_QUEUE *qptr  queue pointer
 * void *msgptr  message to insert       void *msgptr  message to insert
 *
 * Returns:
 * EASYRTOS_OK         success
 * EASYRTOS_ERR_PARAM  bad parameter
 * EASYRTOS_ERR_QUEUE  failed to put a woken task on the Ready queue
 * EASYRTOS_ERR_TIMER  failed to cancel a timer
 *
 * Functions called:
 * memcpy (((uint8_t*)qptr->buff_ptr + qptr->insert_index), (uint8_t*)msgptr, qptr->unit_size);
 * tcb_dequeue_head (&qptr->getSuspQ);
 * tcbEnqueuePriority (&tcb_readyQ, tcb_ptr);
 * eTimerCancel (tcb_ptr->pended_timo_cb);
 */
static ERESULT queue_insert (EASYRTOS_QUEUE *qptr, void *msgptr)
{
    ERESULT status;
    EASYRTOS_TCB *tcb_ptr;

    /* Check parameters */
    if ((qptr == NULL)|| (msgptr == NULL))
    {
        status = EASYRTOS_ERR_PARAM;
    }
    else
    {
        /* There is room in the queue, copy the data in */
        memcpy (((uint8_t*)qptr->buff_ptr + qptr->insert_index), (uint8_t*)msgptr, qptr->unit_size);
        qptr->insert_index += qptr->unit_size;
        qptr->num_msgs_stored++;

        /* The queue storage is circular, which keeps lookups fast */
        /* Check whether insert_index needs to wrap back to the start */
        if (qptr->insert_index >= (qptr->unit_size * qptr->max_num_msgs))
            qptr->insert_index = 0;

        /* If a task is waiting to receive, wake it up */
        tcb_ptr = tcb_dequeue_head (&qptr->getSuspQ);
        if (tcb_ptr)
        {
            /* Put the suspended task on the Ready list */
            if (tcbEnqueuePriority (&tcb_readyQ, tcb_ptr) == EASYRTOS_OK)
            {

                tcb_ptr->pendedWakeStatus = EASYRTOS_OK;
                tcb_ptr->state = TASK_READY;
                
                /* If a timer callback was registered, cancel it first */
                if ((tcb_ptr->pended_timo_cb != NULL)
                    && (eTimerCancel (tcb_ptr->pended_timo_cb) != EASYRTOS_OK))
                {
                    status = EASYRTOS_ERR_TIMER;
                }
                else
                {
                    tcb_ptr->pended_timo_cb = NULL;

                    /* Success */
                    status = EASYRTOS_OK;
                }
            }
            else
            {
                /* Failed to put the task on the Ready list */
                status = EASYRTOS_ERR_QUEUE;
            }
        }
        else
        {
            /* No task waiting to receive */
            status = EASYRTOS_OK;
        }
    }

    return (status);
}
Example #16
/**
 * Function: put a message into the queue. If the queue is full, the handling
 * depends on the timeout value:
 * 1. timeout > 0  suspend the calling task; when the timeout expires the
 *    task is woken and the timeout flag is returned.
 * 2. timeout = 0  suspend the calling task forever, until space becomes
 *    available in the queue.
 * 3. timeout = -1 do not suspend; if the queue is full, the would-block flag
 *    is returned.
 * If the task is suspended, the scheduler is called.
 *
 * Parameters:
 * Input:                                        Output:
 * EASYRTOS_QUEUE *qptr  queue pointer           EASYRTOS_QUEUE *qptr  queue pointer
 * int32_t timeout  timeout in system ticks      void *msgptr  message put into the queue
 * void *msgptr  message to put into the queue
 *
 * Returns:
 * EASYRTOS_OK           success
 * EASYRTOS_WOULDBLOCK   would have blocked but timeout was -1
 * EASYRTOS_TIMEOUT      the timeout expired
 * EASYRTOS_ERR_DELETED  the queue was deleted while the task was suspended
 * EASYRTOS_ERR_CONTEXT  called from an invalid context
 * EASYRTOS_ERR_PARAM    bad parameter
 * EASYRTOS_ERR_QUEUE    failed to put the task on the suspend or Ready queue
 * EASYRTOS_ERR_TIMER    failed to register the timeout timer
 *
 * Functions called:
 * eCurrentContext();
 * tcbEnqueuePriority (&qptr->putSuspQ, curr_tcb_ptr);
 * eTimerRegister (&timerCb);
 * tcb_dequeue_entry (&qptr->putSuspQ, curr_tcb_ptr);
 * easyRTOSSched (FALSE);
 * queue_insert (qptr, msgptr);
 */
ERESULT eQueueGive (EASYRTOS_QUEUE *qptr, int32_t timeout, void *msgptr)
{
    CRITICAL_STORE;
    ERESULT status;
    QUEUE_TIMER timerData;
    EASYRTOS_TIMER timerCb;
    EASYRTOS_TCB *curr_tcb_ptr;

    /* Check parameters */
    if ((qptr == NULL) || (msgptr == NULL))
    {
        status = EASYRTOS_ERR_PARAM;
    }
    else
    {
        /* Enter critical region */
        CRITICAL_ENTER ();

        /* If the queue is full, suspend the calling task */
        if (qptr->num_msgs_stored == qptr->max_num_msgs)
        {
            /* timeout >= 0, the task will be suspended */
            if (timeout >= 0)
            {

                /* Get the current task's TCB */
                curr_tcb_ptr = eCurrentContext();

                /* Check we are in task context */
                if (curr_tcb_ptr)
                {
                    /* Add the current task to the send (put) suspend list */
                    if (tcbEnqueuePriority (&qptr->putSuspQ, curr_tcb_ptr) == EASYRTOS_OK)
                    {
                        /* Mark the task as pended */
                        curr_tcb_ptr->state = TASK_PENDED;

                        status = EASYRTOS_OK;

                        /* timeout > 0, register a timer callback */
                        if (timeout)
                        {
                            /* Fill out the data needed by the timer callback */
                            timerData.tcb_ptr = curr_tcb_ptr;
                            timerData.queue_ptr = qptr;
                            timerData.suspQ = &qptr->putSuspQ;

                            /* Fill out the timer callback request structure */
                            timerCb.cb_func = eQueueTimerCallback;
                            timerCb.cb_data = (POINTER)&timerData;
                            timerCb.cb_ticks = timeout;

                            /* Store the timer callback in the task TCB so it can be cancelled later */
                            curr_tcb_ptr->pended_timo_cb = &timerCb;

                            /* Register the timer */
                            if (eTimerRegister (&timerCb) != EASYRTOS_OK)
                            {
                                /* Registration failed */
                                status = EASYRTOS_ERR_TIMER;
                                
                                (void)tcb_dequeue_entry (&qptr->putSuspQ, curr_tcb_ptr);
                                curr_tcb_ptr->state = TASK_RUN;
                                curr_tcb_ptr->pended_timo_cb = NULL;
                            }
                        }

                        /* No timer registration needed */
                        else
                        {
                            curr_tcb_ptr->pended_timo_cb = NULL;
                        }

                        /* Exit critical region */
                        CRITICAL_EXIT ();

                        /* Check that no error occurred */
                        if (status == EASYRTOS_OK)
                        {
                            /* The current task is now pended, call the scheduler */
                            easyRTOSSched (FALSE);

                            /* The task resumes here when the queue is deleted, the timeout expires, or space becomes available */
                            status = curr_tcb_ptr->pendedWakeStatus;

                            /**
                             * Check pendedWakeStatus: EASYRTOS_OK means we can
                             * go ahead and insert; any other value means the
                             * queue was deleted or the timeout expired, in
                             * which case we simply return.
                             */
                            if (status == EASYRTOS_OK)
                            {
                                /* Enter critical region */
                                CRITICAL_ENTER();

                                /* Put the message into the queue */
                                status = queue_insert (qptr, msgptr);

                                /* Exit critical region */
                                CRITICAL_EXIT();
                            }
                        }
                    }
                    else
                    {
                        /* Failed to add the task to the suspend queue */
                        CRITICAL_EXIT();
                        status = EASYRTOS_ERR_QUEUE;
                    }
                }
                else
                {
                    /* Not in task context, cannot suspend the task */
                    CRITICAL_EXIT ();
                    status = EASYRTOS_ERR_CONTEXT;
                }
            }
            else
            {
                /* timeout == -1, do not suspend; the queue is currently full */
                CRITICAL_EXIT();
                status = EASYRTOS_WOULDBLOCK;
            }
        }
        else
        {
            /* No need to suspend, copy the data straight into the queue */
            status = queue_insert (qptr, msgptr);

            /* Exit critical region */
            CRITICAL_EXIT ();

            /**
             * Only call the scheduler from task context; in interrupt
             * context it will be called by eIntExit() on exit from the ISR.
             */
            if (eCurrentContext())
                easyRTOSSched (FALSE);
        }
    }

    return (status);
}
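
A hedged usage sketch for eQueueGive(): posting a fixed-size message with a bounded wait. cmd_queue is assumed to have been created elsewhere with a unit size of sizeof(CMD_MSG); the message type and names are illustrative.

/* Illustrative only */
typedef struct { uint8_t id; uint8_t arg; } CMD_MSG;
extern EASYRTOS_QUEUE cmd_queue;

void post_command (uint8_t id, uint8_t arg)
{
    CMD_MSG msg;
    ERESULT res;

    msg.id  = id;
    msg.arg = arg;

    /* Wait up to 10 ticks for space if the queue is full */
    res = eQueueGive (&cmd_queue, 10, &msg);
    if (res != EASYRTOS_OK)
    {
        /* EASYRTOS_TIMEOUT if the queue stayed full, or another error code */
    }
}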
Example #17
/**
 * \b atomSemDelete
 *
 * Deletes a semaphore object.
 *
 * Any threads currently suspended on the semaphore will be woken up with
 * return status ATOM_ERR_DELETED. If called at thread context then the
 * scheduler will be called during this function which may schedule in one
 * of the woken threads depending on relative priorities.
 *
 * This function can be called from interrupt context, but loops internally
 * waking up all threads blocking on the semaphore, so the potential
 * execution cycles cannot be determined in advance.
 *
 * @param[in] sem Pointer to semaphore object
 *
 * @retval ATOM_OK Success
 * @retval ATOM_ERR_QUEUE Problem putting a woken thread on the ready queue
 * @retval ATOM_ERR_TIMER Problem cancelling a timeout on a woken thread
 */
uint8_t atomSemDelete (ATOM_SEM *sem)
{
    uint8_t status;
    CRITICAL_STORE;
    ATOM_TCB *tcb_ptr;
    uint8_t woken_threads = FALSE;

    /* Parameter check */
    if (sem == NULL)
    {
        /* Bad semaphore pointer */
        status = ATOM_ERR_PARAM;
    }
    else
    {
        /* Default to success status unless errors occur during wakeup */
        status = ATOM_OK;

        /* Wake up all suspended tasks */
        while (1)
        {
            /* Enter critical region */
            CRITICAL_START ();

            /* Check if any threads are suspended */
            tcb_ptr = tcbDequeueHead (&sem->suspQ);

            /* A thread is suspended on the semaphore */
            if (tcb_ptr)
            {
                /* Return error status to the waiting thread */
                tcb_ptr->suspend_wake_status = ATOM_ERR_DELETED;

                /* Put the thread on the ready queue */
                if (tcbEnqueuePriority (&tcbReadyQ, tcb_ptr) != ATOM_OK)
                {
                    /* Exit critical region */
                    CRITICAL_END ();

                    /* Quit the loop, returning error */
                    status = ATOM_ERR_QUEUE;
                    break;
                }

                /* If there's a timeout on this suspension, cancel it */
                if (tcb_ptr->suspend_timo_cb)
                {
                    /* Cancel the callback */
                    if (atomTimerCancel (tcb_ptr->suspend_timo_cb) != ATOM_OK)
                    {
                        /* Exit critical region */
                        CRITICAL_END ();

                        /* Quit the loop, returning error */
                        status = ATOM_ERR_TIMER;
                        break;
                    }

                    /* Flag as no timeout registered */
                    tcb_ptr->suspend_timo_cb = NULL;

                }

                /* Exit critical region */
                CRITICAL_END ();

                /* Request a reschedule */
                woken_threads = TRUE;
            }

            /* No more suspended threads */
            else
            {
                /* Exit critical region and quit the loop */
                CRITICAL_END ();
                break;
            }
        }

        /* Call scheduler if any threads were woken up */
        if (woken_threads == TRUE)
        {
            /**
             * Only call the scheduler if we are in thread context, otherwise
             * it will be called on exiting the ISR by atomIntExit().
             */
            if (atomCurrentContext())
                atomSched (FALSE);
        }
    }

    return (status);
}
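As a rough illustration of the deletion behaviour described above, here is a minimal sketch assuming the standard atomthreads headers and a semaphore created elsewhere with atomSemCreate (&sem, 0); thread creation and registration of these entry points are omitted.

#include "atom.h"
#include "atomsem.h"

static ATOM_SEM sem;  /* Assumed to be initialised elsewhere with atomSemCreate (&sem, 0) */

/* Worker: blocks indefinitely on the semaphore (timeout == 0) */
static void worker_thread_func (uint32_t param)
{
    uint8_t status;

    status = atomSemGet (&sem, 0);

    if (status == ATOM_ERR_DELETED)
    {
        /* Woken because another thread deleted the semaphore under us */
    }
}

/* Controller: deletes the semaphore, waking every suspended waiter */
static void controller_thread_func (uint32_t param)
{
    if (atomSemDelete (&sem) != ATOM_OK)
    {
        /* ATOM_ERR_QUEUE or ATOM_ERR_TIMER: a waiter could not be woken cleanly */
    }
}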
Example #18
/**
 * \b atomMutexGet
 *
 * Take the lock on a mutex.
 *
 * This takes ownership of a mutex if it is not currently owned. Ownership
 * is held by this thread until a corresponding call to atomMutexPut() by
 * the same thread.
 *
 * Can be called recursively by the original locking thread (owner).
 * Recursive calls are counted, and ownership is not relinquished until
 * the number of unlock (atomMutexPut()) calls by the owner matches the
 * number of lock (atomMutexGet()) calls.
 *
 * No thread other than the owner can lock or unlock the mutex while it is
 * locked by another thread.
 *
 * Depending on the \c timeout value specified the call will do one of
 * the following if the mutex is already locked by another thread:
 *
 * \c timeout == 0 : Call will block until the mutex is available \n
 * \c timeout > 0 : Call will block until available up to the specified timeout \n
 * \c timeout == -1 : Return immediately if mutex is locked by another thread \n
 *
 * If the call needs to block and \c timeout is zero, it will block
 * indefinitely until the owning thread calls atomMutexPut() or
 * atomMutexDelete() is called on the mutex.
 *
 * If the call needs to block and \c timeout is non-zero, the call will only
 * block for the specified number of system ticks after which time, if the
 * thread was not already woken, the call will return with \c ATOM_TIMEOUT.
 *
 * If the call would normally block and \c timeout is -1, the call will
 * return immediately with \c ATOM_WOULDBLOCK.
 *
 * This function can only be called from thread context. A mutex has the
 * concept of an owner thread, so it is never valid to make a mutex call
 * from interrupt context when there is no thread to associate with.
 *
 * @param[in] mutex Pointer to mutex object
 * @param[in] timeout Max system ticks to block (0 = forever)
 *
 * @retval ATOM_OK Success
 * @retval ATOM_TIMEOUT Mutex timed out before being woken
 * @retval ATOM_WOULDBLOCK Called with timeout == -1 but mutex is owned by another thread
 * @retval ATOM_ERR_DELETED Mutex was deleted while suspended
 * @retval ATOM_ERR_CONTEXT Not called in thread context and attempted to block
 * @retval ATOM_ERR_PARAM Bad parameter
 * @retval ATOM_ERR_QUEUE Problem putting the thread on the suspend queue
 * @retval ATOM_ERR_TIMER Problem registering the timeout
 * @retval ATOM_ERR_OVF The recursive lock count would have overflowed (>255)
 */
uint8_t atomMutexGet (ATOM_MUTEX *mutex, int32_t timeout)
{
    CRITICAL_STORE;
    uint8_t status;
    MUTEX_TIMER timer_data;
    ATOM_TIMER timer_cb;
    ATOM_TCB *curr_tcb_ptr;

    /* Check parameters */
    if (mutex == NULL)
    {
        /* Bad mutex pointer */
        status = ATOM_ERR_PARAM;
    }
    else
    {
        /* Get the current TCB */
        curr_tcb_ptr = atomCurrentContext();

        /* Protect access to the mutex object and OS queues */
        CRITICAL_START ();

        /**
         * Check we are at thread context. Because mutexes have the concept of
         * owner threads, it is never valid to call here from an ISR,
         * regardless of whether we will block.
         */
        if (curr_tcb_ptr == NULL)
        {
            /* Exit critical region */
            CRITICAL_END ();

            /* Not currently in thread context, can't suspend */
            status = ATOM_ERR_CONTEXT;
        }

        /* Otherwise if mutex is owned by another thread, block the calling thread */
        else if ((mutex->owner != NULL) && (mutex->owner != curr_tcb_ptr))
        {
            /* If called with timeout >= 0, we should block */
            if (timeout >= 0)
            {
                /* Add current thread to the suspend list on this mutex */
                if (tcbEnqueuePriority (&mutex->suspQ, curr_tcb_ptr) != ATOM_OK)
                {
                    /* Exit critical region */
                    CRITICAL_END ();

                    /* There was an error putting this thread on the suspend list */
                    status = ATOM_ERR_QUEUE;
                }
                else
                {
                    /* Set suspended status for the current thread */
                    curr_tcb_ptr->suspended = TRUE;

                    /* Track errors */
                    status = ATOM_OK;

                    /* Register a timer callback if requested */
                    if (timeout)
                    {
                        /* Fill out the data needed by the callback to wake us up */
                        timer_data.tcb_ptr = curr_tcb_ptr;
                        timer_data.mutex_ptr = mutex;

                        /* Fill out the timer callback request structure */
                        timer_cb.cb_func = atomMutexTimerCallback;
                        timer_cb.cb_data = (POINTER)&timer_data;
                        timer_cb.cb_ticks = timeout;

                        /**
                         * Store the timer details in the TCB so that we can
                         * cancel the timer callback if the mutex is put
                         * before the timeout occurs.
                         */
                        curr_tcb_ptr->suspend_timo_cb = &timer_cb;

                        /* Register a callback on timeout */
                        if (atomTimerRegister (&timer_cb) != ATOM_OK)
                        {
                            /* Timer registration failed */
                            status = ATOM_ERR_TIMER;

                            /* Clean up and return to the caller */
                            (void)tcbDequeueEntry (&mutex->suspQ, curr_tcb_ptr);
                            curr_tcb_ptr->suspended = FALSE;
                            curr_tcb_ptr->suspend_timo_cb = NULL;
                        }
                    }

                    /* Set no timeout requested */
                    else
                    {
                        /* No need to cancel timeouts on this one */
                        curr_tcb_ptr->suspend_timo_cb = NULL;
                    }

                    /* Exit critical region */
                    CRITICAL_END ();

                    /* Check no errors have occurred */
                    if (status == ATOM_OK)
                    {
                        /**
                         * Current thread now blocking, schedule in a new
                         * one. We already know we are in thread context
                         * so can call the scheduler from here.
                         */
                        atomSched (FALSE);

                        /**
                         * Normal atomMutexPut() wakeups will set ATOM_OK status,
                         * while timeouts will set ATOM_TIMEOUT and mutex
                         * deletions will set ATOM_ERR_DELETED. */
                        status = curr_tcb_ptr->suspend_wake_status;

                        /**
                         * If we were woken up by another thread relinquishing
                         * the mutex and handing this thread ownership, then
                         * the relinquishing thread will set status to ATOM_OK
                         * and will make this thread the owner. Setting the
                         * owner before waking the thread ensures that no other
                         * thread can preempt and take ownership of the mutex
                         * between this thread being made ready to run, and
                         * actually being scheduled back in here.
                         */
                        if (status == ATOM_OK)
                        {
                            /**
                             * Since this thread has just gained ownership, the
                             * lock count is zero and should be incremented
                             * once for this call.
                             */
                            mutex->count++;
                        }
                    }
                }
            }
            else
            {
                /* timeout == -1, requested not to block and mutex is owned by another thread */
                CRITICAL_END();
                status = ATOM_WOULDBLOCK;
            }
        }
        else
        {
            /* Mutex is either not owned or already owned by us: we can claim ownership */

            /* Increment the lock count, checking for count overflow */
            if (mutex->count == 255)
            {
                /* Don't increment, just return error status */
                status = ATOM_ERR_OVF;
            }
            else
            {
                /* Increment the count and return to the calling thread */
                mutex->count++;

                /* If the mutex is not locked, mark the calling thread as the new owner */
                if (mutex->owner == NULL)
                {
                    mutex->owner = curr_tcb_ptr;
                }

                /* Successful */
                status = ATOM_OK;
            }

            /* Exit critical region */
            CRITICAL_END ();
        }
    }

    return (status);
}
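To make the timeout and recursive-lock rules concrete, here is a minimal sketch assuming the standard atomthreads headers and a mutex created elsewhere with atomMutexCreate (&mutex); thread creation is omitted and the 100-tick fallback wait is an arbitrary example value.

#include "atom.h"
#include "atommutex.h"

static ATOM_MUTEX mutex;  /* Assumed to be initialised elsewhere with atomMutexCreate (&mutex) */

static void mutex_user_thread_func (uint32_t param)
{
    uint8_t status;

    /* Poll first: timeout == -1 returns ATOM_WOULDBLOCK instead of suspending */
    status = atomMutexGet (&mutex, -1);
    if (status == ATOM_WOULDBLOCK)
    {
        /* Owned by another thread: fall back to a bounded wait of 100 ticks */
        status = atomMutexGet (&mutex, 100);
    }
    if (status != ATOM_OK)
    {
        /* ATOM_TIMEOUT, ATOM_ERR_DELETED, etc.: we never gained ownership */
        return;
    }

    /* The owner may lock recursively; each Get must be balanced by a Put */
    (void)atomMutexGet (&mutex, 0);   /* lock count is now 2 */
    (void)atomMutexPut (&mutex);      /* back to 1, still owned by this thread */

    /* Final Put releases ownership and may hand the mutex to a suspended waiter */
    (void)atomMutexPut (&mutex);
}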