Example no. 1
0
/*!
 * \brief Creates the periodic timer queue.
 *
 * \param[in] period_ptr The location of the data structure to be initialized.
 * \param[in] period     The cycle length of this timer in ticks.
 * \param[in] wait_ticks The number of ticks to wait before starting this queue.
 *
 * \return MQX_OK
 *
 * \see _lwtimer_add_timer_to_queue
 * \see _lwtimer_cancel_period
 * \see _lwtimer_cancel_timer
 * \see _lwtimer_create_periodic_queue
 * \see LWTIMER_PERIOD_STRUCT
 */
_mqx_uint _lwtimer_create_periodic_queue
(
    LWTIMER_PERIOD_STRUCT_PTR period_ptr,
    _mqx_uint                 period,
    _mqx_uint                 wait_ticks
)
{ /* Body */
    KERNEL_DATA_STRUCT_PTR kernel_data;

    _GET_KERNEL_DATA(kernel_data);

    _KLOGE4(KLOG_lwtimer_create_periodic_queue, period_ptr, period, wait_ticks);

    period_ptr->PERIOD = period;
    period_ptr->EXPIRY = 0;
    period_ptr->WAIT   = wait_ticks;
    _QUEUE_INIT(&period_ptr->TIMERS,0);
    period_ptr->TIMER_PTR = (void *) &period_ptr->TIMERS;

    _int_disable();
    if (kernel_data->LWTIMERS.NEXT == NULL)
    {
        /* Initialize the light weight timer queue */
        _QUEUE_INIT(&kernel_data->LWTIMERS, 0);
        kernel_data->LWTIMER_ISR = _lwtimer_isr_internal;
    } /* Endif */
    period_ptr->VALID = LWTIMER_VALID;
    _QUEUE_ENQUEUE(&kernel_data->LWTIMERS, &period_ptr->LINK);
    _int_enable();

    _KLOGX2(KLOG_lwtimer_create_periodic_queue, MQX_OK);
    return (MQX_OK);

} /* Endbody */
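
A minimal usage sketch (not part of the listing above): it creates the periodic queue defined here and then attaches one timer. The signature of _lwtimer_add_timer_to_queue is assumed from the \see reference; it is not shown in this section.

/* Hedged usage sketch: a 100-tick periodic queue that starts after 10 ticks. */
static LWTIMER_PERIOD_STRUCT demo_period;
static LWTIMER_STRUCT        demo_timer;

static void demo_timer_callback(void *parameter)
{
    /* Runs from the lwtimer ISR context on every period expiry. */
}

void demo_start_lwtimer(void)
{
    if (_lwtimer_create_periodic_queue(&demo_period, 100, 10) != MQX_OK) {
        return;  /* the routine above currently always returns MQX_OK */
    }
    /* Assumed signature: queue, timer, offset in ticks, callback, parameter. */
    _lwtimer_add_timer_to_queue(&demo_period, &demo_timer, 0,
                                demo_timer_callback, NULL);
}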
Example no. 2
0
_mqx_uint _lwsem_wait_timed_internal
   (
      /* [IN] the semaphore address */
      LWSEM_STRUCT_PTR sem_ptr,

      /* [IN] the task descriptor waiting */
      TD_STRUCT_PTR          td_ptr
   )
{ /* Body */

   td_ptr->STATE = LWSEM_BLOCKED;
   td_ptr->INFO  = (_mqx_uint)&sem_ptr->TD_QUEUE;
   _QUEUE_UNLINK(td_ptr);
   _QUEUE_ENQUEUE(&sem_ptr->TD_QUEUE, &td_ptr->AUX_QUEUE);
   _time_delay_internal(td_ptr);
   if (td_ptr->INFO != 0) {
/* Start CR 544 */
      /*_QUEUE_REMOVE(&sem_ptr->TD_QUEUE, &td_ptr->AUX_QUEUE);*/
/* End CR 544 */
      return(MQX_LWSEM_WAIT_TIMEOUT);
   } /* Endif */
   
   return(MQX_OK);
   
} /* Endbody */
Example no. 3
0
pointer _taskq_create
   ( 
      /* [IN] the policy of this task queue (fifo or priority queueing) */
      _mqx_uint policy
   )
{ /* Body */
   KERNEL_DATA_STRUCT_PTR kernel_data;
   TASK_QUEUE_STRUCT_PTR  task_queue_ptr;

   _GET_KERNEL_DATA(kernel_data);

   _KLOGE2(KLOG_taskq_create, policy);

#if MQX_CHECK_ERRORS
   if (! ((policy == MQX_TASK_QUEUE_FIFO) ||
          (policy == MQX_TASK_QUEUE_BY_PRIORITY)))
   {
      _task_set_error(MQX_INVALID_PARAMETER);
      _KLOGX2(KLOG_taskq_create, NULL);
      return (NULL);
   } /* Endif */
   if (kernel_data->IN_ISR) {
      _task_set_error(MQX_CANNOT_CALL_FUNCTION_FROM_ISR);
      _KLOGX2(KLOG_taskq_create, NULL);
      return(NULL);
   }/* Endif */
#endif

   task_queue_ptr = (TASK_QUEUE_STRUCT_PTR)_mem_alloc_system((_mem_size)
      sizeof(TASK_QUEUE_STRUCT));
#if MQX_CHECK_MEMORY_ALLOCATION_ERRORS
   if (task_queue_ptr == NULL) {
      _KLOGX2(KLOG_taskq_create, NULL);
      return(NULL);
   } /* Endif */
#endif

   _mem_set_type(task_queue_ptr, MEM_TYPE_TASK_Q);

   task_queue_ptr->POLICY = policy;
   _QUEUE_INIT(&task_queue_ptr->TD_QUEUE, 0);
   task_queue_ptr->VALID  = TASK_QUEUE_VALID;
   _int_disable();
   if (kernel_data->KERNEL_TASK_QUEUES.NEXT == NULL) {
       /* Initialize the task queue */
      _QUEUE_INIT(&kernel_data->KERNEL_TASK_QUEUES,0);
   
   } /* Endif */
   _QUEUE_ENQUEUE(&kernel_data->KERNEL_TASK_QUEUES, task_queue_ptr);
   _int_enable();

   _KLOGX2(KLOG_taskq_create, task_queue_ptr);

   return(task_queue_ptr);
   
} /* Endbody */
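
A minimal usage sketch for the routine above; it relies only on the MQX_TASK_QUEUE_FIFO policy constant already referenced in the listing.

/* Hedged usage sketch: create a FIFO task queue and keep its handle. */
static pointer demo_task_queue;

void demo_create_queue(void)
{
    demo_task_queue = _taskq_create(MQX_TASK_QUEUE_FIFO);
    if (demo_task_queue == NULL) {
        /* invalid policy, called from an ISR, or allocation failed */
    }
}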
Example no. 4
0
_mqx_uint _taskq_suspend
   (
      /* [IN] the task queue handle */
      pointer users_task_queue_ptr
   )
{ /* Body */
   register KERNEL_DATA_STRUCT_PTR kernel_data;
   register TD_STRUCT_PTR          td_ptr;
   register TASK_QUEUE_STRUCT_PTR  task_queue_ptr = 
      (TASK_QUEUE_STRUCT_PTR)users_task_queue_ptr;

   _GET_KERNEL_DATA(kernel_data);

   _KLOGE2(KLOG_taskq_suspend, users_task_queue_ptr);

#if MQX_CHECK_ERRORS
   if (task_queue_ptr == NULL){
      _KLOGX2(KLOG_taskq_suspend, MQX_INVALID_PARAMETER);
      return(MQX_INVALID_PARAMETER);
   } /* Endif */
   if (kernel_data->IN_ISR) {
      _KLOGX2(KLOG_taskq_suspend, MQX_CANNOT_CALL_FUNCTION_FROM_ISR);
      return(MQX_CANNOT_CALL_FUNCTION_FROM_ISR);
   }/* Endif */
#endif

   td_ptr      = kernel_data->ACTIVE_PTR;

   _INT_DISABLE();

#if MQX_CHECK_VALIDITY
   if (task_queue_ptr->VALID != TASK_QUEUE_VALID) {
      _int_enable();
      _KLOGX2(KLOG_taskq_suspend, MQX_INVALID_TASK_QUEUE);
      return(MQX_INVALID_TASK_QUEUE);
   } /* Endif */
#endif

   td_ptr->STATE = TASK_QUEUE_BLOCKED;
   _QUEUE_UNLINK(td_ptr); /* Remove task from ready to run queue */
   td_ptr->INFO = (_mqx_uint)&task_queue_ptr->TD_QUEUE;
   if (task_queue_ptr->POLICY & MQX_TASK_QUEUE_BY_PRIORITY)  {
      _sched_insert_priorityq_internal(&task_queue_ptr->TD_QUEUE, td_ptr);
   } else {
      _QUEUE_ENQUEUE(&task_queue_ptr->TD_QUEUE, td_ptr);
   } /* Endif */

   _sched_execute_scheduler_internal(); /* Let the other tasks run */

   _INT_ENABLE();

   _KLOGX2(KLOG_taskq_suspend, MQX_OK);
   return( MQX_OK );
   
} /* Endbody */
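
A minimal usage sketch: the calling task suspends itself on a queue created with _taskq_create, and another task is assumed to wake it with _taskq_resume (signature assumed: queue handle plus an "all tasks" flag).

/* Hedged usage sketch: block the caller on a task queue. */
void demo_wait_on_queue(pointer task_queue)
{
    if (_taskq_suspend(task_queue) != MQX_OK) {
        /* NULL handle, invalid queue, or called from an ISR */
    }
}

/* Assumed companion call: wake only the first waiter. */
void demo_wake_queue(pointer task_queue)
{
    _taskq_resume(task_queue, FALSE);
}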
Example no. 5
0
_mqx_uint _mmu_destroy_vcontext
   (
      /* [IN] the task for which a virtual context is to be removed */
      _task_id task_id
   )
{ /* Body */
   KERNEL_DATA_STRUCT_PTR         kernel_data;
   TD_STRUCT_PTR                  td_ptr;
   PSP_VIRTUAL_CONTEXT_STRUCT_PTR context_ptr;
   PSP_PAGE_INFO_STRUCT_PTR       mem_ptr;
   PSP_SUPPORT_STRUCT_PTR         psp_support_ptr;

   _GET_KERNEL_DATA(kernel_data);
   psp_support_ptr = kernel_data->PSP_SUPPORT_PTR;
   td_ptr = _task_get_td(task_id);
   if (td_ptr == NULL) {
      return(MQX_INVALID_TASK_ID);
   }/* Endif */
   _int_disable();
   if ((td_ptr->FLAGS & TASK_MMU_CONTEXT_EXISTS) == 0) {
      _int_enable();
      return(MQX_MMU_CONTEXT_DOES_NOT_EXIST);
   } /* Endif */
   if (td_ptr == kernel_data->ACTIVE_PTR) {
      /* Remove task MMU pages from the MMU table */
      _mmu_reset_vcontext_internal();
   }/* Endif */
   td_ptr->FLAGS &= ~TASK_MMU_CONTEXT_EXISTS;
   context_ptr = td_ptr->MMU_VIRTUAL_CONTEXT_PTR;
   td_ptr->MMU_VIRTUAL_CONTEXT_PTR = NULL;
   _lwsem_wait(&psp_support_ptr->VPAGE_FREELIST_LWSEM);
   _int_enable();

   while (_QUEUE_GET_SIZE(&context_ptr->PAGE_INFO)) {
      _QUEUE_DEQUEUE(&context_ptr->PAGE_INFO, mem_ptr);
      _QUEUE_ENQUEUE(&psp_support_ptr->VPAGE_FREELIST, &mem_ptr->ELEMENT);
   } /* Endwhile */
   _lwsem_post(&psp_support_ptr->VPAGE_FREELIST_LWSEM);
   _mem_free(context_ptr);
   return(MQX_OK);

} /* Endbody */
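
A minimal usage sketch: tear down the calling task's own virtual context. _task_get_id() is assumed to return the caller's _task_id; everything else comes from the listing above.

/* Hedged usage sketch: drop the caller's MMU virtual context. */
void demo_drop_vcontext(void)
{
    _mqx_uint result = _mmu_destroy_vcontext(_task_get_id());

    if (result == MQX_MMU_CONTEXT_DOES_NOT_EXIST) {
        /* no virtual context had been created for this task */
    }
}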
Example no. 6
0
_mqx_uint _lwsem_wait_ticks
   (
      /* [IN] the semaphore address */
      LWSEM_STRUCT_PTR sem_ptr,

      /* [IN] the number of ticks to delay; if 0, delay forever */
      _mqx_uint time_in_ticks
   )
{ /* Body */
   KERNEL_DATA_STRUCT_PTR kernel_data;
   TD_STRUCT_PTR          td_ptr;
   _mqx_uint              result;
   
#if MQX_ENABLE_USER_MODE && MQX_ENABLE_USER_STDAPI
    if (MQX_RUN_IN_USER_MODE) {
        return _usr_lwsem_wait_ticks(sem_ptr, time_in_ticks);
    }
#endif

   _GET_KERNEL_DATA(kernel_data);

   _KLOGE3(KLOG_lwsem_wait_ticks, sem_ptr, time_in_ticks);

#if MQX_CHECK_ERRORS
   if (kernel_data->IN_ISR) {
      _KLOGX2(KLOG_lwsem_wait_ticks, MQX_CANNOT_CALL_FUNCTION_FROM_ISR);
      return(MQX_CANNOT_CALL_FUNCTION_FROM_ISR);
   } /* Endif */
#endif
#if MQX_CHECK_VALIDITY
   if (sem_ptr->VALID != LWSEM_VALID) {
      _KLOGX2(KLOG_lwsem_wait_ticks, MQX_INVALID_LWSEM);
      return(MQX_INVALID_LWSEM);
   } /* Endif */
#endif

   _INT_DISABLE();
   if (sem_ptr->VALUE <= 0) {
      td_ptr = kernel_data->ACTIVE_PTR;
      if (time_in_ticks == 0) {
         td_ptr->STATE = LWSEM_BLOCKED;
         td_ptr->INFO  = (_mqx_uint)&sem_ptr->TD_QUEUE;
         _QUEUE_UNLINK(td_ptr);
         _QUEUE_ENQUEUE(&sem_ptr->TD_QUEUE, &td_ptr->AUX_QUEUE);
         _sched_execute_scheduler_internal();  /* Let the other tasks run */
         /* Another task has posted a semaphore, and it has been transferred to
         ** this task.
         */
         result = MQX_OK;
      } else {
         PSP_ADD_TICKS_TO_TICK_STRUCT(&kernel_data->TIME, time_in_ticks, 
            &td_ptr->TIMEOUT);
         result = _lwsem_wait_timed_internal(sem_ptr, td_ptr);
      } /* Endif */
   } else {
      --sem_ptr->VALUE;
      /* Start CR 788 */
      result = MQX_OK;
      /* End  CR 788 */
   } /* Endif */

//#if MQX_COMPONENT_DESTRUCTION
   /* We must check for component destruction */
   if (sem_ptr->VALID != LWSEM_VALID) {
      _int_enable();
      /* The semaphore has been deleted */
      _KLOGX2(KLOG_lwsem_wait_ticks, MQX_INVALID_LWSEM);
      return(MQX_INVALID_LWSEM);
   } /* Endif */
//#endif

   _INT_ENABLE();

   _KLOGX2(KLOG_lwsem_wait_ticks, result);

   return(result);
   
}
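
A minimal usage sketch, assuming the semaphore was initialized elsewhere with _lwsem_create(sem_ptr, initial_count); only the timed wait itself comes from the listing above.

/* Hedged usage sketch: wait at most 200 ticks for a semaphore. */
static LWSEM_STRUCT demo_sem;

void demo_consumer(void)
{
    _mqx_uint result = _lwsem_wait_ticks(&demo_sem, 200);

    if (result == MQX_LWSEM_WAIT_TIMEOUT) {
        /* 200 ticks elapsed without a post */
    } else if (result != MQX_OK) {
        /* invalid lwsem or called from an ISR */
    }
}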
Example no. 7
0
/*!
 * \private
 *
 * \brief Used by a task to create an instance of a lightweight event.
 *
 * \param[in] event_ptr Pointer representing location of the event.
 * \param[in] flags     Flags for the light weight event.
 * \param[in] user      User mode
 *
 * \return MQX_OK
 * \return MQX_EINVAL (lwevent is already initialized.)
 * \return MQX_LWEVENT_INVALID (In user mode, MQX attempted to access the
 * lwevent with inappropriate access rights.)
 *
 * \see _lwevent_create
 * \see LWEVENT_STRUCT
 */
_mqx_uint _lwevent_create_internal
(
    LWEVENT_STRUCT_PTR  event_ptr,
    _mqx_uint           flags,
    bool             user
)
{
    KERNEL_DATA_STRUCT_PTR  kernel_data;
    LWEVENT_STRUCT_PTR      event_chk_ptr;

#if MQX_ENABLE_USER_MODE
    if (user && !_psp_mem_check_access_mask((uint32_t)event_ptr,
                                             sizeof(LWEVENT_STRUCT),
                                             MPU_UM_R, MPU_UM_RW) )
    {
        return MQX_LWEVENT_INVALID;
    }
#endif

    _GET_KERNEL_DATA(kernel_data);

    _KLOGE2(KLOG_lwevent_create, event_ptr);

    _QUEUE_INIT(&event_ptr->WAITING_TASKS, 0);
    event_ptr->VALUE = 0;
    event_ptr->FLAGS = flags;

    if (flags & LWEVENT_AUTO_CLEAR)
        event_ptr->AUTO = ~0;
    else
        event_ptr->AUTO = 0;

    _int_disable();

#if MQX_ENABLE_USER_MODE
    if (user)
    {
        if (kernel_data->USR_LWEVENTS.NEXT == NULL)
        {
            /* Initialize the light weight event queue */
            _QUEUE_INIT(&kernel_data->USR_LWEVENTS, 0);
        }
    }
    else
#endif
    {
        if (kernel_data->LWEVENTS.NEXT == NULL)
        {
            /* Initialize the light weight event queue */
            _QUEUE_INIT(&kernel_data->LWEVENTS, 0);
        }
    }

    event_ptr->VALID = LWEVENT_VALID;

#if MQX_CHECK_ERRORS
    /* Check if lwevent is already initialized */
#if MQX_ENABLE_USER_MODE
    if (user)
    {
        event_chk_ptr = (LWEVENT_STRUCT_PTR)((void *)kernel_data->USR_LWEVENTS.NEXT);
        while (event_chk_ptr != (LWEVENT_STRUCT_PTR)((void *)&kernel_data->USR_LWEVENTS))
        {
            if (event_chk_ptr == event_ptr)
            {
                _int_enable();
                _KLOGX2(KLOG_lwevent_create, MQX_EINVAL);
                return(MQX_EINVAL);
            }
            event_chk_ptr = (LWEVENT_STRUCT_PTR)((void *)event_chk_ptr->LINK.NEXT);
        }
    }
    else
#endif
    {
        event_chk_ptr = (LWEVENT_STRUCT_PTR) ((void *) kernel_data->LWEVENTS.NEXT);
        while (event_chk_ptr != (LWEVENT_STRUCT_PTR) ((void *) &kernel_data->LWEVENTS))
        {
            if (event_chk_ptr == event_ptr)
            {
                _int_enable();
                _KLOGX2(KLOG_lwevent_create, MQX_EINVAL);
                return (MQX_EINVAL);
            }
            event_chk_ptr = (LWEVENT_STRUCT_PTR) ((void *) event_chk_ptr->LINK.NEXT);
        }
    }
#endif

#if MQX_ENABLE_USER_MODE
    if (user)
    {
        _QUEUE_ENQUEUE(&kernel_data->USR_LWEVENTS, &event_ptr->LINK);
    }
    else
#endif
    {
        _QUEUE_ENQUEUE(&kernel_data->LWEVENTS, &event_ptr->LINK);
    }

    _int_enable();

    _KLOGX2(KLOG_lwevent_create, MQX_OK);
    return (MQX_OK);
}
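
A minimal usage sketch through the public wrapper _lwevent_create from the \see list, which is assumed to call the internal routine above with user set to FALSE.

/* Hedged usage sketch: create an auto-clearing lightweight event. */
static LWEVENT_STRUCT demo_event;

void demo_create_event(void)
{
    if (_lwevent_create(&demo_event, LWEVENT_AUTO_CLEAR) != MQX_OK) {
        /* the event was already on the kernel's lwevent list */
    }
}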
Example no. 8
0
/*!
 * \private
 *
 * \brief This is an internal function used by a task to wait for a specified event.
 *
 * \param[in] event_ptr          Read only. Pointer to the lightweight event.
 * \param[in] bit_mask           Bit mask. Each set bit represents an event bit
 * to wait for.
 * \param[in] all                TRUE (wait for all bits in bit_mask to be set),
 * FALSE (wait for any bit in bit_mask to be set).
 * \param[in] tick_ptr           Pointer to the maximum number of ticks to wait
 * for the events to be set. If the value is NULL, then the timeout will be infinite.
 * \param[in] ticks_are_absolute TRUE (ticks represents absolute time), FALSE
 * (ticks represents relative time).
 *
 * \return MQX_OK
 * \return LWEVENT_WAIT_TIMEOUT (The time elapsed before an event was signalled.)
 * \return MQX_LWEVENT_INVALID (Lightweight event is no longer valid or was never valid.)
 * \return MQX_CANNOT_CALL_FUNCTION_FROM_ISR (Function cannot be called from an ISR.)
 *
 * \see _lwevent_wait_for
 * \see _usr_lwevent_wait_for
 * \see _lwevent_wait_until
 * \see _usr_lwevent_wait_until
 * \see _lwevent_wait_ticks
 * \see _usr_lwevent_wait_ticks
 * \see LWEVENT_STRUCT
 * \see MQX_TICK_STRUCT
 */
_mqx_uint _lwevent_wait_internal
(
    LWEVENT_STRUCT_PTR  event_ptr,
    _mqx_uint           bit_mask,
    bool             all,
    MQX_TICK_STRUCT_PTR tick_ptr,
    bool             ticks_are_absolute
)
{
    KERNEL_DATA_STRUCT_PTR  kernel_data;
    TD_STRUCT_PTR           td_ptr;
    _mqx_uint               result;

    _GET_KERNEL_DATA(kernel_data);

#if MQX_CHECK_ERRORS
    if (kernel_data->IN_ISR)
    {
        _KLOGX2(KLOG_lwevent_wait_internal, MQX_CANNOT_CALL_FUNCTION_FROM_ISR);
        return (MQX_CANNOT_CALL_FUNCTION_FROM_ISR);
    } /* Endif */
#endif

    result = MQX_OK;
    td_ptr = kernel_data->ACTIVE_PTR;
    _INT_DISABLE();

#if MQX_CHECK_VALIDITY
    if (event_ptr->VALID != LWEVENT_VALID)
    {
        _int_enable();
        return (MQX_LWEVENT_INVALID);
    } /* Endif */
#endif

    if (    (all &&  (event_ptr->VALUE & bit_mask) == bit_mask)
         || (!all && (event_ptr->VALUE & bit_mask)))
    {
        /* store information about which bits caused task to be unblocked */
        td_ptr->LWEVENT_BITS = event_ptr->VALUE & bit_mask;
        /* clear used automatic events */
        event_ptr->VALUE &= ~(event_ptr->AUTO & bit_mask);

        _INT_ENABLE();
        return (result);
    } /* Endif */

    /* Must wait for an event to become available */

    td_ptr->LWEVENT_BITS = bit_mask;
    if (all)
    {
        td_ptr->FLAGS |= TASK_LWEVENT_ALL_BITS_WANTED;
    }
    else
    {
        td_ptr->FLAGS &= ~TASK_LWEVENT_ALL_BITS_WANTED;
    } /* Endif */

    /* Enqueue at end */
    _QUEUE_ENQUEUE(&event_ptr->WAITING_TASKS, &td_ptr->AUX_QUEUE);

    /* Now put the task to sleep */
    td_ptr->STATE = LWEVENT_BLOCKED;
    td_ptr->INFO = (_mqx_uint) &event_ptr->WAITING_TASKS;
    if (tick_ptr)
    {
        if (ticks_are_absolute)
        {
            _time_delay_until(tick_ptr);
        }
        else
        {
            _time_delay_for(tick_ptr);
        } /* Endif */
        if (td_ptr->INFO)
        {
            /* Must have timed out */
            /*_QUEUE_REMOVE(&event_ptr->WAITING_TASKS, &td_ptr->AUX_QUEUE);*/
            result = LWEVENT_WAIT_TIMEOUT;
        } /* Endif */
    }
    else
    {
        _task_block();
    } /* Endif */

#if MQX_COMPONENT_DESTRUCTION
    if (event_ptr->VALID == 0)
    { /* We've been deleted */
        result = MQX_LWEVENT_INVALID;
    } /* Endif */
#endif

    _INT_ENABLE();
    return (result);

}
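
A minimal usage sketch through _lwevent_wait_ticks from the \see list, which is assumed to wrap the internal routine above with a relative timeout expressed in ticks.

/* Hedged usage sketch: wait up to 50 ticks for any of bits 0 and 1. */
void demo_wait_event(LWEVENT_STRUCT_PTR event_ptr)
{
    _mqx_uint result = _lwevent_wait_ticks(event_ptr, 0x03, FALSE, 50);

    if (result == LWEVENT_WAIT_TIMEOUT) {
        /* neither requested bit was set within 50 ticks */
    }
}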
Example no. 9
0
_mqx_uint _mem_extend_pool_internal
   (
      /* [IN] the address of the start of the memory pool addition */
      pointer             start_of_pool,

      /* [IN] the size of the memory pool addition */
      _mem_size           size,

      /* [IN] the memory pool to extend */
      MEMPOOL_STRUCT_PTR  mem_pool_ptr

   )
{ /* Body */
   KERNEL_DATA_STRUCT_PTR       kernel_data;
   STOREBLOCK_STRUCT_PTR        block_ptr;
   STOREBLOCK_STRUCT_PTR        end_ptr;
   STOREBLOCK_STRUCT_PTR        free_ptr;
   STOREBLOCK_STRUCT_PTR        tmp_ptr;
   MEMPOOL_EXTENSION_STRUCT_PTR ext_ptr;
   uchar_ptr                    real_start_ptr;
   uchar_ptr                    end_of_pool;
   _mem_size                    block_size;
   _mem_size                    real_size;
   _mem_size                    free_block_size;

   _GET_KERNEL_DATA(kernel_data);

#if MQX_CHECK_ERRORS
   if (size < (_mem_size)(3*MQX_MIN_MEMORY_STORAGE_SIZE)) {
      /* Pool must be big enough to hold at least 3 memory blocks */
      return(MQX_INVALID_SIZE);
   }/* Endif */
#endif

#if MQX_CHECK_VALIDITY
   if (mem_pool_ptr->VALID != MEMPOOL_VALID) {
      return(MQX_INVALID_COMPONENT_HANDLE);
   }/* Endif */
#endif
      
   ext_ptr = (MEMPOOL_EXTENSION_STRUCT_PTR)
      _ALIGN_ADDR_TO_HIGHER_MEM(start_of_pool);

   real_start_ptr = (uchar_ptr)ext_ptr + sizeof(MEMPOOL_EXTENSION_STRUCT);
   real_start_ptr = (uchar_ptr)_ALIGN_ADDR_TO_HIGHER_MEM(real_start_ptr);

   end_of_pool = (uchar_ptr)start_of_pool + size;
   end_of_pool = (uchar_ptr)_ALIGN_ADDR_TO_LOWER_MEM(end_of_pool);

   real_size  = (_mem_size)(end_of_pool - real_start_ptr);

   ext_ptr->START = start_of_pool;
   ext_ptr->SIZE  = size;
   ext_ptr->REAL_START = real_start_ptr;

   block_ptr  = (STOREBLOCK_STRUCT_PTR)real_start_ptr;
   block_size = MQX_MIN_MEMORY_STORAGE_SIZE;

   free_ptr = (STOREBLOCK_STRUCT_PTR)((uchar_ptr)block_ptr + block_size);
   free_block_size = real_size - (_mem_size)(2 * MQX_MIN_MEMORY_STORAGE_SIZE);

   end_ptr  = (STOREBLOCK_STRUCT_PTR)((uchar_ptr)free_ptr + free_block_size);

   /*
   ** Make a small, minimal-sized memory block to serve as the first block
   ** in the pool.  This will be an in-use block and will thus avoid
   ** problems with memory coalescing during memory frees.
   */
   block_ptr->BLOCKSIZE = block_size;
   block_ptr->MEM_TYPE = 0;
   block_ptr->USER_AREA = 0;
   block_ptr->PREVBLOCK = (struct storeblock_struct _PTR_)NULL;
   block_ptr->NEXTBLOCK = free_ptr;
   MARK_BLOCK_AS_USED(block_ptr, SYSTEM_TASK_ID(kernel_data));
   CALC_CHECKSUM(block_ptr);

   /* 
   ** Let the next block be the actual free block that will be added
   ** to the free list
   */
   free_ptr->BLOCKSIZE = free_block_size;
   free_ptr->MEM_TYPE = 0;
   free_ptr->USER_AREA = 0;
   free_ptr->PREVBLOCK = block_ptr;
   free_ptr->NEXTBLOCK = end_ptr;
   MARK_BLOCK_AS_FREE(free_ptr);
   CALC_CHECKSUM(free_ptr);

   /*
   ** Set up a minimal sized block at the end of the pool, and also
   ** mark it as being allocated.  Again this is to comply with the
   ** _mem_free algorithm
   */
   end_ptr->BLOCKSIZE = block_size;
   end_ptr->MEM_TYPE = 0;
   end_ptr->USER_AREA = 0;
   end_ptr->PREVBLOCK = free_ptr;
   end_ptr->NEXTBLOCK = NULL;
   MARK_BLOCK_AS_USED(end_ptr, SYSTEM_TASK_ID(kernel_data));
   CALC_CHECKSUM(end_ptr);

   _int_disable();  /* Add the block to the free list */
   tmp_ptr = mem_pool_ptr->POOL_FREE_LIST_PTR;
   mem_pool_ptr->POOL_FREE_LIST_PTR = free_ptr;
   if (tmp_ptr != NULL) {
      PREV_FREE(tmp_ptr) = free_ptr;
   } /* Endif */
   PREV_FREE(free_ptr) = NULL;
   NEXT_FREE(free_ptr) = tmp_ptr;

   /* Reset the free list queue walker for some other task */
   mem_pool_ptr->POOL_FREE_CURRENT_BLOCK = mem_pool_ptr->POOL_FREE_LIST_PTR;

   /* Link in the extension */
   _QUEUE_ENQUEUE(&mem_pool_ptr->EXT_LIST, &ext_ptr->LINK);

   _int_enable();

   return(MQX_OK);

} /* Endbody */
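
A minimal usage sketch via the public wrapper _mem_extend, which is assumed to validate its arguments and hand the region to the internal routine above for the default pool.

/* Hedged usage sketch: donate a static buffer to the default memory pool. */
static unsigned char demo_extension[4 * 1024];

void demo_extend_default_pool(void)
{
    if (_mem_extend(demo_extension, sizeof(demo_extension)) != MQX_OK) {
        /* region smaller than three minimum blocks, or pool invalid */
    }
}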
Example no. 10
0
_mqx_uint _mutex_init
   (
      /* [IN] - the address where the mutex is to be initialized */
      register MUTEX_STRUCT_PTR      mutex_ptr,

      /* [IN]  - Initialization parameters for the mutex  */
      register MUTEX_ATTR_STRUCT_PTR attr_ptr
   )
{ /* Body */
   KERNEL_DATA_STRUCT_PTR     kernel_data;
   MUTEX_COMPONENT_STRUCT_PTR mutex_component_ptr;
   MUTEX_ATTR_STRUCT          default_attr;
#if MQX_CHECK_ERRORS
   MUTEX_STRUCT_PTR           mutex_chk_ptr;
#endif
   _mqx_uint                   result;

   _GET_KERNEL_DATA(kernel_data);
   if (attr_ptr == NULL) {
      attr_ptr = &default_attr;
      _mutatr_init(attr_ptr);
      _KLOGE3(KLOG_mutex_init, mutex_ptr, NULL);
   } else {
      _KLOGE3(KLOG_mutex_init, mutex_ptr, attr_ptr);
   } /* Endif */
   
#if MQX_CHECK_ERRORS
   if (mutex_ptr == NULL) {
      _KLOGX2(KLOG_mutex_init, MQX_EINVAL);
      return(MQX_EINVAL);
   } /* Endif */
#endif
#if MQX_CHECK_VALIDITY
   if (attr_ptr->VALID != MUTEX_VALID) {
      _KLOGX2(KLOG_mutex_init, MQX_EINVAL);
      return(MQX_EINVAL);
   } /* Endif */
#endif

   mutex_component_ptr = (MUTEX_COMPONENT_STRUCT_PTR)
      kernel_data->KERNEL_COMPONENTS[KERNEL_MUTEXES];
   if (mutex_component_ptr == NULL) {
      result = _mutex_create_component();
      mutex_component_ptr = (MUTEX_COMPONENT_STRUCT_PTR)
         kernel_data->KERNEL_COMPONENTS[KERNEL_MUTEXES];
#if MQX_CHECK_MEMORY_ALLOCATION_ERRORS
      if (mutex_component_ptr == NULL){
         _KLOGX2(KLOG_mutex_init, result);
         return(result);
      } /* Endif */
#endif
   } /* Endif */

#if MQX_CHECK_VALIDITY
   if (mutex_component_ptr->VALID != MUTEX_VALID) {
      _KLOGX2(KLOG_mutex_init, MQX_INVALID_COMPONENT_BASE);
      return(MQX_INVALID_COMPONENT_BASE);
   } /* Endif */
#endif

   _int_disable();
#if MQX_CHECK_ERRORS
   /* Check if mutex is already initialized */
   mutex_chk_ptr = (MUTEX_STRUCT_PTR)
      ((pointer)mutex_component_ptr->MUTEXES.NEXT);
   while (mutex_chk_ptr != (MUTEX_STRUCT_PTR)
      ((pointer)&mutex_component_ptr->MUTEXES))
   {
      if (mutex_chk_ptr == mutex_ptr) {
         _int_enable();
         _KLOGX2(KLOG_mutex_init, MQX_EINVAL);
         return(MQX_EINVAL);
      } /* Endif */
      mutex_chk_ptr = (MUTEX_STRUCT_PTR)((pointer)mutex_chk_ptr->LINK.NEXT);
   } /* Endwhile */
#endif
   
   mutex_ptr->PROTOCOLS        = 
      attr_ptr->SCHED_PROTOCOL | attr_ptr->WAIT_PROTOCOL;
   mutex_ptr->VALID            = MUTEX_VALID;
   mutex_ptr->COUNT            = attr_ptr->COUNT;
   mutex_ptr->PRIORITY_CEILING = attr_ptr->PRIORITY_CEILING;
   mutex_ptr->LOCK             = 0;
   mutex_ptr->BOOSTED          = 0;
   mutex_ptr->OWNER_TD         = NULL;
   _QUEUE_INIT(&mutex_ptr->WAITING_TASKS, 0);

   _QUEUE_ENQUEUE(&mutex_component_ptr->MUTEXES, mutex_ptr);
   _int_enable();

   _KLOGX2(KLOG_mutex_init, MQX_EOK);
   return(MQX_EOK);
   
} /* Endbody */
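
A minimal usage sketch: initialize a mutex with default attributes by passing NULL, which the routine above handles by building them with _mutatr_init.

/* Hedged usage sketch: default-initialized mutex. */
static MUTEX_STRUCT demo_mutex;

void demo_init_mutex(void)
{
    if (_mutex_init(&demo_mutex, NULL) != MQX_EOK) {
        /* invalid pointer, duplicate initialization, or component failure */
    }
}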
Example no. 11
0
_mqx_uint _lwmsgq_receive
   (
      /* Handle to the queue */
      pointer             handle,

      /* location of message to copy to */
      _mqx_max_type_ptr   message,

      /* flags for blocking on empty */
      _mqx_uint           flags,

      /* Timeout for the receive, in ticks; if 0, it is ignored */
      _mqx_uint           ticks,

      /* Timeout for the receive as a tick structure; the corresponding flag must be set */
      MQX_TICK_STRUCT_PTR tick_ptr
   )
{/* Body */
   KERNEL_DATA_STRUCT_PTR kernel_data;
   TD_STRUCT_PTR          td_ptr;
   LWMSGQ_STRUCT_PTR      q_ptr = (LWMSGQ_STRUCT_PTR)handle;
   _mqx_uint              i;
   _mqx_max_type_ptr      from_ptr;
   _mqx_max_type_ptr      to_ptr;

#if MQX_ENABLE_USER_MODE && MQX_ENABLE_USER_STDAPI
    if (MQX_RUN_IN_USER_MODE) {
        return _usr_lwmsgq_receive(handle, message, flags, ticks, tick_ptr);
    }
#endif
    
   /* Start CR 1944 */
   _GET_KERNEL_DATA(kernel_data);
   _KLOGE6(KLOG_lwmsgq_receive, handle, message, flags, ticks, tick_ptr);
   /* End CR 1944 */
   
   _int_disable();
#if MQX_CHECK_VALIDITY
   if (q_ptr->VALID != LWMSGQ_VALID){
      _int_enable();
      /* Start CR 1944 */
      _KLOGX2(KLOG_lwmsgq_receive, LWMSGQ_INVALID);
      /* End CR 1944 */
      return LWMSGQ_INVALID;
   } /* Endif */
#endif
   if (LWMSGQ_IS_EMPTY(q_ptr)) {
      if (flags & LWMSGQ_RECEIVE_BLOCK_ON_EMPTY) {
         td_ptr = kernel_data->ACTIVE_PTR;
         while (LWMSGQ_IS_EMPTY(q_ptr)) {
            td_ptr->STATE = LWMSGQ_READ_BLOCKED;
            td_ptr->INFO  = (_mqx_uint)&q_ptr->WAITING_READERS;
            _QUEUE_UNLINK(td_ptr);
            _QUEUE_ENQUEUE(&q_ptr->WAITING_READERS, &td_ptr->AUX_QUEUE);
            if (ticks || (flags & (LWMSGQ_TIMEOUT_UNTIL | LWMSGQ_TIMEOUT_FOR))){
               if (ticks) {
                  PSP_ADD_TICKS_TO_TICK_STRUCT(&kernel_data->TIME, ticks,
                     &td_ptr->TIMEOUT);
               } else if (flags & LWMSGQ_TIMEOUT_UNTIL){
                  td_ptr->TIMEOUT = *tick_ptr;
               } else {
                  PSP_ADD_TICKS(tick_ptr, &kernel_data->TIME, &td_ptr->TIMEOUT);
               } /* Endif */
               _time_delay_internal(td_ptr);
               if (td_ptr->INFO != 0) {
                  _int_enable();
                  /* Start CR 1944 */
                  _KLOGX2(KLOG_lwmsgq_receive, LWMSGQ_TIMEOUT);
                  /* End CR 1944 */
                  return LWMSGQ_TIMEOUT;
               } /* Endif */
            } else {
               _sched_execute_scheduler_internal(); /* Let other tasks run */
            } /* Endif */
         } /* Endwhile */
      } else {
         _int_enable();
         /* Start CR 1944 */
         _KLOGX2(KLOG_lwmsgq_receive, LWMSGQ_EMPTY);
         /* End CR 1944 */
         return LWMSGQ_EMPTY;
      } /* Endif */
   }/* Endif */
   from_ptr = q_ptr->MSG_READ_LOC;
   to_ptr = message;
   i = q_ptr->MSG_SIZE+1;
   while (--i) {
      *to_ptr++ = *from_ptr++;
   } /* Endwhile */
   q_ptr->MSG_READ_LOC += q_ptr->MSG_SIZE;
   if (q_ptr->MSG_READ_LOC >= q_ptr->MSG_END_LOC) {
      q_ptr->MSG_READ_LOC = q_ptr->MSG_START_LOC;
   } /* Endif */
   q_ptr->CURRENT_SIZE--;
   if (! _QUEUE_IS_EMPTY(&q_ptr->WAITING_WRITERS)) {
      _QUEUE_DEQUEUE(&q_ptr->WAITING_WRITERS, td_ptr);
      _BACKUP_POINTER(td_ptr, TD_STRUCT, AUX_QUEUE);
      td_ptr->INFO = 0;  /* Signal that post is activating the task */
      _TASK_READY(td_ptr, kernel_data);
      _CHECK_RUN_SCHEDULER(); /* Let higher priority task run */
   } /* Endif */
   _int_enable();
   /* Start CR 1944 */
   _KLOGX2(KLOG_lwmsgq_receive, MQX_OK);
   /* End CR 1944 */
   return MQX_OK;

}/* Endbody */
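
A minimal usage sketch; the queue storage is assumed to have been prepared elsewhere with _lwmsgq_init (buffer, number of messages, message size in _mqx_max_type units), which is not shown in this section.

/* Hedged usage sketch: block until one message arrives. */
#define DEMO_MSG_SIZE 2   /* in _mqx_max_type units, matching the assumed init */

void demo_reader(pointer queue_handle)
{
    _mqx_max_type msg[DEMO_MSG_SIZE];

    if (_lwmsgq_receive(queue_handle, msg, LWMSGQ_RECEIVE_BLOCK_ON_EMPTY,
                        0, NULL) == MQX_OK) {
        /* msg[] now holds one message of DEMO_MSG_SIZE words */
    }
}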
Example no. 12
0
_mqx_uint _mem_create_pool_internal
(
    /* [IN] the start of the memory pool */
    pointer             start,

    /* [IN] the end of the memory pool */
    pointer             end,

    /* [IN] where to store the memory pool context info. */
    MEMPOOL_STRUCT_PTR  mem_pool_ptr

)
{   /* Body */
    KERNEL_DATA_STRUCT_PTR  kernel_data;
    STOREBLOCK_STRUCT_PTR   block_ptr;
    STOREBLOCK_STRUCT_PTR   end_block_ptr;

    _GET_KERNEL_DATA(kernel_data);

#if MQX_CHECK_VALIDITY
    _INT_DISABLE();
    if (kernel_data->MEM_COMP.VALID != MEMPOOL_VALID) {
        /* The RTOS memory system has been corrupted */
        _int_enable();
        return(MQX_CORRUPT_MEMORY_SYSTEM);
    } /* Endif */

    _INT_ENABLE();
#endif

    /* Align the start of the pool */
    mem_pool_ptr->POOL_PTR = (STOREBLOCK_STRUCT_PTR)
                             _ALIGN_ADDR_TO_HIGHER_MEM(start);

    /* Set the end of memory (aligned) */
    mem_pool_ptr->POOL_LIMIT = (STOREBLOCK_STRUCT_PTR)
                               _ALIGN_ADDR_TO_LOWER_MEM(end);

#if MQX_CHECK_ERRORS
    if ( (uchar_ptr)mem_pool_ptr->POOL_LIMIT <=
            ((uchar_ptr)mem_pool_ptr->POOL_PTR + MQX_MIN_MEMORY_POOL_SIZE) )
    {
        return MQX_MEM_POOL_TOO_SMALL;
    } /* Endif */
#endif

    block_ptr = (STOREBLOCK_STRUCT_PTR)mem_pool_ptr->POOL_PTR;
    mem_pool_ptr->POOL_HIGHEST_MEMORY_USED = (pointer)block_ptr;
    mem_pool_ptr->POOL_CHECK_POOL_PTR      = (char _PTR_)mem_pool_ptr->POOL_PTR;
    mem_pool_ptr->POOL_BLOCK_IN_ERROR      = NULL;

    /* Compute the pool size. */
    mem_pool_ptr->POOL_SIZE = (_mem_size)((uchar_ptr)mem_pool_ptr->POOL_LIMIT -
                                          (uchar_ptr)mem_pool_ptr->POOL_PTR);

    /* Set up the first block as an idle block */
    block_ptr->BLOCKSIZE = mem_pool_ptr->POOL_SIZE - MQX_MIN_MEMORY_STORAGE_SIZE;
    block_ptr->USER_AREA = NULL;
    block_ptr->PREVBLOCK = NULL;
    block_ptr->NEXTBLOCK = NULL;
    MARK_BLOCK_AS_FREE(block_ptr);

    CALC_CHECKSUM(block_ptr);

    mem_pool_ptr->POOL_FREE_LIST_PTR = block_ptr;

    /*
    ** Set up last block as an in_use block, so that the _mem_free algorithm
    ** will work (block coalescing)
    */
    end_block_ptr = (STOREBLOCK_STRUCT_PTR)
                    ((uchar_ptr)block_ptr + block_ptr->BLOCKSIZE);
    end_block_ptr->BLOCKSIZE = (_mem_size)(MQX_MIN_MEMORY_STORAGE_SIZE);
    end_block_ptr->USER_AREA = 0;
    end_block_ptr->PREVBLOCK = (struct storeblock_struct _PTR_)block_ptr;
    end_block_ptr->NEXTBLOCK = NULL;
    MARK_BLOCK_AS_USED(end_block_ptr, SYSTEM_TASK_ID(kernel_data));
    CALC_CHECKSUM(end_block_ptr);

    mem_pool_ptr->POOL_END_PTR = end_block_ptr;

    /* Initialize the list of extensions to this pool */
    _QUEUE_INIT(&mem_pool_ptr->EXT_LIST, 0);

    mem_pool_ptr->VALID = MEMPOOL_VALID;

    /* Protect the list of pools while adding new pool */
    _lwsem_wait((LWSEM_STRUCT_PTR)&kernel_data->MEM_COMP.SEM);
    _QUEUE_ENQUEUE(&kernel_data->MEM_COMP.POOLS, &mem_pool_ptr->LINK);
    _lwsem_post((LWSEM_STRUCT_PTR)&kernel_data->MEM_COMP.SEM);

    return MQX_OK;

} /* Endbody */
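
A minimal usage sketch via the public wrapper _mem_create_pool, which is assumed to carve a MEMPOOL_STRUCT out of the supplied region and call the internal routine above.

/* Hedged usage sketch: turn a static buffer into an additional memory pool. */
static unsigned char demo_pool_space[16 * 1024];

void demo_make_pool(void)
{
    _mem_pool_id pool = _mem_create_pool(demo_pool_space,
                                         sizeof(demo_pool_space));
    if (pool == NULL) {   /* assumed failure value */
        /* region below MQX_MIN_MEMORY_POOL_SIZE or memory system corrupt */
    }
}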
Example no. 13
0
_mqx_uint _lwsem_wait
   (
      /* [IN] the semaphore address */
      LWSEM_STRUCT_PTR sem_ptr
   )
{ /* Body */
    KERNEL_DATA_STRUCT_PTR kernel_data;
    TD_STRUCT_PTR          td_ptr;
   
#if MQX_ENABLE_USER_MODE && MQX_ENABLE_USER_STDAPI
    if (MQX_RUN_IN_USER_MODE) {
        return _usr_lwsem_wait(sem_ptr);
    }
#endif

    _GET_KERNEL_DATA(kernel_data);

    _KLOGE2(KLOG_lwsem_wait, sem_ptr);

#if MQX_CHECK_ERRORS
    if (kernel_data->IN_ISR) {
        _KLOGX2(KLOG_lwsem_wait, MQX_CANNOT_CALL_FUNCTION_FROM_ISR);
        return(MQX_CANNOT_CALL_FUNCTION_FROM_ISR);
    }
#endif
    
#if MQX_CHECK_VALIDITY
    if (sem_ptr->VALID != LWSEM_VALID) {
        _KLOGX2(KLOG_lwsem_wait, MQX_INVALID_LWSEM);
        return(MQX_INVALID_LWSEM);
    }
#endif

    _INT_DISABLE();
    if (sem_ptr->VALUE <= 0) {
        td_ptr = kernel_data->ACTIVE_PTR;
        td_ptr->STATE = LWSEM_BLOCKED;
        td_ptr->INFO  = (_mqx_uint)&sem_ptr->TD_QUEUE;
        _QUEUE_UNLINK(td_ptr);
        _QUEUE_ENQUEUE(&sem_ptr->TD_QUEUE, &td_ptr->AUX_QUEUE);
        _sched_execute_scheduler_internal();  /* Let the other tasks run */
        /* Another task has posted a semaphore, and it has been transferred to
        ** this task.
        */
    } else {
        --sem_ptr->VALUE;
    }

//#if MQX_COMPONENT_DESTRUCTION
    /* We must check for component destruction */
    if (sem_ptr->VALID != LWSEM_VALID) {
        _int_enable();
        /* The semaphore has been deleted */
        _KLOGX2(KLOG_lwsem_wait, MQX_INVALID_LWSEM);
        return(MQX_INVALID_LWSEM);
    } /* Endif */
//#endif

    _INT_ENABLE();

    _KLOGX2(KLOG_lwsem_wait, MQX_OK);

    return(MQX_OK);
}
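
A minimal usage sketch, assuming the standard companions _lwsem_create and _lwsem_post; only the blocking wait itself comes from the listing above.

/* Hedged usage sketch: a binary-semaphore style critical section. */
static LWSEM_STRUCT demo_lock;

void demo_setup(void)
{
    _lwsem_create(&demo_lock, 1);   /* assumed signature: sem, initial count */
}

void demo_critical(void)
{
    if (_lwsem_wait(&demo_lock) == MQX_OK) {
        /* ... protected work ... */
        _lwsem_post(&demo_lock);
    }
}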
Example no. 14
0
_mqx_uint _mmu_add_vcontext
   (
      /* [IN] the task to which a virtual context is to be added */
      _task_id  task_id,

      /* [IN] the virtual address to use */
      pointer   vaddr,

      /* [IN] the size */
      _mem_size input_size,

      /* [IN] the MMU flags to use */
      _mqx_uint flags
   )
{ /* Body */
   KERNEL_DATA_STRUCT_PTR         kernel_data;
   TD_STRUCT_PTR                  td_ptr;
   PSP_VIRTUAL_CONTEXT_STRUCT_PTR context_ptr;
   PSP_PAGE_INFO_STRUCT_PTR       mem_ptr;
   PSP_SUPPORT_STRUCT_PTR         psp_support_ptr;
   uint_32                        page_size;
   int_32                         test_size;
   int_32                         size = (int_32)input_size;
   uint_32                        result;
   uchar_ptr                      taddr;
   pointer                        tmp;

   _GET_KERNEL_DATA(kernel_data);
   td_ptr = _task_get_td(task_id);
   if (td_ptr == NULL) {
      return(MQX_INVALID_TASK_ID);
   }/* Endif */
   context_ptr = td_ptr->MMU_VIRTUAL_CONTEXT_PTR;
   psp_support_ptr = kernel_data->PSP_SUPPORT_PTR;
   page_size = psp_support_ptr->PAGE_SIZE;

   _lwsem_wait(&psp_support_ptr->VPAGE_FREELIST_LWSEM);
   if ((td_ptr->FLAGS & TASK_MMU_CONTEXT_EXISTS) == 0) {
      _lwsem_post(&psp_support_ptr->VPAGE_FREELIST_LWSEM);
      return(MQX_MMU_CONTEXT_DOES_NOT_EXIST);
   } /* Endif */

   if (size > (_QUEUE_GET_SIZE(&psp_support_ptr->VPAGE_FREELIST) * page_size)) {
      _lwsem_post(&psp_support_ptr->VPAGE_FREELIST_LWSEM);
      return(MQX_OUT_OF_MEMORY);
   }/* Endif */

   /* Verify virtual memory not in use */
   test_size = size;
   taddr = (uchar_ptr)vaddr;
   while (test_size > 0) {
      /* Use Vtop !! */
      if (_mmu_vtop(taddr,&tmp) == MQX_OK) {
         _lwsem_post(&psp_support_ptr->VPAGE_FREELIST_LWSEM);
         return(MQX_INVALID_PARAMETER);
      }/* Endif */
      taddr += page_size;
      test_size -= page_size;
   } /* Endwhile */

   while (size > 0) {
      _QUEUE_DEQUEUE(&psp_support_ptr->VPAGE_FREELIST,mem_ptr);
      mem_ptr->VADDR = vaddr;
      result = _mmu_set_vmem_loc_internal(mem_ptr, flags);
      if (result != MQX_OK) {
         _QUEUE_ENQUEUE(&psp_support_ptr->VPAGE_FREELIST,mem_ptr);
         _lwsem_post(&psp_support_ptr->VPAGE_FREELIST_LWSEM);
         return(MQX_OUT_OF_MEMORY);
      } /* Endif */
      if (td_ptr == kernel_data->ACTIVE_PTR) {
         result = _mmu_add_vregion(mem_ptr->ADDR, mem_ptr->VADDR, page_size,
            flags);
         if (result != MQX_OK) {
            _QUEUE_ENQUEUE(&psp_support_ptr->VPAGE_FREELIST,mem_ptr);
            _lwsem_post(&psp_support_ptr->VPAGE_FREELIST_LWSEM);
            return(MQX_OUT_OF_MEMORY);
         } /* Endif */
      } /* Endif */
      _QUEUE_ENQUEUE(&context_ptr->PAGE_INFO,mem_ptr);
      size  -= page_size;
      vaddr = (pointer)((uint_32)vaddr + page_size);
   } /* Endwhile */

   _lwsem_post(&psp_support_ptr->VPAGE_FREELIST_LWSEM);
   return(MQX_OK);

} /* Endbody */
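
A minimal usage sketch; the virtual address, size, and MMU flag value are illustrative and target-specific, and the calling task is assumed to already own a virtual context.

/* Hedged usage sketch: map a virtual region for the calling task. */
void demo_map_region(pointer vaddr, _mem_size size, _mqx_uint mmu_flags)
{
    _mqx_uint result = _mmu_add_vcontext(_task_get_id(), vaddr, size, mmu_flags);

    if (result == MQX_OUT_OF_MEMORY) {
        /* not enough free virtual pages on the PSP free list */
    }
}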
Example no. 15
0
_mqx_uint _lwmsgq_send
   (
      /* Handle to the queue */
      pointer           handle,

      /* location of message to copy in */
      _mqx_max_type_ptr message,

      /* flags for blocking on full, blocking on send */
      _mqx_uint         flags
   )
{/* Body */
   KERNEL_DATA_STRUCT_PTR kernel_data;
   TD_STRUCT_PTR          td_ptr;
   LWMSGQ_STRUCT_PTR      q_ptr = (LWMSGQ_STRUCT_PTR)handle;
   _mqx_uint              i;
   _mqx_max_type_ptr      from_ptr;
   _mqx_max_type_ptr      to_ptr;

#if MQX_ENABLE_USER_MODE && MQX_ENABLE_USER_STDAPI
    if (MQX_RUN_IN_USER_MODE) {
        return _usr_lwmsgq_send(handle, message, flags);
    }
#endif
    
   /* Start CR 1944 */
   _GET_KERNEL_DATA(kernel_data);
   _KLOGE4(KLOG_lwmsgq_send, handle, message, flags);
   /* End CR 1944 */
   
   _int_disable();
#if MQX_CHECK_VALIDITY
   if (q_ptr->VALID != LWMSGQ_VALID){
      _int_enable();
      /* Start CR 1944 */
      _KLOGX2(KLOG_lwmsgq_send, LWMSGQ_INVALID);
      /* End CR 1944 */
      return LWMSGQ_INVALID;
   } /* Endif */
#endif
   if (LWMSGQ_IS_FULL(q_ptr)) {
      if (flags & LWMSGQ_SEND_BLOCK_ON_FULL) {
         td_ptr = kernel_data->ACTIVE_PTR;
         while (LWMSGQ_IS_FULL(q_ptr)) {
            td_ptr->STATE = LWMSGQ_WRITE_BLOCKED;
            td_ptr->INFO  = (_mqx_uint)&q_ptr->WAITING_WRITERS;
            _QUEUE_UNLINK(td_ptr);
            _QUEUE_ENQUEUE(&q_ptr->WAITING_WRITERS, &td_ptr->AUX_QUEUE);
            _sched_execute_scheduler_internal(); /* Let other tasks run */
         } /* Endwhile */
      } else {
         _int_enable();
         /* Start CR 1944 */
         _KLOGX2(KLOG_lwmsgq_send, LWMSGQ_FULL);
         /* End CR 1944 */
         return LWMSGQ_FULL;
      } /* Endif */
   }/* Endif */
   to_ptr = q_ptr->MSG_WRITE_LOC;
   from_ptr = message;
   i = q_ptr->MSG_SIZE+1;
   while (--i) {
      *to_ptr++ = *from_ptr++;
   } /* Endwhile */
   q_ptr->MSG_WRITE_LOC += q_ptr->MSG_SIZE;
   if (q_ptr->MSG_WRITE_LOC >= q_ptr->MSG_END_LOC) {
      q_ptr->MSG_WRITE_LOC = q_ptr->MSG_START_LOC;
   } /* Endif */
   q_ptr->CURRENT_SIZE++;
   if (! _QUEUE_IS_EMPTY(&q_ptr->WAITING_READERS)) {
      _QUEUE_DEQUEUE(&q_ptr->WAITING_READERS, td_ptr);
      _BACKUP_POINTER(td_ptr, TD_STRUCT, AUX_QUEUE);
      _TIME_DEQUEUE(td_ptr, kernel_data);
      td_ptr->INFO = 0;  /* Signal that post is activating the task */
      _TASK_READY(td_ptr, kernel_data);
      if (flags & LWMSGQ_SEND_BLOCK_ON_SEND) {
         _task_block();
      } else {
         _CHECK_RUN_SCHEDULER(); /* Let higher priority task run */
      }/* Endif */
   } else {
      if (flags & LWMSGQ_SEND_BLOCK_ON_SEND) {
         _task_block();
      }/* Endif */
   } /* Endif */
   _int_enable();
   /* Start CR 1944 */
   _KLOGX2(KLOG_lwmsgq_send, MQX_OK);
   /* End CR 1944 */
   return MQX_OK;

}/* Endbody */
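
A minimal usage sketch matching the reader sketch after Example no. 11; the queue setup with _lwmsgq_init is again assumed rather than shown.

/* Hedged usage sketch: post one message, blocking while the queue is full. */
void demo_writer(pointer queue_handle)
{
    _mqx_max_type msg[2] = { 0x1234, 0x5678 };   /* size must match the init */

    if (_lwmsgq_send(queue_handle, msg, LWMSGQ_SEND_BLOCK_ON_FULL) != MQX_OK) {
        /* queue handle invalid */
    }
}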
Example no. 16
0
_mqx_uint _partition_create_internal
   ( 
      /* [IN] the start of the partition */
      PARTPOOL_STRUCT_PTR partpool_ptr,
      
      /* [IN] the total size of each block with overheads */
      _mem_size           actual_size,

      /* [IN] the initial number of blocks in the partition */
      _mqx_uint           initial_blocks

   )
{ /* Body */
   KERNEL_DATA_STRUCT_PTR              kernel_data;
   INTERNAL_PARTITION_BLOCK_STRUCT_PTR block_ptr;
   PARTITION_COMPONENT_STRUCT_PTR      part_component_ptr;
   _mqx_uint                           result;

   _GET_KERNEL_DATA(kernel_data);

   part_component_ptr = (PARTITION_COMPONENT_STRUCT_PTR)
      kernel_data->KERNEL_COMPONENTS[KERNEL_PARTITIONS];
   if (part_component_ptr == NULL) {
      result = _partition_create_component();
      part_component_ptr = (PARTITION_COMPONENT_STRUCT_PTR)
         kernel_data->KERNEL_COMPONENTS[KERNEL_PARTITIONS];
#if MQX_CHECK_MEMORY_ALLOCATION_ERRORS
      if (part_component_ptr == NULL){
         return(result);
      } /* Endif */
#endif
   } /* Endif */

   _mem_zero(partpool_ptr, (_mem_size)sizeof(PARTPOOL_STRUCT));

   partpool_ptr->BLOCK_SIZE            = actual_size;
/* START CR 308 */
   partpool_ptr->POOL.VALID            = PARTITION_VALID;
/* END CR 308 */
   partpool_ptr->POOL.NUMBER_OF_BLOCKS = initial_blocks;
   partpool_ptr->AVAILABLE             = initial_blocks;
   partpool_ptr->TOTAL_BLOCKS          = initial_blocks;

   block_ptr = (INTERNAL_PARTITION_BLOCK_STRUCT_PTR)((uchar_ptr)partpool_ptr + 
      sizeof(PARTPOOL_STRUCT));
   
   block_ptr = (INTERNAL_PARTITION_BLOCK_STRUCT_PTR)
      _ALIGN_ADDR_TO_HIGHER_MEM(block_ptr);

   partpool_ptr->POOL.FIRST_IBLOCK_PTR = block_ptr;
   
   while (initial_blocks--)  {
      block_ptr->LINK.NEXT_BLOCK_PTR = partpool_ptr->FREE_LIST_PTR;
      CALC_PARTITION_CHECKSUM(block_ptr);
      partpool_ptr->FREE_LIST_PTR  = block_ptr;
      block_ptr = (INTERNAL_PARTITION_BLOCK_STRUCT_PTR)((uchar_ptr)block_ptr + 
         actual_size);
   } /* Endwhile */
   
   _int_disable();
   _QUEUE_ENQUEUE(&part_component_ptr->PARTITIONS, partpool_ptr);
   _int_enable();

   return(MQX_OK);

} /* Endbody */
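
A minimal usage sketch via the public wrapper _partition_create, which is assumed to size the pool header, pad the block size with its overhead, and then call the internal routine above.

/* Hedged usage sketch: a partition of 32-byte blocks, 8 preallocated. */
void demo_make_partition(void)
{
    /* assumed signature: block size, initial blocks, grow blocks, max blocks */
    _partition_id part = _partition_create(32, 8, 4, 32);

    if (part == NULL) {   /* assumed failure value */
        /* component creation or memory allocation failed */
    }
}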