/*
 * Get a memory block from partition 'p_mem'.
 * Returns a pointer to a free block, or NULL if the partition is empty
 * or an argument check fails; the result code is placed in '*p_err'.
 */
void *OSMemGet (OS_MEM *p_mem, OS_ERR *p_err)
{
    void *p_blk;
    CPU_SR_ALLOC();


#ifdef OS_SAFETY_CRITICAL
    if (p_err == (OS_ERR *)0) {
        OS_SAFETY_CRITICAL_EXCEPTION();
        return ((void *)0);
    }
#endif

#if OS_CFG_ARG_CHK_EN > 0u
    if (p_mem == (OS_MEM *)0) {                      /* Must point to a valid memory partition               */
        *p_err = OS_ERR_MEM_INVALID_P_MEM;
        return ((void *)0);
    }
#endif

    CPU_CRITICAL_ENTER();
    if (p_mem->NbrFree == (OS_MEM_QTY)0) {           /* See if there are any free memory blocks              */
        CPU_CRITICAL_EXIT();
        *p_err = OS_ERR_MEM_NO_FREE_BLKS;            /* No,  Notify caller of empty memory partition         */
        return ((void *)0);                          /*      Return NULL pointer to caller                   */
    }
    p_blk              = p_mem->FreeListPtr;         /* Yes, point to next free memory block                 */
    p_mem->FreeListPtr = *(void **)p_blk;            /*      Adjust pointer to new free list                 */
    p_mem->NbrFree--;                                /*      One less memory block in this partition         */
    CPU_CRITICAL_EXIT();
    *p_err = OS_ERR_NONE;                            /*      No error                                        */
    return (p_blk);                                  /*      Return memory block to caller                   */
}
/*
 * Allocate a semaphore from the static OS event pool and set its count.
 * Returns the semaphore handle, or NULL when the pool index is out of
 * its valid range (exhausted or corrupt).
 */
void *SerialOS_SemCreate (CPU_INT16U cnt)
{
    OS_EVENT  *p_event;
    INT8U      err;
    CPU_SR_ALLOC();


    CPU_CRITICAL_ENTER();
                                                /* Pool index must lie in 1..SERIAL_OS_MAX_NBR_SEM.     */
    if ((OSEventObjIx < 1u) ||
        (OSEventObjIx > SERIAL_OS_MAX_NBR_SEM)) {
        CPU_CRITICAL_EXIT();
        return ((void *)0);
    }
                                                /* -------------- GET OS EVENT FROM POOL -------------- */
    OSEventObjIx--;
    p_event = OSEventObj[OSEventObjIx];
    CPU_CRITICAL_EXIT();

    OSSemSet(p_event, cnt, &err);               /* Initialize the semaphore count.                      */

    return ((void *)p_event);
}
/*
 * Compare the octet at the buffer's read cursor against 'datum'.
 * Only runs when the buffer is full; a mismatch rewinds the cursor to
 * the start.  Returns DEF_YES only when the final octet of the buffer
 * matched (comparison complete), DEF_NO otherwise.
 */
CPU_BOOLEAN SerialBuf_Cmp (SERIAL_BUF *pbuf, CPU_INT08U datum)
{
    CPU_SIZE_T   rd_ix;
    CPU_BOOLEAN  is_full;
    CPU_BOOLEAN  cmp_done;
    CPU_SR_ALLOC();


    CPU_CRITICAL_ENTER();
    is_full = SerialBuf_IsFull(pbuf);
    if (is_full == DEF_NO) {                        /* Comparison only runs against a full buffer.          */
        CPU_CRITICAL_EXIT();
        return (DEF_NO);
    }

    rd_ix = pbuf->IxRd;
    if (pbuf->DataPtr[rd_ix] != datum) {            /* Mismatch ...                                         */
        pbuf->IxRd = 0;                             /* ... restart the comparison from the beginning.       */
        CPU_CRITICAL_EXIT();
        return (DEF_NO);
    }

    rd_ix++;
    cmp_done   = (rd_ix == pbuf->Len) ? DEF_YES : DEF_NO;
    pbuf->IxRd = (cmp_done == DEF_YES) ? 0 : rd_ix; /* Wrap to 0 once every octet has matched.              */
    CPU_CRITICAL_EXIT();
    return (cmp_done);
}
/*
 * Execute a wireless shell command string.
 * Marks the net device busy (NET_IOCTL_RUNNING) for the duration of the
 * command.  Returns 0 on success, -1 when no net device is registered.
 *
 * Fix: strncpy() does not NUL-terminate when the source fills the
 * buffer; a command of 45+ characters left 's' unterminated and
 * Shell_Exec() would read past the end of the stack buffer.
 */
int wireless_exec_cmd(char *cmd)
{
    SHELL_ERR           err;
    CPU_SR              cpu_sr;
    struct net_device  *net_dev;
    char                s[45];


    CPU_CRITICAL_ENTER();
    if (!_pnet_device) {                        /* No registered device: nothing to execute on.         */
        CPU_CRITICAL_EXIT();
        return -1;
    }
    net_dev = _pnet_device;
    net_dev->status |= NET_IOCTL_RUNNING;       /* Flag the device busy while the command executes.     */
    CPU_CRITICAL_EXIT();

    strncpy(s, cmd, sizeof(s) - 1u);            /* Copy at most sizeof(s)-1 characters ...              */
    s[sizeof(s) - 1u] = '\0';                   /* ... and guarantee NUL termination.                   */
    Shell_Exec(s, NULL, NULL, &err);

    CPU_CRITICAL_ENTER();
    net_dev->status &= ~NET_IOCTL_RUNNING;      /* Command finished: clear the busy flag.               */
    CPU_CRITICAL_EXIT();
    return 0;
}
/*
 * Return a semaphore to the static OS event pool.
 * All tasks pending on the semaphore are aborted (broadcast) first.
 * With extended argument checking enabled, a semaphore already present
 * in the pool is rejected to prevent a double free.
 */
void SerialOS_SemDel (void *psem)
{
    INT8U       os_err;
#if (SERIAL_CFG_ARG_CHK_EXT_EN == DEF_ENABLED)
    CPU_SIZE_T  i;
#endif
    CPU_SR_ALLOC();


    OSSemPendAbort((OS_EVENT *) psem,               /* Release every task pending on this semaphore.        */
                   (INT8U     ) OS_PEND_OPT_BROADCAST,
                   (INT8U    *)&os_err);

    CPU_CRITICAL_ENTER();
    if (OSEventObjIx >= SERIAL_OS_MAX_NBR_SEM) {    /* Pool already full: nothing can be returned.          */
        CPU_CRITICAL_EXIT();
        return;
    }

#if (SERIAL_CFG_ARG_CHK_EXT_EN == DEF_ENABLED)
                                                    /* ----------------- VALIDATE OS EVENT ---------------- */
    for (i = 0u; i < OSEventObjIx; i++) {
        if (OSEventObj[i] == (OS_EVENT *)psem) {    /* Already in the free pool: avoid a double free.       */
            CPU_CRITICAL_EXIT();
            return;
        }
    }
#endif
                                                    /* --------------- FREE OS EVENT TO POOL -------------- */
    OSEventObj[OSEventObjIx] = (OS_EVENT *)psem;
    OSEventObjIx++;
    CPU_CRITICAL_EXIT();
}
//in t->func(t->data), don't restart tasklet_schedule,oterwise tasklet_kill will not exit. void tasklet_kill(struct tasklet_struct *t) { OS_ERR err; CPU_SR cpu_sr; if (in_interrupt()) USBH_DBG("tasklet_kill Attempt to kill tasklet from interrupt\r\n"); CPU_CRITICAL_ENTER(); while (t->state == TASKLET_STATE_SCHED){ t->state = TASKLET_STATE_SCHED; do { CPU_CRITICAL_EXIT(); OSTimeDlyHMSM(0,0,0,5, OS_OPT_TIME_DLY,&err); CPU_CRITICAL_ENTER(); } while (t->state == TASKLET_STATE_SCHED); } CPU_CRITICAL_EXIT(); // while (test_and_set_bit(TASKLET_STATE_SCHED, &t->state)) { // do { // yield(); // } while (test_bit(TASKLET_STATE_SCHED, &t->state)); // } // tasklet_unlock_wait(t); // clear_bit(TASKLET_STATE_SCHED, &t->state); }
/*
 * Append one octet to the circular buffer.
 * Returns DEF_YES on success, DEF_NO when the buffer is full.
 */
CPU_BOOLEAN SerialBuf_WrOctet (SERIAL_BUF *pbuf, CPU_INT08U datum)
{
    CPU_SIZE_T  free_cnt;
    CPU_SIZE_T  wr_ix;
    CPU_SIZE_T  buf_len;
    CPU_SR_ALLOC();


    CPU_CRITICAL_ENTER();
    free_cnt = pbuf->EmptyCnt;
    if (free_cnt == 0) {                        /* Buffer full: reject the octet.                       */
        CPU_CRITICAL_EXIT();
        return (DEF_NO);
    }

    wr_ix                = pbuf->IxWr;
    pbuf->DataPtr[wr_ix] = datum;
    pbuf->EmptyCnt       = free_cnt - 1;

    buf_len = pbuf->Len;
    wr_ix++;
    if (wr_ix == buf_len) {                     /* Wrap the write index at the end of the buffer.       */
        wr_ix = 0;
    }
    pbuf->IxWr = wr_ix;
    CPU_CRITICAL_EXIT();
    return (DEF_YES);
}
/*
 * Remove one octet from the circular buffer into '*pdatum'.
 * Returns DEF_YES on success, DEF_NO when the buffer is empty
 * (every slot counted as empty).
 */
CPU_BOOLEAN SerialBuf_RdOctet (SERIAL_BUF *pbuf, CPU_INT08U *pdatum)
{
    CPU_SIZE_T  empty_slots;
    CPU_SIZE_T  rd_ix;
    CPU_SIZE_T  buf_len;
    CPU_SR_ALLOC();


    CPU_CRITICAL_ENTER();
    buf_len     = pbuf->Len;
    empty_slots = pbuf->EmptyCnt;
    if (empty_slots == buf_len) {               /* All slots empty: nothing to read.                    */
        CPU_CRITICAL_EXIT();
        return (DEF_NO);
    }

    rd_ix          = pbuf->IxRd;
    *pdatum        = pbuf->DataPtr[rd_ix];
    pbuf->EmptyCnt = empty_slots + 1;

    rd_ix++;
    if (rd_ix == buf_len) {                     /* Wrap the read index at the end of the buffer.        */
        rd_ix = 0;
    }
    pbuf->IxRd = rd_ix;
    CPU_CRITICAL_EXIT();
    return (DEF_YES);
}
/*
 * Allocate the next available task-local-storage ID.
 * Returns the new ID, or OS_CFG_TLS_TBL_SIZE (an out-of-range value)
 * when the table is exhausted or, in safety-critical builds, when
 * 'p_err' is NULL.
 */
OS_TLS_ID OS_TLS_GetID (OS_ERR *p_err)
{
    OS_TLS_ID id;
    CPU_SR_ALLOC();


#ifdef OS_SAFETY_CRITICAL
    if (p_err == (OS_ERR *)0) {
        OS_SAFETY_CRITICAL_EXCEPTION();
        return ((OS_TLS_ID)OS_CFG_TLS_TBL_SIZE);
    }
#endif

    CPU_CRITICAL_ENTER();
    if (OS_TLS_NextAvailID >= OS_CFG_TLS_TBL_SIZE) {    /* See if we exceeded the number of IDs available   */
        *p_err = OS_ERR_TLS_NO_MORE_AVAIL;              /* Yes, cannot allocate more TLS                    */
        CPU_CRITICAL_EXIT();
        return ((OS_TLS_ID)OS_CFG_TLS_TBL_SIZE);
    }

    id = OS_TLS_NextAvailID;                            /* Assign the next available ID                     */
    OS_TLS_NextAvailID++;                               /* Increment available ID for next request          */
    CPU_CRITICAL_EXIT();
    *p_err = OS_ERR_NONE;
    return (id);
}
/*
 * Return memory block 'p_blk' to partition 'p_mem'.
 * The result code is placed in '*p_err'; OS_ERR_MEM_FULL is reported
 * when every block of the partition has already been returned.
 */
void OSMemPut (OS_MEM *p_mem, void *p_blk, OS_ERR *p_err)
{
    CPU_SR_ALLOC();


#ifdef OS_SAFETY_CRITICAL
    if (p_err == DEF_NULL) {
        OS_SAFETY_CRITICAL_EXCEPTION();
        return;
    }
#endif

#if (OS_CFG_ARG_CHK_EN == DEF_ENABLED)
    if (p_mem == DEF_NULL) {                                /* Must point to a valid memory partition       */
#if (defined(TRACE_CFG_EN) && (TRACE_CFG_EN == DEF_ENABLED))
        TRACE_OS_MEM_PUT_FAILED(p_mem);                     /* Record the event.                            */
#endif
        *p_err = OS_ERR_MEM_INVALID_P_MEM;
        return;
    }
    if (p_blk == DEF_NULL) {                                /* Must release a valid block                   */
#if (defined(TRACE_CFG_EN) && (TRACE_CFG_EN == DEF_ENABLED))
        TRACE_OS_MEM_PUT_FAILED(p_mem);                     /* Record the event.                            */
#endif
        *p_err = OS_ERR_MEM_INVALID_P_BLK;
        return;
    }
#endif

#if (OS_CFG_OBJ_TYPE_CHK_EN == DEF_ENABLED)
    if (p_mem->Type != OS_OBJ_TYPE_MEM) {                   /* Make sure the memory block was created       */
        *p_err = OS_ERR_OBJ_TYPE;
        return;
    }
#endif

    CPU_CRITICAL_ENTER();
    if (p_mem->NbrFree >= p_mem->NbrMax) {                  /* Make sure all blocks not already returned    */
        CPU_CRITICAL_EXIT();
#if (defined(TRACE_CFG_EN) && (TRACE_CFG_EN == DEF_ENABLED))
        TRACE_OS_MEM_PUT_FAILED(p_mem);                     /* Record the event.                            */
#endif
        *p_err = OS_ERR_MEM_FULL;
        return;
    }
    *(void **)p_blk    = p_mem->FreeListPtr;                /* Insert released block into free block list   */
    p_mem->FreeListPtr = p_blk;
    p_mem->NbrFree++;                                       /* One more memory block in this partition      */
    CPU_CRITICAL_EXIT();
#if (defined(TRACE_CFG_EN) && (TRACE_CFG_EN == DEF_ENABLED))
    TRACE_OS_MEM_PUT(p_mem);                                /* Record the event.                            */
#endif
    *p_err = OS_ERR_NONE;                                   /* Notify caller that memory block was released */
}
/*
 * Post (signal) a semaphore.
 * If no task is pending, the counter is incremented (overflow of the
 * counter is reported as OS_ERR_SEM_OVF) and its new value returned.
 * If tasks are pending, the first one -- or all of them when
 * OS_OPT_POST_ALL is set -- is made ready instead and 0 is returned.
 * The scheduler runs unless OS_OPT_POST_NO_SCHED is set.
 */
OS_SEM_CTR OS_SemPost (OS_SEM *p_sem, OS_OPT opt, CPU_TS ts, OS_ERR *p_err)
{
    OS_SEM_CTR     ctr;
    OS_PEND_LIST  *p_pend_list;
    OS_PEND_DATA  *p_pend_data;
    OS_PEND_DATA  *p_pend_data_next;
    OS_TCB        *p_tcb;
    CPU_SR_ALLOC();


    CPU_CRITICAL_ENTER();
    p_pend_list = &p_sem->PendList;
    if (p_pend_list->HeadPtr == DEF_NULL) {                 /* Any task waiting on semaphore?               */
        if (p_sem->Ctr == (OS_SEM_CTR)-1) {                 /* Counter about to wrap: report overflow.      */
            CPU_CRITICAL_EXIT();
            *p_err = OS_ERR_SEM_OVF;
            return (0u);
        }
        p_sem->Ctr++;                                       /* No                                           */
        ctr = p_sem->Ctr;
#if (OS_CFG_TS_EN == DEF_ENABLED)
        p_sem->TS = ts;                                     /* Save timestamp in semaphore control block    */
#endif
        CPU_CRITICAL_EXIT();
        *p_err = OS_ERR_NONE;
        return (ctr);
    }

    OS_CRITICAL_ENTER_CPU_EXIT();                           /* Switch to kernel-lock protection.            */
    p_pend_data = p_pend_list->HeadPtr;
    while (p_pend_data != DEF_NULL) {
        p_tcb            = p_pend_data->TCBPtr;
        p_pend_data_next = p_pend_data->NextPtr;            /* Save next link before OS_Post() unlinks it.  */
        OS_Post((OS_PEND_OBJ *)((void *)p_sem), p_tcb, DEF_NULL, 0u, ts);
        if ((opt & OS_OPT_POST_ALL) == 0) {                 /* Post to all tasks waiting?                   */
            break;                                          /* No                                           */
        }
        p_pend_data = p_pend_data_next;
    }
    OS_CRITICAL_EXIT_NO_SCHED();
    if ((opt & OS_OPT_POST_NO_SCHED) == 0u) {
        OSSched();                                          /* Run the scheduler                            */
    }
    *p_err = OS_ERR_NONE;
    return (0u);
}
/*
 * Get a memory block from partition 'p_mem' (trace-instrumented build).
 * Returns a pointer to a free block, or DEF_NULL when the partition is
 * empty or an argument/type check fails; result code in '*p_err'.
 */
void *OSMemGet (OS_MEM *p_mem, OS_ERR *p_err)
{
    void *p_blk;
    CPU_SR_ALLOC();


#ifdef OS_SAFETY_CRITICAL
    if (p_err == DEF_NULL) {
        OS_SAFETY_CRITICAL_EXCEPTION();
        return (DEF_NULL);
    }
#endif

#if (OS_CFG_ARG_CHK_EN == DEF_ENABLED)
    if (p_mem == DEF_NULL) {                                /* Must point to a valid memory partition       */
#if (defined(TRACE_CFG_EN) && (TRACE_CFG_EN == DEF_ENABLED))
        TRACE_OS_MEM_GET_FAILED(p_mem);                     /* Record the event.                            */
#endif
        *p_err = OS_ERR_MEM_INVALID_P_MEM;
        return (DEF_NULL);
    }
#endif

#if (OS_CFG_OBJ_TYPE_CHK_EN == DEF_ENABLED)
    if (p_mem->Type != OS_OBJ_TYPE_MEM) {                   /* Make sure the memory block was created       */
        *p_err = OS_ERR_OBJ_TYPE;
        return (DEF_NULL);
    }
#endif

    CPU_CRITICAL_ENTER();
    if (p_mem->NbrFree == 0u) {                             /* See if there are any free memory blocks      */
        CPU_CRITICAL_EXIT();
#if (defined(TRACE_CFG_EN) && (TRACE_CFG_EN == DEF_ENABLED))
        TRACE_OS_MEM_GET_FAILED(p_mem);                     /* Record the event.                            */
#endif
        *p_err = OS_ERR_MEM_NO_FREE_BLKS;                   /* No,  Notify caller of empty memory partition */
        return (DEF_NULL);                                  /*      Return NULL pointer to caller           */
    }
    p_blk              = p_mem->FreeListPtr;                /* Yes, point to next free memory block         */
    p_mem->FreeListPtr = *(void **)p_blk;                   /*      Adjust pointer to new free list         */
    p_mem->NbrFree--;                                       /*      One less memory block in this partition */
    CPU_CRITICAL_EXIT();
#if (defined(TRACE_CFG_EN) && (TRACE_CFG_EN == DEF_ENABLED))
    TRACE_OS_MEM_GET(p_mem);                                /* Record the event.                            */
#endif
    *p_err = OS_ERR_NONE;                                   /* No error                                     */
    return (p_blk);                                         /* Return memory block to caller                */
}
void OS_TLS_LockCreate (void **p_lock) { OS_TLS_LOCK *p_tls_lock; OS_ERR os_err; CPU_SR_ALLOC(); if (p_lock == (void **)0) { return; } if (OS_TLS_LockPoolListPtr == (OS_TLS_LOCK *)0) { /* If 'OS_TLS_LOCK' object pool is empty? */ *p_lock = (void *)0; /* return a 'NULL' pointer. */ return; } p_tls_lock = OS_TLS_LockPoolListPtr; /* Get the first object in the list. */ OSMutexCreate((OS_MUTEX *)&p_tls_lock->Mutex, /* Create the mutex in the kernel. */ (CPU_CHAR *) 0, (OS_ERR *)&os_err); if (os_err != OS_ERR_NONE) { /* If the mutex create funtion fail? */ *p_lock = (void *)0; /* ... return a 'NULL' pointer. */ return; } CPU_CRITICAL_ENTER(); OS_TLS_LockPoolListPtr = p_tls_lock->NextPtr; /* Move HEAD pointer to the next object in the list.*/ CPU_CRITICAL_EXIT(); *p_lock = (void *)p_tls_lock; /* Return the new 'OS_TLS_LOCK' object pointer. */ }
/*
*********************************************************************************************************
*                                Rx/Tx Communication handler for uC/OS-View
*
* Description: Services the uC/OS-View USART receive and transmit interrupts.
*              This function is called in stm32f10x_it.c.
*********************************************************************************************************
*/
void OSView_RxTxISRHandler (void)
{
    CPU_SR cpu_sr;


    CPU_CRITICAL_ENTER();                                   /* Tell uC/OS-II that we are starting an ISR    */
    OSIntNesting++;
    CPU_CRITICAL_EXIT();

    if (USART_GetITStatus(OS_VIEW_USART, USART_IT_RXNE) != RESET) {
                                                            /* Read one byte from the receive data register */
        OSView_RxHandler( USART_ReceiveData(OS_VIEW_USART) & 0xFF );
        USART_ClearITPendingBit(OS_VIEW_USART, USART_IT_RXNE);  /* Clear the USART Receive interrupt        */
    }

    if (USART_GetITStatus(OS_VIEW_USART, USART_IT_TXE) != RESET) {
        OSView_TxHandler();
        USART_ClearITPendingBit(OS_VIEW_USART, USART_IT_TXE);   /* Clear the USART transmit interrupt       */
    }

    OSIntExit();                                            /* Tell uC/OS-II that we are leaving the ISR    */
}
/*
 * Initialize the TLS module: reserve the library TLS ID and build the
 * singly-linked free list of 'OS_TLS_LOCK' objects.
 */
void OS_TLS_Init (OS_ERR *p_err)
{
    CPU_INT16U    ix;
    OS_TLS_LOCK  *p_lock;
    CPU_SR_ALLOC();


    OS_TLS_NextAvailID = 0u;
    OS_TLS_LibID       = OS_TLS_GetID(p_err);

    CPU_CRITICAL_ENTER();
    /* Chain every pool entry to its successor; the final entry ends the list. */
    for (ix = 0u; ix < OS_TLS_LOCK_MAX; ix++) {
        p_lock = &OS_TLS_LockPoolTbl[ix];
        if (ix == (OS_TLS_LOCK_MAX - 1u)) {
            p_lock->NextPtr = (OS_TLS_LOCK *)0;             /* Last node points to 'NULL'.                  */
        } else {
            p_lock->NextPtr = &OS_TLS_LockPoolTbl[ix + 1u];
        }
    }
    OS_TLS_LockPoolListPtr = &OS_TLS_LockPoolTbl[0];        /* Initialize the list head pointer.            */
    CPU_CRITICAL_EXIT();
}
/*
 * Free DMA channel 'ch_nbr': clear its pending terminal-count and error
 * interrupts, mark it free in the channel table and zero its address,
 * configuration and control registers.
 * Returns DEF_OK on success, DEF_FAIL on an invalid channel number.
 */
CPU_BOOLEAN CSP_DMA_CH_FreeExt (CSP_DEV_NBR ch_nbr)
{
    CSP_DMA_REG     *p_dma_reg;
    CSP_DMA_CH_REG  *p_dma_ch_reg;
    CSP_DMA_CH      *p_ch_tbl;
    CPU_SR_ALLOC();


#if (CSP_CFG_ARG_CHK_EN == DEF_ENABLED)
    if (ch_nbr > CSP_DMA_CH_MAX_NBR - 1u) {         /* Reject out-of-range channel numbers.                 */
        return (DEF_FAIL);
    }
#endif

    p_dma_reg    = (CSP_DMA_REG *)CSP_ADDR_DMA_REG;
    p_dma_ch_reg = &(p_dma_reg->CHx[ch_nbr]);
    p_ch_tbl     = &CSP_DMA_ChTbl[ch_nbr];

    CPU_CRITICAL_ENTER();
    p_dma_reg->IntTCClr    = DEF_BIT(ch_nbr);       /* Clear all pending interrupts.                        */
    p_dma_reg->IntErrClr   = DEF_BIT(ch_nbr);
    p_ch_tbl->State        = CSP_DMA_CH_STATE_FREE; /* Free the channel.                                    */
    p_dma_ch_reg->SrcAddr  = DEF_BIT_NONE;          /* Uninitialize DMA channel cfg & ctrl registers.       */
    p_dma_ch_reg->DestAddr = DEF_BIT_NONE;
    p_dma_ch_reg->Cfg      = DEF_BIT_NONE;
    p_dma_ch_reg->Ctrl     = DEF_BIT_NONE;
    CPU_CRITICAL_EXIT();

    return (DEF_OK);
}
/*******************************************************************
 * Purpose : Test whether the CAN queue is empty.
 * Returns : 1 if empty; 0 otherwise.
 *
 * Fix: CPU_CRITICAL_ENTER() was commented out while both return
 * paths still called CPU_CRITICAL_EXIT(); exiting a critical section
 * that was never entered restores an uninitialized saved status
 * register and can corrupt the global interrupt state.
 ********************************************************************/
u8 IsEmptyQueue(_CANQUEUE* canqueue)
{
    u8 empty;
    CPU_SR_ALLOC();                             /* Allocate cpu_sr for the critical section.            */


    CPU_CRITICAL_ENTER();                       /* Protect the index comparison from concurrent update. */
    empty = (canqueue->front == canqueue->rear) ? 1 : 0;
    CPU_CRITICAL_EXIT();
    return empty;
}
/*******************************************************************
 * Purpose : Test whether the CAN queue is full.
 * Returns : 1 if full; 0 otherwise.
 *
 * Fix: CPU_CRITICAL_ENTER() was commented out while both return
 * paths still called CPU_CRITICAL_EXIT(); exiting a critical section
 * that was never entered restores an uninitialized saved status
 * register and can corrupt the global interrupt state.
 ********************************************************************/
u8 IsFullQueue(_CANQUEUE* canqueue)
{
    u8 full;
    CPU_SR_ALLOC();                             /* Allocate cpu_sr for the critical section.            */


    CPU_CRITICAL_ENTER();                       /* Protect the index arithmetic from concurrent update. */
                                                /* Full when one slot remains (circular-queue rule).    */
    full = (((MAX_CAN_QUEUE_SIZE - canqueue->front) + canqueue->rear) == (MAX_CAN_QUEUE_SIZE - 1)) ? 1 : 0;
    CPU_CRITICAL_EXIT();
    return full;
}
/*
 * Reset timers 0..3 to a known state: enable each timer's peripheral
 * clock, clear its match/control/interrupt registers, then gate the
 * clock off again.
 */
void CSP_TmrInit (void)
{
    CSP_DEV_NBR   per_nbr;
    CSP_DEV_NBR   tmr_nbr;
    CSP_TMR_REG  *p_tmr_reg;
    CPU_SR_ALLOC();


    for (tmr_nbr = CSP_TMR_NBR_00; tmr_nbr <= CSP_TMR_NBR_03; tmr_nbr++) {
        p_tmr_reg = (CSP_TMR_REG *)CSP_TmrAddrTbl[tmr_nbr];
        per_nbr   = (CSP_DEV_NBR )CSP_TmrPerTbl[tmr_nbr];
        CPU_CRITICAL_ENTER();
        CSP_PM_PerClkEn(per_nbr);                   /* Clock the peripheral while touching its registers.   */
        p_tmr_reg->MCR    = DEF_BIT_NONE;
        p_tmr_reg->MRx[0] = 0u;
        p_tmr_reg->MRx[1] = 0u;
        p_tmr_reg->MRx[2] = 0u;
        p_tmr_reg->IR     = DEF_BIT_FIELD(5u, 0u);  /* Clear all pending timer interrupt flags.             */
        p_tmr_reg->TCR    = DEF_BIT_NONE;
        p_tmr_reg->MCR    = DEF_BIT_NONE;
        p_tmr_reg->EMR    = DEF_BIT_NONE;
        CSP_PM_PerClkDis(per_nbr);                  /* Gate the peripheral clock off again.                 */
        CPU_CRITICAL_EXIT();
    }
}
/*
 * Tasklet dispatcher task: blocks on its task message queue, marks each
 * received tasklet as unscheduled, then runs its handler.
 * The state is cleared BEFORE the handler runs so the handler may
 * legitimately re-schedule itself.
 */
void tasklet_action(void *p_arg)
{
    OS_ERR                  err;
    OS_MSG_SIZE             msg_size;
    struct tasklet_struct  *t;
    CPU_SR                  cpu_sr;


    while(1) {
        t = OSTaskQPend(0, OS_OPT_PEND_BLOCKING, &msg_size, 0,&err);    /* Wait forever for a tasklet.      */
        if(err != OS_ERR_NONE) {
            USBH_DBG("tasklet_schedule OSTaskQPend Failed %d\r\n",err);
            continue;
        }
        if(!t){
            USBH_DBG("tasklet_schedule t == NULL\r\n");
            continue;
        }

//        OSSchedLock(&err);
        CPU_CRITICAL_ENTER();
        if(t->state == TASKLET_STATE_UNSCHED)       /* Queued tasklet should always be marked scheduled.    */
            USBH_DBG("tasklet_action Error at %d\r\n",__LINE__);
        t->state = TASKLET_STATE_UNSCHED;           /* Clear before running so the handler can re-schedule. */
        CPU_CRITICAL_EXIT();

        t->func(t->data);                           /* Run the tasklet handler.                             */
//        OSSchedUnlock(&err);
    }
}
/*
 * Assign destructor 'p_destruct' to TLS entry 'id'.
 * A destructor can be assigned at most once per ID; re-assignment is
 * rejected with OS_ERR_TLS_DESTRUCT_ASSIGNED.
 */
void OS_TLS_SetDestruct (OS_TLS_ID id, OS_TLS_DESTRUCT_PTR p_destruct, OS_ERR *p_err)
{
    CPU_SR_ALLOC();


#ifdef OS_SAFETY_CRITICAL
    if (p_err == (OS_ERR *)0) {
        OS_SAFETY_CRITICAL_EXCEPTION();
        return;
    }
#endif

                                                    /* NOTE(review): the two checks below read shared      */
                                                    /* state outside the critical section -- confirm       */
                                                    /* callers serialize TLS configuration.                */
    if (id >= OS_TLS_NextAvailID) {                 /* See if we exceeded the number of TLS IDs available   */
        *p_err = OS_ERR_TLS_ID_INVALID;
        return;
    }

    if (OS_TLS_DestructPtrTbl[id] != (OS_TLS_DESTRUCT_PTR)0) {  /* Can only assign a destructor once        */
        *p_err = OS_ERR_TLS_DESTRUCT_ASSIGNED;
        return;
    }

    CPU_CRITICAL_ENTER();
    OS_TLS_DestructPtrTbl[id] = p_destruct;
    CPU_CRITICAL_EXIT();
    *p_err = OS_ERR_NONE;
}
void OS_TLS_LockDel (void *p_lock) { OS_TLS_LOCK *p_tls_lock; OS_ERR os_err; CPU_SR_ALLOC(); if (p_lock == (void *)0) { return; } p_tls_lock = (OS_TLS_LOCK *)p_lock; (void)OSMutexDel((OS_MUTEX *)&p_tls_lock->Mutex, (OS_OPT ) OS_OPT_DEL_ALWAYS, (OS_ERR *)&os_err); (void)&os_err; CPU_CRITICAL_ENTER(); /* Return the OS_TLS_LOCK in front of the list */ if (OS_TLS_LockPoolListPtr == (OS_TLS_LOCK *)0) { p_tls_lock->NextPtr = (OS_TLS_LOCK *)0; } else { p_tls_lock->NextPtr = OS_TLS_LockPoolListPtr; } OS_TLS_LockPoolListPtr = p_tls_lock; CPU_CRITICAL_EXIT(); }
/*******************************************************************************
* Name     : IAP_JumpTo()
* Purpose  : Jump to the application image located at 'appAddr'.
* In       : appAddr - base address of the application vector table
* Out      : none (does not return when the jump succeeds)
* Created  : 2014-04-23
*******************************************************************************/
void IAP_JumpTo(u32 appAddr)
{
    u32 JumpAddress = 0;
    u8 cpu_sr;                                  /* NOTE(review): declared as u8 -- confirm this matches */
                                                /* the width CPU_CRITICAL_ENTER() expects for cpu_sr.   */

    /* Remember the application address. */
    IAP_SetAppAddr(appAddr);

    /* Disable interrupts so the jump sequence cannot be disturbed. */
    CPU_CRITICAL_ENTER();

    /* Reset peripherals to defaults so they do not disturb the application. */
    IAP_DevDeInit();

    /* Fetch the application entry point and initialize the stack pointers. */
    JumpAddress =*(volatile u32*) (appAddr + 4); /* appAddr+4 holds the reset handler (initial PC).     */
    pApp = (pFunction)JumpAddress;               /* Function pointer to the application entry.          */
    __set_MSP (*(volatile u32*) appAddr);        /* Initialize the Main Stack Pointer (MSP).            */
    __set_PSP (*(volatile u32*) appAddr);        /* Initialize the Process Stack Pointer (PSP).         */
    __set_CONTROL (0);                           /* Clear the CONTROL register.                         */

    /* Transfer control to the application. */
    pApp();

    CPU_CRITICAL_EXIT();                         /* NOTE(review): only reached if the application entry */
                                                 /* ever returns.                                       */
}
// handler associated to SW1 (labeled SW2 on board) void SW1_Intr_Handler(void) { static uint32_t c_ifsr; // port c interrupt flag status register uint32_t c_portBaseAddr = g_portBaseAddr[GPIO_EXTRACT_PORT(kGpioSW1)]; uint32_t portPinMask = (1 << GPIO_EXTRACT_PIN(kGpioSW1)); CPU_CRITICAL_ENTER(); // enter critical section (disable interrupts) OSIntEnter(); // notify to scheduler the beginning of an ISR ("This allows ?C/OS-III to keep track of interrupt nesting") c_ifsr = PORT_HAL_GetPortIntFlag(c_portBaseAddr); // get intr flag reg related to port C if( (c_ifsr & portPinMask) ) // check if kGpioSW1 generated the interrupt [pin 6 -> 7th flag (flags start with index 0)] { //sem_sw1_post OSSemPost(&MySem1, OS_OPT_POST_1 + OS_OPT_POST_NO_SCHED, &os_err); } GPIO_DRV_ClearPinIntFlag( kGpioSW1 ); CPU_CRITICAL_EXIT(); // renable interrupts OSIntExit(); /* notify to scheduler the end of an ISR ("determines if a higher priority task is ready-to-run. If so, the interrupt returns to the higher priority task instead of the interrupted task.") */ }
/*
 * Interrupt queue handler task.
 * Drains deferred ISR posts from the interrupt queue via
 * OS_IntQRePost(); when the queue is empty it removes itself from the
 * ready list (priority 0) and invokes the scheduler.
 */
void OS_IntQTask (void *p_arg)
{
    CPU_BOOLEAN  done;
#if (OS_CFG_TS_EN == DEF_ENABLED)
    CPU_TS       ts_start;
    CPU_TS       ts_end;
#endif
    CPU_SR_ALLOC();


    (void)p_arg;                                            /* Not using 'p_arg', prevent compiler warning  */
    while (DEF_ON) {
        done = DEF_FALSE;
        while (done == DEF_FALSE) {
            CPU_CRITICAL_ENTER();
            if (OSIntQNbrEntries == 0u) {
#if (OS_CFG_DBG_EN == DEF_ENABLED)
                OSRdyList[0].NbrEntries = 0u;               /* Remove from ready list                       */
#endif
                OSRdyList[0].HeadPtr = DEF_NULL;
                OSRdyList[0].TailPtr = DEF_NULL;
                OS_PrioRemove(0u);                          /* Remove from the priority table               */
                CPU_CRITICAL_EXIT();
                OSSched();
                done = DEF_TRUE;                            /* No more entries in the queue, we are done    */
            } else {
                CPU_CRITICAL_EXIT();
#if (OS_CFG_TS_EN == DEF_ENABLED)
                ts_start = OS_TS_GET();
#endif
                OS_IntQRePost();                            /* Re-post one deferred entry to its object.    */
#if (OS_CFG_TS_EN == DEF_ENABLED)
                ts_end = OS_TS_GET() - ts_start;            /* Measure execution time of tick task          */
                if (OSIntQTaskTimeMax < ts_end) {
                    OSIntQTaskTimeMax = ts_end;
                }
#endif
                CPU_CRITICAL_ENTER();
                OSIntQOutPtr = OSIntQOutPtr->NextPtr;       /* Point to next item in the ISR queue          */
                OSIntQNbrEntries--;
                CPU_CRITICAL_EXIT();
            }
        }
    }
}
/*
 * Start a DMA transfer of 'xfer_size' transfer units from 'p_src' to
 * 'p_dest' on channel 'ch_nbr'.  'opt' flags select source and/or
 * destination address auto-increment.
 * Returns DEF_OK when the transfer was started, DEF_FAIL on invalid
 * arguments or when the channel is not allocated.
 */
CPU_BOOLEAN CSP_DMA_XferStartExt (CSP_DEV_NBR ch_nbr, void *p_dest, void *p_src, CPU_SIZE_T xfer_size, CSP_OPT_FLAGS opt)
{
    CSP_DMA_REG     *p_dma_reg;
    CSP_DMA_CH_REG  *p_dma_ch_reg;
    CPU_INT32U       reg_ctrl;
    CPU_SR_ALLOC();


                                                    /* ------------------ ARGUMENTS CHECKING -------------- */
#if (CSP_CFG_ARG_CHK_EN == DEF_ENABLED)
    if (ch_nbr > CSP_DMA_CH_MAX_NBR - 1u) {         /* Invalid channel number?                              */
        return (DEF_FAIL);
    }
                                                    /* Channel not available?                               */
    if (CSP_DMA_ChTbl[ch_nbr].State != CSP_DMA_CH_STATE_ALLOC) {
        return (DEF_FAIL);
    }
    if ((p_dest == (void *)0) ||                    /* Null pointers?                                       */
        (p_src  == (void *)0)) {
        return (DEF_FAIL);
    }
#endif

    p_dma_reg    = (CSP_DMA_REG *)CSP_ADDR_DMA_REG;
    p_dma_ch_reg = &(p_dma_reg->CHx[ch_nbr]);

    reg_ctrl = p_dma_ch_reg->Ctrl;                  /* Rebuild the control word from its current value.     */
    DEF_BIT_CLR(reg_ctrl, CSP_DMA_MSK_CH_CTRL_XFER_SIZE | CSP_DMA_BIT_CH_CTRL_SI | CSP_DMA_BIT_CH_CTRL_DI | CSP_DMA_BIT_CH_CTRL_I);
    DEF_BIT_SET(reg_ctrl, (xfer_size & CSP_DMA_MSK_CH_CTRL_XFER_SIZE));

    if (DEF_BIT_IS_SET(opt, CSP_DMA_OPT_FLAG_XFER_SRC_INC)) {   /* Source address auto-increment?           */
        DEF_BIT_SET(reg_ctrl, CSP_DMA_BIT_CH_CTRL_SI);
    }
    if (DEF_BIT_IS_SET(opt, CSP_DMA_OPT_FLAG_XFER_DEST_INC)) {  /* Destination address auto-increment?      */
        DEF_BIT_SET(reg_ctrl, CSP_DMA_BIT_CH_CTRL_DI);
    }

    CPU_CRITICAL_ENTER();
    p_dma_ch_reg->SrcAddr  = (CPU_INT32U )p_src;
    p_dma_ch_reg->DestAddr = (CPU_INT32U )p_dest;
    p_dma_ch_reg->Ctrl     = reg_ctrl;
    DEF_BIT_CLR(p_dma_ch_reg->Cfg, CSP_DMA_BIT_CH_CFG_ITC | CSP_DMA_BIT_CH_CFG_IE);
    DEF_BIT_SET(p_dma_ch_reg->Cfg, CSP_DMA_BIT_CH_CFG_CH_EN);   /* Enable the channel.                      */
    CPU_CRITICAL_EXIT();

    return (DEF_OK);
}
/*
 * uC/OS-View Rx/Tx ISR handler for the USART selected by
 * OS_VIEW_COMM_SEL.  Forwards received bytes to OSView_RxHandler() and
 * services transmit-empty interrupts via OSView_TxHandler().  Each
 * received byte is also echoed on USART1 and its hex representation is
 * appended to a rotating debug capture buffer.
 */
static void OSView_RxTxISRHandler (void)
{
    USART_TypeDef *usart;
    CPU_INT08U     rx_data;
    CPU_SR         cpu_sr;


    CPU_CRITICAL_ENTER();                                   /* Tell uC/OS-II that we are starting an ISR    */
    OSIntNesting++;
    CPU_CRITICAL_EXIT();

#if (OS_VIEW_COMM_SEL == OS_VIEW_UART_1)
    usart = USART1;
#elif (OS_VIEW_COMM_SEL == OS_VIEW_UART_2)
    usart = USART2;
#elif (OS_VIEW_COMM_SEL == OS_VIEW_UART_3)
    usart = USART3;
#else
    OSIntExit();                                            /* Unsupported UART selection: leave the ISR.   */
    return;
#endif

    if (USART_GetITStatus(usart, USART_IT_RXNE) != RESET) {
        rx_data = USART_ReceiveData(usart) & 0xFF;          /* Read one byte from the receive data register */
        USART_SendData( USART1, rx_data);                   /* Debug echo of the received byte.             */

        /* Debug capture: accumulate hex text into a rotating set of 3 buffers. */
#if 1
        if(com_app.com_cnt == 10)                           /* After 10 bytes, mark buffer done and rotate. */
        {
            com_app.buf_ok =com_app.buf_ok | (1<<com_buf_cnt);
            com_buf_cnt++;
            if(com_buf_cnt == 3)
                com_buf_cnt = 0;
            com_app.com_cnt = 0;
        }
        sprintf(com_rev_tmp,"%x",rx_data);
        /* NOTE(review): strcat() here is unbounded -- confirm app_buf[] is     */
        /* sized for 10 bytes * up to 2 hex chars plus the terminator.          */
        strcat(com_app.app_buf[com_buf_cnt], com_rev_tmp);
        com_app.com_cnt ++;
#endif

        OSView_RxHandler(rx_data);
        USART_ClearITPendingBit(usart, USART_IT_RXNE);      /* Clear the USART1 Receive interrupt           */
    }

    if (USART_GetITStatus(usart, USART_IT_TXE) != RESET) {
        OSView_TxHandler();
        USART_ClearITPendingBit(usart, USART_IT_TXE);       /* Clear the USART1 transmit interrupt          */
    }

    OSIntExit();                                            /* Tell uC/OS-II that we are leaving the ISR    */
}
/*
 * Calibrate the statistics task: clear the idle counter, wait one
 * statistics period, and record the count reached as the maximum
 * (OSStatTaskCtrMax) against which CPU usage is later computed.
 * Result code is placed in '*p_err'.
 */
void OSStatTaskCPUUsageInit (OS_ERR *p_err)
{
    OS_ERR   err;
    OS_TICK  dly;
    CPU_SR_ALLOC();


#ifdef OS_SAFETY_CRITICAL
    if (p_err == (OS_ERR *)0) {
        OS_SAFETY_CRITICAL_EXCEPTION();
        return;
    }
#endif

    OSTimeDly((OS_TICK )2,                                  /* Synchronize with clock tick                  */
              (OS_OPT  )OS_OPT_TIME_DLY,
              (OS_ERR *)&err);
    if (err != OS_ERR_NONE) {
        *p_err = err;
        return;
    }

    CPU_CRITICAL_ENTER();
    OSStatTaskCtr = (OS_TICK)0;                             /* Clear idle counter                           */
    CPU_CRITICAL_EXIT();

    dly = (OS_TICK)0;                                       /* Measurement window length, in ticks.         */
    if (OSCfg_TickRate_Hz > OSCfg_StatTaskRate_Hz) {
        dly = (OS_TICK)(OSCfg_TickRate_Hz / OSCfg_StatTaskRate_Hz);
    }
    if (dly == (OS_TICK)0) {                                /* Fallback window: 1/10th of a second.         */
        dly = (OS_TICK)(OSCfg_TickRate_Hz / (OS_RATE_HZ)10);
    }

    OSTimeDly(dly,                                          /* Determine MAX. idle counter value            */
              OS_OPT_TIME_DLY,
              &err);

    CPU_CRITICAL_ENTER();
    OSStatTaskTimeMax = (CPU_TS)0;
    OSStatTaskCtrMax  = OSStatTaskCtr;                      /* Store maximum idle counter count             */
    OSStatTaskRdy     = OS_STATE_RDY;
    CPU_CRITICAL_EXIT();
    *p_err = OS_ERR_NONE;
}
/*
 * Set the pseudo-random number generator seed.
 * The write is wrapped in a critical section so concurrent readers of
 * 'Math_RandSeedCur' never observe a torn update.
 */
void Math_RandSetSeed (RAND_NBR seed)
{
    CPU_SR_ALLOC();


    CPU_CRITICAL_ENTER();
    Math_RandSeedCur = seed;
    CPU_CRITICAL_EXIT();
}
/*
 * Reset the peak (high-water mark) entry count of message queue
 * 'p_msg_q' to zero.
 */
void OS_MsgQEntriesPeakReset (OS_MSG_Q *p_msg_q)
{
    CPU_SR_ALLOC();


    CPU_CRITICAL_ENTER();
    p_msg_q->NbrEntriesMax = (OS_MSG_QTY)0;
    CPU_CRITICAL_EXIT();
}