/* Send 'len' bytes from 'buffer' over the connection held in 'n', retrying
 * partial sends until everything is sent or 'timeout_ms' milliseconds elapse.
 * Returns the number of bytes actually sent, or the negative FreeRTOS_send()
 * error code if the send failed outright. */
int FreeRTOS_write(Network* n, unsigned char* buffer, int len, uint32_t timeout_ms)
{
	portTickType xTicksToWait = timeout_ms / portTICK_RATE_MS; /* convert milliseconds to ticks */
	xTimeOutType xTimeOut;
	int sentLen = 0;

	vTaskSetTimeOutState(&xTimeOut); /* Record the time at which this function was entered. */

	/* FIX: this is the write path, so the SEND timeout must be configured,
	 * not the receive timeout (the original code used FREERTOS_SO_RCVTIMEO).
	 * The FREERTOS_SO_SNDTIMEO option value is expressed in ticks, so pass
	 * the converted tick count rather than the raw millisecond value. */
	FreeRTOS_setsockopt(n->my_socket, SOL_SOCKET, FREERTOS_SO_SNDTIMEO, &xTicksToWait, sizeof(xTicksToWait));

	do {
		int rc = 0;

		rc = FreeRTOS_send(n->my_socket, buffer + sentLen, len - sentLen, 0);
		if (rc > 0)
			sentLen += rc;	/* partial send - account for it and keep going */
		else if (rc < 0) {
			sentLen = rc;	/* propagate the error code to the caller */
			break;
		}
		/* xTaskCheckForTimeOut() also decrements xTicksToWait to the time left. */
	} while (sentLen < len && xTaskCheckForTimeOut(&xTimeOut, &xTicksToWait) == pdFALSE);

	return sentLen;
}
/* Walk the list of outstanding asynchronous DNS lookups.  Entries whose
 * search ID matches 'pvSearchID' are cancelled and freed; entries whose
 * timeout has expired get their callback invoked with a NULL result and are
 * then removed.  When the list becomes empty the DNS timer is disabled.
 * Called with pvSearchID == NULL for a pure timeout sweep. */
void vDNSCheckCallBack( void *pvSearchID )
{
const ListItem_t *pxIterator;
const MiniListItem_t* xEnd = ( const MiniListItem_t* )listGET_END_MARKER( &xCallbackList );

	/* The list is shared with the tasks that start lookups, so walk it with
	the scheduler suspended. */
	vTaskSuspendAll();
	{
		for( pxIterator = ( const ListItem_t * ) listGET_NEXT( xEnd );
			 pxIterator != ( const ListItem_t * ) xEnd; )
		{
			DNSCallback_t *pxCallback = ( DNSCallback_t * ) listGET_LIST_ITEM_OWNER( pxIterator );

			/* Move to the next item first because the current item might be
			removed (and freed) below. */
			pxIterator = ( const ListItem_t * ) listGET_NEXT( pxIterator );

			if( ( pvSearchID != NULL ) && ( pvSearchID == pxCallback->pvSearchID ) )
			{
				/* Explicit cancellation: drop the entry without calling back. */
				uxListRemove( &pxCallback->xListItem );
				vPortFree( pxCallback );
			}
			else if( xTaskCheckForTimeOut( &pxCallback->xTimeoutState, &pxCallback->xRemaningTime ) != pdFALSE )
			{
				/* Lookup timed out: notify the owner with a zero (no address)
				result, then free the entry. */
				pxCallback->pCallbackFunction( pxCallback->pcName, pxCallback->pvSearchID, 0 );
				uxListRemove( &pxCallback->xListItem );
				vPortFree( ( void * ) pxCallback );
			}
		}
	}
	xTaskResumeAll();

	/* No pending lookups left: the periodic DNS check timer is not needed. */
	if( listLIST_IS_EMPTY( &xCallbackList ) )
	{
		vIPSetDnsTimerEnableState( pdFALSE );
	}
}
/*
 * For internal use only.
 * If no peripheral access mutex is defined, return STATUS_OK without taking
 * any other action.  Otherwise attempt to obtain the mutex: return STATUS_OK
 * on success, or ERR_TIMEOUT if the mutex did not become available within
 * *max_block_time_ticks tick periods.  On success *max_block_time_ticks is
 * reduced by the time spent waiting (clamped to zero).
 */
status_code_t freertos_obtain_peripheral_access_mutex(
		freertos_dma_event_control_t *dma_event_control,
		TickType_t *max_block_time_ticks)
{
	TimeOut_t entry_time;

	/* Nothing to take when this peripheral has no access mutex. */
	if (dma_event_control->peripheral_access_mutex == NULL) {
		return STATUS_OK;
	}

	/* Note the entry time so the caller's block time can be adjusted. */
	vTaskSetTimeOutState(&entry_time);

	/* Wait for exclusive access to the peripheral. */
	if (xSemaphoreTake(dma_event_control->peripheral_access_mutex,
			*max_block_time_ticks) == pdFAIL) {
		return ERR_TIMEOUT;
	}

	/* Taking the mutex may have blocked; xTaskCheckForTimeOut() shrinks
	*max_block_time_ticks by the elapsed time, and we clamp to zero if the
	whole budget has already been consumed. */
	if (xTaskCheckForTimeOut(&entry_time, max_block_time_ticks) == pdTRUE) {
		*max_block_time_ticks = 0;
	}

	return STATUS_OK;
}
/**
 * Run one pass over every configured sensor.  On the first sensor failure a
 * diagnostic line is printed on the shared console and the pass is aborted.
 * When all sensors ran cleanly and the result-print period has elapsed, the
 * collected results are printed and the period is restarted.
 * Returns ET_OK, or the error code of the failing sensor.
 */
ERRORTYPE SensorManager::run()
{
	ERRORTYPE eResult = ET_OK;

	for (uint8_t u8Idx = 0; u8Idx < m_sConfig.u8SensorCount; ++u8Idx)
	{
		eResult = m_sConfig.pSensorList[u8Idx]->run();
		if (ET_OK == eResult)
		{
			continue;
		}

		/* Sensor failed: report it on the console (serialised via the
		   console mutex) and stop processing the remaining sensors. */
		xSemaphoreTake(xConsoleMutex, portMAX_DELAY);
		xSerialPrint_P(PSTR("/SENSOR MGR/ "));
		xSerialPrint_P(m_sConfig.pSensorList[u8Idx]->getMeasDataInfo(0).psMeasDataDescription);
		xSerialxPrintf_P(&xSerialPort,
				PSTR(", in state %u, encountered problems.\r\n"),
				m_sConfig.pSensorList[u8Idx]->getSensorState());
		xSemaphoreGive(xConsoleMutex);
		break;
	}

	/* xTaskCheckForTimeOut() returns pdTRUE once the print period expires. */
	if ((ET_OK == eResult) &&
		(pdTRUE == xTaskCheckForTimeOut(&m_sLastResultPrintTimestamp, &m_sPrintResultsTimeout)))
	{
		printResultsStdOut();
		vTaskSetTimeOutState(&m_sLastResultPrintTimestamp);
		m_sPrintResultsTimeout = m_sConfig.sPrintResultsTimeout;
	}

	return eResult;
}
/* MPU wrapper: raise privilege, forward to the kernel's
 * xTaskCheckForTimeOut(), then restore the previous privilege level. */
BaseType_t MPU_xTaskCheckForTimeOut( TimeOut_t * const pxTimeOut, TickType_t * const pxTicksToWait )
{
BaseType_t xResult;
BaseType_t xWasPrivileged = xPortRaisePrivilege();

	xResult = xTaskCheckForTimeOut( pxTimeOut, pxTicksToWait );

	/* Only drops privilege if it was not already held on entry. */
	vPortResetPrivilege( xWasPrivileged );

	return xResult;
}
/* Return true when the connection's idle timeout has elapsed.  Note that
 * xTaskCheckForTimeOut() also updates max_wait_millis to the time left. */
bool connection_timeout(socket_connection_t * cnx)
{
	return xTaskCheckForTimeOut(&cnx->timeout, &cnx->max_wait_millis) == pdTRUE;
}
/* Periodically poll the link status (BMSR) of every PHY port managed by
 * 'pxPhyObject'.  When xHadReception is non-zero the check is skipped and
 * merely rescheduled.  Returns pdTRUE when any port's link state changed
 * (so the caller knows a re-check/notification is needed). */
BaseType_t xPhyCheckLinkStatus( EthernetPhy_t *pxPhyObject, BaseType_t xHadReception )
{
uint32_t ulStatus, ulBitMask = 1u;
BaseType_t xPhyIndex;
BaseType_t xNeedCheck = pdFALSE;

	if( xHadReception > 0 )
	{
		/* A packet was received. No need to check for the PHY status now,
		but set a timer to check it later on. */
		vTaskSetTimeOutState( &( pxPhyObject->xLinkStatusTimer ) );
		pxPhyObject->xLinkStatusRemaining = pdMS_TO_TICKS( ipconfigPHY_LS_HIGH_CHECK_TIME_MS );
	}
	else if( xTaskCheckForTimeOut( &( pxPhyObject->xLinkStatusTimer ), &( pxPhyObject->xLinkStatusRemaining ) ) != pdFALSE )
	{
		/* The check interval expired: read the BMSR of each port. */
		for( xPhyIndex = 0; xPhyIndex < pxPhyObject->xPortCount; xPhyIndex++, ulBitMask <<= 1 )
		{
		BaseType_t xPhyAddress = pxPhyObject->ucPhyIndexes[ xPhyIndex ];

			if( pxPhyObject->fnPhyRead( xPhyAddress, phyREG_01_BMSR, &ulStatus ) == 0 )
			{
				/* '!!' normalises both sides to 0/1 so the cached mask bit can
				be compared against the (differently positioned) BMSR bit. */
				if( !!( pxPhyObject->ulLinkStatusMask & ulBitMask ) != !!( ulStatus & phyBMSR_LINK_STATUS ) )
				{
					if( ( ulStatus & phyBMSR_LINK_STATUS ) != 0 )
					{
						pxPhyObject->ulLinkStatusMask |= ulBitMask;
					}
					else
					{
						pxPhyObject->ulLinkStatusMask &= ~( ulBitMask );
					}
					FreeRTOS_printf( ( "xPhyCheckLinkStatus: PHY LS now %02lX\n", pxPhyObject->ulLinkStatusMask ) );
					eventLogAdd( "PHY LS now %02lX", pxPhyObject->ulLinkStatusMask );
					xNeedCheck = pdTRUE;
				}
			}
		}

		/* Restart the timer: poll faster while the link is down.
		NOTE(review): this tests bit phyBMSR_LINK_STATUS of the port *mask*,
		which only corresponds to a port index by coincidence — confirm. */
		vTaskSetTimeOutState( &( pxPhyObject->xLinkStatusTimer ) );
		if( ( pxPhyObject->ulLinkStatusMask & phyBMSR_LINK_STATUS ) != 0 )
		{
			pxPhyObject->xLinkStatusRemaining = pdMS_TO_TICKS( ipconfigPHY_LS_HIGH_CHECK_TIME_MS );
		}
		else
		{
			pxPhyObject->xLinkStatusRemaining = pdMS_TO_TICKS( ipconfigPHY_LS_LOW_CHECK_TIME_MS );
		}
	}
	return xNeedCheck;
}
//------------------------------------------------------------------------------------ size_t FreeRTOS_UART_read( Peripheral_Descriptor_t const pxPeripheral, void * const pvBuffer, const size_t xBytes ) { // Lee caracteres de la cola de recepcion y los deja en el buffer. // El timeout lo fijo con ioctl. Peripheral_Control_t * const pxPeripheralControl = ( Peripheral_Control_t * const ) pxPeripheral; size_t xBytesReceived = 0U; portTickType xTicksToWait; xTimeOutType xTimeOut; UART_device_control_t *pUart; pUart = pxPeripheralControl->phDevice; xTicksToWait = pxPeripheralControl->xBlockTime; xTicksToWait = 1; vTaskSetTimeOutState( &xTimeOut ); /* Are there any more bytes to be received? */ while( xBytesReceived < xBytes ) { /* Receive the next character. */ if ( pUart->rxBufferType == QUEUE ) { if( xQueueReceive( pUart->rxStruct, &((char *)pvBuffer)[ xBytesReceived ], xTicksToWait ) == pdPASS ) { xBytesReceived++; } } else { // Los fifo no tienen timeout, retornan enseguida if( xFifoReceive( pUart->rxStruct, &((char *)pvBuffer)[ xBytesReceived ], xTicksToWait ) == pdPASS ) { xBytesReceived++; } else { // Espero xTicksToWait antes de volver a chequear vTaskDelay( ( TickType_t)( xTicksToWait ) ); } } /* Time out has expired ? */ if( xTaskCheckForTimeOut( &xTimeOut, &xTicksToWait ) != pdFALSE ) { break; } } return xBytesReceived; }
/* Send a reset command to a set of PHY ports and wait (up to one second)
 * until each port reports that its reset has completed.  Returns a bit-mask
 * of the ports whose reset finished in time. */
static uint32_t xPhyReset( EthernetPhy_t *pxPhyObject, uint32_t ulPhyMask )
{
uint32_t ulDoneMask, ulConfig;
TickType_t xRemainingTime;
TimeOut_t xTimer;
BaseType_t xPhyIndex;

	/* A bit-mask of PHY ports that are ready. */
	ulDoneMask = 0ul;

	/* Set the RESET bits high. */
	for( xPhyIndex = 0; xPhyIndex < pxPhyObject->xPortCount; xPhyIndex++ )
	{
	BaseType_t xPhyAddress = pxPhyObject->ucPhyIndexes[ xPhyIndex ];

		/* Read Control register, then write it back with the self-clearing
		RESET bit set. */
		pxPhyObject->fnPhyRead( xPhyAddress, phyREG_00_BMCR, &ulConfig );
		pxPhyObject->fnPhyWrite( xPhyAddress, phyREG_00_BMCR, ulConfig | phyBMCR_RESET );
	}

	xRemainingTime = ( TickType_t ) pdMS_TO_TICKS( 1000UL );
	vTaskSetTimeOutState( &xTimer );

	/* The reset should last less than a second. */
	for( ;; )
	{
		/* Poll each port: the RESET bit reads back as 0 once done. */
		for( xPhyIndex = 0; xPhyIndex < pxPhyObject->xPortCount; xPhyIndex++ )
		{
		BaseType_t xPhyAddress = pxPhyObject->ucPhyIndexes[ xPhyIndex ];

			pxPhyObject->fnPhyRead( xPhyAddress, phyREG_00_BMCR, &ulConfig );
			if( ( ulConfig & phyBMCR_RESET ) == 0 )
			{
				FreeRTOS_printf( ( "xPhyReset: phyBMCR_RESET %d ready\n", (int)xPhyIndex ) );
				ulDoneMask |= ( 1ul << xPhyIndex );
			}
		}
		if( ulDoneMask == ulPhyMask )
		{
			break;
		}
		if( xTaskCheckForTimeOut( &xTimer, &xRemainingTime ) != pdFALSE )
		{
			FreeRTOS_printf( ( "xPhyReset: phyBMCR_RESET timed out ( done 0x%02lX )\n", ulDoneMask ) );
			break;
		}
	}

	/* Clear the reset bits. */
	for( xPhyIndex = 0; xPhyIndex < pxPhyObject->xPortCount; xPhyIndex++ )
	{
	BaseType_t xPhyAddress = pxPhyObject->ucPhyIndexes[ xPhyIndex ];

		pxPhyObject->fnPhyRead( xPhyAddress, phyREG_00_BMCR, &ulConfig );
		pxPhyObject->fnPhyWrite( xPhyAddress, phyREG_00_BMCR, ulConfig & ~phyBMCR_RESET );
	}

	/* Give the PHYs a moment to settle after the reset. */
	vTaskDelay( pdMS_TO_TICKS( 50ul ) );
	eventLogAdd( "PHY reset %d ports", (int)pxPhyObject->xPortCount );

	return ulDoneMask;
}
/* Legacy FreeRTOS queue send.  Posts pvItemToQueue onto pxQueue at
 * xCopyPosition (front/back), blocking for at most xTicksToWait ticks while
 * the queue is full.  Returns pdPASS on success or errQUEUE_FULL on timeout.
 * The outer do/while re-runs the whole attempt whenever the task unblocked
 * without the queue actually having space (queueERRONEOUS_UNBLOCK). */
signed portBASE_TYPE xQueueGenericSend( xQueueHandle pxQueue, const void * const pvItemToQueue, portTickType xTicksToWait, portBASE_TYPE xCopyPosition )
{
signed portBASE_TYPE xReturn = pdTRUE;
xTimeOutType xTimeOut;

	do
	{
		/* If xTicksToWait is zero then we are not going to block even
		if there is no room in the queue to post. */
		if( xTicksToWait > ( portTickType ) 0 )
		{
			vTaskSuspendAll();
			prvLockQueue( pxQueue );

			if( xReturn == pdTRUE )
			{
				/* This is the first time through - we need to capture the
				time while the scheduler is locked to ensure we attempt to
				block at least once. */
				vTaskSetTimeOutState( &xTimeOut );
			}

			if( prvIsQueueFull( pxQueue ) )
			{
				/* Need to call xTaskCheckForTimeout again as time could
				have passed since it was last called if this is not the
				first time around this loop. */
				if( xTaskCheckForTimeOut( &xTimeOut, &xTicksToWait ) == pdFALSE )
				{
					traceBLOCKING_ON_QUEUE_SEND( pxQueue );
					vTaskPlaceOnEventList( &( pxQueue->xTasksWaitingToSend ), xTicksToWait );

					/* Unlocking the queue means queue events can effect the
					event list.  It is possible	that interrupts occurring now
					remove this task from the event	list again - but as the
					scheduler is suspended the task will go onto the pending
					ready last instead of the actual ready list. */
					prvUnlockQueue( pxQueue );

					/* Resuming the scheduler will move tasks from the pending
					ready list into the ready list - so it is feasible that this
					task is already in a ready list before it yields - in which
					case the yield will not cause a context switch unless there
					is also a higher priority task in the pending ready list. */
					if( !xTaskResumeAll() )
					{
						taskYIELD();
					}
				}
				else
				{
					/* Timed out before the queue drained. */
					prvUnlockQueue( pxQueue );
					( void ) xTaskResumeAll();
				}
			}
			else
			{
				/* The queue was not full so we can just unlock the
				scheduler and queue again before carrying on. */
				prvUnlockQueue( pxQueue );
				( void ) xTaskResumeAll();
			}
		}

		/* Higher priority tasks and interrupts can execute during
		this time and could possible refill the queue - even if we
		unblocked because space became available. */
		taskENTER_CRITICAL();
		{
			/* Is there room on the queue now?  To be running we must be
			the highest priority task wanting to access the queue. */
			if( pxQueue->uxMessagesWaiting < pxQueue->uxLength )
			{
				traceQUEUE_SEND( pxQueue );
				prvCopyDataToQueue( pxQueue, pvItemToQueue, xCopyPosition );
				xReturn = pdPASS;

				/* If there was a task waiting for data to arrive on the
				queue then unblock it now. */
				if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
				{
					if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) == pdTRUE )
					{
						/* The unblocked task has a priority higher than
						our own so yield immediately. */
						taskYIELD();
					}
				}
			}
			else
			{
				/* Setting xReturn to errQUEUE_FULL will force its timeout
				to be re-evaluated.  This is necessary in case interrupts
				and higher priority tasks accessed the queue between this
				task being unblocked and subsequently attempting to write
				to the queue. */
				xReturn = errQUEUE_FULL;
			}
		}
		taskEXIT_CRITICAL();

		if( xReturn == errQUEUE_FULL )
		{
			if( xTicksToWait > ( portTickType ) 0 )
			{
				if( xTaskCheckForTimeOut( &xTimeOut, &xTicksToWait ) == pdFALSE )
				{
					/* Time remains: loop and try again. */
					xReturn = queueERRONEOUS_UNBLOCK;
				}
				else
				{
					traceQUEUE_SEND_FAILED( pxQueue );
				}
			}
			else
			{
				traceQUEUE_SEND_FAILED( pxQueue );
			}
		}
	}
	while( xReturn == queueERRONEOUS_UNBLOCK );

	return xReturn;
}
/* Return the number of milliseconds left before 'timer' expires (0 once it
 * has expired). */
uint32_t TimerLeftMS(Timer* timer)
{
	/* xTaskCheckForTimeOut() updates xTicksToWait to the number of ticks
	remaining, and sets it to 0 when the timeout has already expired. */
	xTaskCheckForTimeOut(&timer->xTimeOut, &timer->xTicksToWait);

	/* NOTE: the previous '(timer->xTicksToWait < 0) ? 0 :' guard was removed:
	the tick type is unsigned, so that comparison was always false (dead
	code).  Behaviour is unchanged. */
	return timer->xTicksToWait * portTICK_RATE_MS;
}
/* Return non-zero when 'timer' has expired.  As a side effect the call
 * updates timer->xTicksToWait to the remaining tick count. */
portBASE_TYPE TimerIsExpired(Timer* timer)
{
portBASE_TYPE xExpired;

	xExpired = ( xTaskCheckForTimeOut(&timer->xTimeOut, &timer->xTicksToWait) == pdTRUE );

	return xExpired;
}
/* Deferred-interrupt handler task for the EMAC driver.  Sleeps on a task
 * notification until the ISR flags an event in ulISREvents, then services
 * RX, TX-complete and error events, and periodically polls the PHY link
 * status via the xPhyTime/xPhyRemTime timeout pair.  Never returns. */
static void prvEMACHandlerTask( void *pvParameters )
{
TimeOut_t xPhyTime;
TickType_t xPhyRemTime;
UBaseType_t uxCount;
#if( ipconfigZERO_COPY_TX_DRIVER != 0 )
	NetworkBufferDescriptor_t *pxBuffer;
#endif
uint8_t *pucBuffer;
BaseType_t xResult = 0;
uint32_t xStatus;
const TickType_t ulMaxBlockTime = pdMS_TO_TICKS( EMAC_MAX_BLOCK_TIME_MS );

	/* Remove compiler warnings about unused parameters. */
	( void ) pvParameters;

	configASSERT( xEMACTaskHandle );

	/* Start the PHY polling timer with the "link down" (fast) interval. */
	vTaskSetTimeOutState( &xPhyTime );
	xPhyRemTime = pdMS_TO_TICKS( PHY_LS_LOW_CHECK_TIME_MS );

	for( ;; )
	{
		vCheckBuffersAndQueue();

		if( ( ulISREvents & EMAC_IF_ALL_EVENT ) == 0 )
		{
			/* No events to process now, wait for the next. */
			ulTaskNotifyTake( pdFALSE, ulMaxBlockTime );
		}

		if( ( ulISREvents & EMAC_IF_RX_EVENT ) != 0 )
		{
			ulISREvents &= ~EMAC_IF_RX_EVENT;

			/* Wait for the EMAC interrupt to indicate that another packet
			has been received. */
			xResult = prvEMACRxPoll();
		}

		if( ( ulISREvents & EMAC_IF_TX_EVENT ) != 0 )
		{
			/* Future extension: code to release TX buffers if zero-copy is
			used. */
			ulISREvents &= ~EMAC_IF_TX_EVENT;
			while( xQueueReceive( xTxBufferQueue, &pucBuffer, 0 ) != pdFALSE )
			{
			#if( ipconfigZERO_COPY_TX_DRIVER != 0 )
				{
					pxBuffer = pxPacketBuffer_to_NetworkBuffer( pucBuffer );
					if( pxBuffer != NULL )
					{
						vReleaseNetworkBufferAndDescriptor( pxBuffer );
						tx_release_count[ 0 ]++;
					}
					else
					{
						tx_release_count[ 1 ]++;
					}
				}
			#else
				{
					tx_release_count[ 0 ]++;
				}
			#endif
				uxCount = uxQueueMessagesWaiting( ( QueueHandle_t ) xTXDescriptorSemaphore );
				if( uxCount < GMAC_TX_BUFFERS )
				{
					/* Tell the counting semaphore that one more TX descriptor
					is available. */
					xSemaphoreGive( xTXDescriptorSemaphore );
				}
			}
		}

		if( ( ulISREvents & EMAC_IF_ERR_EVENT ) != 0 )
		{
			/* Future extension: logging about errors that occurred. */
			ulISREvents &= ~EMAC_IF_ERR_EVENT;
		}

		if( xResult > 0 )
		{
			/* A packet was received. No need to check for the PHY status
			now, but set a timer to check it later on. */
			vTaskSetTimeOutState( &xPhyTime );
			xPhyRemTime = pdMS_TO_TICKS( PHY_LS_HIGH_CHECK_TIME_MS );
			xResult = 0;
		}
		else if( xTaskCheckForTimeOut( &xPhyTime, &xPhyRemTime ) != pdFALSE )
		{
			/* Check the link status again. */
			xStatus = ulReadMDIO( PHY_REG_01_BMSR );
			if( ( ulPHYLinkStatus & BMSR_LINK_STATUS ) != ( xStatus & BMSR_LINK_STATUS ) )
			{
				ulPHYLinkStatus = xStatus;
				FreeRTOS_printf( ( "prvEMACHandlerTask: PHY LS now %d\n", ( ulPHYLinkStatus & BMSR_LINK_STATUS ) != 0 ) );
			}

			/* Restart the PHY polling timer; poll faster while the link is
			down. */
			vTaskSetTimeOutState( &xPhyTime );
			if( ( ulPHYLinkStatus & BMSR_LINK_STATUS ) != 0 )
			{
				xPhyRemTime = pdMS_TO_TICKS( PHY_LS_HIGH_CHECK_TIME_MS );
			}
			else
			{
				xPhyRemTime = pdMS_TO_TICKS( PHY_LS_LOW_CHECK_TIME_MS );
			}
		}
	}
}
/* Start auto-negotiation on every PHY port selected by 'ulPhyMask', wait up
 * to 3 seconds for completion, then read back and record the negotiated
 * link, duplex and speed results.  Always returns 0.
 *
 * Fixes applied (matching the upstream FreeRTOS phyHandling.c corrections):
 *  - ulPHYLinkStatus is now initialised before use; it was previously
 *    read (|=, &=) while uninitialized, which is undefined behaviour.
 *  - The two log statements used '( ulPHYLinkStatus |= phyBMSR_LINK_STATUS )',
 *    an assignment that forces the bit on so "high/low" always printed
 *    "high"; both now test with '&'.
 *  - The timeout log message no longer claims to come from xPhyReset. */
BaseType_t xPhyStartAutoNegotiation( EthernetPhy_t *pxPhyObject, uint32_t ulPhyMask )
{
uint32_t xPhyIndex, ulDoneMask, ulBitMask;
uint32_t ulPHYLinkStatus = 0, ulRegValue;
TickType_t xRemainingTime;
TimeOut_t xTimer;

	if( ulPhyMask == ( uint32_t )0u )
	{
		return 0;
	}

	for( xPhyIndex = 0; xPhyIndex < pxPhyObject->xPortCount; xPhyIndex++ )
	{
		if( ( ulPhyMask & ( 1lu << xPhyIndex ) ) != 0lu )
		{
		BaseType_t xPhyAddress = pxPhyObject->ucPhyIndexes[ xPhyIndex ];

			/* Enable Auto-Negotiation: advertise capabilities, then restart AN. */
			pxPhyObject->fnPhyWrite( xPhyAddress, phyREG_04_ADVERTISE, pxPhyObject->ulACRValue );
			pxPhyObject->fnPhyWrite( xPhyAddress, phyREG_00_BMCR, pxPhyObject->ulBCRValue | phyBMCR_AN_RESTART );
		}
	}
	eventLogAdd( "AN start" );
	xRemainingTime = ( TickType_t ) pdMS_TO_TICKS( 3000UL );
	vTaskSetTimeOutState( &xTimer );
	ulDoneMask = 0;

	/* Wait until the auto-negotiation will be completed */
	for( ;; )
	{
		ulBitMask = ( uint32_t )1u;
		for( xPhyIndex = 0; xPhyIndex < pxPhyObject->xPortCount; xPhyIndex++, ulBitMask <<= 1 )
		{
			if( ( ulPhyMask & ulBitMask ) != 0lu )
			{
				if( ( ulDoneMask & ulBitMask ) == 0lu )
				{
				BaseType_t xPhyAddress = pxPhyObject->ucPhyIndexes[ xPhyIndex ];

					pxPhyObject->fnPhyRead( xPhyAddress, phyREG_01_BMSR, &ulRegValue );
					if( ( ulRegValue & phyBMSR_AN_COMPLETE ) != 0 )
					{
						ulDoneMask |= ulBitMask;
					}
				}
			}
		}
		if( ulPhyMask == ulDoneMask )
		{
			break;
		}
		if( xTaskCheckForTimeOut( &xTimer, &xRemainingTime ) != pdFALSE )
		{
			/* Message corrected: this is auto-negotiation, not xPhyReset. */
			FreeRTOS_printf( ( "xPhyStartAutoNegotiation: AN timed out ( done 0x%02lX )\n", ulDoneMask ) );
			eventLogAdd( "ANtimed out");
			break;
		}
	}
	eventLogAdd( "AN done %02lX / %02lX", ulDoneMask, ulPhyMask );

	if( ulDoneMask != ( uint32_t)0u )
	{
		ulBitMask = ( uint32_t )1u;
		pxPhyObject->ulLinkStatusMask &= ~( ulDoneMask );
		for( xPhyIndex = 0; xPhyIndex < pxPhyObject->xPortCount; xPhyIndex++, ulBitMask <<= 1 )
		{
		BaseType_t xPhyAddress = pxPhyObject->ucPhyIndexes[ xPhyIndex ];
		uint32_t ulPhyID = pxPhyObject->ulPhyIDs[ xPhyIndex ];

			if( ( ulDoneMask & ulBitMask ) == ( uint32_t )0u )
			{
				continue;
			}

			/* Clear the 'phyBMCR_AN_RESTART' bit. */
			pxPhyObject->fnPhyWrite( xPhyAddress, phyREG_00_BMCR, pxPhyObject->ulBCRValue );

			pxPhyObject->fnPhyRead( xPhyAddress, phyREG_01_BMSR, &ulRegValue);
			if( ( ulRegValue & phyBMSR_LINK_STATUS ) != 0 )
			{
				ulPHYLinkStatus |= phyBMSR_LINK_STATUS;
				pxPhyObject->ulLinkStatusMask |= ulBitMask;
			}
			else
			{
				ulPHYLinkStatus &= ~( phyBMSR_LINK_STATUS );
			}

			if( xHas_1F_PHYSPCS( ulPhyID ) )
			{
			/* 31 RW PHY Special Control Status */
			uint32_t ulControlStatus;

				/* Map the vendor-specific status bits onto the PHYSTS layout
				used below. */
				pxPhyObject->fnPhyRead( xPhyAddress, phyREG_1F_PHYSPCS, &ulControlStatus);
				ulRegValue = 0;
				if( ( ulControlStatus & phyPHYSPCS_FULL_DUPLEX ) != 0 )
				{
					ulRegValue |= phyPHYSTS_DUPLEX_STATUS;
				}
				if( ( ulControlStatus & phyPHYSPCS_SPEED_MASK ) == phyPHYSPCS_SPEED_10 )
				{
					ulRegValue |= phyPHYSTS_SPEED_STATUS;
				}
			}
			else
			{
				/* Read the result of the auto-negotiation. */
				pxPhyObject->fnPhyRead( xPhyAddress, PHYREG_10_PHYSTS, &ulRegValue);
			}

			/* FIX: test the link bit with '&'; the original '|=' forced the
			bit on, so the log always reported "high". */
			FreeRTOS_printf( ( ">> Autonego ready: %08lx: %s duplex %u mbit %s status\n",
				ulRegValue,
				( ulRegValue & phyPHYSTS_DUPLEX_STATUS ) ? "full" : "half",
				( ulRegValue & phyPHYSTS_SPEED_STATUS ) ? 10 : 100,
				( ( ulPHYLinkStatus & phyBMSR_LINK_STATUS ) != 0) ? "high" : "low" ) );
			eventLogAdd( "%s duplex %u mbit %s st",
				( ulRegValue & phyPHYSTS_DUPLEX_STATUS ) ? "full" : "half",
				( ulRegValue & phyPHYSTS_SPEED_STATUS ) ? 10 : 100,
				( ( ulPHYLinkStatus & phyBMSR_LINK_STATUS ) != 0) ? "high" : "low" );

			{
				/* Dump registers 0x10..0x1F for diagnostics. */
				uint32_t regs[4];
				int i,j;
				int address = 0x10;

				for (i = 0; i < 4; i++)
				{
					for (j = 0; j < 4; j++)
					{
						pxPhyObject->fnPhyRead( xPhyAddress, address, regs + j );
						address++;
					}
					eventLogAdd("%04lX %04lX %04lX %04lX", regs[0], regs[1], regs[2], regs[3]);
				}
			}

			if( ( ulRegValue & phyPHYSTS_DUPLEX_STATUS ) != ( uint32_t )0u )
			{
				pxPhyObject->xPhyProperties.ucDuplex = PHY_DUPLEX_FULL;
			}
			else
			{
				pxPhyObject->xPhyProperties.ucDuplex = PHY_DUPLEX_HALF;
			}
			if( ( ulRegValue & phyPHYSTS_SPEED_STATUS ) != 0 )
			{
				pxPhyObject->xPhyProperties.ucSpeed = PHY_SPEED_10;
			}
			else
			{
				pxPhyObject->xPhyProperties.ucSpeed = PHY_SPEED_100;
			}
		}
	} /* if( ulDoneMask != ( uint32_t)0u ) */

	return 0;
}
/**
 * \ingroup freertos_uart_peripheral_control_group
 * \brief Initiate a complete multi-byte read operation on a UART peripheral.
 *
 * The FreeRTOS ASF UART driver uses the PDC to transfer data from a peripheral
 * to a circular buffer.  Reception happens in the background, while the
 * microcontroller is executing application code.  freertos_uart_read_packet()
 * copies bytes from the DMA buffer into the buffer passed as a
 * freertos_uart_read_packet() parameter.
 *
 * Readers are recommended to also reference the application note and examples
 * that accompany the FreeRTOS ASF drivers.
 *
 * The FreeRTOS ASF driver both installs and handles the UART PDC interrupts.
 * Users do not need to concern themselves with interrupt handling, and must
 * not install their own interrupt handler.
 *
 * \param p_uart    The handle to the UART port returned by the
 *     freertos_uart_serial_init() call used to initialise the port.
 * \param data      A pointer to the buffer into which received data is to be
 *     copied.
 * \param len       The number of bytes to copy.
 * \param block_time_ticks    Defines the maximum combined time the function
 *     will wait to get exclusive access to the peripheral and receive the
 *     requested number of bytes.  Other tasks will execute during any waiting
 *     time.
 *
 * The FreeRTOS ASF UART driver is initialized using a call to
 * freertos_uart_serial_init().  The freertos_driver_parameters.options_flags
 * parameter passed to the initialization function defines the driver behavior.
 * If freertos_driver_parameters.options_flags had the USE_RX_ACCESS_MUTEX bit
 * set, then the driver will only read from the UART buffer if it has first
 * gained exclusive access to it.  block_time_ticks specifies the maximum
 * amount of time the driver will wait to get exclusive access before aborting
 * the read operation.
 *
 * If the number of bytes available is less than the number requested then
 * freertos_uart_serial_read_packet() will wait for more bytes to become
 * available.  block_time_ticks specifies the maximum amount of time the
 * driver will wait before returning fewer bytes than were requested.
 *
 * block_time_ticks is specified in RTOS tick periods.  To specify a block
 * time in milliseconds, divide the milliseconds value by portTICK_RATE_MS,
 * and pass the result in block_time_ticks.  portTICK_RATE_MS is defined by
 * FreeRTOS.
 *
 * \return The number of bytes that were copied into data.  This will be
 * less than the requested number of bytes if a time out occurred.
 */
uint32_t freertos_uart_serial_read_packet(freertos_uart_if p_uart, uint8_t *data, uint32_t len, portTickType block_time_ticks)
{
	portBASE_TYPE uart_index, attempt_read;
	Uart *uart_base;
	xTimeOutType time_out_definition;
	uint32_t bytes_read = 0;

	uart_base = (Uart *) p_uart;
	uart_index = get_pdc_peripheral_details(all_uart_definitions, MAX_UARTS, (void *) uart_base);

	/* It is possible to initialise the peripheral to only use Tx and not Rx.
	Check that Rx has been initialised. */
	configASSERT(rx_buffer_definitions[uart_index].next_byte_to_read);
	configASSERT(rx_buffer_definitions[uart_index].next_byte_to_read != RX_NOT_USED);

	/* Only do anything if the UART is valid. */
	if (uart_index < MAX_UARTS) {
		/* Must not request more bytes than will fit in the buffer. */
		if (len <= (rx_buffer_definitions[uart_index].past_rx_buffer_end_address
				- rx_buffer_definitions[uart_index].rx_buffer_start_address)) {
			/* Remember the time on entry. */
			vTaskSetTimeOutState(&time_out_definition);

			/* If an Rx mutex is in use, attempt to obtain it. */
			if (rx_buffer_definitions[uart_index].rx_access_mutex != NULL) {
				/* Attempt to obtain the mutex. */
				attempt_read = xSemaphoreTake(
						rx_buffer_definitions[uart_index].rx_access_mutex,
						block_time_ticks);

				if (attempt_read == pdTRUE) {
					/* The semaphore was obtained, adjust the
					block_time_ticks to take into account the time taken to
					obtain the semaphore. */
					if (xTaskCheckForTimeOut(&time_out_definition,
							&block_time_ticks) == pdTRUE) {
						attempt_read = pdFALSE;

						/* The port is not going to be used, so return the
						mutex now. */
						xSemaphoreGive(rx_buffer_definitions[uart_index].rx_access_mutex);
					}
				}
			} else {
				attempt_read = pdTRUE;
			}

			if (attempt_read == pdTRUE) {
				do {
					/* Wait until data is available. */
					xSemaphoreTake(rx_buffer_definitions[uart_index].rx_event_semaphore, block_time_ticks);

					/* Copy as much data as is available, up to however much
					a maximum of the total number of requested bytes. */
					bytes_read += freertos_copy_bytes_from_pdc_circular_buffer(
							&(rx_buffer_definitions[uart_index]),
							all_uart_definitions[uart_index].pdc_base_address->PERIPH_RPR,
							&(data[bytes_read]),
							(len - bytes_read));

					/* The Rx DMA will have stopped if the Rx buffer had
					become full before this read operation.  If bytes were
					removed by this read then there is guaranteed to be
					space in the Rx buffer and the Rx DMA can be
					restarted. */
					if (bytes_read > 0) {
						taskENTER_CRITICAL();
						{
							if(rx_buffer_definitions[uart_index].rx_pdc_parameters.ul_size == 0UL) {
								configure_rx_dma(uart_index, data_removed);
							}
						}
						taskEXIT_CRITICAL();
					}

				/* Until all the requested bytes are received, or the
				function runs out of time. */
				} while ((bytes_read < len) && (xTaskCheckForTimeOut(
						&time_out_definition,
						&block_time_ticks) == pdFALSE));

				if (rx_buffer_definitions[uart_index].rx_access_mutex != NULL) {
					/* Return the mutex. */
					xSemaphoreGive(rx_buffer_definitions[uart_index].rx_access_mutex);
				}
			}
		}
	}

	return bytes_read;
}
/* Legacy FreeRTOS queue send.  Posts pvItemToQueue onto pxQueue, blocking for
 * at most xTicksToWait ticks while the queue is full.  Returns pdPASS on
 * success or errQUEUE_FULL on timeout.  The do/while re-runs the attempt
 * whenever the task unblocked without space actually being available
 * (queueERRONEOUS_UNBLOCK). */
signed portBASE_TYPE xQueueSend( xQueueHandle pxQueue, const void *pvItemToQueue, portTickType xTicksToWait )
{
signed portBASE_TYPE xReturn = pdPASS;
xTimeOutType xTimeOut;

	/* Make sure other tasks do not access the queue. */
	vTaskSuspendAll();

	/* Capture the current time status for future reference. */
	vTaskSetTimeOutState( &xTimeOut );

	/* It is important that this is the only thread/ISR that modifies the
	ready or delayed lists until xTaskResumeAll() is called.  Places where
	the ready/delayed lists are modified include:

		+ vTaskDelay() -  Nothing can call vTaskDelay as the scheduler is
		  suspended, vTaskDelay() cannot be called from an ISR.
		+ vTaskPrioritySet() - Has a critical section around the access.
		+ vTaskSwitchContext() - This will not get executed while the scheduler
		  is suspended.
		+ prvCheckDelayedTasks() - This will not get executed while the
		  scheduler is suspended.
		+ xTaskCreate() - Has a critical section around the access.
		+ vTaskResume() - Has a critical section around the access.
		+ xTaskResumeAll() - Has a critical section around the access.
		+ xTaskRemoveFromEventList - Checks to see if the scheduler is
		  suspended.  If so then the TCB being removed from the event is
		  removed from the event and added to the xPendingReadyList. */

	/* Make sure interrupts do not access the queue event list. */
	prvLockQueue( pxQueue );

	/* It is important that interrupts to not access the event list of the
	queue being modified here.  Places where the event list is modified
	include:

		+ xQueueSendFromISR().  This checks the lock on the queue to see if
		  it has access.  If the queue is locked then the Tx lock count is
		  incremented to signify that a task waiting for data can be made ready
		  once the queue lock is removed.  If the queue is not locked then
		  a task can be moved from the event list, but will not be removed
		  from the delayed list or placed in the ready list until the scheduler
		  is unlocked.

		+ xQueueReceiveFromISR().  As per xQueueSendFromISR(). */

	/* If the queue is already full we may have to block. */
	do
	{
		if( prvIsQueueFull( pxQueue ) )
		{
			/* The queue is full - do we want to block or just leave without
			posting? */
			if( xTicksToWait > ( portTickType ) 0 )
			{
				/* We are going to place ourselves on the xTasksWaitingToSend
				event list, and will get woken should the delay expire, or
				space become available on the queue.

				As detailed above we do not require mutual exclusion on the
				event list as nothing else can modify it or the ready lists
				while we have the scheduler suspended and queue locked.

				It is possible that an ISR has removed data from the queue
				since we checked if any was available.  If this is the case
				then the data will have been copied from the queue, and the
				queue variables updated, but the event list will not yet have
				been checked to see if anything is waiting as the queue is
				locked. */
				vTaskPlaceOnEventList( &( pxQueue->xTasksWaitingToSend ), xTicksToWait );

				/* Force a context switch now as we are blocked.  We can do
				this from within a critical section as the task we are
				switching to has its own context.  When we return here (i.e.
				we unblock) we will leave the critical section as normal.

				It is possible that an ISR has caused an event on an unrelated
				and unlocked queue.  If this was the case then the event list
				for that queue will have been updated but the ready lists left
				unchanged - instead the readied task will have been added to
				the pending ready list. */
				taskENTER_CRITICAL();
				{
					/* We can safely unlock the queue and scheduler here as
					interrupts are disabled.  We must not yield with anything
					locked, but we can yield from within a critical section.

					Tasks that have been placed on the pending ready list cannot
					be tasks that are waiting for events on this queue.  See
					in comment xTaskRemoveFromEventList(). */
					prvUnlockQueue( pxQueue );

					/* Resuming the scheduler may cause a yield.  If so then
					there is no point yielding again here. */
					if( !xTaskResumeAll() )
					{
						taskYIELD();
					}

					/* We want to check to see if the queue is still full
					before leaving the critical section.  This is to prevent
					this task placing an item into the queue due to an
					interrupt making space on the queue between critical
					sections (when there might be a higher priority task
					blocked on the queue that cannot run yet because the
					scheduler gets suspended). */
					if( pxQueue->uxMessagesWaiting == pxQueue->uxLength )
					{
						/* We unblocked but there is no space in the queue,
						we probably timed out. */
						xReturn = errQUEUE_FULL;
					}

					/* Before leaving the critical section we have to ensure
					exclusive access again. */
					vTaskSuspendAll();
					prvLockQueue( pxQueue );
				}
				taskEXIT_CRITICAL();
			}
		}

		/* If xReturn is errQUEUE_FULL then we unblocked when the queue
		was still full.  Don't check it again now as it is possible that
		an interrupt has removed an item from the queue since we left the
		critical section and we don't want to write to the queue in case
		there is a task of higher priority blocked waiting for space to
		be available on the queue.  If this is the case the higher priority
		task will execute when the scheduler is unsupended. */
		if( xReturn != errQUEUE_FULL )
		{
			/* When we are here it is possible that we unblocked as space became
			available on the queue.  It is also possible that an ISR posted to
			the queue since we left the critical section, so it may be that
			again there is no space.  This would only happen if a task and ISR
			post onto the same queue. */
			taskENTER_CRITICAL();
			{
				if( pxQueue->uxMessagesWaiting < pxQueue->uxLength )
				{
					/* There is room in the queue, copy the data into the
					queue. */
					prvCopyQueueData( pxQueue, pvItemToQueue );
					xReturn = pdPASS;

					/* Update the TxLock count so prvUnlockQueue knows to check
					for tasks waiting for data to become available in the
					queue. */
					++( pxQueue->xTxLock );
				}
				else
				{
					xReturn = errQUEUE_FULL;
				}
			}
			taskEXIT_CRITICAL();
		}

		if( xReturn == errQUEUE_FULL )
		{
			if( xTicksToWait > 0 )
			{
				if( xTaskCheckForTimeOut( &xTimeOut, &xTicksToWait ) == pdFALSE )
				{
					/* Time remains: loop and try again. */
					xReturn = queueERRONEOUS_UNBLOCK;
				}
			}
		}
	}
	while( xReturn == queueERRONEOUS_UNBLOCK );

	prvUnlockQueue( pxQueue );
	xTaskResumeAll();

	return xReturn;
}
/*
 * Receive (or peek, if xJustPeeking is pdTRUE) an item from a queue.
 *
 * pxQueue      - handle of the queue to receive from.
 * pvBuffer     - destination for the received item (copied out by
 *                prvCopyDataFromQueue()).
 * xTicksToWait - maximum time in ticks to block waiting for data.
 * xJustPeeking - pdTRUE to leave the item on the queue after copying it.
 *
 * Returns pdPASS if an item was copied out, errQUEUE_EMPTY on timeout.
 *
 * The loop re-runs while xReturn == queueERRONEOUS_UNBLOCK, i.e. while the
 * task was unblocked before its full timeout expired but found the queue
 * empty again (another task/ISR consumed the data first).
 */
signed portBASE_TYPE xQueueGenericReceive( xQueueHandle pxQueue, void * const pvBuffer, portTickType xTicksToWait, portBASE_TYPE xJustPeeking )
{
signed portBASE_TYPE xReturn = pdTRUE;
xTimeOutType xTimeOut;
signed portCHAR *pcOriginalReadPosition;

    do
    {
        /* If there are no messages in the queue we may have to block. */
        if( xTicksToWait > ( portTickType ) 0 )
        {
            vTaskSuspendAll();
            prvLockQueue( pxQueue );

            if( xReturn == pdTRUE )
            {
                /* This is the first time through - we need to capture the
                time while the scheduler is locked to ensure we attempt to
                block at least once. */
                vTaskSetTimeOutState( &xTimeOut );
            }

            if( prvIsQueueEmpty( pxQueue ) )
            {
                /* Need to call xTaskCheckForTimeout again as time could
                have passed since it was last called if this is not the
                first time around this loop. */
                if( xTaskCheckForTimeOut( &xTimeOut, &xTicksToWait ) == pdFALSE )
                {
                    traceBLOCKING_ON_QUEUE_RECEIVE( pxQueue );

                    #if ( configUSE_MUTEXES == 1 )
                    {
                        if( pxQueue->uxQueueType == queueQUEUE_IS_MUTEX )
                        {
                            /* The queue is a mutex:  disinherit priority to
                            the current holder before this task blocks on it. */
                            portENTER_CRITICAL();
                                vTaskPriorityInherit( ( void * ) pxQueue->pxMutexHolder );
                            portEXIT_CRITICAL();
                        }
                    }
                    #endif

                    vTaskPlaceOnEventList( &( pxQueue->xTasksWaitingToReceive ), xTicksToWait );
                    prvUnlockQueue( pxQueue );
                    if( !xTaskResumeAll() )
                    {
                        taskYIELD();
                    }
                }
                else
                {
                    /* Timed out before blocking was attempted. */
                    prvUnlockQueue( pxQueue );
                    ( void ) xTaskResumeAll();
                }
            }
            else
            {
                /* Data is available - no need to block. */
                prvUnlockQueue( pxQueue );
                ( void ) xTaskResumeAll();
            }
        }

        /* By the time this point is reached the task may have been unblocked,
        but another task or interrupt could have taken the data first, so the
        message count must be re-checked inside a critical section. */
        taskENTER_CRITICAL();
        {
            if( pxQueue->uxMessagesWaiting > ( unsigned portBASE_TYPE ) 0 )
            {
                /* Remember our read position in case we are just peeking. */
                pcOriginalReadPosition = pxQueue->pcReadFrom;

                prvCopyDataFromQueue( pxQueue, pvBuffer );

                if( xJustPeeking == pdFALSE )
                {
                    traceQUEUE_RECEIVE( pxQueue );

                    /* We are actually removing data. */
                    --( pxQueue->uxMessagesWaiting );

                    #if ( configUSE_MUTEXES == 1 )
                    {
                        if( pxQueue->uxQueueType == queueQUEUE_IS_MUTEX )
                        {
                            /* Record the information required to implement
                            priority inheritance should it become necessary. */
                            pxQueue->pxMutexHolder = xTaskGetCurrentTaskHandle();
                        }
                    }
                    #endif

                    if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE )
                    {
                        /* Space was just created - wake a blocked sender, and
                        yield if it has a higher priority than this task. */
                        if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) == pdTRUE )
                        {
                            taskYIELD();
                        }
                    }
                }
                else
                {
                    traceQUEUE_PEEK( pxQueue );

                    /* We are not removing the data, so reset our read
                    pointer. */
                    pxQueue->pcReadFrom = pcOriginalReadPosition;

                    /* The data is being left in the queue, so see if there are
                    any other tasks waiting for the data. */
                    if( !listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) )
                    {
                        /* Tasks that are removed from the event list will get
                        added to the pending ready list as the scheduler is
                        still suspended. */
                        if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
                        {
                            /* The task waiting has a higher priority than
                            this task. */
                            taskYIELD();
                        }
                    }
                }

                xReturn = pdPASS;
            }
            else
            {
                xReturn = errQUEUE_EMPTY;
            }
        }
        taskEXIT_CRITICAL();

        if( xReturn == errQUEUE_EMPTY )
        {
            if( xTicksToWait > ( portTickType ) 0 )
            {
                if( xTaskCheckForTimeOut( &xTimeOut, &xTicksToWait ) == pdFALSE )
                {
                    /* Unblocked early with time remaining - go around again. */
                    xReturn = queueERRONEOUS_UNBLOCK;
                }
                else
                {
                    traceQUEUE_RECEIVE_FAILED( pxQueue );
                }
            }
            else
            {
                traceQUEUE_RECEIVE_FAILED( pxQueue );
            }
        }
    } while( xReturn == queueERRONEOUS_UNBLOCK );

    return xReturn;
}
/*
 * Alternative (Alt) API version of the generic queue send.
 *
 * The source code that implements the alternative (Alt) API is simpler
 * because it makes more use of critical sections.  This is the approach taken
 * by many other RTOSes, but FreeRTOS.org has the preferred fully featured API
 * too.  The fully featured API has more complex code that takes longer to
 * execute, but makes less use of critical sections.
 *
 * pxQueue       - handle of the queue to post to.
 * pvItemToQueue - item to copy into the queue.
 * xTicksToWait  - maximum time in ticks to block waiting for space.
 * xCopyPosition - queueSEND_TO_BACK / queueSEND_TO_FRONT style position,
 *                 forwarded to prvCopyDataToQueue().
 *
 * Returns pdPASS on success, errQUEUE_FULL on timeout.
 */
signed portBASE_TYPE xQueueAltGenericSend( xQueueHandle pxQueue, const void * const pvItemToQueue, portTickType xTicksToWait, portBASE_TYPE xCopyPosition )
{
signed portBASE_TYPE xReturn = pdPASS;
xTimeOutType xTimeOut;

    do
    {
        /* If xTicksToWait is zero then we are not going to block even if
        there is no room in the queue to post. */
        if( xTicksToWait > ( portTickType ) 0 )
        {
            portENTER_CRITICAL();
            {
                if( xReturn == pdPASS )
                {
                    /* This is the first time through - capture the time
                    inside the critical section to ensure we attempt to block
                    at least once. */
                    vTaskSetTimeOutState( &xTimeOut );
                }

                if( prvIsQueueFull( pxQueue ) )
                {
                    /* Need to call xTaskCheckForTimeout again as time could
                    have passed since it was last called if this is not the
                    first time around this loop. */
                    if( xTaskCheckForTimeOut( &xTimeOut, &xTicksToWait ) == pdFALSE )
                    {
                        traceBLOCKING_ON_QUEUE_SEND( pxQueue );
                        vTaskPlaceOnEventList( &( pxQueue->xTasksWaitingToSend ), xTicksToWait );

                        /* This will exit the critical section, then re-enter
                        when the task next runs. */
                        taskYIELD();
                    }
                }
            }
            portEXIT_CRITICAL();
        }

        /* Higher priority tasks and interrupts can execute during this time
        and could possible refill the queue - even if we unblocked because
        space became available. */
        taskENTER_CRITICAL();
        {
            /* Is there room on the queue now?  To be running we must be the
            highest priority task wanting to access the queue. */
            if( pxQueue->uxMessagesWaiting < pxQueue->uxLength )
            {
                traceQUEUE_SEND( pxQueue );
                prvCopyDataToQueue( pxQueue, pvItemToQueue, xCopyPosition );
                xReturn = pdPASS;

                /* If there was a task waiting for data to arrive on the
                queue then unblock it now. */
                if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
                {
                    if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) == pdTRUE )
                    {
                        /* The unblocked task has a priority higher than our
                        own so yield immediately. */
                        taskYIELD();
                    }
                }
            }
            else
            {
                /* Setting xReturn to errQUEUE_FULL will force its timeout to
                be re-evaluated.  This is necessary in case interrupts and
                higher priority tasks accessed the queue between this task
                being unblocked and subsequently attempting to write to the
                queue. */
                xReturn = errQUEUE_FULL;
            }
        }
        taskEXIT_CRITICAL();

        if( xReturn == errQUEUE_FULL )
        {
            if( xTicksToWait > ( portTickType ) 0 )
            {
                if( xTaskCheckForTimeOut( &xTimeOut, &xTicksToWait ) == pdFALSE )
                {
                    /* Unblocked early with time remaining - retry. */
                    xReturn = queueERRONEOUS_UNBLOCK;
                }
                else
                {
                    traceQUEUE_SEND_FAILED( pxQueue );
                }
            }
            else
            {
                traceQUEUE_SEND_FAILED( pxQueue );
            }
        }
    } while( xReturn == queueERRONEOUS_UNBLOCK );

    return xReturn;
}
/*
 * Receive an item from a queue, blocking for at most xTicksToWait ticks.
 *
 * This function is very similar to xQueueSend().  See comments within
 * xQueueSend() for a more detailed explanation.
 *
 * pxQueue      - handle of the queue to receive from.
 * pvBuffer     - destination for the received item; a NULL buffer is
 *                tolerated, in which case the item is consumed but not
 *                copied out.
 * xTicksToWait - maximum block time in ticks.
 *
 * Returns pdPASS if an item was received, errQUEUE_EMPTY on timeout.
 *
 * NOTE(review): the read pointer is advanced (with wrap-around at pcTail)
 * BEFORE the memcpy, so pcReadFrom points at the item being consumed only
 * after the increment - this ordering is assumed by prvCopyDataToQueue's
 * write side; confirm against the rest of this queue implementation.
 */
signed portBASE_TYPE xQueueReceive( xQueueHandle pxQueue, void *pvBuffer, portTickType xTicksToWait )
{
signed portBASE_TYPE xReturn = pdTRUE;
xTimeOutType xTimeOut;

    /* Make sure other tasks do not access the queue. */
    vTaskSuspendAll();

    /* Capture the current time status for future reference. */
    vTaskSetTimeOutState( &xTimeOut );

    /* Make sure interrupts do not access the queue. */
    prvLockQueue( pxQueue );

    do
    {
        /* If there are no messages in the queue we may have to block. */
        if( prvIsQueueEmpty( pxQueue ) )
        {
            /* There are no messages in the queue, do we want to block or just
            leave with nothing? */
            if( xTicksToWait > ( portTickType ) 0 )
            {
                vTaskPlaceOnEventList( &( pxQueue->xTasksWaitingToReceive ), xTicksToWait );
                taskENTER_CRITICAL();
                {
                    prvUnlockQueue( pxQueue );
                    if( !xTaskResumeAll() )
                    {
                        taskYIELD();
                    }

                    if( pxQueue->uxMessagesWaiting == ( unsigned portBASE_TYPE ) 0 )
                    {
                        /* We unblocked but the queue is empty.  We probably
                        timed out. */
                        xReturn = errQUEUE_EMPTY;
                    }

                    /* Re-acquire exclusive access before leaving the
                    critical section. */
                    vTaskSuspendAll();
                    prvLockQueue( pxQueue );
                }
                taskEXIT_CRITICAL();
            }
        }

        if( xReturn != errQUEUE_EMPTY )
        {
            taskENTER_CRITICAL();
            {
                if( pxQueue->uxMessagesWaiting > ( unsigned portBASE_TYPE ) 0 )
                {
                    /* Advance the read pointer, wrapping back to the start of
                    the storage area when the end is passed. */
                    pxQueue->pcReadFrom += pxQueue->uxItemSize;
                    if( pxQueue->pcReadFrom >= pxQueue->pcTail )
                    {
                        pxQueue->pcReadFrom = pxQueue->pcHead;
                    }
                    --( pxQueue->uxMessagesWaiting );

                    /* Only copy the data out if the caller supplied a
                    destination buffer. */
                    if(pvBuffer != (void*)0UL)
                    {
                        memcpy( ( void * ) pvBuffer, ( void * ) pxQueue->pcReadFrom, ( unsigned ) pxQueue->uxItemSize );
                    }

                    /* Increment the lock count so prvUnlockQueue knows to
                    check for tasks waiting for space to become available on
                    the queue. */
                    ++( pxQueue->xRxLock );
                    xReturn = pdPASS;
                }
                else
                {
                    xReturn = errQUEUE_EMPTY;
                }
            }
            taskEXIT_CRITICAL();
        }

        if( xReturn == errQUEUE_EMPTY )
        {
            if( xTicksToWait > 0 )
            {
                if( xTaskCheckForTimeOut( &xTimeOut, &xTicksToWait ) == pdFALSE )
                {
                    /* Unblocked early with time remaining - retry. */
                    xReturn = queueERRONEOUS_UNBLOCK;
                }
            }
        }
    } while( xReturn == queueERRONEOUS_UNBLOCK );

    /* We no longer require exclusive access to the queue. */
    prvUnlockQueue( pxQueue );
    xTaskResumeAll();

    return xReturn;
}
/*
 * Alternative (Alt) API version of the generic queue receive/peek.
 *
 * The source code that implements the alternative (Alt) API is simpler
 * because it makes more use of critical sections.  This is the approach taken
 * by many other RTOSes, but FreeRTOS.org has the preferred fully featured API
 * too.  The fully featured API has more complex code that takes longer to
 * execute, but makes less use of critical sections.
 *
 * pxQueue      - handle of the queue to receive from.
 * pvBuffer     - destination for the received item.
 * xTicksToWait - maximum block time in ticks.
 * xJustPeeking - pdTRUE to leave the item on the queue after copying it.
 *
 * Returns pdPASS if an item was copied out, errQUEUE_EMPTY on timeout.
 */
signed portBASE_TYPE xQueueAltGenericReceive( xQueueHandle pxQueue, void * const pvBuffer, portTickType xTicksToWait, portBASE_TYPE xJustPeeking )
{
signed portBASE_TYPE xReturn = pdTRUE;
xTimeOutType xTimeOut;
signed portCHAR *pcOriginalReadPosition;

    do
    {
        /* If there are no messages in the queue we may have to block. */
        if( xTicksToWait > ( portTickType ) 0 )
        {
            portENTER_CRITICAL();
            {
                if( xReturn == pdPASS )
                {
                    /* This is the first time through - capture the time
                    inside the critical section to ensure we attempt to block
                    at least once. */
                    vTaskSetTimeOutState( &xTimeOut );
                }

                if( prvIsQueueEmpty( pxQueue ) )
                {
                    /* Need to call xTaskCheckForTimeout again as time could
                    have passed since it was last called if this is not the
                    first time around this loop. */
                    if( xTaskCheckForTimeOut( &xTimeOut, &xTicksToWait ) == pdFALSE )
                    {
                        traceBLOCKING_ON_QUEUE_RECEIVE( pxQueue );

                        #if ( configUSE_MUTEXES == 1 )
                        {
                            if( pxQueue->uxQueueType == queueQUEUE_IS_MUTEX )
                            {
                                /* Already inside a critical section here, so
                                no extra portENTER_CRITICAL() is needed. */
                                vTaskPriorityInherit( ( void * ) pxQueue->pxMutexHolder );
                            }
                        }
                        #endif

                        vTaskPlaceOnEventList( &( pxQueue->xTasksWaitingToReceive ), xTicksToWait );

                        /* This will exit the critical section, then re-enter
                        when the task next runs. */
                        taskYIELD();
                    }
                }
            }
            portEXIT_CRITICAL();
        }

        /* The queue state may have changed while unblocked - re-check the
        message count inside a critical section. */
        taskENTER_CRITICAL();
        {
            if( pxQueue->uxMessagesWaiting > ( unsigned portBASE_TYPE ) 0 )
            {
                /* Remember our read position in case we are just peeking. */
                pcOriginalReadPosition = pxQueue->pcReadFrom;

                prvCopyDataFromQueue( pxQueue, pvBuffer );

                if( xJustPeeking == pdFALSE )
                {
                    traceQUEUE_RECEIVE( pxQueue );

                    /* We are actually removing data. */
                    --( pxQueue->uxMessagesWaiting );

                    #if ( configUSE_MUTEXES == 1 )
                    {
                        if( pxQueue->uxQueueType == queueQUEUE_IS_MUTEX )
                        {
                            /* Record the information required to implement
                            priority inheritance should it become necessary. */
                            pxQueue->pxMutexHolder = xTaskGetCurrentTaskHandle();
                        }
                    }
                    #endif

                    if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE )
                    {
                        /* Space was just created - wake a blocked sender, and
                        yield if it has a higher priority. */
                        if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) == pdTRUE )
                        {
                            taskYIELD();
                        }
                    }
                }
                else
                {
                    traceQUEUE_PEEK( pxQueue );

                    /* We are not removing the data, so reset our read
                    pointer. */
                    pxQueue->pcReadFrom = pcOriginalReadPosition;

                    /* The data is being left in the queue, so see if there are
                    any other tasks waiting for the data. */
                    if( !listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) )
                    {
                        /* Tasks that are removed from the event list will get
                        added to the pending ready list as the scheduler is
                        still suspended. */
                        if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
                        {
                            /* The task waiting has a higher priority that this
                            task. */
                            taskYIELD();
                        }
                    }
                }

                xReturn = pdPASS;
            }
            else
            {
                xReturn = errQUEUE_EMPTY;
            }
        }
        taskEXIT_CRITICAL();

        if( xReturn == errQUEUE_EMPTY )
        {
            if( xTicksToWait > ( portTickType ) 0 )
            {
                if( xTaskCheckForTimeOut( &xTimeOut, &xTicksToWait ) == pdFALSE )
                {
                    /* Unblocked early with time remaining - retry. */
                    xReturn = queueERRONEOUS_UNBLOCK;
                }
                else
                {
                    traceQUEUE_RECEIVE_FAILED( pxQueue );
                }
            }
            else
            {
                traceQUEUE_RECEIVE_FAILED( pxQueue );
            }
        }
    } while( xReturn == queueERRONEOUS_UNBLOCK );

    return xReturn;
}
/*
 * Send a UDP datagram, fragmenting it into ipMAX_UDP_PAYLOAD_LENGTH sized
 * chunks when the payload is larger than a single network buffer.
 *
 * xSocket                   - the (UDP) socket to send from; bound on demand.
 * pvBuffer                  - payload to send; with FREERTOS_ZERO_COPY set it
 *                             must be the payload pointer of a network buffer
 *                             previously obtained from the stack.
 * xTotalDataLength          - total number of payload bytes.
 * ulFlags                   - FREERTOS_ZERO_COPY is the only flag examined.
 * pxDestinationAddress      - destination IP address and port.
 * xDestinationAddressLength - unused; present for Berkeley API compatibility.
 *
 * Returns the number of bytes handed to the IP task, which may be less than
 * xTotalDataLength if a buffer or queue slot could not be obtained in time.
 *
 * The single block-time budget (xTimeOut/xTicksToWait) is shared across all
 * buffer allocations and queue posts made by this call.
 */
int32_t FreeRTOS_sendto( xSocket_t xSocket, const void *pvBuffer, size_t xTotalDataLength, uint32_t ulFlags, const struct freertos_sockaddr *pxDestinationAddress, socklen_t xDestinationAddressLength )
{
xNetworkBufferDescriptor_t *pxNetworkBuffer;
xIPFragmentParameters_t *pxFragmentParameters;
size_t xBytesToSend, xBytesRemaining;
xIPStackEvent_t xStackTxEvent = { eStackTxEvent, NULL };
extern xQueueHandle xNetworkEventQueue;
uint8_t *pucBuffer;
xTimeOutType xTimeOut;
TickType_t xTicksToWait;
uint16_t usFragmentOffset;
xFreeRTOS_Socket_t *pxSocket;

    pxSocket = ( xFreeRTOS_Socket_t * ) xSocket;

    /* The function prototype is designed to maintain the expected Berkeley
    sockets standard, but this implementation does not use all the
    parameters. */
    ( void ) xDestinationAddressLength;
    configASSERT( xNetworkEventQueue );
    configASSERT( pvBuffer );

    xBytesRemaining = xTotalDataLength;

    if( socketSOCKET_IS_BOUND( pxSocket ) == pdFALSE )
    {
        /* If the socket is not already bound to an address, bind it now.
        Passing NULL as the address parameter tells FreeRTOS_bind() to select
        the address to bind to. */
        FreeRTOS_bind( xSocket, NULL, 0 );
    }

    if( socketSOCKET_IS_BOUND( pxSocket ) != pdFALSE )
    {
        /* pucBuffer will be reset if this send turns out to be a zero copy
        send because in that case pvBuffer is actually a pointer to an
        xUserData_t structure, not the UDP payload. */
        pucBuffer = ( uint8_t * ) pvBuffer;
        vTaskSetTimeOutState( &xTimeOut );
        xTicksToWait = pxSocket->xSendBlockTime;

        /* The data being transmitted will be sent in
        ipMAX_UDP_PAYLOAD_LENGTH chunks if xDataLength is greater than the
        network buffer payload size.  Loop until all the data is sent. */
        while( xBytesRemaining > 0 )
        {
            if( xBytesRemaining > ipMAX_UDP_PAYLOAD_LENGTH )
            {
                /* Cap the amount being sent in this packet to the maximum
                UDP payload size.  This will be a multiple of 8 already,
                removing the need to check in the code. */
                xBytesToSend = ipMAX_UDP_PAYLOAD_LENGTH;
            }
            else
            {
                /* Send all remaining bytes - which may well be the total
                number of bytes if the packet was not chopped up. */
                xBytesToSend = xBytesRemaining;
            }

            /* If the zero copy flag is set, then the data is already in a
            network buffer.  Otherwise, get a new network buffer. */
            if( ( ulFlags & FREERTOS_ZERO_COPY ) == 0 )
            {
                /* Trim the remaining block time before possibly blocking on
                the buffer pool. */
                if( xTaskCheckForTimeOut( &xTimeOut, &xTicksToWait ) == pdTRUE )
                {
                    xTicksToWait = 0;
                }

                pxNetworkBuffer = pxNetworkBufferGet( xBytesToSend + sizeof( xUDPPacket_t ), xTicksToWait );
            }
            else
            {
                if( xTotalDataLength > ipMAX_UDP_PAYLOAD_LENGTH )
                {
                    /* The packet needs fragmenting, but zero copy buffers
                    cannot be fragmented. */
                    pxNetworkBuffer = NULL;
                }
                else
                {
                    /* When zero copy is used, pvBuffer is a pointer to the
                    payload of a buffer that has already been obtained from
                    the stack.  Obtain the network buffer pointer from the
                    buffer. */
                    pucBuffer = ( uint8_t * ) pvBuffer;
                    pucBuffer -= ( ipBUFFER_PADDING + sizeof( xUDPPacket_t ) );
                    pxNetworkBuffer = * ( ( xNetworkBufferDescriptor_t ** ) pucBuffer );
                }
            }

            if( pxNetworkBuffer != NULL )
            {
                /* Use the part of the network buffer that will be completed
                by the IP layer as temporary storage to pass extra
                information required by the IP layer. */
                pxFragmentParameters = ( xIPFragmentParameters_t * ) &( pxNetworkBuffer->pucEthernetBuffer[ ipFRAGMENTATION_PARAMETERS_OFFSET ] );
                pxFragmentParameters->ucSocketOptions = pxSocket->ucSocketOptions;

                if( xBytesRemaining > ipMAX_UDP_PAYLOAD_LENGTH )
                {
                    /* The packet is being chopped up, and more data will
                    follow. */
                    pxFragmentParameters->ucSocketOptions = ( pxSocket->ucSocketOptions | FREERTOS_NOT_LAST_IN_FRAGMENTED_PACKET );
                }

                if( xTotalDataLength > ipMAX_UDP_PAYLOAD_LENGTH )
                {
                    /* Let the IP layer know this packet has been chopped up,
                    and supply the IP layer with any addition information it
                    needs to make sense of it. */
                    pxFragmentParameters->ucSocketOptions |= FREERTOS_FRAGMENTED_PACKET;
                    usFragmentOffset = ( uint16_t ) ( xTotalDataLength - xBytesRemaining );
                    pxFragmentParameters->usFragmentedPacketOffset = usFragmentOffset;
                    pxFragmentParameters->usFragmentLength = ( uint16_t ) xBytesToSend;
                }
                else
                {
                    usFragmentOffset = 0;
                }

                /* Write the payload into the packet.  The IP layer is
                queried to find where in the IP payload the data should be
                written.  This is because the necessary offset is different
                for the first packet, because the first packet leaves space
                for a UDP header.  Note that this changes usFragmentOffset
                from the offset in the entire UDP packet, to the offset in
                the IP packet. */
                if( ( ulFlags & FREERTOS_ZERO_COPY ) == 0 )
                {
                    /* Only copy the data if it is not already in the
                    expected location. */
                    usFragmentOffset = ipGET_UDP_PAYLOAD_OFFSET_FOR_FRAGMENT( usFragmentOffset );
                    memcpy( ( void * ) &( pxNetworkBuffer->pucEthernetBuffer[ usFragmentOffset ] ), ( void * ) pucBuffer, xBytesToSend );
                }
                pxNetworkBuffer->xDataLength = xTotalDataLength;
                pxNetworkBuffer->usPort = pxDestinationAddress->sin_port;
                pxNetworkBuffer->usBoundPort = ( uint16_t ) socketGET_SOCKET_ADDRESS( pxSocket );
                pxNetworkBuffer->ulIPAddress = pxDestinationAddress->sin_addr;

                /* Tell the networking task that the packet needs sending. */
                xStackTxEvent.pvData = pxNetworkBuffer;

                /* Trim the remaining block time again before the potentially
                blocking queue post. */
                if( xTaskCheckForTimeOut( &xTimeOut, &xTicksToWait ) == pdTRUE )
                {
                    xTicksToWait = 0;
                }

                if( xQueueSendToBack( xNetworkEventQueue, &xStackTxEvent, xTicksToWait ) != pdPASS )
                {
                    /* If the buffer was allocated in this function, release
                    it.  (Zero copy buffers remain owned by the caller.) */
                    if( ( ulFlags & FREERTOS_ZERO_COPY ) == 0 )
                    {
                        vNetworkBufferRelease( pxNetworkBuffer );
                    }
                    iptraceSTACK_TX_EVENT_LOST( ipSTACK_TX_EVENT );
                    break;
                }

                /* Adjust counters ready to either exit the loop, or send
                another chunk of data. */
                xBytesRemaining -= xBytesToSend;
                pucBuffer += xBytesToSend;
            }
            else
            {
                /* If errno was available, errno would be set to
                FREERTOS_ENOPKTS.  As it is, the function must return the
                number of transmitted bytes, so the calling function knows
                how much data was actually sent. */
                break;
            }
        }
    }

    return ( xTotalDataLength - xBytesRemaining );
} /* Tested */
/*
 * Send a UDP datagram that must fit in a single network buffer (no
 * fragmentation support in this build).
 *
 * xSocket                   - the (UDP) socket to send from; bound on demand.
 * pvBuffer                  - payload to send; with FREERTOS_ZERO_COPY set it
 *                             must be the payload pointer of a network buffer
 *                             previously obtained from the stack.
 * xTotalDataLength          - number of payload bytes; must not exceed
 *                             ipMAX_UDP_PAYLOAD_LENGTH.
 * ulFlags                   - FREERTOS_ZERO_COPY is the only flag examined.
 * pxDestinationAddress      - destination IP address and port.
 * xDestinationAddressLength - unused; present for Berkeley API compatibility.
 *
 * Returns the number of bytes queued for transmission, or 0 on failure
 * (unbound socket, no buffer, queue full, or payload too long).
 */
int32_t FreeRTOS_sendto( xSocket_t xSocket, const void *pvBuffer, size_t xTotalDataLength, uint32_t ulFlags, const struct freertos_sockaddr *pxDestinationAddress, socklen_t xDestinationAddressLength )
{
xNetworkBufferDescriptor_t *pxNetworkBuffer;
xIPStackEvent_t xStackTxEvent = { eStackTxEvent, NULL };
extern xQueueHandle xNetworkEventQueue;
xTimeOutType xTimeOut;
TickType_t xTicksToWait;
int32_t lReturn = 0;
xFreeRTOS_Socket_t *pxSocket;
uint8_t *pucBuffer;

    pxSocket = ( xFreeRTOS_Socket_t * ) xSocket;

    /* The function prototype is designed to maintain the expected Berkeley
    sockets standard, but this implementation does not use all the
    parameters. */
    ( void ) xDestinationAddressLength;
    configASSERT( xNetworkEventQueue );
    configASSERT( pvBuffer );

    if( xTotalDataLength <= ipMAX_UDP_PAYLOAD_LENGTH )
    {
        if( socketSOCKET_IS_BOUND( pxSocket ) == pdFALSE )
        {
            /* If the socket is not already bound to an address, bind it now.
            Passing NULL as the address parameter tells FreeRTOS_bind() to
            select the address to bind to. */
            FreeRTOS_bind( pxSocket, NULL, 0 );
        }

        if( socketSOCKET_IS_BOUND( pxSocket ) != pdFALSE )
        {
            xTicksToWait = pxSocket->xSendBlockTime;

            if( ( ulFlags & FREERTOS_ZERO_COPY ) == 0 )
            {
                /* Zero copy is not set, so obtain a network buffer into
                which the payload will be copied. */
                vTaskSetTimeOutState( &xTimeOut );
                pxNetworkBuffer = pxNetworkBufferGet( xTotalDataLength + sizeof( xUDPPacket_t ), xTicksToWait );

                if( pxNetworkBuffer != NULL )
                {
                    memcpy( ( void * ) &( pxNetworkBuffer->pucEthernetBuffer[ ipUDP_PAYLOAD_OFFSET ] ), ( void * ) pvBuffer, xTotalDataLength );

                    if( xTaskCheckForTimeOut( &xTimeOut, &xTicksToWait ) == pdTRUE )
                    {
                        /* The entire block time has been used up. */
                        xTicksToWait = 0;
                    }
                }
            }
            else
            {
                /* When zero copy is used, pvBuffer is a pointer to the
                payload of a buffer that has already been obtained from the
                stack.  Obtain the network buffer pointer from the buffer. */
                pucBuffer = ( uint8_t * ) pvBuffer;
                pucBuffer -= ( ipBUFFER_PADDING + sizeof( xUDPPacket_t ) );
                pxNetworkBuffer = * ( ( xNetworkBufferDescriptor_t ** ) pucBuffer );
            }

            if( pxNetworkBuffer != NULL )
            {
                pxNetworkBuffer->xDataLength = xTotalDataLength;
                pxNetworkBuffer->usPort = pxDestinationAddress->sin_port;
                pxNetworkBuffer->usBoundPort = ( uint16_t ) socketGET_SOCKET_ADDRESS( pxSocket );
                pxNetworkBuffer->ulIPAddress = pxDestinationAddress->sin_addr;

                /* The socket options are passed to the IP layer in the space
                that will eventually get used by the Ethernet header. */
                pxNetworkBuffer->pucEthernetBuffer[ ipSOCKET_OPTIONS_OFFSET ] = pxSocket->ucSocketOptions;

                /* Tell the networking task that the packet needs sending. */
                xStackTxEvent.pvData = pxNetworkBuffer;

                if( xQueueSendToBack( xNetworkEventQueue, &xStackTxEvent, xTicksToWait ) != pdPASS )
                {
                    /* If the buffer was allocated in this function, release
                    it.  (Zero copy buffers remain owned by the caller.) */
                    if( ( ulFlags & FREERTOS_ZERO_COPY ) == 0 )
                    {
                        vNetworkBufferRelease( pxNetworkBuffer );
                    }
                    iptraceSTACK_TX_EVENT_LOST( ipSTACK_TX_EVENT );
                }
                else
                {
                    lReturn = ( int32_t ) xTotalDataLength;
                }
            }
            else
            {
                /* If errno was available, errno would be set to
                FREERTOS_ENOPKTS.  As it is, the function must return the
                number of transmitted bytes, so the calling function knows
                how much data was actually sent. */
                iptraceNO_BUFFER_FOR_SENDTO();
            }
        }
        else
        {
            iptraceSENDTO_SOCKET_NOT_BOUND();
        }
    }
    else
    {
        /* The data is longer than the available buffer space.  Setting
        ipconfigCAN_FRAGMENT_OUTGOING_PACKETS to 1 may allow this packet to
        be sent. */
        iptraceSENDTO_DATA_TOO_LONG();
    }

    return lReturn;
} /* Tested */
/*
 * ======== MessageQCopy_send ========
 *
 * Send a message to the remote (host) processor over the to-host virtqueue.
 *
 * dstEndpt - destination endpoint address written into the message header.
 * srcEndpt - source endpoint address written into the message header.
 * data     - payload bytes to copy into the ring buffer.
 * len      - payload length; silently truncated to RP_MSG_PAYLOAD_SIZE.
 * timeout  - maximum time in ticks to wait for a free ring buffer; 0 means
 *            fail immediately with MessageQCopy_E_TIMEOUT.
 *
 * Returns MessageQCopy_S_SUCCESS, or MessageQCopy_E_TIMEOUT if no buffer
 * became available within the timeout.
 *
 * NOTE(review): waiters poll in at-most-5-tick slices so that the ring can
 * be re-checked periodically; on the timeout-return path waitAvaileBuf is
 * decremented but PROHIBIT_VRING_NOTIFY is not re-evaluated - presumably
 * intentional (notifications stay enabled for remaining waiters); confirm.
 */
Int MessageQCopy_send(UInt32 dstEndpt, UInt32 srcEndpt, Ptr data, UInt16 len, portTickType timeout)
{
    Int16 token;
    MessageQCopy_Msg msg;
    Int length;
    xTimeOutType tmchk;

    /* Send to remote processor: */
    taskENTER_CRITICAL(&virtQueLock);
    token = VirtQueue_getAvailBuf(vQueToHost, (Void **)&msg, &length);

    if(token < 0){
        if(timeout == 0){
            /* Non-blocking call and no buffer available - give up now. */
            taskEXIT_CRITICAL(&virtQueLock);
            return MessageQCopy_E_TIMEOUT;
        }

        /* Record entry time so the total wait can be bounded across the
        repeated short sleeps below. */
        vTaskSetTimeOutState(&tmchk);
        waitAvaileBuf++;
        for(;;){
            portTickType waitEvent;

            /* Because this function is not an application code, */
            /* there is not the problem with using vTaskPlaceOnEventList. */
            /* Sleep in slices of at most 5 ticks so the ring is re-polled
            periodically even if no kick arrives. */
            waitEvent = (timeout > 5) ? 5 : timeout;
            vTaskPlaceOnEventList(&availBufList, waitEvent);
            portYIELD_WITHIN_API();
            taskEXIT_CRITICAL(&virtQueLock);
            /* ----- preemption window: other tasks may run here ----- */
            taskENTER_CRITICAL(&virtQueLock);
            token = VirtQueue_getAvailBuf(vQueToHost, (Void **)&msg, &length);
            if(token < 0){
                if(xTaskCheckForTimeOut(&tmchk, &timeout) == pdFALSE){
                    /* Time remains - wait again with the reduced timeout. */
                    continue;
                }
                /* Full timeout consumed and still no buffer. */
                waitAvaileBuf--;
                taskEXIT_CRITICAL(&virtQueLock);
                return MessageQCopy_E_TIMEOUT;
            }
            waitAvaileBuf--;
            if(waitAvaileBuf == 0){
                /* No need to know be kicked about added buffers anymore */
                PROHIBIT_VRING_NOTIFY(vQueToHost);
            }
            break;
        }
    }

    /* Clamp the payload to the ring buffer's capacity. */
    if(len > RP_MSG_PAYLOAD_SIZE){
        len = RP_MSG_PAYLOAD_SIZE;
    }

    /* Copy the payload and set message header: */
    memcpy(msg->payload, data, len);
    msg->dataLen = len;
    msg->dstAddr = dstEndpt;
    msg->srcAddr = srcEndpt;
    msg->flags = 0;
    msg->reserved = 0;

    VirtQueue_addUsedBuf(vQueToHost, token, RP_MSG_BUF_SIZE);
    VirtQueue_kick(vQueToHost);
    taskEXIT_CRITICAL(&virtQueLock);
    return MessageQCopy_S_SUCCESS;
}