/**
 * Receive packet callback.
 *
 * @param[in] rxPacketHandle Contains a handle that points to the memory that
 * the packet was stored in. This handle will be the same as something
 * returned by the RAILCb_AllocateMemory() API. To convert this into a receive
 * packet info struct use the memoryPtrFromHandle() function (as done below).
 *
 * This function is called whenever a packet is received and returns to you the
 * memory handle for where this received packet and its appended information was
 * stored. After this callback is done we will release the memory handle so you
 * must somehow increment a reference count or copy the data out within this
 * function.
 */
void RAILCb_RxPacketReceived(void *rxPacketHandle)
{
    /* Resolve the opaque handle to the packet info stored by the allocator. */
    RAIL_RxPacketInfo_t* rxPacketInfo = (RAIL_RxPacketInfo_t*) memoryPtrFromHandle(rxPacketHandle);

    /* Only packets that passed the CRC check are processed. */
    if(rxPacketInfo->appendedInfo.crcStatus) {
        /* If this is an ACK, deal with it.
         * Heuristic: a 4-byte frame whose sequence number (byte 3) matches the
         * sequence of our outstanding transmission while we are waiting for an
         * ACK is treated as that ACK. */
        if( rxPacketInfo->dataLength == 4
            && rxPacketInfo->dataPtr[3] == (current_tx_sequence)
            && waiting_for_ack) {
            /* Tell the radio to not ACK an ACK */
            RAIL_AutoAckCancelAck();
            waiting_for_ack = false;
            /* Save the pending bit (bit 4 of the frame control's first byte) */
            last_ack_pending_bit = (rxPacketInfo->dataPtr[1] & (1 << 4)) != 0;
            /* Tell the stack we got an ACK */
            tr_debug("rACK\n");
            device_driver.phy_tx_done_cb( rf_radio_driver_id,
                                          current_tx_handle,
                                          PHY_LINK_TX_DONE,
                                          1,
                                          1);
        } else {
            /* Figure out whether we want to not ACK this packet */
            /*
             * dataPtr[0] = length
             * dataLength = length w/o length byte
             * dataptr[1:2] = 0x61C9 -> 0b01100001 0b1100 1001 (version 1, dest 3, src 2, ACKreq, type = 1)
             * [1] => b[0:2] frame type, b[3] = security enabled, b[4] = frame pending, b[5] = ACKreq, b[6] = intrapan
             * [2] => b[2:3] destmode, b[4:5] version, b[6:7] srcmode
             */
            if( (rxPacketInfo->dataPtr[1] & (1 << 5)) == 0 ) {
                /* Cancel the ACK if the sender did not request one */
                RAIL_AutoAckCancelAck();
            }
            tr_debug("rPKT %d\n", rxPacketInfo->dataLength);
            /* Feed the received packet into the stack.
             * Payload starts past the length byte; length excludes it too. */
            device_driver.phy_rx_cb(rxPacketInfo->dataPtr + 1,
                                    rxPacketInfo->dataLength -
                                    1,
                                    //TODO: take a new RAIL release that exposes LQI, or have LQI as function of RSSI
                                    255,
                                    rxPacketInfo->appendedInfo.rssiLatch,
                                    rf_radio_driver_id);
        }
    }
}
/**
 * Callback fired when searching for an ACK has timed out.
 *
 * Reports PHY_LINK_TX_FAIL for the in-flight transmission — but only if we
 * were actually still waiting for an ACK when the timeout hit.
 *
 * @return void
 */
void RAILCb_RxAckTimeout(void)
{
    if (!waiting_for_ack) {
        return;
    }
    waiting_for_ack = false;
    device_driver.phy_tx_done_cb(rf_radio_driver_id,
                                 current_tx_handle,
                                 PHY_LINK_TX_FAIL,
                                 1,
                                 1);
}
/**
 * Interrupt level callback signalling that a packet went out on air.
 *
 * @param txPacketInfo Information about the transmitted packet. Only valid
 *        for the duration of this callback (unused here).
 */
void RAILCb_TxPacketSent(RAIL_TxPacketInfo_t *txPacketInfo)
{
    if (device_driver.phy_tx_done_cb != NULL) {
        /* Normally we'd switch on ACK requested here, but Nanostack does that
         * for us; the retry counts are irrelevant on success. */
        device_driver.phy_tx_done_cb(rf_radio_driver_id,
                                     current_tx_handle,
                                     PHY_LINK_TX_SUCCESS,
                                     1,
                                     1);
    }

    /* Remember when we last transmitted, then fall back to receive mode. */
    last_tx = RAIL_GetTime();
    radio_state = RADIO_RX;
}
/** * Interrupt level callback * Allows the user finer granularity in tx radio events. * * Radio Statuses: * RAIL_TX_CONFIG_BUFFER_UNDERFLOW * RAIL_TX_CONFIG_CHANNEL_BUSY * * @param[in] status A bit field that defines what event caused the callback */ void RAILCb_TxRadioStatus(uint8_t status) { tr_debug("Packet TX error %d\n", status); if(device_driver.phy_tx_done_cb != NULL) { if(status == RAIL_TX_CONFIG_BUFFER_UNDERFLOW || status == RAIL_TX_CONFIG_CHANNEL_BUSY || status == RAIL_TX_CONFIG_TX_ABORTED || status == RAIL_TX_CONFIG_TX_BLOCKED) { waiting_for_ack = false; device_driver.phy_tx_done_cb( rf_radio_driver_id, current_tx_handle, PHY_LINK_CCA_FAIL, 8, 1); } } radio_state = RADIO_RX; }
/** * Interrupt level callback * Allows the user finer granularity in tx radio events. * * Radio Statuses: * RAIL_TX_CONFIG_BUFFER_UNDERFLOW * RAIL_TX_CONFIG_CHANNEL_BUSY * * @param[in] status A bit field that defines what event caused the callback */ void RAILCb_TxRadioStatus(uint8_t status) { if(device_driver.phy_tx_done_cb != NULL) { if(status == RAIL_TX_CONFIG_BUFFER_UNDERFLOW || status == RAIL_TX_CONFIG_CHANNEL_BUSY || status == RAIL_TX_CONFIG_TX_ABORTED || status == RAIL_TX_CONFIG_TX_BLOCKED) { waiting_for_ack = false; #ifdef MBED_CONF_RTOS_PRESENT osSignalSet(rf_thread_id, SL_TX_ERR); } #else device_driver.phy_tx_done_cb(rf_radio_driver_id, current_tx_handle, PHY_LINK_CCA_FAIL, 8, 1); } else { SL_DEBUG_PRINT("Packet TX error %d\n", status); } #endif }
static void rf_thread_loop(const void *arg) { SL_DEBUG_PRINT("rf_thread_loop: starting (id: %d)\n", rf_thread_id); for (;;) { osEvent event = osSignalWait(0, osWaitForever); if (event.status != osEventSignal) { continue; } platform_enter_critical(); if (event.value.signals & SL_RX_DONE) { while(rx_queue_tail != rx_queue_head) { void* handle = (void*) rx_queue[rx_queue_tail]; RAIL_RxPacketInfo_t* info = (RAIL_RxPacketInfo_t*) memoryPtrFromHandle(handle); device_driver.phy_rx_cb( info->dataPtr + 1, info->dataLength - 1, info->appendedInfo.lqi, info->appendedInfo.rssiLatch, rf_radio_driver_id); memoryFree(handle); rx_queue[rx_queue_tail] = NULL; rx_queue_tail = (rx_queue_tail + 1) % RF_QUEUE_SIZE; } } else if (event.value.signals & SL_TX_DONE) { device_driver.phy_tx_done_cb(rf_radio_driver_id, current_tx_handle, PHY_LINK_TX_SUCCESS, 1, 1); } else if (event.value.signals & SL_ACK_RECV) { device_driver.phy_tx_done_cb( rf_radio_driver_id, current_tx_handle, (event.value.signals & SL_ACK_PEND) ? 
PHY_LINK_TX_DONE_PENDING : PHY_LINK_TX_DONE, 1, 1); } else if (event.value.signals & SL_ACK_TIMEOUT) { waiting_for_ack = false; device_driver.phy_tx_done_cb(rf_radio_driver_id, current_tx_handle, PHY_LINK_TX_FAIL, 1, 1); } else if(event.value.signals & SL_TX_ERR) { device_driver.phy_tx_done_cb( rf_radio_driver_id, current_tx_handle, PHY_LINK_CCA_FAIL, 8, 1); } else if(event.value.signals & SL_CAL_REQ) { SL_DEBUG_PRINT("rf_thread_loop: SL_CAL_REQ signal received (unhandled)\n"); } else if(event.value.signals & SL_RXFIFO_ERR) { SL_DEBUG_PRINT("rf_thread_loop: SL_RXFIFO_ERR signal received (unhandled)\n"); } else if(event.value.signals & SL_TXFIFO_ERR) { SL_DEBUG_PRINT("rf_thread_loop: SL_TXFIFO_ERR signal received (unhandled)\n"); } else if(event.value.signals & SL_QUEUE_FULL) { SL_DEBUG_PRINT("rf_thread_loop: SL_QUEUE_FULL signal received (packet dropped)\n"); } else { SL_DEBUG_PRINT("rf_thread_loop unhandled event status: %d value: %d\n", event.status, event.value.signals); } platform_exit_critical(); } }
/**
 * Event handler for RAIL-fired events. Usually gets called from IRQ context.
 * Due to IRQ latency and tailchaining, multiple event flags might be set
 * simultaneously, so we have to check all of them.
 *
 * RAIL_Events_t is a 64-bit event mask, but a thread only supports 32 signal
 * bits, so each set bit is translated into our own custom signal mask (RTOS
 * builds) or handled inline (bare-metal builds).
 *
 * BUGFIX(review): the non-RTOS branch of RAIL_EVENT_TX_FIFO_ALMOST_EMPTY_SHIFT
 * printed the undeclared identifier `spaceAvailable`; it now calls
 * RAIL_GetTxFifoSpaceAvailable(gRailHandle) as the adjacent comment intended.
 *
 * @param railHandle Handle the events belong to; events for any other handle
 *        are ignored.
 * @param events     64-bit RAIL event bit mask.
 */
static void radioEventHandler(RAIL_Handle_t railHandle, RAIL_Events_t events)
{
    if (railHandle != gRailHandle)
        return;

#ifdef MBED_CONF_RTOS_PRESENT
    /* No thread to signal yet: drop events until the RF thread is up. */
    if(rf_thread_id == 0) {
        return;
    }
#endif

    /* Walk the event mask bit by bit, dispatching on the bit index. */
    size_t index = 0;
    do {
        if (events & 1ull) {
            switch(index) {
            /*
             * Occurs when the AGC averaged RSSI is done, in response to
             * RAIL_StartAverageRssi(). Call RAIL_GetAverageRssi to get the result.
             */
            case RAIL_EVENT_RSSI_AVERAGE_DONE_SHIFT:
#ifdef MBED_CONF_RTOS_PRESENT
                osSignalSet(rf_thread_id, SL_RSSI_DONE);
#else
                SL_DEBUG_PRINT("RSSI done (%d)\n", RAIL_GetAverageRssi(gRailHandle));
#endif
                break;
            /*
             * Notifies the application when searching for an ACK has timed out.
             */
            case RAIL_EVENT_RX_ACK_TIMEOUT_SHIFT:
                if(waiting_for_ack) {
                    waiting_for_ack = false;
#ifdef MBED_CONF_RTOS_PRESENT
                    osSignalSet(rf_thread_id, SL_ACK_TIMEOUT);
#else
                    device_driver.phy_tx_done_cb(rf_radio_driver_id,
                                                 current_tx_handle,
                                                 PHY_LINK_TX_FAIL,
                                                 1,
                                                 1);
#endif
                }
                break;
            /*
             * Occurs when the receive FIFO exceeds the configured threshold
             * value. Call RAIL_GetRxFifoBytesAvailable for the byte count.
             */
            case RAIL_EVENT_RX_FIFO_ALMOST_FULL_SHIFT:
#ifdef MBED_CONF_RTOS_PRESENT
                osSignalSet(rf_thread_id, SL_RXFIFO_ERR);
#else
                SL_DEBUG_PRINT("RX near full (%d)\n", RAIL_GetRxFifoBytesAvailable(gRailHandle));
#endif
                break;
            /*
             * Occurs whenever a packet is received.
             * Call RAIL_GetRxPacketInfo() to get basic packet information along
             * with a handle for subsequent use with RAIL_PeekRxPacket(),
             * RAIL_GetRxPacketDetails(), RAIL_HoldRxPacket(), and
             * RAIL_ReleaseRxPacket() as needed.
             *
             * If RAIL_RX_OPTION_IGNORE_CRC_ERRORS is set, this event also occurs
             * for packets with CRC errors.
             */
            case RAIL_EVENT_RX_PACKET_RECEIVED_SHIFT:
                {
                    /* Get RX packet that got signaled */
                    RAIL_RxPacketInfo_t rxPacketInfo;
                    RAIL_RxPacketHandle_t rxHandle = RAIL_GetRxPacketInfo(gRailHandle,
                                                                          RAIL_RX_PACKET_HANDLE_NEWEST,
                                                                          &rxPacketInfo
                                                                          );

                    /* Only process the packet if it had a correct CRC */
                    if(rxPacketInfo.packetStatus == RAIL_RX_PACKET_READY_SUCCESS) {
                        uint8_t header[4];
                        RAIL_PeekRxPacket(gRailHandle, rxHandle, header, 4, 0);

                        /* If this is an ACK, deal with it early */
                        if( (header[0] == 5) &&
                            (header[3] == current_tx_sequence) &&
                            waiting_for_ack) {
                            /* Tell the radio to not ACK an ACK */
                            RAIL_CancelAutoAck(gRailHandle);
                            waiting_for_ack = false;
                            /* Save the pending bit */
                            last_ack_pending_bit = (header[1] & (1 << 4)) != 0;
                            /* Release packet */
                            RAIL_ReleaseRxPacket(gRailHandle, rxHandle);
                            /* Tell the stack we got an ACK */
#ifdef MBED_CONF_RTOS_PRESENT
                            osSignalSet(rf_thread_id, SL_ACK_RECV | (last_ack_pending_bit ? SL_ACK_PEND : 0));
#else
                            SL_DEBUG_PRINT("rACK\n");
                            device_driver.phy_tx_done_cb(rf_radio_driver_id,
                                                         current_tx_handle,
                                                         last_ack_pending_bit ? PHY_LINK_TX_DONE_PENDING : PHY_LINK_TX_DONE,
                                                         1,
                                                         1);
#endif
                        } else {
                            /* Get RSSI and LQI information about this packet */
                            RAIL_RxPacketDetails_t rxPacketDetails;
                            rxPacketDetails.timeReceived.timePosition = RAIL_PACKET_TIME_DEFAULT;
                            rxPacketDetails.timeReceived.totalPacketBytes = 0;
                            RAIL_GetRxPacketDetails(gRailHandle, rxHandle, &rxPacketDetails);

#ifdef MBED_CONF_RTOS_PRESENT
                            /* Drop this packet if we're out of space */
                            if (((rx_queue_head + 1) % RF_QUEUE_SIZE) == rx_queue_tail) {
                                osSignalSet(rf_thread_id, SL_QUEUE_FULL);
                                RAIL_ReleaseRxPacket(gRailHandle, rxHandle);
                                break;
                            }

                            /* Copy into queue */
                            uint8_t* packetBuffer = (uint8_t*)rx_queue[rx_queue_head];
#else
                            /* Packet going temporarily onto stack for bare-metal apps */
                            uint8_t packetBuffer[MAC_PACKET_MAX_LENGTH + MAC_PACKET_INFO_LENGTH];
#endif
                            /* First two bytes are RSSI and LQI, respectively */
                            packetBuffer[MAC_PACKET_OFFSET_RSSI] = (uint8_t)rxPacketDetails.rssi;
                            packetBuffer[MAC_PACKET_OFFSET_LQI] = (uint8_t)rxPacketDetails.lqi;

                            /* Copy packet payload from circular FIFO into contiguous memory */
                            RAIL_CopyRxPacket(&packetBuffer[MAC_PACKET_INFO_LENGTH], &rxPacketInfo);

                            /* Release RAIL resources early */
                            RAIL_ReleaseRxPacket(gRailHandle, rxHandle);

                            /* Figure out whether we want to not ACK this packet */
                            /*
                             * packetBuffer[0] = length
                             * dataLength = length w/o length byte
                             * packetBuffer[1:2] = 0x61C9 -> 0b01100001 0b1100 1001 (version 1, dest 3, src 2, ACKreq, type = 1)
                             * [1] => b[0:2] frame type, b[3] = security enabled, b[4] = frame pending, b[5] = ACKreq, b[6] = intrapan
                             * [2] => b[2:3] destmode, b[4:5] version, b[6:7] srcmode
                             */
                            if( (packetBuffer[MAC_PACKET_INFO_LENGTH + 1] & (1 << 5)) == 0 ) {
                                /* Cancel the ACK if the sender did not request one */
                                RAIL_CancelAutoAck(gRailHandle);
                            }

#ifdef MBED_CONF_RTOS_PRESENT
                            rx_queue_head = (rx_queue_head + 1) % RF_QUEUE_SIZE;
                            osSignalSet(rf_thread_id, SL_RX_DONE);
#else
                            SL_DEBUG_PRINT("rPKT %d\n", packetBuffer[MAC_PACKET_INFO_LENGTH] - 2);
                            device_driver.phy_rx_cb(&packetBuffer[MAC_PACKET_INFO_LENGTH + 1], /* Data payload for Nanostack starts at FCS */
                                                    packetBuffer[MAC_PACKET_INFO_LENGTH] - 2, /* Payload length is part of frame, but need to subtract CRC bytes */
                                                    packetBuffer[MAC_PACKET_OFFSET_LQI], /* LQI in second byte */
                                                    packetBuffer[MAC_PACKET_OFFSET_RSSI], /* RSSI in first byte */
                                                    rf_radio_driver_id);
#endif
                        }
                    }
                }
                break;
            /* Event for preamble detection */
            case RAIL_EVENT_RX_PREAMBLE_DETECT_SHIFT:
                break;
            /* Event for detection of the first sync word */
            case RAIL_EVENT_RX_SYNC1_DETECT_SHIFT:
                break;
            /* Event for detection of the second sync word */
            case RAIL_EVENT_RX_SYNC2_DETECT_SHIFT:
                break;
            /* Event for detection of frame errors
             *
             * For efr32xg1x parts, frame errors include violations of variable
             * length minimum/maximum limits, frame coding errors, and CRC errors.
             * If RAIL_RX_OPTION_IGNORE_CRC_ERRORS is set, RAIL_EVENT_RX_FRAME_ERROR
             * will not occur for CRC errors.
             */
            case RAIL_EVENT_RX_FRAME_ERROR_SHIFT:
                break;
            /* Occurs when RX buffer is full */
            case RAIL_EVENT_RX_FIFO_OVERFLOW_SHIFT:
                break;
            /* Occurs when a packet is address filtered */
            case RAIL_EVENT_RX_ADDRESS_FILTERED_SHIFT:
                break;
            /* Occurs when an RX event times out */
            case RAIL_EVENT_RX_TIMEOUT_SHIFT:
                break;
            /* Occurs when the scheduled RX window ends */
            case RAIL_EVENT_RX_SCHEDULED_RX_END_SHIFT:
                break;
            /* An event for an aborted packet. Triggered when a more specific
             * reason isn't known for why the packet was aborted (e.g.
             * RAIL_EVENT_RX_ADDRESS_FILTERED). */
            case RAIL_EVENT_RX_PACKET_ABORTED_SHIFT:
                break;
            /* Occurs when the packet has passed any configured address and
             * frame filtering options. */
            case RAIL_EVENT_RX_FILTER_PASSED_SHIFT:
                break;
            /* Occurs when modem timing is lost */
            case RAIL_EVENT_RX_TIMING_LOST_SHIFT:
                break;
            /* Occurs when modem timing is detected */
            case RAIL_EVENT_RX_TIMING_DETECT_SHIFT:
                break;
            /*
             * Indicates a Data Request is being received when using IEEE 802.15.4
             * functionality. This occurs when the command byte of an incoming
             * frame is for a data request, which requests an ACK. This callback
             * is called before the packet is fully received, to allow the node
             * more time to decide whether to set frame pending in the outgoing
             * ACK. Only occurs if RAIL IEEE 802.15.4 functionality is enabled.
             *
             * Call RAIL_IEEE802154_GetAddress to get the source address of the
             * packet.
             */
            case RAIL_EVENT_IEEE802154_DATA_REQUEST_COMMAND_SHIFT:
                if(data_pending) {
                    RAIL_IEEE802154_SetFramePending(gRailHandle);
                }
                break;

            // TX Event Bitmasks

            /*
             * Occurs when the transmit FIFO falls under the configured threshold
             * value. Only occurs on a rising edge across the threshold; does not
             * occur on initialization or after resetting the transmit FIFO with
             * RAIL_ResetFifo().
             * Call RAIL_GetTxFifoSpaceAvailable for the number of bytes available
             * in the transmit FIFO at the time of the callback dispatch.
             */
            case RAIL_EVENT_TX_FIFO_ALMOST_EMPTY_SHIFT:
#ifdef MBED_CONF_RTOS_PRESENT
                osSignalSet(rf_thread_id, SL_TXFIFO_ERR);
#else
                SL_DEBUG_PRINT("TX near empty (%d)\n", RAIL_GetTxFifoSpaceAvailable(gRailHandle));
#endif
                break;
            /*
             * Interrupt level event to signify when the packet was sent.
             * Call RAIL_GetTxPacketDetails() for information about the packet
             * that was transmitted.
             * @note that structure is only valid during the timeframe of the
             * RAIL_Config_t::eventsCallback.
             */
            case RAIL_EVENT_TX_PACKET_SENT_SHIFT:
#ifdef MBED_CONF_RTOS_PRESENT
                osSignalSet(rf_thread_id, SL_TX_DONE);
#else
                if(device_driver.phy_tx_done_cb != NULL) {
                    device_driver.phy_tx_done_cb(rf_radio_driver_id,
                                                 current_tx_handle,
                                                 // Normally we'd switch on ACK requested here, but Nanostack does that for us.
                                                 PHY_LINK_TX_SUCCESS,
                                                 // Succeeded, so how many times we tried is really not relevant.
                                                 1,
                                                 1);
                }
#endif
                last_tx = RAIL_GetTime();
                radio_state = RADIO_RX;
                break;
            /*
             * An interrupt level event to signify when the ACK packet was sent.
             * @note This structure is only valid during the timeframe of the
             * RAIL_Config_t::eventsCallback.
             */
            case RAIL_EVENT_TXACK_PACKET_SENT_SHIFT:
                break;
            /* Occurs when a TX is aborted by the user */
            case RAIL_EVENT_TX_ABORTED_SHIFT:
#ifdef MBED_CONF_RTOS_PRESENT
                osSignalSet(rf_thread_id, SL_TX_TIMEOUT);
#else
                device_driver.phy_tx_done_cb(rf_radio_driver_id,
                                             current_tx_handle,
                                             PHY_LINK_CCA_FAIL,
                                             8,
                                             1);
#endif
                waiting_for_ack = false;
                radio_state = RADIO_RX;
                break;
            /* Occurs when a TX ACK is aborted by the user */
            case RAIL_EVENT_TXACK_ABORTED_SHIFT:
                break;
            /* Occurs when a TX is blocked by something like PTA or RHO */
            case RAIL_EVENT_TX_BLOCKED_SHIFT:
#ifdef MBED_CONF_RTOS_PRESENT
                osSignalSet(rf_thread_id, SL_TX_TIMEOUT);
#else
                device_driver.phy_tx_done_cb(rf_radio_driver_id,
                                             current_tx_handle,
                                             PHY_LINK_CCA_FAIL,
                                             8,
                                             1);
#endif
                waiting_for_ack = false;
                radio_state = RADIO_RX;
                break;
            /* Occurs when a TX ACK is blocked by something like PTA or RHO */
            case RAIL_EVENT_TXACK_BLOCKED_SHIFT:
                break;
            /* Occurs when the TX buffer underflows */
            case RAIL_EVENT_TX_UNDERFLOW_SHIFT:
#ifdef MBED_CONF_RTOS_PRESENT
                osSignalSet(rf_thread_id, SL_TX_TIMEOUT);
#else
                device_driver.phy_tx_done_cb(rf_radio_driver_id,
                                             current_tx_handle,
                                             PHY_LINK_CCA_FAIL,
                                             8,
                                             1);
#endif
                waiting_for_ack = false;
                radio_state = RADIO_RX;
                break;
            /* Occurs when the buffer used for TX acking underflows */
            case RAIL_EVENT_TXACK_UNDERFLOW_SHIFT:
                break;
            /* Occurs when CCA/CSMA/LBT succeeds */
            case RAIL_EVENT_TX_CHANNEL_CLEAR_SHIFT:
                break;
            /* Occurs when CCA/CSMA/LBT fails */
            case RAIL_EVENT_TX_CHANNEL_BUSY_SHIFT:
#ifdef MBED_CONF_RTOS_PRESENT
                osSignalSet(rf_thread_id, SL_TX_TIMEOUT);
#else
                device_driver.phy_tx_done_cb(rf_radio_driver_id,
                                             current_tx_handle,
                                             PHY_LINK_CCA_FAIL,
                                             8,
                                             1);
#endif
                waiting_for_ack = false;
                radio_state = RADIO_RX;
                break;
            /* Occurs when a CCA check is being retried */
            case RAIL_EVENT_TX_CCA_RETRY_SHIFT:
                break;
            /* Occurs when a clear channel assessment (CCA) is begun */
            case RAIL_EVENT_TX_START_CCA_SHIFT:
                break;

            // Scheduler Event Bitmasks: Not used

            /* Event for when the scheduler switches away from this configuration */
            case RAIL_EVENT_CONFIG_UNSCHEDULED_SHIFT:
                break;
            /* Event for when the scheduler switches to this configuration */
            case RAIL_EVENT_CONFIG_SCHEDULED_SHIFT:
                break;
            /* Event for when the status of the scheduler changes */
            case RAIL_EVENT_SCHEDULER_STATUS_SHIFT:
                break;

            // Other Event Bitmasks

            /*
             * Notifies the application that a calibration is needed.
             * Occurs whenever the RAIL library detects that a calibration is
             * needed. The application determines a valid window to call
             * RAIL_Calibrate().
             */
            case RAIL_EVENT_CAL_NEEDED_SHIFT:
#ifdef MBED_CONF_RTOS_PRESENT
                osSignalSet(rf_thread_id, SL_CAL_REQ);
#else
                SL_DEBUG_PRINT("!!!! Calling for calibration\n");
#endif
                break;
            default:
                break;
            }
        }
        events = events >> 1;
        index += 1;
    } while (events != 0);
}
static void rf_thread_loop(const void *arg) { SL_DEBUG_PRINT("rf_thread_loop: starting (id: %d)\n", (int)rf_thread_id); for (;;) { osEvent event = osSignalWait(0, osWaitForever); if (event.status != osEventSignal) { continue; } platform_enter_critical(); if (event.value.signals & SL_RX_DONE) { while(rx_queue_tail != rx_queue_head) { uint8_t* packet = (uint8_t*) rx_queue[rx_queue_tail]; SL_DEBUG_PRINT("rPKT %d\n", packet[MAC_PACKET_INFO_LENGTH] - 2); device_driver.phy_rx_cb( &packet[MAC_PACKET_INFO_LENGTH + 1], /* Data payload for Nanostack starts at FCS */ packet[MAC_PACKET_INFO_LENGTH] - 2, /* Payload length is part of frame, but need to subtract CRC bytes */ packet[MAC_PACKET_OFFSET_LQI], /* LQI in second byte */ packet[MAC_PACKET_OFFSET_RSSI], /* RSSI in first byte */ rf_radio_driver_id); rx_queue_tail = (rx_queue_tail + 1) % RF_QUEUE_SIZE; } } else if (event.value.signals & SL_TX_DONE) { device_driver.phy_tx_done_cb(rf_radio_driver_id, current_tx_handle, PHY_LINK_TX_SUCCESS, 1, 1); } else if (event.value.signals & SL_ACK_RECV) { device_driver.phy_tx_done_cb( rf_radio_driver_id, current_tx_handle, (event.value.signals & SL_ACK_PEND) ? 
PHY_LINK_TX_DONE_PENDING : PHY_LINK_TX_DONE, 1, 1); } else if (event.value.signals & SL_ACK_TIMEOUT) { waiting_for_ack = false; device_driver.phy_tx_done_cb(rf_radio_driver_id, current_tx_handle, PHY_LINK_TX_FAIL, 1, 1); } else if(event.value.signals & SL_TX_ERR) { device_driver.phy_tx_done_cb( rf_radio_driver_id, current_tx_handle, PHY_LINK_CCA_FAIL, 8, 1); } else if(event.value.signals & SL_TX_TIMEOUT) { device_driver.phy_tx_done_cb( rf_radio_driver_id, current_tx_handle, PHY_LINK_CCA_FAIL, 8, 1); } else if(event.value.signals & SL_CAL_REQ) { SL_DEBUG_PRINT("rf_thread_loop: SL_CAL_REQ signal received (unhandled)\n"); } else if(event.value.signals & SL_RXFIFO_ERR) { SL_DEBUG_PRINT("rf_thread_loop: SL_RXFIFO_ERR signal received (unhandled)\n"); } else if(event.value.signals & SL_TXFIFO_ERR) { SL_DEBUG_PRINT("rf_thread_loop: SL_TXFIFO_ERR signal received (unhandled)\n"); } else if(event.value.signals & SL_QUEUE_FULL) { SL_DEBUG_PRINT("rf_thread_loop: SL_QUEUE_FULL signal received (packet dropped)\n"); } else { SL_DEBUG_PRINT("rf_thread_loop unhandled event status: %d value: %d\n", event.status, (int)event.value.signals); } platform_exit_critical(); } }