/**
 * @brief   Select the next neighbor that has packets queued for transmission.
 *
 * If a transmission to a neighbor is already in progress (pending t2u/t2k/
 * broadcast), that neighbor is kept. Otherwise the broadcast queue (slot 0)
 * is served first; unicast slots are then scanned round-robin, starting
 * right after the neighbor that was served last time, for fairness.
 *
 * On success, the popped packet and the chosen neighbor are stored in
 * @p netif's TX context and the TX sequence / t2u retry counters are reset.
 *
 * @param[in,out] netif  the network interface (must not be NULL)
 *
 * @return  true if a pending or newly selected TX neighbor is available
 * @return  false if all neighbor queues are empty
 */
bool gnrc_gomach_find_next_tx_neighbor(gnrc_netif_t *netif)
{
    assert(netif != NULL);

    /* A packet from a previous t2u/t2k/broadcast attempt is still pending:
     * keep serving the current neighbor. */
    if (netif->mac.tx.current_neighbor != NULL) {
        return true;
    }

    int selected = -1;

    /* Broadcast queue (slot 0) always has priority over unicast queues. */
    if (gnrc_priority_pktqueue_length(&netif->mac.tx.neighbors[0].queue) > 0) {
        selected = 0;
    }
    else {
        /* Scan unicast slots 1..COUNT-1 round-robin, beginning just after
         * the last served neighbor, so no queue is starved. */
        for (uint8_t offset = 0; offset < (GNRC_MAC_NEIGHBOR_COUNT - 1); offset++) {
            uint8_t idx = (uint8_t)(1 + ((netif->mac.tx.last_tx_neighbor_id + offset) %
                                         (GNRC_MAC_NEIGHBOR_COUNT - 1)));
            if (gnrc_priority_pktqueue_length(&netif->mac.tx.neighbors[idx].queue) > 0) {
                netif->mac.tx.last_tx_neighbor_id = idx;
                selected = (int)idx;
                break;
            }
        }
    }

    if (selected < 0) {
        /* Nothing queued anywhere. */
        return false;
    }

    gnrc_pktsnip_t *pkt =
        gnrc_priority_pktqueue_pop(&netif->mac.tx.neighbors[selected].queue);
    if (pkt == NULL) {
        return false;
    }

    /* Install the popped packet as the current TX job and reset per-job state. */
    netif->mac.tx.packet = pkt;
    netif->mac.tx.current_neighbor = &netif->mac.tx.neighbors[selected];
    netif->mac.tx.tx_seq = 0;
    netif->mac.tx.t2u_retry_counter = 0;
    return true;
}
/**
 * @brief   Send the packet stored in netif->mac.tx.packet to the current
 *          TX neighbor, inserting (or refreshing) the GoMacH data header.
 *
 * The queue-length indicator in the header is set from the current
 * neighbor's queue so the receiver learns about further pending traffic.
 * The packet is held (reference count +1) before sending so it survives
 * for a potential retransmission.
 *
 * @param[in,out] netif        the network interface (must not be NULL,
 *                             with a non-NULL tx.packet)
 * @param[in]     csma_enable  whether to send with CSMA enabled
 *
 * @return  result of gnrc_gomach_send() on success
 * @return  -ENOBUFS if allocating the GoMacH header snip failed
 */
int gnrc_gomach_send_data(gnrc_netif_t *netif, netopt_enable_t csma_enable)
{
    assert(netif != NULL);

    gnrc_pktsnip_t *pkt = netif->mac.tx.packet;
    assert(pkt != NULL);

    /* Insert GoMacH header above NETIF header. */
    gnrc_gomach_frame_data_t *gomach_data_hdr_pointer;
    gnrc_pktsnip_t *gomach_snip = gnrc_pktsnip_search_type(pkt, GNRC_NETTYPE_GOMACH);
    if (gomach_snip != NULL) {
        gomach_data_hdr_pointer = gomach_snip->data;
    }
    else {
        gomach_data_hdr_pointer = NULL;
    }

    if (gomach_data_hdr_pointer == NULL) {
        /* No GoMacH header yet (first send attempt), build one. */
        gnrc_gomach_frame_data_t gomach_data_hdr;
        gomach_data_hdr.header.type = GNRC_GOMACH_FRAME_DATA;

        /* Set the queue-length indicator according to the neighbor's
         * current queue situation. */
        gomach_data_hdr.queue_indicator =
            gnrc_priority_pktqueue_length(&netif->mac.tx.current_neighbor->queue);

        /* Save the payload pointer so the chain can be restored on failure. */
        gnrc_pktsnip_t *payload = netif->mac.tx.packet->next;

        pkt->next = gnrc_pktbuf_add(pkt->next, &gomach_data_hdr,
                                    sizeof(gomach_data_hdr), GNRC_NETTYPE_GOMACH);
        if (pkt->next == NULL) {
            LOG_ERROR("ERROR: [GOMACH]: pktbuf add failed in gnrc_gomach_send_data().\n");
            /* Re-attach the payload after the netif header so the packet
             * chain stays intact for a later retry. */
            netif->mac.tx.packet->next = payload;
            return -ENOBUFS;
        }
    }
    else {
        /* GoMacH header already exists (retransmission), only refresh the
         * queue-length indicator. */
        gomach_data_hdr_pointer->queue_indicator =
            gnrc_priority_pktqueue_length(&netif->mac.tx.current_neighbor->queue);
    }

    /* Hold a reference so the packet is not freed by the driver and can be
     * retransmitted if needed. */
    gnrc_pktbuf_hold(netif->mac.tx.packet, 1);

    /* Send the data packet here. */
    return gnrc_gomach_send(netif, netif->mac.tx.packet, csma_enable);
}
/**
 * @brief   Peek at the highest-priority packet in @p queue without removing it.
 *
 * @param[in] queue  the priority packet queue (may be NULL)
 *
 * @return  pointer to the head packet, or NULL if @p queue is NULL or empty
 */
gnrc_pktsnip_t *gnrc_priority_pktqueue_head(gnrc_priority_pktqueue_t *queue)
{
    if ((queue == NULL) || (gnrc_priority_pktqueue_length(queue) == 0)) {
        return NULL;
    }

    return (gnrc_pktsnip_t *)queue->first->data;
}
/**
 * @brief   Remove and return the highest-priority packet from @p queue.
 *
 * The queue node is recycled; ownership of the packet passes to the caller.
 *
 * @param[in,out] queue  the priority packet queue (may be NULL)
 *
 * @return  the dequeued packet, or NULL if @p queue is NULL or empty
 */
gnrc_pktsnip_t *gnrc_priority_pktqueue_pop(gnrc_priority_pktqueue_t *queue)
{
    if ((queue == NULL) || (gnrc_priority_pktqueue_length(queue) == 0)) {
        return NULL;
    }

    /* Detach the head node, grab its packet, then recycle the node. */
    priority_queue_node_t *top = priority_queue_remove_head(queue);
    gnrc_pktsnip_t *pkt = (gnrc_pktsnip_t *)top->data;
    _free_node((gnrc_priority_pktqueue_node_t *)top);

    return pkt;
}
/**
 * @brief   Release all packets in @p queue and recycle their queue nodes.
 *
 * @param[in,out] queue  the priority packet queue (must not be NULL)
 */
void gnrc_priority_pktqueue_flush(gnrc_priority_pktqueue_t *queue)
{
    assert(queue != NULL);

    gnrc_priority_pktqueue_node_t *node;

    /* priority_queue_remove_head() yields NULL once the queue is empty, so
     * the loop handles the empty case itself — no separate length pre-check
     * (the previous gnrc_priority_pktqueue_length() early-exit was redundant
     * and just cost an extra call). */
    while ((node = (gnrc_priority_pktqueue_node_t *)priority_queue_remove_head(queue)) != NULL) {
        gnrc_pktbuf_release(node->pkt);
        _free_node(node);
    }
}
/**
 * @brief   LISTENING-state housekeeping for the LWMAC duty cycle.
 *
 * Pauses RTT duty cycling when TX work is pending so transmission can start
 * right after the listening period, arms/handles the wake-up period timeout
 * (falling back to SLEEPING when it expires), and switches to RECEIVING when
 * packets are waiting in the RX queue.
 *
 * @param[in,out] netif  the network interface
 */
static void _lwmac_update_listening(gnrc_netif_t *netif)
{
    /* In case has pending packet to send, clear rtt alarm thus to goto
     * transmission initialization (in SLEEPING management) right after the
     * listening period */
    if ((_next_tx_neighbor(netif) != NULL) ||
        (netif->mac.tx.current_neighbor != NULL)) {
        rtt_handler(GNRC_LWMAC_EVENT_RTT_PAUSE, netif);
    }

    /* Set timeout for if there's no successful rx transaction that will
     * change state to SLEEPING. */
    if (!gnrc_lwmac_timeout_is_running(netif, GNRC_LWMAC_TIMEOUT_WAKEUP_PERIOD)) {
        gnrc_lwmac_set_timeout(netif, GNRC_LWMAC_TIMEOUT_WAKEUP_PERIOD, GNRC_LWMAC_WAKEUP_DURATION_US);
    }
    else if (gnrc_lwmac_timeout_is_expired(netif, GNRC_LWMAC_TIMEOUT_WAKEUP_PERIOD)) {
        /* Dispatch first as there still may be broadcast packets. */
        gnrc_mac_dispatch(&netif->mac.rx);

        netif->mac.lwmac.state = GNRC_LWMAC_SLEEPING;

        /* Enable duty cycling again */
        rtt_handler(GNRC_LWMAC_EVENT_RTT_RESUME, netif);

        /* Put the radio to sleep and drop the now-served wake-up timeout. */
        _gnrc_lwmac_set_netdev_state(netif, NETOPT_STATE_SLEEP);
        gnrc_lwmac_clear_timeout(netif, GNRC_LWMAC_TIMEOUT_WAKEUP_PERIOD);

        /* if there is a packet for transmission, schedule update to start
         * transmission initialization immediately. */
        gnrc_mac_tx_neighbor_t *neighbour = _next_tx_neighbor(netif);
        if ((neighbour != NULL) || (netif->mac.tx.current_neighbor != NULL)) {
            /* This triggers packet sending procedure in sleeping immediately. */
            lwmac_schedule_update(netif);
            return;
        }
    }

    if (gnrc_priority_pktqueue_length(&netif->mac.rx.queue) > 0) {
        /* Do wake-up extension in each packet reception: clear the period
         * timeout and handle the queued packet(s) in RECEIVING state. */
        gnrc_lwmac_clear_timeout(netif, GNRC_LWMAC_TIMEOUT_WAKEUP_PERIOD);
        lwmac_set_state(netif, GNRC_LWMAC_RECEIVING);
    }
}
/**
 * @brief   Find the neighbor with queued packets whose wake-up phase is
 *          nearest in the future.
 *
 * Unknown destinations are initialized with their phase at the end of the
 * local interval, so known destinations that still wake up within this
 * interval are preferred over them.
 *
 * @param[in] netif  the network interface
 *
 * @return  the best candidate neighbor, or NULL if no queue holds packets
 */
static gnrc_mac_tx_neighbor_t *_next_tx_neighbor(gnrc_netif_t *netif)
{
    gnrc_mac_tx_neighbor_t *best = NULL;
    uint32_t best_phase = GNRC_LWMAC_PHASE_MAX;

    for (unsigned i = 0; i < GNRC_MAC_NEIGHBOR_COUNT; i++) {
        gnrc_mac_tx_neighbor_t *candidate = &netif->mac.tx.neighbors[i];

        /* Skip neighbors with nothing to send. */
        if (gnrc_priority_pktqueue_length(&candidate->queue) == 0) {
            continue;
        }

        /* Prefer the destination that wakes up soonest. */
        uint32_t until_wakeup = _gnrc_lwmac_ticks_until_phase(candidate->phase);
        if (until_wakeup <= best_phase) {
            best = candidate;
            best_phase = until_wakeup;
            DEBUG("[LWMAC-int] Advancing queue #%u\n", i);
        }
    }

    return best;
}
/**
 * @brief   SLEEPING-state management: decide when to leave sleep for TX.
 *
 * Pending retransmissions and broadcasts are sent immediately; for unicast,
 * the transmission is scheduled just before the destination's recorded
 * wake-up phase (plus a small random backoff to avoid one node always
 * winning the medium). When the scheduled wake-up timeout expires, the
 * state machine moves to TRANSMITTING.
 *
 * @param[in,out] netif  the network interface
 */
static void _sleep_management(gnrc_netif_t *netif)
{
    /* If a packet is scheduled, no other (possible earlier) packet can be
     * sent before the first one is handled, even no broadcast */
    if (!gnrc_lwmac_timeout_is_running(netif, GNRC_LWMAC_TIMEOUT_WAIT_DEST_WAKEUP)) {
        gnrc_mac_tx_neighbor_t *neighbour;

        /* Check if there is packet remaining for retransmission */
        if (netif->mac.tx.current_neighbor != NULL) {
            neighbour = netif->mac.tx.current_neighbor;
        }
        else {
            /* Check if there are broadcasts to send and transmit immediately */
            if (gnrc_priority_pktqueue_length(&(netif->mac.tx.neighbors[0].queue)) > 0) {
                netif->mac.tx.current_neighbor = &(netif->mac.tx.neighbors[0]);
                lwmac_set_state(netif, GNRC_LWMAC_TRANSMITTING);
                return;
            }
            neighbour = _next_tx_neighbor(netif);
        }

        if (neighbour != NULL) {
            /* if phase is unknown (larger than one wake-up interval),
             * send immediately. */
            if (neighbour->phase > RTT_TICKS_TO_US(GNRC_LWMAC_WAKEUP_INTERVAL_US)) {
                netif->mac.tx.current_neighbor = neighbour;
                gnrc_lwmac_set_tx_continue(netif, false);
                netif->mac.tx.tx_burst_count = 0;
                lwmac_set_state(netif, GNRC_LWMAC_TRANSMITTING);
                return;
            }

            /* Offset in microseconds when the earliest (phase) destination
             * node wakes up that we have packets for. */
            uint32_t time_until_tx = RTT_TICKS_TO_US(_gnrc_lwmac_ticks_until_phase(neighbour->phase));

            /* If there's not enough time to prepare a WR to catch the phase,
             * postpone to next interval */
            if (time_until_tx < GNRC_LWMAC_WR_PREPARATION_US) {
                time_until_tx += GNRC_LWMAC_WAKEUP_INTERVAL_US;
            }
            time_until_tx -= GNRC_LWMAC_WR_PREPARATION_US;

            /* add a random time before goto TX, for avoiding one node
             * always holding the medium (if the receiver's phase is recorded
             * earlier in this particular node) */
            uint32_t random_backoff;
            random_backoff = random_uint32_range(0, GNRC_LWMAC_TIME_BETWEEN_WR_US);
            time_until_tx = time_until_tx + random_backoff;

            gnrc_lwmac_set_timeout(netif, GNRC_LWMAC_TIMEOUT_WAIT_DEST_WAKEUP, time_until_tx);

            /* Register neighbour to be the next */
            netif->mac.tx.current_neighbor = neighbour;

            /* Stop dutycycling, we're preparing to send. This prevents the
             * timeout arriving late, so that the destination phase would
             * be missed. */
            /* TODO: bad for power savings */
            rtt_handler(GNRC_LWMAC_EVENT_RTT_PAUSE, netif);
        }
    }
    else if (gnrc_lwmac_timeout_is_expired(netif, GNRC_LWMAC_TIMEOUT_WAIT_DEST_WAKEUP)) {
        /* The scheduled destination wake-up time has arrived: start TX. */
        LOG_DEBUG("[LWMAC] Got timeout for dest wakeup, ticks: %" PRIu32 "\n", rtt_get_counter());
        gnrc_lwmac_set_tx_continue(netif, false);
        netif->mac.tx.tx_burst_count = 0;
        lwmac_set_state(netif, GNRC_LWMAC_TRANSMITTING);
    }
}
/**
 * @brief   Transmit the pending data packet to the current TX neighbor.
 *
 * Re-enables auto-ACK and CSMA on the device, prepends the LWMAC header
 * (marking DATA_PENDING when a burst continuation is possible), and sends.
 * If the radio is busy receiving, the packet is re-queued for the next
 * cycle instead (collision avoidance).
 *
 * @param[in,out] netif  the network interface (must not be NULL,
 *                       with a non-NULL tx.packet)
 *
 * @return  false if send data failed (packet dropped or re-queued),
 *          otherwise true
 */
static bool _send_data(gnrc_netif_t *netif)
{
    assert(netif != NULL);

    gnrc_pktsnip_t *pkt = netif->mac.tx.packet;
    gnrc_pktsnip_t *pkt_payload;
    assert(pkt != NULL);

    /* Enable Auto ACK again */
    netopt_enable_t autoack = NETOPT_ENABLE;
    netif->dev->driver->set(netif->dev, NETOPT_AUTOACK, &autoack, sizeof(autoack));

    /* It's okay to retry sending DATA. Timing doesn't matter anymore and
     * destination is waiting for a certain amount of time. */
    uint8_t csma_retries = GNRC_LWMAC_DATA_CSMA_RETRIES;
    netif->dev->driver->set(netif->dev, NETOPT_CSMA_RETRIES, &csma_retries, sizeof(csma_retries));

    netif->mac.mac_info |= GNRC_NETIF_MAC_INFO_CSMA_ENABLED;
    netopt_enable_t csma_enable = NETOPT_ENABLE;
    netif->dev->driver->set(netif->dev, NETOPT_CSMA, &csma_enable, sizeof(csma_enable));

    /* Remember the payload chain so it can be restored on failure paths. */
    pkt_payload = pkt->next;

    /* Insert LWMAC header above NETIF header. The burst (consecutive)
     * transmission scheme works here (sender side). If the sender finds it
     * has pending packets for the receiver (and is under the burst limit),
     * it sets the packet type to FRAMETYPE_DATA_PENDING, to notify the
     * receiver of the next incoming packet. In case the sender has no more
     * packets for the receiver, it simply sets the type to FRAMETYPE_DATA. */
    gnrc_lwmac_hdr_t hdr;
    if ((gnrc_priority_pktqueue_length(&netif->mac.tx.current_neighbor->queue) > 0) &&
        (netif->mac.tx.tx_burst_count < GNRC_LWMAC_MAX_TX_BURST_PKT_NUM)) {
        hdr.type = GNRC_LWMAC_FRAMETYPE_DATA_PENDING;
        gnrc_lwmac_set_tx_continue(netif, true);
        netif->mac.tx.tx_burst_count++;
    }
    else {
        hdr.type = GNRC_LWMAC_FRAMETYPE_DATA;
        gnrc_lwmac_set_tx_continue(netif, false);
    }

    pkt->next = gnrc_pktbuf_add(pkt->next, &hdr, sizeof(hdr), GNRC_NETTYPE_LWMAC);
    if (pkt->next == NULL) {
        LOG_ERROR("ERROR: [LWMAC-tx] Cannot allocate pktbuf of type GNRC_NETTYPE_LWMAC\n");
        LOG_ERROR("ERROR: [LWMAC-tx] Memory maybe full, drop the data packet\n");
        /* Restore the payload chain, then drop the whole packet. */
        netif->mac.tx.packet->next = pkt_payload;
        gnrc_pktbuf_release(netif->mac.tx.packet);
        /* clear packet point to avoid TX retry */
        netif->mac.tx.packet = NULL;
        return false;
    }

    /* if found ongoing transmission, quit this cycle for collision avoidance.
     * Data packet will be re-queued and try to send in the next cycle. */
    if (_gnrc_lwmac_get_netdev_state(netif) == NETOPT_STATE_RX) {
        /* save pointer to netif header */
        gnrc_pktsnip_t *netif_snip = pkt->next->next;
        /* remove LWMAC header */
        pkt->next->next = NULL;
        gnrc_pktbuf_release(pkt->next);
        /* make append netif header after payload again */
        pkt->next = netif_snip;
        if (!gnrc_mac_queue_tx_packet(&netif->mac.tx, 0, netif->mac.tx.packet)) {
            gnrc_pktbuf_release(netif->mac.tx.packet);
            LOG_WARNING("WARNING: [LWMAC-tx] TX queue full, drop packet\n");
        }
        /* drop pointer so it wont be free'd */
        netif->mac.tx.packet = NULL;
        return false;
    }

    /* Send data */
    int res = _gnrc_lwmac_transmit(netif, pkt);
    if (res < 0) {
        LOG_ERROR("ERROR: [LWMAC-tx] Send data failed.");
        gnrc_pktbuf_release(pkt);
        /* clear packet point to avoid TX retry */
        netif->mac.tx.packet = NULL;
        return false;
    }

    /* Packet has been released by netdev, so drop pointer */
    netif->mac.tx.packet = NULL;

    DEBUG("[LWMAC-tx]: spent %lu WR in TX\n", (unsigned long)netif->mac.tx.wr_sent);

#if (LWMAC_ENABLE_DUTYCYLE_RECORD == 1)
    /* Record the sending delay (from scheduling to actual send) in RTT ticks. */
    netif->mac.prot.lwmac.pkt_start_sending_time_ticks =
        rtt_get_counter() - netif->mac.prot.lwmac.pkt_start_sending_time_ticks;
    DEBUG("[LWMAC-tx]: pkt sending delay in TX: %lu us\n",
          RTT_TICKS_TO_US(netif->mac.prot.lwmac.pkt_start_sending_time_ticks));
#endif
    return true;
}