void ICACHE_FLASH_ATTR tfp_recv_callback(void *arg, char *pdata, unsigned short len) {
	// Receive callback for a TFP TCP socket: reassembles the incoming byte
	// stream into complete TFP packets and dispatches each one.
	espconn *con = (espconn *)arg;
	// The TFPConnection for this socket is stashed in espconn's reverse pointer.
	TFPConnection *tfp_con = (TFPConnection *)con->reverse;

	// RX packet statistics are only kept outside of mesh mode.
	if(!configuration_current.mesh_enable) {
		packet_counter(con, PACKET_COUNT_RX);
	}

	// Feed the stream byte-by-byte into the per-connection reassembly buffer.
	for(uint32_t i = 0; i < len; i++) {
		tfp_con->recv_buffer[tfp_con->recv_buffer_index] = pdata[i];

		// The TFP header carries the total packet length at a fixed offset;
		// once that byte arrives we know how many bytes to expect in total.
		if(tfp_con->recv_buffer_index == TFP_RECV_INDEX_LENGTH) {
			// TODO: Sanity-check length
			// NOTE(review): this length byte comes straight from the network and
			// is used unvalidated below as the packet end marker; a value larger
			// than recv_buffer would overflow it — confirm an upper bound is
			// enforced elsewhere before relying on this path.
			tfp_con->recv_buffer_expected_length = tfp_con->recv_buffer[TFP_RECV_INDEX_LENGTH];
		}

		tfp_con->recv_buffer_index++;

		// A full packet has been assembled: hand it off, then reset the
		// reassembly state for the next packet.
		if(tfp_con->recv_buffer_index == tfp_con->recv_buffer_expected_length) {
			// com_handle_message() consumes messages addressed to this extension
			// itself; everything else is recorded in the brickd routing table and
			// forwarded towards the master brick.
			if(!com_handle_message(tfp_con->recv_buffer, tfp_con->recv_buffer_expected_length, tfp_con->cid)) {
				brickd_route_from(tfp_con->recv_buffer, tfp_con->cid);
				tfp_handle_packet(tfp_con->recv_buffer, tfp_con->recv_buffer_expected_length);
			}
			tfp_con->recv_buffer_index = 0;
			tfp_con->recv_buffer_expected_length = TFP_MIN_LENGTH;
		}
	}

	// Backpressure: pause TCP receive when less than 5 MTUs (+2 bytes of
	// ringbuffer overhead) remain free; tfp_recv_unhold() resumes it once the
	// buffer drains (see the 6*MTU_LENGTH threshold in tfp_send()).
	if(ringbuffer_get_free(&tfp_rb) < (5*MTU_LENGTH + 2)) {
		tfp_recv_hold();
	}
}
void ICACHE_FLASH_ATTR tfp_handle_packet(const uint8_t *data, const uint8_t length) {
	// Forward a TFP packet towards the master brick via UART. To preserve
	// packet order, a direct send is only attempted while the ringbuffer is
	// empty; otherwise (or if the direct send fails) the packet is queued.
	if(ringbuffer_is_empty(&tfp_rb) && uart_con_send(data, length) != 0) {
		return; // Sent directly, nothing to queue.
	}

	if(ringbuffer_get_free(&tfp_rb) <= (length + 2)) {
		logw("Message does not fit in Buffer: %d < %d\n", ringbuffer_get_free(&tfp_rb), length + 2);
		return;
	}

	for(uint8_t byte_index = 0; byte_index < length; byte_index++) {
		if(!ringbuffer_add(&tfp_rb, data[byte_index])) {
			// Should be unreachable: free space was checked above.
			loge("Ringbuffer full!\n");
		}
	}
}
void ICACHE_FLASH_ATTR tfp_recv_unhold(void) {
	// Re-enable TCP receive on all active connections after backpressure
	// (see tfp_recv_hold() in the receive callback). No-op if not holding.
	if(!tfp_is_hold) {
		return;
	}

	for(uint8_t index = 0; index < TFP_MAX_CONNECTIONS; index++) {
		if(tfp_cons[index].state == TFP_CON_STATE_OPEN ||
		   tfp_cons[index].state == TFP_CON_STATE_SENDING) {
			espconn_recv_unhold(tfp_cons[index].con);
		}
	}

	logd("unhold: %d\n", ringbuffer_get_free(&tfp_rb));
	tfp_is_hold = false;
}
void ICACHE_FLASH_ATTR com_send(const void *data, const uint8_t length, const int8_t cid) {
	// Send a packet towards the connection identified by cid.
	// cid == -2 routes back over UART; any other cid goes out via WIFI.
	if(cid == -2) { // UART
		tfp_handle_packet(data, length);
	} else { // WIFI
		// If ringbuffer not empty we add data to ringbuffer (we want to keep order)
		// Else if we can't immediately send to master brick we also add to ringbuffer
		if(!ringbuffer_is_empty(&com_out_rb) || tfp_send_w_cid(data, length, cid) == 0) {
			// Required free space: payload + 2 bytes ringbuffer overhead
			// + 1 byte for the cid prepended to each queued packet.
			if(ringbuffer_get_free(&com_out_rb) > (length + 2 + 1)) {
				ringbuffer_add(&com_out_rb, cid);
				for(uint8_t i = 0; i < length; i++) {
					if(!ringbuffer_add(&com_out_rb, ((uint8_t*)data)[i])) {
						// Should be unreachable: free space was checked above.
						loge("Ringbuffer (com out) full!\n");
					}
				}
			} else {
				// Bug fix: report the same required size the check above uses
				// (cid byte included); previously it printed length + 2.
				logw("Message does not fit in Buffer (com out): %d < %d\n", ringbuffer_get_free(&com_out_rb), length + 2 + 1);
			}
		}
	}
}
/**
 * @brief FSM handling function for receiving data.
 *
 * @param[in,out] tcb   TCB holding the connection information.
 * @param[in,out] buf   Buffer to store received data into.
 * @param[in]     len   Maximum number of bytes to receive.
 *
 * @returns Number of successfully received bytes.
 */
static int _fsm_call_recv(gnrc_tcp_tcb_t *tcb, void *buf, size_t len)
{
    DEBUG("gnrc_tcp_fsm.c : _fsm_call_recv()\n");

    /* Nothing buffered: report zero bytes received. */
    if (ringbuffer_empty(&tcb->rcv_buf)) {
        return 0;
    }

    /* Drain up to 'len' bytes from the receive buffer into 'buf'. */
    size_t bytes_read = ringbuffer_get(&(tcb->rcv_buf), buf, len);

    /* Once the receive buffer can hold at least one full MSS again, reopen
     * the receive window to the available space and announce the window
     * update with a bare ACK. */
    size_t free_space = ringbuffer_get_free(&(tcb->rcv_buf));
    if (free_space >= GNRC_TCP_MSS) {
        tcb->rcv_wnd = free_space;

        gnrc_pktsnip_t *out_pkt = NULL;
        uint16_t seq_con = 0;
        _pkt_build(tcb, &out_pkt, &seq_con, MSK_ACK, tcb->snd_nxt, tcb->rcv_nxt, NULL, 0);
        _pkt_send(tcb, out_pkt, seq_con, false);
    }
    return bytes_read;
}
bool ICACHE_FLASH_ATTR tfp_send(const uint8_t *data, const uint8_t length) {
	// Send a TFP packet out over WIFI (broadcast or unicast, mesh or plain
	// TCP/websocket). Returns true if the packet was sent, consumed locally, or
	// dropped on purpose (auth failure); false if it must be retried later
	// because a buffer was full or a websocket was not ready.
	uint8_t i = 0;
	// TODO: Sanity check length again?
	// TODO: Are we sure that data is always a full TFP packet?

	// cid == -2 => send back via UART
	if(com_handle_message(data, length, -2)) {
		return true;
	}

	// We only peak at the routing table here (and delete the route manually if
	// we can fit the data in our buffer). It would be very expensive to first
	// peak and then discover the route again.
	BrickdRouting *match = NULL;
	int8_t cid = brickd_route_to_peak(data, &match);

	// Drop (but report success for) packets that fail authentication.
	if(!brickd_check_auth((const MessageHeader*)data, cid)) {
		return true;
	}

	/*
	 * First let's check if everything fits in the buffers. This is only done if
	 * mesh isn't enabled. When mesh is enabled, dedicated buffer is used for
	 * sending.
	 */
	if(!configuration_current.mesh_enable) {
		if(!tfp_send_check_buffer(cid)) {
			return false;
		}
	}

	// Add websocket header if necessary
	uint8_t data_with_websocket_header[TFP_SEND_BUFFER_SIZE + sizeof(WebsocketFrameClientToServer)];
	int16_t length_with_websocket_header = tfpw_insert_websocket_header(cid, data_with_websocket_header, data, length);
	if(length_with_websocket_header == -1) {
		// -1 = We use websocket but state is not OK for sending
		return false;
	}

	// Remove match from brickd routing table only if we know that we can fit
	// the data in the buffer
	if(match != NULL) {
		match->uid = 0;
		match->func_id = 0;
		match->sequence_number = 0;
		match->cid = -1;
	}

	/*
	 * FIXME: Shouldn't the buffering while sending mechanism be also used for
	 * non-mesh case? As it is documented that packets should be sent from the
	 * sent callback of the previous packet.
	 */

	// Broadcast.
	if(cid == -1) {
		/*
		 * Broadcast is handled differently when mesh is enabled as then there is
		 * only one socket connection involved.
		 */
		if (!configuration_current.mesh_enable) {
			// Plain TCP/websocket: copy into each open connection's send buffer
			// and transmit, adding the websocket framing where needed.
			for(uint8_t i = 0; i < TFP_MAX_CONNECTIONS; i++) {
				if(tfp_cons[i].state == TFP_CON_STATE_OPEN) {
					// TODO: sent data (return value)
					tfp_cons[i].state = TFP_CON_STATE_SENDING;
					uint8_t length_to_send = length;
					if(tfp_cons[i].websocket_state == WEBSOCKET_STATE_NO_WEBSOCKET) {
						os_memcpy(tfp_cons[i].send_buffer, data, length);
					} else {
						os_memcpy(tfp_cons[i].send_buffer, data_with_websocket_header, length_with_websocket_header);
						length_to_send = length_with_websocket_header;
					}
					espconn_send(tfp_cons[i].con, tfp_cons[i].send_buffer, length_to_send);
				}
			}
		} else {
			// Mesh mode: exactly one socket (index 0).
			os_memcpy(tfp_cons[0].send_buffer, data, length);

			// Check if the socket is in a state to be able to send.
			if(tfp_cons[0].state == TFP_CON_STATE_OPEN) {
				tfp_mesh_send(tfp_cons[0].con, tfp_cons[0].send_buffer, length);
			}
			/*
			 * If the socket can't send at the moment buffer the packet in TFP mesh
			 * send buffer for sending in future.
			 */
			else {
				if(tfp_mesh_send_check_buffer(length)) {
					for(i = 0; i < length; i++) {
						if(!ringbuffer_add(&tfp_mesh_send_rb, data[i])) {
							return false;
						}
					}
				}
				else {
					return false;
				}
			}
		}
	}
	// Unicast.
	else {
		uint8_t length_to_send = length;

		if(!configuration_current.mesh_enable) {
			// When mesh mode is enabled this socket state is updated from tfp_mesh_send().
			tfp_cons[cid].state = TFP_CON_STATE_SENDING;
		}

		if(tfp_cons[cid].websocket_state == WEBSOCKET_STATE_NO_WEBSOCKET) {
			os_memcpy(tfp_cons[cid].send_buffer, data, length);
		} else {
			os_memcpy(tfp_cons[cid].send_buffer, data_with_websocket_header, length_with_websocket_header);
			length_to_send = length_with_websocket_header;
		}

		if(configuration_current.mesh_enable) {
			// Check if the socket is in a state to be able to send.
			if(tfp_cons[0].state == TFP_CON_STATE_OPEN) {
				tfp_mesh_send(tfp_cons[0].con, tfp_cons[0].send_buffer, length_to_send);
			}
			/*
			 * If the socket can't send at the moment buffer the packet in TFP mesh
			 * send buffer for sending in future.
			 */
			else {
				// NOTE(review): this loop reads data[i] for length_to_send bytes,
				// but data only holds length bytes; if a websocket header was
				// inserted (length_to_send > length) this reads past the end of
				// data and queues the raw packet under the framed length. Verify
				// that mesh mode can never be combined with websockets, or buffer
				// from tfp_cons[cid].send_buffer instead.
				if(tfp_mesh_send_check_buffer(length_to_send)) {
					for(i = 0; i < length_to_send; i++) {
						if(!ringbuffer_add(&tfp_mesh_send_rb, data[i])) {
							return false;
						}
					}
				}
				else {
					return false;
				}
			}
		} else {
			espconn_send(tfp_cons[cid].con, tfp_cons[cid].send_buffer, length_to_send);
		}
	}

	// Release receive backpressure once enough ringbuffer space is free again
	// (hysteresis: hold kicks in below 5 MTUs, unhold above 6 MTUs).
	if(ringbuffer_get_free(&tfp_rb) > (6*MTU_LENGTH + 2)) {
		tfp_recv_unhold();
	}

	return true;
}