/* Pick an RX buffer appropriate for the packet currently being received.
 *
 * Command Complete and Command Status events come from the dedicated
 * command-response pool; ACL data and all other events come from the
 * generic RX pools.
 *
 * @param timeout Buffer allocation timeout, passed through to the pool.
 * @return A new buffer, or NULL if allocation failed/timed out.
 */
static struct net_buf *get_rx(int timeout)
{
	BT_DBG("type 0x%02x, evt 0x%02x", rx.type, rx.evt.evt);

	switch (rx.type) {
	case H4_ACL:
		return bt_buf_get_rx(BT_BUF_ACL_IN, timeout);
	case H4_EVT:
		if (rx.evt.evt == BT_HCI_EVT_CMD_COMPLETE ||
		    rx.evt.evt == BT_HCI_EVT_CMD_STATUS) {
			return bt_buf_get_cmd_complete(timeout);
		}
		/* Fallthrough: ordinary events use the event pool */
	default:
		return bt_buf_get_rx(BT_BUF_EVT, timeout);
	}
}
/* Allocate a buffer for an incoming HCI event now that the event code
 * is known.
 *
 * Command Complete and Command Status events use the dedicated
 * command-response pool; all other events use the generic event pool.
 * On success, the event code is written as the first byte of the new
 * buffer.
 *
 * @param evt HCI event code read from the packet payload.
 * @return Newly allocated buffer with @a evt added, or NULL if no
 *         buffer is available (K_NO_WAIT — called from ISR context).
 */
static inline struct net_buf *get_evt_buf(u8_t evt)
{
	struct net_buf *buf;

	switch (evt) {
	case BT_HCI_EVT_CMD_COMPLETE:
	case BT_HCI_EVT_CMD_STATUS:
		buf = bt_buf_get_cmd_complete(K_NO_WAIT);
		break;
	default:
		buf = bt_buf_get_rx(BT_BUF_EVT, K_NO_WAIT);
		break;
	}

	if (buf) {
		/* Bug fix: add the event code to the buffer we just
		 * allocated. The caller (bt_uart_isr, PAYLOAD state) only
		 * invokes this function when h5.rx_buf is NULL and assigns
		 * our return value afterwards, so the original
		 * net_buf_add_u8(h5.rx_buf, evt) dereferenced NULL.
		 */
		net_buf_add_u8(buf, evt);
	}

	return buf;
}
/* RX thread for the SPI HCI transport.
 *
 * Waits for the controller's IRQ line (sem_request, given by the IRQ
 * callback), reads the slave header to learn how many bytes are pending,
 * transfers the payload, then hands the resulting HCI packet to the host
 * stack. Loops forever.
 */
static void bt_spi_rx_thread(void)
{
	struct net_buf *buf;
	/* Master header: SPI_READ opcode followed by four dummy bytes */
	u8_t header_master[5] = { SPI_READ, 0x00, 0x00, 0x00, 0x00 };
	u8_t header_slave[5];
	struct bt_hci_acl_hdr acl_hdr;
	u8_t size;

	memset(&txmsg, 0xFF, SPI_MAX_MSG_LEN);
	while (true) {
		/* Wait until the controller raises its IRQ line */
		k_sem_take(&sem_request, K_FOREVER);
		/* Disable IRQ pin callback to avoid spurious IRQs */
		gpio_pin_disable_callback(irq_dev, GPIO_IRQ_PIN);
		k_sem_take(&sem_busy, K_FOREVER);
		/* Poll the slave header until it reports a plausible
		 * byte count (0 means nothing pending, 0xFF means the
		 * slave is not driving MISO yet).
		 */
		do {
#if defined(CONFIG_BLUETOOTH_SPI_BLUENRG)
			/* BlueNRG quirk: toggle chip-select to re-arm the
			 * controller's SPI interface before each attempt.
			 */
			gpio_pin_write(cs_dev, GPIO_CS_PIN, 1);
			gpio_pin_write(cs_dev, GPIO_CS_PIN, 0);
#endif /* CONFIG_BLUETOOTH_SPI_BLUENRG */
			spi_transceive(spi_dev, header_master, 5,
				       header_slave, 5);
		} while (header_slave[STATUS_HEADER_TOREAD] == 0 ||
			 header_slave[STATUS_HEADER_TOREAD] == 0xFF);

		size = header_slave[STATUS_HEADER_TOREAD];
		/* Retry the payload transfer until the first byte is a
		 * non-zero packet-type indicator.
		 */
		do {
			spi_transceive(spi_dev, &txmsg, size, &rxmsg, size);
		} while (rxmsg[0] == 0);

		/* Transfer done: re-enable the IRQ callback and release
		 * the bus for the TX path.
		 */
		gpio_pin_enable_callback(irq_dev, GPIO_IRQ_PIN);
#if defined(CONFIG_BLUETOOTH_SPI_BLUENRG)
		gpio_pin_write(cs_dev, GPIO_CS_PIN, 1);
#endif /* CONFIG_BLUETOOTH_SPI_BLUENRG */
		k_sem_give(&sem_busy);
		spi_dump_message("RX:ed", rxmsg, size);

		switch (rxmsg[PACKET_TYPE]) {
		case HCI_EVT:
			switch (rxmsg[EVT_HEADER_EVENT]) {
			case BT_HCI_EVT_VENDOR:
				/* Vendor events are currently unsupported */
				bt_spi_handle_vendor_evt(rxmsg);
				continue;
			case BT_HCI_EVT_CMD_COMPLETE:
			case BT_HCI_EVT_CMD_STATUS:
				buf = bt_buf_get_cmd_complete(K_FOREVER);
				break;
			default:
				/* NOTE(review): single-argument
				 * bt_buf_get_rx(timeout) here, while other
				 * drivers in this tree use the two-argument
				 * (type, timeout) form — presumably this file
				 * targets an older buffer API revision;
				 * confirm against the in-tree bluetooth.h.
				 */
				buf = bt_buf_get_rx(K_FOREVER);
				break;
			}
			bt_buf_set_type(buf, BT_BUF_EVT);
			/* Copy event header + parameters: EVT_HEADER_SIZE
			 * holds the parameter length; +2 covers the event
			 * code and length bytes themselves.
			 */
			net_buf_add_mem(buf, &rxmsg[1],
					rxmsg[EVT_HEADER_SIZE] + 2);
			break;
		case HCI_ACL:
			buf = bt_buf_get_rx(K_FOREVER);
			bt_buf_set_type(buf, BT_BUF_ACL_IN);
			/* memcpy into a local header to avoid a potentially
			 * misaligned access into rxmsg.
			 */
			memcpy(&acl_hdr, &rxmsg[1], sizeof(acl_hdr));
			net_buf_add_mem(buf, &acl_hdr, sizeof(acl_hdr));
			/* ACL payload follows the 4-byte ACL header at
			 * rxmsg[1]; length is little-endian on the wire.
			 */
			net_buf_add_mem(buf, &rxmsg[5],
					sys_le16_to_cpu(acl_hdr.len));
			break;
		default:
			BT_ERR("Unknown BT buf type %d", rxmsg[0]);
			continue;
		}

		/* High-priority events (e.g. command responses) bypass the
		 * ordinary RX queue.
		 */
		if (rxmsg[PACKET_TYPE] == HCI_EVT &&
		    bt_hci_evt_is_prio(rxmsg[EVT_HEADER_EVENT])) {
			bt_recv_prio(buf);
		} else {
			bt_recv(buf);
		}
	}
}
/* UART ISR for the H:5 (3-wire) transport RX path.
 *
 * Drains the RX FIFO one byte at a time and feeds a SLIP/H:5 state
 * machine: START (wait for SLIP delimiter) -> HEADER (collect the 4-byte
 * H:5 header) -> PAYLOAD (collect H5_HDR_LEN bytes) -> END (expect the
 * closing delimiter, then dispatch). `remaining` and `hdr` are static so
 * the parse state survives across ISR invocations.
 */
static void bt_uart_isr(struct device *unused)
{
	static int remaining;	/* bytes still expected in current phase */
	u8_t byte;
	int ret;
	static u8_t hdr[4];	/* H:5 packet header under construction */

	ARG_UNUSED(unused);

	while (uart_irq_update(h5_dev) && uart_irq_is_pending(h5_dev)) {
		if (!uart_irq_rx_ready(h5_dev)) {
			if (uart_irq_tx_ready(h5_dev)) {
				BT_DBG("transmit ready");
			} else {
				BT_DBG("spurious interrupt");
			}
			/* Only the UART RX path is interrupt-enabled */
			break;
		}

		ret = uart_fifo_read(h5_dev, &byte, sizeof(byte));
		if (!ret) {
			continue;
		}

		switch (h5.rx_state) {
		case START:
			/* Discard noise until a SLIP frame delimiter */
			if (byte == SLIP_DELIMITER) {
				h5.rx_state = HEADER;
				remaining = sizeof(hdr);
			}
			break;
		case HEADER:
			/* In case we confuse an ending slip delimiter
			 * with a starting one: restart header collection.
			 */
			if (byte == SLIP_DELIMITER) {
				remaining = sizeof(hdr);
				continue;
			}

			if (h5_unslip_byte(&byte) < 0) {
				h5_reset_rx();
				continue;
			}

			memcpy(&hdr[sizeof(hdr) - remaining], &byte, 1);
			remaining--;

			if (remaining) {
				break;
			}

			/* Header complete: payload length comes from it */
			remaining = H5_HDR_LEN(hdr);

			switch (H5_HDR_PKT_TYPE(hdr)) {
			case HCI_EVENT_PKT:
				/* The buffer is allocated only once we know
				 * the exact event type (first payload byte).
				 */
				h5.rx_state = PAYLOAD;
				break;
			case HCI_ACLDATA_PKT:
				h5.rx_buf = bt_buf_get_rx(BT_BUF_ACL_IN,
							  K_NO_WAIT);
				if (!h5.rx_buf) {
					BT_WARN("No available data buffers");
					h5_reset_rx();
					continue;
				}
				h5.rx_state = PAYLOAD;
				break;
			case HCI_3WIRE_LINK_PKT:
			case HCI_3WIRE_ACK_PKT:
				/* Link-control traffic uses the driver's
				 * private pool, not the host stack's.
				 */
				h5.rx_buf = net_buf_alloc(&h5_pool, K_NO_WAIT);
				if (!h5.rx_buf) {
					BT_WARN("No available signal buffers");
					h5_reset_rx();
					continue;
				}
				h5.rx_state = PAYLOAD;
				break;
			default:
				BT_ERR("Wrong packet type %u",
				       H5_HDR_PKT_TYPE(hdr));
				h5.rx_state = END;
				break;
			}
			break;
		case PAYLOAD:
			if (h5_unslip_byte(&byte) < 0) {
				h5_reset_rx();
				continue;
			}

			/* Allocate HCI event buffer now that we know the
			 * exact event type.
			 */
			if (!h5.rx_buf) {
				h5.rx_buf = get_evt_buf(byte);
				if (!h5.rx_buf) {
					BT_WARN("No available event buffers");
					h5_reset_rx();
					continue;
				}
			}

			net_buf_add_mem(h5.rx_buf, &byte, sizeof(byte));
			remaining--;
			if (!remaining) {
				h5.rx_state = END;
			}
			break;
		case END:
			if (byte != SLIP_DELIMITER) {
				BT_ERR("Missing ending SLIP_DELIMITER");
				h5_reset_rx();
				break;
			}

			BT_DBG("Received full packet: type %u",
			       H5_HDR_PKT_TYPE(hdr));

			/* Check when full packet is received, it can be done
			 * when parsing packet header but we need to receive
			 * full packet anyway to clear UART.
			 */
			if (H5_HDR_RELIABLE(hdr) &&
			    H5_HDR_SEQ(hdr) != h5.tx_ack) {
				BT_ERR("Seq expected %u got %u. Drop packet",
				       h5.tx_ack, H5_HDR_SEQ(hdr));
				h5_reset_rx();
				break;
			}

			h5_process_complete_packet(hdr);
			h5.rx_state = START;
			break;
		}
	}
}