void bt_l2cap_recv(struct bt_conn *conn, struct net_buf *buf) { struct bt_l2cap_hdr *hdr = (void *)buf->data; struct bt_l2cap_chan *chan; uint16_t cid; if (IS_ENABLED(CONFIG_BLUETOOTH_BREDR) && conn->type == BT_CONN_TYPE_BR) { bt_l2cap_br_recv(conn, buf); return; } if (buf->len < sizeof(*hdr)) { BT_ERR("Too small L2CAP PDU received"); net_buf_unref(buf); return; } cid = sys_le16_to_cpu(hdr->cid); net_buf_pull(buf, sizeof(*hdr)); BT_DBG("Packet for CID %u len %u", cid, buf->len); chan = bt_l2cap_le_lookup_rx_cid(conn, cid); if (!chan) { BT_WARN("Ignoring data for unknown CID 0x%04x", cid); net_buf_unref(buf); return; } l2cap_chan_recv(chan, buf); net_buf_unref(buf); }
/* Broadcast a single Unprovisioned Device beacon over PB-ADV and, when a
 * provisioning URI is configured, a follow-up URI advertisement.
 *
 * Returns 0 on success, -ENOBUFS when an advertising buffer cannot be
 * allocated. Compiles to a no-op returning 0 when CONFIG_BT_MESH_PB_ADV
 * is disabled.
 */
static int unprovisioned_beacon_send(void)
{
#if defined(CONFIG_BT_MESH_PB_ADV)
	const struct bt_mesh_prov *prov;
	struct net_buf *buf;
	u8_t uri_hash[16] = { 0 };
	u16_t oob_info;

	BT_DBG("");

	buf = bt_mesh_adv_create(BT_MESH_ADV_BEACON, UNPROV_XMIT_COUNT,
				 UNPROV_XMIT_INT, K_NO_WAIT);
	if (!buf) {
		BT_ERR("Unable to allocate beacon buffer");
		return -ENOBUFS;
	}

	prov = bt_mesh_prov_get();

	net_buf_add_u8(buf, BEACON_TYPE_UNPROVISIONED);
	net_buf_add_mem(buf, prov->uuid, 16);

	/* for tmall ble profile, OOB info default is 0x0000;
	 * URI hash is optional
	 */
	if (prov->uri && bt_mesh_s1(prov->uri, uri_hash) == 0) {
		oob_info = prov->oob_info | BT_MESH_PROV_OOB_URI;
	} else {
		oob_info = prov->oob_info;
	}

	net_buf_add_be16(buf, oob_info);
	/* Only the first 4 bytes of the s1() output form the URI Hash field */
	net_buf_add_mem(buf, uri_hash, 4);

	bt_mesh_adv_send(buf, NULL, NULL);
	net_buf_unref(buf);

	if (prov->uri) {
		size_t len;

		buf = bt_mesh_adv_create(BT_MESH_ADV_URI, UNPROV_XMIT_COUNT,
					 UNPROV_XMIT_INT, K_NO_WAIT);
		if (!buf) {
			BT_ERR("Unable to allocate URI buffer");
			return -ENOBUFS;
		}

		len = strlen(prov->uri);
		if (net_buf_tailroom(buf) < len) {
			/* Skip the URI advert but still report success */
			BT_WARN("Too long URI to fit advertising data");
		} else {
			net_buf_add_mem(buf, prov->uri, len);
			bt_mesh_adv_send(buf, NULL, NULL);
		}

		net_buf_unref(buf);
	}
#endif /* CONFIG_BT_MESH_PB_ADV */
	return 0;
}
/* Transmit an SDU on an LE credit-based channel, segmenting it as needed.
 * The first segment carries the SDU length header. On success the buffer
 * is released and the total number of bytes sent is returned; on error a
 * negative errno is returned and the caller keeps ownership of @buf.
 */
static int l2cap_chan_le_send_sdu(struct bt_l2cap_le_chan *ch,
				  struct net_buf *buf)
{
	int total_len = buf->len;
	int sent;

	if (total_len > ch->tx.mtu) {
		return -EMSGSIZE;
	}

	/* First segment: prepend the SDU length header */
	sent = l2cap_chan_le_send(ch, buf, BT_L2CAP_SDU_HDR_LEN);
	if (sent < 0) {
		return sent;
	}

	/* Push out the remaining segments without a header */
	while (sent < total_len) {
		int seg_len = l2cap_chan_le_send(ch, buf, 0);

		if (seg_len < 0) {
			return seg_len;
		}

		sent += seg_len;
	}

	BT_DBG("ch %p cid 0x%04x sent %u", ch, ch->tx.cid, sent);

	net_buf_unref(buf);

	return sent;
}
static int l2cap_chan_le_send(struct bt_l2cap_le_chan *ch, struct net_buf *buf, uint16_t sdu_hdr_len) { int len; /* Wait for credits */ if (k_sem_take(&ch->tx.credits, K_NO_WAIT)) { BT_DBG("No credits to transmit packet"); return -EAGAIN; } buf = l2cap_chan_create_seg(ch, buf, sdu_hdr_len); if (!buf) { return -ENOMEM; } /* Channel may have been disconnected while waiting for credits */ if (!ch->chan.conn) { net_buf_unref(buf); return -ECONNRESET; } BT_DBG("ch %p cid 0x%04x len %u credits %u", ch, ch->tx.cid, buf->len, k_sem_count_get(&ch->tx.credits)); len = buf->len; bt_l2cap_send(ch->chan.conn, ch->tx.cid, buf); return len; }
static uint8_t att_exec_write_rsp(struct bt_conn *conn, uint8_t flags) { struct flush_data data; memset(&data, 0, sizeof(data)); data.buf = bt_att_create_pdu(conn, BT_ATT_OP_EXEC_WRITE_RSP, 0); if (!data.buf) { return BT_ATT_ERR_UNLIKELY; } data.conn = conn; data.flags = flags; /* Apply to the whole database */ bt_gatt_foreach_attr(0x0001, 0xffff, flush_cb, &data); /* In case of error discard data */ if (data.err) { net_buf_unref(data.buf); return data.err; } bt_l2cap_send(conn, BT_L2CAP_CID_ATT, data.buf); return 0; }
static uint8_t att_handle_rsp(struct bt_att *att, void *pdu, uint16_t len, uint8_t err) { struct bt_att_req req; if (!att->req.func) { return 0; } /* Release cloned buffer */ if (att->req.buf) { net_buf_unref(att->req.buf); att->req.buf = NULL; } /* Reset request before callback so another request can be queued */ memcpy(&req, &att->req, sizeof(req)); att->req.func = NULL; req.func(att->chan.conn, err, pdu, len, req.user_data); att_req_destroy(&req); return 0; }
/* Detach a channel from its connection: notify the user, wake any writer
 * blocked on TX credits, and release a partially reassembled SDU.
 */
static void l2cap_chan_del(struct bt_l2cap_chan *chan)
{
	struct bt_l2cap_le_chan *le_chan = LE_CHAN(chan);

	BT_DBG("conn %p chan %p cid 0x%04x", chan->conn, le_chan,
	       le_chan->rx.cid);

	/* Clear the connection pointer before the user callback runs */
	le_chan->chan.conn = NULL;

	if (chan->ops && chan->ops->disconnected) {
		chan->ops->disconnected(chan);
	}

	/* There could be a writer waiting for credits so return a dummy
	 * credit to wake it up.
	 */
	if (!le_chan->tx.credits.nsig) {
		l2cap_chan_tx_give_credits(le_chan, 1);
	}

	/* Drop any partially reassembled SDU */
	if (le_chan->_sdu) {
		net_buf_unref(le_chan->_sdu);
		le_chan->_sdu = NULL;
		le_chan->_sdu_len = 0;
	}
}
static int l2cap_chan_le_send(struct bt_l2cap_le_chan *ch, struct net_buf *buf, uint16_t sdu_hdr_len) { int len; /* Wait for credits */ nano_sem_take(&ch->tx.credits, TICKS_UNLIMITED); buf = l2cap_chan_create_seg(ch, buf, sdu_hdr_len); if (!buf) { return -ENOMEM; } /* Channel may have been disconnected while waiting for credits */ if (!ch->chan.conn) { net_buf_unref(buf); return -ECONNRESET; } BT_DBG("ch %p cid 0x%04x len %u credits %u", ch, ch->tx.cid, buf->len, ch->tx.credits.nsig); len = buf->len; bt_l2cap_send(ch->chan.conn, ch->tx.cid, buf); return len; }
static uint8_t att_find_info_rsp(struct bt_att *att, uint16_t start_handle, uint16_t end_handle) { struct bt_conn *conn = att->chan.conn; struct find_info_data data; memset(&data, 0, sizeof(data)); data.buf = bt_att_create_pdu(conn, BT_ATT_OP_FIND_INFO_RSP, 0); if (!data.buf) { return BT_ATT_ERR_UNLIKELY; } data.att = att; bt_gatt_foreach_attr(start_handle, end_handle, find_info_cb, &data); if (!data.rsp) { net_buf_unref(data.buf); /* Respond since handle is set */ send_err_rsp(conn, BT_ATT_OP_FIND_INFO_REQ, start_handle, BT_ATT_ERR_ATTRIBUTE_NOT_FOUND); return 0; } bt_l2cap_send(conn, BT_L2CAP_CID_ATT, data.buf); return 0; }
static uint8_t att_read_group_rsp(struct bt_att *att, struct bt_uuid *uuid, uint16_t start_handle, uint16_t end_handle) { struct bt_conn *conn = att->chan.conn; struct read_group_data data; memset(&data, 0, sizeof(data)); data.buf = bt_att_create_pdu(conn, BT_ATT_OP_READ_GROUP_RSP, sizeof(*data.rsp)); if (!data.buf) { return BT_ATT_ERR_UNLIKELY; } data.att = att; data.uuid = uuid; data.rsp = net_buf_add(data.buf, sizeof(*data.rsp)); data.rsp->len = 0; data.group = NULL; bt_gatt_foreach_attr(start_handle, end_handle, read_group_cb, &data); if (!data.rsp->len) { net_buf_unref(data.buf); /* Respond here since handle is set */ send_err_rsp(conn, BT_ATT_OP_READ_GROUP_REQ, start_handle, BT_ATT_ERR_ATTRIBUTE_NOT_FOUND); return 0; } bt_l2cap_send(conn, BT_L2CAP_CID_ATT, data.buf); return 0; }
static int bt_hci_connect_br_cancel(struct bt_conn *conn) { struct bt_hci_cp_connect_cancel *cp; struct bt_hci_rp_connect_cancel *rp; struct net_buf *buf, *rsp; int err; buf = bt_hci_cmd_create(BT_HCI_OP_CONNECT_CANCEL, sizeof(*cp)); if (!buf) { return -ENOBUFS; } cp = net_buf_add(buf, sizeof(*cp)); memcpy(&cp->bdaddr, &conn->br.dst, sizeof(cp->bdaddr)); err = bt_hci_cmd_send_sync(BT_HCI_OP_CONNECT_CANCEL, buf, &rsp); if (err) { return err; } rp = (void *)rsp->data; err = rp->status ? -EIO : 0; net_buf_unref(rsp); return err; }
/* Release one reference on an IP buffer, with debug tracing.
 *
 * NOTE(review): the opening "#if" and the alternate (debug) signature that
 * match the "#endif" below are outside this chunk; under DEBUG_IP_BUFS the
 * hidden variant presumably adds "caller" and "line" parameters used by the
 * log statements here — confirm against the full file.
 */
void ip_buf_unref(struct net_buf *buf)
#endif
{
	/* Refuse a NULL buffer but log loudly: this indicates a caller bug */
	if (!buf) {
#ifdef DEBUG_IP_BUFS
		NET_DBG("*** ERROR *** buf %p (%s():%d)\n", buf, caller,
			line);
#else
		NET_DBG("*** ERROR *** buf %p\n", buf);
#endif
		return;
	}

	/* Detect double-free: a live buffer must hold at least one ref */
	if (!buf->ref) {
#ifdef DEBUG_IP_BUFS
		NET_DBG("*** ERROR *** buf %p is freed already (%s():%d)\n",
			buf, caller, line);
#else
		NET_DBG("*** ERROR *** buf %p is freed already\n", buf);
#endif
		return;
	}

	/* Trace the buffer type and the ref count as it will be after the
	 * drop below
	 */
#ifdef DEBUG_IP_BUFS
	NET_DBG("%s [%d] buf %p ref %d (%s():%d)\n",
		type2str(ip_buf_type(buf)), get_frees(ip_buf_type(buf)),
		buf, buf->ref - 1, caller, line);
#else
	NET_DBG("%s buf %p ref %d\n", type2str(ip_buf_type(buf)), buf,
		buf->ref - 1);
#endif

	net_buf_unref(buf);
}
/* Discard any partially received packet and restart the H:5 RX state
 * machine from the packet-start state.
 */
static void h5_reset_rx(void)
{
	struct net_buf *pending = h5.rx_buf;

	if (pending) {
		net_buf_unref(pending);
		h5.rx_buf = NULL;
	}

	h5.rx_state = START;
}
/* H:5 link-establishment thread: consumes link-control packets from
 * h5.rx_queue and drives the SYNC -> CONFIG -> ACTIVE handshake.
 * Never returns.
 */
static void rx_thread(void)
{
	BT_DBG("");

	while (true) {
		struct net_buf *buf;

		/* Block until the receive path queues a link packet */
		buf = net_buf_get(&h5.rx_queue, K_FOREVER);

		hexdump("=> ", buf->data, buf->len);

		if (!memcmp(buf->data, sync_req, sizeof(sync_req))) {
			/* A SYNC while ACTIVE means the peer reset under us */
			if (h5.link_state == ACTIVE) {
				/* TODO Reset H5 */
			}

			h5_send(sync_rsp, HCI_3WIRE_LINK_PKT,
				sizeof(sync_rsp));
		} else if (!memcmp(buf->data, sync_rsp, sizeof(sync_rsp))) {
			if (h5.link_state == ACTIVE) {
				/* TODO Reset H5 */
			}

			/* Sync done: enter INIT and start config exchange */
			h5.link_state = INIT;
			h5_set_txwin(conf_req);
			h5_send(conf_req, HCI_3WIRE_LINK_PKT,
				sizeof(conf_req));
		} else if (!memcmp(buf->data, conf_req, 2)) {
			/*
			 * The Host sends Config Response messages without a
			 * Configuration Field.
			 */
			h5_send(conf_rsp, HCI_3WIRE_LINK_PKT,
				sizeof(conf_rsp));

			/* Then send Config Request with Configuration Field */
			h5_set_txwin(conf_req);
			h5_send(conf_req, HCI_3WIRE_LINK_PKT,
				sizeof(conf_req));
		} else if (!memcmp(buf->data, conf_rsp, 2)) {
			h5.link_state = ACTIVE;
			if (buf->len > 2) {
				/* Configuration field present: low 3 bits
				 * carry the peer's sliding-window size
				 */
				h5.tx_win = (buf->data[2] & 0x07);
			}

			BT_DBG("Finished H5 configuration, tx_win %u",
			       h5.tx_win);
		} else {
			BT_ERR("Not handled yet %x %x",
			       buf->data[0], buf->data[1]);
		}

		net_buf_unref(buf);

		/* Make sure we don't hog the CPU if the rx_queue never
		 * gets empty.
		 */
		k_yield();
	}
}
/* Queue an ACL buffer for transmission on @conn.
 * Takes ownership of @buf: on any error the buffer is released and a
 * negative errno is returned; on success it is handed to the TX queue.
 */
int bt_conn_send(struct bt_conn *conn, struct net_buf *buf)
{
	int err;

	BT_DBG("conn handle %u buf len %u", conn->handle, buf->len);

	if (buf->user_data_size < BT_BUF_USER_DATA_MIN) {
		BT_ERR("Too small user data size");
		err = -EINVAL;
		goto drop;
	}

	if (conn->state != BT_CONN_CONNECTED) {
		BT_ERR("not connected!");
		err = -ENOTCONN;
		goto drop;
	}

	nano_fifo_put(&conn->tx_queue, buf);

	return 0;

drop:
	net_buf_unref(buf);
	return err;
}
/* Drop any partially reassembled ACL packet on @conn and clear the
 * reassembly state. No-op when no reassembly is in progress.
 */
static void bt_conn_reset_rx_state(struct bt_conn *conn)
{
	if (conn->rx_len) {
		net_buf_unref(conn->rx);
		conn->rx = NULL;
		conn->rx_len = 0;
	}
}
/* Transmit (the remainder of) a fragmented SDU on an LE credit-based
 * channel.
 *
 * @param sent  Bytes already transmitted by a previous attempt (0 for a
 *              fresh SDU). When a segment send hits -EAGAIN (out of
 *              credits) the running total is stashed in the buffer's user
 *              data so the caller can resume later.
 *
 * Returns a negative errno on failure (0, not -EAGAIN, when the very
 * first segment is credit-starved). NOTE(review): on full success this
 * returns the length of the LAST segment, not the SDU total — callers
 * appear to only test for < 0; confirm before relying on the magnitude.
 * On success ownership of @buf is consumed (unref'd here).
 */
static int l2cap_chan_le_send_sdu(struct bt_l2cap_le_chan *ch,
				  struct net_buf *buf, int sent)
{
	int ret, total_len;
	struct net_buf *frag;

	total_len = net_buf_frags_len(buf) + sent;

	if (total_len > ch->tx.mtu) {
		return -EMSGSIZE;
	}

	frag = buf;
	/* Skip an empty head fragment so a real payload is sent first */
	if (!frag->len && frag->frags) {
		frag = frag->frags;
	}

	if (!sent) {
		/* Add SDU length for the first segment */
		sent = l2cap_chan_le_send(ch, frag, BT_L2CAP_SDU_HDR_LEN);
		if (sent < 0) {
			if (sent == -EAGAIN) {
				sent = 0;
				/* Store sent data into user_data */
				memcpy(net_buf_user_data(buf), &sent,
				       sizeof(sent));
			}
			return sent;
		}
	}

	/* Send remaining segments */
	for (ret = 0; sent < total_len; sent += ret) {
		/* Proceed to next fragment */
		if (!frag->len) {
			frag = net_buf_frag_del(buf, frag);
		}

		ret = l2cap_chan_le_send(ch, frag, 0);
		if (ret < 0) {
			if (ret == -EAGAIN) {
				/* Store sent data into user_data */
				memcpy(net_buf_user_data(buf), &sent,
				       sizeof(sent));
			}
			return ret;
		}
	}

	BT_DBG("ch %p cid 0x%04x sent %u", ch, ch->tx.cid, sent);

	net_buf_unref(buf);

	return ret;
}
/*
 * Free pre-reserved RX buffers
 */
static void free_rx_bufs(struct ring_buf *rx_frag_list)
{
	struct net_buf *buf;

	for (int i = 0; i < rx_frag_list->len; i++) {
		/* Fix: index into the fragment list. The original read the
		 * array base (rx_frag_list->buf) on every iteration, so the
		 * same net_buf was unreffed len times while all the other
		 * pre-reserved buffers leaked.
		 */
		buf = (struct net_buf *)rx_frag_list->buf[i];
		if (buf) {
			net_buf_unref(buf);
		}
	}
}
/* Tear down an ATT request: release its buffer (if any), run the user's
 * destroy callback, and scrub the structure for reuse.
 */
static void att_req_destroy(struct bt_att_req *req)
{
	struct net_buf *pdu = req->buf;

	if (pdu) {
		net_buf_unref(pdu);
	}

	if (req->destroy) {
		req->destroy(req->user_data);
	}

	memset(req, 0, sizeof(*req));
}
void bt_l2cap_recv(struct bt_conn *conn, struct net_buf *buf) { struct bt_l2cap_hdr *hdr = (void *)buf->data; struct bt_l2cap_chan *chan; uint16_t cid; if (buf->len < sizeof(*hdr)) { BT_ERR("Too small L2CAP PDU received"); net_buf_unref(buf); return; } cid = sys_le16_to_cpu(hdr->cid); net_buf_pull(buf, sizeof(*hdr)); BT_DBG("Packet for CID %u len %u", cid, buf->len); switch (conn->type) { case BT_CONN_TYPE_LE: chan = bt_l2cap_le_lookup_rx_cid(conn, cid); break; #if defined(CONFIG_BLUETOOTH_BREDR) case BT_CONN_TYPE_BR: chan = bt_l2cap_br_lookup_rx_cid(conn, cid); break; #endif /* CONFIG_BLUETOOTH_BREDR */ default: chan = NULL; break; } if (!chan) { BT_WARN("Ignoring data for unknown CID 0x%04x", cid); net_buf_unref(buf); return; } l2cap_chan_recv(chan, buf); net_buf_unref(buf); }
static void l2cap_chan_destroy(struct bt_l2cap_chan *chan) { struct bt_l2cap_le_chan *ch = BT_L2CAP_LE_CHAN(chan); struct net_buf *buf; BT_DBG("chan %p cid 0x%04x", ch, ch->rx.cid); /* Cancel ongoing work */ k_delayed_work_cancel(&chan->rtx_work); /* Remove buffers on the TX queue */ while ((buf = net_buf_get(&ch->tx_queue, K_NO_WAIT))) { net_buf_unref(buf); } /* Destroy segmented SDU if it exists */ if (ch->_sdu) { net_buf_unref(ch->_sdu); ch->_sdu = NULL; ch->_sdu_len = 0; } }
static void conn_tx_fiber(int arg1, int arg2) { struct bt_conn *conn = (struct bt_conn *)arg1; struct net_buf *buf; BT_DBG("Started for handle %u", conn->handle); while (conn->state == BT_CONN_CONNECTED) { /* Get next ACL packet for connection */ buf = nano_fifo_get(&conn->tx_queue, TICKS_UNLIMITED); if (conn->state != BT_CONN_CONNECTED) { net_buf_unref(buf); break; } if (!send_buf(conn, buf)) { net_buf_unref(buf); } } BT_DBG("handle %u disconnected - cleaning up", conn->handle); /* Give back any allocated buffers */ while ((buf = nano_fifo_get(&conn->tx_queue, TICKS_NONE))) { net_buf_unref(buf); } /* Return any unacknowledged packets */ if (conn->pending_pkts) { while (conn->pending_pkts--) { nano_fiber_sem_give(bt_conn_get_pkts(conn)); } } bt_conn_reset_rx_state(conn); BT_DBG("handle %u exiting", conn->handle); bt_conn_unref(conn); }
/* Thin wrapper around bt_att_send() that logs failures and releases the
 * PDU on error. Returns whatever bt_att_send() returned.
 */
static int gatt_send(struct bt_conn *conn, struct net_buf *buf,
		     bt_att_func_t func, void *user_data,
		     bt_att_destroy_t destroy)
{
	int err = bt_att_send(conn, buf, func, user_data, destroy);

	if (err) {
		BT_ERR("Error sending ATT PDU: %d", err);
		net_buf_unref(buf);
	}

	return err;
}
/* UART TX interrupt handler for H:4: pushes the current buffer into the
 * UART FIFO, prefixing each HCI packet with its one-byte H:4 type, and
 * disables the TX interrupt once the queue is drained.
 */
static inline void process_tx(void)
{
	int bytes;

	if (!tx.buf) {
		tx.buf = net_buf_get(&tx.fifo, K_NO_WAIT);
		if (!tx.buf) {
			/* Spurious interrupt: nothing queued */
			BT_ERR("TX interrupt but no pending buffer!");
			uart_irq_tx_disable(h4_dev);
			return;
		}
	}

	/* tx.type doubles as "header sent" state: H4_NONE (0) means the
	 * type byte still has to go out before any payload.
	 */
	if (!tx.type) {
		switch (bt_buf_get_type(tx.buf)) {
		case BT_BUF_ACL_OUT:
			tx.type = H4_ACL;
			break;
		case BT_BUF_CMD:
			tx.type = H4_CMD;
			break;
		default:
			BT_ERR("Unknown buffer type");
			goto done;
		}

		bytes = uart_fifo_fill(h4_dev, &tx.type, 1);
		if (bytes != 1) {
			/* FIFO full: retry the type byte on the next IRQ */
			BT_WARN("Unable to send H:4 type");
			tx.type = H4_NONE;
			return;
		}
	}

	bytes = uart_fifo_fill(h4_dev, tx.buf->data, tx.buf->len);
	net_buf_pull(tx.buf, bytes);

	/* Partial write: keep the remainder for the next TX interrupt */
	if (tx.buf->len) {
		return;
	}

done:
	/* Current packet finished (or dropped): move on to the next one */
	tx.type = H4_NONE;
	net_buf_unref(tx.buf);
	tx.buf = net_buf_get(&tx.fifo, K_NO_WAIT);
	if (!tx.buf) {
		uart_irq_tx_disable(h4_dev);
	}
}
/* Reconcile the H:5 unacked-packet queue with the peer's latest ack
 * number (h5.rx_ack): determine how many queued packets the ack covers
 * and release them. Logs an error if the ack does not line up with our
 * transmit history.
 */
static void process_unack(void)
{
	uint8_t next_seq = h5.tx_seq;
	uint8_t number_removed = unack_queue_len;

	if (!unack_queue_len) {
		return;
	}

	BT_DBG("rx_ack %u tx_ack %u tx_seq %u unack_queue_len %u",
	       h5.rx_ack, h5.tx_seq, h5.tx_ack, unack_queue_len);

	/* Walk backwards through the 3-bit sequence space from tx_seq until
	 * we reach the acked number; each step is one packet that stays
	 * queued. Terminates because the sequence space has only 8 values.
	 */
	while (unack_queue_len > 0) {
		if (next_seq == h5.rx_ack) {
			/* Next sequence number is the same as last received
			 * ack number
			 */
			break;
		}

		number_removed--;
		/* Similar to (n - 1) % 8 with unsigned conversion */
		next_seq = (next_seq - 1) & 0x07;
	}

	if (next_seq != h5.rx_ack) {
		BT_ERR("Wrong sequence: rx_ack %u tx_seq %u next_seq %u",
		       h5.rx_ack, h5.tx_seq, next_seq);
	}

	BT_DBG("Need to remove %u packet from the queue", number_removed);

	/* Release the acknowledged packets */
	while (number_removed) {
		struct net_buf *buf = nano_fifo_get(&h5.unack_queue,
						    TICKS_NONE);

		if (!buf) {
			BT_ERR("Unack queue is empty");
			break;
		}

		/* TODO: print or do something with packet */
		BT_DBG("Remove buf from the unack_queue");

		net_buf_unref(buf);
		unack_queue_len--;
		number_removed--;
	}
}
/* Append an incoming segment to the SDU being reassembled on @chan and,
 * once the announced SDU length has fully arrived, deliver it through
 * ops->recv. Disconnects the channel on length mismatch or allocation
 * failure. The caller retains ownership of @buf.
 */
static void l2cap_chan_le_recv_sdu(struct bt_l2cap_le_chan *chan,
				   struct net_buf *buf)
{
	struct net_buf *frag;
	uint16_t len;

	BT_DBG("chan %p len %u sdu %zu", chan, buf->len,
	       net_buf_frags_len(chan->_sdu));

	/* Peer sent more than the SDU length it announced: protocol error */
	if (net_buf_frags_len(chan->_sdu) + buf->len > chan->_sdu_len) {
		BT_ERR("SDU length mismatch");
		bt_l2cap_chan_disconnect(&chan->chan);
		return;
	}

	/* Jump to last fragment */
	frag = net_buf_frag_last(chan->_sdu);

	while (buf->len) {
		/* Check if there is any space left in the current fragment */
		if (!net_buf_tailroom(frag)) {
			frag = l2cap_alloc_frag(chan);
			if (!frag) {
				BT_ERR("Unable to store SDU");
				bt_l2cap_chan_disconnect(&chan->chan);
				return;
			}
		}

		/* Copy as much as fits into this fragment */
		len = min(net_buf_tailroom(frag), buf->len);
		net_buf_add_mem(frag, buf->data, len);
		net_buf_pull(buf, len);

		BT_DBG("frag %p len %u", frag, frag->len);
	}

	if (net_buf_frags_len(chan->_sdu) == chan->_sdu_len) {
		/* Receiving complete SDU, notify channel and reset SDU buf */
		chan->chan.ops->recv(&chan->chan, chan->_sdu);
		net_buf_unref(chan->_sdu);
		chan->_sdu = NULL;
		chan->_sdu_len = 0;
	}

	/* Replenish the peer's credits so it can keep sending */
	l2cap_chan_update_credits(chan);
}
static uint8_t att_read_mult_req(struct bt_att *att, struct net_buf *buf) { struct bt_conn *conn = att->chan.conn; struct read_data data; uint16_t handle; memset(&data, 0, sizeof(data)); data.buf = bt_att_create_pdu(conn, BT_ATT_OP_READ_MULT_RSP, 0); if (!data.buf) { return BT_ATT_ERR_UNLIKELY; } data.att = att; while (buf->len >= sizeof(uint16_t)) { handle = net_buf_pull_le16(buf); BT_DBG("handle 0x%04x ", handle); /* An Error Response shall be sent by the server in response to * the Read Multiple Request [....] if a read operation is not * permitted on any of the Characteristic Values. * * If handle is not valid then return invalid handle error. * If handle is found error will be cleared by read_cb. */ data.err = BT_ATT_ERR_INVALID_HANDLE; bt_gatt_foreach_attr(handle, handle, read_cb, &data); /* Stop reading in case of error */ if (data.err) { net_buf_unref(data.buf); /* Respond here since handle is set */ send_err_rsp(conn, BT_ATT_OP_READ_MULT_REQ, handle, data.err); return 0; } } bt_l2cap_send(conn, BT_L2CAP_CID_ATT, data.buf); return 0; }
/* Transmit one HCI packet over the H:4 UART: emit the type byte, then the
 * payload one byte at a time. Consumes @buf on success; returns -EINVAL
 * (without consuming the buffer) for unsupported packet types.
 */
static int h4_send(enum bt_buf_type buf_type, struct net_buf *buf)
{
	switch (buf_type) {
	case BT_ACL_OUT:
		uart_poll_out(h4_dev, H4_ACL);
		break;
	case BT_CMD:
		uart_poll_out(h4_dev, H4_CMD);
		break;
	default:
		return -EINVAL;
	}

	/* Drain the payload one byte at a time */
	while (buf->len) {
		uart_poll_out(h4_dev, net_buf_pull_u8(buf));
	}

	net_buf_unref(buf);

	return 0;
}
/* Carve the next link-layer fragment off @buf into a freshly allocated
 * ACL PDU. Returns the new fragment, or NULL if the connection dropped
 * while the PDU was being allocated.
 *
 * NOTE(review): frag_len = min(MTU, tailroom) is copied unconditionally,
 * so this assumes the caller only invokes create_frag while buf->len
 * exceeds that length — confirm at the call site, otherwise the memcpy
 * over-reads @buf.
 */
static struct net_buf *create_frag(struct bt_conn *conn, struct net_buf *buf)
{
	struct net_buf *frag;
	uint16_t frag_len;

	/* May block until a PDU is available */
	frag = bt_conn_create_pdu(&frag_buf, 0);

	/* Connection may have been dropped while allocating */
	if (conn->state != BT_CONN_CONNECTED) {
		net_buf_unref(frag);
		return NULL;
	}

	frag_len = min(conn_mtu(conn), net_buf_tailroom(frag));

	memcpy(net_buf_add(frag, frag_len), buf->data, frag_len);
	net_buf_pull(buf, frag_len);

	return frag;
}
/* Disable LE scanning.
 *
 * The direct HCI implementation is compiled out (NOT_USED_FOR_NOW); the
 * active path forwards the request to the NBLE controller and reports
 * success unconditionally.
 */
static int bt_hci_stop_scanning(void)
{
#ifdef NOT_USED_FOR_NOW
	struct net_buf *buf, *rsp;
	struct bt_hci_cp_le_set_scan_enable *scan_enable;
	int err;

	if (!atomic_test_bit(bt_dev.flags, BT_DEV_SCANNING)) {
		return -EALREADY;
	}

	buf = bt_hci_cmd_create(BT_HCI_OP_LE_SET_SCAN_ENABLE,
				sizeof(*scan_enable));
	if (!buf) {
		return -ENOBUFS;
	}

	scan_enable = net_buf_add(buf, sizeof(*scan_enable));
	memset(scan_enable, 0, sizeof(*scan_enable));
	scan_enable->filter_dup = BT_HCI_LE_SCAN_FILTER_DUP_DISABLE;
	scan_enable->enable = BT_HCI_LE_SCAN_DISABLE;

	err = bt_hci_cmd_send_sync(BT_HCI_OP_LE_SET_SCAN_ENABLE, buf, &rsp);
	if (err) {
		return err;
	}

	/* Update scan state in case of success (0) status */
	err = rsp->data[0];
	if (!err) {
		atomic_clear_bit(bt_dev.flags, BT_DEV_SCANNING);
	}

	net_buf_unref(rsp);

	return err;
#endif
	nble_gap_stop_scan_req();

	return 0;
}