Example No. 1
/* Delayed work that takes care of retransmitting packets */
static void retx_timeout(struct k_work *work)
{
	ARG_UNUSED(work);

	BT_DBG("unack_queue_len %u", unack_queue_len);

	if (unack_queue_len) {
		struct k_fifo tmp_queue;
		struct net_buf *buf;

		k_fifo_init(&tmp_queue);

		/* Move pending packets to a temporary queue */
		while ((buf = net_buf_get(&h5.tx_queue, K_NO_WAIT))) {
			net_buf_put(&tmp_queue, buf);
		}

		/* Queue unacked packets at the beginning of the queue */
		while ((buf = net_buf_get(&h5.unack_queue, K_NO_WAIT))) {
			/* Include the packet type byte as well */
			net_buf_push(buf, sizeof(u8_t));
			net_buf_put(&h5.tx_queue, buf);
			h5.tx_seq = (h5.tx_seq - 1) & 0x07;
			unack_queue_len--;
		}

		/* Queue saved packets from temp queue */
		while ((buf = net_buf_get(&tmp_queue, K_NO_WAIT))) {
			net_buf_put(&h5.tx_queue, buf);
		}
	}
}
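A minimal sketch of the push/pull symmetry this retransmit path relies on (pool name, sizes, and the include path are assumptions, not part of the original driver): pulling a byte off the front of a net_buf leaves headroom, so the same byte can later be restored with net_buf_push().

#include <zephyr/net_buf.h>	/* <net/buf.h> in older trees */

NET_BUF_POOL_DEFINE(sketch_pool, 1, 32, 0, NULL);

static void push_pull_demo(void)
{
	struct net_buf *buf = net_buf_alloc(&sketch_pool, K_NO_WAIT);
	uint8_t type = 0x02;	/* hypothetical H5 packet type */

	if (!buf) {
		return;
	}

	net_buf_add_u8(buf, type);		/* payload starts with the type byte */
	net_buf_pull(buf, sizeof(type));	/* consumed on first transmission */
	net_buf_push(buf, sizeof(type));	/* restored for retransmission */

	net_buf_unref(buf);
}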
Example No. 2
static uint8_t att_signed_write_cmd(struct bt_att *att, struct net_buf *buf)
{
	struct bt_conn *conn = att->chan.conn;
	struct bt_att_signed_write_cmd *req;
	uint16_t handle;
	int err;

	req = (void *)buf->data;

	handle = sys_le16_to_cpu(req->handle);

	BT_DBG("handle 0x%04x", handle);

	/* Verifying the data requires the full buffer, including the attribute header */
	net_buf_push(buf, sizeof(struct bt_att_hdr));
	err = bt_smp_sign_verify(conn, buf);
	if (err) {
		BT_ERR("Error verifying data");
		/* No response for this command */
		return 0;
	}

	net_buf_pull(buf, sizeof(struct bt_att_hdr));
	net_buf_pull(buf, sizeof(*req));

	return att_write_rsp(conn, 0, 0, handle, 0, buf->data,
			     buf->len - sizeof(struct bt_att_signature));
}
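For reference, the PDU layout this handler assumes, per the ATT Signed Write Command definition in the Bluetooth Core spec (the diagram itself is an editorial addition):

/* ATT Signed Write Command on the wire:
 *
 *   opcode (1) | handle (2) | value (N) | signature (12)
 *
 * net_buf_push() temporarily restores the opcode so that
 * bt_smp_sign_verify() sees the whole signed PDU; the two pulls then
 * skip the header and the handle, leaving buf->data at the value while
 * buf->len still counts the trailing signature, hence the subtraction
 * of sizeof(struct bt_att_signature) above.
 */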
Example No. 3
void bt_l2cap_send(struct bt_conn *conn, uint16_t cid, struct net_buf *buf)
{
	struct bt_l2cap_hdr *hdr;

	hdr = net_buf_push(buf, sizeof(*hdr));
	hdr->len = sys_cpu_to_le16(buf->len - sizeof(*hdr));
	hdr->cid = sys_cpu_to_le16(cid);

	bt_conn_send(conn, buf);
}
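A hypothetical caller of bt_l2cap_send() (pool name, sizes, and the exact headroom are assumptions; struct bt_l2cap_hdr is internal to the stack): the buffer must be allocated with enough headroom for the headers that this function and the lower ACL layer will net_buf_push() in front of the payload.

NET_BUF_POOL_DEFINE(l2cap_tx_pool, 4, 128, 0, NULL);

static void send_l2cap_payload(struct bt_conn *conn, uint16_t cid,
			       const uint8_t *data, uint16_t len)
{
	struct net_buf *buf = net_buf_alloc(&l2cap_tx_pool, K_FOREVER);

	/* Leave room for the L2CAP header pushed above and the HCI ACL
	 * header pushed later in send_frag().
	 */
	net_buf_reserve(buf, sizeof(struct bt_hci_acl_hdr) +
			sizeof(struct bt_l2cap_hdr));
	net_buf_add_mem(buf, data, len);

	bt_l2cap_send(conn, cid, buf);
}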
Example No. 4
static bool send_frag(struct bt_conn *conn, struct net_buf *buf, uint8_t flags,
		      bool always_consume)
{
	struct bt_hci_acl_hdr *hdr;
	int err;

	BT_DBG("conn %p buf %p len %u flags 0x%02x", conn, buf, buf->len,
	       flags);

	/* Wait until the controller can accept ACL packets */
	nano_fiber_sem_take(bt_conn_get_pkts(conn), TICKS_UNLIMITED);

	/* Check for disconnection while waiting for pkts_sem */
	if (conn->state != BT_CONN_CONNECTED) {
		goto fail;
	}

	hdr = net_buf_push(buf, sizeof(*hdr));
	hdr->handle = sys_cpu_to_le16(bt_acl_handle_pack(conn->handle, flags));
	hdr->len = sys_cpu_to_le16(buf->len - sizeof(*hdr));

	bt_buf_set_type(buf, BT_BUF_ACL_OUT);

	err = bt_send(buf);
	if (err) {
		BT_ERR("Unable to send to driver (err %d)", err);
		goto fail;
	}

	conn->pending_pkts++;
	return true;

fail:
	nano_fiber_sem_give(bt_conn_get_pkts(conn));
	if (always_consume) {
		net_buf_unref(buf);
	}
	return false;
}
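The packing helper used above presumably follows the standard HCI ACL header layout, in which the 12-bit connection handle shares a 16-bit field with the packet-boundary and broadcast flags; a likely definition (an assumption, not taken from this tree):

/* Presumed shape: bits 0-11 carry the handle, bits 12-15 the flags */
#define bt_acl_handle_pack(h, flags) ((uint16_t)((h) | ((flags) << 12)))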
Example No. 5
/* Delayed fiber that takes care of retransmitting packets */
static void retx_fiber(int arg1, int arg2)
{
	ARG_UNUSED(arg1);
	ARG_UNUSED(arg2);

	BT_DBG("unack_queue_len %u", unack_queue_len);

	h5.retx_to = NULL;

	if (unack_queue_len) {
		struct nano_fifo tmp_queue;
		struct net_buf *buf;

		nano_fifo_init(&tmp_queue);

		/* Move pending packets to a temporary queue */
		while ((buf = nano_fifo_get(&h5.tx_queue, TICKS_NONE))) {
			nano_fifo_put(&tmp_queue, buf);
		}

		/* Queue unacked packets at the beginning of the queue */
		while ((buf = nano_fifo_get(&h5.unack_queue, TICKS_NONE))) {
			/* Include the packet type byte as well */
			net_buf_push(buf, sizeof(uint8_t));
			nano_fifo_put(&h5.tx_queue, buf);
			h5.tx_seq = (h5.tx_seq - 1) & 0x07;
			unack_queue_len--;
		}

		/* Queue saved packets from temp queue */
		while ((buf = nano_fifo_get(&tmp_queue, TICKS_NONE))) {
			nano_fifo_put(&h5.tx_queue, buf);
		}

		/* Analyze stack */
		stack_analyze("retx_stack", retx_stack, sizeof(retx_stack));
	}
}
Example No. 6
static int h5_queue(struct net_buf *buf)
{
	uint8_t type;

	BT_DBG("buf %p type %u len %u", buf, bt_buf_get_type(buf), buf->len);

	switch (bt_buf_get_type(buf)) {
	case BT_BUF_CMD:
		type = HCI_COMMAND_PKT;
		break;
	case BT_BUF_ACL_OUT:
		type = HCI_ACLDATA_PKT;
		break;
	default:
		BT_ERR("Unknown packet type %u", bt_buf_get_type(buf));
		return -1;
	}

	memcpy(net_buf_push(buf, sizeof(type)), &type, sizeof(type));

	nano_fifo_put(&h5.tx_queue, buf);

	return 0;
}
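The memcpy-into-net_buf_push() idiom above prepends the single type byte; newer Zephyr trees offer a typed helper that does the same in one call (assuming a tree where net_buf_push_u8() is available):

	net_buf_push_u8(buf, type);	/* equivalent one-byte prepend */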
Example No. 7
static int eth_tx(struct net_if *iface, struct net_pkt *pkt)
{
	struct device *const dev = net_if_get_device(iface);
	const struct eth_sam_dev_cfg *const cfg = DEV_CFG(dev);
	struct eth_sam_dev_data *const dev_data = DEV_DATA(dev);
	Gmac *gmac = cfg->regs;
	struct gmac_queue *queue = &dev_data->queue_list[0];
	struct gmac_desc_list *tx_desc_list = &queue->tx_desc_list;
	struct gmac_desc *tx_desc;
	struct net_buf *frag;
	u8_t *frag_data;
	u16_t frag_len;
	u32_t err_tx_flushed_count_at_entry = queue->err_tx_flushed_count;
	unsigned int key;

	__ASSERT(pkt, "pkt pointer is NULL");
	__ASSERT(pkt->frags, "Frame data missing");

	SYS_LOG_DBG("ETH tx");

	/* The first fragment is special: it contains the link layer
	 * (Ethernet, in our case) header. Move the data pointer back to
	 * account for the extra data at the beginning of the buffer.
	 */
	net_buf_push(pkt->frags, net_pkt_ll_reserve(pkt));

	frag = pkt->frags;
	while (frag) {
		frag_data = frag->data;
		frag_len = frag->len;

		/* Ensure cache coherency before the DMA read operation */
		DCACHE_CLEAN(frag_data, frag_len);

		k_sem_take(&queue->tx_desc_sem, K_FOREVER);

		/* The following section is critical and requires IRQ
		 * lock/unlock protection only because of the possibility
		 * that tx_error_handler() runs concurrently.
		 */
		key = irq_lock();

		/* Check whether tx_error_handler() ran in the meantime */
		if (queue->err_tx_flushed_count != err_tx_flushed_count_at_entry) {
			irq_unlock(key);
			return -EIO;
		}

		tx_desc = &tx_desc_list->buf[tx_desc_list->head];

		/* Update buffer descriptor address word */
		tx_desc->w0 = (u32_t)frag_data;
		/* Guarantee that address word is written before the status
		 * word to avoid race condition.
		 */
		__DMB();  /* data memory barrier */
		/* Update buffer descriptor status word (clear used bit) */
		tx_desc->w1 =
			  (frag_len & GMAC_TXW1_LEN)
			| (!frag->frags ? GMAC_TXW1_LASTBUFFER : 0)
			| (tx_desc_list->head == tx_desc_list->len - 1
			   ? GMAC_TXW1_WRAP : 0);

		/* Update descriptor position */
		MODULO_INC(tx_desc_list->head, tx_desc_list->len);

		__ASSERT(tx_desc_list->head != tx_desc_list->tail,
			 "tx_desc_list overflow");

		irq_unlock(key);

		/* Continue with the rest of the fragments (data only) */
		frag = frag->frags;
	}

	key = irq_lock();

	/* Check whether tx_error_handler() ran in the meantime */
	if (queue->err_tx_flushed_count != err_tx_flushed_count_at_entry) {
		irq_unlock(key);
		return -EIO;
	}

	/* Ensure the descriptor following the last one is marked as used */
	tx_desc = &tx_desc_list->buf[tx_desc_list->head];
	tx_desc->w1 |= GMAC_TXW1_USED;

	/* Account for a sent frame */
	ring_buf_put(&queue->tx_frames, POINTER_TO_UINT(pkt));

	irq_unlock(key);

	/* Start transmission */
	gmac->GMAC_NCR |= GMAC_NCR_TSTART;

	return 0;
}
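MODULO_INC is a driver-local helper; a plausible shape, assumed from how head is advanced and wrapped against the descriptor list length:

/* Advance a ring index and wrap it at the list length (assumed form) */
#define MODULO_INC(val, max) ((val) = ((val) + 1 < (max)) ? ((val) + 1) : 0)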
Example No. 8
static struct net_pkt *build_reply_pkt(const char *name,
				       struct net_context *context,
				       struct net_pkt *pkt)
{
	struct net_pkt *reply_pkt;
	struct net_buf *frag, *tmp;
	int header_len, recv_len, reply_len;

	NET_INFO("%s received %d bytes", name,
		 net_pkt_appdatalen(pkt));

	if (net_pkt_appdatalen(pkt) == 0) {
		return NULL;
	}

	reply_pkt = net_pkt_get_tx(context, K_FOREVER);

	NET_ASSERT(reply_pkt);

	recv_len = net_pkt_get_len(pkt);

	tmp = pkt->frags;

	/* The first fragment contains the IP header, so move the data
	 * pointer forward in order to skip it.
	 */
	header_len = net_pkt_appdata(pkt) - tmp->data;

	NET_ASSERT(header_len < CONFIG_NET_BUF_DATA_SIZE);

	/* After this pull, tmp->data points directly at the application
	 * data.
	 */
	net_buf_pull(tmp, header_len);

	while (tmp) {
		frag = net_pkt_get_data(context, K_FOREVER);

		if (!net_buf_headroom(tmp)) {
			/* If there are no link layer headers in the
			 * received fragment, then get rid of them in the
			 * sending fragment as well. We end up here if the
			 * MTU is larger than the fragment size, which is
			 * typical for Ethernet.
			 */
			net_buf_push(frag, net_buf_headroom(frag));

			frag->len = 0; /* to make fragment empty */

			/* Make sure to set the reserve so that on the
			 * sending side we add the link layer header if
			 * needed.
			 */
			net_pkt_set_ll_reserve(reply_pkt, 0);
		}

		NET_ASSERT(net_buf_tailroom(frag) >= tmp->len);

		memcpy(net_buf_add(frag, tmp->len), tmp->data, tmp->len);

		net_pkt_frag_add(reply_pkt, frag);

		tmp = net_pkt_frag_del(pkt, NULL, tmp);
	}

	reply_len = net_pkt_get_len(reply_pkt);

	NET_ASSERT_INFO((recv_len - header_len) == reply_len,
			"Received %d bytes, sending %d bytes",
			recv_len - header_len, reply_len);

	return reply_pkt;
}
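A hypothetical receive callback showing where build_reply_pkt() would sit in the pre-socket net_context API this sample uses (the callback name and the send step are assumptions):

static void echo_received(struct net_context *context, struct net_pkt *pkt,
			  int status, void *user_data)
{
	struct net_pkt *reply;

	if (!pkt || status < 0) {
		return;
	}

	reply = build_reply_pkt("echo", context, pkt);

	/* build_reply_pkt() consumed the fragments; drop the original */
	net_pkt_unref(pkt);

	if (reply) {
		/* hand reply off, e.g. via net_context_sendto() */
	}
}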