Example #1
/* Delayed work taking care of retransmitting packets */
static void retx_timeout(struct k_work *work)
{
	ARG_UNUSED(work);

	BT_DBG("unack_queue_len %u", unack_queue_len);

	if (unack_queue_len) {
		struct k_fifo tmp_queue;
		struct net_buf *buf;

		k_fifo_init(&tmp_queue);

		/* Move pending packets to a temporary queue */
		while ((buf = net_buf_get(&h5.tx_queue, K_NO_WAIT))) {
			net_buf_put(&tmp_queue, buf);
		}

		/* Queue unacked packets at the beginning of the tx queue */
		while ((buf = net_buf_get(&h5.unack_queue, K_NO_WAIT))) {
			/* Restore the packet type byte as well */
			net_buf_push(buf, sizeof(u8_t));
			net_buf_put(&h5.tx_queue, buf);
			h5.tx_seq = (h5.tx_seq - 1) & 0x07;
			unack_queue_len--;
		}

		/* Queue saved packets from temp queue */
		while ((buf = net_buf_get(&tmp_queue, K_NO_WAIT))) {
			net_buf_put(&h5.tx_queue, buf);
		}
	}
}
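A note on the two calls used throughout this example: net_buf_put() appends a buffer to the tail of a k_fifo and net_buf_get() pops one from the head, which is the round trip retx_timeout() performs three times. A minimal, self-contained sketch of that round trip (demo_pool and demo_queue are invented names, not part of the driver):

#include <zephyr.h>
#include <net/buf.h>

NET_BUF_POOL_DEFINE(demo_pool, 4, 32, 0, NULL);

static struct k_fifo demo_queue;

static void demo(void)
{
	struct net_buf *buf;

	k_fifo_init(&demo_queue);

	buf = net_buf_alloc(&demo_pool, K_NO_WAIT);
	if (!buf) {
		return;
	}

	net_buf_add_u8(buf, 0x01);      /* one byte of payload */
	net_buf_put(&demo_queue, buf);  /* enqueue at the tail */

	buf = net_buf_get(&demo_queue, K_NO_WAIT);  /* pop from the head */
	net_buf_unref(buf);             /* drop the reference */
}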
Example #2
static void net_tx_fiber(void)
{
	NET_DBG("Starting TX fiber\n");

	while (1) {
		struct net_buf *buf;
		uint8_t run;

		/* Get next packet from application - wait if necessary */
		buf = nano_fifo_get_wait(&netdev.tx_queue);

		NET_DBG("Sending (buf %p, len %u) to IP stack\n",
			buf, buf->len);

		if (check_and_send_packet(buf) < 0) {
			/* Release buffer on error */
			net_buf_put(buf);
			continue;
		}

		NET_BUF_CHECK_IF_NOT_IN_USE(buf);

		/* Check for any events that we might need to process */
		do {
			run = process_run(buf);
		} while (run > 0);

		/* Check stack usage (no-op if not enabled) */
		analyze_stacks(buf, &buf);
	}
}
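Examples #2, #6 and #12 are snapshots of the legacy, Contiki-derived IP stack in early Zephyr, where net_buf_put() took a single argument and released the buffer back to its pool; the remaining examples use the current net_buf API, where the same name enqueues a buffer on a FIFO. The two signatures side by side (paraphrased, so worth double-checking against the respective headers):

/* Legacy IP stack (Examples #2, #6, #12): release the buffer */
void net_buf_put(struct net_buf *buf);

/* Current net_buf API (remaining examples): append to a FIFO */
void net_buf_put(struct k_fifo *fifo, struct net_buf *buf);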
Example #3
int bt_l2cap_chan_send(struct bt_l2cap_chan *chan, struct net_buf *buf)
{
	int err;

	if (!buf) {
		return -EINVAL;
	}

	BT_DBG("chan %p buf %p len %u", chan, buf, buf->len);

	if (!chan->conn || chan->conn->state != BT_CONN_CONNECTED) {
		return -ENOTCONN;
	}

	if (IS_ENABLED(CONFIG_BLUETOOTH_BREDR) &&
	    chan->conn->type == BT_CONN_TYPE_BR) {
		return bt_l2cap_br_chan_send(chan, buf);
	}

	err = l2cap_chan_le_send_sdu(BT_L2CAP_LE_CHAN(chan), buf, 0);
	if (err < 0) {
		if (err == -EAGAIN) {
			/* Queue buffer to be sent later */
			net_buf_put(&(BT_L2CAP_LE_CHAN(chan))->tx_queue, buf);
			return *((int *)net_buf_user_data(buf));
		}
		BT_ERR("failed to send message %d", err);
	}

	return err;
}
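The -EAGAIN branch parks the buffer on the channel's tx_queue so it can be retried once credits become available again. A hypothetical sketch of that resume path (resume_tx() is an invented name; the real Zephyr internals differ, and a production version would also have to preserve ordering):

static void resume_tx(struct bt_l2cap_le_chan *ch)
{
	struct net_buf *buf;

	/* Retry everything parked while credits were exhausted */
	while ((buf = net_buf_get(&ch->tx_queue, K_NO_WAIT))) {
		if (l2cap_chan_le_send_sdu(ch, buf, 0) < 0) {
			/* Still blocked: park it again and stop */
			net_buf_put(&ch->tx_queue, buf);
			break;
		}
	}
}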
Example #4
File: h4.c Project: 01org/zephyr
static int h4_send(struct net_buf *buf)
{
	BT_DBG("buf %p type %u len %u", buf, bt_buf_get_type(buf), buf->len);

	net_buf_put(&tx.fifo, buf);
	uart_irq_tx_enable(h4_dev);

	return 0;
}
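h4_send() only queues the buffer and enables the TX interrupt; the actual transmission happens in the UART ISR, which drains tx.fifo. A sketch of that consumer side, loosely modeled on the same driver (process_tx and tx.buf are assumptions here):

static void process_tx(void)
{
	int written;

	if (!tx.buf) {
		tx.buf = net_buf_get(&tx.fifo, K_NO_WAIT);
		if (!tx.buf) {
			/* Queue drained: silence TX interrupts until
			 * h4_send() re-enables them.
			 */
			uart_irq_tx_disable(h4_dev);
			return;
		}
	}

	written = uart_fifo_fill(h4_dev, tx.buf->data, tx.buf->len);
	net_buf_pull(tx.buf, written);

	if (!tx.buf->len) {
		net_buf_unref(tx.buf);
		tx.buf = NULL;
	}
}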
Example #5
int bt_recv(struct net_buf *buf)
{
	BT_DBG("buf %p len %u", buf, buf->len);

	bt_monitor_send(bt_monitor_opcode(buf), buf->data, buf->len);

	/* Queue to RAW rx queue */
	net_buf_put(raw_rx, buf);

	return 0;
}
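Note that raw_rx is already a pointer to a k_fifo here, which is why it is passed without &, unlike the other call sites. The consumer would typically be an application thread blocking on the same FIFO; a minimal sketch (thread setup elided, raw_rx_thread is an invented name):

static void raw_rx_thread(void)
{
	while (1) {
		struct net_buf *buf = net_buf_get(raw_rx, K_FOREVER);

		/* Hand the raw HCI packet to the transport/application */
		hexdump("RX ", buf->data, buf->len);
		net_buf_unref(buf);
	}
}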
Example #6
static void net_rx_fiber(void)
{
	struct net_buf *buf;

	NET_DBG("Starting RX fiber\n");

	while (1) {
		buf = nano_fifo_get_wait(&netdev.rx_queue);

		/* Check stack usage (no-op if not enabled) */
		analyze_stacks(buf, &buf);

		if (!tcpip_input(buf)) {
			net_buf_put(buf);
		} else {
			NET_BUF_CHECK_IF_NOT_IN_USE(buf);
		}
	}
}
Example #7
static void tx_thread(void)
{
	BT_DBG("");

	/* FIXME: make periodic sending */
	h5_send(sync_req, HCI_3WIRE_LINK_PKT, sizeof(sync_req));

	while (true) {
		struct net_buf *buf;
		u8_t type;

		BT_DBG("link_state %u", h5.link_state);

		switch (h5.link_state) {
		case UNINIT:
			/* FIXME: send sync */
			k_sleep(100);
			break;
		case INIT:
			/* FIXME: send conf */
			k_sleep(100);
			break;
		case ACTIVE:
			buf = net_buf_get(&h5.tx_queue, K_FOREVER);
			type = h5_get_type(buf);

			h5_send(buf->data, type, buf->len);

			/* buf is dequeued from tx_queue and queued to unack
			 * queue.
			 */
			net_buf_put(&h5.unack_queue, buf);
			unack_queue_len++;

			k_delayed_work_submit(&retx_work, H5_TX_ACK_TIMEOUT);

			break;
		}
	}
}
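The k_delayed_work_submit() call arms the retransmit timer serviced by retx_timeout() from Example #1. The work item has to be initialized once before this thread runs, roughly like so (the init function name is an assumption):

static struct k_delayed_work retx_work;

static void h5_init_work(void)
{
	k_delayed_work_init(&retx_work, retx_timeout);
}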
Example #8
static void h5_process_complete_packet(u8_t *hdr)
{
	struct net_buf *buf;

	BT_DBG("");

	/* rx_ack should be in every packet */
	h5.rx_ack = H5_HDR_ACK(hdr);

	if (reliable_packet(H5_HDR_PKT_TYPE(hdr))) {
		/* For reliable packet increment next transmit ack number */
		h5.tx_ack = (h5.tx_ack + 1) % 8;
		/* Submit delayed work to ack the packet */
		k_delayed_work_submit(&ack_work, H5_RX_ACK_TIMEOUT);
	}

	h5_print_header(hdr, "RX: >");

	process_unack();

	buf = h5.rx_buf;
	h5.rx_buf = NULL;

	switch (H5_HDR_PKT_TYPE(hdr)) {
	case HCI_3WIRE_ACK_PKT:
		net_buf_unref(buf);
		break;
	case HCI_3WIRE_LINK_PKT:
		net_buf_put(&h5.rx_queue, buf);
		break;
	case HCI_EVENT_PKT:
	case HCI_ACLDATA_PKT:
		hexdump("=> ", buf->data, buf->len);
		bt_recv(buf);
		break;
	}
}
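The ack arithmetic here and the seq rewind in Example #1 both operate on the 3-bit sequence numbers of the H:5 three-wire protocol, so every step wraps modulo 8. Distilled into two standalone helpers (invented names, same arithmetic):

#include <stdint.h>

static inline uint8_t seq_advance(uint8_t seq)
{
	return (seq + 1) % 8;     /* next expected number */
}

static inline uint8_t seq_rewind(uint8_t seq)
{
	return (seq - 1) & 0x07;  /* step back when requeuing */
}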
Example #9
/**
 * The vendor handler executes in ISR context; queue the data for
 * later processing
 */
static int wpanusb_vendor_handler(struct usb_setup_packet *setup,
				  int32_t *len, uint8_t **data)
{
	struct net_buf *pkt, *buf;

	pkt = net_nbuf_get_reserve_tx(0);
	buf = net_nbuf_get_reserve_data(0);
	net_buf_frag_insert(pkt, buf);

	net_buf_add_u8(buf, setup->bRequest);

	/* Add seq to TX */
	if (setup->bRequest == TX) {
		net_buf_add_u8(buf, setup->wIndex);
	}

	memcpy(net_buf_add(buf, *len), *data, *len);

	SYS_LOG_DBG("len %u seq %u", *len, setup->wIndex);

	net_buf_put(&tx_queue, pkt);

	return 0;
}
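Because the handler runs in ISR context, net_buf_put() is the hand-off point to a worker thread. A hypothetical consumer for the queue above (handle_frame() is an invented placeholder):

static void tx_thread(void)
{
	while (1) {
		struct net_buf *pkt = net_buf_get(&tx_queue, K_FOREVER);
		struct net_buf *buf = pkt->frags;

		/* Process the data fragment queued by the vendor handler */
		handle_frame(buf->data, buf->len);

		/* Unref'ing the head releases the fragment chain as well */
		net_buf_unref(pkt);
	}
}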
Example #10
static int h5_queue(struct net_buf *buf)
{
	u8_t type;

	BT_DBG("buf %p type %u len %u", buf, bt_buf_get_type(buf), buf->len);

	switch (bt_buf_get_type(buf)) {
	case BT_BUF_CMD:
		type = HCI_COMMAND_PKT;
		break;
	case BT_BUF_ACL_OUT:
		type = HCI_ACLDATA_PKT;
		break;
	default:
		BT_ERR("Unknown packet type %u", bt_buf_get_type(buf));
		return -1;
	}

	memcpy(net_buf_push(buf, sizeof(type)), &type, sizeof(type));

	net_buf_put(&h5.tx_queue, buf);

	return 0;
}
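The type byte pushed here travels through tx_queue and is stripped again by the TX thread in Example #7 via h5_get_type(). In the driver that helper is essentially a one-liner; from memory, and worth verifying against the tree:

static u8_t h5_get_type(struct net_buf *buf)
{
	return net_buf_pull_u8(buf);
}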
Example #11
File: h4.c Project: 01org/zephyr
static inline void read_payload(void)
{
	struct net_buf *buf;
	bool prio;
	int read;

	if (!rx.buf) {
		rx.buf = get_rx(K_NO_WAIT);
		if (!rx.buf) {
			if (rx.discardable) {
				BT_WARN("Discarding event 0x%02x", rx.evt.evt);
				rx.discard = rx.remaining;
				reset_rx();
				return;
			}

			BT_WARN("Failed to allocate, deferring to rx_thread");
			uart_irq_rx_disable(h4_dev);
			return;
		}

		BT_DBG("Allocated rx.buf %p", rx.buf);

		if (rx.remaining > net_buf_tailroom(rx.buf)) {
			BT_ERR("Not enough space in buffer");
			rx.discard = rx.remaining;
			reset_rx();
			return;
		}

		copy_hdr(rx.buf);
	}

	read = uart_fifo_read(h4_dev, net_buf_tail(rx.buf), rx.remaining);
	net_buf_add(rx.buf, read);
	rx.remaining -= read;

	BT_DBG("got %d bytes, remaining %u", read, rx.remaining);
	BT_DBG("Payload (len %u): %s", rx.buf->len,
	       bt_hex(rx.buf->data, rx.buf->len));

	if (rx.remaining) {
		return;
	}

	prio = (rx.type == H4_EVT && bt_hci_evt_is_prio(rx.evt.evt));

	buf = rx.buf;
	rx.buf = NULL;

	if (rx.type == H4_EVT) {
		bt_buf_set_type(buf, BT_BUF_EVT);
	} else {
		bt_buf_set_type(buf, BT_BUF_ACL_IN);
	}

	reset_rx();

	if (prio) {
		BT_DBG("Calling bt_recv_prio(%p)", buf);
		bt_recv_prio(buf);
	} else {
		BT_DBG("Putting buf %p to rx fifo", buf);
		net_buf_put(&rx.fifo, buf);
	}
}
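The prio flag selects between the synchronous bt_recv_prio() path and the FIFO hand-off to the RX thread; the test singles out events the host stack must process immediately. A sketch of that predicate, modeled on the old bt_hci_evt_is_prio() helper (the exact event list is from memory and should be checked):

static inline bool evt_is_prio(u8_t evt)
{
	switch (evt) {
	case BT_HCI_EVT_CMD_COMPLETE:
	case BT_HCI_EVT_CMD_STATUS:
	case BT_HCI_EVT_NUM_COMPLETED_PACKETS:
		return true;
	default:
		return false;
	}
}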
Example #12
/*---------------------------------------------------------------------------*/
static void
eventhandler(process_event_t ev, process_data_t data, struct net_buf *buf)
{
#if UIP_TCP
  static unsigned char i;
  register struct listenport *l;
#endif /*UIP_TCP*/
  struct process *p;

  switch(ev) {
    case PROCESS_EVENT_EXITED:
      /* This is the event we get if a process has exited. We go through
         the TCP/IP tables to see if this process had any open
         connections or listening TCP ports. If so, we'll close those
         connections. */

      p = (struct process *)data;
#if UIP_TCP
      l = s.listenports;
      for(i = 0; i < UIP_LISTENPORTS; ++i) {
        if(l->p == p) {
          uip_unlisten(l->port);
          l->port = 0;
          l->p = PROCESS_NONE;
        }
        ++l;
      }
	 
      {
        struct uip_conn *cptr;
	    
        for(cptr = &uip_conns[0]; cptr < &uip_conns[UIP_CONNS]; ++cptr) {
          if(cptr->appstate.p == p) {
            cptr->appstate.p = PROCESS_NONE;
            cptr->tcpstateflags = UIP_CLOSED;
          }
        }
      }
#endif /* UIP_TCP */
#if UIP_UDP
      {
        struct uip_udp_conn *cptr;

        for(cptr = &uip_udp_conns[0];
            cptr < &uip_udp_conns[UIP_UDP_CONNS]; ++cptr) {
          if(cptr->appstate.p == p) {
            cptr->lport = 0;
          }
        }
      }
#endif /* UIP_UDP */
      break;

    case PROCESS_EVENT_TIMER:
      /* We get this event if one of our timers has expired. */
      {
        /* Check the clock to see if we should call the periodic uIP
           processing. */
        if(data == &periodic &&
           etimer_expired(&periodic)) {
#if UIP_TCP
          for(i = 0; i < UIP_CONNS; ++i) {
            if(uip_conn_active(i)) {
              /* Only restart the timer if there are active
                 connections. */
              etimer_restart(&periodic);
              uip_periodic(i);
#if NETSTACK_CONF_WITH_IPV6
	      if (!tcpip_ipv6_output(buf)) {
                net_buf_put(buf);
	      }
#else
              if(uip_len(buf) > 0) {
		PRINTF("tcpip_output from periodic len %d\n", uip_len(buf));
                tcpip_output(buf, NULL);
		PRINTF("tcpip_output after periodic len %d\n", uip_len(buf));
              }
#endif /* NETSTACK_CONF_WITH_IPV6 */
            }
          }
#endif /* UIP_TCP */
#if UIP_CONF_IP_FORWARD
          uip_fw_periodic();
#endif /* UIP_CONF_IP_FORWARD */
        }
        
#if NETSTACK_CONF_WITH_IPV6
#if UIP_CONF_IPV6_REASSEMBLY
        /*
         * check the timer for reassembly
         */
        if(data == &uip_reass_timer &&
           etimer_expired(&uip_reass_timer)) {
          uip_reass_over();
          tcpip_ipv6_output(buf);
        }
#endif /* UIP_CONF_IPV6_REASSEMBLY */
        /*
         * check the different timers for neighbor discovery and
         * stateless autoconfiguration
         */
        /*if(data == &uip_ds6_timer_periodic &&
           etimer_expired(&uip_ds6_timer_periodic)) {
          uip_ds6_periodic();
          tcpip_ipv6_output();
        }*/
#if !UIP_CONF_ROUTER
        if(data == &uip_ds6_timer_rs &&
           etimer_expired(&uip_ds6_timer_rs)) {
          uip_ds6_send_rs(buf);
	  if (!tcpip_ipv6_output(buf)) {
            net_buf_put(buf);
	  }
        }
#endif /* !UIP_CONF_ROUTER */
        if(data == &uip_ds6_timer_periodic &&
           etimer_expired(&uip_ds6_timer_periodic)) {
          uip_ds6_periodic(buf);
	  if (!tcpip_ipv6_output(buf)) {
            net_buf_put(buf);
	  }
        }
#endif /* NETSTACK_CONF_WITH_IPV6 */
      }
      break;
	 
#if UIP_TCP
    case TCP_POLL:
      if(data != NULL) {
        uip_poll_conn(data);
#if NETSTACK_CONF_WITH_IPV6
        tcpip_ipv6_output(buf);
#else /* NETSTACK_CONF_WITH_IPV6 */
        if(uip_len(buf) > 0) {
	  PRINTF("tcpip_output from tcp poll len %d\n", uip_len(buf));
          tcpip_output(buf, NULL);
        }
#endif /* NETSTACK_CONF_WITH_IPV6 */
        /* Start the periodic polling, if it isn't already active. */
        start_periodic_tcp_timer();
      }
      break;
#endif /* UIP_TCP */
#if UIP_UDP
    case UDP_POLL:
      if(data != NULL) {
        uip_udp_periodic_conn(buf, data);
#if NETSTACK_CONF_WITH_IPV6
        if (!tcpip_ipv6_output(buf)) {
          net_buf_put(buf);
	}
#else
        if(uip_len(buf) > 0) {
          tcpip_output(buf, NULL);
        }
#endif /* NETSTACK_CONF_WITH_IPV6 */
      }
      break;
#endif /* UIP_UDP */

    case PACKET_INPUT:
      packet_input(buf);
      break;
  }
}
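The recurring pattern in this legacy-stack example is "try to send, release on failure": the call sites imply that tcpip_ipv6_output() returns nonzero once it has taken ownership of the buffer. Distilled (helper name invented; note the single-argument, legacy net_buf_put()):

static void send_or_release(struct net_buf *buf)
{
	if (!tcpip_ipv6_output(buf)) {
		net_buf_put(buf);  /* legacy single-arg put: free the buffer */
	}
}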