Example #1
char *zephyr_getline(void)
{
  static struct uart_console_input *cmd;

  /* Recycle the cmd buffer returned last time */
  if (cmd != NULL)
  {
    nano_fifo_put(&free_queue, cmd);
  }

  cmd = nano_fifo_get(&used_queue, TICKS_UNLIMITED);
  return cmd->line;
}
Example #2
int
sol_mainloop_event_post(const struct mainloop_event *me)
{
    struct me_fifo_entry *mfe;

    mfe = nano_fifo_get(&_sol_mainloop_free_events, TICKS_NONE);
    SOL_NULL_CHECK(mfe, -ENOMEM);

    mfe->me = *me;
    nano_fifo_put(&_sol_mainloop_pending_events, mfe);

    return 0;
}
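Example #2 shows only the producer half of the Soletta event-queue pair: an entry is taken from the free pool and pushed onto the pending queue with nano_fifo_put(). The consumer half does not appear on this page; the sketch below is a minimal, hypothetical version of it, assuming that struct mainloop_event carries a callback plus a data pointer and that struct me_fifo_entry reserves its first word for the FIFO link, as the legacy Zephyr nano_fifo API requires. Only the queue names, me_fifo_entry and mainloop_event come from the examples; everything else is illustrative.

struct me_fifo_entry {
    void *reserved;           /* first word is used by nano_fifo as a link */
    struct mainloop_event me; /* callback + data posted in Example #2 */
};

/* Hypothetical consumer: drain the pending queue without blocking, run each
 * callback, then recycle the entry into the free pool so that
 * sol_mainloop_event_post() can reuse it.
 */
static void
mainloop_dispatch_events(void)
{
    struct me_fifo_entry *mfe;

    while ((mfe = nano_fifo_get(&_sol_mainloop_pending_events, TICKS_NONE))) {
        if (mfe->me.cb)
            mfe->me.cb((void *)mfe->me.data);
        nano_fifo_put(&_sol_mainloop_free_events, mfe);
    }
}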
Example #3
/* Delayed fiber that takes care of retransmitting packets */
static void retx_fiber(int arg1, int arg2)
{
	ARG_UNUSED(arg1);
	ARG_UNUSED(arg2);

	BT_DBG("unack_queue_len %u", unack_queue_len);

	h5.retx_to = NULL;

	if (unack_queue_len) {
		struct nano_fifo tmp_queue;
		struct net_buf *buf;

		nano_fifo_init(&tmp_queue);

		/* Move everything from the TX queue to a temporary queue */
		while ((buf = nano_fifo_get(&h5.tx_queue, TICKS_NONE))) {
			nano_fifo_put(&tmp_queue, buf);
		}

		/* Re-queue unacked packets at the beginning of the TX queue */
		while ((buf = nano_fifo_get(&h5.unack_queue, TICKS_NONE))) {
			/* include also packet type */
			net_buf_push(buf, sizeof(uint8_t));
			nano_fifo_put(&h5.tx_queue, buf);
			h5.tx_seq = (h5.tx_seq - 1) & 0x07;
			unack_queue_len--;
		}

		/* Put the saved packets back from the temporary queue */
		while ((buf = nano_fifo_get(&tmp_queue, TICKS_NONE))) {
			nano_fifo_put(&h5.tx_queue, buf);
		}

		/* Analyze stack */
		stack_analyze("retx_stack", retx_stack, sizeof(retx_stack));
	}
}
Example #4
void zephyr_getline_init(void)
{
  int i;

  nano_fifo_init(&used_queue);
  nano_fifo_init(&free_queue);
  for (i = 0; i < sizeof(line_bufs) / sizeof(*line_bufs); i++)
  {
    nano_fifo_put(&free_queue, &line_bufs[i]);
  }

  /* Zephyr UART handler takes an empty buffer from free_queue,
     stores UART input in it until EOL, and then puts it into
     used_queue. */
  uart_register_input(&free_queue, &used_queue, NULL);
}
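Examples #1 and #4 rely on a queue pair and a static buffer pool that are declared outside the snippets. The declarations below are an illustrative reconstruction, not the actual Zephyr console header: the real struct uart_console_input is defined by the console driver, but any item placed on a legacy nano_fifo must reserve its first word for the kernel's link pointer, which is why that field is shown explicitly. The buffer size and pool depth are arbitrary assumptions.

#define MAX_LINE_LEN  128   /* assumed line length */
#define NUM_LINE_BUFS 2     /* assumed pool depth */

struct uart_console_input {
  void *reserved;           /* first word reserved for the nano_fifo link */
  char line[MAX_LINE_LEN];  /* payload returned by zephyr_getline() */
};

static struct nano_fifo free_queue;  /* empty buffers for the UART handler */
static struct nano_fifo used_queue;  /* completed lines for zephyr_getline() */
static struct uart_console_input line_bufs[NUM_LINE_BUFS];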
Example #5
static void tx_fiber(void)
{
	BT_DBG("");

	/* FIXME: make periodic sending */
	h5_send(sync_req, HCI_3WIRE_LINK_PKT, sizeof(sync_req));

	while (true) {
		struct net_buf *buf;
		uint8_t type;

		BT_DBG("link_state %u", h5.link_state);

		switch (h5.link_state) {
		case UNINIT:
			/* FIXME: send sync */
			fiber_sleep(10);
			break;
		case INIT:
			/* FIXME: send conf */
			fiber_sleep(10);
			break;
		case ACTIVE:
			buf = nano_fifo_get(&h5.tx_queue, TICKS_UNLIMITED);
			type = h5_get_type(buf);

			h5_send(buf->data, type, buf->len);

			/* buf is dequeued from tx_queue and queued to unack
			 * queue.
			 */
			nano_fifo_put(&h5.unack_queue, buf);
			unack_queue_len++;

			if (h5.retx_to) {
				fiber_delayed_start_cancel(h5.retx_to);
			}

			h5.retx_to = fiber_delayed_start(retx_stack,
							 sizeof(retx_stack),
							 retx_fiber, 0, 0, 7, 0,
							 H5_TX_ACK_TIMEOUT);
			break;
		}
	}
}
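Examples #3, #5, #11 and #13 all operate on a shared h5 context whose definition is outside these snippets. The fragment below lists only the fields those examples actually touch, as an assumed reconstruction; the real structure in the Zephyr H:5 (three-wire UART) driver contains additional state, and the field types shown here are guesses.

/* Assumed shape of the shared context used by the H:5 examples. */
static struct {
	struct nano_fifo tx_queue;    /* packets waiting to be sent (Ex. #5, #13) */
	struct nano_fifo unack_queue; /* sent but not yet acknowledged (Ex. #3, #5) */
	struct nano_fifo rx_queue;    /* received link-control packets (Ex. #11) */
	struct net_buf *rx_buf;       /* packet currently being reassembled */
	uint8_t tx_seq;               /* next transmit sequence number */
	uint8_t tx_ack;               /* next expected ack number */
	uint8_t rx_ack;               /* ack number from the last received header */
	enum { UNINIT, INIT, ACTIVE } link_state; /* used in Example #5 */
	void *retx_to;                /* handle of the delayed retransmit fiber */
	void *ack_to;                 /* handle of the delayed ack fiber */
} h5;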
Example #6
void tester_init(void)
{
	int i;

	nano_fifo_init(&cmds_queue);
	nano_fifo_init(&avail_queue);

	for (i = 0; i < CMD_QUEUED; i++) {
		nano_fifo_put(&avail_queue, &cmd_buf[i * BTP_MTU]);
	}

	task_fiber_start(stack, STACKSIZE, cmd_handler, 0, 0, 7, 0);

	uart_pipe_register(nano_fifo_get(&avail_queue, TICKS_NONE),
			   BTP_MTU, recv_cb);

	printk("BT tester initialized\n");
}
Example #7
int
sol_mainloop_impl_platform_init(void)
{
    int i;

    sol_mainloop_zephyr_common_init();

    nano_fifo_init(&_sol_mainloop_pending_events);
    nano_fifo_init(&_sol_mainloop_free_events);
    for (i = 0; i < SOL_UTIL_ARRAY_SIZE(_events); i++) {
        struct me_fifo_entry *mfe;

        mfe = &_events[i];
        nano_fifo_put(&_sol_mainloop_free_events, mfe);
    }

    return 0;
}
Example #8
int bt_conn_send(struct bt_conn *conn, struct net_buf *buf)
{
	BT_DBG("conn handle %u buf len %u", conn->handle, buf->len);

	if (buf->user_data_size < BT_BUF_USER_DATA_MIN) {
		BT_ERR("Too small user data size");
		net_buf_unref(buf);
		return -EINVAL;
	}

	if (conn->state != BT_CONN_CONNECTED) {
		BT_ERR("not connected!");
		net_buf_unref(buf);
		return -ENOTCONN;
	}

	nano_fifo_put(&conn->tx_queue, buf);
	return 0;
}
Example #9
int
sol_mainloop_impl_platform_init(void)
{
    int i;

    main_thread_id = sys_thread_self_get();
    nano_sem_init(&_sol_mainloop_lock);
    nano_sem_give(&_sol_mainloop_lock);

    nano_fifo_init(&_sol_mainloop_pending_events);
    nano_fifo_init(&_sol_mainloop_free_events);
    for (i = 0; i < sol_util_array_size(_events); i++) {
        struct me_fifo_entry *mfe;

        mfe = &_events[i];
        nano_fifo_put(&_sol_mainloop_free_events, mfe);
    }

    return 0;
}
Example #10
static void udp_packet_receive(struct simple_udp_connection *c,
			       const uip_ipaddr_t *source_addr,
			       uint16_t source_port,
			       const uip_ipaddr_t *dest_addr,
			       uint16_t dest_port,
			       const uint8_t *data, uint16_t datalen,
			       void *user_data,
			       struct net_buf *buf)
{
	struct net_context *context = user_data;

	if (!context) {
		return;
	}

	uip_appdatalen(buf) = datalen;

	NET_DBG("packet received buf %p context %p len %d\n",
		buf, context, datalen);

	nano_fifo_put(net_context_get_queue(context), buf);
}
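Example #10 (and Example #12 below) push received buffers onto a per-context RX queue obtained with net_context_get_queue(). The reader side is not included in this listing; under the assumption that the application drains the same queue directly, a minimal consumer could look like the sketch below (the helper name is hypothetical, only the calls that already appear in the examples are used).

/* Hypothetical reader for the per-context RX queue filled in Example #10. */
static struct net_buf *context_read_packet(struct net_context *context)
{
	/* Block until udp_packet_receive() has queued a buffer */
	return nano_fifo_get(net_context_get_queue(context), TICKS_UNLIMITED);
}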
Example #11
static void h5_process_complete_packet(uint8_t *hdr)
{
	struct net_buf *buf;

	BT_DBG("");

	/* rx_ack should be in every packet */
	h5.rx_ack = H5_HDR_ACK(hdr);

	if (reliable_packet(H5_HDR_PKT_TYPE(hdr))) {
		/* For a reliable packet, increment the next transmit ack number */
		h5.tx_ack = (h5.tx_ack + 1) % 8;
		/* Start delayed fiber to ack the packet */
		h5.ack_to = fiber_delayed_start(ack_stack, sizeof(ack_stack),
						ack_fiber, 0, 0, 7, 0,
						H5_RX_ACK_TIMEOUT);
	}

	h5_print_header(hdr, "RX: >");

	process_unack();

	buf = h5.rx_buf;
	h5.rx_buf = NULL;

	switch (H5_HDR_PKT_TYPE(hdr)) {
	case HCI_3WIRE_ACK_PKT:
		net_buf_unref(buf);
		break;
	case HCI_3WIRE_LINK_PKT:
		nano_fifo_put(&h5.rx_queue, buf);
		break;
	case HCI_EVENT_PKT:
	case HCI_ACLDATA_PKT:
		hexdump("=> ", buf->data, buf->len);
		bt_recv(buf);
		break;
	}
}
Example #12
static void udp_packet_reply(struct simple_udp_connection *c,
			     const uip_ipaddr_t *source_addr,
			     uint16_t source_port,
			     const uip_ipaddr_t *dest_addr,
			     uint16_t dest_port,
			     const uint8_t *data, uint16_t datalen,
			     void *user_data,
			     struct net_buf *not_used)
{
	struct net_buf *buf = user_data;
	struct nano_fifo *queue;

	if (!buf->context) {
		return;
	}

	NET_DBG("packet reply buf %p context %p len %d\n", buf, buf->context,
		buf->len);

	queue = net_context_get_queue(buf->context);

	nano_fifo_put(queue, buf);
}
Example #13
static int h5_queue(struct net_buf *buf)
{
	uint8_t type;

	BT_DBG("buf %p type %u len %u", buf, bt_buf_get_type(buf), buf->len);

	switch (bt_buf_get_type(buf)) {
	case BT_BUF_CMD:
		type = HCI_COMMAND_PKT;
		break;
	case BT_BUF_ACL_OUT:
		type = HCI_ACLDATA_PKT;
		break;
	default:
		BT_ERR("Unknown packet type %u", bt_buf_get_type(buf));
		return -1;
	}

	memcpy(net_buf_push(buf, sizeof(type)), &type, sizeof(type));

	nano_fifo_put(&h5.tx_queue, buf);

	return 0;
}
Example #14
/* This is called by contiki/ip/tcpip.c:tcpip_uipcall() when packet
 * is processed.
 */
PROCESS_THREAD(tcp, ev, data, buf, user_data)
{
	PROCESS_BEGIN();

	while(1) {
		PROCESS_YIELD_UNTIL(ev == tcpip_event);

		if (POINTER_TO_INT(data) == TCP_WRITE_EVENT) {
			/* We want to send data to peer. */
			struct net_context *context = user_data;

			if (!context) {
				continue;
			}

			do {
				context = user_data;
				if (!context || !buf) {
					break;
				}

				if (!context->ps.net_buf ||
				    context->ps.net_buf != buf) {
					NET_DBG("psock init %p buf %p\n",
						&context->ps, buf);
					PSOCK_INIT(&context->ps, buf);
				}

				handle_tcp_connection(&context->ps,
						      POINTER_TO_INT(data),
						      buf);

				PROCESS_WAIT_EVENT_UNTIL(ev == tcpip_event);

				if (POINTER_TO_INT(data) != TCP_WRITE_EVENT) {
					goto read_data;
				}
			} while(!(uip_closed(buf)  ||
				  uip_aborted(buf) ||
				  uip_timedout(buf)));

			context = user_data;

			if (context &&
			    context->tcp_type == NET_TCP_TYPE_CLIENT) {
				NET_DBG("\nConnection closed.\n");
				ip_buf_sent_status(buf) = -ECONNRESET;
			}

			continue;
		}

	read_data:
		/* We are receiving data from peer. */
		if (buf && uip_newdata(buf)) {
			struct net_buf *clone;

			if (!uip_len(buf)) {
				continue;
			}

			/* Note that the uIP stack will reuse this buffer when
			 * sending an ACK to the peer host, which happens right
			 * after this function returns. Because of this we
			 * cannot use the same buffer to pass the data to the
			 * application.
			 */
			clone = net_buf_clone(buf);
			if (!clone) {
				NET_ERR("No enough RX buffers, "
					"packet %p discarded\n", buf);
				continue;
			}

			ip_buf_appdata(clone) = uip_buf(clone) +
				(ip_buf_appdata(buf) - (void *)uip_buf(buf));
			ip_buf_appdatalen(clone) = uip_len(buf);
			ip_buf_len(clone) = ip_buf_len(buf);
			ip_buf_context(clone) = user_data;
			uip_set_conn(clone) = uip_conn(buf);
			uip_flags(clone) = uip_flags(buf);
			uip_flags(clone) |= UIP_CONNECTED;

			NET_DBG("packet received context %p buf %p len %d "
				"appdata %p appdatalen %d\n",
				ip_buf_context(clone),
				clone,
				ip_buf_len(clone),
				ip_buf_appdata(clone),
				ip_buf_appdatalen(clone));

			nano_fifo_put(net_context_get_queue(user_data), clone);

			/* Let the application read the data now */
			fiber_yield();
		}
	}

	PROCESS_END();
}
Example #15
void put_scratch_packet(void *packet)
{
	nano_fifo_put(&scratch_q_packets_fifo, packet);
}
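Example #15 is the smallest possible use of nano_fifo_put(): handing a scratch packet back to a free pool. The matching getter is not part of this listing; a hypothetical counterpart would simply block on the same FIFO until a packet is available.

/* Hypothetical counterpart to Example #15: take a scratch packet from the
 * pool, waiting if none is currently free.
 */
void *get_scratch_packet(void)
{
	return nano_fifo_get(&scratch_q_packets_fifo, TICKS_UNLIMITED);
}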
Example #16
void bt_conn_set_state(struct bt_conn *conn, bt_conn_state_t state)
{
	bt_conn_state_t old_state;

	BT_DBG("%s -> %s", state2str(conn->state), state2str(state));

	if (conn->state == state) {
		BT_WARN("no transition");
		return;
	}

	old_state = conn->state;
	conn->state = state;

	/* Actions needed for exiting the old state */
	switch (old_state) {
	case BT_CONN_DISCONNECTED:
		/* Take a reference for the first state transition after
		 * bt_conn_add_le() and keep it until reaching DISCONNECTED
		 * again.
		 */
		bt_conn_ref(conn);
		break;
	case BT_CONN_CONNECT:
		if (conn->timeout) {
			fiber_delayed_start_cancel(conn->timeout);
			conn->timeout = NULL;

			/* Drop the reference taken by timeout fiber */
			bt_conn_unref(conn);
		}
		break;
	default:
		break;
	}

	/* Actions needed for entering the new state */
	switch (conn->state) {
	case BT_CONN_CONNECTED:
		nano_fifo_init(&conn->tx_queue);
		fiber_start(conn->stack, sizeof(conn->stack), conn_tx_fiber,
			    (int)bt_conn_ref(conn), 0, 7, 0);

		bt_l2cap_connected(conn);
		notify_connected(conn);
		break;
	case BT_CONN_DISCONNECTED:
		/* Notify disconnection and queue a dummy buffer to wake
		 * up and stop the tx fiber for states where it was
		 * running.
		 */
		if (old_state == BT_CONN_CONNECTED ||
		    old_state == BT_CONN_DISCONNECT) {
			bt_l2cap_disconnected(conn);
			notify_disconnected(conn);

			nano_fifo_put(&conn->tx_queue, net_buf_get(&dummy, 0));
		} else if (old_state == BT_CONN_CONNECT) {
			/* conn->err will be set in this case */
			notify_connected(conn);
		} else if (old_state == BT_CONN_CONNECT_SCAN && conn->err) {
			/* This indicates that LE Create Connection failed */
			notify_connected(conn);
		}

		/* Release the reference we took for the very first
		 * state transition.
		 */
		bt_conn_unref(conn);

		break;
	case BT_CONN_CONNECT_SCAN:
		break;
	case BT_CONN_CONNECT:
		/*
		 * The timer is needed only for LE. For other link types the
		 * controller handles the connection timeout.
		 */
		if (conn->type != BT_CONN_TYPE_LE) {
			break;
		}

		/* Add LE Create Connection timeout */
		conn->timeout = fiber_delayed_start(conn->stack,
						    sizeof(conn->stack),
						    timeout_fiber,
						    (int)bt_conn_ref(conn),
						    0, 7, 0, CONN_TIMEOUT);
		break;
	case BT_CONN_DISCONNECT:
		break;
	default:
		BT_WARN("no valid (%u) state was set", state);

		break;
	}
}
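In the BT_CONN_DISCONNECTED branch above, the dummy buffer is queued only to wake the TX fiber, which otherwise blocks forever inside nano_fifo_get() on conn->tx_queue. The fiber itself is not shown on this page; the sketch below is a simplified, assumed version of it (the real conn_tx_fiber in the Zephyr Bluetooth stack does more per-buffer work), included to show why queuing one extra item is enough to stop the loop.

/* Simplified sketch of the consumer assumed by Example #16. The dummy buffer
 * unblocks nano_fifo_get(); the state check then notices that the connection
 * has left BT_CONN_CONNECTED and the fiber terminates.
 */
static void conn_tx_fiber(int arg1, int arg2)
{
	struct bt_conn *conn = (struct bt_conn *)arg1;
	struct net_buf *buf;

	ARG_UNUSED(arg2);

	while (true) {
		buf = nano_fifo_get(&conn->tx_queue, TICKS_UNLIMITED);

		if (conn->state != BT_CONN_CONNECTED) {
			/* Woken by the dummy buffer: drop it and stop */
			net_buf_unref(buf);
			break;
		}

		/* ... hand buf to the controller driver here ... */
		net_buf_unref(buf);
	}

	bt_conn_unref(conn); /* drop the reference passed via fiber_start() */
}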
Example #17
static inline void free_tx_bufs_func(struct net_buf *buf)
{
	inc_free_tx_bufs_func(buf);

	nano_fifo_put(buf->free, buf);
}