Example #1
int destiny_socket_accept(int s, sockaddr6_t *addr, uint32_t *addrlen)
{
    socket_internal_t *server_socket = get_socket(s);

    if (is_tcp_socket(s) && (server_socket->socket_values.tcp_control.state == LISTEN)) {
        socket_internal_t *current_queued_socket =
            get_waiting_connection_socket(s, NULL, NULL);

        if (current_queued_socket != NULL) {
            return handle_new_tcp_connection(current_queued_socket,
                                             server_socket, thread_getpid());
        }
        else {
            /* No waiting connections, waiting for message from TCP Layer */
            msg_t msg_recv_client_syn;
            msg_recv_client_syn.type = UNDEFINED;

            while (msg_recv_client_syn.type != TCP_SYN) {
                msg_receive(&msg_recv_client_syn);
            }

            current_queued_socket = get_waiting_connection_socket(s, NULL, NULL);

            return handle_new_tcp_connection(current_queued_socket,
                                             server_socket, thread_getpid());
        }
    }
    else {
        return -1;
    }
}
Example #2
static void run_client(int max_cnt)
{
    printf("client (pid=%" PRIkernel_pid "): start\n", thread_getpid());

    handle = ndn_app_create();
    if (handle == NULL) {
        printf("client (pid=%" PRIkernel_pid "): cannot create app handle\n",
               thread_getpid());
        return;
    }

    max_count = max_cnt;
    count = 0;
    begin = xtimer_now_usec();

    if (send_interest() == NDN_APP_ERROR) {
        printf("client (pid=%" PRIkernel_pid "): cannot send interest"
               " (%" PRIu32 ")\n", handle->id, count);
        ndn_app_destroy(handle);
        return;
    }

    ndn_app_run(handle);

    ndn_app_destroy(handle);
}
Example #3
static int pthread_rwlock_lock(pthread_rwlock_t *rwlock,
                               bool (*is_blocked)(const pthread_rwlock_t *rwlock),
                               bool is_writer,
                               int incr_when_held,
                               bool allow_spurious)
{
    if (rwlock == NULL) {
        DEBUG("Thread %" PRIkernel_pid": pthread_rwlock_%s(): is_writer=%u, allow_spurious=%u %s\n",
              thread_getpid(), "lock", is_writer, allow_spurious, "rwlock=NULL");
        return EINVAL;
    }

    mutex_lock(&rwlock->mutex);
    if (!is_blocked(rwlock)) {
        DEBUG("Thread %" PRIkernel_pid ": pthread_rwlock_%s(): is_writer=%u, allow_spurious=%u %s\n",
              thread_getpid(), "lock", is_writer, allow_spurious, "is open");
        rwlock->readers += incr_when_held;
    }
    else {
        DEBUG("Thread %" PRIkernel_pid ": pthread_rwlock_%s(): is_writer=%u, allow_spurious=%u %s\n",
              thread_getpid(), "lock", is_writer, allow_spurious, "is locked");

        /* queue for the lock */
        __pthread_rwlock_waiter_node_t waiting_node = {
            .is_writer = is_writer,
            .thread = (thread_t *) sched_active_thread,
            .qnode = {
                .next = NULL,
                .data = (uintptr_t) &waiting_node,
                .priority = sched_active_thread->priority,
            },
            .continue_ = false,
        };
        priority_queue_add(&rwlock->queue, &waiting_node.qnode);

        while (1) {
            /* wait to be unlocked, so this thread can try to acquire the lock again */
            mutex_unlock_and_sleep(&rwlock->mutex);

            mutex_lock(&rwlock->mutex);
            if (waiting_node.continue_) {
                /* pthread_rwlock_unlock() already set rwlock->readers */
                DEBUG("Thread %" PRIkernel_pid ": pthread_rwlock_%s(): is_writer=%u, allow_spurious=%u %s\n",
                      thread_getpid(), "lock", is_writer, allow_spurious, "continued");
                break;
            }
            else if (allow_spurious) {
                DEBUG("Thread %" PRIkernel_pid ": pthread_rwlock_%s(): is_writer=%u, allow_spurious=%u %s\n",
                      thread_getpid(), "lock", is_writer, allow_spurious, "is timed out");
                priority_queue_remove(&rwlock->queue, &waiting_node.qnode);
                mutex_unlock(&rwlock->mutex);
                return ETIMEDOUT;
            }
        }
    }
    mutex_unlock(&rwlock->mutex);

    return 0;
}
Example #4
void icn_initInterest(uint16_t seq) {
    if (WANT_CONTENT) {
        uint32_t tmp1;
        uint16_t tmp2;
        tmp1 = _getSmallestMissing();
        LOG_DEBUG("Smallest missing is %lu\n", tmp1);
        tmp2 = seq;

        if (tmp1 < NUMBER_OF_CHUNKS) {
            LOG_INFO("Scheduling retransmission for %" PRIu32 "\n", tmp1);
            vtimer_remove(&retry_vt);
            vtimer_set_msg(&retry_vt, retry_interval, thread_getpid(), ICN_RESEND_INTEREST, &tmp1);
        }
        if (bf_isset(received_chunks, seq)) {
            LOG_INFO("Already received a chunk for %u, not sending again\n", seq);
            return;
        }
#if FLOW_CONTROL
        if (seq > (receive_counter + FLOW_THR)) {
            LOG_INFO("Flow control, seq is %u, receive counter is %u\n",
                    seq, receive_counter);
            return;
        }
#endif
        /* create packet */
        ng_pktsnip_t *pkt;
        icn_pkt_t icn_pkt;
        icn_pkt.type = ICN_INTEREST;
        icn_pkt.seq = seq;
        memcpy(icn_pkt.payload, interest, strlen(interest) + 1);

        pkt = ng_pktbuf_add(NULL, &icn_pkt, sizeof(icn_pkt_t), NG_NETTYPE_UNDEF);
        if (pkt == NULL) {
            LOG_WARNING("no space left in packet buffer\n");
            return;
        }

        /* send interest packet */
        if (tmp2 < NUMBER_OF_CHUNKS) {
            LOG_INFO("Sending Interest for %u to %s\n", seq,
                     ng_netif_addr_to_str(l2addr_str, sizeof(l2addr_str),
                                          CONTENT_STORE->uint8, ADDR_LEN_64B));

            icn_send(CONTENT_STORE, pkt);
        }
        if (tmp2 < NUMBER_OF_CHUNKS) {
            tmp2++;
#if TIMED_SENDING
            vtimer_remove(&periodic_vt);
            vtimer_set_msg(&periodic_vt, interval, thread_getpid(), ICN_SEND_INTEREST, &tmp2);
#else
            icn_initInterest(tmp2);
#endif
        }
    }
    else {
        LOG_DEBUG("nothing to do\n");
    }
}
Example #5
/**
 * Function executed by NHDP thread receiving messages in an endless loop
 */
static void *_nhdp_runner(void *arg)
{
    nhdp_if_entry_t *if_entry;
    msg_t msg_rcvd, msg_queue[NHDP_MSG_QUEUE_SIZE];

    (void)arg;
    msg_init_queue(msg_queue, NHDP_MSG_QUEUE_SIZE);

    while (1) {
        msg_receive(&msg_rcvd);

        switch (msg_rcvd.type) {
            case HELLO_TIMER:
                mutex_lock(&send_rcv_mutex);
                if_entry = msg_rcvd.content.ptr;

                nhdp_writer_send_hello(if_entry);

                /* TODO: Add jitter */

                /* Schedule next sending */
                xtimer_set_msg64(&if_entry->if_timer,
                                 timex_uint64(if_entry->hello_interval),
                                 &msg_rcvd, thread_getpid());
                mutex_unlock(&send_rcv_mutex);
                break;

#if (NHDP_METRIC_NEEDS_TIMER)
            case NHDP_METRIC_TIMER:
                mutex_lock(&send_rcv_mutex);
                /* Process necessary metric computations */
                iib_process_metric_refresh();

                /* Schedule next sending */
                metric_msg.type = NHDP_METRIC_TIMER;
                metric_msg.content.ptr = NULL;
                xtimer_set_msg64(&metric_timer, timex_uint64(metric_interval),
                                 &metric_msg, thread_getpid());
                mutex_unlock(&send_rcv_mutex);
                break;
#endif
            default:
                break;
        }
    }

    return 0;
}
Example #6
gnrc_nettest_res_t gnrc_nettest_send_iface(kernel_pid_t pid, gnrc_pktsnip_t *in,
        unsigned int exp_pkts,
        const kernel_pid_t *exp_senders,
        const gnrc_pktsnip_t **exp_out)
{
    gnrc_nettest_res_t res;

    gnrc_netif_add(thread_getpid());

    res = _pkt_test(GNRC_NETAPI_MSG_TYPE_SND, pid, in, exp_pkts, exp_senders,
                    exp_out);

    gnrc_netif_remove(thread_getpid());

    return res;
}
Example #7
uint8_t semtech_loramac_send(semtech_loramac_t *mac, uint8_t *data, uint8_t len)
{
    mac->link_chk.available = false;
    if (!_is_mac_joined(mac)) {
        DEBUG("[semtech-loramac] network is not joined\n");
        return SEMTECH_LORAMAC_NOT_JOINED;
    }

    /* Correctly set the caller pid */
    mac->caller_pid = thread_getpid();

    loramac_send_params_t params;
    params.payload = data;
    params.len = len;

    _semtech_loramac_call(_send, &params);

    /* Wait for TX status information from the MAC */
    msg_t msg;
    msg_receive(&msg);
    if (msg.type != MSG_TYPE_LORAMAC_TX_STATUS) {
        return SEMTECH_LORAMAC_TX_ERROR;
    }

    return (uint8_t)msg.content.value;
}
Example #8
int32_t destiny_socket_recvfrom(int s, void *buf, uint32_t len, int flags,
                                sockaddr6_t *from, uint32_t *fromlen)
{
    if (isUDPSocket(s)) {
        msg_t m_recv, m_send;
        ipv6_hdr_t *ipv6_header;
        udp_hdr_t *udp_header;
        uint8_t *payload;
        get_socket(s)->recv_pid = thread_getpid();

        msg_receive(&m_recv);

        ipv6_header = ((ipv6_hdr_t *)m_recv.content.ptr);
        udp_header = ((udp_hdr_t *)(m_recv.content.ptr + IPV6_HDR_LEN));
        payload = (uint8_t *)(m_recv.content.ptr + IPV6_HDR_LEN + UDP_HDR_LEN);

        memset(buf, 0, len);
        memcpy(buf, payload, udp_header->length - UDP_HDR_LEN);
        memcpy(&from->sin6_addr, &ipv6_header->srcaddr, 16);
        from->sin6_family = AF_INET6;
        from->sin6_flowinfo = 0;
        from->sin6_port = udp_header->src_port;
        *fromlen = sizeof(sockaddr6_t);

        msg_reply(&m_recv, &m_send);
        return udp_header->length - UDP_HDR_LEN;
    }
    else if (is_tcp_socket(s)) {
        return destiny_socket_recv(s, buf, len, flags);
    }
    else {
        printf("Socket Type not supported!\n");
        return -1;
    }
}
Example #9
int vtimer_sleep(timex_t time) {
    int ret;
    vtimer_t t;
    ret = vtimer_set_wakeup(&t, time, thread_getpid());
    thread_sleep();
    return ret;
}
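Example #9 arms a one-shot wakeup timer on the caller's own PID and then puts the thread to sleep until the timer fires. For comparison, here is a minimal sketch of the same wakeup-then-sleep pattern with the newer xtimer API used by several later examples; the helper name and the microsecond offset parameter are my assumptions, not taken from any example above.

#include "thread.h"
#include "xtimer.h"

/* Minimal sketch (assumed xtimer equivalent of the vtimer pattern above):
 * arm a wakeup timer on the calling thread's own PID, then sleep until it fires. */
static void sleep_until_timeout(uint32_t offset_us)
{
    xtimer_t t;
    xtimer_set_wakeup(&t, offset_us, thread_getpid());
    thread_sleep();
}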
Example #10
void *beaconing(void *arg)
{
    (void) arg;
    xtimer_t status_timer;
    msg_t msg;
    msg_t update_msg;
    kernel_pid_t mypid = thread_getpid();

    /* initialize message queue */
    msg_init_queue(_beac_msg_q, Q_SZ);

    /* start periodic timer */
    update_msg.type = MSG_UPDATE_EVENT;
    xtimer_set_msg(&status_timer, UPDATE_INTERVAL, &update_msg, mypid);

    while(1) {
        msg_receive(&msg);

        switch (msg.type) {
            case MSG_UPDATE_EVENT:
                xtimer_set_msg(&status_timer, UPDATE_INTERVAL, &update_msg, mypid);
                send_update(initial_pos, p_buf);
                break;
            default:
                break;
        }
    }

    /* never reached */
    return NULL;
}
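For context, an event-loop thread such as beaconing above is normally started with thread_create(), mirroring the calls in Examples #11, #16 and #30. The launcher below is a hypothetical sketch; the stack size, priority and thread name are illustrative and not taken from the original project.

#include "thread.h"

static char beac_stack[THREAD_STACKSIZE_DEFAULT];

/* Hypothetical launcher: spawn the beaconing loop in its own thread and
 * return its PID so other threads can send messages to it. */
kernel_pid_t start_beaconing(void)
{
    return thread_create(beac_stack, sizeof(beac_stack),
                         THREAD_PRIORITY_MAIN - 1,
                         THREAD_CREATE_STACKTEST,
                         beaconing, NULL, "beaconing");
}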
Example #11
int main(void)
{
    uint32_t count = 0;

    indicator = 0;
    main_pid = thread_getpid();

    kernel_pid_t second_pid = thread_create(stack,
                  sizeof(stack),
                  THREAD_PRIORITY_MAIN - 1,
                  THREAD_CREATE_WOUT_YIELD | THREAD_CREATE_STACKTEST,
                  second_thread,
                  NULL,
                  "second_thread");

    while (1) {
        mutex_lock(&mutex);
        thread_wakeup(second_pid);
        indicator++;
        count++;

        if ((indicator > 1) || (indicator < -1)) {
            printf("[ERROR] threads did not sleep properly (%d).\n", indicator);
            return 1;
        }
        if ((count % 100000) == 0) {
            printf("[ALIVE] alternated %"PRIu32"k times.\n", (count / 1000));
        }
        mutex_unlock_and_sleep(&mutex);
    }
}
Example #12
int32_t udp_recvfrom(int s, void *buf, uint32_t len, int flags, sockaddr6_t *from, uint32_t *fromlen)
{
    (void) flags;

    msg_t m_recv, m_send;
    ipv6_hdr_t *ipv6_header;
    udp_hdr_t *udp_header;
    uint8_t *payload;
    socket_base_get_socket(s)->recv_pid = thread_getpid();

    msg_receive(&m_recv);

    ipv6_header = ((ipv6_hdr_t *)m_recv.content.ptr);
    udp_header = ((udp_hdr_t *)(m_recv.content.ptr + IPV6_HDR_LEN));
    payload = (uint8_t *)(m_recv.content.ptr + IPV6_HDR_LEN + UDP_HDR_LEN);

    memset(buf, 0, len);
    /* cppcheck: the memset sets parts of the buffer to 0 even though it will
     * be overwritten by the next memcpy. However without the memset the buffer
     * could contain stale data (if the copied data is less then the buffer
     * length) and setting just the left over part of the buffer to 0 would
     * introduce overhead (calculation how much needs to be zeroed).
     */
    /* cppcheck-suppress redundantCopy */
    memcpy(buf, payload, NTOHS(udp_header->length) - UDP_HDR_LEN);
    memcpy(&from->sin6_addr, &ipv6_header->srcaddr, 16);
    from->sin6_family = AF_INET6;
    from->sin6_flowinfo = 0;
    from->sin6_port = udp_header->src_port;
    *fromlen = sizeof(sockaddr6_t);

    msg_reply(&m_recv, &m_send);
    return NTOHS(udp_header->length) - UDP_HDR_LEN;
}
Example #13
int msg_send_int(msg_t *m, unsigned int target_pid)
{
    tcb_t *target = (tcb_t *) sched_threads[target_pid];

    if (target == NULL) {
        DEBUG("msg_send_int(): target thread does not exist\n");
        return -1;
    }

    if (target->status == STATUS_RECEIVE_BLOCKED) {
        DEBUG("msg_send_int: Direct msg copy from %i to %i.\n", thread_getpid(), target_pid);

        m->sender_pid = target_pid;

        /* copy msg to target */
        msg_t *target_message = (msg_t*) target->wait_data;
        *target_message = *m;
        sched_set_status(target, STATUS_PENDING);

        sched_context_switch_request = 1;
        return 1;
    }
    else {
        DEBUG("msg_send_int: Receiver not waiting.\n");
        return (queue_msg(target, m));
    }
}
Example #14
File: nhdp.c Project: cococolo/RIOT
/**
 * Function executed by NHDP thread receiving messages in an endless loop
 */
static void *_nhdp_runner(void *arg)
{
    nhdp_if_entry_t *if_entry;
    msg_t msg_rcvd, msg_queue[NHDP_MSG_QUEUE_SIZE];

    (void)arg;
    msg_init_queue(msg_queue, NHDP_MSG_QUEUE_SIZE);

    while (1) {
        msg_receive(&msg_rcvd);

        switch (msg_rcvd.type) {
            case MSG_TIMER:
                mutex_lock(&send_rcv_mutex);
                if_entry = (nhdp_if_entry_t *) msg_rcvd.content.ptr;

                nhdp_writer_send_hello(if_entry);

                /* TODO: Add jitter */

                /* Schedule next sending */
                vtimer_set_msg(&if_entry->if_timer, if_entry->hello_interval,
                               thread_getpid(), MSG_TIMER, (void *) if_entry);
                mutex_unlock(&send_rcv_mutex);
                break;

            default:
                break;
        }
    }

    return 0;
}
Example #15
int msg_send_int(msg_t *m, kernel_pid_t target_pid)
{
#ifdef DEVELHELP
    if (!pid_is_valid(target_pid)) {
        DEBUG("msg_send(): target_pid is invalid, continuing anyways\n");
    }
#endif /* DEVELHELP */

    thread_t *target = (thread_t *) sched_threads[target_pid];

    if (target == NULL) {
        DEBUG("msg_send_int(): target thread does not exist\n");
        return -1;
    }

    m->sender_pid = KERNEL_PID_ISR;
    if (target->status == STATUS_RECEIVE_BLOCKED) {
        DEBUG("msg_send_int: Direct msg copy from %" PRIkernel_pid " to %"
              PRIkernel_pid ".\n", thread_getpid(), target_pid);


        /* copy msg to target */
        msg_t *target_message = (msg_t*) target->wait_data;
        *target_message = *m;
        sched_set_status(target, STATUS_PENDING);

        sched_context_switch_request = 1;
        return 1;
    }
    else {
        DEBUG("msg_send_int: Receiver not waiting.\n");
        return (queue_msg(target, m));
    }
}
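msg_send_int() is the ISR-safe counterpart of msg_send(); the receiving side is typically a thread that publishes its own PID via thread_getpid() and then blocks in msg_receive(). The sketch below shows that pairing; handler_pid, MSG_TYPE_EVENT and some_isr are illustrative names I introduced, not identifiers from the examples.

#include "msg.h"
#include "thread.h"

#define MSG_TYPE_EVENT  (0x4242)          /* illustrative message type */

static kernel_pid_t handler_pid = KERNEL_PID_UNDEF;

void *handler_thread(void *arg)
{
    (void)arg;
    static msg_t queue[8];
    msg_init_queue(queue, 8);             /* buffer messages arriving from ISRs */
    handler_pid = thread_getpid();        /* publish own PID for the interrupt handler */

    while (1) {
        msg_t m;
        msg_receive(&m);
        if (m.type == MSG_TYPE_EVENT) {
            /* handle the event signalled by the ISR */
        }
    }
    return NULL;
}

void some_isr(void)
{
    msg_t m = { .type = MSG_TYPE_EVENT };
    if (handler_pid != KERNEL_PID_UNDEF) {
        msg_send_int(&m, handler_pid);    /* non-blocking send from interrupt context */
    }
}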
Example #16
int main(void)
{
    indicator = 0;
    count = 0;

    main_pid = thread_getpid();

    kernel_pid_t second_pid = thread_create(stack,
                  sizeof(stack),
                  THREAD_PRIORITY_MAIN - 1,
                  THREAD_CREATE_WOUT_YIELD | THREAD_CREATE_STACKTEST,
                  second_thread,
                  NULL,
                  "second_thread");

    while (1) {
        mutex_lock(&mutex);
        thread_wakeup(second_pid);
        indicator++;
        count++;

        if (indicator > 1 || indicator < -1) {
            printf("Error, threads did not sleep properly. [indicator: %d]\n", indicator);
            return -1;
        }

        if ((count % 100000) == 0) {
            printf("Still alive alternated [count: %dk] times.\n", count / 1000);
        }

        mutex_unlock_and_sleep(&mutex);
    }
}
Example #17
thread_flags_t thread_flags_clear(thread_flags_t mask)
{
    thread_t *me = (thread_t*) sched_active_thread;
    mask = _thread_flags_clear_atomic(me, mask);
    DEBUG("thread_flags_clear(): pid %"PRIkernel_pid" clearing 0x%08x\n", thread_getpid(), mask);
    return mask;
}
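thread_flags_clear() is normally used together with thread_flags_wait_any() and thread_flags_set(). Assuming the thread_flags module is enabled, a typical pairing could look like the sketch below; FLAG_DATA_READY and worker_pid are illustrative, and the sched_threads[] lookup mirrors the one in Example #15.

#include "sched.h"
#include "thread.h"
#include "thread_flags.h"

#define FLAG_DATA_READY (1u << 0)         /* illustrative flag bit */

static kernel_pid_t worker_pid;

void *worker(void *arg)
{
    (void)arg;
    worker_pid = thread_getpid();         /* publish own PID for notifiers */
    while (1) {
        thread_flags_wait_any(FLAG_DATA_READY);
        /* process the data that became ready */
    }
    return NULL;
}

void notify_worker(void)
{
    /* look up the thread object from its PID, as Example #15 does */
    thread_flags_set((thread_t *)sched_threads[worker_pid], FLAG_DATA_READY);
}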
Example #18
ng_nettest_res_t ng_nettest_send_iface(kernel_pid_t pid, ng_pktsnip_t *in,
                                       unsigned int exp_pkts,
                                       kernel_pid_t exp_senders[],
                                       ng_pktsnip_t *exp_out[])
{
    ng_nettest_res_t res;

    ng_netif_add(thread_getpid());

    res = _pkt_test(NG_NETAPI_MSG_TYPE_SND, pid, in, exp_pkts, exp_senders,
                    exp_out);

    ng_netif_remove(thread_getpid());

    return res;
}
Example #19
int gnrc_tftp_server(tftp_data_cb_t data_cb, tftp_start_cb_t start_cb, tftp_stop_cb_t stop_cb, bool use_options)
{
    /* check if there is only one TFTP server running */
    if (_tftp_kernel_pid != KERNEL_PID_UNDEF) {
        DEBUG("tftp: only one TFTP server allowed\n");
        return -1;
    }

    /* context will be initialized when a connection is established */
    tftp_context_t ctxt;
    ctxt.data_cb = data_cb;
    ctxt.start_cb = start_cb;
    ctxt.stop_cb = stop_cb;
    ctxt.enable_options = use_options;

    /* validate our arguments */
    assert(data_cb);
    assert(start_cb);
    assert(stop_cb);

    /* save our kernel PID */
    _tftp_kernel_pid = thread_getpid();

    /* start the server */
    int ret = _tftp_server(&ctxt);

    /* remove possibly stale timer */
    xtimer_remove(&(ctxt.timer));

    /* reset the kernel PID */
    _tftp_kernel_pid = KERNEL_PID_UNDEF;

    return ret;
}
Example #20
static void _rx_event(ng_netdev_eth_t *netdev)
{
    dev_eth_t *dev = netdev->ethdev;
    int nread = dev->driver->recv(dev, (char*)recv_buffer, ETHERNET_MAX_LEN);

    DEBUG("ng_netdev_eth: read %d bytes\n", nread);

    if (nread > 0) {
        ethernet_hdr_t *hdr = (ethernet_hdr_t *)recv_buffer;
        ng_pktsnip_t *netif_hdr, *pkt;
        ng_nettype_t receive_type = NG_NETTYPE_UNDEF;
        size_t data_len = (nread - sizeof(ethernet_hdr_t));

        /* TODO: implement multicast groups? */

        netif_hdr = ng_pktbuf_add(NULL, NULL,
                                  sizeof(ng_netif_hdr_t) + (2 * ETHERNET_ADDR_LEN),
                                  NG_NETTYPE_NETIF);

        if (netif_hdr == NULL) {
            DEBUG("ng_netdev_eth: no space left in packet buffer\n");
            return;
        }

        ng_netif_hdr_init(netif_hdr->data, ETHERNET_ADDR_LEN, ETHERNET_ADDR_LEN);
        ng_netif_hdr_set_src_addr(netif_hdr->data, hdr->src, ETHERNET_ADDR_LEN);
        ng_netif_hdr_set_dst_addr(netif_hdr->data, hdr->dst, ETHERNET_ADDR_LEN);
        ((ng_netif_hdr_t *)netif_hdr->data)->if_pid = thread_getpid();

        receive_type = ng_nettype_from_ethertype(byteorder_ntohs(hdr->type));

        DEBUG("ng_netdev_eth: received packet from %02x:%02x:%02x:%02x:%02x:%02x "
              "of length %zu\n",
              hdr->src[0], hdr->src[1], hdr->src[2], hdr->src[3], hdr->src[4],
              hdr->src[5], data_len);
#if defined(MODULE_OD) && ENABLE_DEBUG
        od_hex_dump(hdr, nread, OD_WIDTH_DEFAULT);
#endif

        /* Mark netif header and payload for next layer */
        if ((pkt = ng_pktbuf_add(netif_hdr, recv_buffer + sizeof(ethernet_hdr_t),
                                 data_len, receive_type)) == NULL) {
            ng_pktbuf_release(netif_hdr);
            DEBUG("ng_netdev_eth: no space left in packet buffer\n");
            return;
        }

        if (netdev->event_cb != NULL) {
            netdev->event_cb(NETDEV_EVENT_RX_COMPLETE, pkt);
        }
        else {
            ng_pktbuf_release(pkt); /* netif_hdr is released automatically too */
        }
    }
    else {
        DEBUG("ng_netdev_eth: spurious _rx_event: %d\n", nread);
    }
}
Example #21
bool
sol_mainloop_impl_main_thread_check(void)
{
#ifdef THREADS
    return thread_getpid() == _main_pid;
#else
    return true;
#endif
}
Example #22
File: pir.c Project: JMR-b/RIOT
int pir_register_thread(pir_t *dev)
{
    if (dev->msg_thread_pid != KERNEL_PID_UNDEF) {
        if (dev->msg_thread_pid != thread_getpid()) {
            DEBUG("pir_register_thread: already registered to another thread\n");
            return -2;
        }
    }
    else {
        DEBUG("pir_register_thread: activating interrupt for %p..\n", dev);
        if (pir_activate_int(dev) != 0) {
            DEBUG("\tfailed\n");
            return -1;
        }
        DEBUG("\tsuccess\n");
    }
    dev->msg_thread_pid = thread_getpid();

    return 0;
}
Example #23
int main(void)
{
    char *status = "I am written to the UART every 2 seconds\n";
    char buf[128];
    int res;
    msg_t msg;

    main_pid = thread_getpid();
    printf("Main thread pid %i \n", main_pid);

    printf("Testing interrupt driven mode of UART driver\n\n");

    printf("Setting up buffers...\n");
    ringbuffer_init(&rx_buf, rx_mem, 128);
    ringbuffer_init(&tx_buf, tx_mem, 128);

    printf("Initializing UART @ %i", BAUD);
    if (uart_init(DEV, BAUD, rx, NULL, tx, NULL) >= 0) {
        printf("   ...done\n");
    }
    else {
        printf("   ...failed\n");
        return 1;
    }

    ringbuffer_add(&tx_buf, status, strlen(status));
    uart_tx_begin(DEV);

    while (1) {
        printf("Going into receive message state\n");
        //msg_receive(&msg);

        if (status) {
            printf("INPUT: ");
            res = ringbuffer_get(&rx_buf, buf, rx_buf.avail);
            buf[res] = '\0';
            printf("%s", buf);
            status = 0;
        }

/*        printf("got message");

        if (msg.type == MSG_LINE_RDY) {
            printf("INPUT: ");
            res = ringbuffer_get(&rx_buf, buf, rx_buf.avail);
            buf[res] = '\0';
            printf("%s", buf);
        }
*/

    }

    return 0;
}
Example #24
int
sol_mainloop_impl_platform_init(void)
{
#ifdef THREADS
    mutex_init(&_lock);
    _main_pid = thread_getpid();
#endif
    sol_interrupt_scheduler_set_pid(sched_active_pid);
    msg_init_queue(msg_buffer, MSG_BUFFER_SIZE);
    return 0;
}
Example #25
/* SLIP receive handler */
static void _slip_receive(ng_slip_dev_t *dev, size_t bytes)
{
    ng_netif_hdr_t *hdr;
    ng_netreg_entry_t *sendto;
    ng_pktsnip_t *pkt, *netif_hdr;

    pkt = ng_pktbuf_add(NULL, NULL, bytes, NG_NETTYPE_UNDEF);

    if (pkt == NULL) {
        DEBUG("slip: no space left in packet buffer\n");
        return;
    }

    netif_hdr = ng_pktbuf_add(pkt, NULL, sizeof(ng_netif_hdr_t),
                              NG_NETTYPE_NETIF);

    if (netif_hdr == NULL) {
        DEBUG("slip: no space left in packet buffer\n");
        ng_pktbuf_release(pkt);
        return;
    }

    hdr = netif_hdr->data;
    ng_netif_hdr_init(hdr, 0, 0);
    hdr->if_pid = thread_getpid();

    if (ringbuffer_get(dev->in_buf, pkt->data, bytes) != bytes) {
        DEBUG("slip: could not read %zu bytes from ringbuffer\n", bytes);
        ng_pktbuf_release(pkt);
        return;
    }

#ifdef MODULE_NG_IPV6
    if ((pkt->size >= sizeof(ipv6_hdr_t)) && ipv6_hdr_is(pkt->data)) {
        pkt->type = NG_NETTYPE_IPV6;
    }
#endif

    sendto = ng_netreg_lookup(pkt->type, NG_NETREG_DEMUX_CTX_ALL);

    if (sendto == NULL) {
        DEBUG("slip: unable to forward packet of type %i\n", pkt->type);
        ng_pktbuf_release(pkt);
        return;
    }

    ng_pktbuf_hold(pkt, ng_netreg_num(pkt->type, NG_NETREG_DEMUX_CTX_ALL) - 1);

    while (sendto != NULL) {
        DEBUG("slip: sending pkt %p to PID %u\n", pkt, sendto->pid);
        ng_netapi_receive(sendto->pid, pkt);
        sendto = ng_netreg_getnext(sendto);
    }
}
Example #26
void tcp_general_timer(void)
{
    vtimer_t tcp_vtimer;
    timex_t interval = timex_set(0, TCP_TIMER_RESOLUTION);

    while (1) {
        inc_global_variables();
        check_sockets();
        vtimer_set_wakeup(&tcp_vtimer, interval, thread_getpid());
        thread_sleep();
    }
}
Example #27
static void run_server(int sig_type, int sz)
{
    printf("server (pid=%" PRIkernel_pid "): start\n", thread_getpid());

    handle = ndn_app_create();
    if (handle == NULL) {
        printf("server (pid=%" PRIkernel_pid "): cannot create app handle\n",
               thread_getpid());
        return;
    }
    signature_type = sig_type;
    content_size = sz;

    ndn_shared_block_t* sp = ndn_name_from_uri(uri, strlen(uri));
    if (sp == NULL) {
        printf("server (pid=%" PRIkernel_pid "): cannot create name from uri "
               "\"%s\"\n", handle->id, uri);
        return;
    }

    printf("server (pid=%" PRIkernel_pid "): register prefix \"%s\"\n",
           handle->id, uri);
    /* pass ownership of "sp" to the API */
    if (ndn_app_register_prefix(handle, sp, on_interest) != 0) {
        printf("server (pid=%" PRIkernel_pid "): failed to register prefix\n",
               handle->id);
        ndn_app_destroy(handle);
        return;
    }

    printf("server (pid=%" PRIkernel_pid "): enter app run loop\n",
           handle->id);

    ndn_app_run(handle);

    printf("server (pid=%" PRIkernel_pid "): returned from app run loop\n",
           handle->id);

    ndn_app_destroy(handle);
}
Example #28
int main(void)
{
    memset(apps, '\0', sizeof(apps));
    apps[0] = clock_app_init();
    apps[1] = alarm_app_init();

    gpioint_set(2, BUTTON_STAR_PIN, (GPIOINT_RISING_EDGE | GPIOINT_DEBOUNCE), button_star);
    button_thread = thread_getpid();

    int active_app = 0;
    
    msg_t m;

    //buzzer_beep(15, 5000);
    
    printf("ukleos\n");

    m.type = MSG_ACTIVATE;
    msg_send(&m, apps[active_app], true);

    while(1) {
        msg_receive(&m);

        switch (m.type) {
            case MSG_BUTTON_STAR: {
                m.type = MSG_DEACTIVATE;
                msg_send(&m, apps[active_app], true);

                active_app++;

                if (active_app == NUM_APPS) {
                    active_app = 0;
                }

                m.type = MSG_ACTIVATE;
                msg_send(&m, apps[active_app], true);

                // buzzer_beep(15, 5000);

                break;
            }
            case MSG_BUTTON_HASH: {
                m.type = MSG_BUTTON_HASH;
                msg_send(&m, apps[active_app], true);
                break;
            }
            default:
                {
                    printf("msg\n");
                }
        }
    }
}
Example #29
File: main.c Project: jpfender/RIOT
void sub_thread(void)
{
    int pid = thread_getpid();
    printf("THREAD %s (pid:%i) start\n", thread_getname(pid), pid);

    msg_t msg;

    msg.content.ptr = (char*)thread_getname(pid);

    msg_send(&msg, 1, 1);

    printf("THREAD %s (pid:%i) end.\n", thread_getname(pid), pid);
}
Example #30
File: main.c Project: A-L-E-X/RIOT
int main(void)
{
    printf("CCN!\n");

    _relay_pid = thread_getpid();

    thread_create(t2_stack, sizeof(t2_stack), PRIORITY_MAIN + 1,
                  CREATE_STACKTEST, second_thread, NULL, "helper thread");

    printf("starting ccn-lite relay...\n");
    ccnl_riot_relay_start(NULL);

    return 0;
}