Example #1
static void mutex_wait(struct mutex_t *mutex)
{
    unsigned irqstate = disableIRQ();
    DEBUG("%s: Mutex in use. %u\n", sched_active_thread->name, ATOMIC_VALUE(mutex->val));

    if (atomic_set_to_one(&mutex->val)) {
        /* somebody released the mutex. return. */
        DEBUG("%s: mutex_wait early out. %u\n", sched_active_thread->name, ATOMIC_VALUE(mutex->val));
        restoreIRQ(irqstate);
        return;
    }

    sched_set_status((thread_t*) sched_active_thread, STATUS_MUTEX_BLOCKED);

    priority_queue_node_t n;
    n.priority = (unsigned int) sched_active_thread->priority;
    n.data = (unsigned int) sched_active_thread;
    n.next = NULL;

    DEBUG("%s: Adding node to mutex queue: prio: %" PRIu32 "\n", sched_active_thread->name, n.priority);

    priority_queue_add(&(mutex->queue), &n);

    restoreIRQ(irqstate);

    thread_yield_higher();

    /* we were woken up by scheduler. waker removed us from queue. we have the mutex now. */
}
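For context, a minimal sketch (not part of the example above) of how a lock function in this generation of RIOT could fall back to mutex_wait() when the fast path fails; the mutex_lock() wrapper shown here is an assumption, only atomic_set_to_one() and mutex_wait() appear in the source above:

void mutex_lock(struct mutex_t *mutex)
{
    if (atomic_set_to_one(&mutex->val) == 0) {
        /* fast path failed: the mutex is already held, block until a
         * releaser removes us from the queue and makes us runnable again */
        mutex_wait(mutex);
    }
}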
Example #2
static int _start_dow(int argc, char **argv)
{
    printf("Settings: %s D:%u X:", (DOW_DEPUTY ? "Deputy" : ""), DOW_D);
    if ((DOW_X) < 1) {
        printf("0.%u", (unsigned) (100 *  DOW_X));
    }
    else {
        printf("%u", (unsigned) DOW_X);
    }
    printf(" p:%u Y:%u CS:%u P-MDMR:%u Q:%u PER:%u KEEP_ALIVE:%u PSR:%u BC:%u %s\n",
           (unsigned) (100U * DOW_P), DOW_Y, dow_cache_size,
           (unsigned) DOW_PRIO_CACHE, (unsigned) (100U * DOW_Q),
           (unsigned) DOW_PER, (unsigned) DOW_KEEP_ALIVE_PFX,
           (unsigned) DOW_PSR, (unsigned) DOW_BC_COUNT,
           (DOW_HARDWIRED ? "hardwired" : ""));

    if (argc > 1) {
        dow_my_id = (uint32_t) strtol(argv[1], NULL, 10);
#if DOW_HARDWIRED
        dow_manual_id = true;
#endif
    }
    if (argc > 2) {
        dow_num_src = (uint16_t) strtol(argv[2], NULL, 10);
    }
    if (argc > 3) {
        dow_is_source = false;
        puts("not producing data");
    }

    dow_init();
    thread_yield_higher();
    return 0;
}
Example #3
File: timer.c Project: ReneHerthel/RIOT
/**
 * @brief Reload the timer with the given timeout, or spin if timeout is too small
 *
 * @pre IRQs masked, timer running
 */
static inline void lptmr_reload_or_spin(uint8_t dev, uint16_t timeout)
{
    LPTMR_Type *hw = lptmr_config[dev].dev;
    /* To change the compare value, the timer has to be disabled and re-enabled;
     * the hardware drops 1 to 2 ticks during that disable-enable cycle */
    /* Mask the timer interrupt first, the counter keeps running for now */
    hw->CSR = LPTMR_CSR_TEN_MASK | LPTMR_CSR_TFC_MASK;
    if (timeout <= LPTMR_RELOAD_OVERHEAD) {
        /* we spin if the timeout is too short to reload the timer */
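        /* note (assumption from the Kinetis LPTMR behaviour): writing any value
         * to CNR latches the current counter value so the next reads return it */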
        hw->CNR = 0;
        uint16_t cnr_begin = hw->CNR;
        while ((hw->CNR - cnr_begin) <= timeout) {
            hw->CNR = 0;
        }
        /* Emulate IRQ handler behaviour */
        lptmr[dev].running = 0;
        if (lptmr[dev].isr_ctx.cb != NULL) {
            lptmr[dev].isr_ctx.cb(lptmr[dev].isr_ctx.arg, 0);
        }
        thread_yield_higher();
        return;
    }
    /* Update reference */
    hw->CNR = 0;
    lptmr[dev].cnr += hw->CNR + LPTMR_RELOAD_OVERHEAD;
    /* Disable timer */
    hw->CSR = 0;
    hw->CMR = timeout - LPTMR_RELOAD_OVERHEAD;
    /* Enable timer and IRQ */
    hw->CSR = LPTMR_CSR_TEN_MASK | LPTMR_CSR_TFC_MASK | LPTMR_CSR_TIE_MASK;
}
Example #4
static void _thread_flags_wait(thread_flags_t mask, thread_t *thread, unsigned threadstate, unsigned irqstate)
{
    DEBUG("_thread_flags_wait: me->flags=0x%08x me->mask=0x%08x. going blocked.\n",
            (unsigned)thread->flags, (unsigned)mask);

    thread->wait_data = (void *)(unsigned)mask;
    sched_set_status(thread, threadstate);
    irq_restore(irqstate);
    thread_yield_higher();
}
Example #5
void thread_flags_set(thread_t *thread, thread_flags_t mask)
{
    DEBUG("thread_flags_set(): setting 0x%08x for pid %"PRIkernel_pid"\n", mask, thread->pid);
    unsigned state = irq_disable();
    thread->flags |= mask;
    if (thread_flags_wake(thread)) {
        irq_restore(state);
        thread_yield_higher();
    }
    else {
        irq_restore(state);
    }
}
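Examples #4 and #5 are the two halves of the thread-flags mechanism: _thread_flags_wait() parks the caller and thread_flags_set() wakes it, yielding if the woken thread should run. A hedged usage sketch built only on the public RIOT calls thread_flags_wait_any() and thread_flags_set(); the flag value and the waiter bookkeeping are assumptions for illustration:

#include "thread.h"
#include "thread_flags.h"

#define FLAG_DATA_READY  (1u << 0)   /* hypothetical flag bit */

static thread_t *_waiter;            /* assumed to be set to the waiting thread */

/* waiting side: blocks via the internal wait/yield path shown above */
static void wait_for_data(void)
{
    thread_flags_wait_any(FLAG_DATA_READY);
}

/* signalling side (thread or ISR context): sets the flag and lets the
 * scheduler switch to the waiter if it has higher priority */
static void signal_data_ready(void)
{
    thread_flags_set(_waiter, FLAG_DATA_READY);
}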
Example #6
File: evtimer.c Project: A-Paul/RIOT
void evtimer_add(evtimer_t *evtimer, evtimer_event_t *event)
{
    unsigned state = irq_disable();

    DEBUG("evtimer_add(): adding event with offset %" PRIu32 "\n", event->offset);

    _update_head_offset(evtimer);
    evtimer_add_event_to_list(evtimer, event);
    if (evtimer->events == event) {
        _set_timer(&evtimer->timer, event->offset);
    }
    irq_restore(state);
    if (sched_context_switch_request) {
        thread_yield_higher();
    }
}
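A hedged sketch of the caller side: with the evtimer_msg backend, an event added via evtimer_add() (through evtimer_add_msg()) results in an IPC message after the given offset. The message type and the millisecond offset below are assumptions for illustration:

#include "evtimer_msg.h"
#include "thread.h"

static evtimer_t _evtimer;
static evtimer_msg_event_t _event;

static void schedule_timeout(void)
{
    evtimer_init_msg(&_evtimer);

    _event.event.offset = 500;        /* offset in milliseconds */
    _event.msg.type = 0x4242;         /* hypothetical message type */

    /* queues the event; the message is sent to this thread when it expires */
    evtimer_add_msg(&_evtimer, &_event, thread_getpid());
}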
Example #7
File: pipe.c Project: 4dahalibut/RIOT
static ssize_t pipe_rw(ringbuffer_t *rb,
                       void *buf,
                       size_t n,
                       tcb_t **other_op_blocked,
                       tcb_t **this_op_blocked,
                       ringbuffer_op_t ringbuffer_op)
{
    if (n == 0) {
        return 0;
    }

    while (1) {
        unsigned old_state = disableIRQ();

        unsigned count = ringbuffer_op(rb, buf, n);

        if (count > 0) {
            tcb_t *other_thread = *other_op_blocked;
            int other_prio = -1;
            if (other_thread) {
                *other_op_blocked = NULL;
                other_prio = other_thread->priority;
                sched_set_status(other_thread, STATUS_PENDING);
            }

            restoreIRQ(old_state);

            if (other_prio >= 0) {
                sched_switch(other_prio);
            }

            return count;
        }
        else if (*this_op_blocked || inISR()) {
            restoreIRQ(old_state);
            return 0;
        }
        else {
            *this_op_blocked = (tcb_t *) sched_active_thread;

            sched_set_status((tcb_t *) sched_active_thread, STATUS_SLEEPING);
            restoreIRQ(old_state);
            thread_yield_higher();
        }
    }
}
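A hedged sketch of how the read and write wrappers are expected to call pipe_rw(): each direction passes the opposite blocked-thread slot first so that a successful operation can wake its counterpart. The pipe_t field names (rb, read_blocked, write_blocked) are assumptions, only pipe_rw() itself appears in the source above:

ssize_t pipe_read(pipe_t *pipe, void *buf, size_t n)
{
    /* consume from the ringbuffer, possibly waking a blocked writer */
    return pipe_rw(pipe->rb, buf, n,
                   &pipe->write_blocked, &pipe->read_blocked,
                   (ringbuffer_op_t) ringbuffer_get);
}

ssize_t pipe_write(pipe_t *pipe, const void *buf, size_t n)
{
    /* produce into the ringbuffer, possibly waking a blocked reader */
    return pipe_rw(pipe->rb, (void *) buf, n,
                   &pipe->read_blocked, &pipe->write_blocked,
                   (ringbuffer_op_t) ringbuffer_add);
}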
Example #8
int main(void)
{
#ifdef MODULE_TLSF
    tlsf_create_with_pool(_tlsf_heap, sizeof(_tlsf_heap));
#endif
    msg_init_queue(_main_msg_queue, MAIN_QUEUE_SIZE);

    printf("CCN caching started\n");

#if DOW_AUTOSTART
    dow_init();
    thread_yield_higher();
#endif
    /* start the shell */
    char line_buf[SHELL_DEFAULT_BUFSIZE];
    shell_run(shell_commands, line_buf, SHELL_DEFAULT_BUFSIZE);
    return 0;
}
Example #9
void mutex_unlock_and_sleep(struct mutex_t *mutex)
{
    DEBUG("%s: unlocking mutex. val: %u pid: %" PRIkernel_pid ", and taking a nap\n", sched_active_thread->name, ATOMIC_VALUE(mutex->val), sched_active_pid);
    unsigned irqstate = disableIRQ();

    if (ATOMIC_VALUE(mutex->val) != 0) {
        priority_queue_node_t *next = priority_queue_remove_head(&(mutex->queue));
        if (next) {
            thread_t *process = (thread_t *) next->data;
            DEBUG("%s: waking up waiter.\n", process->name);
            sched_set_status(process, STATUS_PENDING);
        }
        else {
            ATOMIC_VALUE(mutex->val) = 0; /* This is safe, interrupts are disabled */
        }
    }
    DEBUG("%s: going to sleep.\n", sched_active_thread->name);
    sched_set_status((thread_t*) sched_active_thread, STATUS_SLEEPING);
    restoreIRQ(irqstate);
    thread_yield_higher();
}
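A hedged sketch of the consumer/producer pattern this call supports: the consumer releases the lock and goes to sleep in one step (closing the wakeup race), and the producer later wakes it explicitly. thread_wakeup() is a standard RIOT call; the lock, pid variable and surrounding logic are assumptions for illustration:

static struct mutex_t buf_lock;
static kernel_pid_t consumer_pid;       /* assumed to be set at thread creation */

static void consumer_wait(void)
{
    mutex_lock(&buf_lock);
    /* ... nothing to consume yet ... */
    mutex_unlock_and_sleep(&buf_lock);  /* unlock + STATUS_SLEEPING atomically */
}

static void producer_notify(void)
{
    /* make the sleeping consumer runnable again so it re-checks the buffer */
    thread_wakeup(consumer_pid);
}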
Example #10
void sched_switch(uint16_t other_prio)
{
    thread_t *active_thread = (thread_t *) sched_active_thread;
    uint16_t current_prio = active_thread->priority;
    int on_runqueue = (active_thread->status >= STATUS_ON_RUNQUEUE);

    DEBUG("sched_switch: active pid=%" PRIkernel_pid" prio=%" PRIu16 " on_runqueue=%i "
          ", other_prio=%" PRIu16 "\n",
          active_thread->pid, current_prio, on_runqueue, other_prio);

    if (!on_runqueue || (current_prio > other_prio)) {
        if (inISR()) {
            DEBUG("sched_switch: setting sched_context_switch_request.\n");
            sched_context_switch_request = 1;
        }
        else {
            DEBUG("sched_switch: yielding immediately.\n");
            thread_yield_higher();
        }
    }
    else {
        DEBUG("sched_switch: continuing without yield.\n");
    }
}
Example #11
File: msg.c Project: AdamRLukaitis/RIOT
static int _msg_send(msg_t *m, kernel_pid_t target_pid, bool block, unsigned state)
{
#ifdef DEVELHELP
    if (!pid_is_valid(target_pid)) {
        DEBUG("msg_send(): target_pid is invalid, continuing anyways\n");
    }
#endif /* DEVELHELP */

    thread_t *target = (thread_t*) sched_threads[target_pid];

    m->sender_pid = sched_active_pid;

    if (target == NULL) {
        DEBUG("msg_send(): target thread does not exist\n");
        irq_restore(state);
        return -1;
    }

    thread_t *me = (thread_t *) sched_active_thread;

    DEBUG("msg_send() %s:%i: Sending from %" PRIkernel_pid " to %" PRIkernel_pid
          ". block=%i src->state=%i target->state=%i\n", RIOT_FILE_RELATIVE,
          __LINE__, sched_active_pid, target_pid,
          block, me->status, target->status);

    if (target->status != STATUS_RECEIVE_BLOCKED) {
        DEBUG("msg_send() %s:%i: Target %" PRIkernel_pid " is not RECEIVE_BLOCKED.\n",
              RIOT_FILE_RELATIVE, __LINE__, target_pid);

        if (queue_msg(target, m)) {
            DEBUG("msg_send() %s:%i: Target %" PRIkernel_pid
                  " has a msg_queue. Queueing message.\n", RIOT_FILE_RELATIVE,
                  __LINE__, target_pid);
            irq_restore(state);
            if (me->status == STATUS_REPLY_BLOCKED) {
                thread_yield_higher();
            }
            return 1;
        }

        if (!block) {
            DEBUG("msg_send: %" PRIkernel_pid ": Receiver not waiting, block=%u\n",
                  me->pid, block);
            irq_restore(state);
            return 0;
        }

        DEBUG("msg_send: %" PRIkernel_pid ": going send blocked.\n",
              me->pid);

        me->wait_data = (void*) m;

        int newstatus;

        if (me->status == STATUS_REPLY_BLOCKED) {
            newstatus = STATUS_REPLY_BLOCKED;
        }
        else {
            newstatus = STATUS_SEND_BLOCKED;
        }

        sched_set_status((thread_t*) me, newstatus);

        thread_add_to_list(&(target->msg_waiters), me);

        irq_restore(state);
        thread_yield_higher();

        DEBUG("msg_send: %" PRIkernel_pid ": Back from send block.\n",
              me->pid);
    }
    else {
        DEBUG("msg_send: %" PRIkernel_pid ": Direct msg copy from %"
              PRIkernel_pid " to %" PRIkernel_pid ".\n",
              me->pid, thread_getpid(), target_pid);
        /* copy msg to target */
        msg_t *target_message = (msg_t*) target->wait_data;
        *target_message = *m;
        sched_set_status(target, STATUS_PENDING);

        irq_restore(state);
        thread_yield_higher();
    }

    return 1;
}
Example #12
File: msg.c Project: AdamRLukaitis/RIOT
static int _msg_receive(msg_t *m, int block)
{
    unsigned state = irq_disable();
    DEBUG("_msg_receive: %" PRIkernel_pid ": _msg_receive.\n",
          sched_active_thread->pid);

    thread_t *me = (thread_t*) sched_threads[sched_active_pid];

    int queue_index = -1;

    if (me->msg_array) {
        queue_index = cib_get(&(me->msg_queue));
    }

    /* no message, fail */
    if ((!block) && ((!me->msg_waiters.next) && (queue_index == -1))) {
        irq_restore(state);
        return -1;
    }

    if (queue_index >= 0) {
        DEBUG("_msg_receive: %" PRIkernel_pid ": _msg_receive(): We've got a queued message.\n",
              sched_active_thread->pid);
        *m = me->msg_array[queue_index];
    }
    else {
        me->wait_data = (void *) m;
    }

    list_node_t *next = list_remove_head(&me->msg_waiters);

    if (next == NULL) {
        DEBUG("_msg_receive: %" PRIkernel_pid ": _msg_receive(): No thread in waiting list.\n",
              sched_active_thread->pid);

        if (queue_index < 0) {
            DEBUG("_msg_receive(): %" PRIkernel_pid ": No msg in queue. Going blocked.\n",
                  sched_active_thread->pid);
            sched_set_status(me, STATUS_RECEIVE_BLOCKED);

            irq_restore(state);
            thread_yield_higher();

            /* sender copied message */
        }
        else {
            irq_restore(state);
        }

        return 1;
    }
    else {
        DEBUG("_msg_receive: %" PRIkernel_pid ": _msg_receive(): Waking up waiting thread.\n",
              sched_active_thread->pid);

        thread_t *sender = container_of((clist_node_t*)next, thread_t, rq_entry);

        if (queue_index >= 0) {
            /* We've already got a message from the queue. As there is a
             * waiter, take its message into the just-freed queue space.
             */
            m = &(me->msg_array[cib_put(&(me->msg_queue))]);
        }

        /* copy msg */
        msg_t *sender_msg = (msg_t*) sender->wait_data;
        *m = *sender_msg;

        /* remove sender from queue */
        uint16_t sender_prio = THREAD_PRIORITY_IDLE;
        if (sender->status != STATUS_REPLY_BLOCKED) {
            sender->wait_data = NULL;
            sched_set_status(sender, STATUS_PENDING);
            sender_prio = sender->priority;
        }

        irq_restore(state);
        if (sender_prio < THREAD_PRIORITY_IDLE) {
            sched_switch(sender_prio);
        }
        return 1;
    }

    DEBUG("This should have never been reached!\n");
}
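Examples #11 and #12 are the send and receive halves of RIOT's IPC. A hedged sketch of the public API they back, msg_send() and msg_receive(); the message type, payload and thread setup are assumptions for illustration:

#include "msg.h"
#include "thread.h"

static void sender_example(kernel_pid_t server_pid)
{
    msg_t m;
    m.type = 0x0001;                /* hypothetical message type */
    m.content.value = 42;
    /* may block in _msg_send() until the server has taken or queued the message */
    msg_send(&m, server_pid);
}

static void *server_loop(void *arg)
{
    (void) arg;
    msg_t m;
    while (1) {
        /* blocks RECEIVE_BLOCKED in _msg_receive() until a message arrives */
        msg_receive(&m);
        /* handle m.type / m.content here */
    }
    return NULL;
}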
Example #13
static int _send(netdev_t *netdev, const iolist_t *iolist)
{
    #if ESP_NOW_UNICAST
    if (!_esp_now_scan_peers_done) {
        return -ENODEV;
    }
    #endif

    DEBUG("%s: %p %p\n", __func__, netdev, iolist);

    CHECK_PARAM_RET (netdev != NULL, -ENODEV);
    CHECK_PARAM_RET (iolist != NULL, -EINVAL);

    esp_now_netdev_t* dev = (esp_now_netdev_t*)netdev;

    mutex_lock(&dev->dev_lock);
    dev->tx_len = 0;

    /* load packet data into TX buffer */
    for (const iolist_t *iol = iolist; iol; iol = iol->iol_next) {
        if (dev->tx_len + iol->iol_len > ESP_NOW_MAX_SIZE) {
            mutex_unlock(&dev->dev_lock);
            return -EOVERFLOW;
        }
        memcpy (dev->tx_buf + dev->tx_len, iol->iol_base, iol->iol_len);
        dev->tx_len += iol->iol_len;
    }

    #if ENABLE_DEBUG
    printf ("%s: send %d byte\n", __func__, dev->tx_len);
    /* esp_hexdump (dev->tx_buf, dev->tx_len, 'b', 16); */
    #endif

    _esp_now_sending = 1;

    uint8_t* _esp_now_dst = 0;

    #if ESP_NOW_UNICAST
    ipv6_hdr_t* ipv6_hdr = (ipv6_hdr_t*)dev->tx_buf;
    uint8_t  _esp_now_dst_from_iid[6];

    if (ipv6_hdr->dst.u8[0] == 0xff) {
        /* packets to multicast prefix ff::/8 are sent to all peers */
        DEBUG("multicast to all peers\n");
        _esp_now_dst = 0;
        _esp_now_sending = dev->peers_all;

        #ifdef MODULE_NETSTATS_L2
        netdev->stats.tx_mcast_count++;
        #endif
    }

    else if ((byteorder_ntohs(ipv6_hdr->dst.u16[0]) & 0xffc0) == 0xfe80) {
        /* for link local addresses fe80::/10, the MAC address is derived from dst address */
        _get_mac_from_iid(&ipv6_hdr->dst.u8[8], _esp_now_dst_from_iid);
        DEBUG("link local to %02x:%02x:%02x:%02x:%02x:%02x\n",
              _esp_now_dst_from_iid[0], _esp_now_dst_from_iid[1],
              _esp_now_dst_from_iid[2], _esp_now_dst_from_iid[3],
              _esp_now_dst_from_iid[4], _esp_now_dst_from_iid[5]);
        _esp_now_dst = _esp_now_dst_from_iid;
        _esp_now_sending = 1;
    }

    else {
        #ifdef MODULE_GNRC_IPV6_NIB
        /* for other addresses, try to find an entry in NIB cache */
        gnrc_ipv6_nib_nc_t nce;
        int ret = gnrc_ipv6_nib_get_next_hop_l2addr (&ipv6_hdr->dst, dev->netif,
                                                     NULL, &nce);
        if (ret == 0) {
            /* entry was found in NIB, use MAC address from the NIB cache entry */
            DEBUG("global, next hop to neighbor %02x:%02x:%02x:%02x:%02x:%02x\n",
                  nce.l2addr[0], nce.l2addr[1], nce.l2addr[2],
                  nce.l2addr[3], nce.l2addr[4], nce.l2addr[5]);
            _esp_now_dst = nce.l2addr;
            _esp_now_sending = 1;
        }
        else {
        #endif
            /* entry was not found in NIB, send to all peers */
            DEBUG("global, no neibhbor found, multicast to all peers\n");
            _esp_now_dst = 0;
            _esp_now_sending = dev->peers_all;

            #ifdef MODULE_NETSTATS_L2
            netdev->stats.tx_mcast_count++;
            #endif

        #ifdef MODULE_GNRC_IPV6_NIB
        }
        #endif
    }

    #else /* ESP_NOW_UNICAST */

    ipv6_hdr_t* ipv6_hdr = (ipv6_hdr_t*)dev->tx_buf;
    uint8_t  _esp_now_dst_from_iid[6];

    _esp_now_dst = (uint8_t*)_esp_now_mac;
    _esp_now_sending = 1;

    if (ipv6_hdr->dst.u8[0] == 0xff) {
        /* packets to multicast prefix ff::/8 are sent to all peers */
        DEBUG("multicast to all peers\n");

        #ifdef MODULE_NETSTATS_L2
        netdev->stats.tx_mcast_count++;
        #endif
    }

    else if ((byteorder_ntohs(ipv6_hdr->dst.u16[0]) & 0xffc0) == 0xfe80) {
        /* for link local addresses fe80::/10, the MAC address is derived from dst address */
        _get_mac_from_iid(&ipv6_hdr->dst.u8[8], _esp_now_dst_from_iid);
        DEBUG("link local to %02x:%02x:%02x:%02x:%02x:%02x\n",
              _esp_now_dst_from_iid[0], _esp_now_dst_from_iid[1],
              _esp_now_dst_from_iid[2], _esp_now_dst_from_iid[3],
              _esp_now_dst_from_iid[4], _esp_now_dst_from_iid[5]);
        if (esp_now_is_peer_exist(_esp_now_dst_from_iid) > 0) {
            _esp_now_dst = _esp_now_dst_from_iid;
        }
    }

    else {
        /* for other addresses, try to find an entry in NIB cache */
        gnrc_ipv6_nib_nc_t nce;
        int ret = gnrc_ipv6_nib_get_next_hop_l2addr (&ipv6_hdr->dst, dev->netif,
                                                     NULL, &nce);
        if (ret == 0 && esp_now_is_peer_exist(nce.l2addr) > 0) {
            /* entry was found in NIB, use MAC address from the NIB cache entry */
            DEBUG("global, next hop to neighbor %02x:%02x:%02x:%02x:%02x:%02x\n",
                  nce.l2addr[0], nce.l2addr[1], nce.l2addr[2],
                  nce.l2addr[3], nce.l2addr[4], nce.l2addr[5]);
            _esp_now_dst = nce.l2addr;
        }
        else {
            /* entry was not found in NIB, send to all peers */
            DEBUG("global, no neibhbor found, multicast to all peers\n");

            #ifdef MODULE_NETSTATS_L2
            netdev->stats.tx_mcast_count++;
            #endif
        }
    }

    #endif /* ESP_NOW_UNICAST */
    if (_esp_now_dst) {
        DEBUG("%s: send to esp_now addr %02x:%02x:%02x:%02x:%02x:%02x\n", __func__,
              _esp_now_dst[0], _esp_now_dst[1], _esp_now_dst[2],
              _esp_now_dst[3], _esp_now_dst[4], _esp_now_dst[5]);
    }

    /* send the packet to the peer(s) MAC address */
    if (esp_now_send (_esp_now_dst, dev->tx_buf, dev->tx_len) == 0) {
        while (_esp_now_sending > 0) {
            thread_yield_higher();
        }

        #ifdef MODULE_NETSTATS_L2
        netdev->stats.tx_bytes += dev->tx_len;
        netdev->event_callback(netdev, NETDEV_EVENT_TX_COMPLETE);
        #endif

        mutex_unlock(&dev->dev_lock);
        return dev->tx_len;
    }
    else {
        #ifdef MODULE_NETSTATS_L2
        netdev->stats.tx_failed++;
        #endif
    }

    mutex_unlock(&dev->dev_lock);
    return -EIO;
}
Example #14
File: main.c Project: mali/RIOT
int main(void)
{
    puts("Context swap race condition test application");

    kernel_pid_t pid;

    puts("Starting IRQ check thread");
    pid = thread_create(iqr_check_stack, sizeof(iqr_check_stack),
                        THREAD_PRIORITY_MAIN - 1,
                        THREAD_CREATE_SLEEPING,
                        _thread_irq_check, NULL, "irqchk");

    printf("Checking for working context swap (to detect false positives)... ");
    irq_occurred = 0;
    _thread_wake_wo_yield(pid);

    thread_yield_higher();

    /* Delay so we are not testing for race conditions also */
    _spin();

    if (irq_occurred == 1) {
        puts("[Success]");
    }
    else {
        puts("[Failed]");
        return -1;
    }

    printf("Checking for reset of swaps (to detect false positives)... ");
    irq_occurred = 0;
    _thread_wake_wo_yield(pid);

    thread_yield_higher();

    /* Delay so we are not testing for race conditions also */
    _spin();

    if (irq_occurred == 1) {
        puts("[Success]");
    }
    else {
        puts("[Failed]");
        return -1;
    }

    /* Volatile so it is not messed with by optimizations */
    volatile uint8_t race_test;

    printf("Checking for context swap race condition... ");
    irq_occurred = 0;
    _thread_wake_wo_yield(pid);

    thread_yield_higher();

    /* Race instruction */
    race_test = irq_occurred;

    if (race_test == 1) {
        puts("[Success]");
    }
    else {
        puts("[Failed]");
        return -1;
    }

    return 0;
}