static void vtimer_callback_msg(vtimer_t *timer)
{
    msg_t msg;
    msg.type = timer->type;
    msg.content.value = (unsigned int) timer->arg;
    msg_send_int(&msg, timer->pid);
}
static void pir_send_msg(pir_t *dev, pir_event_t event)
{
    DEBUG("pir_send_msg\n");
    msg_t m = {
        .type = event,
        .content.ptr = (char*) dev,
    };
    int ret = msg_send_int(&m, dev->msg_thread_pid);
    DEBUG("pir_send_msg: msg_send_int: %i\n", ret);

    switch (ret) {
        case 0:
            DEBUG("pir_send_msg: msg_thread_pid not receptive, event is lost");
            break;
        case 1:
            DEBUG("pir_send_msg: OK");
            break;
        case -1:
            DEBUG("pir_send_msg: msg_thread_pid is gone, clearing it");
            dev->msg_thread_pid = KERNEL_PID_UNDEF;
            break;
    }
}

static void pir_callback(void *arg)
{
    DEBUG("pir_callback: %p\n", arg);
    pir_t *dev = (pir_t*) arg;

    if (dev->msg_thread_pid != KERNEL_PID_UNDEF) {
        pir_send_msg(dev, pir_get_status(dev));
    }
}
interrupt(RTC_VECTOR) __attribute__((naked)) rtc_isr(void)
{
    __enter_isr();

    /* RTC is safe to write for up to one second now */
    if (RTCIV == RTC_RTCRDYIFG) {
        /* disable interrupt */
        //RTCCTL0 &= ~RTCRDYIE;

        if (set_time) {
            set_time = 0;
            /* apply the previously requested time and clear the flag */
            RTCSEC = time_to_set.tm_sec;
            RTCMIN = time_to_set.tm_min;
            RTCHOUR = time_to_set.tm_hour;
            RTCDAY = time_to_set.tm_mday;
            RTCDOW = time_to_set.tm_wday;
            RTCMON = time_to_set.tm_mon + 1;
            RTCYEARL = (time_to_set.tm_year + 1900) & 0xFF;
            RTCYEARH = (time_to_set.tm_year + 1900) >> 0x08;
        }

        if (rtc_second_pid != KERNEL_PID_UNDEF) {
            static msg_t m;
            m.type = RTCSEC;
            msg_send_int(&m, rtc_second_pid);
        }
    }
    /* RTC alarm */
    else if (RTCIV == RTC_RTCAIFG) {
/*
 * Radio interrupt routine
 */
void isr_radio(void)
{
    msg_t msg;

    if (NRF_RADIO->EVENTS_END == 1) {
        NRF_RADIO->EVENTS_END = 0;

        /* did we just send or receive something? */
        if (_state == STATE_RX) {
            /* drop packet on invalid CRC */
            if (NRF_RADIO->CRCSTATUS != 1) {
                return;
            }
            msg.type = NG_NETDEV_MSG_TYPE_EVENT;
            msg.content.value = ISR_EVENT_RX_DONE;
            msg_send_int(&msg, _netdev->mac_pid);
            /* switch buffer */
            _rx_next = _rx_next ^ 1;
            NRF_RADIO->PACKETPTR = (uint32_t)&(_rx_buf[_rx_next]);
            /* go back into receive mode */
            NRF_RADIO->TASKS_START = 1;
        }
        else if (_state == STATE_TX) {
            /* disable radio again */
            _switch_to_idle();
            /* if radio was receiving before, go back into RX state */
            if (_tx_prestate == STATE_RX) {
                _switch_to_rx();
            }
        }
    }

    if (sched_context_switch_request) {
        thread_yield();
    }
}
void vtimer_callback(void *ptr)
{
    vtimer_t *timer;
    in_callback = true;
    hwtimer_id = -1;

    timer = (vtimer_t *)queue_remove_head(&shortterm_queue_root);

#ifdef ENABLE_DEBUG
    vtimer_print(timer);
#endif
    DEBUG("vtimer_callback(): Shooting %lu.\n", timer->absolute.microseconds);

    /* shoot timer */
    if (timer->action == (void*) msg_send_int) {
        msg_t msg;
        msg.type = MSG_TIMER;
        msg.content.value = (unsigned int) timer->arg;
        msg_send_int(&msg, timer->pid);
    }
    else {
        timer->action(timer->arg);
    }

    in_callback = false;
    update_shortterm();
}
void at86rf231_rx_handler(void)
{
    uint8_t lqi, fcs_rssi;

    // read packet length
    at86rf231_read_fifo(&at86rf231_rx_buffer[rx_buffer_next].length, 1);

    // read psdu: packet with length as first byte and lqi as last byte
    uint8_t *buf = buffer[rx_buffer_next];
    at86rf231_read_fifo(buf, at86rf231_rx_buffer[rx_buffer_next].length);

    // read lqi which is appended after the psdu
    lqi = buf[at86rf231_rx_buffer[rx_buffer_next].length - 1];

    // read fcs and rssi from a register
    fcs_rssi = at86rf231_reg_read(AT86RF231_REG__PHY_RSSI);

    // build package
    at86rf231_rx_buffer[rx_buffer_next].lqi = lqi;
    // RSSI has no meaning here, it should be read during packet reception.
    at86rf231_rx_buffer[rx_buffer_next].rssi = fcs_rssi & 0x1F;         // bits [4:0]
    // bit 7, boolean, 1 FCS valid, 0 FCS not valid
    at86rf231_rx_buffer[rx_buffer_next].crc = (fcs_rssi >> 7) & 0x01;

    if (at86rf231_rx_buffer[rx_buffer_next].crc == 0) {
        DEBUG("Got packet with invalid crc.\n");
        return;
    }

    ieee802154_frame_read(&buf[1], &at86rf231_rx_buffer[rx_buffer_next].frame,
                          at86rf231_rx_buffer[rx_buffer_next].length);

    if (at86rf231_rx_buffer[rx_buffer_next].frame.fcf.frame_type != 2) {
#if DEBUG_ENABLED
        ieee802154_frame_print_fcf_frame(&at86rf231_rx_buffer[rx_buffer_next].frame);
#endif

        /* notify transceiver thread if any */
        if (transceiver_pid) {
            msg_t m;
            m.type = (uint16_t) RCV_PKT_AT86RF231;
            m.content.value = rx_buffer_next;
            msg_send_int(&m, transceiver_pid);
        }
    }
    else {
#if DEBUG_ENABLED
        DEBUG("GOT ACK for SEQ %u\n", at86rf231_rx_buffer[rx_buffer_next].frame.seq_nr);
        ieee802154_frame_print_fcf_frame(&at86rf231_rx_buffer[rx_buffer_next].frame);
#endif
    }

    // shift to next buffer element
    if (++rx_buffer_next == AT86RF231_RX_BUF_SIZE) {
        rx_buffer_next = 0;
    }

    // read IRQ status register to clear it
    at86rf231_reg_read(AT86RF231_REG__IRQ_STATUS);
}
static void update_callback_default(uint8_t reg_c)
{
    if (update_pid != KERNEL_PID_UNDEF) {
        msg_t m;
        m.type = reg_c | (RTC_REG_B_INT_UPDATE << 8);
        m.content.value = update_msg_content;
        msg_send_int(&m, update_pid);
    }
}
static void periodic_callback_default(uint8_t reg_c)
{
    if (periodic_pid != KERNEL_PID_UNDEF) {
        msg_t m;
        m.type = reg_c | (RTC_REG_B_INT_PERIODIC << 8);
        m.content.value = periodic_msg_content;
        msg_send_int(&m, periodic_pid);
    }
}
static void alarm_callback_default(uint8_t reg_c)
{
    if (alarm_pid != KERNEL_PID_UNDEF) {
        msg_t m;
        m.type = reg_c | (RTC_REG_B_INT_ALARM << 8);
        m.content.value = alarm_msg_content;
        msg_send_int(&m, alarm_pid);
    }
}
int msg_try_send(msg_t *m, kernel_pid_t target_pid)
{
    if (irq_is_in()) {
        return msg_send_int(m, target_pid);
    }
    if (sched_active_pid == target_pid) {
        return msg_send_to_self(m);
    }
    return _msg_send(m, target_pid, false, irq_disable());
}
int msg_send(msg_t *m, kernel_pid_t target_pid)
{
    if (inISR()) {
        return msg_send_int(m, target_pid);
    }
    if (sched_active_pid == target_pid) {
        return msg_send_to_self(m);
    }
    return _msg_send(m, target_pid, true, disableIRQ());
}
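/*
 * The callbacks and wrappers above dispatch to msg_send_int() when running in
 * interrupt context; the counterpart is an ordinary thread that picks the
 * messages up. Below is a minimal sketch of such a handler thread using
 * RIOT's msg_init_queue()/msg_receive() API. The queue size, the event type
 * MY_EVENT_TYPE and the handle_event() helper are hypothetical and only
 * illustrate the pattern, not any particular driver's thread.
 */
#define MSG_QUEUE_SIZE  (8)     /* must be a power of two */

static msg_t _msg_queue[MSG_QUEUE_SIZE];

static void *event_handler_thread(void *arg)
{
    (void) arg;

    /* a message queue lets msg_send_int() succeed even while this thread
     * is still busy handling a previous event */
    msg_init_queue(_msg_queue, MSG_QUEUE_SIZE);

    while (1) {
        msg_t m;
        msg_receive(&m);    /* blocks until an ISR (or thread) sends a message */

        switch (m.type) {
            case MY_EVENT_TYPE:
                handle_event(m.content.value);
                break;
            default:
                break;
        }
    }
    return NULL;
}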
/* Run in interrupt context */
static void gpio_cb(void *data)
{
    msg_t m;
    struct interrupt_data *int_data = data;

    int_data->base.refcount++;
    m.type = GPIO;
    m.content.ptr = data;
    msg_send_int(&m, pid);
}
/* Run in interrupt context */
static int uart_tx_cb(void *data)
{
    msg_t m;
    struct uart_interrupt_data *int_data = data;

    int_data->base.refcount++;
    m.type = UART_TX;
    m.content.ptr = data;
    msg_send_int(&m, pid);
    return 0;
}
static void interrupt_scheduler_notify_main_thread(uint16_t type, struct interrupt_data_base *handler)
{
    msg_t m;

    if (handler->pending) {
        return;
    }
    handler->pending = true;

    m.type = type;
    m.content.ptr = (char *)handler;
    msg_send_int(&m, pid);
}
void pthread_exit(void *retval)
{
    pthread_t self_id = pthread_self();

    if (self_id == 0) {
        DEBUG("ERROR called pthread_self() returned 0 in \"%s\"!\n", __func__);
    }
    else {
        pthread_thread_t *self = pthread_sched_threads[self_id - 1];

        while (self->cleanup_top) {
            __pthread_cleanup_datum_t *ct = self->cleanup_top;
            self->cleanup_top = ct->__next;
            ct->__routine(ct->__arg);
        }

        /* Prevent linking in pthread_tls.o if no TSS functions were used. */
        extern void __pthread_keys_exit(int self_id) __attribute__((weak));
        if (__pthread_keys_exit) {
            __pthread_keys_exit(self_id);
        }

        self->thread_pid = KERNEL_PID_UNDEF;
        DEBUG("pthread_exit(%p), self == %p\n", retval, (void *) self);

        if (self->status != PTS_DETACHED) {
            self->returnval = retval;
            self->status = PTS_ZOMBIE;

            if (self->joining_thread) {
                /* another thread is waiting to join us */
                thread_wakeup(self->joining_thread);
            }
        }

        dINT();
        if (self->stack) {
            msg_t m;
            m.content.ptr = self->stack;
            msg_send_int(&m, pthread_reaper_pid);
        }
    }
    sched_task_exit();
}
void _nativenet_handle_packet(radio_packet_t *packet)
{
    radio_address_t dst_addr = packet->dst;

    /* address filter / monitor mode */
    if (_native_net_monitor == 1) {
        DEBUG("_nativenet_handle_packet: monitoring, not filtering address \n");
    }
    else {
        /* own addr check */
        if (dst_addr == _native_net_addr) {
            DEBUG("_nativenet_handle_packet: accept packet, addressed to us\n");
        }
        else if (dst_addr == 0) {
            DEBUG("_nativenet_handle_packet: accept packet, broadcast\n");
        }
        else {
            DEBUG("_nativenet_handle_packet: discard packet addressed to someone else\n");
            return;
        }
    }

    /* copy packet to rx buffer */
    DEBUG("\n\t\trx_buffer_next: %i\n\n", rx_buffer_next);
    memcpy(&_nativenet_rx_buffer[rx_buffer_next].data, packet->data, packet->length);
    memcpy(&_nativenet_rx_buffer[rx_buffer_next].packet, packet, sizeof(radio_packet_t));
    _nativenet_rx_buffer[rx_buffer_next].packet.data =
        (uint8_t *) &_nativenet_rx_buffer[rx_buffer_next].data;

    /* notify transceiver thread if any */
    if (_native_net_tpid) {
        DEBUG("_nativenet_handle_packet: notifying transceiver thread!\n");
        msg_t m;
        m.type = (uint16_t) RCV_PKT_NATIVE;
        m.content.value = rx_buffer_next;
        msg_send_int(&m, _native_net_tpid);
    }
    else {
        DEBUG("_nativenet_handle_packet: no one to notify =(\n");
    }

    /* shift to next buffer element */
    if (++rx_buffer_next == RX_BUF_SIZE) {
        rx_buffer_next = 0;
    }
}
/* UART callbacks */
static void _slip_rx_cb(void *arg, char data)
{
    if (data == _SLIP_END) {
        msg_t msg;
        msg.type = _SLIP_MSG_TYPE;
        msg.content.value = _SLIP_DEV(arg)->in_bytes;
        msg_send_int(&msg, _SLIP_DEV(arg)->slip_pid);
        _SLIP_DEV(arg)->in_bytes = 0;
    }

    if (_SLIP_DEV(arg)->in_esc) {
        _SLIP_DEV(arg)->in_esc = 0;

        switch (data) {
            case (_SLIP_END_ESC):
                if (ringbuffer_add_one(_SLIP_DEV(arg)->in_buf, _SLIP_END) < 0) {
                    _SLIP_DEV(arg)->in_bytes++;
                }
                break;

            case (_SLIP_ESC_ESC):
                if (ringbuffer_add_one(_SLIP_DEV(arg)->in_buf, _SLIP_ESC) < 0) {
                    _SLIP_DEV(arg)->in_bytes++;
                }
                break;

            default:
                break;
        }
    }
    else if (data == _SLIP_ESC) {
        _SLIP_DEV(arg)->in_esc = 1;
    }
    else {
        if (ringbuffer_add_one(_SLIP_DEV(arg)->in_buf, data) < 0) {
            _SLIP_DEV(arg)->in_bytes++;
        }
    }
}
/* Run in interrupt context */
static void uart_rx_cb(void *data, char char_read)
{
    msg_t m;
    struct uart_interrupt_data *int_data = data;
    struct uart_rx_interrupt_data *rx_int_data =
        malloc(sizeof(struct uart_rx_interrupt_data));

    if (!rx_int_data) {
        return;
    }

    rx_int_data->uart_int = int_data;
    rx_int_data->char_read = char_read;
    int_data->base.refcount++;

    m.type = UART_RX;
    m.content.ptr = (char *)rx_int_data;
    msg_send_int(&m, pid);
}
void pthread_exit(void *retval)
{
    pthread_t self_id = pthread_self();

    if (self_id == 0) {
        DEBUG("ERROR called pthread_self() returned 0 in \"%s\"!\n", __func__);
    }
    else {
        pthread_thread_t *self = pthread_sched_threads[self_id - 1];

        while (self->cleanup_top) {
            __pthread_cleanup_datum_t *ct = self->cleanup_top;
            self->cleanup_top = ct->__next;
            ct->__routine(ct->__arg);
        }

        self->thread_pid = KERNEL_PID_NULL;
        DEBUG("pthread_exit(%p), self == %p\n", retval, (void *) self);

        if (self->status != PTS_DETACHED) {
            self->returnval = retval;
            self->status = PTS_ZOMBIE;

            if (self->joining_thread) {
                /* another thread is waiting to join us */
                thread_wakeup(self->joining_thread);
            }
        }

        dINT();
        if (self->stack) {
            msg_t m;
            m.content.ptr = self->stack;
            msg_send_int(&m, pthread_reaper_pid);
        }
    }
    sched_task_exit();
}
void cc110x_rx_handler(void)
{
    uint8_t res = 0;

    // Possible packet received, RX -> IDLE (0.1 us)
    rflags.CAA = 0;
    rflags.MAN_WOR = 0;
    cc110x_statistic.packets_in++;

    res = receive_packet((uint8_t*)&(cc110x_rx_buffer[rx_buffer_next].packet), sizeof(cc110x_packet_t));

    if (res) {
        // If we are sending a burst, don't accept packets.
        // Only ACKs are processed (for stopping the burst).
        // Same if state machine is in TX lock.
        if (radio_state == RADIO_SEND_BURST || rflags.TX) {
            cc110x_statistic.packets_in_while_tx++;
            return;
        }

        cc110x_rx_buffer[rx_buffer_next].rssi = rflags._RSSI;
        cc110x_rx_buffer[rx_buffer_next].lqi = rflags._LQI;
        cc110x_strobe(CC1100_SFRX);                 // ...for flushing the RX FIFO

        // Valid packet. After a wake-up, the radio should be in IDLE.
        // So put CC1100 to RX for WOR_TIMEOUT (have to manually put
        // the radio back to sleep/WOR).
        //cc110x_spi_write_reg(CC1100_MCSM0, 0x08); // Turn off FS-Autocal
        cc110x_write_reg(CC1100_MCSM2, 0x07);       // Configure RX_TIME (until end of packet)
        cc110x_strobe(CC1100_SRX);
        hwtimer_wait(IDLE_TO_RX_TIME);
        radio_state = RADIO_RX;

#ifdef DBG_IGNORE
        if (is_ignored(cc110x_rx_buffer[rx_buffer_next].packet.phy_src)) {
            LED_RED_TOGGLE;
            return;
        }
#endif

        /* notify transceiver thread if any */
        if (transceiver_pid) {
            msg_t m;
            m.type = (uint16_t) RCV_PKT_CC1100;
            m.content.value = rx_buffer_next;
            msg_send_int(&m, transceiver_pid);
        }

        /* shift to next buffer element */
        if (++rx_buffer_next == RX_BUF_SIZE) {
            rx_buffer_next = 0;
        }

        return;
    }
    else {
        // No ACK received, so TOF is unpredictable
        rflags.TOF = 0;

        // CRC false or RX buffer full -> clear RX FIFO in both cases
        cc110x_strobe(CC1100_SIDLE);    // Switch to IDLE (should already be)...
        cc110x_strobe(CC1100_SFRX);     // ...for flushing the RX FIFO

        // If the packet interrupted this node's send call,
        // don't change anything after this point.
        if (radio_state == RADIO_AIR_FREE_WAITING) {
            cc110x_strobe(CC1100_SRX);
            hwtimer_wait(IDLE_TO_RX_TIME);
            return;
        }

        // If currently sending, exit here (don't go to RX/WOR)
        if (radio_state == RADIO_SEND_BURST) {
            cc110x_statistic.packets_in_while_tx++;
            return;
        }

        // No valid packet, so go back to RX/WOR as soon as possible
        cc110x_switch_to_rx();
    }
}
void cc110x_rx_handler(void *args)
{
    uint8_t res = 0;

    /* Possible packet received, RX -> IDLE (0.1 us) */
    cc110x_statistic.packets_in++;

    res = receive_packet((uint8_t *)&(cc110x_rx_buffer[rx_buffer_next].packet), sizeof(cc110x_packet_t));

    if (res) {
        /* If we are sending a burst, don't accept packets.
         * Only ACKs are processed (for stopping the burst).
         * Same if state machine is in TX lock. */
        if (radio_state == RADIO_SEND_BURST) {
            cc110x_statistic.packets_in_while_tx++;
            return;
        }

        cc110x_rx_buffer[rx_buffer_next].rssi = rflags._RSSI;
        cc110x_rx_buffer[rx_buffer_next].lqi = rflags._LQI;
        cc110x_strobe(CC1100_SFRX);             /* ...for flushing the RX FIFO */

        /* Valid packet. After a wake-up, the radio should be in IDLE.
         * So put CC110x to RX for WOR_TIMEOUT (have to manually put
         * the radio back to sleep/WOR). */
        cc110x_write_reg(CC1100_MCSM2, 0x07);   /* Configure RX_TIME (until end of packet) */
        cc110x_strobe(CC1100_SRX);
        hwtimer_wait(IDLE_TO_RX_TIME);
        radio_state = RADIO_RX;

#ifdef MODULE_TRANSCEIVER
        /* notify transceiver thread if any */
        if (transceiver_pid != KERNEL_PID_UNDEF) {
            msg_t m;
            m.type = (uint16_t) RCV_PKT_CC1100;
            m.content.value = rx_buffer_next;
            msg_send_int(&m, transceiver_pid);
        }
#endif

#ifdef MODULE_NETDEV_BASE
        if (cc110x_recv_cb != NULL) {
            cc110x_packet_t p = cc110x_rx_buffer[rx_buffer_next].packet;
            cc110x_recv_cb(&cc110x_dev, &p.phy_src, sizeof(uint8_t),
                           &p.address, sizeof(uint8_t), p.data,
                           p.length - CC1100_HEADER_LENGTH);
        }
#endif

        /* shift to next buffer element */
        if (++rx_buffer_next == RX_BUF_SIZE) {
            rx_buffer_next = 0;
        }

        return;
    }
    else {
        /* CRC false or RX buffer full -> clear RX FIFO in both cases */
        cc110x_strobe(CC1100_SIDLE);    /* Switch to IDLE (should already be)... */
        cc110x_strobe(CC1100_SFRX);     /* ...for flushing the RX FIFO */

        /* If currently sending, exit here (don't go to RX/WOR) */
        if (radio_state == RADIO_SEND_BURST) {
            cc110x_statistic.packets_in_while_tx++;
            return;
        }

        /* No valid packet, so go back to RX/WOR as soon as possible */
        cc110x_switch_to_rx();
    }
}
void uart0_notify_thread(void)
{
    msg_t m;

    m.type = 0;
    msg_send_int(&m, uart0_handler_pid);
}
int msg_send(msg_t *m, unsigned int target_pid, bool block)
{
    if (inISR()) {
        return msg_send_int(m, target_pid);
    }

    tcb_t *target = (tcb_t*) sched_threads[target_pid];

    m->sender_pid = thread_pid;

    if (m->sender_pid == target_pid) {
        return -1;
    }

    if (target == NULL) {
        return -1;
    }

    dINT();

    if (target->status != STATUS_RECEIVE_BLOCKED) {
        if (target->msg_array && queue_msg(target, m)) {
            eINT();
            return 1;
        }

        if (!block) {
            DEBUG("%s: receiver not waiting. block=%u\n", active_thread->name, block);
            eINT();
            return 0;
        }

        DEBUG("%s: send_blocked.\n", active_thread->name);
        queue_node_t n;
        n.priority = active_thread->priority;
        n.data = (unsigned int) active_thread;
        DEBUG("%s: Adding node to msg_waiters:\n", active_thread->name);

        queue_priority_add(&(target->msg_waiters), &n);

        active_thread->wait_data = (void*) m;

        int newstatus;
        if (active_thread->status == STATUS_REPLY_BLOCKED) {
            newstatus = STATUS_REPLY_BLOCKED;
        }
        else {
            newstatus = STATUS_SEND_BLOCKED;
        }

        sched_set_status((tcb_t*) active_thread, newstatus);

        DEBUG("%s: back from send block.\n", active_thread->name);
    }
    else {
        DEBUG("%s: direct msg copy.\n", active_thread->name);
        /* copy msg to target */
        msg_t *target_message = (msg_t*) target->wait_data;
        *target_message = *m;
        sched_set_status(target, STATUS_PENDING);
    }

    eINT();
    thread_yield();

    return 1;
}
/*
 * Interrupt callbacks
 */
static void _rx_cb(void *arg, char _c)
{
    unsigned char c = _c;
    xbee_t *dev = (xbee_t *)arg;
    msg_t msg;

    switch (dev->int_state) {
        case XBEE_INT_STATE_IDLE:
            /* check for beginning of new data frame */
            if (c == API_START_DELIMITER) {
                dev->int_state = XBEE_INT_STATE_SIZE1;
            }
            break;
        case XBEE_INT_STATE_SIZE1:
            dev->int_size = ((uint16_t)c) << 8;
            dev->int_state = XBEE_INT_STATE_SIZE2;
            break;
        case XBEE_INT_STATE_SIZE2:
            dev->int_size += (uint8_t)c;
            dev->int_state = XBEE_INT_STATE_TYPE;
            break;
        case XBEE_INT_STATE_TYPE:
            if (c == API_ID_RX_SHORT_ADDR || c == API_ID_RX_LONG_ADDR) {
                /* in case old data was not processed, ignore incoming data */
                if (dev->rx_count != 0) {
                    dev->int_state = XBEE_INT_STATE_IDLE;
                    return;
                }
                dev->rx_limit = dev->int_size + 1;
                dev->rx_buf[dev->rx_count++] = (uint8_t)c;
                dev->int_state = XBEE_INT_STATE_RX;
            }
            else if (c == API_ID_AT_RESP) {
                dev->resp_limit = dev->int_size;
                dev->int_state = XBEE_INT_STATE_RESP;
            }
            else {
                dev->int_state = XBEE_INT_STATE_IDLE;
            }
            break;
        case XBEE_INT_STATE_RESP:
            dev->resp_buf[dev->resp_count++] = (uint8_t)c;
            if (dev->resp_count == dev->resp_limit) {
                /* here we ignore the checksum to prevent deadlocks */
                mutex_unlock(&(dev->resp_lock));
                dev->int_state = XBEE_INT_STATE_IDLE;
            }
            break;
        case XBEE_INT_STATE_RX:
            dev->rx_buf[dev->rx_count++] = (uint8_t)c;
            if (dev->rx_count == dev->rx_limit) {
                /* packet is complete */
                msg.type = GNRC_NETDEV_MSG_TYPE_EVENT;
                msg.content.value = ISR_EVENT_RX_DONE;
                msg_send_int(&msg, dev->mac_pid);
                dev->int_state = XBEE_INT_STATE_IDLE;
            }
            break;
        default:
            /* this should never be the case */
            break;
    }
}
int msg_send(msg_t *m, unsigned int target_pid, bool block)
{
    if (inISR()) {
        return msg_send_int(m, target_pid);
    }

    if ((unsigned int)sched_active_pid == target_pid) {
        return msg_send_to_self(m);
    }

    dINT();

    tcb_t *target = (tcb_t*) sched_threads[target_pid];

    m->sender_pid = sched_active_pid;

    if (target == NULL) {
        DEBUG("msg_send(): target thread does not exist\n");
        eINT();
        return -1;
    }

    DEBUG("msg_send() %s:%i: Sending from %i to %i. block=%i src->state=%i target->state=%i\n",
          __FILE__, __LINE__, sched_active_pid, target_pid, block,
          sched_active_thread->status, target->status);

    if (target->status != STATUS_RECEIVE_BLOCKED) {
        DEBUG("msg_send() %s:%i: Target %i is not RECEIVE_BLOCKED.\n",
              __FILE__, __LINE__, target_pid);

        if (target->msg_array && queue_msg(target, m)) {
            DEBUG("msg_send() %s:%i: Target %i has a msg_queue. Queueing message.\n",
                  __FILE__, __LINE__, target_pid);
            eINT();
            if (sched_active_thread->status == STATUS_REPLY_BLOCKED) {
                thread_yield();
            }
            return 1;
        }

        if (!block) {
            DEBUG("msg_send: %s: Receiver not waiting, block=%u\n",
                  sched_active_thread->name, block);
            eINT();
            return 0;
        }

        DEBUG("msg_send: %s: send_blocked.\n", sched_active_thread->name);
        queue_node_t n;
        n.priority = sched_active_thread->priority;
        n.data = (unsigned int) sched_active_thread;
        n.next = NULL;
        DEBUG("msg_send: %s: Adding node to msg_waiters:\n", sched_active_thread->name);

        queue_priority_add(&(target->msg_waiters), &n);

        sched_active_thread->wait_data = (void*) m;

        int newstatus;
        if (sched_active_thread->status == STATUS_REPLY_BLOCKED) {
            newstatus = STATUS_REPLY_BLOCKED;
        }
        else {
            newstatus = STATUS_SEND_BLOCKED;
        }

        sched_set_status((tcb_t*) sched_active_thread, newstatus);

        DEBUG("msg_send: %s: Back from send block.\n", sched_active_thread->name);
    }
    else {
        DEBUG("msg_send: %s: Direct msg copy from %i to %i.\n",
              sched_active_thread->name, thread_getpid(), target_pid);
        /* copy msg to target */
        msg_t *target_message = (msg_t*) target->wait_data;
        *target_message = *m;
        sched_set_status(target, STATUS_PENDING);
    }

    eINT();
    thread_yield();

    return 1;
}