/*
 * Recover a stuck I2C bus by bit-banging SCL pulses in software GPIO mode.
 *
 * A slave interrupted mid-transfer can hold SDA low indefinitely; clocking
 * SCL until the slave finishes shifting out its byte lets it release the
 * line.  The pins are handed back to the hardware I2C controller at the end.
 */
static void recover_i2c_bus(void)
{
    /* Switch to software GPIO mode for bus recovery */
    release_sda();
    release_scl();

    if (!bus_quiet()) {
        const uint_fast8_t try_limit = 200; /* max SCL pulses before giving up */
        uint_fast8_t n;
        for (n = 0; n < try_limit; n++) {
            if (bus_quiet()) {
                DEBUG("%s(): SDA released after%4u SCL pulses.\n", __FUNCTION__, n);
                break;
            }
            /* Generate one SCL pulse: drive low, delay, release, delay */
            assert_scl();
#ifdef MODULE_XTIMER
            xtimer_usleep(scl_delay);
#else
            thread_yield(); /* no timer module: yield as a crude delay */
#endif
            release_scl();
#ifdef MODULE_XTIMER
            xtimer_usleep(scl_delay);
#else
            thread_yield();
#endif
        }
        if (n >= try_limit) {
            DEBUG("%s(): Failed to release SDA after%4u SCL pulses.\n", __FUNCTION__, n);
        }
    }

    /* Return to hardware mode for the I2C pins */
    gpio_hardware_control(I2C_0_SCL_PIN);
    gpio_hardware_control(I2C_0_SDA_PIN);
}
/* Interrupt handler for a 32-bit timer built from two chained 16-bit timers
 * (dev0 holds the low 16 bits, dev1 the high 16 bits).
 *
 * A compare interrupt on the low-half timer only completes the full 32-bit
 * match once the high-half counter has also reached its compare value; until
 * then the low-half flag is cleared and we simply wait for the next low-half
 * wrap.  When the 32-bit match is complete, the channel interrupt is masked
 * (one-shot semantics) and the registered callback runs. */
static inline void irq_handler(tim_t timer, TIM_TypeDef *dev0, TIM_TypeDef *dev1)
{
    DEBUG("CNT: %08x SR/DIER: %08x\n",
          ((dev1->CNT<<16) | (0xffff & dev0->CNT)),
          ((dev0->SR<<16) | (0xffff & dev0->DIER)));

    if ((dev0->SR & TIM_SR_CC1IF) && (dev0->DIER & TIM_DIER_CC1IE)) {
        /* clear interrupt anyway */
        dev0->SR &= ~TIM_SR_CC1IF;
        /* if higher 16bit also match */
        if (dev1->CNT >= dev1->CCR1) {
            dev0->DIER &= ~TIM_DIER_CC1IE; /* one-shot: mask the channel */
            config[timer].cb(0);
        }
        DEBUG("channel 1 CCR: %08x\n", ((dev1->CCR1<<16) | (0xffff & dev0->CCR1)));
    }
    else if ((dev0->SR & TIM_SR_CC2IF) && (dev0->DIER & TIM_DIER_CC2IE)) {
        /* clear interrupt anyway */
        dev0->SR &= ~TIM_SR_CC2IF;
        /* if higher 16bit also match */
        if (dev1->CNT >= dev1->CCR2) {
            dev0->DIER &= ~TIM_DIER_CC2IE;
            config[timer].cb(1);
        }
        DEBUG("channel 2 CCR: %08x\n", ((dev1->CCR2<<16) | (0xffff & dev0->CCR2)));
    }
    else if ((dev0->SR & TIM_SR_CC3IF) && (dev0->DIER & TIM_DIER_CC3IE)) {
        /* clear interrupt anyway */
        dev0->SR &= ~TIM_SR_CC3IF;
        /* if higher 16bit also match */
        if (dev1->CNT >= dev1->CCR3) {
            dev0->DIER &= ~TIM_DIER_CC3IE;
            config[timer].cb(2);
        }
        DEBUG("channel 3 CCR: %08x\n", ((dev1->CCR3<<16) | (0xffff & dev0->CCR3)));
    }
    else if ((dev0->SR & TIM_SR_CC4IF) && (dev0->DIER & TIM_DIER_CC4IE)) {
        /* clear interrupt anyway */
        dev0->SR &= ~TIM_SR_CC4IF;
        /* if higher 16bit also match */
        if (dev1->CNT >= dev1->CCR4) {
            dev0->DIER &= ~TIM_DIER_CC4IE;
            config[timer].cb(3);
        }
        DEBUG("channel 4 CCR: %08x\n", ((dev1->CCR4<<16) | (0xffff & dev0->CCR4)));
    }
    else {
        /* spurious or unhandled flag: clear all status bits */
        dev0->SR = 0;
    }

    if (sched_context_switch_request) {
        thread_yield();
    }
}
/* Thread body: append this thread's id to the shared output buffer
   ITER_CNT times, taking the shared lock around each append and
   yielding afterwards so that threads interleave. */
static void simple_thread_func (void *data_)
{
  struct simple_thread_data *d = data_;
  int iter = 0;

  while (iter < ITER_CNT)
    {
      lock_acquire (d->lock);
      /* Write our id at the shared cursor, then advance the cursor. */
      *(*d->op)++ = d->id;
      lock_release (d->lock);
      thread_yield ();
      iter++;
    }
}
/*
 * Do not modify this!
 */
/* Elf worker: print randomly chosen tasks until task index 0 is drawn,
 * yielding after each one to force interleaved output between elves. */
static void work(unsigned elf_num)
{
    int r;

    r = random() % NUM_TASKS;
    while (r != 0) {
        kprintf("Elf %3u: %s\n", elf_num, tasks[r]);
        r = random() % NUM_TASKS;
        thread_yield(); // cause some interleaving!
    }
}
/**
 * @brief ISR for external interrupt line 4.
 *
 * Naked function: ISR_ENTER()/ISR_EXIT() provide the prologue/epilogue,
 * so the compiler must not emit its own stack frame.
 */
__attribute__((naked)) void isr_exti4(void)
{
    ISR_ENTER();
    if (EXTI->PR & EXTI_PR_PR4) {
        /* PR is write-1-to-clear (rc_w1): use plain assignment.
         * The previous `|=` read back ALL pending bits and wrote them as
         * ones, silently clearing other EXTI lines' pending flags and
         * losing their interrupts. */
        EXTI->PR = EXTI_PR_PR4;
        config[GPIO_IRQ_4].cb(config[GPIO_IRQ_4].arg);
    }
    if (sched_context_switch_request) {
        thread_yield();
    }
    ISR_EXIT();
}
/**
 * @brief RTT interrupt handler: runs the alarm callback when the RTC
 *        alarm flag is set.
 *
 * Naked function: ISR_ENTER()/ISR_EXIT() supply the prologue and epilogue.
 */
__attribute__((naked)) void RTT_ISR(void)
{
    ISR_ENTER();
    if (RTT_DEV->CRL & RTC_CRL_ALRF) {
        /* acknowledge the alarm flag before invoking the callback */
        RTT_DEV->CRL &= ~(RTC_CRL_ALRF);
        alarm_cb(alarm_arg);
    }
    if (sched_context_switch_request) {
        thread_yield();
    }
    ISR_EXIT();
}
/* Test thread: announce itself, print its thread pointer 100 times
   (yielding between prints), then mark itself FREE and hand control
   back to the scheduler — this function never returns. */
static void mythread(void)
{
  int count = 0;

  printf(1, "my thread running\n");
  while (count < 100) {
    printf(1, "my thread 0x%x\n", (int) current_thread);
    thread_yield();
    count++;
  }
  printf(1, "my thread: exit\n");
  current_thread->state = FREE;
  thread_schedule();
}
/* Terminate the calling thread: reap any previously killed thread, queue
 * ourselves for cleanup, and switch away.  Control only comes back here
 * when the scheduler has no other thread to run. */
void thread_halt()
{
    // right now the kill_queue will never be more than one
    // clean up a thread if one is on the queue
    thread_clean(threadq_pop(&kill_queue));

    /* Queue ourselves for cleanup by a later thread_halt() call; we
     * cannot free our own stack while still running on it. */
    threadq_push(&kill_queue, cur_tc);
    cur_tc = NULL;
    thread_yield();

    // WHAT IF THERE ARE NO MORE THREADS? HOW DO WE STOP?
    // when yield has no thread to run, it will return here!
    exit();
}
/* Test-and-test-and-set (TATAS) spinlock acquire.
 *
 * Spin with plain atomic loads (read-only, cache-friendly) until the flag
 * looks free, then attempt the actual test-and-set; if another thread won
 * the race, go back to spinning.  thread_yield() inside the spin loop
 * gives the current holder a chance to run and release.
 *
 * NOTE(review): the same object is read via atomic_load_explicit but set
 * via atomic_flag_test_and_set_explicit — those functions expect different
 * atomic types; confirm the declaration of lockFlag.value supports both. */
void tatas_lock(void * lock) {
    TATASLock *l = (TATASLock*)lock;
    while(true){
        while(atomic_load_explicit(&l->lockFlag.value, memory_order_acquire)){
            thread_yield();
        }
        if( ! atomic_flag_test_and_set_explicit(&l->lockFlag.value, memory_order_acquire)){
            return; /* flag was clear: we now own the lock */
        }
    }
}
/* ISR for EXTI lines 0 and 1 (shared interrupt vector). */
void isr_exti0_1(void)
{
    if (EXTI->PR & EXTI_PR_PR0) {
        /* PR is write-1-to-clear (rc_w1): use plain assignment.  The
         * previous `|=` read back ALL pending bits and wrote them as
         * ones, clearing other EXTI lines' pending flags and losing
         * their interrupts. */
        EXTI->PR = EXTI_PR_PR0;
        gpio_config[GPIO_IRQ_0].cb(gpio_config[GPIO_IRQ_0].arg);
    }
    else if (EXTI->PR & EXTI_PR_PR1) {
        EXTI->PR = EXTI_PR_PR1; /* same rc_w1 rule as above */
        gpio_config[GPIO_IRQ_1].cb(gpio_config[GPIO_IRQ_1].arg);
    }
    if (sched_context_switch_request) {
        thread_yield();
    }
}
/* Exercise thread_yield() from the main thread: yield ten times with no
 * other runnable thread, checking each call succeeds, then print our own
 * thread identifier. */
int main()
{
    int round;

    for (round = 0; round < 10; ++round) {
        printf("le main yield tout seul\n");
        int rc = thread_yield();
        assert(!rc);
    }
    printf("le main est %p\n", (void*) thread_self());
    return 0;
}
static int event_signaller(void *arg) { printf("event signaller pausing\n"); thread_sleep(1000); // for (;;) { printf("signalling event\n"); event_signal(&e, true); printf("done signalling event\n"); thread_yield(); // } return 0; }
/* SPI transfer interrupt handler (byte exchange).
 *
 * On "receive data register full": read the incoming byte, let the
 * registered callback compute the reply byte, and queue it in the
 * transmit data register for the next transfer. */
static inline void irq_handler_transfer(Spi *spi, spi_t dev)
{
    if (spi->SPI_SR & SPI_SR_RDRF) {
        char data;
        data = spi->SPI_RDR & SPI_RDR_RD_Msk; /* extract the received byte */
        data = spi_config[dev].cb(data);      /* callback supplies the reply */
        spi->SPI_TDR = SPI_TDR_TD(data);
    }
    /* See if a thread with higher priority wants to run now */
    if (sched_context_switch_request) {
        thread_yield();
    }
}
/* Worker for the 64-bit hashtable stress test: fill the thread's key
 * range, erase it, then refill it with small nonzero values, yielding
 * between phases so concurrent producers interleave. */
static void* producer64_thread( object_t thread, void* arg )
{
	producer64_arg_t* args = arg;
	hashtable64_t* table = args->table;
	uint64_t base = args->key_offset;
	uint64_t idx;
	FOUNDATION_UNUSED( thread );

	/* Phase 1: insert key -> key over our slice */
	for( idx = 1; idx < args->key_num; ++idx ) {
		hashtable64_set( table, idx + base, idx + base );
	}
	thread_yield();

	/* Phase 2: erase everything we just inserted */
	for( idx = 1; idx < args->key_num; ++idx ) {
		hashtable64_erase( table, idx + base );
	}
	thread_yield();

	/* Phase 3: reinsert with small nonzero values in [1, 17] */
	for( idx = 1; idx < args->key_num; ++idx ) {
		hashtable64_set( table, idx + base, 1 + ( ( idx + base ) % 17 ) );
	}

	return 0;
}
/* Spawn a thread, wait for it to start, then terminate it holding only
 * THREAD_TERMINATE access (deliberately not QUERY, to test DrMi#1884). */
int main()
{
    /* _beginthreadex writes the thread id through an `unsigned *`;
     * the previous `int tid` passed an incompatible pointer type. */
    unsigned tid;
    HANDLE h;

    _beginthreadex(NULL, 0, run_func, NULL, 0, &tid);
    while (!thread_started)
        thread_yield();
    /* Deliberately do not ask for query privs to test DrMi#1884 */
    h = OpenThread(THREAD_TERMINATE, FALSE, tid);
    TerminateThread(h, 0);
    print("all done\n");
    return 0;
}
/* Generic timer interrupt: scan every capture/compare channel; for each
 * channel whose flag fired while its interrupt is enabled, acknowledge
 * the flag, mask the channel (one-shot semantics), and run the callback. */
static inline void irq_handler(tim_t num, TIM_TypeDef *tim)
{
    for (int i = 0; i < CHANNEL_NUMOF; i++) {
        uint16_t bit = (1 << (i + 1)); /* CCxIF/CCxIE flags start at bit 1 */
        if ((tim->SR & bit) && (tim->DIER & bit)) {
            tim->SR &= ~(bit);   /* clear this channel's interrupt flag */
            tim->DIER &= ~(bit); /* one-shot: disable the channel interrupt */
            isr_ctx[num].cb(i);
        }
    }
    if (sched_context_switch_request) {
        thread_yield();
    }
}
/* UART receive interrupt: hand each received byte to the registered
 * callback; an overrun error is recovered by the SR-then-DR read pair. */
static inline void irq_handler(uart_t uartnum, USART_TypeDef *dev)
{
    if (dev->SR & USART_SR_RXNE) {
        char data = (char)dev->DR; /* reading DR also clears RXNE */
        config[uartnum].rx_cb(config[uartnum].arg, data);
    }
    else if (dev->SR & USART_SR_ORE) {
        /* ORE is cleared by reading SR and DR sequentially */
        dev->DR; /* dummy volatile read; the SR read above was step one */
    }
    if (sched_context_switch_request) {
        thread_yield();
    }
}
/* Thread body for the yield test: print our identity, burn time in a busy
 * loop, yield once, print again, then exit returning our argument.
 *
 * NOTE(review): `while(i++);` spins until the unsigned counter wraps back
 * to 0 (~2^32 iterations) — presumably a deliberate long delay for the
 * test; confirm this is intended and not leftover debug code. */
static void * threadfunc(void * arg)
{
    char *name = arg;
    fprintf(stderr, "je suis le thread %p, lancé avec l'argument %s\n",
            thread_self(), name);
    unsigned int i = 1;
    while(i++); /* busy-wait until i overflows to zero */
    thread_yield();
    fprintf(stderr, "je suis encore le thread %p, lancé avec l'argument %s\n",
            thread_self(), name);
    thread_exit(arg);
}
/*
 * Wait until specified server starts.
 *
 * Polls object_lookup() for up to one second (100 tries, 10 msec apart),
 * storing the found object through pobj; panics if the server never
 * appears.
 */
static void wait_server(const char *name, object_t *pobj)
{
	int tries = 100;
	int error;

	/* Give chance to run other servers. */
	thread_yield();

	do {
		error = object_lookup((char *)name, pobj);
		if (error == 0)
			return;
		/* Wait 10msec */
		timer_sleep(10, 0);
		thread_yield();
	} while (--tries > 0);

	sys_panic("pow: server not found");
}
/* ISR for EXTI lines 2 and 3 (shared interrupt vector). */
void isr_exti2_3(void)
{
    if (EXTI->PR & EXTI_PR_PR2) {
        /* PR is write-1-to-clear (rc_w1): use plain assignment.  The
         * previous `|=` read back ALL pending bits and wrote them as
         * ones, clearing other EXTI lines' pending flags and losing
         * their interrupts. */
        EXTI->PR = EXTI_PR_PR2;
        gpio_config[GPIO_IRQ_2].cb(gpio_config[GPIO_IRQ_2].arg);
    }
    else if (EXTI->PR & EXTI_PR_PR3) {
        EXTI->PR = EXTI_PR_PR3; /* same rc_w1 rule as above */
        gpio_config[GPIO_IRQ_3].cb(gpio_config[GPIO_IRQ_3].arg);
    }
    if (sched_context_switch_request) {
        thread_yield();
    }
}
/* UART receive interrupt (ISR/RDR/ICR register layout): pass each received
 * byte to the registered callback; clear overrun errors explicitly. */
static inline void irq_handler(uint8_t uartnum, USART_TypeDef *dev)
{
    if (dev->ISR & USART_ISR_RXNE) {
        char data = (char)dev->RDR; /* reading RDR clears RXNE */
        uart_config[uartnum].rx_cb(uart_config[uartnum].arg, data);
    }
    else if (dev->ISR & USART_ISR_ORE) {
        /* do nothing on overrun, just clear the flag.  ICR is a
         * write-1-to-clear register whose bits read as zero, so a plain
         * write is correct; the previous `|=` performed a pointless (and
         * per the manual, invalid) read-modify-write of a write-only
         * register. */
        dev->ICR = USART_ICR_ORECF;
    }
    if (sched_context_switch_request) {
        thread_yield();
    }
}
/* SPI transfer interrupt: on RX-buffer-not-empty, read the incoming byte,
 * let the registered callback compute the response, and write it back to
 * the data register for the next exchange. */
static inline void irq_handler_transfer(SPI_TypeDef *spi, spi_t dev)
{
    if (spi->SR & SPI_SR_RXNE) {
        char byte = spi->DR;             /* reading DR clears RXNE */
        byte = spi_config[dev].cb(byte); /* callback supplies the reply */
        spi->DR = byte;
    }
    /* see if a thread with higher priority wants to run now */
    if (sched_context_switch_request) {
        thread_yield();
    }
}
/* Display server main loop.
 *
 * Sleeps until e_start_server is signalled, then repeatedly renders frames
 * through the registered `renderer`, signals e_frame_finished after each
 * frame, and polls for a key press or a stop/refresh request (yielding the
 * CPU while idle).  A stop request ends the inner loop and returns the
 * server to its idle state; the outer loop never exits normally. */
static int display_server_thread(void *args)
{
    for (;;) {
        // wait for start event
        dprintf(INFO, "%s: IDLE\n", __func__);
        if (event_wait(&e_start_server) < 0) {
            dprintf(INFO, "%p: event_wait() returned error\n", get_current_thread());
            return -1;
        }

        // main worker loop
        dprintf(INFO, "%s: START\n", __func__);
        is_running = 1;

        // ignore first key to prevent unwanted interactions
        getkey();

        int keycode = 0;
        for(;;) {
            // render frame
            if(renderer)
                renderer(keycode);

            // signal refresh
            event_signal(&e_frame_finished, true);

            // poll for a key or a request flag; yield while idle so other
            // threads can run during the busy-wait
            while(!(keycode=getkey()) && !request_stop && !request_refresh) {
                thread_yield();
            }

            // stop request
            if(request_stop) {
                request_stop = 0;
                break;
            }

            // refresh request
            if(request_refresh) {
                request_refresh = 0;
            }

            // wait until the consumer allows the next frame
            event_wait(&e_continue);
        }
        dprintf(INFO, "%s: EXIT\n", __func__);
        is_running = 0;
    }
    return 0;
}
/* Release the lock.  If the calling thread actually holds it, clear the
 * holder (making the lock available again) and wake every thread blocked
 * on the lock's wait channel; otherwise just yield the CPU, putting this
 * thread back on the ready queue.
 *
 * NOTE(review): thread_yield() is invoked while lock_lock is still held —
 * yielding with a spinlock held is normally illegal; confirm.  Releasing
 * a lock one does not hold should arguably be a KASSERT failure rather
 * than a silent yield. */
void lock_release(struct lock *lock)
{
    KASSERT(lock != NULL);
    KASSERT(lock->lock_holder != NULL);

    spinlock_acquire(&lock->lock_lock);
    if (lock_do_i_hold(lock)) {
        lock->lock_holder = NULL;
        wchan_wakeall(lock->lock_wchan);
    }
    else
        thread_yield();
    spinlock_release(&lock->lock_lock);
}
/* Initialize the cc2538 I2C peripheral in master mode.
 *
 * @param speed_hz  desired SCL frequency in Hz, used to derive the
 *                  master timer period register from the system clock.
 */
void cc2538_i2c_init_master(uint32_t speed_hz)
{
    SYS_CTRL_RCGCI2C |= 1; /**< Enable the I2C0 clock. */
    SYS_CTRL_SCGCI2C |= 1; /**< Enable the I2C0 clock. */
    SYS_CTRL_DCGCI2C |= 1; /**< Enable the I2C0 clock. */

    /* Reset I2C peripheral */
    SYS_CTRL_SRI2C |= 1;

#ifdef MODULE_XTIMER
    xtimer_usleep(50);
#else
    thread_yield(); /* no timer module: yield once as a minimal reset delay */
#endif

    SYS_CTRL_SRI2C &= ~1; /* take the peripheral out of reset */

    /* Clear all pin override flags except PUE (Pull-Up Enable) */
    IOC_PXX_OVER[I2C_0_SCL_PIN] &= IOC_OVERRIDE_PUE;
    IOC_PXX_OVER[I2C_0_SDA_PIN] &= IOC_OVERRIDE_PUE;

    /* Route the I2C signals to the configured pins */
    IOC_PXX_SEL[I2C_0_SCL_PIN] = I2C_SCL_OUT;
    IOC_PXX_SEL[I2C_0_SDA_PIN] = I2C_SDA_OUT;
    IOC_I2CMSSCL = I2C_0_SCL_PIN;
    IOC_I2CMSSDA = I2C_0_SDA_PIN;

    gpio_hardware_control(I2C_0_SCL_PIN);
    gpio_hardware_control(I2C_0_SDA_PIN);

    /* Initialize the I2C master by setting the Master Function Enable bit */
    I2CM_CR |= MFE;

    /* Set the SCL clock speed; rounds to the nearest divisor rather than
     * truncating (the denom/2 addition) before computing TPR. */
    uint32_t ps = sys_clock_freq();
    uint32_t denom = 2 * (SCL_LP + SCL_HP) * speed_hz;
    ps += denom / 2;
    ps /= denom;
    I2CM_TPR = ps - 1;

    /* Enable I2C master interrupts */
    NVIC_SetPriority(I2C_IRQn, I2C_IRQ_PRIO);
    NVIC_EnableIRQ(I2C_IRQn);

    i2cm_ctrl_write(STOP); /* issue a STOP to leave the bus in a known state */

    /* Enable I2C master interrupts */
    I2CM_IMR = 1;
}
/* Spanning benchmark entry point.
 *
 * argv[1] gives the number of additional cores to span onto; a dispatcher
 * is created on each, the cycle count until all dispatchers have reported
 * in is measured, and then a remote thread is started on every spanned
 * core before entering the message handler loop. */
int main(int argc, char *argv[])
{
    errval_t err;

    if (argc != 2) {
        printf("Usage %s: <Num additional threads>\n", argv[0]);
        exit(-1);
    }

    //printf("main running on %d\n", disp_get_core_id());

    int cores = strtol(argv[1], NULL, 10) + 1;

    NPROC = cores -1;
    BARINIT(barrier, NPROC);

    uint64_t before = rdtsc();
    times[0] = before;

    trace_event(TRACE_SUBSYS_BENCH, TRACE_EVENT_BENCH_PCBENCH, 1);

    /* Span the domain: one new dispatcher on each additional core */
    for (int i = 1; i < cores; i++) {
        err = domain_new_dispatcher(i + disp_get_core_id(),
                                    domain_spanned_callback,
                                    (void*)(uintptr_t)i);
        if (err_is_fail(err)) {
            USER_PANIC_ERR(err, "domain_new_dispatcher failed");
        }
    }

    /* Busy-wait (yielding) until every dispatcher has come up */
    while (ndispatchers < cores) {
        thread_yield();
    }
    uint64_t finish = rdtsc();

    trace_event(TRACE_SUBSYS_BENCH, TRACE_EVENT_BENCH_PCBENCH, 0);
    //sys_print("\nDone\n", 6);
    printf("spantest: Done in %"PRIu64" cycles\n", finish-before);
    //trace_dump();

    /* Start the benchmark worker on every spanned core */
    for(int i = 1; i < cores; i++) {
        err = domain_thread_create_on(i, remote, NULL);
        assert(err_is_ok(err));
    }

    messages_handler_loop();
    return 0;
}
/* Run error_test_thread() 512 times after a short startup delay,
 * yielding between passes; return FAILED_TEST on the first failure,
 * 0 if every pass succeeds. */
static void* error_thread(void* arg)
{
	int remaining;
	FOUNDATION_UNUSED(arg);

	thread_sleep(10);

	for (remaining = 512; remaining > 0; --remaining) {
		if (error_test_thread())
			return FAILED_TEST;
		thread_yield();
	}

	return 0;
}
/* Releases LOCK, which must be owned by the current thread.

   An interrupt handler cannot acquire a lock, so it does not make sense
   to try to release a lock within an interrupt handler. */
void
lock_release (struct lock *lock)
{
  ASSERT (lock != NULL);
  ASSERT (lock_held_by_current_thread (lock));

  /* Undo priority donation: restore the priority saved when it was
     donated (-1 marks "no donation recorded"). */
  if(lock->old_priority != -1)
  {
    lock->holder->priority = lock->old_priority;
    lock->old_priority = -1;
  }
  lock->holder = NULL;
  sema_up (&lock->semaphore);
  /* Yield so a higher-priority thread unblocked by sema_up (or by the
     donation rollback above) can run immediately. */
  thread_yield();
}
/* UART interrupt: RX — hand the received byte to the rx callback;
 * TX-empty — ask the tx callback for more data, disabling the TXE
 * interrupt once the callback reports nothing left to send. */
static inline void irq_handler(uint8_t uartnum, USART_TypeDef *dev)
{
    if (dev->SR & USART_SR_RXNE) {
        char data = (char)dev->DR; /* reading DR clears RXNE */
        uart_config[uartnum].rx_cb(uart_config[uartnum].arg, data);
    }
    else if (dev->SR & USART_SR_TXE) {
        /* tx_cb returns 0 when it has no more data to transmit */
        if (uart_config[uartnum].tx_cb(uart_config[uartnum].arg) == 0) {
            dev->CR1 &= ~(USART_CR1_TXEIE); /* stop TX-empty interrupts */
        }
    }
    if (sched_context_switch_request) {
        thread_yield();
    }
}
/* ISR for timer 0: for every capture/compare channel whose interrupt
 * flag is set, turn the channel off (one-shot), acknowledge the flag,
 * and invoke the registered callback with the channel number. */
void TIMER_0_ISR(void)
{
    TIMER_TypeDef *dev = timer_config[0].timer.dev;
    int chan = 0;

    while (chan < CC_CHANNELS) {
        if (dev->IF & (TIMER_IF_CC0 << chan)) {
            dev->CC[chan].CTRL = _TIMER_CC_CTRL_MODE_OFF; /* one-shot: disable */
            dev->IFC = (TIMER_IFC_CC0 << chan);           /* ack the flag */
            isr_ctx[0].cb(isr_ctx[0].arg, chan);
        }
        ++chan;
    }
    if (sched_context_switch_request) {
        thread_yield();
    }
}