/*
 * Read the current value of an on-board sensor.
 *
 * dev:    which sensor to sample (TEMP_0, PRESSURE_0 or SOLAR_0).
 * count:  size of `buffer` in bytes; must be >= sizeof(double).
 * buffer: destination; the sampled value is stored as a raw double.
 *
 * Returns sizeof(double) on success, DRIVER_ERROR_NOT_SUPPORTED for an
 * unknown device, or DRIVER_ERROR_CANNOT_READ when `buffer` is too small.
 */
int avr_read_(device_t dev, int count, char* buffer) {

    /*
     * Validate the buffer size up-front. The original check ran *after*
     * the sensor had been selected and sampled (wasting a kernel_sleep()
     * and touching the hardware for nothing) and compared the signed
     * `count` directly against sizeof(double): a negative count was
     * promoted to a huge unsigned value and slipped through.
     */
    if (count < 0 || (size_t) count < sizeof(double)) {
        return DRIVER_ERROR_CANNOT_READ;
    }

    double value = 0.0;

    switch (dev) {
        case TEMP_0:
            selSensor(TEMP);
            kernel_sleep(1);          /* let the ADC input settle after muxing */
            value = ADCtoTempTable[readPort()];
            break;
        case PRESSURE_0:
            selSensor(PRESSURE);
            kernel_sleep(1);
            value = ADCtoPressureTable[readPort()];
            break;
        case SOLAR_0:
            selSensor(SOLAR);
            kernel_sleep(1);
            value = ADCtoSolarTable[readPort()];
            break;
        default:
            return DRIVER_ERROR_NOT_SUPPORTED;
    }

    /*
     * Copy byte-by-byte instead of `*(double *) buffer = value`:
     * `buffer` carries no alignment guarantee and the pointer cast
     * violates strict aliasing.
     */
    const char *src = (const char *) &value;
    for (size_t i = 0; i < sizeof(double); i++)
        buffer[i] = src[i];

    return (int) sizeof(double);
}
/*
 * Panic handler: blink the board LED forever — `on_duration` ticks lit,
 * `off_duration` ticks dark. This function never returns.
 */
void kernel_plumbing_panic(uint32_t on_duration, uint32_t off_duration) {

    while (1) {
        led_on();
        kernel_sleep(on_duration);
        led_off();
        kernel_sleep(off_duration);
    }

    /* Not reached: the blink loop above never exits. */
    __builtin_unreachable();
}
static void mobj_waiter_sig_thread(void *arg) { uptr n = (uptr) arg; u64 ticks_to_sleep = (u64)(n + 1) * TIMER_HZ / 2; printk("[thread %u] sleep for %d ticks\n", n, ticks_to_sleep); kernel_sleep(ticks_to_sleep); printk("[thread %u] signal cond %d\n", n, n); kcond_signal_one(&conds[n]); mobj_se_test_signal_counter++; }
/*
 * Self-test: sleep for exactly TIMER_HZ ticks (~1 second) and verify that
 * the measured elapsed time matches, within a 2-tick tolerance.
 */
void selftest_sleep_short() {

    const u64 wait_ticks = TIMER_HZ;

    u64 before = get_ticks();
    kernel_sleep(wait_ticks);
    u64 after = get_ticks();
    u64 elapsed = after - before;

    printk("[sleeping_kthread] elapsed ticks: %llu (expected: %llu)\n",
           elapsed, wait_ticks);

    /*
     * The original single check `(elapsed - wait_ticks) <= 2` rejected an
     * early wakeup only via unsigned wraparound (elapsed < wait_ticks made
     * the subtraction wrap to a huge value). Make both conditions explicit
     * so a failure report points at the actual violation; the set of
     * accepted `elapsed` values is unchanged.
     */
    VERIFY(elapsed >= wait_ticks);            /* kernel_sleep must not return early */
    VERIFY(elapsed - wait_ticks <= 2);        /* ...and not overshoot by > 2 ticks  */

    regular_self_test_end();
}
/*
 * Implementation of the select() syscall.
 *
 * Copies the user fd sets and timeout into kernel space, returns right
 * away if some stream is already ready, otherwise waits on the streams'
 * condition variables (or plainly sleeps when there is nothing to wait
 * on but the timeout is > 0), and finally writes the resulting sets
 * back to user space.
 *
 * Returns 0 or a positive value via select_write_user_sets(), or a
 * negative error code (-EINVAL for a bad nfds, whatever the helpers
 * report otherwise).
 */
sptr sys_select(int user_nfds,
                fd_set *user_rfds,
                fd_set *user_wfds,
                fd_set *user_efds,
                struct timeval *user_tv)
{
   /* All per-call state lives in one context object passed to the helpers. */
   struct select_ctx ctx = (struct select_ctx) {
      .nfds = (u32)user_nfds,
      .sets = { 0 },
      .u_sets = { user_rfds, user_wfds, user_efds },
      .tv = NULL,
      .user_tv = user_tv,
      .cond_cnt = 0,
      .timeout_ticks = 0,
   };

   sptr rc;

   if (user_nfds < 0 || user_nfds > MAX_HANDLES)
      return -EINVAL;

   /* Copy the three user fd_sets (read/write/except) into ctx.sets. */
   if ((rc = select_read_user_sets(ctx.sets, ctx.u_sets)))
      return rc;

   /* Copy the user timeval; presumably also converted to ctx.timeout_ticks. */
   if ((rc = select_read_user_tv(user_tv, &ctx.tv, &ctx.timeout_ticks)))
      return rc;

   /* Fast path: if any stream is ready right now, report it without waiting. */
   if ((rc = (sptr)count_ready_streams(ctx.nfds, ctx.sets)) > 0) {
      return select_write_user_sets(&ctx);
   }

   //debug_dump_select_args(nfds, sets[0], sets[1], sets[2], tv);

   /* Count the kcond objects we could block on for the requested streams. */
   if ((rc = select_compute_cond_cnt(&ctx)))
      return rc;

   if (ctx.cond_cnt > 0 && (!user_tv || ctx.timeout_ticks > 0)) {

      /*
       * The count of condition variables for all the file descriptors is
       * greater than 0. That's typical: block until a stream becomes ready
       * or the timeout expires.
       */
      if ((rc = select_wait_on_cond(&ctx)))
         return rc;

   } else {

      /*
       * It is not that difficult for cond_cnt to be 0: it's enough for the
       * specified files to NOT have r/w/e get-kcond functions. Also, all
       * the sets might be NULL (see the comment below).
       */
      if (ctx.timeout_ticks > 0) {

         /*
          * Corner case: no conditions on which to wait, but timeout is > 0:
          * this is still a valid case. Many years ago the following call:
          *    select(0, NULL, NULL, NULL, &tv)
          * was even used as a portable implementation of nanosleep().
          */
         kernel_sleep(ctx.timeout_ticks);
      }
   }

   /* Write the (possibly updated) sets back to user space and return. */
   return select_write_user_sets(&ctx);
}