void sleep_manager_sleep_auto(void)
{
#ifdef MBED_SLEEP_TRACING_ENABLED
    sleep_tracker_print_stats();
#endif
    core_util_critical_section_enter();
    us_timestamp_t start = read_us();
    bool deep = false;

// debug profile should keep debuggers attached, no deep sleep allowed
#ifdef MBED_DEBUG
    hal_sleep();
#else
    if (sleep_manager_can_deep_sleep()) {
        deep = true;
        hal_deepsleep();
    } else {
        hal_sleep();
    }
#endif

    us_timestamp_t end = read_us();
    if (true == deep) {
        deep_sleep_time += end - start;
    } else {
        sleep_time += end - start;
    }
    core_util_critical_section_exit();
}
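Whether sleep_manager_sleep_auto() takes the hal_deepsleep() branch is decided by sleep_manager_can_deep_sleep(), which consults a lock counter. A minimal sketch of how a driver would hold that lock across a critical window, assuming the standard mbed OS sleep_manager_lock_deep_sleep()/sleep_manager_unlock_deep_sleep() pair from mbed_power_mgmt.h (the transfer function itself is hypothetical):

#include "platform/mbed_power_mgmt.h"

// Hypothetical critical window: while the lock count is non-zero,
// sleep_manager_can_deep_sleep() returns false and sleep_manager_sleep_auto()
// falls back to plain hal_sleep(), so peripheral clocks keep running.
void run_dma_transfer(void)
{
    sleep_manager_lock_deep_sleep();    // bump the deep-sleep lock counter
    // ... start the DMA transfer and wait for completion (omitted) ...
    sleep_manager_unlock_deep_sleep();  // counter back to zero: deep sleep allowed again
}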
struct variable *sys_sleep(struct context *context)
{
    // VM binding: pop the argument list off the operand stack and read
    // the sleep duration (in milliseconds) from parameter 1
    struct variable *args = (struct variable *)stack_pop(context->operand_stack);
    int32_t milliseconds = param_int(args, 1);
    hal_sleep(milliseconds);
    return NULL;
}
void sleep_manager_sleep_auto(void)
{
#ifdef MBED_SLEEP_TRACING_ENABLED
    sleep_tracker_print_stats();
#endif
    core_util_critical_section_enter();
// debug profile should keep debuggers attached, no deep sleep allowed
#ifdef MBED_DEBUG
    hal_sleep();
#else
    if (sleep_manager_can_deep_sleep()) {
        hal_deepsleep();
    } else {
        hal_sleep();
    }
#endif
    core_util_critical_section_exit();
}
void disk_page_io_wait(disk_page_io *me)
{
#if 0
    // BUG! RACES!
    while( me->req.flag_pagein || me->req.flag_pageout )
        hal_sleep( &me->req );
#else
    // BUG! polling!
    hal_sleep_msec( 1 );
    //phantom_scheduler_yield();
    while( me->req.flag_pagein || me->req.flag_pageout )
        hal_sleep_msec( 10 );
#endif
}
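The race the disabled branch warns about is the classic lost wakeup: a flag can clear between the test and the call to hal_sleep(), leaving the waiter asleep forever, which is why the live branch falls back to polling. For contrast, a generic race-free sketch of the same wait using POSIX condition variables rather than Phantom's own primitives (all names here are illustrative, not Phantom APIs):

#include <pthread.h>
#include <stdbool.h>

// The mutex makes "test flags, then block" atomic with respect to the
// completion path, so a wakeup can no longer slip between them.
typedef struct {
    pthread_mutex_t lock;
    pthread_cond_t  done;
    bool            flag_pagein, flag_pageout;
} page_req;

void page_req_wait(page_req *r)
{
    pthread_mutex_lock(&r->lock);
    while (r->flag_pagein || r->flag_pageout)
        pthread_cond_wait(&r->done, &r->lock); // atomically unlocks and sleeps
    pthread_mutex_unlock(&r->lock);
}

void page_req_finish(page_req *r) // called from the I/O completion path
{
    pthread_mutex_lock(&r->lock);
    r->flag_pagein = r->flag_pageout = false;
    pthread_cond_broadcast(&r->done);
    pthread_mutex_unlock(&r->lock);
}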
// execute jobs from timer and from run queue once
void os_runloop_once () {
    osjob_t* j = NULL;
    hal_disableIRQs();
    // check for runnable jobs
    if(OS.runnablejobs) {
        j = OS.runnablejobs;
        OS.runnablejobs = j->next;
    } else if(OS.scheduledjobs && hal_checkTimer(OS.scheduledjobs->deadline)) { // check for expired timed jobs
        j = OS.scheduledjobs;
        OS.scheduledjobs = j->next;
    } else { // nothing pending
        hal_sleep(); // wake by irq (timer already restarted)
    }
    hal_enableIRQs();
    if(j) { // run job callback
        j->func(j);
    }
}
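Typical use pumps this function from the application's main loop after seeding the queue with a job; a minimal sketch assuming the usual LMIC entry points (os_init, os_setCallback, os_setTimedCallback, sec2osticks), with the job body purely illustrative:

#include "lmic.h"

static osjob_t tickjob;

// Reschedule ourselves one second out; os_runloop_once() picks the job up
// from OS.scheduledjobs once hal_checkTimer() reports the deadline reached.
static void tickfunc (osjob_t* j) {
    os_setTimedCallback(j, os_getTime() + sec2osticks(1), tickfunc);
}

int main (void) {
    os_init();
    os_setCallback(&tickjob, tickfunc); // make the job immediately runnable
    while (1) {
        os_runloop_once(); // sleeps via hal_sleep() when nothing is pending
    }
}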
/*
 * ToDo: Implement deepsleep()
 */
void hal_deepsleep(void)
{
    hal_sleep();
}