/**
 * Put the client into MPD IDLE mode.
 *
 * Does nothing if the client is already idle. Otherwise logs the
 * transition, sends the "idle" command and records the send result
 * as the new idle flag.
 *
 * @return true when the client is in IDLE mode after the call.
 */
bool
Control::idle()
{
	bool already_idle = is_idle();

	if (already_idle) {
		return true;
	}

	pms->log(MSG_DEBUG, 0, "Entering IDLE mode.\n");
	set_is_idle(mpd_send_idle(conn->h()));

	return is_idle();
}
/**
 * Take the client out of MPD IDLE mode.
 *
 * No-op (returns true) when the client is not idle. Otherwise logs the
 * transition, sends the "noidle" command and stores the send result as
 * the new idle flag.
 *
 * NOTE(review): on a successful mpd_send_noidle() this sets the idle
 * flag to true and therefore returns true ("still idle") — confirm the
 * polarity is what callers such as exit_idle() expect.
 */
bool Control::noidle()
{
	if (!is_idle()) {
		return true;
	}
	pms->log(MSG_DEBUG, 0, "Leaving IDLE mode.\n");
	set_is_idle(mpd_send_noidle(conn->h()));
	return is_idle();
}
/*
 * Try to take a reusable buffer object from a size bucket of the BO cache.
 *
 * Only the head of the bucket's list is considered: if it is idle it is
 * unlinked and returned for reuse; if it is still busy on the GPU we bail
 * out and return NULL rather than stall or scan deeper. table_lock guards
 * the list manipulation.
 */
static struct fd_bo *find_in_bucket(struct fd_device *dev,
		struct fd_bo_bucket *bucket, uint32_t flags)
{
	struct fd_bo *bo = NULL;

	/* TODO .. if we had an ALLOC_FOR_RENDER flag like intel, we could
	 * skip the busy check.. if it is only going to be a render target
	 * then we probably don't need to stall..
	 *
	 * NOTE that intel takes ALLOC_FOR_RENDER bo's from the list tail
	 * (MRU, since likely to be in GPU cache), rather than head (LRU)..
	 */
	pthread_mutex_lock(&table_lock);
	while (!LIST_IS_EMPTY(&bucket->list)) {
		bo = LIST_ENTRY(struct fd_bo, bucket->list.next, list);

		/* dead branch kept as a placeholder for future madvise support */
		if (0 /* TODO: if madvise tells us bo is gone... */) {
			list_del(&bo->list);
			bo_del(bo);
			bo = NULL;
			continue;
		}
		/* TODO check for compatible flags? */
		if (is_idle(bo)) {
			/* head BO is idle: unlink it from the cache and reuse it */
			list_del(&bo->list);
			break;
		}
		/* head BO still busy: give up on this bucket entirely */
		bo = NULL;
		break;
	}
	pthread_mutex_unlock(&table_lock);
	return bo;
}
/* Panel-change hook for the built-in viewer: reload the viewer with the
 * file currently selected in the listing panel.
 *
 * If the user is still typing, the update is deferred by (re)registering
 * this function on the idle hook; once idle, the hook is removed and the
 * viewer is torn down and re-loaded with the selected file. */
static void
mcview_hook (void *v)
{
    mcview_t *view = (mcview_t *) v;
    WPanel *panel;

    /* If the user is busy typing, wait until he finishes to update the screen */
    if (!is_idle ())
    {
        if (!hook_present (idle_hook, mcview_hook))
            add_hook (&idle_hook, mcview_hook, v);
        return;
    }

    delete_hook (&idle_hook, mcview_hook);

    /* pick whichever side holds the directory listing; bail if neither does */
    if (get_current_type () == view_listing)
        panel = current_panel;
    else if (get_other_type () == view_listing)
        panel = other_panel;
    else
        return;

    mcview_done (view);
    mcview_init (view);
    mcview_load (view, 0, panel->dir.list[panel->selected].fname, 0, 0, 0);
    mcview_display (view);
}
/* Change to the selected directory, but only for a panelized tree in
 * xtree mode, and only while the user is not actively typing. */
static void
maybe_chdir (WTree *tree)
{
    if (xtree_mode && tree->is_panel && is_idle ())
        chdir_sel (tree);
}
/*
 * f2fs background garbage-collection kthread.
 *
 * Sleeps for an adaptive interval, then runs one round of f2fs_gc()
 * provided the superblock is not frozen for writes, gc_mutex can be
 * taken without blocking, and the IO subsystem is idle. The sleep time
 * shrinks while there are enough invalid blocks to reclaim and grows
 * when GC is unproductive or the system is busy.
 */
static int gc_thread_func(void *data)
{
	struct f2fs_sb_info *sbi = data;
	wait_queue_head_t *wq = &sbi->gc_thread->gc_wait_queue_head;
	long wait_ms;

	wait_ms = GC_THREAD_MIN_SLEEP_TIME;

	do {
		/* honour the freezer before sleeping on the wait queue */
		if (try_to_freeze())
			continue;
		else
			wait_event_interruptible_timeout(*wq,
						kthread_should_stop(),
						msecs_to_jiffies(wait_ms));
		if (kthread_should_stop())
			break;

		/* filesystem frozen for writes: back off to the maximum sleep */
		if (sbi->sb->s_writers.frozen >= SB_FREEZE_WRITE) {
			wait_ms = GC_THREAD_MAX_SLEEP_TIME;
			continue;
		}

		/*
		 * [GC triggering condition]
		 * 0. GC is not conducted currently.
		 * 1. There are enough dirty segments.
		 * 2. IO subsystem is idle by checking the # of writeback pages.
		 * 3. IO subsystem is idle by checking the # of requests in
		 *    bdev's request list.
		 *
		 * Note) We have to avoid triggering GCs too much frequently.
		 * Because it is possible that some segments can be
		 * invalidated soon after by user update or deletion.
		 * So, I'd like to wait some time to collect dirty segments.
		 */
		if (!mutex_trylock(&sbi->gc_mutex))
			continue;

		if (!is_idle(sbi)) {
			/* IO subsystem busy: back off and release the mutex */
			wait_ms = increase_sleep_time(wait_ms);
			mutex_unlock(&sbi->gc_mutex);
			continue;
		}

		if (has_enough_invalid_blocks(sbi))
			wait_ms = decrease_sleep_time(wait_ms);
		else
			wait_ms = increase_sleep_time(wait_ms);

#ifdef CONFIG_F2FS_STAT_FS
		sbi->bg_gc++;
#endif

		/* if return value is not zero, no victim was selected */
		if (f2fs_gc(sbi))
			wait_ms = GC_THREAD_NOGC_SLEEP_TIME;
	} while (!kthread_should_stop());
	return 0;
}
/* Main event loop of a dialog: dispatches keyboard/mouse events to the
 * dialog, runs idle hooks and MSG_IDLE handlers while there is no
 * pending input, and sends MSG_VALIDATE on close or during
 * midnight_shutdown. */
static void
frontend_dlg_run (WDialog * h)
{
    Widget *wh = WIDGET (h);
    Gpm_Event event;

    event.x = -1;

    /* close opened editors, viewers, etc */
    if (!widget_get_state (wh, WST_MODAL) && mc_global.midnight_shutdown)
    {
        send_message (h, NULL, MSG_VALIDATE, 0, NULL);
        return;
    }

    while (widget_get_state (wh, WST_ACTIVE))
    {
        int d_key;

        /* adapt to a terminal resize before processing more input */
        if (mc_global.tty.winch_flag != 0)
            dialog_change_screen_size ();

        if (is_idle ())
        {
            if (idle_hook)
                execute_hooks (idle_hook);

            /* keep feeding MSG_IDLE while the dialog wants idle events */
            while (widget_get_state (wh, WST_IDLE) && is_idle ())
                send_message (wh, NULL, MSG_IDLE, 0, NULL);

            /* Allow terminating the dialog from the idle handler */
            if (!widget_get_state (wh, WST_ACTIVE))
                break;
        }

        update_cursor (h);

        /* Clear interrupt flag */
        tty_got_interrupt ();
        d_key = tty_get_event (&event, h->mouse_status == MOU_REPEAT, TRUE);
        dlg_process_event (h, d_key, &event);

        if (widget_get_state (wh, WST_CLOSED))
            send_message (h, NULL, MSG_VALIDATE, 0, NULL);
    }
}
/*
 * f2fs background garbage-collection kthread (tunable variant).
 *
 * Same adaptive-sleep GC loop as the classic version, but the sleep
 * bounds come from the per-filesystem gc_th structure, background GC
 * rounds are accounted via stat_inc_bggc_count(), and metadata is
 * balanced periodically with f2fs_balance_fs_bg().
 */
static int gc_thread_func(void *data)
{
	struct f2fs_sb_info *sbi = data;
	struct f2fs_gc_kthread *gc_th = sbi->gc_thread;
	wait_queue_head_t *wq = &sbi->gc_thread->gc_wait_queue_head;
	long wait_ms;

	wait_ms = gc_th->min_sleep_time;

	do {
		/* honour the freezer before sleeping on the wait queue */
		if (try_to_freeze())
			continue;
		else
			wait_event_interruptible_timeout(*wq,
						kthread_should_stop(),
						msecs_to_jiffies(wait_ms));
		if (kthread_should_stop())
			break;

		/*
		 * [GC triggering condition]
		 * 0. GC is not conducted currently.
		 * 1. There are enough dirty segments.
		 * 2. IO subsystem is idle by checking the # of writeback pages.
		 * 3. IO subsystem is idle by checking the # of requests in
		 *    bdev's request list.
		 *
		 * Note) We have to avoid triggering GCs too much frequently.
		 * Because it is possible that some segments can be
		 * invalidated soon after by user update or deletion.
		 * So, I'd like to wait some time to collect dirty segments.
		 */
		if (!mutex_trylock(&sbi->gc_mutex))
			continue;

		if (!is_idle(sbi)) {
			/* IO subsystem busy: back off and release the mutex */
			wait_ms = increase_sleep_time(gc_th, wait_ms);
			mutex_unlock(&sbi->gc_mutex);
			continue;
		}

		if (has_enough_invalid_blocks(sbi))
			wait_ms = decrease_sleep_time(gc_th, wait_ms);
		else
			wait_ms = increase_sleep_time(gc_th, wait_ms);

		stat_inc_bggc_count(sbi);

		/* if return value is not zero, no victim was selected */
		if (f2fs_gc(sbi))
			wait_ms = gc_th->no_gc_sleep_time;

		/* balancing f2fs's metadata periodically */
		f2fs_balance_fs_bg(sbi);

	} while (!kthread_should_stop());
	return 0;
}
/* Main event loop of a dialog (older Dlg_head engine): dispatches events
 * to the dialog callback, runs idle hooks and DLG_IDLE messages while
 * there is no pending input, and validates on close or shutdown. */
static void
frontend_run_dlg (Dlg_head * h)
{
    int d_key;
    Gpm_Event event;

    event.x = -1;

    /* close opened editors, viewers, etc */
    if (!h->modal && mc_global.midnight_shutdown)
    {
        h->callback (h, NULL, DLG_VALIDATE, 0, NULL);
        return;
    }

    while (h->state == DLG_ACTIVE)
    {
        /* adapt to a terminal resize before processing more input */
        if (mc_global.tty.winch_flag)
            dialog_change_screen_size ();

        if (is_idle ())
        {
            if (idle_hook)
                execute_hooks (idle_hook);

            /* keep feeding DLG_IDLE while the dialog asks for idle events */
            while ((h->flags & DLG_WANT_IDLE) && is_idle ())
                h->callback (h, NULL, DLG_IDLE, 0, NULL);

            /* Allow terminating the dialog from the idle handler */
            if (h->state != DLG_ACTIVE)
                break;
        }

        update_cursor (h);

        /* Clear interrupt flag */
        tty_got_interrupt ();
        d_key = tty_get_event (&event, h->mouse_status == MOU_REPEAT, TRUE);
        dlg_process_event (h, d_key, &event);

        if (h->state == DLG_CLOSED)
            h->callback (h, NULL, DLG_VALIDATE, 0, NULL);
    }
}
void cancel() { error_code ec; deadline_timer_.cancel(ec); repeat_times_ = 0; status_ = NOT_WAITING; repeat_times_ = 0; repeated_times_ = 0; set_cancel(); next_op_stamp(); BOOST_ASSERT(is_idle()); }
/* Refresh the editor display: reposition the viewport over the cursor,
 * update the cursor column and status line, then either render the
 * pending keypress (when idle) or defer the redraw until input drains. */
void edit_update_screen (WEdit * e)
{
    edit_scroll_screen_over_cursor (e);
    edit_update_curs_col (e);
    edit_status (e);

    if (is_idle ())
    {
        /* no pending input: render now */
        if ((e->force & REDRAW_COMPLETELY) != 0)
            e->force |= REDRAW_PAGE;
        edit_render_keypress (e);
    }
    else
    {
        /* more events queued for this window: postpone the full redraw */
        e->force |= REDRAW_PAGE;
    }
}
/* Repaint the viewer, skipping expensive full redraws while input is
 * pending.
 *
 * A static adaptive "dirt limit" controls how many dirty updates may be
 * skipped: it grows (up to mcview_max_dirt_limit) each time updates pile
 * up and shrinks again whenever there is idle time for a proper redraw. */
void
mcview_update (mcview_t * view)
{
    static int dirt_limit = 1;

    if (view->dpy_bbar_dirty)
    {
        view->dpy_bbar_dirty = FALSE;
        mcview_set_buttonbar (view);
        buttonbar_redraw (find_buttonbar (view->widget.owner));
    }

    if (view->dirty > dirt_limit)
    {
        /* Too many updates skipped -> force a update */
        mcview_display (view);
        view->dirty = 0;
        /* Raise the update skipping limit */
        dirt_limit++;
        if (dirt_limit > mcview_max_dirt_limit)
            dirt_limit = mcview_max_dirt_limit;
    }
    else if (view->dirty > 0)
    {
        if (is_idle ())
        {
            /* We have time to update the screen properly */
            mcview_display (view);
            view->dirty = 0;
            if (dirt_limit > 1)
                dirt_limit--;
        }
        else
        {
            /* We are busy -> skipping full update,
               only the status line is updated */
            mcview_display_status (view);
        }
        /* Here we had a refresh, if fast scrolling does not work
           restore the refresh, although this should not happen */
    }
}
// Wait for the queue to be empty and for all the jobs to finish in step ticker void Conveyor::wait_for_idle(bool wait_for_motors) { // wait for the job queue to empty, this means cycling everything on the block queue into the job queue // forcing them to be jobs running = false; // stops on_idle calling check_queue while (!queue.is_empty()) { check_queue(true); // forces queue to be made available to stepticker THEKERNEL->call_event(ON_IDLE, this); } if(wait_for_motors) { // now we wait for all motors to stop moving while(!is_idle()) { THEKERNEL->call_event(ON_IDLE, this); } } running = true; // returning now means that everything has totally finished }
/* Refresh the editor display (widget-state engine): reposition the
 * viewport over the cursor, update the cursor column and status line,
 * render the keypress when idle (or defer the redraw while input is
 * pending), and finally redraw the dialog's buttonbar. */
void
edit_update_screen (WEdit * e)
{
    WDialog *h = WIDGET (e)->owner;

    edit_scroll_screen_over_cursor (e);
    edit_update_curs_col (e);
    /* status line reflects whether this editor window has focus */
    edit_status (e, widget_get_state (WIDGET (e), WST_FOCUSED));

    /* pop all events for this window for internal handling */
    if (!is_idle ())
        e->force |= REDRAW_PAGE;
    else
    {
        if ((e->force & REDRAW_COMPLETELY) != 0)
            e->force |= REDRAW_PAGE;
        edit_render_keypress (e);
    }

    widget_redraw (WIDGET (find_buttonbar (h)));
}
/*
 * Detach the namespace from its claiming device (BTT/PFN/DAX) and reset
 * that device.
 *
 * When nothing else keeps the device busy it is unregistered
 * asynchronously; otherwise the personality-specific configuration is
 * cleared (lbasize/uuid for BTT, uuid/mode for PFN and DAX) so the
 * device can be reconfigured later.
 */
static void nd_detach_and_reset(struct device *dev,
		struct nd_namespace_common **_ndns)
{
	/* detach the namespace and destroy / reset the device */
	nd_detach_ndns(dev, _ndns);
	if (is_idle(dev, *_ndns)) {
		nd_device_unregister(dev, ND_ASYNC);
	} else if (is_nd_btt(dev)) {
		struct nd_btt *nd_btt = to_nd_btt(dev);

		nd_btt->lbasize = 0;
		kfree(nd_btt->uuid);
		nd_btt->uuid = NULL;
	} else if (is_nd_pfn(dev) || is_nd_dax(dev)) {
		struct nd_pfn *nd_pfn = to_nd_pfn_safe(dev);

		kfree(nd_pfn->uuid);
		nd_pfn->uuid = NULL;
		nd_pfn->mode = PFN_MODE_NONE;
	}
}
/* Refresh the editor display (multi-window variant): reposition the
 * viewport, update the cursor column and status line, render the
 * keypress when idle (or defer while input is pending), then redraw
 * the owning dialog's buttonbar. */
void
edit_update_screen (WEdit * e)
{
    edit_scroll_screen_over_cursor (e);
    edit_update_curs_col (e);
    /* second argument marks a "fully redrawn and current window" state;
       presumably it selects the highlighted status style — TODO confirm
       against edit_status()'s definition */
    edit_status (e, (e->force & REDRAW_COMPLETELY) != 0
                 && (void *) e == ((Widget *) e)->owner->current->data);

    /* pop all events for this window for internal handling */
    if (!is_idle ())
        e->force |= REDRAW_PAGE;
    else
    {
        if ((e->force & REDRAW_COMPLETELY) != 0)
            e->force |= REDRAW_PAGE;
        edit_render_keypress (e);
    }

    buttonbar_redraw (find_buttonbar (((Widget *) e)->owner));
}
static struct drm_tegra_bo *find_in_bucket(struct drm_tegra_bo_bucket *bucket, uint32_t flags) { struct drm_tegra_bo *bo = NULL; /* TODO .. if we had an ALLOC_FOR_RENDER flag like intel, we could * skip the busy check.. if it is only going to be a render target * then we probably don't need to stall.. * * NOTE that intel takes ALLOC_FOR_RENDER bo's from the list tail * (MRU, since likely to be in GPU cache), rather than head (LRU).. */ if (!DRMLISTEMPTY(&bucket->list)) { bo = DRMLISTENTRY(struct drm_tegra_bo, bucket->list.next, bo_list); /* TODO check for compatible flags? */ if (is_idle(bo)) { DRMLISTDELINIT(&bo->bo_list); bucket->num_entries--; } else { bo = NULL; } }
/* Returns a character read from stdin with appropriate interpretation.
 *
 * Refreshes the curses screen at most every third call, or immediately
 * when the system is idle, before reading. Returns EV_NONE for a zero
 * key code (control key / mouse event). */
int get_event (Gpm_Event *event, int redo_event, int block)
{
    int c;
    static int flag;            /* Return value from select */
    static int dirty = 3;

    /* throttle screen refreshes: only every third call, or when idle */
    if ((dirty == 1) || is_idle ()){
	refresh ();
	doupdate ();
	dirty = 1;
    } else
	dirty++;

    vfs_timeout_handler ();

    c = block ? getch_with_delay () : get_key_code (1);

    if (!c) {
        /* Code is 0, so this is a Control key or mouse event */
	return EV_NONE; /* FIXME: mouse not supported */
    }

    return c;
}
/* Change to the directory selected in the tree, but only for a
 * panelized tree in xtree mode and only when the user is idle. */
static void
maybe_chdir (WTree * tree)
{
    if (!xtree_mode)
        return;
    if (!tree->is_panel)
        return;
    if (is_idle ())
        tree_chdir_sel (tree);
}
/* A player drafting in an INL game is never hidden for idleness;
 * otherwise visibility follows the plain idle check on the victim. */
int is_invisible_due_idle(struct player *victim)
{
    return (status->gameup & GU_INL_DRAFTING) ? 0 : is_idle(victim);
}
/* Queue a toggle request: spin until the relay driver is idle, then
 * latch the bitmask of relays to flip.
 *
 * NOTE(review): this is an unbounded busy-wait with no timeout — confirm
 * is_idle() is guaranteed to eventually become true (e.g. advanced from
 * an interrupt or another thread), otherwise this can hang. */
inline void toggle(uint8_t mask)
{
	while( ! is_idle() ) {};
	relays_to_toggle_ = mask;
}
/*
 * Dialog callback for the multi-window editor: handles lifecycle
 * (MSG_INIT/MSG_END), painting, resizing, command dispatch from the menu
 * and buttonbar, raw key processing with the extended keymap, hardcoded
 * menu hotkeys, validation (quit) and idle processing.
 */
static cb_ret_t
edit_dialog_callback (Widget * w, Widget * sender, widget_msg_t msg, int parm, void *data)
{
    WMenuBar *menubar;
    WButtonBar *buttonbar;
    WDialog *h = DIALOG (w);

    switch (msg)
    {
    case MSG_INIT:
        edit_dlg_init ();
        return MSG_HANDLED;

    case MSG_DRAW:
        /* don't use dlg_default_repaint() -- we don't need a frame */
        tty_setcolor (EDITOR_BACKGROUND);
        dlg_erase (h);
        return MSG_HANDLED;

    case MSG_RESIZE:
        menubar = find_menubar (h);
        buttonbar = find_buttonbar (h);
        /* dlg_set_size() is surplus for this case */
        w->lines = LINES;
        w->cols = COLS;
        widget_set_size (WIDGET (buttonbar), w->lines - 1, w->x, 1, w->cols);
        widget_set_size (WIDGET (menubar), w->y, w->x, 1, w->cols);
        menubar_arrange (menubar);
        /* propagate the new geometry to every editor window */
        g_list_foreach (h->widgets, (GFunc) edit_dialog_resize_cb, NULL);
        return MSG_HANDLED;

    case MSG_ACTION:
        {
            /* Handle shortcuts, menu, and buttonbar. */
            cb_ret_t result;

            result = edit_dialog_command_execute (h, parm);

            /* We forward any commands coming from the menu, and which haven't
               been handled by the dialog, to the focused WEdit window. */
            if (result == MSG_NOT_HANDLED && sender == WIDGET (find_menubar (h)))
                result = send_message (h->current->data, NULL, MSG_ACTION, parm, NULL);

            return result;
        }

    case MSG_KEY:
        {
            Widget *we = WIDGET (h->current->data);
            cb_ret_t ret = MSG_NOT_HANDLED;

            if (edit_widget_is_editor (we))
            {
                WEdit *e = (WEdit *) we;
                long command;

                /* e->extmod selects the extended keymap for the next key */
                if (!e->extmod)
                    command = keybind_lookup_keymap_command (editor_map, parm);
                else
                    command = keybind_lookup_keymap_command (editor_x_map, parm);

                if (command == CK_IgnoreKey)
                    e->extmod = FALSE;
                else
                {
                    ret = edit_dialog_command_execute (h, command);
                    /* if command was not handled, keep the extended mode
                       for the further key processing */
                    if (ret == MSG_HANDLED)
                        e->extmod = FALSE;
                }
            }

            /*
             * Due to the "end of bracket" escape the editor sees input with is_idle() == false
             * (expects more characters) and hence doesn't yet refresh the screen, but then
             * no further characters arrive (there's only an "end of bracket" which is swallowed
             * by tty_get_event()), so you end up with a screen that's not refreshed after pasting.
             * So let's trigger an IDLE signal.
             */
            if (!is_idle ())
                widget_idle (w, TRUE);
            return ret;
        }

        /* hardcoded menu hotkeys (see edit_drop_hotkey_menu) */
    case MSG_UNHANDLED_KEY:
        return edit_drop_hotkey_menu (h, parm) ? MSG_HANDLED : MSG_NOT_HANDLED;

    case MSG_VALIDATE:
        edit_quit (h);
        return MSG_HANDLED;

    case MSG_END:
        edit_dlg_deinit ();
        return MSG_HANDLED;

    case MSG_IDLE:
        /* one-shot: disarm idle notifications, then forward to the focused window */
        widget_idle (w, FALSE);
        return send_message (h->current->data, NULL, MSG_IDLE, 0, NULL);

    default:
        return dlg_default_callback (w, sender, msg, parm, data);
    }
}
/**
 * Ensure the client has left MPD IDLE mode.
 *
 * Returns true when the client is not idle, or when both sending
 * "noidle" and waiting for its acknowledgement succeed.
 */
bool
Control::exit_idle()
{
	if (!is_idle()) {
		return true;
	}

	/* De Morgan rewrite of !(idle && (!noidle || !wait)):
	 * same calls, same short-circuit order, same result. */
	return noidle() && wait_until_noidle();
}
/** A side is controlled locally when it is human, AI, or idle. */
bool is_local() const
{
	if (is_human())
		return true;
	if (is_ai())
		return true;
	return is_idle();
}
/*
 * Main server loop: multiplexes the pcap capture fd, the listening
 * socket and all connected client sockets with poll(), shuttling
 * queries through the waiting/result queues until `running` is cleared.
 * On exit, flushes pending results to every client and frees resources.
 *
 * NOTE(review): `time`/`delta` appear to be milliseconds from gettime()
 * (poll timeout is delta + 1) — confirm the unit matches
 * gk_cm_conn_next_time().
 */
void run()
{
	long time, delta;
	int ret, cli_idx, cli;
	nfds_t nfds;

	if (allocate_mem() < 0) {
		free_mem();
		return;
	}

	/* fd_all[0] and fd_all[1] are fixed: capture fd and listen fd */
	cli_cnt = 0;
	nfds = 2;
	fd_all[0].fd = pcap_fd;
	fd_all[0].events = POLLIN;
	fd_all[1].fd = listenfd;
	fd_all[1].events = POLLIN;

	/* stack of free client slot numbers, highest slot on top */
	for (cli = 0; cli < max_client; ++cli)
		avai_no[cli] = max_client - cli - 1;
	avai_cnt = max_client;

	/* thread the result and waiting pools into singly-linked free lists */
	result.item[max_query - 1].next = -1;
	for (ret = max_query - 2; ret >= 0; --ret)
		result.item[ret].next = ret + 1;
	result.avai = 0;
	waiting.item[max_query - 1].next = -1;
	for (ret = max_query - 2; ret >= 0; --ret)
		waiting.item[ret].next = ret + 1;
	waiting.avai = 0;
	waiting.head = -1;

	/* reset per-client I/O buffers and pending-query markers */
	for (cli = 0; cli < max_client; ++cli) {
		in_buffer[cli].len = 0;
		in_buffer[cli].newline = in_buffer[cli].buf;
		out_buffer[cli].head = 0;
		out_buffer[cli].tail = 0;
		out_buffer[cli].tot = 0;
		pending[cli].in = -1;
		pending[cli].out = -1;
	}

	fprintf(stderr, "max_client: %d, max_query: %d. Exceeded will be rejected.\n", max_client, max_query);

	time = -1;
	while (running) {
		pop_waiting();
		if (time == -1)
			delta = 1000;
		else
			delta = time - gettime();
		while (delta >= 0) {
			/* re-arm POLLIN for clients whose input buffer has room again */
			for (cli_idx = 0; cli_idx < cli_cnt; ++cli_idx) {
				cli = cli_no[cli_idx];
				push_waiting(cli);
				if ((fd_cli[cli_idx].events & POLLIN) == 0 && in_buffer[cli].len != GUK_MAX_QUERY_LEN)
					fd_cli[cli_idx].events |= POLLIN;
			}
			if ((ret = poll(fd_all, nfds, delta + 1)) > 0) {
				if (fd_all[0].revents == POLLIN)
					gk_cm_read_cap();
				ret = (fd_all[1].revents == POLLIN); // ret here stand for new connection available
				nfds -= 2;
				for (cli_idx = 0; cli_idx < cli_cnt; ++cli_idx) {
					if (fd_cli[cli_idx].revents & (POLLERR | POLLNVAL | POLLHUP)) {
						fprintf(stderr, "Connection closed or broken.");
						close_client(cli_idx);
						--nfds;
						continue;
					}
					cli = cli_no[cli_idx];
					if (fd_cli[cli_idx].revents & POLLOUT) {
						/* drain queued results until the socket would block */
						do {
							pop_result(cli);
						} while (try_write(cli));
						if (all_written(cli))
							fd_cli[cli_idx_of[cli]].events &= ~POLLOUT;
						last_act[cli] = gettime();
					}
					if (fd_cli[cli_idx].revents & POLLIN) {
						while (try_read(cli)) {
							push_waiting(cli);
						}
						/* input buffer full: stop reading until it drains */
						if (in_buffer[cli].len == GUK_MAX_QUERY_LEN)
							fd_cli[cli_idx].events &= ~POLLIN;
						last_act[cli] = gettime();
					} else if (ret && is_idle(cli) && gettime() - last_act[cli] >= (GUK_SERV_TIMEOUT * 1000)) {
						/* idle clients are only reaped when a new connection is waiting */
						fprintf(stderr, "Client timeout. ");
						close_client(cli_idx);
						--nfds;
					}
				}
				/* remove closed clients */
				for (cli_idx = 0; cli_cnt > (int)nfds; ) {
					while (cli_idx < (int)nfds && fd_cli[cli_idx].fd >= 0)
						++cli_idx;
					if (cli_idx == (int)nfds) {
						cli_cnt = cli_idx;
						break;
					} else {
						/* compact: move a live tail entry into the hole */
						while (fd_cli[--cli_cnt].fd < 0);
						memcpy(fd_cli + cli_idx, fd_cli + cli_cnt, sizeof(struct pollfd));
						cli_idx_of[(cli_no[cli_idx] = cli_no[cli_cnt])] = cli_idx;
					}
				}
				nfds += 2;
				if (ret)
					while (cli_cnt < max_client && accept_client() == 0)
						++nfds;
			} else if (ret < 0)
				perror("poll");
			time = gk_cm_conn_next_time();
			if (time == -1)
				delta = 1000;
			else
				delta = time - gettime();
		}
		time = gk_cm_conn_step();
	}
	gk_cm_finalize();

	/* final flush of queued results before tearing the clients down */
	for (cli_idx = 0; cli_idx < cli_cnt; ++cli_idx) {
		cli = cli_no[cli_idx];
		do {
			pop_result(cli);
		} while (try_write(cli));
	}
	for (cli_idx = 0; cli_idx < cli_cnt; ++cli_idx)
		close_client(cli_idx);
	free_mem();
}