static prg_t basic_prg(void)
{
    char cnt = 0;

    for (;;) {
        if (HOUR || MINU) {
            if (!--d_s) {
                if (MINU)
                    MINU--;
                else if (HOUR) {
                    HOUR--;
                    MINU = 59;
                }
                if (!MINU && !HOUR)
                    next_timer();
                if (!power[0])
                    return PRG_SHOW_STOP;
                d_s = 60*4;
            }
        }
        if ((MINU || HOUR) && (cnt++ & 0x4))
            display_time(HOUR, MINU);
        else
            display_int(power[0]);
        switch (buttons_get()) {
        case BTN_UP:
        case BTN_UP_LONG:
            power_inc(0);
            cnt = 0;
            break;
        case BTN_DOWN:
        case BTN_DOWN_LONG:
            power_dec(0);
            cnt = 0;
            break;
        case BTN_BOTH:
            return PRG_SET_TIMER1;
        case BTN_IDLE:
        case BTN_UNDEFINED:
        case BTN_RESET:
            break;
        }
        sleep();
    }
}
void NDECL(init_timer)
{
    mudstate.lastnow = mudstate.now;
    mudstate.now = time(NULL);
    mudstate.dump_counter = ((mudconf.dump_offset == 0) ?
        mudconf.dump_interval : mudconf.dump_offset) + mudstate.now;
    mudstate.check_counter = ((mudconf.check_offset == 0) ?
        mudconf.check_interval : mudconf.check_offset) + mudstate.now;
    mudstate.idle_counter = mudconf.idle_interval + mudstate.now;
    mudstate.rwho_counter = mudconf.rwho_interval + mudstate.now;
    mudstate.mstats_counter = 15 + mudstate.now;
    alarm(next_timer());
}
void NDECL(init_timer)
{
    mudstate.nowmsec = time_ng(NULL);
    mudstate.now = (time_t) floor(mudstate.nowmsec);
    mudstate.lastnowmsec = mudstate.nowmsec;
    mudstate.lastnow = mudstate.now;
    mudstate.dump_counter = ((mudconf.dump_offset == 0) ?
        mudconf.dump_interval : mudconf.dump_offset) + mudstate.nowmsec;
    mudstate.check_counter = ((mudconf.check_offset == 0) ?
        mudconf.check_interval : mudconf.check_offset) + mudstate.nowmsec;
    mudstate.idle_counter = mudconf.idle_interval + mudstate.nowmsec;
    mudstate.rwho_counter = mudconf.rwho_interval + mudstate.nowmsec;
    mudstate.mstats_counter = 15.0 + mudstate.nowmsec;
    alarm_msec(next_timer());
}
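The fractional-second variant above relies on two helpers, time_ng() and alarm_msec(), whose bodies are not shown here. Below is a minimal sketch of what they might look like, assuming time_ng() returns the current wall-clock time as a double in seconds and alarm_msec() arms SIGALRM after a possibly fractional delay; these bodies are illustrative assumptions, not the server's actual implementation.

#include <math.h>
#include <string.h>
#include <sys/time.h>

/* Assumed helper: current wall-clock time in seconds with sub-second
 * precision.  The argument is ignored; it only mirrors the time(NULL)
 * calling convention used by the callers above. */
double time_ng(void *unused)
{
    struct timeval tv;

    gettimeofday(&tv, NULL);
    return (double) tv.tv_sec + (double) tv.tv_usec / 1000000.0;
}

/* Assumed helper: deliver SIGALRM after 'secs' seconds (fractional OK),
 * like alarm() but with sub-second resolution. */
void alarm_msec(double secs)
{
    struct itimerval it;

    memset(&it, 0, sizeof(it));
    if (secs < 0.0)
        secs = 0.0;
    it.it_value.tv_sec = (time_t) floor(secs);
    it.it_value.tv_usec = (suseconds_t) ((secs - floor(secs)) * 1000000.0);
    if (it.it_value.tv_sec == 0 && it.it_value.tv_usec == 0)
        it.it_value.tv_usec = 1;   /* a zero value would disarm the timer */
    setitimer(ITIMER_REAL, &it, NULL);
}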
void poll_loop(void)
{
    SIG_ENTITY *sig;
    fd_set perm,set;
    int fds,ret;

    FD_ZERO(&perm);
    FD_SET(kernel,&perm);
    fds = kernel+1;
    for (sig = entities; sig; sig = sig->next) {
        FD_SET(sig->signaling,&perm);
        if (fds <= sig->signaling) fds = sig->signaling+1;
    }
    gettimeofday(&now,NULL);
    while (!stop) {
        set = perm;
        poll_signals();
        /*
         * Here we have a small race condition: if a signal is delivered after
         * poll_signals tests for it but before select sleeps, we miss that
         * signal. If it is sent again, we're of course likely to get it. This
         * isn't worth fixing, because those signals are only used for
         * debugging anyway.
         */
        ret = select(fds,&set,NULL,NULL,next_timer());
        if (ret < 0) {
            if (errno != EINTR) perror("select");
        }
        else {
            diag(COMPONENT,DIAG_DEBUG,"----------");
            gettimeofday(&now,NULL);
            if (FD_ISSET(kernel,&set)) recv_kernel();
            for (sig = entities; sig; sig = sig->next)
                if (FD_ISSET(sig->signaling,&set)) recv_signaling(sig);
            expire_timers(); /* expire timers after handling messages to make
                                sure we don't time out unnecessarily because
                                of scheduling delays */
        }
    }
}
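In both select()-based daemon loops here, next_timer() is passed directly as the select() timeout, so it evidently returns a struct timeval * (or NULL to block indefinitely), and expire_timers() then runs whatever came due. The following is a sketch of such a timer queue under the assumption of a singly linked list kept sorted by expiry and the global 'now' that poll_loop() refreshes with gettimeofday(); the TIMER type and the list below are illustrative, not the daemon's actual bookkeeping.

#include <stdlib.h>
#include <sys/time.h>

extern struct timeval now;         /* refreshed by poll_loop() */

typedef struct _timer {
    struct timeval expires;        /* absolute expiry time */
    void (*callback)(void *user);  /* run when the timer fires */
    void *user;
    struct _timer *next;
} TIMER;

static TIMER *timers;              /* pending timers, earliest first */
static struct timeval next_due;    /* storage handed back to select() */

/* Timeout until the earliest pending timer, or NULL to block forever. */
struct timeval *next_timer(void)
{
    if (!timers)
        return NULL;
    timersub(&timers->expires, &now, &next_due);
    if (next_due.tv_sec < 0)
        timerclear(&next_due);     /* already overdue: poll immediately */
    return &next_due;
}

/* Run and discard every timer whose expiry is not in the future. */
void expire_timers(void)
{
    while (timers && !timercmp(&timers->expires, &now, >)) {
        TIMER *t = timers;

        timers = t->next;
        t->callback(t->user);
        free(t);
    }
}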
static void window_appear(Window *window)
{
    s_progress = 0;
    next_timer();
}
static void progress_callback(void *context)
{
    s_progress += (s_progress < 100) ? 1 : -100;
    progress_layer_set_progress(s_progress_layer, s_progress);
    next_timer();
}
/* Read one '\n'-terminated reply line from socket s into buf and return 1
 * only if its leading hex status digit equals 'match'; on any failure the
 * alarm is re-armed and 0 is returned. */
int expect(int s, int match, char *buf)
{
    int size;
    char *p;
    int n;
    int code;
    int newline;
    char *ptr;
    int cc;

    size = 1024;
    alarm_msec(30);
    ptr = buf;
    n = recv(s, ptr, size, MSG_PEEK);
    if (n <= 0) {
        mudstate.alarm_triggered = 0;
        alarm_msec(next_timer());
        return 0;
    }
    size -= n;
    buf[n] = '\0';
    if ((p = strchr(ptr, '\n')) == 0) {
        do {
            cc = read(s, ptr, n);
            if (cc < 0) {
                alarm_msec(next_timer());
                return 0;
            }
            if (cc != n) {
                alarm_msec(next_timer());
                return 0;
            }
            ptr += n;
            if ((n = recv(s, ptr, size, MSG_PEEK)) <= 0) {
                alarm_msec(next_timer());
                return 0;
            }
            size -= n;
            ptr[n] = '\0';
        } while ((p = strchr(ptr, '\n')) == 0);
        newline = 1 + p - buf;
        *p = 0;
    } else
        newline = 1 + p - ptr;
    cc = read(s, buf, newline);
    if (cc < 0) {
        alarm_msec(next_timer());
        return 0;
    }
    if (cc != newline) {
        alarm_msec(next_timer());
        return 0;
    }
    buf[newline] = '\0';
    mudstate.alarm_triggered = 0;
    alarm_msec(next_timer());
    if (!isxdigit((int)*buf)) {
        return 0;
    }
    if (isdigit((int)*buf))
        code = *buf - '0';
    else {
        if (isupper((int)*buf))
            *buf = tolower(*buf);
        code = 10 + *buf - 'a';
    }
    if (code == match)
        return 1;
    return 0;
}
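expect() peeks at the socket until a complete line has arrived, consumes exactly that line, re-arms the alarm, and compares the line's leading hex digit with 'match'. A hypothetical call site follows, assuming a connected stream socket and a protocol whose success status is 0; the function name, the command string, and the status convention are made up for illustration. Note that the peeked line can be up to 1024 bytes, so the buffer needs one extra byte for the terminator.

#include <unistd.h>

/* Hypothetical caller: send a request and require a "0" (success) reply. */
static int query_status(int sock)
{
    char reply[1025];   /* expect() peeks up to 1024 bytes and NUL-terminates */

    if (write(sock, "STATUS\n", 7) != 7)
        return 0;
    return expect(sock, 0, reply); /* 1 only if the leading hex digit is 0 */
}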
void poll_loop(void)
{
    ITF *itf,*next_itf;
    ENTRY *entry,*next_entry;
    VCC *vcc,*next_vcc;
    int fds,ret;

    gettimeofday(&now,NULL);
    while (1) {
        FD_ZERO(&rset);
        FD_ZERO(&cset);
        FD_SET(kernel,&rset);
        FD_SET(unix_sock,&rset);
        if (incoming >= 0) FD_SET(incoming,&rset);
        fds = incoming+1;
        if (kernel >= fds) fds = kernel+1;
        if (unix_sock >= fds) fds = unix_sock+1;
        for (itf = itfs; itf; itf = itf->next)
            for (entry = itf->table; entry; entry = entry->next)
                for (vcc = entry->vccs; vcc; vcc = vcc->next) {
                    if (vcc->connecting) FD_SET(vcc->fd,&cset);
                    else FD_SET(vcc->fd,&rset);
                    if (vcc->fd >= fds) fds = vcc->fd+1;
                }
        for (entry = unknown_incoming; entry; entry = entry->next) {
            if (!entry->vccs || entry->vccs->next) {
                diag(COMPONENT,DIAG_ERROR,"internal error: bad unknown entry");
                continue;
            }
            FD_SET(entry->vccs->fd,&rset);
            if (entry->vccs->fd >= fds) fds = entry->vccs->fd+1;
        }
        for (vcc = unidirectional_vccs; vcc; vcc = vcc->next) {
            FD_SET(vcc->fd,&rset);
            if (vcc->fd >= fds) fds = vcc->fd+1;
        }
        ret = select(fds,&rset,&cset,NULL,next_timer());
        /*
         * Now here's something strange: < 0.32 needed the exception mask to
         * be NULL in order to work, due to a bug in atm_select. In 0.32, this
         * has been fixed. Also, 2.1 kernels use the poll mechanism and not
         * select, so select is emulated on top of poll. Now the funny bit is
         * that, as soon as the exception set is non-NULL, when a non-blocking
         * connect finishes, select returns one but has none of the possible
         * bits set in either rset or cset. To make things even stranger, no
         * exception is actually found in sys_select, so this must be some
         * very odd side-effect ... The work-around for now is to simply pass
         * NULL for the exception mask (which is the right thing to do anyway,
         * but it'd be nice if doing a perfectly valid variation wouldn't blow
         * up the system ...)
         */
#if 0
        {
            int i;

            for (i = 0; i < sizeof(rset); i++)
                fprintf(stderr,"%02x:%02x ",((unsigned char *) &rset)[i],
                    ((unsigned char *) &cset)[i]);
            fprintf(stderr,"\n");
        }
#endif
        if (ret < 0) {
            if (errno != EINTR) perror("select");
        }
        else {
            diag(COMPONENT,DIAG_DEBUG,"----------");
            gettimeofday(&now,NULL);
            if (FD_ISSET(kernel,&rset)) recv_kernel();
            if (FD_ISSET(unix_sock,&rset)) recv_unix();
            if (incoming >= 0 && FD_ISSET(incoming,&rset)) accept_new();
            for (itf = itfs; itf; itf = next_itf) {
                next_itf = itf->next;
                for (entry = itf->table; entry; entry = next_entry) {
                    next_entry = entry->next;
                    for (vcc = entry->vccs; vcc; vcc = next_vcc) {
                        next_vcc = vcc->next;
                        if (FD_ISSET(vcc->fd,&rset)) recv_vcc(vcc);
                        else if (FD_ISSET(vcc->fd,&cset)) complete_connect(vcc);
                    }
                }
            }
            for (entry = unknown_incoming; entry; entry = next_entry) {
                next_entry = entry->next;
                if (FD_ISSET(entry->vccs->fd,&rset)) recv_vcc(entry->vccs);
            }
            for (vcc = unidirectional_vccs; vcc; vcc = next_vcc) {
                next_vcc = vcc->next;
                if (FD_ISSET(vcc->fd,&rset)) drain_vcc(vcc);
            }
            expire_timers(); /* expire timers after handling messages to make
                                sure we don't time out unnecessarily because
                                of scheduling delays */
        }
        table_changed();
    }
}
void NDECL(dispatch)
{
    char *cmdsave;

    cmdsave = mudstate.debug_cmd;
    mudstate.debug_cmd = (char *)"< dispatch >";

    /* this routine can be used to poll from interface.c */

    if (!mudstate.alarm_triggered)
        return;
    mudstate.alarm_triggered = 0;
    mudstate.lastnowmsec = mudstate.nowmsec;
    mudstate.lastnow = mudstate.now;
    mudstate.nowmsec = time_ng(NULL);
    mudstate.now = (time_t) floor(mudstate.nowmsec);
    do_second();
    local_second();

    /* Free list reconstruction */

    if ((mudconf.control_flags & CF_DBCHECK) &&
        (mudstate.check_counter <= mudstate.nowmsec)) {
        mudstate.check_counter = mudconf.check_interval + mudstate.nowmsec;
        mudstate.debug_cmd = (char *)"< dbck >";
        cache_reset(0);
        do_dbck(NOTHING, NOTHING, 0);
        cache_reset(0);
        pcache_trim();
    }

    /* Database dump routines */

    if ((mudconf.control_flags & CF_CHECKPOINT) &&
        (mudstate.dump_counter <= mudstate.nowmsec)) {
        mudstate.dump_counter = mudconf.dump_interval + mudstate.nowmsec;
        mudstate.debug_cmd = (char *)"< dump >";
        fork_and_dump(0, (char *)NULL);
    }

    /* Idle user check */

    if ((mudconf.control_flags & CF_IDLECHECK) &&
        (mudstate.idle_counter <= mudstate.nowmsec)) {
        mudstate.idle_counter = mudconf.idle_interval + mudstate.nowmsec;
        mudstate.debug_cmd = (char *)"< idlecheck >";
        cache_reset(0);
        check_idle();
    }

#ifdef HAVE_GETRUSAGE
    /* Memory use stats */

    if (mudstate.mstats_counter <= mudstate.nowmsec) {
        int curr;

        mudstate.mstats_counter = 15 + mudstate.nowmsec;
        curr = mudstate.mstat_curr;
        if ((curr >= 0) && (mudstate.now > mudstate.mstat_secs[curr])) {
            struct rusage usage;

            curr = 1 - curr;
            getrusage(RUSAGE_SELF, &usage);
            mudstate.mstat_ixrss[curr] = usage.ru_ixrss;
            mudstate.mstat_idrss[curr] = usage.ru_idrss;
            mudstate.mstat_isrss[curr] = usage.ru_isrss;
            mudstate.mstat_secs[curr] = mudstate.now;
            mudstate.mstat_curr = curr;
        }
    }
#endif

#ifdef RWHO_IN_USE
    if ((mudconf.control_flags & CF_RWHO_XMIT) &&
        (mudstate.rwho_counter <= mudstate.nowmsec)) {
        mudstate.rwho_counter = mudconf.rwho_interval + mudstate.nowmsec;
        mudstate.debug_cmd = (char *)"< rwho update >";
        rwho_update();
    }
#endif

    /* reset alarm */

    alarm_msec(next_timer());
    mudstate.debug_cmd = cmdsave;
}
static void progress_callback(void *context)
{
    s_progress += (s_progress < 100) ? 1 : -100;
    layer_mark_dirty(s_progress_bar);
    next_timer();
}
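The Pebble snippets (window_appear() and the two progress_callback() variants) all finish by calling next_timer() to schedule the next tick. A plausible implementation is sketched below, assuming the SDK's app_timer_register(); the two delay values are assumptions chosen for illustration, not taken from the app.

#include <pebble.h>

/* Assumed scheduler for the callbacks above: tick quickly while the bar is
 * filling, then pause before the counter wraps back to zero. */
static void next_timer(void) {
    app_timer_register((s_progress < 100) ? 33 : 1000, progress_callback, NULL);
}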