/** Clean up a UPing structure, reporting results to the requester.
 * @param[in,out] pptr UPing results.
 */
void uping_end(struct UPing* pptr)
{
  Debug((DEBUG_DEBUG, "uping_end: %p", pptr));

  if (pptr->client) {
    if (pptr->lastsent) {
      if (0 < pptr->received) {
        sendcmdto_one(&me, CMD_NOTICE, pptr->client, "%C :UPING %s%s",
                      pptr->client, pptr->name, pptr->buf);
        sendcmdto_one(&me, CMD_NOTICE, pptr->client, "%C :UPING Stats: "
                      "sent %d recvd %d ; min/avg/max = %u/%u/%u ms",
                      pptr->client, pptr->sent, pptr->received, pptr->ms_min,
                      (2 * pptr->ms_ave) / (2 * pptr->received), pptr->ms_max);
      } else
        sendcmdto_one(&me, CMD_NOTICE, pptr->client, "%C :UPING: no response "
                      "from %s within %d seconds", pptr->client, pptr->name,
                      UPINGTIMEOUT);
    } else
      sendcmdto_one(&me, CMD_NOTICE, pptr->client, "%C :UPING: Could not "
                    "start ping to %s", pptr->client, pptr->name);
  }

  close(pptr->fd);
  pptr->fd = -1;
  uping_erase(pptr);
  if (pptr->client)
    ClearUPing(pptr->client);
  if (pptr->freeable & UPING_PENDING_SOCKET)
    socket_del(&pptr->socket);
  if (pptr->freeable & UPING_PENDING_SENDER)
    timer_del(&pptr->sender);
  if (pptr->freeable & UPING_PENDING_KILLER)
    timer_del(&pptr->killer);
}

void cpu_tick_set_limit(CPUTimer *timer, uint64_t limit)
{
    int64_t now = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);

    uint64_t real_limit = limit & ~timer->disabled_mask;
    timer->disabled = (limit & timer->disabled_mask) ? 1 : 0;

    int64_t expires = cpu_to_timer_ticks(real_limit, timer->frequency) +
                      timer->clock_offset;

    if (expires < now) {
        expires = now + 1;
    }

    TIMER_DPRINTF("%s set_limit limit=0x%016lx (%s) p=%p "
                  "called with limit=0x%016lx at 0x%016lx (delta=0x%016lx)\n",
                  timer->name, real_limit,
                  timer->disabled ? "disabled" : "enabled",
                  timer, limit,
                  timer_to_cpu_ticks(now - timer->clock_offset,
                                     timer->frequency),
                  timer_to_cpu_ticks(expires - now, timer->frequency));

    if (!real_limit) {
        TIMER_DPRINTF("%s set_limit limit=ZERO - not starting timer\n",
                      timer->name);
        timer_del(timer->qtimer);
    } else if (timer->disabled) {
        timer_del(timer->qtimer);
    } else {
        timer_mod(timer->qtimer, expires);
    }
}

void cpu_tick_set_limit(CPUTimer *timer, uint64_t limit)
{
    int64_t now = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);

    uint64_t real_limit = limit & ~timer->disabled_mask;
    timer->disabled = (limit & timer->disabled_mask) ? 1 : 0;

    int64_t expires = cpu_to_timer_ticks(real_limit, timer->frequency) +
                      timer->clock_offset;

    if (expires < now) {
        expires = now + 1;
    }

    trace_sparc64_cpu_tick_set_limit(timer->name, real_limit,
                                     timer->disabled ? "disabled" : "enabled",
                                     timer, limit,
                                     timer_to_cpu_ticks(
                                         now - timer->clock_offset,
                                         timer->frequency),
                                     timer_to_cpu_ticks(
                                         expires - now, timer->frequency));

    if (!real_limit) {
        trace_sparc64_cpu_tick_set_limit_zero(timer->name);
        timer_del(timer->qtimer);
    } else if (timer->disabled) {
        timer_del(timer->qtimer);
    } else {
        timer_mod(timer->qtimer, expires);
    }
}

int main(int argc, char *argv[])
{
    struct timespec start, curr;
    struct timers timers;
    struct list_head expired;
    struct timer t[PER_CONN_TIME];
    unsigned int i, num;
    bool check = false;

    opt_register_noarg("-c|--check", opt_set_bool, &check,
                       "Check timer structure during progress");
    opt_parse(&argc, argv, opt_log_stderr_exit);

    num = argv[1] ? atoi(argv[1]) : (check ? 100000 : 100000000);

    list_head_init(&expired);
    curr = start = time_now();
    timers_init(&timers, start);

    for (i = 0; i < num; i++) {
        curr = time_add(curr, time_from_msec(1));
        if (check)
            timers_check(&timers, NULL);
        timers_expire(&timers, curr, &expired);
        if (check)
            timers_check(&timers, NULL);
        assert(list_empty(&expired));
        if (i >= PER_CONN_TIME) {
            timer_del(&timers, &t[i % PER_CONN_TIME]);
            if (check)
                timers_check(&timers, NULL);
        }
        timer_add(&timers, &t[i % PER_CONN_TIME],
                  time_add(curr, time_from_msec(CONN_TIMEOUT_MS)));
        if (check)
            timers_check(&timers, NULL);
    }
    if (num > PER_CONN_TIME) {
        for (i = 0; i < PER_CONN_TIME; i++)
            timer_del(&timers, &t[i]);
    }

    curr = time_sub(time_now(), start);
    if (check)
        timers_check(&timers, NULL);
    timers_cleanup(&timers);
    opt_free_table();

    for (i = 0; i < ARRAY_SIZE(timers.level); i++)
        if (!timers.level[i])
            break;

    printf("%u in %lu.%09lu (%u levels / %zu)\n",
           num, (long)curr.tv_sec, curr.tv_nsec,
           i, ARRAY_SIZE(timers.level));
    return 0;
}

static int wdt_diag288_handle_timer(DIAG288State *diag288,
                                    uint64_t func, uint64_t timeout)
{
    switch (func) {
    case WDT_DIAG288_INIT:
        diag288->enabled = true;
        /* fall through */
    case WDT_DIAG288_CHANGE:
        if (!diag288->enabled) {
            return -1;
        }
        timer_mod(diag288->timer,
                  qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL)
                  + timeout * get_ticks_per_sec());
        break;
    case WDT_DIAG288_CANCEL:
        if (!diag288->enabled) {
            return -1;
        }
        diag288->enabled = false;
        timer_del(diag288->timer);
        break;
    default:
        return -1;
    }

    return 0;
}

static void wdt_diag288_reset(DeviceState *dev)
{
    DIAG288State *diag288 = DIAG288(dev);

    diag288->enabled = false;
    timer_del(diag288->timer);
}

static void wdt_diag288_unrealize(DeviceState *dev, Error **errp)
{
    DIAG288State *diag288 = DIAG288(dev);

    timer_del(diag288->timer);
    timer_free(diag288->timer);
}

static void handle_msg(IPMIBmcExtern *ibe)
{
    IPMIInterfaceClass *k = IPMI_INTERFACE_GET_CLASS(ibe->parent.intf);

    if (ibe->in_escape) {
        ipmi_debug("msg escape not ended\n");
        return;
    }
    if (ibe->inpos < 5) {
        ipmi_debug("msg too short\n");
        return;
    }
    if (ibe->in_too_many) {
        ibe->inbuf[3] = IPMI_CC_REQUEST_DATA_TRUNCATED;
        ibe->inpos = 4;
    } else if (ipmb_checksum(ibe->inbuf, ibe->inpos, 0) != 0) {
        ipmi_debug("msg checksum failure\n");
        return;
    } else {
        ibe->inpos--; /* Remove checksum */
    }

    timer_del(ibe->extern_timer);
    ibe->waiting_rsp = false;
    k->handle_rsp(ibe->parent.intf, ibe->inbuf[0], ibe->inbuf + 1,
                  ibe->inpos - 1);
}

/* The user logged out, the client timed out, or some error occurred; we need
 * to close the connection and release its resources. */
void close_connection(struct conn_server *server, struct connection *conn)
{
    /* remove from fd-conn hash map */
    close(conn->sfd);
    struct fd_entry *fd_entry;
    HASH_FIND_INT(server->fd_conn_map, &conn->sfd, fd_entry);
    if (fd_entry) {
        HASH_DEL(server->fd_conn_map, fd_entry);
        free(fd_entry);
    }

    /* remove from uin-conn hash map */
    struct uin_entry *uin_entry;
    HASH_FIND_INT(server->uin_conn_map, &conn->uin, uin_entry);
    if (uin_entry) {
        HASH_DEL(server->uin_conn_map, uin_entry);
        free(uin_entry);
    }

    /* remove from timer */
    timer_del(conn);
    conn_destroy(server, conn);

    /* free the conn struct */
    allocator_free(&server->conn_allocator, conn);
}

static void ipmi_bmc_extern_finalize(Object *obj)
{
    IPMIBmcExtern *ibe = IPMI_BMC_EXTERN(obj);

    timer_del(ibe->extern_timer);
    timer_free(ibe->extern_timer);
}

static void timer_cancel(void *data, int cpu, double time,
                         long long *number, const char **str)
{
    struct ftrace_so_timer *task = (struct ftrace_so_timer *)data;
    long long timer = number[FTRACE_SO_TIMER];

    timer_del(task, cpu, time, timer);
}

void sys_timer_unset( SysTimer timer )
{
    if (timer->timer) {
        timer_del( timer->timer );
    }
}

static void timerblock_write(void *opaque, hwaddr addr,
                             uint64_t value, unsigned size)
{
    TimerBlock *tb = (TimerBlock *)opaque;
    int64_t old;

    switch (addr) {
    case 0: /* Load */
        tb->load = value;
        /* Fall through. */
    case 4: /* Counter. */
        if ((tb->control & 1) && tb->count) {
            /* Cancel the previous timer. */
            timer_del(tb->timer);
        }
        tb->count = value;
        if (tb->control & 1) {
            timerblock_reload(tb, 1);
        }
        break;
    case 8: /* Control. */
        old = tb->control;
        tb->control = value;
        if (((old & 1) == 0) && (value & 1)) {
            if (tb->count == 0 && (tb->control & 2)) {
                tb->count = tb->load;
            }
            timerblock_reload(tb, 1);
        }
        break;
    case 12: /* Interrupt status. */
        tb->status &= ~value;
        timerblock_update_irq(tb);
        break;
    }
}

/** Timer callback to send another outbound uping.
 * @param[in] ev Event for uping timer.
 */
static void uping_sender_callback(struct Event* ev)
{
  struct UPing *pptr;

  assert(0 != ev_timer(ev));
  assert(0 != t_data(ev_timer(ev)));

  pptr = (struct UPing*) t_data(ev_timer(ev));

  Debug((DEBUG_SEND, "uping_sender_callback called, %p (%d)", pptr,
         ev_type(ev)));

  if (ev_type(ev) == ET_DESTROY) { /* being destroyed */
    pptr->freeable &= ~UPING_PENDING_SENDER;

    if (!pptr->freeable)
      MyFree(pptr); /* done with it, finally */
  } else {
    assert(ev_type(ev) == ET_EXPIRE);

    pptr->lastsent = CurrentTime; /* store last ping time */
    uping_send(pptr); /* send a ping */

    if (pptr->sent == pptr->count) /* done sending pings, don't send more */
      timer_del(ev_timer(ev));
  }
}

int odp_hisi_timer_stop(struct odp_hisi_timer *tim)
{
    union odp_hisi_timer_status prev_status, status;
    unsigned core_id = odp_core_id();
    int ret;

    ret = timer_set_config_state(tim, &prev_status);
    if (ret < 0)
        return -1;

    __TIMER_STAT_ADD(stop, 1);
    if ((prev_status.state == ODP_HISI_TIMER_RUNNING) &&
        (core_id < ODP_MAX_CORE))
        priv_timer[core_id].updated = 1;

    if (prev_status.state == ODP_HISI_TIMER_PENDING) {
        timer_del(tim, prev_status, 0);
        __TIMER_STAT_ADD(pending, -1);
    }

    odp_mb_full();

    status.state = ODP_HISI_TIMER_STOP;
    status.owner = ODP_HISI_TIMER_NO_OWNER;
    tim->status.u32 = status.u32;

    return 0;
}

void popup(char *text, int time_out,
           int (*keypress_handler)(struct Screen *, int))
{
    if (!popup_active) {
        popup_active = 1;
        screen_visible(cur_screen, 0);
        popup_save();
        popup_win.flags |= WINFLG_VISIBLE;
        if (keypress_handler != NULL)
            popup_keypress_handler = keypress_handler;
        set_keypress_handler(popup_keypress_dispatch);
        if (time_out > 0) {
            timer_add(&popup_tmr, time_out, 0);
            close_popup_task = task_add(&close_popup);
        }
    } else {
        // already a popup active
        if (time_out > 0) {
            if (timed_popup)
                timer_set(&popup_tmr, time_out, 0);
            else {
                timer_add(&popup_tmr, time_out, 0);
                close_popup_task = task_add(close_popup);
            }
        } else {
            if (timed_popup) {
                task_del(close_popup_task);
                timer_del(&popup_tmr);
            }
        }
    }
    timed_popup = (time_out > 0);
    win_new_text(&popup_win, text);
    win_redraw(&popup_win);
}

/* handle periodic timer */
static void periodic_timer_update(RTCState *s, int64_t current_time)
{
    int period_code, period;
    int64_t cur_clock, next_irq_clock;

    period_code = s->cmos_data[RTC_REG_A] & 0x0f;
    if (period_code != 0 && (s->cmos_data[RTC_REG_B] & REG_B_PIE)) {
        if (period_code <= 2)
            period_code += 7;
        /* period in 32 Khz cycles */
        period = 1 << (period_code - 1);
#ifdef TARGET_I386
        if (period != s->period) {
            s->irq_coalesced = (s->irq_coalesced * s->period) / period;
            DPRINTF_C("cmos: coalesced irqs scaled to %d\n", s->irq_coalesced);
        }
        s->period = period;
#endif
        /* compute 32 khz clock */
        cur_clock = muldiv64(current_time, RTC_CLOCK_RATE,
                             get_ticks_per_sec());
        next_irq_clock = (cur_clock & ~(period - 1)) + period;
        s->next_periodic_time =
            muldiv64(next_irq_clock, get_ticks_per_sec(), RTC_CLOCK_RATE) + 1;
        timer_mod(s->periodic_timer, s->next_periodic_time);
    } else {
#ifdef TARGET_I386
        s->irq_coalesced = 0;
#endif
        timer_del(s->periodic_timer);
    }
}

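/* Worked example, derived only from periodic_timer_update() above and
 * assuming the usual RTC_CLOCK_RATE of 32768 Hz: for period_code 6,
 * period = 1 << (6 - 1) = 32 cycles, so the periodic interrupt fires every
 * 32 / 32768 s, roughly 976.6 us (a 1024 Hz rate).  Codes 1 and 2 are first
 * remapped to 8 and 9, giving 128 and 256 cycles (about 3.9 ms and 7.8 ms),
 * so the shortest selectable period comes from code 3: 4 cycles, about
 * 122 us. */
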
static void cpu_timer_reset(CPUTimer *timer)
{
    timer->disabled = 1;
    timer->clock_offset = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);

    timer_del(timer->qtimer);
}

void sys_timer_set( SysTimer timer, SysTime when,
                    SysCallback _callback, void* opaque )
{
    QEMUTimerCB* callback = (QEMUTimerCB*)_callback;

    if (callback == NULL) {  /* unsetting the timer */
        if (timer->timer) {
            timer_del( timer->timer );
            timer_free( timer->timer );
            timer->timer = NULL;
        }
        timer->callback = callback;
        timer->opaque   = NULL;
        return;
    }

    if ( timer->timer ) {
        if ( timer->callback == callback && timer->opaque == opaque )
            goto ReuseTimer;

        /* need to replace the timer */
        timer_free( timer->timer );
    }

    timer->timer    = timer_new(QEMU_CLOCK_REALTIME, SCALE_MS, callback, opaque );
    timer->callback = callback;
    timer->opaque   = opaque;

ReuseTimer:
    timer_mod( timer->timer, when );
}

static void dp8393x_do_software_reset(dp8393xState *s)
{
    timer_del(s->watchdog);

    s->regs[SONIC_CR] &= ~(SONIC_CR_LCAM | SONIC_CR_RRRA | SONIC_CR_TXP |
                           SONIC_CR_HTX);
    s->regs[SONIC_CR] |= SONIC_CR_RST | SONIC_CR_RXDIS;
}

void cpu_openrisc_count_update(OpenRISCCPU *cpu)
{
    uint64_t now, next;
    uint32_t wait;

    now = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
    if (!is_counting) {
        timer_del(cpu->env.timer);
        last_clk = now;
        return;
    }

    cpu->env.ttcr += (uint32_t)muldiv64(now - last_clk, TIMER_FREQ,
                                        get_ticks_per_sec());
    last_clk = now;

    if ((cpu->env.ttmr & TTMR_TP) <= (cpu->env.ttcr & TTMR_TP)) {
        wait = TTMR_TP - (cpu->env.ttcr & TTMR_TP) + 1;
        wait += cpu->env.ttmr & TTMR_TP;
    } else {
        wait = (cpu->env.ttmr & TTMR_TP) - (cpu->env.ttcr & TTMR_TP);
    }
    next = now + muldiv64(wait, get_ticks_per_sec(), TIMER_FREQ);
    timer_mod(cpu->env.timer, next);
}

static void _hwSensorClient_free( HwSensorClient* cl )
{
    /* remove from the sensors' client list */
    if (cl->sensors) {
        HwSensorClient** pnode = &cl->sensors->clients;
        for (;;) {
            HwSensorClient* node = *pnode;
            if (node == NULL)
                break;
            if (node == cl) {
                *pnode = cl->next;
                break;
            }
            pnode = &node->next;
        }
        cl->next    = NULL;
        cl->sensors = NULL;
    }

    /* close QEMUD client, if any */
    if (cl->client) {
        qemud_client_close(cl->client);
        cl->client = NULL;
    }

    /* remove timer, if any */
    if (cl->timer) {
        timer_del(cl->timer);
        timer_free(cl->timer);
        cl->timer = NULL;
    }

    AFREE(cl);
}

static void curl_close(BlockDriverState *bs)
{
    BDRVCURLState *s = bs->opaque;
    int i;

    DPRINTF("CURL: Close\n");
    for (i = 0; i < CURL_NUM_STATES; i++) {
        if (s->states[i].in_use) {
            curl_clean_state(&s->states[i]);
        }
        if (s->states[i].curl) {
            curl_easy_cleanup(s->states[i].curl);
            s->states[i].curl = NULL;
        }
        if (s->states[i].orig_buf) {
            g_free(s->states[i].orig_buf);
            s->states[i].orig_buf = NULL;
        }
    }
    if (s->multi) {
        curl_multi_cleanup(s->multi);
    }

    timer_del(&s->timer);

    g_free(s->url);
}

void apic_init_reset(DeviceState *dev)
{
    APICCommonState *s = APIC_COMMON(dev);
    int i;

    if (!s) {
        return;
    }
    s->tpr = 0;
    s->spurious_vec = 0xff;
    s->log_dest = 0;
    s->dest_mode = 0xf;
    memset(s->isr, 0, sizeof(s->isr));
    memset(s->tmr, 0, sizeof(s->tmr));
    memset(s->irr, 0, sizeof(s->irr));
    for (i = 0; i < APIC_LVT_NB; i++) {
        s->lvt[i] = APIC_LVT_MASKED;
    }
    s->esr = 0;
    memset(s->icr, 0, sizeof(s->icr));
    s->divide_conf = 0;
    s->count_shift = 0;
    s->initial_count = 0;
    s->initial_count_load_time = 0;
    s->next_time = 0;
    s->wait_for_sipi = !cpu_is_bsp(s->cpu);

    if (s->timer) {
        timer_del(s->timer);
    }
    s->timer_expiry = -1;
}

static void colo_compare_timer_del(CompareState *s)
{
    if (s->packet_check_timer) {
        timer_del(s->packet_check_timer);
        timer_free(s->packet_check_timer);
        s->packet_check_timer = NULL;
    }
}

/* handle update-ended timer */
static void check_update_timer(RTCState *s)
{
    uint64_t next_update_time;
    uint64_t guest_nsec;
    int next_alarm_sec;

    /* From the data sheet: "Holding the dividers in reset prevents
     * interrupts from operating, while setting the SET bit allows"
     * them to occur.  However, it will prevent an alarm interrupt
     * from occurring, because the time of day is not updated.
     */
    if ((s->cmos_data[RTC_REG_A] & 0x60) == 0x60) {
        timer_del(s->update_timer);
        return;
    }
    if ((s->cmos_data[RTC_REG_C] & REG_C_UF) &&
        (s->cmos_data[RTC_REG_B] & REG_B_SET)) {
        timer_del(s->update_timer);
        return;
    }
    if ((s->cmos_data[RTC_REG_C] & REG_C_UF) &&
        (s->cmos_data[RTC_REG_C] & REG_C_AF)) {
        timer_del(s->update_timer);
        return;
    }

    guest_nsec = get_guest_rtc_ns(s) % NSEC_PER_SEC;
    /* if UF is clear, reprogram to next second */
    next_update_time = qemu_clock_get_ns(rtc_clock)
        + NSEC_PER_SEC - guest_nsec;

    /* Compute time of next alarm.  One second is already accounted
     * for in next_update_time.
     */
    next_alarm_sec = get_next_alarm(s);
    s->next_alarm_time = next_update_time +
                         (next_alarm_sec - 1) * NSEC_PER_SEC;

    if (s->cmos_data[RTC_REG_C] & REG_C_UF) {
        /* UF is set, but AF is clear.  Program the timer to target
         * the alarm time.
         */
        next_update_time = s->next_alarm_time;
    }
    if (next_update_time != timer_expire_time_ns(s->update_timer)) {
        timer_mod(s->update_timer, next_update_time);
    }
}

static void hid_del_idle_timer(HIDState *hs)
{
    if (hs->idle_timer) {
        timer_del(hs->idle_timer);
        timer_free(hs->idle_timer);
        hs->idle_timer = NULL;
    }
}

static void nic_cleanup(NetClientState *ncs)
{
#if 0
    tnetw1130_t *s = qemu_get_nic_opaque(ncs);
    timer_del(s->poll_timer);
    timer_free(s->poll_timer);
#endif
}

static void apic_post_load(APICCommonState *s)
{
    if (s->timer_expiry != -1) {
        timer_mod(s->timer, s->timer_expiry);
    } else {
        timer_del(s->timer);
    }
}

/* destroy a timer */
static void throttle_timer_destroy(QEMUTimer **timer)
{
    assert(*timer != NULL);

    timer_del(*timer);
    timer_free(*timer);
    *timer = NULL;
}

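/* Usage sketch, not taken from any of the functions above: a minimal example
 * of the create/arm/destroy lifecycle that the timer_del + timer_free + NULL
 * pattern in throttle_timer_destroy() completes.  It assumes the standard
 * QEMU timer API (timer_new_ns, timer_mod, timer_del, timer_free,
 * qemu_clock_get_ns) and a build inside the QEMU tree; all example_* names
 * are hypothetical. */
#include "qemu/osdep.h"
#include "qemu/timer.h"

static QEMUTimer *example_timer;

static void example_cb(void *opaque)
{
    /* called once the deadline passes; opaque is the pointer that was
     * handed to timer_new_ns() */
}

static void example_arm(void *state, int64_t delay_ns)
{
    if (!example_timer) {
        example_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, example_cb, state);
    }
    /* (re)program the deadline relative to the virtual clock */
    timer_mod(example_timer,
              qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + delay_ns);
}

static void example_teardown(void)
{
    if (example_timer) {
        timer_del(example_timer);   /* stop it if still pending */
        timer_free(example_timer);  /* release the QEMUTimer */
        example_timer = NULL;       /* avoid a dangling pointer */
    }
}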