static int sched_wakeup_handler(struct trace_seq *s,
				struct pevent_record *record,
				struct event_format *event, void *context)
{
	struct format_field *field;
	unsigned long long val;

	if (pevent_get_field_val(s, event, "pid", record, &val, 1))
		return trace_seq_putc(s, '!');

	field = pevent_find_any_field(event, "comm");
	if (field) {
		write_and_save_comm(field, record, s, val);
		trace_seq_putc(s, ':');
	}
	trace_seq_printf(s, "%lld", val);

	if (pevent_get_field_val(s, event, "prio", record, &val, 0) == 0)
		trace_seq_printf(s, " [%lld]", val);

	if (pevent_get_field_val(s, event, "success", record, &val, 1) == 0)
		trace_seq_printf(s, " success=%lld", val);

	if (pevent_get_field_val(s, event, "target_cpu", record, &val, 0) == 0)
		trace_seq_printf(s, " CPU:%03llu", val);

	return 0;
}
static int sched_switch_handler(struct trace_seq *s,
				struct pevent_record *record,
				struct event_format *event, void *context)
{
	struct format_field *field;
	unsigned long long val;

	if (pevent_get_field_val(s, event, "prev_pid", record, &val, 1))
		return trace_seq_putc(s, '!');

	field = pevent_find_any_field(event, "prev_comm");
	if (field) {
		write_and_save_comm(field, record, s, val);
		trace_seq_putc(s, ':');
	}
	trace_seq_printf(s, "%lld ", val);

	if (pevent_get_field_val(s, event, "prev_prio", record, &val, 0) == 0)
		trace_seq_printf(s, "[%lld] ", val);

	if (pevent_get_field_val(s, event, "prev_state", record, &val, 0) == 0)
		write_state(s, val);

	trace_seq_puts(s, " ==> ");

	if (pevent_get_field_val(s, event, "next_pid", record, &val, 1))
		return trace_seq_putc(s, '!');

	field = pevent_find_any_field(event, "next_comm");
	if (field) {
		write_and_save_comm(field, record, s, val);
		trace_seq_putc(s, ':');
	}
	trace_seq_printf(s, "%lld", val);

	if (pevent_get_field_val(s, event, "next_prio", record, &val, 0) == 0)
		trace_seq_printf(s, " [%lld]", val);

	return 0;
}
static int function_handler(struct trace_seq *s, struct pevent_record *record,
			    struct event_format *event, void *context)
{
	struct pevent *pevent = event->pevent;
	unsigned long long function;
	unsigned long long pfunction;
	const char *func;
	const char *parent;
	int index;

	if (pevent_get_field_val(s, event, "ip", record, &function, 1))
		return trace_seq_putc(s, '!');

	func = pevent_find_function(pevent, function);

	if (pevent_get_field_val(s, event, "parent_ip", record, &pfunction, 1))
		return trace_seq_putc(s, '!');

	parent = pevent_find_function(pevent, pfunction);

	index = add_and_get_index(parent, func, record->cpu);

	trace_seq_printf(s, "%*s", index * 3, "");

	if (func)
		trace_seq_printf(s, "%s", func);
	else
		trace_seq_printf(s, "0x%llx", function);

	trace_seq_printf(s, " <-- ");
	if (parent)
		trace_seq_printf(s, "%s", parent);
	else
		trace_seq_printf(s, "0x%llx", pfunction);

	return 0;
}
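/*
 * Illustrative registration sketch: handlers like the three above only take
 * effect once they are bound to their events via the libtraceevent call
 * pevent_register_event_handler(). The wrapper function name below is an
 * assumption for illustration (in trace-cmd these handlers live in separate
 * plugins, each with its own loader), not code taken from the listings above.
 */
static int register_trace_handlers(struct pevent *pevent)
{
	pevent_register_event_handler(pevent, -1, "ftrace", "function",
				      function_handler, NULL);
	pevent_register_event_handler(pevent, -1, "sched", "sched_switch",
				      sched_switch_handler, NULL);
	pevent_register_event_handler(pevent, -1, "sched", "sched_wakeup",
				      sched_wakeup_handler, NULL);
	return 0;
}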
int ras_extlog_mem_event_handler(struct trace_seq *s,
				 struct pevent_record *record,
				 struct event_format *event, void *context)
{
	int len;
	unsigned long long val;
	struct ras_events *ras = context;
	time_t now;
	struct tm *tm;
	struct ras_extlog_event ev;

	/*
	 * Newer kernels (3.10-rc1 or later) provide an uptime clock.
	 * On older kernels, the way to properly generate an event would
	 * be to inject a fake one, measure its timestamp and diff it against
	 * gettimeofday. We won't do that here. Instead, let's use uptime,
	 * falling back to the event report's time if the "uptime" clock is
	 * not available (legacy kernels).
	 */
	if (ras->use_uptime)
		now = record->ts / user_hz + ras->uptime_diff;
	else
		now = time(NULL);

	tm = localtime(&now);
	if (tm)
		strftime(ev.timestamp, sizeof(ev.timestamp),
			 "%Y-%m-%d %H:%M:%S %z", tm);
	trace_seq_printf(s, "%s ", ev.timestamp);

	if (pevent_get_field_val(s, event, "etype", record, &val, 1) < 0)
		return -1;
	ev.etype = val;

	if (pevent_get_field_val(s, event, "err_seq", record, &val, 1) < 0)
		return -1;
	ev.error_seq = val;

	if (pevent_get_field_val(s, event, "sev", record, &val, 1) < 0)
		return -1;
	ev.severity = val;

	if (pevent_get_field_val(s, event, "pa", record, &val, 1) < 0)
		return -1;
	ev.address = val;

	if (pevent_get_field_val(s, event, "pa_mask_lsb", record, &val, 1) < 0)
		return -1;
	ev.pa_mask_lsb = val;

	ev.cper_data = pevent_get_field_raw(s, event, "data", record, &len, 1);
	ev.cper_data_length = len;
	ev.fru_text = pevent_get_field_raw(s, event, "fru_text", record,
					   &len, 1);
	ev.fru_id = pevent_get_field_raw(s, event, "fru_id", record, &len, 1);

	report_extlog_mem_event(ras, record, s, &ev);

	ras_store_extlog_mem_record(ras, &ev);

	return 0;
}
void perf_power_bundle::handle_trace_point(void *trace, int cpunr, uint64_t time)
{
	struct event_format *event;
	struct pevent_record rec; /* holder */
	class abstract_cpu *cpu;
	int type;

	rec.data = trace;
	type = pevent_data_type(perf_event::pevent, &rec);
	event = pevent_find_event(perf_event::pevent, type);
	if (!event)
		return;

	if (cpunr >= (int)all_cpus.size()) {
		cout << "INVALID cpu nr in handle_trace_point\n";
		return;
	}

	cpu = all_cpus[cpunr];

#if 0
	unsigned int i;
	printf("Time is %llu \n", time);
	for (i = 0; i < system_level.children.size(); i++)
		if (system_level.children[i])
			system_level.children[i]->validate();
#endif

	unsigned long long val;
	int ret;

	if (strcmp(event->name, "cpu_idle") == 0) {
		ret = pevent_get_field_val(NULL, event, "state", &rec, &val, 0);
		if (ret < 0) {
			fprintf(stderr, _("cpu_idle event returned no state?\n"));
			exit(-1);
		}

		if (val == 4294967295) /* (u32)-1 marks the exit from idle */
			cpu->go_unidle(time);
		else
			cpu->go_idle(time);
	}

	if (strcmp(event->name, "power_frequency") == 0 ||
	    strcmp(event->name, "cpu_frequency") == 0) {
		ret = pevent_get_field_val(NULL, event, "state", &rec, &val, 0);
		if (ret < 0) {
			fprintf(stderr, _("power or cpu_frequency event returned no state?\n"));
			exit(-1);
		}

		cpu->change_freq(time, val);
	}

	if (strcmp(event->name, "power_start") == 0)
		cpu->go_idle(time);

	if (strcmp(event->name, "power_end") == 0)
		cpu->go_unidle(time);

#if 0
	unsigned int i;
	for (i = 0; i < system_level.children.size(); i++)
		if (system_level.children[i])
			system_level.children[i]->validate();
#endif
}
int ras_mce_event_handler(struct trace_seq *s,
			  struct pevent_record *record,
			  struct event_format *event, void *context)
{
	unsigned long long val;
	struct ras_events *ras = context;
	struct mce_priv *mce = ras->mce_priv;
	struct mce_event e;
	int rc = 0;

	memset(&e, 0, sizeof(e));

	/* Parse the MCE error data */
	if (pevent_get_field_val(s, event, "mcgcap", record, &val, 1) < 0)
		return -1;
	e.mcgcap = val;

	if (pevent_get_field_val(s, event, "mcgstatus", record, &val, 1) < 0)
		return -1;
	e.mcgstatus = val;

	if (pevent_get_field_val(s, event, "status", record, &val, 1) < 0)
		return -1;
	e.status = val;

	if (pevent_get_field_val(s, event, "addr", record, &val, 1) < 0)
		return -1;
	e.addr = val;

	if (pevent_get_field_val(s, event, "misc", record, &val, 1) < 0)
		return -1;
	e.misc = val;

	if (pevent_get_field_val(s, event, "ip", record, &val, 1) < 0)
		return -1;
	e.ip = val;

	if (pevent_get_field_val(s, event, "tsc", record, &val, 1) < 0)
		return -1;
	e.tsc = val;

	if (pevent_get_field_val(s, event, "walltime", record, &val, 1) < 0)
		return -1;
	e.walltime = val;

	if (pevent_get_field_val(s, event, "cpu", record, &val, 1) < 0)
		return -1;
	e.cpu = val;

	if (pevent_get_field_val(s, event, "cpuid", record, &val, 1) < 0)
		return -1;
	e.cpuid = val;

	if (pevent_get_field_val(s, event, "apicid", record, &val, 1) < 0)
		return -1;
	e.apicid = val;

	if (pevent_get_field_val(s, event, "socketid", record, &val, 1) < 0)
		return -1;
	e.socketid = val;

	if (pevent_get_field_val(s, event, "cs", record, &val, 1) < 0)
		return -1;
	e.cs = val;

	if (pevent_get_field_val(s, event, "bank", record, &val, 1) < 0)
		return -1;
	e.bank = val;

	if (pevent_get_field_val(s, event, "cpuvendor", record, &val, 1) < 0)
		return -1;
	e.cpuvendor = val;

	switch (mce->cputype) {
	case CPU_GENERIC:
		break;
	case CPU_K8:
		rc = parse_amd_k8_event(ras, &e);
		break;
	default:		/* All other CPU types are Intel */
		rc = parse_intel_event(ras, &e);
	}

	if (rc)
		return rc;

	if (!*e.error_msg && *e.mcastatus_msg)
		mce_snprintf(e.error_msg, "%s", e.mcastatus_msg);

	report_mce_event(ras, record, s, &e);

#ifdef HAVE_SQLITE3
	ras_store_mce_record(ras, &e);
#endif

#ifdef HAVE_ABRT_REPORT
	/* Report event to ABRT */
	ras_report_mce_event(ras, &e);
#endif

	return 0;
}
int ras_mc_event_handler(struct trace_seq *s,
			 struct pevent_record *record,
			 struct event_format *event, void *context)
{
	int len;
	unsigned long long val;
	struct ras_events *ras = context;
	time_t now;
	struct tm *tm;
	struct ras_mc_event ev;
	int parsed_fields = 0;

	/*
	 * Newer kernels (3.10-rc1 or later) provide an uptime clock.
	 * On older kernels, the way to properly generate an event would
	 * be to inject a fake one, measure its timestamp and diff it against
	 * gettimeofday. We won't do that here. Instead, let's use uptime,
	 * falling back to the event report's time if the "uptime" clock is
	 * not available (legacy kernels).
	 */
	if (ras->use_uptime)
		now = record->ts / user_hz + ras->uptime_diff;
	else
		now = time(NULL);

	tm = localtime(&now);
	if (tm)
		strftime(ev.timestamp, sizeof(ev.timestamp),
			 "%Y-%m-%d %H:%M:%S %z", tm);
	trace_seq_printf(s, "%s ", ev.timestamp);

	if (pevent_get_field_val(s, event, "error_count", record, &val, 1) < 0)
		goto parse_error;
	parsed_fields++;
	ev.error_count = val;
	trace_seq_printf(s, "%d ", ev.error_count);

	if (pevent_get_field_val(s, event, "error_type", record, &val, 1) < 0)
		goto parse_error;
	parsed_fields++;
	switch (val) {
	case HW_EVENT_ERR_CORRECTED:
		ev.error_type = "Corrected";
		break;
	case HW_EVENT_ERR_UNCORRECTED:
		ev.error_type = "Uncorrected";
		break;
	case HW_EVENT_ERR_FATAL:
		ev.error_type = "Fatal";
		break;
	default:
	case HW_EVENT_ERR_INFO:
		ev.error_type = "Info";
	}
	trace_seq_puts(s, ev.error_type);
	if (ev.error_count > 1)
		trace_seq_puts(s, " errors:");
	else
		trace_seq_puts(s, " error:");

	ev.msg = pevent_get_field_raw(s, event, "msg", record, &len, 1);
	if (!ev.msg)
		goto parse_error;
	parsed_fields++;
	if (*ev.msg) {
		trace_seq_puts(s, " ");
		trace_seq_puts(s, ev.msg);
	}

	ev.label = pevent_get_field_raw(s, event, "label", record, &len, 1);
	if (!ev.label)
		goto parse_error;
	parsed_fields++;
	if (*ev.label) {
		trace_seq_puts(s, " on ");
		trace_seq_puts(s, ev.label);
	}

	trace_seq_puts(s, " (");

	if (pevent_get_field_val(s, event, "mc_index", record, &val, 1) < 0)
		goto parse_error;
	parsed_fields++;
	ev.mc_index = val;
	trace_seq_printf(s, "mc: %d", ev.mc_index);

	if (pevent_get_field_val(s, event, "top_layer", record, &val, 1) < 0)
		goto parse_error;
	parsed_fields++;
	ev.top_layer = (signed char)val;

	if (pevent_get_field_val(s, event, "middle_layer", record, &val, 1) < 0)
		goto parse_error;
	parsed_fields++;
	ev.middle_layer = (signed char)val;

	if (pevent_get_field_val(s, event, "lower_layer", record, &val, 1) < 0)
		goto parse_error;
	parsed_fields++;
	ev.lower_layer = (signed char)val;

	if (ev.top_layer >= 0 || ev.middle_layer >= 0 || ev.lower_layer >= 0) {
		if (ev.lower_layer >= 0)
			trace_seq_printf(s, " location: %d:%d:%d",
					 ev.top_layer, ev.middle_layer,
					 ev.lower_layer);
		else if (ev.middle_layer >= 0)
			trace_seq_printf(s, " location: %d:%d",
					 ev.top_layer, ev.middle_layer);
		else
			trace_seq_printf(s, " location: %d", ev.top_layer);
	}

	if (pevent_get_field_val(s, event, "address", record, &val, 1) < 0)
		goto parse_error;
	parsed_fields++;
	ev.address = val;
	if (ev.address)
		trace_seq_printf(s, " address: 0x%08llx", ev.address);

	if (pevent_get_field_val(s, event, "grain_bits", record, &val, 1) < 0)
		goto parse_error;
	parsed_fields++;
	ev.grain = val;
	trace_seq_printf(s, " grain: %lld", ev.grain);

	if (pevent_get_field_val(s, event, "syndrome", record, &val, 1) < 0)
		goto parse_error;
	parsed_fields++;
	ev.syndrome = val;
	if (val)
		trace_seq_printf(s, " syndrome: 0x%08llx", ev.syndrome);

	ev.driver_detail = pevent_get_field_raw(s, event, "driver_detail",
						record, &len, 1);
	if (!ev.driver_detail)
		goto parse_error;
	parsed_fields++;
	if (*ev.driver_detail) {
		trace_seq_puts(s, " ");
		trace_seq_puts(s, ev.driver_detail);
	}
	trace_seq_puts(s, ")");

	/* Insert data into the database */
	ras_store_mc_event(ras, &ev);

#ifdef HAVE_ABRT_REPORT
	/* Report event to ABRT */
	ras_report_mc_event(ras, &ev);
#endif

	return 0;

parse_error:
	/* FIXME: add logic here to also store parse errors in the database */
	log(ALL, LOG_ERR, "MC error handler: can't parse field #%d\n",
	    parsed_fields);
	return 0;
}
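/*
 * Illustrative sketch (not taken from the handlers above): the ras_events
 * pointer that the RAS handlers cast from their void *context argument is
 * passed as the last parameter of pevent_register_event_handler(). The
 * wrapper name and the exact system/event name strings are assumptions for
 * illustration; the real daemon's wiring may differ.
 */
static int register_ras_handlers(struct pevent *pevent, struct ras_events *ras)
{
	pevent_register_event_handler(pevent, -1, "ras", "mc_event",
				      ras_mc_event_handler, ras);
	pevent_register_event_handler(pevent, -1, "ras", "extlog_mem_event",
				      ras_extlog_mem_event_handler, ras);
	pevent_register_event_handler(pevent, -1, "mce", "mce_record",
				      ras_mce_event_handler, ras);
	return 0;
}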
void perf_process_bundle::handle_trace_point(void *trace, int cpu, uint64_t time)
{
	struct event_format *event;
	struct pevent_record rec; /* holder */
	struct format_field *field;
	unsigned long long val;
	int type;
	int ret;

	rec.data = trace;
	type = pevent_data_type(perf_event::pevent, &rec);
	event = pevent_find_event(perf_event::pevent, type);
	if (!event)
		return;

	if (time < first_stamp)
		first_stamp = time;

	if (time > last_stamp) {
		last_stamp = time;
		measurement_time = (0.0001 + last_stamp - first_stamp) / 1000000000;
	}

	if (strcmp(event->name, "sched_switch") == 0) {
		class process *old_proc = NULL;
		class process *new_proc = NULL;
		const char *next_comm;
		int next_pid;
		int prev_pid;

		field = pevent_find_any_field(event, "next_comm");
		if (!field || !(field->flags & FIELD_IS_STRING))
			return; /* ?? */

		next_comm = get_pevent_field_str(trace, event, field);

		ret = pevent_get_field_val(NULL, event, "next_pid", &rec, &val, 0);
		if (ret < 0)
			return;
		next_pid = (int)val;

		ret = pevent_get_field_val(NULL, event, "prev_pid", &rec, &val, 0);
		if (ret < 0)
			return;
		prev_pid = (int)val;

		/* find new process pointer */
		new_proc = find_create_process(next_comm, next_pid);

		/* find the old process pointer */
		while (consumer_depth(cpu) > 1)
			pop_consumer(cpu);

		if (consumer_depth(cpu) == 1)
			old_proc = (class process *)current_consumer(cpu);

		if (old_proc && strcmp(old_proc->name(), "process"))
			old_proc = NULL;

		/* retire the old process */
		if (old_proc) {
			old_proc->deschedule_thread(time, prev_pid);
			old_proc->waker = NULL;
		}

		if (consumer_depth(cpu))
			pop_consumer(cpu);

		push_consumer(cpu, new_proc);

		/* start new process */
		new_proc->schedule_thread(time, next_pid);

		if (strncmp(next_comm, "migration/", 10) &&
		    strncmp(next_comm, "kworker/", 8) &&
		    strncmp(next_comm, "kondemand/", 10)) {
			if (next_pid) {
				/* if someone woke us up, blame them instead */
				if (new_proc->waker)
					change_blame(cpu, new_proc->waker, LEVEL_PROCESS);
				else
					change_blame(cpu, new_proc, LEVEL_PROCESS);
			}
			consume_blame(cpu);
		}
		new_proc->waker = NULL;
	}
	else if (strcmp(event->name, "sched_wakeup") == 0) {
		class power_consumer *from = NULL;
		class process *dest_proc = NULL;
		class process *from_proc = NULL;
		const char *comm;
		int flags;
		int pid;

		ret = pevent_get_common_field_val(NULL, event, "common_flags", &rec, &val, 0);
		if (ret < 0)
			return;
		flags = (int)val;

		if ((flags & TRACE_FLAG_HARDIRQ) || (flags & TRACE_FLAG_SOFTIRQ)) {
			class timer *timer;

			timer = (class timer *)current_consumer(cpu);
			if (timer && strcmp(timer->name(), "timer") == 0) {
				if (strcmp(timer->handler, "delayed_work_timer_fn") &&
				    strcmp(timer->handler, "hrtimer_wakeup") &&
				    strcmp(timer->handler, "it_real_fn"))
					from = timer;
			}
			/* woken from interrupt */
			/* TODO: find the current irq handler and set "from" to that */
		}
		else
			from = current_consumer(cpu);

		field = pevent_find_any_field(event, "comm");
		if (!field || !(field->flags & FIELD_IS_STRING))
			return;

		comm = get_pevent_field_str(trace, event, field);

		ret = pevent_get_field_val(NULL, event, "pid", &rec, &val, 0);
		if (ret < 0)
			return;
		pid = (int)val;

		dest_proc = find_create_process(comm, pid);

		if (from && strcmp(from->name(), "process") != 0) {
			/* not a process doing the wakeup */
			from = NULL;
			from_proc = NULL;
		} else {
			from_proc = (class process *)from;
		}

		if (from_proc && (dest_proc->running == 0) &&
		    (dest_proc->waker == NULL) && (pid != 0) &&
		    !dont_blame_me(from_proc->comm))
			dest_proc->waker = from;
		if (from)
			dest_proc->last_waker = from;

		/* Account processes that wake up X specially */
		if (from && dest_proc && comm_is_xorg(dest_proc->comm))
			from->xwakes++;
	}
	else if (strcmp(event->name, "irq_handler_entry") == 0) {
		class interrupt *irq = NULL;
		const char *handler;
		int nr;

		field = pevent_find_any_field(event, "name");
		if (!field || !(field->flags & FIELD_IS_STRING))
			return; /* ?? */

		handler = get_pevent_field_str(trace, event, field);

		ret = pevent_get_field_val(NULL, event, "irq", &rec, &val, 0);
		if (ret < 0)
			return;
		nr = (int)val;

		irq = find_create_interrupt(handler, nr, cpu);

		push_consumer(cpu, irq);

		irq->start_interrupt(time);

		if (strstr(irq->handler, "timer") == NULL)
			change_blame(cpu, irq, LEVEL_HARDIRQ);
	}
	else if (strcmp(event->name, "irq_handler_exit") == 0) {
		class interrupt *irq = NULL;
		uint64_t t;

		/* find interrupt (top of stack) */
		irq = (class interrupt *)current_consumer(cpu);
		if (!irq || strcmp(irq->name(), "interrupt"))
			return;
		pop_consumer(cpu);
		/* retire interrupt */
		t = irq->end_interrupt(time);
		consumer_child_time(cpu, t);
	}
	else if (strcmp(event->name, "softirq_entry") == 0) {
		class interrupt *irq = NULL;
		const char *handler = NULL;
		int vec;

		ret = pevent_get_field_val(NULL, event, "vec", &rec, &val, 0);
		if (ret < 0) {
			fprintf(stderr, "softirq_entry event returned no vector number?\n");
			return;
		}
		vec = (int)val;

		if (vec <= 9)
			handler = softirqs[vec];

		if (!handler)
			return;

		irq = find_create_interrupt(handler, vec, cpu);

		push_consumer(cpu, irq);

		irq->start_interrupt(time);
		change_blame(cpu, irq, LEVEL_SOFTIRQ);
	}
	else if (strcmp(event->name, "softirq_exit") == 0) {
		class interrupt *irq = NULL;
		uint64_t t;

		irq = (class interrupt *)current_consumer(cpu);
		if (!irq || strcmp(irq->name(), "interrupt"))
			return;
		pop_consumer(cpu);
		/* pop irq */
		t = irq->end_interrupt(time);
		consumer_child_time(cpu, t);
	}
	else if (strcmp(event->name, "timer_expire_entry") == 0) {
		class timer *timer = NULL;
		uint64_t function;
		uint64_t tmr;

		ret = pevent_get_field_val(NULL, event, "function", &rec, &val, 0);
		if (ret < 0) {
			fprintf(stderr, "timer_expire_entry event returned no function value?\n");
			return;
		}
		function = (uint64_t)val;

		timer = find_create_timer(function);

		if (timer->is_deferred())
			return;

		ret = pevent_get_field_val(NULL, event, "timer", &rec, &val, 0);
		if (ret < 0) {
			fprintf(stderr, "timer_expire_entry event returned no timer?\n");
			return;
		}
		tmr = (uint64_t)val;

		push_consumer(cpu, timer);
		timer->fire(time, tmr);

		if (strcmp(timer->handler, "delayed_work_timer_fn"))
			change_blame(cpu, timer, LEVEL_TIMER);
	}
	else if (strcmp(event->name, "timer_expire_exit") == 0) {
		class timer *timer = NULL;
		uint64_t tmr;
		uint64_t t;

		ret = pevent_get_field_val(NULL, event, "timer", &rec, &val, 0);
		if (ret < 0)
			return;
		tmr = (uint64_t)val;

		timer = (class timer *)current_consumer(cpu);
		if (!timer || strcmp(timer->name(), "timer"))
			return;
		pop_consumer(cpu);
		t = timer->done(time, tmr);
		consumer_child_time(cpu, t);
	}
	else if (strcmp(event->name, "hrtimer_expire_entry") == 0) {
		class timer *timer = NULL;
		uint64_t function;
		uint64_t tmr;

		ret = pevent_get_field_val(NULL, event, "function", &rec, &val, 0);
		if (ret < 0)
			return;
		function = (uint64_t)val;

		timer = find_create_timer(function);

		ret = pevent_get_field_val(NULL, event, "hrtimer", &rec, &val, 0);
		if (ret < 0)
			return;
		tmr = (uint64_t)val;

		push_consumer(cpu, timer);
		timer->fire(time, tmr);

		if (strcmp(timer->handler, "delayed_work_timer_fn"))
			change_blame(cpu, timer, LEVEL_TIMER);
	}
	else if (strcmp(event->name, "hrtimer_expire_exit") == 0) {
		class timer *timer = NULL;
		uint64_t tmr;
		uint64_t t;

		timer = (class timer *)current_consumer(cpu);
		if (!timer || strcmp(timer->name(), "timer"))
			return;

		ret = pevent_get_field_val(NULL, event, "hrtimer", &rec, &val, 0);
		if (ret < 0)
			return;
		tmr = (uint64_t)val;

		pop_consumer(cpu);
		t = timer->done(time, tmr);
		consumer_child_time(cpu, t);
	}
	else if (strcmp(event->name, "workqueue_execute_start") == 0) {
		class work *work = NULL;
		uint64_t function;
		uint64_t wk;

		ret = pevent_get_field_val(NULL, event, "function", &rec, &val, 0);
		if (ret < 0)
			return;
		function = (uint64_t)val;

		ret = pevent_get_field_val(NULL, event, "work", &rec, &val, 0);
		if (ret < 0)
			return;
		wk = (uint64_t)val;

		work = find_create_work(function);

		push_consumer(cpu, work);
		work->fire(time, wk);

		if (strcmp(work->handler, "do_dbs_timer") != 0 &&
		    strcmp(work->handler, "vmstat_update") != 0)
			change_blame(cpu, work, LEVEL_WORK);
	}
	else if (strcmp(event->name, "workqueue_execute_end") == 0) {
		class work *work = NULL;
		uint64_t t;
		uint64_t wk;

		ret = pevent_get_field_val(NULL, event, "work", &rec, &val, 0);
		if (ret < 0)
			return;
		wk = (uint64_t)val;

		work = (class work *)current_consumer(cpu);
		if (!work || strcmp(work->name(), "work"))
			return;
		pop_consumer(cpu);
		t = work->done(time, wk);
		consumer_child_time(cpu, t);
	}
	else if (strcmp(event->name, "cpu_idle") == 0) {
		ret = pevent_get_field_val(NULL, event, "state", &rec, &val, 0);
		if (ret < 0)
			return;
		if (val == 4294967295) /* (u32)-1 marks the exit from idle */
			consume_blame(cpu);
		else
			set_wakeup_pending(cpu);
	}
	else if (strcmp(event->name, "power_start") == 0) {
		set_wakeup_pending(cpu);
	}
	else if (strcmp(event->name, "power_end") == 0) {
		consume_blame(cpu);
	}
	else if (strcmp(event->name, "i915_gem_ring_dispatch") == 0 ||
		 strcmp(event->name, "i915_gem_request_submit") == 0) {
		/* any kernel contains only one of these tracepoints;
		 * the latter one got replaced by the former one */
		class power_consumer *consumer = NULL;
		int flags;

		ret = pevent_get_common_field_val(NULL, event, "common_flags", &rec, &val, 0);
		if (ret < 0)
			return;
		flags = (int)val;

		consumer = current_consumer(cpu);
		/* currently we don't count graphic requests submitted from irq context */
		if ((flags & TRACE_FLAG_HARDIRQ) || (flags & TRACE_FLAG_SOFTIRQ))
			consumer = NULL;

		/* if we are X, and someone just woke us, account the GPU op to the waker */
		if (consumer && strcmp(consumer->name(), "process") == 0) {
			class process *proc = NULL;
			proc = (class process *)consumer;
			if (comm_is_xorg(proc->comm) && proc->last_waker)
				consumer = proc->last_waker;
		}

		if (consumer)
			consumer->gpu_ops++;
	}
	else if (strcmp(event->name, "writeback_inode_dirty") == 0) {
		static uint64_t prev_time;
		class power_consumer *consumer = NULL;
		int dev;

		consumer = current_consumer(cpu);

		ret = pevent_get_field_val(NULL, event, "dev", &rec, &val, 0);
		if (ret < 0)
			return;
		dev = (int)val;

		if (consumer && strcmp(consumer->name(), "process") == 0 && dev > 0) {
			consumer->disk_hits++;

			/* if the previous inode dirty was > 1 second ago, it becomes a hard hit */
			if ((time - prev_time) > 1000000000)
				consumer->hard_disk_hits++;

			prev_time = time;
		}
	}
}
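/*
 * Minimal sketch (hypothetical helper, not part of the code above): every
 * branch of the handlers repeats the same pevent_get_field_val() call plus a
 * "bail out on error" check. A small wrapper reduces each numeric-field read
 * to one test; the helper name and its placement are assumptions.
 */
static inline bool get_u64_field(struct event_format *event,
				 struct pevent_record *rec,
				 const char *name, unsigned long long *val)
{
	return pevent_get_field_val(NULL, event, name, rec, val, 0) >= 0;
}

/* Usage sketch inside a handler branch:
 *	if (!get_u64_field(event, &rec, "next_pid", &val))
 *		return;
 *	next_pid = (int)val;
 */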