static inline int __insert_record(struct t_buf *buf,
                                  unsigned long event,
                                  int extra,
                                  int cycles,
                                  int rec_size,
                                  unsigned char *extra_data)
{
    struct t_rec *rec;
    unsigned char *dst;
    unsigned long extra_word = extra / sizeof(u32);
    int local_rec_size = calc_rec_size(cycles, extra);
    uint32_t next;

    BUG_ON(local_rec_size != rec_size);
    BUG_ON(extra & 3);          /* payload must be a multiple of u32 */

    /* Double-check once more that we have enough space.
     * Don't bugcheck here, in case the userland tool is doing
     * something stupid. */
    if ( calc_bytes_avail(buf) < rec_size )
    {
        printk("%s: %u bytes left (%u - ((%u - %u) %% %u) recsize %u.\n",
               __func__,
               calc_bytes_avail(buf),
               data_size, buf->prod, buf->cons, data_size, rec_size);
        return 0;
    }
    rmb();

    /* Fill in the record header, then point dst at the payload area;
     * including a timestamp shifts the payload by two extra words. */
    rec = next_record(buf);
    rec->event = event;
    rec->extra_u32 = extra_word;
    dst = (unsigned char *)rec->u.nocycles.extra_u32;
    if ( (rec->cycles_included = cycles) != 0 )
    {
        u64 tsc = (u64)get_cycles();
        rec->u.cycles.cycles_lo = (uint32_t)tsc;
        rec->u.cycles.cycles_hi = (uint32_t)(tsc >> 32);
        dst = (unsigned char *)rec->u.cycles.extra_u32;
    }

    /* Copy the payload, then publish the record by advancing the
     * producer offset; prod/cons wrap at twice the buffer size. */
    if ( extra_data && extra )
        memcpy(dst, extra_data, extra);

    wmb();

    next = buf->prod + rec_size;
    if ( next >= 2 * data_size )
        next -= 2 * data_size;
    ASSERT(next < 2 * data_size);
    buf->prod = next;

    return rec_size;
}
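/*
 * For context, a minimal sketch of the two helpers __insert_record() leans
 * on. These are illustrative reconstructions, not the verbatim Xen
 * definitions: calc_rec_size() sizes a record as a one-word header, an
 * optional 64-bit cycle counter, and the extra payload, while
 * calc_bytes_avail() derives free space from the producer/consumer offsets,
 * which wrap at 2*data_size so that a full buffer can be told apart from an
 * empty one. calc_unconsumed_bytes() is a hypothetical helper name.
 */
static inline int calc_rec_size(int cycles, int extra)
{
    int rec_size = 4;            /* 32-bit record header */

    if ( cycles )
        rec_size += 8;           /* cycles_lo + cycles_hi */
    return rec_size + extra;     /* caller-supplied payload, u32-aligned */
}

static inline int calc_unconsumed_bytes(struct t_buf *buf)
{
    int x = buf->prod - buf->cons;

    if ( x < 0 )
        x += 2 * data_size;      /* offsets wrap at twice the buffer size */
    return x;
}

static inline int calc_bytes_avail(struct t_buf *buf)
{
    return data_size - calc_unconsumed_bytes(buf);
}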
/* Marker probe handler: unpack the trace marker's va_list into a
 * fixed-layout record and hand it to relay for the userspace reader. */
static void kvm_add_trace(void *probe_private, void *call_data,
			  const char *format, va_list *args)
{
	struct kvm_trace_probe *p = probe_private;
	struct kvm_trace *kt = kvm_trace;
	struct kvm_trace_rec rec;
	struct kvm_vcpu *vcpu;
	int i, size;
	u32 extra;

	if (unlikely(kt->trace_state != KVM_TRACE_STATE_RUNNING))
		return;

	/* The first two variadic arguments are the event id and the vcpu. */
	rec.rec_val	= TRACE_REC_EVENT_ID(va_arg(*args, u32));
	vcpu		= va_arg(*args, struct kvm_vcpu *);
	rec.pid		= current->tgid;
	rec.vcpu_id	= vcpu->vcpu_id;

	/* Then the count of extra u32 arguments, clamped to what fits. */
	extra		= va_arg(*args, u32);
	WARN_ON(!(extra <= KVM_TRC_EXTRA_MAX));
	extra		= min_t(u32, extra, KVM_TRC_EXTRA_MAX);
	rec.rec_val    |= TRACE_REC_TCS(p->timestamp_in)
			| TRACE_REC_NUM_DATA_ARGS(extra);

	if (p->timestamp_in) {
		rec.u.timestamp.timestamp = ktime_to_ns(ktime_get());

		for (i = 0; i < extra; i++)
			rec.u.timestamp.extra_u32[i] = va_arg(*args, u32);
	} else {
		for (i = 0; i < extra; i++)
			rec.u.notimestamp.extra_u32[i] = va_arg(*args, u32);
	}

	/* Only write as many bytes as this record actually uses. */
	size = calc_rec_size(p->timestamp_in, extra * sizeof(u32));
	relay_write(kt->rchan, &rec, size);
}
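/*
 * For context, a sketch of the record layout kvm_add_trace() assumes; the
 * constants and field packing here are illustrative, not the verbatim
 * kvm_trace definitions. rec_val packs the event id into the low 28 bits,
 * the count of extra u32 arguments into bits 28-30, and a "timestamp
 * included" flag into bit 31, which is what the TRACE_REC_* macros encode.
 * calc_rec_size() then truncates the written record to the bytes actually
 * used, so relay only carries the header, the optional timestamp, and the
 * populated extra words.
 */
#define KVM_TRC_EXTRA_MAX		7	/* max extra u32 args per record */
#define KVM_TRC_HEAD_SIZE		12	/* rec_val + pid + vcpu_id */
#define KVM_TRC_CYCLE_SIZE		8	/* 64-bit timestamp */

#define TRACE_REC_EVENT_ID(val)		(0x0fffffff & (val))
#define TRACE_REC_NUM_DATA_ARGS(val)	(0x70000000 & ((val) << 28))
#define TRACE_REC_TCS(val)		(0x80000000 & ((val) << 31))

struct kvm_trace_rec {
	u32 rec_val;			/* event id | nr extra args | tsc flag */
	u32 pid;			/* tgid of the task that hit the marker */
	u32 vcpu_id;
	union {
		struct {
			u64 timestamp;
			u32 extra_u32[KVM_TRC_EXTRA_MAX];
		} __attribute__((packed)) timestamp;
		struct {
			u32 extra_u32[KVM_TRC_EXTRA_MAX];
		} notimestamp;
	} u;
};

static inline int calc_rec_size(int timestamp, int extra)
{
	int rec_size = KVM_TRC_HEAD_SIZE + extra;

	return timestamp ? rec_size + KVM_TRC_CYCLE_SIZE : rec_size;
}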