static void __add_timestamp_user(struct timestamp *pre_recorded) { unsigned int seq_no; struct timestamp *ts; seq_no = fetch_and_inc((int *) &ts_seq_no); if (ft_buffer_start_write(trace_ts_buf, (void**) &ts)) { *ts = *pre_recorded; ts->seq_no = seq_no; __save_irq_flags(ts); ft_buffer_finish_write(trace_ts_buf, ts); } }
/*
 * Allocate a scheduler-event record from the current CPU's local
 * buffer and pre-fill its header (type, CPU, and — if a task is
 * given — pid and job number).
 *
 * On success, preemption remains disabled (get_cpu_var() without the
 * matching put_cpu_var()) until the caller hands the record back;
 * on failure, preemption is re-enabled here and NULL is returned.
 */
static inline struct st_event_record* get_record(u8 type, struct task_struct* t)
{
	struct st_event_record *rec = NULL;
	struct local_buffer *buf = &get_cpu_var(st_event_buffer);

	if (!ft_buffer_start_write(&buf->ftbuf, (void **) &rec)) {
		/* no free slot: drop the event and release the CPU var */
		put_cpu_var(st_event_buffer);
		return NULL;
	}

	rec->hdr.type = type;
	rec->hdr.cpu  = smp_processor_id();
	rec->hdr.pid  = t ? t->pid : 0;
	rec->hdr.job  = t ? t->rt_param.job_params.job_no : 0;

	return rec;
}
/*
 * Record a timestamp event attributed to the given CPU and task type.
 * All metadata fields are written first; the barrier() then stops the
 * compiler from hoisting ft_timestamp() above them, so the cycle
 * counter is read as late as possible before the record is published.
 */
static inline void __save_timestamp_cpu(unsigned long event,
					uint8_t type, uint8_t cpu)
{
	struct timestamp *slot;
	unsigned int seq = fetch_and_inc((int *) &ts_seq_no);

	if (ft_buffer_start_write(trace_ts_buf, (void **) &slot)) {
		slot->event     = event;
		slot->seq_no    = seq;
		slot->cpu       = cpu;
		slot->task_type = type;
		__save_irq_flags(slot);
		barrier(); /* prevent re-ordering of ft_timestamp() */
		slot->timestamp = ft_timestamp();
		ft_buffer_finish_write(trace_ts_buf, slot);
	}
}
feather_callback void save_task_latency(unsigned long event, unsigned long when_ptr) { lt_t now = litmus_clock(); lt_t *when = (lt_t*) when_ptr; unsigned int seq_no; int cpu = raw_smp_processor_id(); struct timestamp *ts; seq_no = fetch_and_inc((int *) &ts_seq_no); if (ft_buffer_start_write(trace_ts_buf, (void**) &ts)) { ts->event = event; ts->timestamp = now - *when; ts->seq_no = seq_no; ts->cpu = cpu; ts->task_type = TSK_RT; __save_irq_flags(ts); ft_buffer_finish_write(trace_ts_buf, ts); } }