/* Release handler for the annotate device: emits a terminating annotation
 * record (zero timestamp, zero size) so the engine knows this pid is done. */
static int annotate_release(struct inode *inode, struct file *file)
{
	/* annotations always flow through the core-0 buffer to stay ordered */
	const int cpu = 0;

	/* synchronize between cores */
	spin_lock(&annotate_lock);

	if (per_cpu(gator_buffer, cpu)[ANNOTATE_BUF] && buffer_check_space(cpu, ANNOTATE_BUF, MAXSIZE_PACK64 + 3 * MAXSIZE_PACK32)) {
		const uint32_t pid = current->pid;

		gator_buffer_write_packed_int(cpu, ANNOTATE_BUF, get_physical_cpu());
		gator_buffer_write_packed_int(cpu, ANNOTATE_BUF, pid);
		/* zero time marks this as the terminating record */
		gator_buffer_write_packed_int64(cpu, ANNOTATE_BUF, 0);
		/* zero size: no payload follows */
		gator_buffer_write_packed_int(cpu, ANNOTATE_BUF, 0);
	}

	/* Check and commit; commit is set to occur once buffer is 3/4 full */
	buffer_check(cpu, ANNOTATE_BUF, gator_get_time());

	spin_unlock(&annotate_lock);

	return 0;
}
Example #2
0
/* Probe: a core is about to migrate between clusters; take its sampling
 * timer offline before the migration happens. */
GATOR_DEFINE_PROBE(cpu_migrate_begin, TP_PROTO(u64 timestamp, u32 cpu_hwid))
{
	const int cpu = get_physical_cpu();

	/* NOTE(review): (void *)1 presumably flags a migration rather than a
	 * true hotplug event -- confirm against gator_timer_offline */
	gator_timer_offline((void *)1);
	gator_timer_offline_dispatch(cpu, true);
}
static int gator_events_irq_read(int **buffer)
{
	int len, value;
	int cpu = get_physical_cpu();

	len = 0;
	if (hardirq_enabled) {
		value = atomic_read(&per_cpu(irqCnt, cpu)[HARDIRQ]);
		atomic_sub(value, &per_cpu(irqCnt, cpu)[HARDIRQ]);

		per_cpu(irqGet, cpu)[len++] = hardirq_key;
		per_cpu(irqGet, cpu)[len++] = value;
	}

	if (softirq_enabled) {
		value = atomic_read(&per_cpu(irqCnt, cpu)[SOFTIRQ]);
		atomic_sub(value, &per_cpu(irqCnt, cpu)[SOFTIRQ]);

		per_cpu(irqGet, cpu)[len++] = softirq_key;
		per_cpu(irqGet, cpu)[len++] = value;
	}

	if (buffer)
		*buffer = per_cpu(irqGet, cpu);

	return len;
}
/* Terminate the in-progress backtrace record in the backtrace buffer. */
static void marshal_backtrace_footer(u64 time)
{
	const int cpu = get_physical_cpu();

	gator_buffer_write_packed_int(cpu, BACKTRACE_BUF, MESSAGE_END_BACKTRACE);
	/* Check and commit; commit is set to occur once buffer is 3/4 full */
	buffer_check(cpu, BACKTRACE_BUF, time);
}
static void marshal_backtrace(unsigned long address, int cookie, int in_kernel)
{
	int cpu = get_physical_cpu();

	if (cookie == 0 && !in_kernel)
		cookie = UNRESOLVED_COOKIE;
	gator_buffer_write_packed_int(cpu, BACKTRACE_BUF, cookie);
	gator_buffer_write_packed_int64(cpu, BACKTRACE_BUF, address);
}
/* Emit a COOKIE message binding the numeric cookie to its text name. */
static void marshal_cookie(int cookie, const char *text)
{
	const int cpu = get_physical_cpu();

	/* buffer_check_space already called by marshal_cookie_header */
	gator_buffer_write_packed_int(cpu, NAME_BUF, MESSAGE_COOKIE);
	gator_buffer_write_packed_int(cpu, NAME_BUF, cookie);
	gator_buffer_write_string(cpu, NAME_BUF, text);

	/* Check and commit; commit is set to occur once buffer is 3/4 full */
	buffer_check(cpu, NAME_BUF, gator_get_time());
}
Example #7
0
/* Probe: a core has finished migrating between clusters; refresh the
 * hwid->logical mapping and bring its sampling timer back online. */
GATOR_DEFINE_PROBE(cpu_migrate_finish, TP_PROTO(u64 timestamp, u32 cpu_hwid))
{
	int cpu;

	gator_update_cpu_mapping(cpu_hwid);

	// get_physical_cpu must be called after gator_update_cpu_mapping
	cpu = get_physical_cpu();
	gator_timer_online_dispatch(cpu, true);
	/* NOTE(review): (void *)1 presumably flags a migration rather than a
	 * true hotplug event -- confirm against gator_timer_online */
	gator_timer_online((void *)1);
}
/* Probe: scheduler context switch; increments this core's SCHED_SWITCH count.
 * NOTE(review): the #if matching the #endif below is outside this chunk --
 * this TP_PROTO is one branch of a kernel-version conditional. */
GATOR_DEFINE_PROBE(sched_switch, TP_PROTO(struct task_struct *prev, struct task_struct *next, unsigned int prev_ip))
#endif
{
	unsigned long flags;

	// disable interrupts to synchronize with gator_events_sched_read()
	// spinlocks not needed since percpu buffers are used
	local_irq_save(flags);
	per_cpu(schedCnt, get_physical_cpu())[SCHED_SWITCH]++;
	local_irq_restore(flags);
}
/* Start a block-counter record by writing a timestamp header.
 * Returns true if the header was written, false if the buffer was full. */
static bool marshal_event_header(u64 time)
{
	const unsigned long cpu = get_physical_cpu();
	unsigned long flags;
	bool written = false;

	local_irq_save(flags);
	if (buffer_check_space(cpu, BLOCK_COUNTER_BUF, MAXSIZE_PACK32 + MAXSIZE_PACK64)) {
		/* key of zero indicates a timestamp */
		gator_buffer_write_packed_int(cpu, BLOCK_COUNTER_BUF, 0);
		gator_buffer_write_packed_int64(cpu, BLOCK_COUNTER_BUF, time);
		written = true;
	}
	local_irq_restore(flags);

	return written;
}
/* Emit a CORE_NAME message mapping a core index and cpuid to a display name. */
static void marshal_core_name(const int core, const int cpuid, const char *name)
{
	const int cpu = get_physical_cpu();
	unsigned long irq_flags;

	local_irq_save(irq_flags);
	if (buffer_check_space(cpu, SUMMARY_BUF, MAXSIZE_PACK32 + MAXSIZE_CORE_NAME)) {
		gator_buffer_write_packed_int(cpu, SUMMARY_BUF, MESSAGE_CORE_NAME);
		gator_buffer_write_packed_int(cpu, SUMMARY_BUF, core);
		gator_buffer_write_packed_int(cpu, SUMMARY_BUF, cpuid);
		gator_buffer_write_string(cpu, SUMMARY_BUF, name);
	}
	local_irq_restore(irq_flags);
	/* Commit core names now so that they can show up in live */
	gator_commit_buffer(cpu, SUMMARY_BUF, gator_get_time());
}
/* Emit a THREAD_NAME message binding a pid to its command name. */
static void marshal_thread_name(int pid, char *name)
{
	unsigned long irq_flags;
	unsigned long cpu;
	u64 timestamp;

	local_irq_save(irq_flags);
	cpu = get_physical_cpu();
	timestamp = gator_get_time();
	if (buffer_check_space(cpu, NAME_BUF, TASK_COMM_LEN + 3 * MAXSIZE_PACK32 + MAXSIZE_PACK64)) {
		gator_buffer_write_packed_int(cpu, NAME_BUF, MESSAGE_THREAD_NAME);
		gator_buffer_write_packed_int64(cpu, NAME_BUF, timestamp);
		gator_buffer_write_packed_int(cpu, NAME_BUF, pid);
		gator_buffer_write_string(cpu, NAME_BUF, name);
	}
	local_irq_restore(irq_flags);
	/* Check and commit; commit is set to occur once buffer is 3/4 full */
	buffer_check(cpu, NAME_BUF, timestamp);
}
/* Emit an idle-state transition record for the given core. */
static void marshal_idle(int core, int state)
{
	unsigned long irq_flags;
	unsigned long cpu;
	u64 timestamp;

	local_irq_save(irq_flags);
	cpu = get_physical_cpu();
	timestamp = gator_get_time();
	if (buffer_check_space(cpu, IDLE_BUF, MAXSIZE_PACK64 + 2 * MAXSIZE_PACK32)) {
		gator_buffer_write_packed_int(cpu, IDLE_BUF, state);
		gator_buffer_write_packed_int64(cpu, IDLE_BUF, timestamp);
		gator_buffer_write_packed_int(cpu, IDLE_BUF, core);
	}
	local_irq_restore(irq_flags);
	/* Check and commit; commit is set to occur once buffer is 3/4 full */
	buffer_check(cpu, IDLE_BUF, timestamp);
}
/* Emit a single 64-bit counter sample (time, core, key, value). */
static void __maybe_unused marshal_event_single64(int core, int key, long long value)
{
	unsigned long irq_flags;
	unsigned long cpu;
	u64 timestamp;

	local_irq_save(irq_flags);
	cpu = get_physical_cpu();
	timestamp = gator_get_time();
	if (buffer_check_space(cpu, COUNTER_BUF, 2 * MAXSIZE_PACK64 + 2 * MAXSIZE_PACK32)) {
		gator_buffer_write_packed_int64(cpu, COUNTER_BUF, timestamp);
		gator_buffer_write_packed_int(cpu, COUNTER_BUF, core);
		gator_buffer_write_packed_int(cpu, COUNTER_BUF, key);
		gator_buffer_write_packed_int64(cpu, COUNTER_BUF, value);
	}
	local_irq_restore(irq_flags);
	/* Check and commit; commit is set to occur once buffer is 3/4 full */
	buffer_check(cpu, COUNTER_BUF, timestamp);
}
/* Begin a backtrace record (time, exec cookie, tgid, pid).
 * Returns true when the header was written and frames may follow; returns
 * false when there was not enough room for the header plus the deepest
 * possible backtrace, in which case the buffer is checked/committed. */
static bool marshal_backtrace_header(int exec_cookie, int tgid, int pid, u64 time)
{
	const int cpu = get_physical_cpu();
	const int needed = MAXSIZE_PACK64 + 5 * MAXSIZE_PACK32 + gator_backtrace_depth * 2 * MAXSIZE_PACK32;

	if (buffer_check_space(cpu, BACKTRACE_BUF, needed)) {
		gator_buffer_write_packed_int64(cpu, BACKTRACE_BUF, time);
		gator_buffer_write_packed_int(cpu, BACKTRACE_BUF, exec_cookie);
		gator_buffer_write_packed_int(cpu, BACKTRACE_BUF, tgid);
		gator_buffer_write_packed_int(cpu, BACKTRACE_BUF, pid);
		return true;
	}

	/* Check and commit; commit is set to occur once buffer is 3/4 full */
	buffer_check(cpu, BACKTRACE_BUF, time);
	return false;
}
/* Emit a LINK message associating a cookie with a thread (tgid/pid). */
static void marshal_link(int cookie, int tgid, int pid)
{
	const unsigned long cpu = get_physical_cpu();
	unsigned long irq_flags;
	u64 timestamp;

	local_irq_save(irq_flags);
	timestamp = gator_get_time();
	if (buffer_check_space(cpu, ACTIVITY_BUF, MAXSIZE_PACK64 + 5 * MAXSIZE_PACK32)) {
		gator_buffer_write_packed_int(cpu, ACTIVITY_BUF, MESSAGE_LINK);
		gator_buffer_write_packed_int64(cpu, ACTIVITY_BUF, timestamp);
		gator_buffer_write_packed_int(cpu, ACTIVITY_BUF, cookie);
		gator_buffer_write_packed_int(cpu, ACTIVITY_BUF, tgid);
		gator_buffer_write_packed_int(cpu, ACTIVITY_BUF, pid);
	}
	local_irq_restore(irq_flags);
	/* Check and commit; commit is set to occur once buffer is 3/4 full */
	buffer_check(cpu, ACTIVITY_BUF, timestamp);
}
/* Emit a SCHED_EXIT message for a terminating task.
 * tgid is accepted but not written -- presumably kept for signature
 * symmetry with the other sched trace marshal functions; verify. */
static void marshal_sched_trace_exit(int tgid, int pid)
{
	const unsigned long cpu = get_physical_cpu();
	unsigned long irq_flags;
	u64 timestamp;

	if (!per_cpu(gator_buffer, cpu)[SCHED_TRACE_BUF])
		return;

	local_irq_save(irq_flags);
	timestamp = gator_get_time();
	if (buffer_check_space(cpu, SCHED_TRACE_BUF, MAXSIZE_PACK64 + 2 * MAXSIZE_PACK32)) {
		gator_buffer_write_packed_int(cpu, SCHED_TRACE_BUF, MESSAGE_SCHED_EXIT);
		gator_buffer_write_packed_int64(cpu, SCHED_TRACE_BUF, timestamp);
		gator_buffer_write_packed_int(cpu, SCHED_TRACE_BUF, pid);
	}
	local_irq_restore(irq_flags);
	/* Check and commit; commit is set to occur once buffer is 3/4 full */
	buffer_check(cpu, SCHED_TRACE_BUF, timestamp);
}
Example #17
0
/* Stack-walker callback: records one backtrace frame per invocation.
 * d points at the remaining depth budget; returns nonzero (stop walking)
 * once the budget reaches zero. */
static int report_trace(struct stackframe *frame, void *d)
{
	unsigned int *depth = d, cookie = NO_COOKIE;
	unsigned long addr = frame->pc;

	if (*depth) {
#if defined(MODULE)
		unsigned int cpu = get_physical_cpu();
		struct module *mod = __module_address(addr);
		if (mod) {
			cookie = get_cookie(cpu, current, mod->name, false);
			/* report module addresses relative to the module load base */
			addr = addr - (unsigned long)mod->module_core;
		}
#endif
		/* addr & ~1 clears the low bit -- presumably the ARM Thumb state
		 * bit; confirm. NOTE(review): this call passes two arguments while
		 * the marshal_backtrace defined elsewhere in this file takes
		 * three -- likely fragments from different driver versions. */
		marshal_backtrace(addr & ~1, cookie);
		(*depth)--;
	}

	return *depth == 0;
}
static int gator_events_sched_read(int **buffer)
{
	unsigned long flags;
	int len, value;
	int cpu = get_physical_cpu();

	len = 0;
	if (sched_switch_enabled) {
		local_irq_save(flags);
		value = per_cpu(schedCnt, cpu)[SCHED_SWITCH];
		per_cpu(schedCnt, cpu)[SCHED_SWITCH] = 0;
		local_irq_restore(flags);
		per_cpu(schedGet, cpu)[len++] = sched_switch_key;
		per_cpu(schedGet, cpu)[len++] = value;
	}

	if (buffer)
		*buffer = per_cpu(schedGet, cpu);

	return len;
}
/* Core-online hook: zero the irq counters and report zeroed key/value
 * pairs so the host starts this core's series from a known baseline. */
static int gator_events_irq_online(int **buffer, bool migrate)
{
	const int cpu = get_physical_cpu();
	int count = 0;

	/* synchronization with the irq_exit functions is not necessary as the
	 * values are being reset */
	if (hardirq_enabled) {
		atomic_set(&per_cpu(irqCnt, cpu)[HARDIRQ], 0);
		per_cpu(irqGet, cpu)[count++] = hardirq_key;
		per_cpu(irqGet, cpu)[count++] = 0;
	}

	if (softirq_enabled) {
		atomic_set(&per_cpu(irqCnt, cpu)[SOFTIRQ], 0);
		per_cpu(irqGet, cpu)[count++] = softirq_key;
		per_cpu(irqGet, cpu)[count++] = 0;
	}

	if (buffer != NULL)
		*buffer = per_cpu(irqGet, cpu);

	return count;
}
/* Write a batch of 64-bit (key, value) counter pairs to the block counter
 * buffer. Odd-length input is rejected; pairs that do not fit are dropped. */
static void marshal_event64(int len, long long *buffer64)
{
	const unsigned long cpu = get_physical_cpu();
	unsigned long irq_flags, pair;

	if (len <= 0)
		return;

	/* length must be even since all data is a (key, value) pair */
	if (len & 0x1) {
		pr_err("gator: invalid counter data detected and discarded\n");
		return;
	}

	/* events must be written in key,value pairs */
	local_irq_save(irq_flags);
	for (pair = 0; pair < len; pair += 2) {
		/* stop rather than split a pair when the buffer fills */
		if (!buffer_check_space(cpu, BLOCK_COUNTER_BUF, 2 * MAXSIZE_PACK64))
			break;
		gator_buffer_write_packed_int64(cpu, BLOCK_COUNTER_BUF, buffer64[pair]);
		gator_buffer_write_packed_int64(cpu, BLOCK_COUNTER_BUF, buffer64[pair + 1]);
	}
	local_irq_restore(irq_flags);
}
/* Emit a SWITCH message recording an activity/state change for a task.
 *
 * Fix: the body writes SIX packed 32-bit values (MESSAGE_SWITCH, core, key,
 * activity, pid, state) plus one packed 64-bit timestamp, but the original
 * reserved only MAXSIZE_PACK64 + 5 * MAXSIZE_PACK32 -- one MAXSIZE_PACK32
 * short of the worst case, risking an overrun of the checked reservation.
 * Reserve 6 * MAXSIZE_PACK32 to match the writes. */
static void marshal_activity_switch(int core, int key, int activity, int pid, int state)
{
	unsigned long cpu = get_physical_cpu(), flags;
	u64 time;

	if (!per_cpu(gator_buffer, cpu)[ACTIVITY_BUF])
		return;

	local_irq_save(flags);
	time = gator_get_time();
	/* 6 packed 32-bit fields + 1 packed 64-bit timestamp written below */
	if (buffer_check_space(cpu, ACTIVITY_BUF, MAXSIZE_PACK64 + 6 * MAXSIZE_PACK32)) {
		gator_buffer_write_packed_int(cpu, ACTIVITY_BUF, MESSAGE_SWITCH);
		gator_buffer_write_packed_int64(cpu, ACTIVITY_BUF, time);
		gator_buffer_write_packed_int(cpu, ACTIVITY_BUF, core);
		gator_buffer_write_packed_int(cpu, ACTIVITY_BUF, key);
		gator_buffer_write_packed_int(cpu, ACTIVITY_BUF, activity);
		gator_buffer_write_packed_int(cpu, ACTIVITY_BUF, pid);
		gator_buffer_write_packed_int(cpu, ACTIVITY_BUF, state);
	}
	local_irq_restore(flags);
	/* Check and commit; commit is set to occur once buffer is 3/4 full */
	buffer_check(cpu, ACTIVITY_BUF, time);
}
/* Write handler for the gator annotate device.
 * Copies a user-space annotation into the single (core 0) annotation buffer,
 * prefixed with a header of originating cpu, pid, timestamp and payload size.
 * Blocks (never in interrupt context) until buffer space is available or
 * collection stops. Returns the number of payload bytes accepted, or a
 * negative errno. */
static ssize_t annotate_write(struct file *file, char const __user *buf, size_t count_orig, loff_t *offset)
{
	/* count is count_orig masked to a non-negative int so the signed size
	 * arithmetic below cannot go negative */
	int pid, cpu, header_size, available, contiguous, length1, length2, size, count = count_orig & 0x7fffffff;
	bool interrupt_context;

	if (*offset)
		return -EINVAL;

	interrupt_context = in_interrupt();
	/* Annotations are not supported in interrupt context, but may work
	 * if you comment out the the next four lines of code. By doing so,
	 * annotations in interrupt context can result in deadlocks and lost
	 * data.
	 */
	if (interrupt_context) {
		pr_warning("gator: Annotations are not supported in interrupt context. Edit gator_annotate.c in the gator driver to enable annotations in interrupt context.\n");
		return -EINVAL;
	}

 retry:
	/* synchronize between cores and with collect_annotations */
	spin_lock(&annotate_lock);

	if (!collect_annotations) {
		/* Not collecting annotations, tell the caller everything was written */
		size = count_orig;
		goto annotate_write_out;
	}

	/* Annotation only uses a single per-cpu buffer as the data must be in order to the engine */
	cpu = 0;

        if (current == NULL)
		pid = 0;
	else
		pid = current->pid;

	/* determine total size of the payload */
	header_size = MAXSIZE_PACK32 * 3 + MAXSIZE_PACK64;
	available = buffer_bytes_available(cpu, ANNOTATE_BUF) - header_size;
	size = count < available ? count : available;

	if (size <= 0) {
		/* Buffer is full, wait until space is available */
		spin_unlock(&annotate_lock);

		/* Drop the annotation as blocking is not allowed in interrupt context */
		if (interrupt_context)
			return -EINVAL;

		/* sleep until the reader frees space or collection stops */
		wait_event_interruptible(gator_annotate_wait, buffer_bytes_available(cpu, ANNOTATE_BUF) > header_size || !collect_annotations);

		/* Check to see if a signal is pending */
		if (signal_pending(current))
			return -EINTR;

		goto retry;
	}

	/* synchronize shared variables annotateBuf and annotatePos */
	if (per_cpu(gator_buffer, cpu)[ANNOTATE_BUF]) {
		u64 time = gator_get_time();

		/* header: originating cpu, pid, timestamp, payload size */
		gator_buffer_write_packed_int(cpu, ANNOTATE_BUF, get_physical_cpu());
		gator_buffer_write_packed_int(cpu, ANNOTATE_BUF, pid);
		gator_buffer_write_packed_int64(cpu, ANNOTATE_BUF, time);
		gator_buffer_write_packed_int(cpu, ANNOTATE_BUF, size);

		/* determine the sizes to capture, length1 + length2 will equal size */
		contiguous = contiguous_space_available(cpu, ANNOTATE_BUF);
		if (size < contiguous) {
			/* payload fits without wrapping the ring buffer */
			length1 = size;
			length2 = 0;
		} else {
			/* payload wraps: copy to the end, then the remainder */
			length1 = contiguous;
			length2 = size - contiguous;
		}

		if (annotate_copy(file, buf, length1) != 0) {
			size = -EINVAL;
			goto annotate_write_out;
		}

		if (length2 > 0 && annotate_copy(file, &buf[length1], length2) != 0) {
			size = -EINVAL;
			goto annotate_write_out;
		}

		/* Check and commit; commit is set to occur once buffer is 3/4 full */
		buffer_check(cpu, ANNOTATE_BUF, time);
	}

annotate_write_out:
	spin_unlock(&annotate_lock);

	/* return the number of bytes written */
	return size;
}
/* Collect memory statistics for the process owning @task and place them as
 * (key, value) pairs in this core's proc_buffer; *buffer is pointed at it.
 * Returns the number of entries written (0 if disabled or task has no mm).
 *
 * Fix: the switch over proc counter indices had no default, so any enabled
 * index outside the four handled cases would have emitted an uninitialized
 * `value`; such indices are now skipped defensively. */
static int gator_events_meminfo_read_proc(long long **buffer, struct task_struct *task)
{
	struct mm_struct *mm;
	u64 share = 0;
	int i;
	long long value;
	int len = 0;
	int cpu = get_physical_cpu();
	long long *buf = per_cpu(proc_buffer, cpu);

	if (!proc_global_enabled)
		return 0;

	/* Collect the memory stats of the process instead of the thread */
	if (task->group_leader != NULL)
		task = task->group_leader;

	/* get_task_mm/mmput is not needed in this context because the task and it's mm are required as part of the sched_switch */
	mm = task->mm;
	if (mm == NULL)
		return 0;

	/* Derived from task_statm in fs/proc/task_mmu.c */
	if (meminfo_enabled[MEMINFO_MEMUSED] || proc_enabled[PROC_SHARE]) {
		share = get_mm_counter(mm,
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 32) && LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 34)
							   file_rss
#else
							   MM_FILEPAGES
#endif
							   );
	}

	/* key of 1 indicates a pid */
	buf[len++] = 1;
	buf[len++] = task->pid;

	for (i = 0; i < PROC_COUNT; ++i) {
		if (proc_enabled[i]) {
			switch (i) {
			case PROC_SIZE:
				value = mm->total_vm;
				break;
			case PROC_SHARE:
				value = share;
				break;
			case PROC_TEXT:
				value = (PAGE_ALIGN(mm->end_code) - (mm->start_code & PAGE_MASK)) >> PAGE_SHIFT;
				break;
			case PROC_DATA:
				value = mm->total_vm - mm->shared_vm;
				break;
			default:
				/* Defensive: unknown counter index -- skip it rather
				 * than emit an uninitialized value */
				continue;
			}

			buf[len++] = proc_keys[i];
			buf[len++] = value * PAGE_SIZE;
		}
	}

	if (meminfo_enabled[MEMINFO_MEMUSED]) {
		value = share + get_mm_counter(mm,
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 32) && LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 34)
									   anon_rss
#else
									   MM_ANONPAGES
#endif
									   );
		/* Send resident for this pid */
		buf[len++] = meminfo_keys[MEMINFO_MEMUSED];
		buf[len++] = value * PAGE_SIZE;
	}

	/* Clear pid */
	buf[len++] = 1;
	buf[len++] = 0;

	if (buffer)
		*buffer = buf;

	return len;
}
/* Probe: a softirq handler has completed; count it for this core.
 * NOTE(review): the #if matching the #endif below is outside this chunk --
 * this TP_PROTO is one branch of a kernel-version conditional. */
GATOR_DEFINE_PROBE(softirq_exit, TP_PROTO(unsigned int vec_nr))
#endif
{
	atomic_inc(&per_cpu(irqCnt, get_physical_cpu())[SOFTIRQ]);
}
/* Probe: a hardware interrupt handler has returned; count it for this core. */
GATOR_DEFINE_PROBE(irq_handler_exit,
		   TP_PROTO(int irq, struct irqaction *action, int ret))
{
	const int cpu = get_physical_cpu();

	atomic_inc(&per_cpu(irqCnt, cpu)[HARDIRQ]);
}
/* Check whether the name buffer has room for a cookie message carrying
 * @text; called before marshal_cookie, which does no space check itself. */
static bool marshal_cookie_header(const char *text)
{
	const int cpu = get_physical_cpu();
	const size_t needed = strlen(text) + 3 * MAXSIZE_PACK32;

	return buffer_check_space(cpu, NAME_BUF, needed);
}