Example #1
void oprofile_shutdown(void)
{
	down(&start_sem);
	sync_stop();
	if (oprofile_ops.shutdown)
		oprofile_ops.shutdown();
	is_setup = 0;
	free_event_buffer();
	free_cpu_buffers();
	up(&start_sem);
}
Example #2
int oprofile_setup(void)
{
	int err;

	mutex_lock(&start_mutex);

	if ((err = alloc_cpu_buffers()))
		goto out;

	if ((err = alloc_event_buffer()))
		goto out1;

	if (oprofile_ops.setup && (err = oprofile_ops.setup()))
		goto out2;

	/* Note even though this starts part of the
	 * profiling overhead, it's necessary to prevent
	 * us missing task deaths and eventually oopsing
	 * when trying to process the event buffer.
	 */
	if (oprofile_ops.sync_start) {
		int sync_ret = oprofile_ops.sync_start();
		switch (sync_ret) {
		case 0:
			goto post_sync;
		case 1:
			goto do_generic;
		case -1:
			goto out3;
		default:
			goto out3;
		}
	}
do_generic:
	if ((err = sync_start()))
		goto out3;

post_sync:
	is_setup = 1;
	mutex_unlock(&start_mutex);
	return 0;

out3:
	if (oprofile_ops.shutdown)
		oprofile_ops.shutdown();
out2:
	free_event_buffer();
out1:
	free_cpu_buffers();
out:
	mutex_unlock(&start_mutex);
	return err;
}
Example #3
static int px_tp_d_release(struct inode *inode, struct file *fp)
{
	g_client_count--;

	if (g_client_count == 0)
	{
		/* stop profiling in case it is still running */
		stop_profiling();

		/* free buffers in case they are not freed */
		free_event_buffer();

		free_module_buffer();
	}

	return 0;
}
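The release handler only tears the buffers down when the last client goes away, which implies a matching open handler that increments the same counter. That handler is not part of the example; a minimal sketch, assuming the same g_client_count global and naming convention:

/* Hypothetical counterpart to px_tp_d_release() (not shown in the
 * example): track how many clients hold the device open so release()
 * only frees the buffers when the last one goes away. */
static int px_tp_d_open(struct inode *inode, struct file *fp)
{
	g_client_count++;
	return 0;
}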
Example #4
/*
 * allocate event buffer
 */
static int allocate_event_buffer(unsigned int *size)
{
	unsigned int buffer_size;
	void * address;

	if (copy_from_user(&buffer_size, size, sizeof(unsigned int)) != 0)
	{
		return -EFAULT;
	}

	buffer_size = PAGE_ALIGN(buffer_size);

	/* free kernel buffers if it is already allocated */
	if (g_event_buffer.buffer.address != 0)
	{
		free_event_buffer();
	}

	//address = __vmalloc(buffer_size, GFP_KERNEL, pgprot_noncached(PAGE_KERNEL));
	address = vmalloc(buffer_size);

	if (address == NULL)
	{
		return -ENOMEM;
	}

	g_event_buffer.buffer.address      = address;
	g_event_buffer.buffer.size         = buffer_size;
	g_event_buffer.buffer.read_offset  = 0;
	g_event_buffer.buffer.write_offset = 0;
//	g_event_buffer.buffer.is_data_lost = false;
	g_event_buffer.is_full_event_set   = false;

	if (copy_to_user(size, &buffer_size, sizeof(unsigned int)) != 0)
	{
		return -EFAULT;
	}

	return 0;
}
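allocate_event_buffer() records the vmalloc'd area in g_event_buffer, so the free_event_buffer() it calls, and that the other examples rely on, presumably just reverses those steps. A minimal sketch under that assumption (the body is inferred from the fields set above, not taken from the driver):

/* Hypothetical sketch of the matching free routine, assuming the same
 * g_event_buffer layout as allocate_event_buffer() above: release the
 * vmalloc'd area and clear the bookkeeping fields. */
static int free_event_buffer(void)
{
	if (g_event_buffer.buffer.address != 0)
		vfree(g_event_buffer.buffer.address);

	g_event_buffer.buffer.address      = 0;
	g_event_buffer.buffer.size         = 0;
	g_event_buffer.buffer.read_offset  = 0;
	g_event_buffer.buffer.write_offset = 0;
	g_event_buffer.is_full_event_set   = false;

	return 0;
}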
Example #5
int oprofile_setup(void)
{
	int err;
 
	down(&start_sem);

	if ((err = alloc_cpu_buffers()))
		goto out;

	if ((err = alloc_event_buffer()))
		goto out1;
 
	if (oprofile_ops.setup && (err = oprofile_ops.setup()))
		goto out2;
 
	/* Note even though this starts part of the
	 * profiling overhead, it's necessary to prevent
	 * us missing task deaths and eventually oopsing
	 * when trying to process the event buffer.
	 */
	if ((err = sync_start()))
		goto out3;

	is_setup = 1;
	up(&start_sem);
	return 0;
 
out3:
	if (oprofile_ops.shutdown)
		oprofile_ops.shutdown();
out2:
	free_event_buffer();
out1:
	free_cpu_buffers();
out:
	up(&start_sem);
	return err;
}
Example #6
void oprofile_shutdown(void)
{
	mutex_lock(&start_mutex);
	if (oprofile_ops.sync_stop) {
		int sync_ret = oprofile_ops.sync_stop();
		switch (sync_ret) {
		case 0:
			goto post_sync;
		case 1:
			goto do_generic;
		default:
			goto post_sync;
		}
	}
do_generic:
	sync_stop();
post_sync:
	if (oprofile_ops.shutdown)
		oprofile_ops.shutdown();
	is_setup = 0;
	free_event_buffer();
	free_cpu_buffers();
	mutex_unlock(&start_mutex);
}
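Example #2 and Example #6 call an architecture-supplied sync_start/sync_stop hook first and only fall through to the generic sync_start()/sync_stop() when the hook returns 1. A minimal sketch of how a backend might install such hooks (the hook names and bodies are hypothetical; only the oprofile_operations fields and the 0/1/negative return convention come from the examples above):

#include <linux/oprofile.h>

/* Hypothetical arch backend wiring its own sync hooks into
 * oprofile_operations.  Returning 1 asks the core to also run the
 * generic sync_start()/sync_stop() path; 0 means "handled here";
 * a negative value from sync_start aborts oprofile_setup(). */
static int my_arch_sync_start(void)
{
	/* arch-specific tracing setup would go here */
	return 1;	/* also run the generic sync code */
}

static int my_arch_sync_stop(void)
{
	/* arch-specific teardown would go here */
	return 1;
}

int __init oprofile_arch_init(struct oprofile_operations *ops)
{
	ops->sync_start = my_arch_sync_start;
	ops->sync_stop  = my_arch_sync_stop;
	return 0;
}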
Example #7
static long px_tp_d_ioctl(struct file *fp,
                          unsigned int cmd,
                          unsigned long arg)
{
	switch (cmd)
	{
	case PX_TP_CMD_START_MODULE_TRACKING:
		return start_module_tracking();

	case PX_TP_CMD_START_SAMPLING:
		return start_sampling((bool *)arg);

	case PX_TP_CMD_STOP_PROFILING:
		return stop_profiling();

	case PX_TP_CMD_PAUSE_PROFILING:
		return pause_profiling();

	case PX_TP_CMD_RESUME_PROFILING:
		return resume_profiling();

	case PX_TP_CMD_ALLOC_EVENT_BUFFER:
		return allocate_event_buffer((unsigned int *)arg);

	case PX_TP_CMD_ALLOC_MODULE_BUFFER:
		return allocate_module_buffer((unsigned int *)arg);

	case PX_TP_CMD_FREE_EVENT_BUFFER:
		return free_event_buffer();

	case PX_TP_CMD_FREE_MODULE_BUFFER:
		return free_module_buffer();

	case PX_TP_CMD_SET_AUTO_LAUNCH_APP_PID:
		return set_auto_launch_app_pid((pid_t *)arg);

	case PX_TP_CMD_SET_WAIT_IMAGE_LOAD_NAME:
		return set_wait_image_load_name((char *)arg);

	case PX_TP_CMD_QUERY_REQUEST:
		return query_request((struct query_request_data *)arg);

	case PX_TP_CMD_GET_CPU_ID:
		return get_cpu_id((unsigned long *)arg);

	case PX_TP_CMD_GET_TARGET_RAW_DATA_LENGTH:
		return get_target_raw_data_length((unsigned long *)arg);

	case PX_TP_CMD_GET_TARGET_INFO:
		return get_target_info((unsigned long *)arg);

	case PX_TP_CMD_GET_CPU_FREQ:
		return get_cpu_freq((unsigned int *)arg);

	case PX_TP_CMD_GET_TIMESTAMP_FREQ:
		return get_timestamp_frequency((unsigned long *)arg);

	case PX_TP_CMD_GET_TIMESTAMP:
		return get_time_stamp((unsigned long long *)arg);

	case PX_TP_CMD_ADD_MODULE_RECORD:
		return add_module_record((struct add_module_data *)arg);
#if 0
	case PX_TP_CMD_RESET_EVENT_BUFFER_FULL:
		return reset_event_buffer_full((bool *)arg);

	case PX_TP_CMD_RESET_MODULE_BUFFER_FULL:
		return reset_module_buffer_full((bool *)arg);
#endif
//	case PX_TP_CMD_SET_KERNEL_FUNC_ADDR:
//		return set_kernel_func_addr((struct tp_kernel_func_addr *)arg);

//	case PX_TP_CMD_HOOK_ADDRESS:
//		return hook_address((struct tp_hook_address *)arg);

	case PX_TP_CMD_READ_EVENT_BUFFER:
		return read_event_buffer((struct read_buffer_data *)arg);

	case PX_TP_CMD_READ_MODULE_BUFFER:
		return read_module_buffer((struct read_buffer_data *)arg);

	case PX_TP_CMD_GET_POSSIBLE_CPU_NUM:
		return get_possible_cpu_number((unsigned int *)arg);

	case PX_TP_CMD_GET_ONLINE_CPU_NUM:
		return get_online_cpu_number((unsigned int *)arg);

	default:
		return -EINVAL;
	}
}
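The dispatcher above, together with px_tp_d_release() from Example #3, would be exposed to user space through a file_operations table when the character device is registered. A minimal registration sketch (the px_tp_d_open() handler, the misc-device name, and the module boilerplate are assumptions, not taken from the examples):

/* Hypothetical registration sketch: hook the ioctl and release handlers
 * shown above into a misc character device.  The open handler and the
 * "px_tp_d" device name are assumptions, not taken from the examples. */
#include <linux/fs.h>
#include <linux/miscdevice.h>
#include <linux/module.h>

static const struct file_operations px_tp_d_fops = {
	.owner          = THIS_MODULE,
	.unlocked_ioctl = px_tp_d_ioctl,
	.open           = px_tp_d_open,
	.release        = px_tp_d_release,
};

static struct miscdevice px_tp_d_dev = {
	.minor = MISC_DYNAMIC_MINOR,
	.name  = "px_tp_d",
	.fops  = &px_tp_d_fops,
};

static int __init px_tp_d_init(void)
{
	return misc_register(&px_tp_d_dev);
}

static void __exit px_tp_d_exit(void)
{
	misc_deregister(&px_tp_d_dev);
}

module_init(px_tp_d_init);
module_exit(px_tp_d_exit);
MODULE_LICENSE("GPL");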