int alloc_cpu_buffers(void)
{
	int i;

	unsigned long buffer_size = oprofile_cpu_buffer_size;
	unsigned long byte_size = buffer_size * (sizeof(struct op_sample) +
						 RB_EVENT_HDR_SIZE);

	op_ring_buffer = ring_buffer_alloc(byte_size, OP_BUFFER_FLAGS);
	if (!op_ring_buffer)
		goto fail;

	for_each_possible_cpu(i) {
		struct oprofile_cpu_buffer *b = &per_cpu(op_cpu_buffer, i);

		b->last_task = NULL;
		b->last_is_kernel = -1;
		b->tracing = 0;
		b->buffer_size = buffer_size;
		b->sample_received = 0;
		b->sample_lost_overflow = 0;
		b->backtrace_aborted = 0;
		b->sample_invalid_eip = 0;
		b->cpu = i;
		INIT_DELAYED_WORK(&b->work, wq_sync_buffer);
	}
	return 0;

fail:
	free_cpu_buffers();
	return -ENOMEM;
}

int alloc_cpu_buffers(void)
{
	int i;

	unsigned long buffer_size = fs_cpu_buffer_size;

	for_each_possible_cpu(i) {
		struct oprofile_cpu_buffer *b = &per_cpu(cpu_buffer, i);

		b->buffer = vmalloc_node(sizeof(struct op_sample) * buffer_size,
					 cpu_to_node(i));
		if (!b->buffer)
			goto fail;

		b->last_task = NULL;
		b->last_is_kernel = -1;
		b->tracing = 0;
		b->buffer_size = buffer_size;
		b->tail_pos = 0;
		b->head_pos = 0;
		b->sample_received = 0;
		b->sample_lost_overflow = 0;
		b->backtrace_aborted = 0;
		b->sample_invalid_eip = 0;
		b->cpu = i;
		INIT_DELAYED_WORK(&b->work, wq_sync_buffer);
	}
	return 0;

fail:
	free_cpu_buffers();
	return -ENOMEM;
}

int alloc_cpu_buffers(void)
{
	int i;

	unsigned long buffer_size = fs_cpu_buffer_size;

	for_each_online_cpu(i) {
		struct oprofile_cpu_buffer *b = &cpu_buffer[i];

		b->buffer = vmalloc_node(sizeof(struct op_sample) * buffer_size,
					 cpu_to_node(i));
		if (!b->buffer)
			goto fail;

		b->last_task = NULL;
		b->last_cpu_mode = -1;
		b->tracing = 0;
		b->buffer_size = buffer_size;
		b->tail_pos = 0;
		b->head_pos = 0;
		b->sample_received = 0;
		b->sample_lost_overflow = 0;
		b->cpu = i;
		INIT_WORK(&b->work, wq_sync_buffer, b);
	}
	return 0;

fail:
	free_cpu_buffers();
	return -ENOMEM;
}

void oprofile_shutdown(void)
{
	down(&start_sem);
	sync_stop();
	if (oprofile_ops.shutdown)
		oprofile_ops.shutdown();
	is_setup = 0;
	free_event_buffer();
	free_cpu_buffers();
	up(&start_sem);
}

int oprofile_setup(void)
{
	int err;

	mutex_lock(&start_mutex);

	if ((err = alloc_cpu_buffers()))
		goto out;

	if ((err = alloc_event_buffer()))
		goto out1;

	if (oprofile_ops.setup && (err = oprofile_ops.setup()))
		goto out2;

	/* Note even though this starts part of the
	 * profiling overhead, it's necessary to prevent
	 * us missing task deaths and eventually oopsing
	 * when trying to process the event buffer.
	 */
	if (oprofile_ops.sync_start) {
		int sync_ret = oprofile_ops.sync_start();

		switch (sync_ret) {
		case 0:
			goto post_sync;
		case 1:
			goto do_generic;
		case -1:
			goto out3;
		default:
			goto out3;
		}
	}
do_generic:
	if ((err = sync_start()))
		goto out3;

post_sync:
	is_setup = 1;
	mutex_unlock(&start_mutex);
	return 0;

out3:
	if (oprofile_ops.shutdown)
		oprofile_ops.shutdown();
out2:
	free_event_buffer();
out1:
	free_cpu_buffers();
out:
	mutex_unlock(&start_mutex);
	return err;
}

int oprofile_setup(void)
{
	int err;

	down(&start_sem);

	if ((err = alloc_cpu_buffers()))
		goto out;

	if ((err = alloc_event_buffer()))
		goto out1;

	if (oprofile_ops.setup && (err = oprofile_ops.setup()))
		goto out2;

	/* Note even though this starts part of the
	 * profiling overhead, it's necessary to prevent
	 * us missing task deaths and eventually oopsing
	 * when trying to process the event buffer.
	 */
	if ((err = sync_start()))
		goto out3;

	is_setup = 1;
	up(&start_sem);
	return 0;

out3:
	if (oprofile_ops.shutdown)
		oprofile_ops.shutdown();
out2:
	free_event_buffer();
out1:
	free_cpu_buffers();
out:
	up(&start_sem);
	return err;
}

int alloc_cpu_buffers(void)
{
	int i;

	unsigned long buffer_size = fs_cpu_buffer_size;

	for_each_online_cpu(i) {
		struct oprofile_cpu_buffer *b = &cpu_buffer[i];

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,16)
		/* SLES10, RHEL5 */
		b->buffer = vmalloc_node(sizeof(struct op_sample) * buffer_size,
					 cpu_to_node(i));
#else
		/* RHEL4 */
		b->buffer = vmalloc(sizeof(struct op_sample) * buffer_size);
#endif
		if (!b->buffer)
			goto fail;

		b->last_task = NULL;
		b->last_cpu_mode = -1;
		b->tracing = 0;
		b->buffer_size = buffer_size;
		b->tail_pos = 0;
		b->head_pos = 0;
		b->sample_received = 0;
		b->sample_lost_overflow = 0;
		b->cpu = i;
#ifdef CONFIG_CA_CSS
		b->ca_css_interval = 0;
#endif
		INIT_WORK(&b->work, wq_sync_buffer, b);
	}
	return 0;

fail:
	free_cpu_buffers();
	return -ENOMEM;
}

void oprofile_shutdown(void)
{
	mutex_lock(&start_mutex);

	if (oprofile_ops.sync_stop) {
		int sync_ret = oprofile_ops.sync_stop();

		switch (sync_ret) {
		case 0:
			goto post_sync;
		case 1:
			goto do_generic;
		default:
			goto post_sync;
		}
	}
do_generic:
	sync_stop();

post_sync:
	if (oprofile_ops.shutdown)
		oprofile_ops.shutdown();
	is_setup = 0;
	free_event_buffer();
	free_cpu_buffers();
	mutex_unlock(&start_mutex);
}