Example #1
int alloc_cpu_buffers(void)
{
	int i;

	unsigned long buffer_size = oprofile_cpu_buffer_size;

	op_ring_buffer_read = ring_buffer_alloc(buffer_size, OP_BUFFER_FLAGS);
	if (!op_ring_buffer_read)
		goto fail;
	op_ring_buffer_write = ring_buffer_alloc(buffer_size, OP_BUFFER_FLAGS);
	if (!op_ring_buffer_write)
		goto fail;

	for_each_possible_cpu(i) {
		struct oprofile_cpu_buffer *b = &per_cpu(cpu_buffer, i);

		b->last_task = NULL;
		b->last_is_kernel = -1;
		b->tracing = 0;
		b->buffer_size = buffer_size;
		b->sample_received = 0;
		b->sample_lost_overflow = 0;
		b->backtrace_aborted = 0;
		b->sample_invalid_eip = 0;
		b->cpu = i;
		INIT_DELAYED_WORK(&b->work, wq_sync_buffer);
	}
	return 0;

fail:
	free_cpu_buffers();
	return -ENOMEM;
}
Example #2
/**
 * init_stats - Setup global state statistics for the hardware latency detector
 *
 * We use data to store various statistics and global state. We also use
 * a global ring buffer (ring_buffer) to keep raw samples of detected
 * hardware-induced system latencies. This function initializes these
 * structures and also allocates the global ring buffer.
 */
static int init_stats(void)
{
	int ret = -ENOMEM;

	mutex_init(&data.lock);
	init_waitqueue_head(&data.wq);
	atomic_set(&data.sample_open, 0);

	ring_buffer = ring_buffer_alloc(buf_size, BUF_FLAGS);

	if (WARN(!ring_buffer, KERN_ERR BANNER
			       "failed to allocate ring buffer!\n"))
		goto out;

	__reset_stats();
	data.threshold = DEFAULT_LAT_THRESHOLD;	    /* threshold us */
	data.sample_window = DEFAULT_SAMPLE_WINDOW; /* window us */
	data.sample_width = DEFAULT_SAMPLE_WIDTH;   /* width us */

	ret = 0;

out:
	return ret;
}
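The doc comment above says the ring buffer keeps raw samples of detected latencies. For context, a minimal sketch of the writer side, assuming a struct sample type that carries the measured latency; ring_buffer_write() is the kernel helper that copies a payload of the given length into the buffer:

/* Minimal sketch (assumed, not quoted from the detector): copy one
 * raw sample into the global ring_buffer allocated by init_stats().
 * Returns 0 on success, negative if the buffer rejected the write. */
static int __buffer_add_sample(struct sample *sample)
{
	return ring_buffer_write(ring_buffer,
				 sizeof(struct sample), sample);
}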
Example #3
static void
late_ack_tracker_init_instance(LateAckTracker *self, LogSource *source)
{
  self->super.source = source;
  source->ack_tracker = (AckTracker *)self;
  ring_buffer_alloc(&self->ack_record_storage, sizeof(LateAckRecord), log_source_get_init_window_size(source));
  g_static_mutex_init(&self->storage_mutex);
  _setup_callbacks(self);
}
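Example #3 hides the callback wiring behind a _setup_callbacks() helper whose body is not shown. A plausible sketch of that helper, inferred from the explicit assignments in Example #4 below (hypothetical, for illustration):

/* Hypothetical reconstruction of the _setup_callbacks() helper from
 * Example #3, based on the explicit wiring in Example #4. */
static void
_setup_callbacks(LateAckTracker *self)
{
  self->super.request_bookmark = late_ack_tracker_request_bookmark;
  self->super.track_msg = late_ack_tracker_track_msg;
  self->super.manage_msg_ack = late_ack_tracker_manage_msg_ack;
}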
Example #4
static void
late_ack_tracker_init_instance(LateAckTracker *self, LogSource *source)
{
  self->super.late = TRUE;
  self->super.source = source;
  source->ack_tracker = (AckTracker *)self;
  self->super.request_bookmark = late_ack_tracker_request_bookmark;
  self->super.track_msg = late_ack_tracker_track_msg;
  self->super.manage_msg_ack = late_ack_tracker_manage_msg_ack;
  ring_buffer_alloc(&self->ack_record_storage, sizeof(LateAckRecord), log_source_get_init_window_size(source));
  g_static_mutex_init(&self->storage_mutex);
}
Example #5
int alloc_cpu_buffers(void)
{
	int i;

	unsigned long buffer_size = oprofile_cpu_buffer_size;
	unsigned long byte_size = buffer_size * (sizeof(struct op_sample) +
						 RB_EVENT_HDR_SIZE);

	op_ring_buffer_read = ring_buffer_alloc(byte_size, OP_BUFFER_FLAGS);
	if (!op_ring_buffer_read)
		goto fail;
	op_ring_buffer_write = ring_buffer_alloc(byte_size, OP_BUFFER_FLAGS);
	if (!op_ring_buffer_write)
		goto fail;

	for_each_possible_cpu(i) {
		struct oprofile_cpu_buffer *b = &per_cpu(cpu_buffer, i);

		b->last_task = NULL;
		b->last_is_kernel = -1;
		b->tracing = 0;
		b->buffer_size = buffer_size;
		b->sample_received = 0;
		b->sample_lost_overflow = 0;
		b->backtrace_aborted = 0;
		b->sample_invalid_eip = 0;
		b->cpu = i;
#ifdef CONFIG_CA_CSS
		b->ca_css_interval = 0;
#endif
		INIT_DELAYED_WORK(&b->work, wq_sync_buffer);
	}
	return 0;

fail:
	free_cpu_buffers();
	return -ENOMEM;
}
Example #6
static void
test_drop_elements()
{
  RingBuffer rb;
  const int rb_capacity = 103;
  const int drop = 31;

  ring_buffer_alloc(&rb, sizeof(TestData), rb_capacity);

  _ringbuffer_fill(&rb, rb_capacity, 1, TRUE);

  ring_buffer_drop(&rb, drop);
  assert_true(ring_buffer_count(&rb) == (rb_capacity - drop), "drop failed");

  ring_buffer_free(&rb);
}
Example #7
static int __stp_alloc_ring_buffer(void)
{
	int i;
	unsigned long buffer_size = _stp_bufsize * 1024 * 1024;

	if (!alloc_cpumask_var(&_stp_relay_data.trace_reader_cpumask,
			       (GFP_KERNEL & ~__GFP_WAIT)))
		goto fail;
	cpumask_clear(_stp_relay_data.trace_reader_cpumask);

	if (buffer_size == 0) {
		dbug_trans(1, "using default buffer size...\n");
		buffer_size = _stp_nsubbufs * _stp_subbuf_size;
	}
        dbug_trans(1, "using buffer size %lu...\n", buffer_size);

	/* The number passed to ring_buffer_alloc() is per cpu.  Our
	 * 'buffer_size' is a total number of bytes to allocate.  So,
	 * we need to divide buffer_size by the number of cpus. */
	buffer_size /= num_online_cpus();
	dbug_trans(1, "%lu\n", buffer_size);
	_stp_relay_data.rb = ring_buffer_alloc(buffer_size, 0);
	if (!_stp_relay_data.rb)
		goto fail;

	/* Increment _stp_allocated_memory and
	   _stp_allocated_net_memory to approximately account for
	   buffers allocated by ring_buffer_alloc. */
	{
#ifndef DIV_ROUND_UP
#define DIV_ROUND_UP(n,d) (((n) + (d) - 1) / (d))
#endif
		u64 relay_pages;
		relay_pages = DIV_ROUND_UP (buffer_size, PAGE_SIZE);
		if (relay_pages < 2)
			relay_pages = 2;
		relay_pages *= num_online_cpus();
		_stp_allocated_net_memory += relay_pages * PAGE_SIZE;
		_stp_allocated_memory += relay_pages * PAGE_SIZE;
	}

	dbug_trans(0, "size = %lu\n", ring_buffer_size(_stp_relay_data.rb));
	return 0;

fail:
	__stp_free_ring_buffer();
	return -ENOMEM;
}
Example #8
static void
test_tail()
{
  RingBuffer rb;
  TestData *td_tail;

  ring_buffer_alloc(&rb, sizeof(TestData), 103);
  _ringbuffer_fill2(&rb, 103, 0, TRUE);

  ring_buffer_pop(&rb);

  td_tail = ring_buffer_tail(&rb);
  td_tail->idx = 103;

  assert_true(ring_buffer_push(&rb) == td_tail, "Push should return last tail.");

  assert_test_data_idx_range_in(&rb, 1, 103);

  ring_buffer_free(&rb);
}
Example #9
static void
_ringbuffer_init(RingBuffer *self)
{
  ring_buffer_alloc(self, sizeof(TestData), capacity);
}
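Taken together, these tests exercise the whole RingBuffer life cycle. A minimal end-to-end sketch using only the calls that appear in Examples #6, #8 and #9 (TestData with an integer idx member is assumed, as in those tests):

/* Minimal sketch of the alloc/push/pop/drop/free cycle, using only
 * RingBuffer calls shown in the examples above. TestData is assumed
 * to be a struct with an integer 'idx' member, as in the tests. */
static void
ring_buffer_lifecycle_sketch(void)
{
  RingBuffer rb;
  TestData *slot;
  gint i;

  ring_buffer_alloc(&rb, sizeof(TestData), 8);

  /* ring_buffer_push() hands back the next free slot to fill in place */
  for (i = 0; i < 8; i++)
    {
      slot = ring_buffer_push(&rb);
      slot->idx = i;
    }

  ring_buffer_pop(&rb);       /* discard the oldest element */
  ring_buffer_drop(&rb, 3);   /* discard three more without reading them */

  /* 8 pushed - 1 popped - 3 dropped = 4 elements left */
  g_assert(ring_buffer_count(&rb) == 4);

  ring_buffer_free(&rb);
}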