static int auxtrace_queues__split_buffer(struct auxtrace_queues *queues, unsigned int idx, struct auxtrace_buffer *buffer) { u64 sz = buffer->size; bool consecutive = false; struct auxtrace_buffer *b; int err; while (sz > BUFFER_LIMIT_FOR_32_BIT) { b = memdup(buffer, sizeof(struct auxtrace_buffer)); if (!b) return -ENOMEM; b->size = BUFFER_LIMIT_FOR_32_BIT; b->consecutive = consecutive; err = auxtrace_queues__add_buffer(queues, idx, b); if (err) { auxtrace_buffer__free(b); return err; } buffer->data_offset += BUFFER_LIMIT_FOR_32_BIT; sz -= BUFFER_LIMIT_FOR_32_BIT; consecutive = true; } buffer->size = sz; buffer->consecutive = consecutive; return 0; }
/*
 * Allocate an auxtrace_buffer describing an AUXTRACE event's payload (at
 * @data_offset in the trace file) and queue it on the queue selected by
 * event->auxtrace.idx.  On success, ownership of the buffer passes to
 * @queues and, if @buffer_ptr is non-NULL, a pointer to the queued buffer
 * is returned through it.  On failure the buffer is freed and the error
 * code is returned.
 */
int auxtrace_queues__add_event(struct auxtrace_queues *queues,
			       struct perf_session *session,
			       union perf_event *event, off_t data_offset,
			       struct auxtrace_buffer **buffer_ptr)
{
	struct auxtrace_buffer *b = zalloc(sizeof(struct auxtrace_buffer));
	int err;

	if (!b)
		return -ENOMEM;

	/* pid is not carried by the AUXTRACE event; filled in later */
	b->pid = -1;
	b->tid = event->auxtrace.tid;
	b->cpu = event->auxtrace.cpu;
	b->data_offset = data_offset;
	b->offset = event->auxtrace.offset;
	b->reference = event->auxtrace.reference;
	b->size = event->auxtrace.size;

	err = auxtrace_queues__add_event_buffer(queues, session,
						event->auxtrace.idx, b);
	if (err) {
		auxtrace_buffer__free(b);
		return err;
	}

	if (buffer_ptr)
		*buffer_ptr = b;

	return 0;
}
void auxtrace_queues__free(struct auxtrace_queues *queues) { unsigned int i; for (i = 0; i < queues->nr_queues; i++) { while (!list_empty(&queues->queue_array[i].head)) { struct auxtrace_buffer *buffer; buffer = list_entry(queues->queue_array[i].head.next, struct auxtrace_buffer, list); list_del(&buffer->list); auxtrace_buffer__free(buffer); } } zfree(&queues->queue_array); queues->nr_queues = 0; }