Code example #1
File: lttng-ust-elf.c Project: compudj/lttng-ust-dev
/*
 * Compute the total in-memory size of the ELF file, in bytes.
 *
 * Returns 0 if successful, -1 if not. On success, the memory size is
 * returned through the out parameter `memsz`.
 */
int lttng_ust_elf_get_memsz(struct lttng_ust_elf *elf, uint64_t *memsz)
{
	uint16_t i;
	uint64_t _memsz = 0;

	if (!elf || !memsz) {
		goto error;
	}

	for (i = 0; i < elf->ehdr->e_phnum; ++i) {
		struct lttng_ust_elf_phdr *phdr;
		uint64_t align;

		phdr = lttng_ust_elf_get_phdr(elf, i);
		if (!phdr) {
			goto error;
		}

		/*
		 * Only PT_LOAD segments contribute to memsz. Skip
		 * other segments.
		 */
		if (phdr->p_type != PT_LOAD) {
			goto next_loop;
		}

		/*
		 * A p_align of 0 means no alignment, i.e. aligned to
		 * 1 byte.
		 */
		align = phdr->p_align == 0 ? 1 : phdr->p_align;
		/* Align the start of the segment. */
		_memsz += offset_align(_memsz, align);
		_memsz += phdr->p_memsz;
		/*
		 * Add padding at the end of the segment, so it ends
		 * on a multiple of the align value (which usually
		 * means a page boundary). This makes the computation
		 * valid even in cases where p_align would change from
		 * one segment to the next.
		 */
		_memsz += offset_align(_memsz, align);
	next_loop:
		free(phdr);
	}

	*memsz = _memsz;
	return 0;
error:
	return -1;
}
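Every example on this page revolves around offset_align(). As a point of reference, here is a minimal sketch of its contract, assuming the usual LTTng semantics: it evaluates to the number of padding bytes needed to round an offset up to the next multiple of a nonzero power-of-two alignment. my_offset_align below is a hypothetical stand-in, not the library macro:

#include <assert.h>
#include <stdint.h>

/*
 * Hypothetical stand-in for offset_align(): number of padding bytes
 * needed to bring `offset` up to the next multiple of `align`.
 * `align` must be a nonzero power of two.
 */
static inline uint64_t my_offset_align(uint64_t offset, uint64_t align)
{
	return (align - offset) & (align - 1);
}

int main(void)
{
	assert(my_offset_align(13, 8) == 3);	/* 13 + 3 == 16 */
	assert(my_offset_align(16, 8) == 0);	/* already aligned */
	return 0;
}

Under that reading, the two _memsz += offset_align(_memsz, align) lines above round the running size up to the segment alignment both before and after adding p_memsz, so each PT_LOAD segment starts and ends on an aligned boundary.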
Code example #2
File: event-fields.c Project: cooljeanius/babeltrace
static
int bt_ctf_field_structure_serialize(struct bt_ctf_field *field,
		struct ctf_stream_pos *pos)
{
	size_t i;
	int ret = 0;
	struct bt_ctf_field_structure *structure = container_of(
		field, struct bt_ctf_field_structure, parent);

	while (!ctf_pos_access_ok(pos,
		offset_align(pos->offset,
			field->type->declaration->alignment))) {
		ret = increase_packet_size(pos);
		if (ret) {
			goto end;
		}
	}

	if (!ctf_align_pos(pos, field->type->declaration->alignment)) {
		ret = -1;
		goto end;
	}

	for (i = 0; i < structure->fields->len; i++) {
		struct bt_ctf_field *field = g_ptr_array_index(
			structure->fields, i);

		ret = bt_ctf_field_serialize(field, pos);
		if (ret) {
			break;
		}
	}
end:
	return ret;
}
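The while loop above is a grow-until-it-fits pattern: keep enlarging the packet until the write position, once padded to the field's alignment, is still addressable. Below is a self-contained sketch of the same idea, where struct buf, buf_ok() and buf_grow() are hypothetical stand-ins for the stream position, ctf_pos_access_ok() and increase_packet_size():

#include <stddef.h>

struct buf {
	size_t offset;		/* current write position, in bytes */
	size_t capacity;	/* currently addressable size, in bytes */
};

/* Padding bytes needed to round `off` up to a power-of-two `align`. */
static size_t pad_to(size_t off, size_t align)
{
	return (align - off) & (align - 1);
}

/* Stand-in for ctf_pos_access_ok(): can `extra` more bytes be written? */
static int buf_ok(const struct buf *b, size_t extra)
{
	return b->offset + extra <= b->capacity;
}

/* Stand-in for increase_packet_size(): enlarge capacity, 0 on success. */
static int buf_grow(struct buf *b)
{
	if (b->capacity >= ((size_t) 1 << 30))
		return -1;	/* refuse to grow without bound */
	b->capacity = b->capacity ? b->capacity * 2 : 4096;
	return 0;
}

/* Grow until the aligned position fits, then align (cf. ctf_align_pos). */
static int align_write_pos(struct buf *b, size_t align)
{
	while (!buf_ok(b, pad_to(b->offset, align))) {
		if (buf_grow(b))
			return -1;
	}
	b->offset += pad_to(b->offset, align);
	return 0;
}

int main(void)
{
	struct buf b = { .offset = 13, .capacity = 16 };

	if (align_write_pos(&b, 8))
		return 1;
	return !(b.offset == 16);	/* 13 rounded up to 16 */
}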
Code example #3
static
int32_t bytecode_reserve(struct lttng_filter_bytecode_alloc **fb, uint32_t align, uint32_t len)
{
	int32_t ret;
	uint32_t padding = offset_align((*fb)->b.len, align);
	uint32_t new_len = (*fb)->b.len + padding + len;
	uint32_t new_alloc_len = sizeof(struct lttng_filter_bytecode_alloc) + new_len;
	uint32_t old_alloc_len = (*fb)->alloc_len;

	if (new_len > LTTNG_FILTER_MAX_LEN)
		return -EINVAL;

	if (new_alloc_len > old_alloc_len) {
		struct lttng_filter_bytecode_alloc *newptr;

		new_alloc_len =
			max_t(uint32_t, 1U << get_count_order(new_alloc_len), old_alloc_len << 1);
		newptr = realloc(*fb, new_alloc_len);
		if (!newptr)
			return -ENOMEM;
		*fb = newptr;
		/* We zero directly the memory from start of allocation. */
		memset(&((char *) *fb)[old_alloc_len], 0, new_alloc_len - old_alloc_len);
		(*fb)->alloc_len = new_alloc_len;
	}
	(*fb)->b.len += padding;
	ret = (*fb)->b.len;
	(*fb)->b.len += len;
	return ret;
}
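To make the arithmetic above concrete (assuming the offset_align() semantics sketched under code example #1): with (*fb)->b.len == 13, reserving len == 6 bytes at align == 4 produces padding == 3, so the caller is handed offset 16 and b.len ends up at 22. The numbers, checked in C:

#include <assert.h>
#include <stdint.h>

/* Padding helper, hypothetical stand-in for offset_align(). */
static uint32_t pad_to(uint32_t off, uint32_t align)
{
	return (align - off) & (align - 1);
}

int main(void)
{
	uint32_t b_len = 13;			/* current (*fb)->b.len */
	uint32_t padding = pad_to(b_len, 4);	/* 3 bytes of padding */
	uint32_t ret = b_len + padding;		/* offset handed back: 16 */

	assert(padding == 3);
	assert(ret == 16);
	assert(ret + 6 == 22);			/* b.len after the reserve */
	return 0;
}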
Code example #4
/**
 * channel_backend_init - initialize a channel backend
 * @chanb: channel backend
 * @name: channel name
 * @config: client ring buffer configuration
 * @subbuf_size: size of sub-buffers (>= page size, power of 2)
 * @num_subbuf: number of sub-buffers (power of 2)
 * @handle: shared memory handle
 * @stream_fds: stream file descriptors.
 *
 * Returns 0 if successful, a negative error code otherwise.
 *
 * Creates per-cpu channel buffers using the sizes and attributes
 * specified.  The created channel buffer files will be named
 * name_0...name_N-1.  File permissions will be %S_IRUSR.
 *
 * Called with CPU hotplug disabled.
 */
int channel_backend_init(struct channel_backend *chanb,
			 const char *name,
			 const struct lttng_ust_lib_ring_buffer_config *config,
			 size_t subbuf_size, size_t num_subbuf,
			 struct lttng_ust_shm_handle *handle,
			 const int *stream_fds)
{
	struct channel *chan = caa_container_of(chanb, struct channel, backend);
	unsigned int i;
	int ret;
	size_t shmsize = 0, num_subbuf_alloc;
	long page_size;

	if (!name)
		return -EPERM;

	page_size = sysconf(_SC_PAGE_SIZE);
	if (page_size <= 0) {
		return -ENOMEM;
	}
	/* Check that the subbuffer size is at least a page. */
	if (subbuf_size < (size_t) page_size)
		return -EINVAL;

	/*
	 * Make sure the number of subbuffers and subbuffer size are
	 * power of 2, and nonzero.
	 */
	if (!subbuf_size || (subbuf_size & (subbuf_size - 1)))
		return -EINVAL;
	if (!num_subbuf || (num_subbuf & (num_subbuf - 1)))
		return -EINVAL;
	/*
	 * Overwrite mode buffers require at least 2 subbuffers per
	 * buffer.
	 */
	if (config->mode == RING_BUFFER_OVERWRITE && num_subbuf < 2)
		return -EINVAL;

	ret = subbuffer_id_check_index(config, num_subbuf);
	if (ret)
		return ret;

	chanb->buf_size = num_subbuf * subbuf_size;
	chanb->subbuf_size = subbuf_size;
	chanb->buf_size_order = get_count_order(chanb->buf_size);
	chanb->subbuf_size_order = get_count_order(subbuf_size);
	chanb->num_subbuf_order = get_count_order(num_subbuf);
	chanb->extra_reader_sb =
			(config->mode == RING_BUFFER_OVERWRITE) ? 1 : 0;
	chanb->num_subbuf = num_subbuf;
	strncpy(chanb->name, name, NAME_MAX);
	chanb->name[NAME_MAX - 1] = '\0';
	memcpy(&chanb->config, config, sizeof(*config));

	/* Per-cpu buffer size: control (prior to backend) */
	shmsize = offset_align(shmsize, __alignof__(struct lttng_ust_lib_ring_buffer));
	shmsize += sizeof(struct lttng_ust_lib_ring_buffer);

	/* Per-cpu buffer size: backend */
	/* num_subbuf + 1 is the worst case */
	num_subbuf_alloc = num_subbuf + 1;
	shmsize += offset_align(shmsize, __alignof__(struct lttng_ust_lib_ring_buffer_backend_pages_shmp));
	shmsize += sizeof(struct lttng_ust_lib_ring_buffer_backend_pages_shmp) * num_subbuf_alloc;
	shmsize += offset_align(shmsize, page_size);
	shmsize += subbuf_size * num_subbuf_alloc;
	shmsize += offset_align(shmsize, __alignof__(struct lttng_ust_lib_ring_buffer_backend_pages));
	shmsize += sizeof(struct lttng_ust_lib_ring_buffer_backend_pages) * num_subbuf_alloc;
	shmsize += offset_align(shmsize, __alignof__(struct lttng_ust_lib_ring_buffer_backend_subbuffer));
	shmsize += sizeof(struct lttng_ust_lib_ring_buffer_backend_subbuffer) * num_subbuf;
	/* Per-cpu buffer size: control (after backend) */
	shmsize += offset_align(shmsize, __alignof__(struct commit_counters_hot));
	shmsize += sizeof(struct commit_counters_hot) * num_subbuf;
	shmsize += offset_align(shmsize, __alignof__(struct commit_counters_cold));
	shmsize += sizeof(struct commit_counters_cold) * num_subbuf;

	if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
		struct lttng_ust_lib_ring_buffer *buf;
		/*
		 * We need to allocate for all possible cpus.
		 */
		for_each_possible_cpu(i) {
			struct shm_object *shmobj;

			shmobj = shm_object_table_alloc(handle->table, shmsize,
					SHM_OBJECT_SHM, stream_fds[i]);
			if (!shmobj)
				goto end;
			align_shm(shmobj, __alignof__(struct lttng_ust_lib_ring_buffer));
			set_shmp(chanb->buf[i].shmp, zalloc_shm(shmobj, sizeof(struct lttng_ust_lib_ring_buffer)));
			buf = shmp(handle, chanb->buf[i].shmp);
			if (!buf)
				goto end;
			set_shmp(buf->self, chanb->buf[i].shmp._ref);
			ret = lib_ring_buffer_create(buf, chanb, i,
					handle, shmobj);
			if (ret)
				goto free_bufs;	/* cpu hotplug locked */
		}
	} else {
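The listing above is truncated on the source page, but its shmsize computation is complete: it packs several differently aligned arrays into one shared-memory object by alternating offset_align() with sizeof accumulation. A reduced sketch of that layout pattern, using hypothetical struct names in place of the ring-buffer types:

#include <stddef.h>
#include <stdint.h>

struct hot  { uint64_t cc; };		/* e.g. commit_counters_hot */
struct cold { uint64_t cc_sb; };	/* e.g. commit_counters_cold */

/* Padding bytes needed to round `off` up to a power-of-two `align`. */
static size_t pad_to(size_t off, size_t align)
{
	return (align - off) & (align - 1);
}

/*
 * Total size needed to pack one `hot` array and one `cold` array back
 * to back, each starting on its type's natural alignment, mirroring
 * the shmsize accumulation in channel_backend_init().
 */
static size_t layout_size(size_t num_subbuf)
{
	size_t sz = 0;

	sz += pad_to(sz, __alignof__(struct hot));
	sz += sizeof(struct hot) * num_subbuf;
	sz += pad_to(sz, __alignof__(struct cold));
	sz += sizeof(struct cold) * num_subbuf;
	return sz;
}

int main(void)
{
	/* Size needed for 4 hot counters followed by 4 cold counters. */
	return layout_size(4) ? 0 : 1;
}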
Code example #5
File: lttng-ust-elf.c Project: compudj/lttng-ust-dev
/*
 * Internal method used to try to get the build_id from a PT_NOTE
 * segment ranging from `offset` to `segment_end`.
 *
 * If the function returns successfully and the build id note was
 * found in the segment, the out parameters `build_id` and `length`
 * are set to the retrieved information; otherwise they are left
 * untouched.
 *
 * Returns 0 on success, -1 if an error occurred.
 */
static
int lttng_ust_elf_get_build_id_from_segment(
	struct lttng_ust_elf *elf, uint8_t **build_id, size_t *length,
	off_t offset, off_t segment_end)
{
	uint8_t *_build_id = NULL;	/* Silence old gcc warning. */
	size_t _length = 0;		/* Silence old gcc warning. */

	while (offset < segment_end) {
		struct lttng_ust_elf_nhdr nhdr;
		size_t read_len;

		/* Align start of note entry */
		offset += offset_align(offset, ELF_NOTE_ENTRY_ALIGN);
		if (offset >= segment_end) {
			break;
		}
		/*
		 * We seek manually because if the note isn't the
		 * build id the data following the header will not
		 * have been read.
		 */
		if (lseek(elf->fd, offset, SEEK_SET) < 0) {
			goto error;
		}
		/* Cast: lttng_ust_read() returns ssize_t; without it, a -1
		 * error return would be promoted to an unsigned value and
		 * slip past the short-read check. */
		if (lttng_ust_read(elf->fd, &nhdr, sizeof(nhdr))
				< (ssize_t) sizeof(nhdr)) {
			goto error;
		}

		if (!is_elf_native_endian(elf)) {
			nhdr.n_namesz = bswap_32(nhdr.n_namesz);
			nhdr.n_descsz = bswap_32(nhdr.n_descsz);
			nhdr.n_type = bswap_32(nhdr.n_type);
		}

		offset += sizeof(nhdr) + nhdr.n_namesz;
		/* Align start of desc entry */
		offset += offset_align(offset, ELF_NOTE_DESC_ALIGN);

		if (nhdr.n_type != NT_GNU_BUILD_ID) {
			/*
			 * Ignore non build id notes but still
			 * increase the offset.
			 */
			offset += nhdr.n_descsz;
			continue;
		}

		_length = nhdr.n_descsz;
		_build_id = zmalloc(sizeof(uint8_t) * _length);
		if (!_build_id) {
			goto error;
		}

		if (lseek(elf->fd, offset, SEEK_SET) < 0) {
			goto error;
		}
		read_len = sizeof(*_build_id) * _length;
		if (lttng_ust_read(elf->fd, _build_id, read_len)
				< (ssize_t) read_len) {
			goto error;
		}

		break;
	}

	if (_build_id) {
		*build_id = _build_id;
		*length = _length;
	}

	return 0;
error:
	free(_build_id);
	return -1;
}
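For orientation, here is how the offset bookkeeping above plays out for a typical GNU build-id note: n_namesz == 4 for "GNU\0", n_descsz == 20 for a SHA-1 build id, and 4-byte values for both ELF_NOTE_ENTRY_ALIGN and ELF_NOTE_DESC_ALIGN (the common case; the exact constants are assumptions here):

#include <assert.h>
#include <stddef.h>

int main(void)
{
	size_t nhdr_size = 12;	/* three 32-bit words: namesz, descsz, type */
	size_t offset = 0;

	offset += nhdr_size + 4;	/* skip note header and "GNU\0" name */
	offset += (4 - offset) & 3;	/* align start of desc: no-op here */
	assert(offset == 16);		/* the 20 build-id bytes start here */
	return 0;
}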