/*
 * Recursively append a textual rendering of the field FIELDS of CTF_EVENT
 * to FIELDSSTR.  Integers are printed in decimal, strings verbatim, and
 * arrays either element-by-element (recursing) or as a char array when
 * elements are not individually addressable.  Returns 0 (RET is reserved
 * for error propagation but no failure path sets it yet).
 */
int getFields(struct bt_ctf_event *ctf_event, struct bt_definition const *fields,
		GString *fieldsStr)
{
	enum ctf_type_id fieldType =
		bt_ctf_field_type(bt_ctf_get_decl_from_def(fields));
	int ret = 0, isSigned = -1, len = 0, i = 0;
	const struct bt_definition *index_def;

	switch (fieldType) {
	case CTF_TYPE_INTEGER:
		isSigned = bt_ctf_get_int_signedness(bt_ctf_get_decl_from_def(fields));
		if (isSigned == 1) {
			/* BUG FIX: was "%lu" — wrong signedness and wrong
			   width on 32-bit platforms for an int64_t value. */
			g_string_append_printf(fieldsStr, "%" PRId64,
					bt_ctf_get_int64(fields));
		} else if (isSigned == 0) {
			g_string_append_printf(fieldsStr, "%" PRIu64,
					bt_ctf_get_uint64(fields));
		}
		/* isSigned < 0 means the signedness query failed: emit nothing. */
		break;
	case CTF_TYPE_STRING:
		g_string_append_printf(fieldsStr, "%s", bt_ctf_get_string(fields));
		break;
	case CTF_TYPE_ARRAY:
		g_string_append_printf(fieldsStr, "[ ");
		len = bt_ctf_get_array_len(bt_ctf_get_decl_from_def(fields));
		/* If element 0 is addressable, treat this as a real array and
		   recurse per element; otherwise fall back to printing it as
		   a char array (string-like). */
		index_def = bt_ctf_get_index(ctf_event, fields, 0);
		if (index_def) {
			for (i = 0; i < len; i++) {
				if (i > 0) {
					g_string_append_printf(fieldsStr, ", ");
				}
				g_string_append_printf(fieldsStr, " ");
				g_string_append_printf(fieldsStr, "[%d] = ", i);
				getFields(ctf_event,
					bt_ctf_get_index(ctf_event, fields, i),
					fieldsStr);
			}
		} else {
			g_string_append_printf(fieldsStr, "%s",
					bt_ctf_get_char_array(fields));
		}
		g_string_append_printf(fieldsStr, " ]");
		break;
	case CTF_TYPE_UNKNOWN:
		g_string_append_printf(fieldsStr, "TYPE UNKNOWN");
		/* BUG FIX: missing break previously fell through into the
		   default case, appending "TYPE UNIMP ..." as well. */
		break;
	default:
		g_string_append_printf(fieldsStr, "TYPE UNIMP %i", fieldType);
		break;
	}
	return ret;
}
/*
 * Downcast FIELD to its containing struct definition_sequence.
 * Returns NULL when FIELD is NULL or is not a CTF sequence.
 */
struct definition_sequence *_bt_python_get_sequence_from_def(
		struct bt_definition *field)
{
	const struct bt_declaration *decl;

	if (!field) {
		return NULL;
	}
	decl = bt_ctf_get_decl_from_def(field);
	if (bt_ctf_field_type(decl) != CTF_TYPE_SEQUENCE) {
		return NULL;
	}
	return container_of(field, struct definition_sequence, p);
}
void print_fields(struct bt_ctf_event *event, const char *procname, int pid) { unsigned int cnt, i; const struct bt_definition *const * list; const struct bt_declaration *l; const struct bt_definition *scope; enum ctf_type_id type; const char *str; struct processtop *current_proc; struct files *current_file; int fd, fd_value = -1; scope = bt_ctf_get_top_level_scope(event, BT_EVENT_FIELDS); bt_ctf_get_field_list(event, scope, &list, &cnt); for (i = 0; i < cnt; i++) { if (i != 0) fprintf(output, ", "); fprintf(output, "%s = ", bt_ctf_field_name(list[i])); l = bt_ctf_get_decl_from_def(list[i]); if (strncmp(bt_ctf_field_name(list[i]), "fd", 2) == 0) fd = 1; else fd = 0; type = bt_ctf_field_type(l); if (type == CTF_TYPE_INTEGER) { if (bt_ctf_get_int_signedness(l) == 0) { fd_value = bt_ctf_get_uint64(list[i]); fprintf(output, "%" PRIu64, bt_ctf_get_uint64(list[i])); } else { fd_value = bt_ctf_get_int64(list[i]); fprintf(output, "%" PRId64, bt_ctf_get_int64(list[i])); } } else if (type == CTF_TYPE_STRING) { fprintf(output, "%s", bt_ctf_get_string(list[i])); } else if (type == CTF_TYPE_ARRAY) { str = bt_ctf_get_char_array(list[i]); if (!bt_ctf_field_get_error() && str) fprintf(output, "%s", str); } if (fd) { current_proc = find_process_tid(<tngtop, pid, procname); if (!current_proc) continue; current_file = get_file(current_proc, fd_value); if (!current_file || !current_file->name) continue; fprintf(output, "<%s>", current_file->name); } } }
/*
 * Downcast FIELD to its containing struct definition_array.
 * Returns NULL when FIELD is NULL or is not a CTF array.
 *
 * BUG FIX: the function body was truncated — it jumped to an "end" label
 * that did not exist and had no return statement or closing brace, so it
 * could not compile.  The tail is restored to mirror the sibling sequence
 * accessor (_bt_python_get_sequence_from_def).
 */
struct definition_array *_bt_python_get_array_from_def(
		struct bt_definition *field)
{
	const struct bt_declaration *array_decl;
	struct definition_array *array = NULL;

	if (!field) {
		goto end;
	}
	array_decl = bt_ctf_get_decl_from_def(field);
	if (bt_ctf_field_type(array_decl) == CTF_TYPE_ARRAY) {
		array = container_of(field, struct definition_array, p);
	}
end:
	return array;
}
/* Implementation of the to_xfer_partial target method for CTF trace
   files.  Only TARGET_OBJECT_MEMORY reads are supported; inside a
   selected traceframe, memory is served from the frame's "memory"
   event blocks, falling back to the executable's read-only sections
   for addresses the frame does not cover.  */

static enum target_xfer_status ctf_xfer_partial (struct target_ops *ops, enum target_object object,
				   const char *annex, gdb_byte *readbuf,
				   const gdb_byte *writebuf, ULONGEST offset,
				   ULONGEST len, ULONGEST *xfered_len)
{
  /* We're only doing regular memory for now.  */
  if (object != TARGET_OBJECT_MEMORY)
    return -1;

  /* Trace files are read-only: a write request is a caller error.  */
  if (readbuf == NULL)
    error (_("ctf_xfer_partial: trace file is read-only"));

  if (get_traceframe_number () != -1)
    {
      struct bt_iter_pos *pos;
      int i = 0;
      enum target_xfer_status res;
      /* Records the lowest available address of all blocks that
	 intersects the requested range.  */
      ULONGEST low_addr_available = 0;

      gdb_assert (ctf_iter != NULL);
      /* Save the current position so it can be restored before every
	 return path below — the iterator is shared global state.  */
      pos = bt_iter_get_pos (bt_ctf_get_iter (ctf_iter));
      gdb_assert (pos->type == BT_SEEK_RESTORE);
      /* Iterate through the traceframe's blocks, looking for
	 memory.  The scan stops at the next "frame" event, which marks
	 the start of the following traceframe.  */
      while (1)
	{
	  ULONGEST amt;
	  uint64_t maddr;
	  uint16_t mlen;
	  enum bfd_endian byte_order
	    = gdbarch_byte_order (target_gdbarch ());
	  const struct bt_definition *scope;
	  const struct bt_definition *def;
	  struct bt_ctf_event *event = bt_ctf_iter_read_event (ctf_iter);
	  const char *name = bt_ctf_event_name (event);

	  if (name == NULL || strcmp (name, "frame") == 0)
	    break;
	  else if (strcmp (name, "memory") != 0)
	    {
	      /* Not a memory block — skip to the next event.  */
	      if (bt_iter_next (bt_ctf_get_iter (ctf_iter)) < 0)
		break;

	      continue;
	    }

	  /* Pull this memory block's base address and length out of the
	     event's field scope.  */
	  scope = bt_ctf_get_top_level_scope (event,
					      BT_EVENT_FIELDS);
	  def = bt_ctf_get_field (event, scope, "address");
	  maddr = bt_ctf_get_uint64 (def);
	  def = bt_ctf_get_field (event, scope, "length");
	  mlen = (uint16_t) bt_ctf_get_uint64 (def);

	  /* If the block includes the first part of the desired range,
	     return as much it has; GDB will re-request the remainder,
	     which might be in a different block of this trace frame.  */
	  if (maddr <= offset && offset < (maddr + mlen))
	    {
	      const struct bt_definition *array
		= bt_ctf_get_field (event, scope, "contents");
	      const struct bt_declaration *decl
		= bt_ctf_get_decl_from_def (array);
	      gdb_byte *contents;
	      int k;

	      /* Copy the block's contents out element by element; the
	         CTF array stores each byte as an integer field.  */
	      contents = xmalloc (mlen);

	      for (k = 0; k < mlen; k++)
		{
		  const struct bt_definition *element
		    = bt_ctf_get_index (event, array, k);

		  contents[k] = (gdb_byte) bt_ctf_get_uint64 (element);
		}

	      amt = (maddr + mlen) - offset;
	      if (amt > len)
		amt = len;

	      memcpy (readbuf, &contents[offset - maddr], amt);
	      xfree (contents);

	      /* Restore the position.  */
	      bt_iter_set_pos (bt_ctf_get_iter (ctf_iter), pos);

	      if (amt == 0)
		return TARGET_XFER_EOF;
	      else
		{
		  *xfered_len = amt;
		  return TARGET_XFER_OK;
		}
	    }

	  /* The block starts above OFFSET but intersects the requested
	     range: remember its base as a cap for the read-only-section
	     fallback below.  */
	  if (offset < maddr && maddr < (offset + len))
	    if (low_addr_available == 0 || low_addr_available > maddr)
	      low_addr_available = maddr;

	  if (bt_iter_next (bt_ctf_get_iter (ctf_iter)) < 0)
	    break;
	}

      /* Restore the position.  */
      bt_iter_set_pos (bt_ctf_get_iter (ctf_iter), pos);

      /* Requested memory is unavailable in the context of traceframes,
	 and this address falls within a read-only section, fallback
	 to reading from executable, up to LOW_ADDR_AVAILABLE.  */
      if (offset < low_addr_available)
	len = min (len, low_addr_available - offset);
      res = exec_read_partial_read_only (readbuf, offset, len, xfered_len);

      if (res == TARGET_XFER_OK)
	return TARGET_XFER_OK;
      else
	{
	  /* No use trying further, we know some memory starting
	     at MEMADDR isn't available.  */
	  *xfered_len = len;
	  return TARGET_XFER_UNAVAILABLE;
	}
    }
  else
    {
      /* No traceframe is selected: fallback to reading from read-only
	 sections.  */
      return section_table_read_available_memory (readbuf, offset, len,
						  xfered_len);
    }
}