/*  Consolidate all per-CU .debug_macinfo data blocks into one
    contiguous section buffer, append the terminating 0 type code,
    and free the input blocks.
    Returns the number of debug sections generated so far
    (dbg->de_n_debug_sect), or 0 on allocation failure (with
    *error set to DW_DLE_ALLOC_FAIL).  */
int
_dwarf_pro_transform_macro_info_to_disk(Dwarf_P_Debug dbg,
    Dwarf_Error * error)
{
    /* Total num of bytes in .debug_macinfo section. */
    Dwarf_Unsigned mac_num_bytes;

    /* Points to first byte of .debug_macinfo buffer. */
    Dwarf_Small *macinfo;

    /* Fills in the .debug_macinfo buffer. */
    Dwarf_Small *macinfo_ptr;

    /* Used to scan the section data buffers. */
    struct dw_macinfo_block_s *m_prev;
    struct dw_macinfo_block_s *m_sect;

    /* Get the size of the debug_macinfo data by summing the
       used length of every block in the list. */
    mac_num_bytes = 0;
    for (m_sect = dbg->de_first_macinfo; m_sect != NULL;
        m_sect = m_sect->mb_next) {
        mac_num_bytes += m_sect->mb_used_len;
    }
    /* The final entry has a type code of 0 to indicate it is
       final for this CU.  Takes just 1 byte. */
    mac_num_bytes += 1;

    /* Allocate the single output buffer in the section stream. */
    GET_CHUNK(dbg, dbg->de_elf_sects[DEBUG_MACINFO],
        macinfo, (unsigned long) mac_num_bytes, error);
    if (macinfo == NULL) {
        _dwarf_p_error(dbg, error, DW_DLE_ALLOC_FAIL);
        return (0);
    }

    macinfo_ptr = macinfo;
    m_prev = 0;
    /* Copy every block into the output buffer.  Each block is
       freed one iteration AFTER it was copied (via m_prev) so we
       never read mb_next from freed memory. */
    for (m_sect = dbg->de_first_macinfo; m_sect != NULL;
        m_sect = m_sect->mb_next) {
        memcpy(macinfo_ptr, m_sect->mb_data, m_sect->mb_used_len);
        macinfo_ptr += m_sect->mb_used_len;
        if (m_prev) {
            _dwarf_p_dealloc(dbg, (Dwarf_Small *) m_prev);
            m_prev = 0;
        }
        m_prev = m_sect;
    }
    *macinfo_ptr = 0;   /* the type code of 0 as last entry */
    /* Free the last block, which the loop above left pending. */
    if (m_prev) {
        _dwarf_p_dealloc(dbg, (Dwarf_Small *) m_prev);
        m_prev = 0;
    }
    return (int) dbg->de_n_debug_sect;
}
/*  NIF: compress a binary of ErlNifSInt64 values by taking the
    minimum of each `chunk`-sized group.
    argv[0] = input binary, argv[1] = chunk size (via GET_CHUNK).
    Returns a new binary of ceil(count/chunk) ErlNifSInt64 minima,
    or badarg on bad arity / allocation failure.

    Fix: the final-write guard compared target_i (an ELEMENT index)
    against target_size (a BYTE count), so the check was in the
    wrong units; it now compares element counts. */
static ERL_NIF_TERM min_r(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{
    ErlNifBinary bin;
    ERL_NIF_TERM r;
    ErlNifSInt64* vs;
    ErlNifSInt64* target;
    ErlNifSInt64 chunk;          // number of input values per output slot
    ErlNifSInt64 target_i = 0;   // target position (in elements)
    ErlNifSInt64 aggr;           // running minimum of the current chunk
    uint32_t pos;
    uint32_t count;
    uint32_t target_size;        // output size in BYTES

    if (argc != 2)
        return enif_make_badarg(env);

    GET_CHUNK(chunk);
    GET_BIN(0, bin, count, vs);

    target_size = ceil((double) count / chunk) * sizeof(ErlNifSInt64);

    if (! (target = (ErlNifSInt64*) enif_make_new_binary(env, target_size, &r)))
        return enif_make_badarg(env); // TODO return proper error

    // If we don't have any input data we can return right away.
    if (count == 0)
        return r;

    // We know we have at least one element in the list so our
    // aggregator will start with this.
    aggr = vs[0];
    pos = 1;

    // We iterate over the remaining i .. count-1 elements.
    for (uint32_t i = 1; i < count; i++, pos++) {
        if (pos == chunk) {
            // Chunk complete: emit its minimum and start the next one.
            target[target_i] = aggr;
            target_i++;
            aggr = vs[i];
            pos = 0;
        } else {
            if (vs[i] < aggr) {
                aggr = vs[i];
            }
        }
    }

    // Making sure the last aggregate is saved.  Compare element
    // index against the ELEMENT capacity (target_size is bytes).
    if (target_i < (ErlNifSInt64)(target_size / sizeof(ErlNifSInt64)))
        target[target_i] = aggr;

    return r;
}
/*  NIF: compress a binary of ErlNifSInt64 values by averaging each
    `chunk`-sized group.  A trailing partial chunk is padded with
    its last value before dividing.
    argv[0] = input binary, argv[1] = chunk size (via GET_CHUNK). */
static ERL_NIF_TERM avg_r(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{
    ErlNifBinary bin;
    ERL_NIF_TERM res_term;
    ErlNifSInt64* input;
    ErlNifSInt64* output;
    ErlNifSInt64 chunk;         // input values folded into one output slot
    ErlNifSInt64 sum;           // running total for the current chunk
    uint32_t out_idx = 0;       // next output slot (elements)
    uint32_t count;
    uint32_t in_chunk = 0;      // values consumed in the current chunk
    uint32_t out_bytes;         // output size in bytes

    if (argc != 2)
        return enif_make_badarg(env);

    GET_CHUNK(chunk);
    GET_BIN(0, bin, count, input);

    out_bytes = ceil((double) count / chunk) * sizeof(ErlNifSInt64);
    output = (ErlNifSInt64*) enif_make_new_binary(env, out_bytes, &res_term);
    if (!output)
        return enif_make_badarg(env); // TODO return proper error

    if (count == 0)
        return res_term;

    // Seed the first chunk with the first value.
    sum = input[0];
    in_chunk++;

    for (uint32_t i = 1; i < count; i++, in_chunk++) {
        if (in_chunk == chunk) {
            // Chunk full: emit its average, begin the next chunk.
            output[out_idx++] = sum / chunk;
            sum = input[i];
            in_chunk = 0;
        } else {
            sum += input[i];
        }
    }

    // Pad a trailing partial chunk with its last value so the
    // division by `chunk` stays meaningful.
    if (count % chunk) {
        sum += input[count - 1] * (chunk - (count % chunk));
    }
    output[out_idx] = sum / chunk;

    return res_term;
}
/*  NIF: for each `chunk`-sized group of slots, count how many are
    NOT set (empty).  Slots past the end of a trailing partial
    chunk count as empty.  Each count is stored via TO_DDB.
    argv[0] = input binary, argv[1] = chunk size (via GET_CHUNK). */
static ERL_NIF_TERM empty(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{
    ErlNifBinary bin;
    ERL_NIF_TERM res_term;
    ErlNifSInt64* input;
    ErlNifSInt64* output;
    ErlNifSInt64 chunk;          // slots per output entry
    ErlNifSInt64 out_idx = 0;    // next output slot (elements)
    ErlNifSInt64 in_chunk = 0;   // position within the current chunk
    ErlNifSInt64 missing = 0;    // unset slots seen in the current chunk
    int count;
    int out_bytes;

    if (argc != 2)
        return enif_make_badarg(env);

    GET_CHUNK(chunk);
    GET_BIN(0, bin, count, input);

    out_bytes = ceil((double) count / chunk) * sizeof(ErlNifSInt64);
    if (! (output = (ErlNifSInt64*) enif_make_new_binary(env, out_bytes, &res_term)))
        return enif_make_badarg(env); // TODO return proper error

    if (count == 0)
        return res_term;

    in_chunk = 0;
    for (int i = 0; i < count; i++, in_chunk++) {
        if (in_chunk == chunk) {
            // Chunk complete: emit its empty-count, start the next.
            output[out_idx] = TO_DDB(missing);
            out_idx++;
            missing = IS_SET(input[i]) ? 0 : 1;
            in_chunk = 0;
        } else {
            missing += IS_SET(input[i]) ? 0 : 1;
        }
    }

    // Slots beyond the input in a partial final chunk are empty.
    if (count % chunk) {
        missing += (chunk - (count % chunk));
    }
    output[out_idx] = TO_DDB(missing);

    return res_term;
}
/*  NIF: average each `chunk`-sized group of slots, where a slot may
    be unset (IS_SET false).  Unset slots are filled with the most
    recently seen value (last-observation-carried-forward via
    `last`/`has_last`); a chunk with no value at all emits 0.
    Values are decoded with FROM_DDB and results encoded with TO_DDB.
    NOTE(review): exact IS_SET/TO_DDB/FROM_DDB semantics are defined
    elsewhere — the gap-filling description above is inferred from
    this code's use of `last`; confirm against those macros. */
static ERL_NIF_TERM avg(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{
    ErlNifBinary bin;
    ERL_NIF_TERM r;
    ErlNifSInt64* vs;
    ErlNifSInt64* target;
    ErlNifSInt64 chunk;        // size to be compressed
    ErlNifSInt64 target_i = 0; // target position
    ErlNifSInt64 pos = 0;      // position in chunk
    ErlNifSInt64 aggr = 0;     // aggregated value for this chunk
    ErlNifSInt64 last = 0;     // most recent decoded value (for gap filling)
    int count;
    int has_value = 0;         // current chunk has at least one usable value
    int has_last = 0;          // `last` holds a real value from any chunk
    int target_size;

    if (argc != 2) return enif_make_badarg(env);

    GET_CHUNK(chunk);
    GET_BIN(0, bin, count, vs);

    target_size = ceil((double) count / chunk) * sizeof(ErlNifSInt64);

    if (! (target = (ErlNifSInt64*) enif_make_new_binary(env, target_size, &r)))
        return enif_make_badarg(env); // TODO return proper error

    // If we don't have any input data we can return right away.
    if (count == 0) return r;

    // We know we have at least one element in the list so our
    // aggregator will start with this (if it is set).
    if (IS_SET(vs[0])) {
        aggr = FROM_DDB(vs[0]);
        last = aggr;
        has_value = 1;
        has_last = 1;
    };
    pos = 1;

    // We iterate over the remaining i .. count-1 elements.
    for (int i = 1; i < count; i++, pos++) {
        if (pos == chunk) {
            // Chunk boundary: emit the finished chunk's average
            // (or 0 if the whole chunk was unset) ...
            if (has_value) {
                target[target_i] = TO_DDB(aggr / chunk);
            } else {
                target[target_i] = 0;
            }
            target_i++;
            // ... then seed the new chunk from vs[i], falling back
            // to the carried-forward `last` when vs[i] is unset.
            has_value = IS_SET(vs[i]);
            if (has_value) {
                aggr = FROM_DDB(vs[i]);
                last = aggr;
                has_last = 1;
            } else if (has_last) {
                aggr = last;
                has_value = 1;
            }
            pos = 0;
        } else if (!has_value) {
            // No usable value yet in this chunk: adopt the first set one.
            if (IS_SET(vs[i])) {
                aggr = FROM_DDB(vs[i]);
                last = aggr;
                has_value = 1;
                has_last = 1;
            };
        } else {
            // Accumulate; an unset slot contributes the previous value.
            if (IS_SET(vs[i])) {
                last = FROM_DDB(vs[i]);
                aggr += last;
                has_last = 1;
                has_value = 1;
            } else if (has_last) {
                aggr += last;
                has_value = 1;
            }
        }
    }

    // Flush the final (possibly partial) chunk, padding missing
    // slots with `last` before dividing.
    if (has_value) {
        if (count % chunk) {
            aggr += (last * (chunk - (count % chunk)));
        }
        target[target_i] = TO_DDB(aggr / chunk);
    } else {
        target[target_i] = 0;
    }
    return r;
}
int _dwarf_transform_arange_to_disk(Dwarf_P_Debug dbg, Dwarf_Signed *nbufs, Dwarf_Error * error) { /* Total num of bytes in .debug_aranges section. */ Dwarf_Unsigned arange_num_bytes = 0; /* Adjustment to align the start of the actual address ranges on a boundary aligned with twice the address size. */ Dwarf_Small remainder = 0; /* Total number of bytes excluding the length field. */ Dwarf_Unsigned adjusted_length = 0; /* Points to first byte of .debug_aranges buffer. */ Dwarf_Small *arange = 0; /* Fills in the .debug_aranges buffer. */ Dwarf_Small *arange_ptr = 0; /* Scans the list of address ranges provided by user. */ Dwarf_P_Arange given_arange = 0; /* Used to fill in 0. */ const Dwarf_Signed big_zero = 0; int extension_word_size = dbg->de_64bit_extension ? 4 : 0; int offset_size = dbg->de_offset_size; int upointer_size = dbg->de_pointer_size; /* All dwarf versions so far use 2 here. */ Dwarf_Half version = 2; int res = 0; /* ***** BEGIN CODE ***** */ /* Size of the .debug_aranges section header. */ arange_num_bytes = extension_word_size + offset_size + /* Size of length field. */ DWARF_HALF_SIZE + /* Size of version field. */ offset_size + /* Size of .debug_info offset. */ sizeof(Dwarf_Small) + /* Size of address size field. */ sizeof(Dwarf_Small); /* Size of segment size field. */ /* Adjust the size so that the set of aranges begins on a boundary that aligned with twice the address size. This is a Libdwarf requirement. */ remainder = arange_num_bytes % (2 * upointer_size); if (remainder != 0) arange_num_bytes += (2 * upointer_size) - remainder; /* Add the bytes for the actual address ranges. 
*/ arange_num_bytes += upointer_size * 2 * (dbg->de_arange_count + 1); GET_CHUNK(dbg, dbg->de_elf_sects[DEBUG_ARANGES], arange, (unsigned long) arange_num_bytes, error); arange_ptr = arange; if (extension_word_size) { Dwarf_Word x = DISTINGUISHED_VALUE; WRITE_UNALIGNED(dbg, (void *) arange_ptr, (const void *) &x, sizeof(x), extension_word_size); arange_ptr += extension_word_size; } /* Write the total length of .debug_aranges section. */ adjusted_length = arange_num_bytes - offset_size - extension_word_size; { Dwarf_Unsigned du = adjusted_length; WRITE_UNALIGNED(dbg, (void *) arange_ptr, (const void *) &du, sizeof(du), offset_size); arange_ptr += offset_size; } /* Write the version as 2 bytes. */ { Dwarf_Half verstamp = version; WRITE_UNALIGNED(dbg, (void *) arange_ptr, (const void *) &verstamp, sizeof(verstamp), DWARF_HALF_SIZE); arange_ptr += DWARF_HALF_SIZE; } /* Write the .debug_info offset. This is always 0. */ WRITE_UNALIGNED(dbg, (void *) arange_ptr, (const void *) &big_zero, sizeof(big_zero), offset_size); arange_ptr += offset_size; { unsigned long count = dbg->de_arange_count + 1; int res2 = 0; Dwarf_P_Per_Reloc_Sect p_reloc = &dbg->de_reloc_sect[DEBUG_ARANGES]; if (dbg->de_relocate_pair_by_symbol) { count = (3 * dbg->de_arange_count) + 1; } /* The following is a small optimization: not needed for correctness. Does nothing if preloc->pr_first_block is non-null */ res2 = _dwarf_pro_pre_alloc_specific_reloc_slots(dbg, p_reloc, count); if (res2 != DW_DLV_OK) { _dwarf_p_error(dbg, error, DW_DLE_ALLOC_FAIL); return DW_DLV_ERROR; } } /* reloc for .debug_info */ res = dbg->de_relocate_by_name_symbol(dbg, DEBUG_ARANGES, extension_word_size + offset_size + DWARF_HALF_SIZE, dbg->de_sect_name_idx[DEBUG_INFO], dwarf_drt_data_reloc, offset_size); /* Write the size of addresses. */ *arange_ptr = dbg->de_pointer_size; arange_ptr++; /* Write the size of segment addresses. This is zero for MIPS architectures. 
*/ *arange_ptr = 0; arange_ptr++; /* Skip over the padding to align the start of the actual address ranges to twice the address size. */ if (remainder != 0) arange_ptr += (2 * upointer_size) - remainder; /* The arange address, length are pointer-size fields of the target machine. */ for (given_arange = dbg->de_arange; given_arange != NULL; given_arange = given_arange->ag_next) { /* Write relocation record for beginning of address range. */ res = dbg->de_relocate_by_name_symbol(dbg, DEBUG_ARANGES, arange_ptr - arange, /* r_offset */ (long) given_arange->ag_symbol_index, dwarf_drt_data_reloc, upointer_size); if (res != DW_DLV_OK) { _dwarf_p_error(dbg, error, DW_DLE_ALLOC_FAIL); return DW_DLV_ERROR; } /* Copy beginning address of range. */ WRITE_UNALIGNED(dbg, (void *) arange_ptr, (const void *) &given_arange->ag_begin_address, sizeof(given_arange->ag_begin_address), upointer_size); arange_ptr += upointer_size; if (dbg->de_relocate_pair_by_symbol && given_arange->ag_end_symbol_index != 0 && given_arange->ag_length == 0) { /* symbolic reloc, need reloc for length What if we really know the length? If so, should use the other part of 'if'. 
*/ Dwarf_Unsigned val; res = dbg->de_relocate_pair_by_symbol(dbg, DEBUG_ARANGES, arange_ptr - arange, /* r_offset */ given_arange->ag_symbol_index, given_arange->ag_end_symbol_index, dwarf_drt_first_of_length_pair, upointer_size); if (res != DW_DLV_OK) { _dwarf_p_error(dbg, error, DW_DLE_ALLOC_FAIL); return DW_DLV_ERROR; } /* arange pre-calc so assem text can do .word end - begin + val (gets val from stream) */ val = given_arange->ag_end_symbol_offset - given_arange->ag_begin_address; WRITE_UNALIGNED(dbg, (void *) arange_ptr, (const void *) &val, sizeof(val), upointer_size); arange_ptr += upointer_size; } else { /* plain old length to copy, no relocation at all */ WRITE_UNALIGNED(dbg, (void *) arange_ptr, (const void *) &given_arange->ag_length, sizeof(given_arange->ag_length), upointer_size); arange_ptr += upointer_size; } } WRITE_UNALIGNED(dbg, (void *) arange_ptr, (const void *) &big_zero, sizeof(big_zero), upointer_size); arange_ptr += upointer_size; WRITE_UNALIGNED(dbg, (void *) arange_ptr, (const void *) &big_zero, sizeof(big_zero), upointer_size); *nbufs = dbg->de_n_debug_sect; return DW_DLV_OK; }
/*  Ensure each relocation stream is a single buffer and add that
    single buffer to the set of stream buffers, by creating a new
    buffer and copying if necessary.  Frees the input set of
    buffers when consolidating.
    Return DW_DLV_OK on success and set *new_sec_count to the
    number of relocation sections emitted.
    Return DW_DLV_ERROR on failure (section-creation callback
    error); the previous comment's claim of returning -1 was wrong.

    Fixes: the inner callback error variable shadowed the outer
    Dwarf_Error `err` (live via `error`), and the per-block length
    shadowed the outer `len` — both renamed for clarity; behavior
    is unchanged. */
int
_dwarf_stream_relocs_to_disk(Dwarf_P_Debug dbg,
    Dwarf_Signed * new_sec_count)
{
    unsigned long total_size = 0;
    Dwarf_Small *data = 0;
    int sec_index = 0;
    unsigned long i = 0;
    Dwarf_Error err = 0;
    Dwarf_Error *error = &err;
    Dwarf_Signed sec_count = 0;
    Dwarf_P_Per_Reloc_Sect p_reloc = &dbg->de_reloc_sect[0];

    for (i = 0; i < NUM_DEBUG_SECTIONS; ++i, ++p_reloc) {
        unsigned long ct = p_reloc->pr_reloc_total_count;
        unsigned len = 0;
        struct Dwarf_P_Relocation_Block_s *p_blk = 0;
        struct Dwarf_P_Relocation_Block_s *p_blk_last = 0;
        Dwarf_P_Per_Reloc_Sect prb = 0;

        if (ct == 0) {
            /* No relocations for this section: nothing to emit. */
            continue;
        }
        prb = &dbg->de_reloc_sect[i];
        len = dbg->de_relocation_record_size;
        ++sec_count;

        total_size = ct * len;

        sec_index = prb->pr_sect_num_of_reloc_sect;
        if (sec_index == 0) {
            /*  Call de_func or de_func_b, getting section number
                of reloc sec. */
            int rel_section_index = 0;
            Dwarf_Unsigned name_idx = 0;
            int int_name = 0;
            int cb_err = 0;     /* callback error; distinct from outer err */

            if (dbg->de_func_b) {
                rel_section_index =
                    dbg->de_func_b(_dwarf_rel_section_names[i],
                        /* size */ dbg->de_relocation_record_size,
                        /* type */ SHT_REL,
                        /* flags */ 0,
                        /* link to symtab, which we cannot know */ 0,
                        /* info == link to sec rels apply to */
                        dbg->de_elf_sects[i],
                        &name_idx, &cb_err);
            } else {
                rel_section_index =
                    dbg->de_func(_dwarf_rel_section_names[i],
                        /* size */ dbg->de_relocation_record_size,
                        /* type */ SHT_REL,
                        /* flags */ 0,
                        /* link to symtab, which we cannot know */ 0,
                        /* info == link to sec rels apply to */
                        dbg->de_elf_sects[i],
                        &int_name, &cb_err);
                name_idx = int_name;
            }
            if (rel_section_index == -1) {
                _dwarf_p_error(dbg, error, DW_DLE_ELF_SECT_ERR);
                return (DW_DLV_ERROR);
            }
            prb->pr_sect_num_of_reloc_sect = rel_section_index;
            sec_index = rel_section_index;
        }
        GET_CHUNK(dbg, sec_index, data, total_size, &err);

        p_blk = p_reloc->pr_first_block;

        /*  Following loop executes at least once.  Effects the
            consolidation to a single block or, if already a single
            block, simply copies to the output buffer.  And frees
            the input block.  The new block is in the
            de_debug_sects list. */
        while (p_blk) {
            unsigned long blk_len =
                p_blk->rb_where_to_add_next - p_blk->rb_data;

            memcpy(data, p_blk->rb_data, blk_len);
            data += blk_len;

            p_blk_last = p_blk;
            p_blk = p_blk->rb_next;

            _dwarf_p_dealloc(dbg, (Dwarf_Small *) p_blk_last);
        }
        /* ASSERT: sum of blk_len copied == total_size */

        /*  We have copied the input, now drop the pointers to it.
            For debugging, leave the other data untouched. */
        p_reloc->pr_first_block = 0;
        p_reloc->pr_last_block = 0;
    }
    *new_sec_count = sec_count;
    return DW_DLV_OK;
}
/*  _dwarf_transform_simplename_to_disk writes
    ".rel.debug_pubnames",
    ".rel.debug_funcnames", sgi extension
    ".rel.debug_typenames", sgi extension
    ".rel.debug_varnames",  sgi extension
    ".rel.debug_weaknames", sgi extension
    to disk.
    section_index indexes one of those sections.
    entrykind is one of those 'kind's.
    Returns the current number of debug sections, or 0 on failure
    (with *error set). */
int
_dwarf_transform_simplename_to_disk(Dwarf_P_Debug dbg,
    enum dwarf_sn_kind entrykind,
    int section_index, /* in de_elf_sects etc */
    Dwarf_Error * error)
{
    /* Used to fill in 0. */
    const Dwarf_Signed big_zero = 0;

    /* Used to scan the section data buffers. */
    Dwarf_P_Section_Data debug_sect;

    Dwarf_Signed debug_info_size;

    Dwarf_P_Simple_nameentry nameentry_original;
    Dwarf_P_Simple_nameentry nameentry;
    Dwarf_Small *stream_bytes;
    Dwarf_Small *cur_stream_bytes_ptr;
    Dwarf_Unsigned stream_bytes_count;
    Dwarf_Unsigned adjusted_length; /* count excluding length field */

    int uword_size = dbg->de_offset_size;
    int extension_size = dbg->de_64bit_extension ? 4 : 0;

    Dwarf_P_Simple_name_header hdr;

    /* ***** BEGIN CODE ***** */

    debug_info_size = 0;
    for (debug_sect = dbg->de_debug_sects; debug_sect != NULL;
        debug_sect = debug_sect->ds_next) {
        /*  We want the size of the .debug_info section for this CU
            because the dwarf spec requires us to output it below
            so we look for it specifically. */
        if (debug_sect->ds_elf_sect_no ==
            dbg->de_elf_sects[DEBUG_INFO]) {
            debug_info_size += debug_sect->ds_nbytes;
        }
    }

    hdr = &dbg->de_simple_name_headers[entrykind];
    /* Size of the .debug_typenames (or similar) section header. */
    stream_bytes_count = extension_size +
        uword_size +            /* Size of length field. */
        sizeof(Dwarf_Half) +    /* Size of version field. */
        uword_size +            /* Size of .debug_info offset. */
        uword_size;             /* Size of .debug_names. */

    nameentry_original = hdr->sn_head;
    nameentry = nameentry_original;
    /*  Add in the content size: (offset + name + NUL) for every
        entry, pre-summed in sn_net_len. */
    stream_bytes_count += hdr->sn_net_len;

    /* Size of the last 0 offset. */
    stream_bytes_count += uword_size;

    /* Now we know how long the entire section is. */
    GET_CHUNK(dbg, dbg->de_elf_sects[section_index],
        stream_bytes, (unsigned long) stream_bytes_count, error);
    if (stream_bytes == NULL) {
        _dwarf_p_error(dbg, error, DW_DLE_ALLOC_FAIL);
        return (0);
    }
    cur_stream_bytes_ptr = stream_bytes;

    if (extension_size) {
        /* 64-bit DWARF: emit the distinguished length escape. */
        Dwarf_Unsigned x = DISTINGUISHED_VALUE;

        WRITE_UNALIGNED(dbg, cur_stream_bytes_ptr,
            (const void *) &x, sizeof(x), extension_size);
        cur_stream_bytes_ptr += extension_size;
    }
    /* Write the adjusted length of .debug_*names section. */
    adjusted_length = stream_bytes_count - uword_size
        - extension_size;
    WRITE_UNALIGNED(dbg, cur_stream_bytes_ptr,
        (const void *) &adjusted_length,
        sizeof(adjusted_length), uword_size);
    cur_stream_bytes_ptr += uword_size;

    /* Write the version as 2 bytes. */
    {
        Dwarf_Half verstamp = CURRENT_VERSION_STAMP;

        WRITE_UNALIGNED(dbg, cur_stream_bytes_ptr,
            (const void *) &verstamp,
            sizeof(verstamp), sizeof(Dwarf_Half));
        cur_stream_bytes_ptr += sizeof(Dwarf_Half);
    }

    /* Write the offset of the compile-unit.  Always 0 here;
       relocated below. */
    WRITE_UNALIGNED(dbg, cur_stream_bytes_ptr,
        (const void *) &big_zero, sizeof(big_zero), uword_size);
    cur_stream_bytes_ptr += uword_size;

    /* Now create the relocation for the compile_unit offset. */
    {
        int res = dbg->de_reloc_name(dbg, section_index,
            extension_size + uword_size +
            sizeof(Dwarf_Half) /* r_offset */ ,
            /* debug_info section name symbol */
            dbg->de_sect_name_idx[DEBUG_INFO],
            dwarf_drt_data_reloc, uword_size);

        if (res != DW_DLV_OK) {
            {
                _dwarf_p_error(dbg, error, DW_DLE_ALLOC_FAIL);
                return (0);
            }
        }
    }

    /* Write the size of .debug_info section. */
    WRITE_UNALIGNED(dbg, cur_stream_bytes_ptr,
        (const void *) &debug_info_size,
        sizeof(debug_info_size), uword_size);
    cur_stream_bytes_ptr += uword_size;

    for (nameentry = nameentry_original; nameentry != NULL;
        nameentry = nameentry->sne_next) {

        /* Copy offset of die from start of compile-unit. */
        WRITE_UNALIGNED(dbg, cur_stream_bytes_ptr,
            (const void *) &nameentry->sne_die->di_offset,
            sizeof(nameentry->sne_die->di_offset), uword_size);
        cur_stream_bytes_ptr += uword_size;

        /*  Copy the type name.  NOTE(review): strcpy is unbounded
            here; presumably safe because sn_net_len (used to size
            the buffer) already accounts for sne_name_len + 1 per
            entry — confirm at the point sn_net_len is built. */
        strcpy((char *) cur_stream_bytes_ptr, nameentry->sne_name);
        cur_stream_bytes_ptr += nameentry->sne_name_len + 1;
    }

    /* Terminating zero offset. */
    WRITE_UNALIGNED(dbg, cur_stream_bytes_ptr,
        (const void *) &big_zero, sizeof(big_zero), uword_size);

    return (int) dbg->de_n_debug_sect;
}