/*
 * Scan all partially initialized symbols to determine what output
 * Elf64_Move sections, or partially expanded data section, must be
 * created.
 */
static uintptr_t
make_mvsections(Ofl_desc *ofl)
{
	size_t		ndx;
	Sym_desc	*sdp;
	Elf64_Word	movecnt = 0;	/* number of move entries required */
	Elf64_Xword	expand_align = 0; /* -z nopartial .data alignment */
	size_t		expand_size = 0; /* -z nopartial .data size */

	/*
	 * Walk the partially initialized symbols, sizing the output move
	 * section and the expansion data section.
	 */
	for (APLIST_TRAVERSE(ofl->ofl_parsyms, ndx, sdp)) {
		Elf64_Sym	*sym;
		Elf64_Xword	algn;

		if ((sdp->sd_flags & FLG_SY_PAREXPN) == 0) {
			/* Symbol keeps its move entries - count them. */
			movecnt += alist_nitems(sdp->sd_move);
			continue;
		}

		/*
		 * This global symbol is redirected to the special
		 * partial initialization .data section.  For an
		 * SHN_COMMON symbol, st_value supplies the alignment
		 * candidate; otherwise a default of 8 is used.
		 */
		sym = sdp->sd_sym;
		algn = (sym->st_shndx == SHN_COMMON) ? sym->st_value : 8;

		expand_size = (size_t)S_ROUND(expand_size,
		    sym->st_value) + sym->st_size;
		if (expand_align < algn)
			expand_align = algn;
	}

	/*
	 * Generate a new Elf64_Move section.
	 */
	if (movecnt && (ld_make_sunwmove(ofl, movecnt) == S_ERROR))
		return (S_ERROR);

	/*
	 * Add empty area for partially initialized symbols.  A special
	 * .data section is created when the '-z nopartial' option is in
	 * effect, in order to receive the expanded data.
	 */
	if (expand_size &&
	    (ld_make_parexpn_data(ofl, expand_size,
	    expand_align) == S_ERROR))
		return (S_ERROR);

	return (1);
}
/* * Output a block of raw data as hex bytes. Each row is given * the index of the first byte in the row. * * entry: * data - Pointer to first byte of data to be displayed * n - # of bytes of data * prefix - String to be output before each line. Useful * for indenting output. * bytes_per_col - # of space separated bytes to output * in each column. * col_per_row - # of columns to output per row * * exit: * The formatted data has been sent to stdout. Each row of output * shows (bytes_per_col * col_per_row) bytes of data. */ void dump_hex_bytes(const void *data, size_t n, int indent, int bytes_per_col, int col_per_row) { const uchar_t *ldata = data; int bytes_per_row = bytes_per_col * col_per_row; int ndx, byte, word; char string[128], *str = string; char index[MAXNDXSIZE]; int index_width; int sp_prefix = 0; /* * Determine the width to use for the index string. We follow * 8-byte tab rules, but don't use an actual \t character so * that the output can be arbitrarily shifted without odd * tab effects, and so that all the columns line up no matter * how many lines of output are produced. 
*/ ndx = n / bytes_per_row; (void) snprintf(index, sizeof (index), MSG_ORIG(MSG_FMT_INDEX2), EC_WORD(ndx)); index_width = strlen(index); index_width = S_ROUND(index_width, 8); for (ndx = byte = word = 0; n > 0; n--, ldata++) { while (sp_prefix-- > 0) *str++ = ' '; (void) snprintf(str, sizeof (string), MSG_ORIG(MSG_HEXDUMP_TOK), (int)*ldata); str += 2; sp_prefix = 1; if (++byte == bytes_per_col) { sp_prefix += 2; word++; byte = 0; } if (word == col_per_row) { *str = '\0'; (void) snprintf(index, sizeof (index), MSG_ORIG(MSG_FMT_INDEX2), EC_WORD(ndx)); dbg_print(0, MSG_ORIG(MSG_HEXDUMP_ROW), indent, MSG_ORIG(MSG_STR_EMPTY), index_width, index, string); sp_prefix = 0; word = 0; ndx += bytes_per_row; str = string; } } if (byte || word) { *str = '\0'; /* */ (void) snprintf(index, sizeof (index), MSG_ORIG(MSG_FMT_INDEX2), EC_WORD(ndx)); dbg_print(0, MSG_ORIG(MSG_HEXDUMP_ROW), indent, MSG_ORIG(MSG_STR_EMPTY), index_width, index, string); } }
/*
 * Track any static TLS use, retain the TLS header, and assign a TLS module
 * identifier.
 *
 * entry:
 *	lml - link-map list that lmp belongs to
 *	lmp - link-map of the object providing the PT_TLS segment
 *	phdr - that object's PT_TLS program header
 *
 * exit:
 *	Returns 0 on failure, after printing a fatal error.  On success
 *	the object has been assigned a TLS module id (and a static TLS
 *	offset where applicable), and the incremented count of TLS
 *	objects on this link-map list is returned.
 */
int
tls_assign(Lm_list *lml, Rt_map *lmp, Phdr *phdr)
{
	/* Memory image rounded up to the static TLS alignment unit. */
	ulong_t	memsz = S_ROUND(phdr->p_memsz, M_TLSSTATALIGN);
	ulong_t	filesz = phdr->p_filesz;
	/*
	 * Snapshot of the backup reservation on entry, so that any
	 * consumption by this object can be diagnosed before returning.
	 */
	ulong_t	resv = tls_static_resv;

	/*
	 * If this object explicitly references static TLS, then there are some
	 * limitations.
	 */
	if (FLAGS1(lmp) & FL1_RT_TLSSTAT) {
		/*
		 * Static TLS is only available to objects on the primary
		 * link-map list.
		 */
		if (((lml->lm_flags & LML_FLG_BASELM) == 0) ||
		    ((rtld_flags2 & RT_FL2_NOPLM) != 0)) {
			eprintf(lml, ERR_FATAL, MSG_INTL(MSG_TLS_STATBASE),
			    NAME(lmp));
			return (0);
		}

		/*
		 * All TLS blocks that are processed before thread
		 * initialization, are registered with libc.  This
		 * initialization is carried out through a handshake with libc
		 * prior to executing any user code (ie. before the first .init
		 * sections are called).  As part of this initialization, a
		 * small backup TLS reservation is added (tls_static_resv).
		 * Only explicit static TLS references that can be satisfied by
		 * this TLS backup reservation can be satisfied.
		 */
		if (rtld_flags2 & RT_FL2_PLMSETUP) {
			/*
			 * Initialized static TLS can not be satisfied from the
			 * TLS backup reservation.
			 */
			if (filesz) {
				eprintf(lml, ERR_FATAL,
				    MSG_INTL(MSG_TLS_STATINIT), NAME(lmp));
				return (0);
			}

			/*
			 * Make sure the backup reservation is sufficient.
			 */
			if (memsz > tls_static_resv) {
				eprintf(lml, ERR_FATAL,
				    MSG_INTL(MSG_TLS_STATSIZE), NAME(lmp),
				    EC_XWORD(memsz),
				    EC_XWORD(tls_static_resv));
				return (0);
			}
			/* Consume part of the backup reservation. */
			tls_static_resv -= memsz;
		}
	}

	/*
	 * If we haven't yet initialized threads, or this static reservation can
	 * be satisfied from the TLS backup reservation, determine the total
	 * static TLS size, and assign this object a static TLS offset.
	 */
	if (((rtld_flags2 & RT_FL2_PLMSETUP) == 0) ||
	    (FLAGS1(lmp) & FL1_RT_TLSSTAT)) {
		tls_static_size += memsz;
		TLSSTATOFF(lmp) = tls_static_size;
	}

	/*
	 * Retain the PT_TLS header, obtain a new module identifier, and
	 * indicate that this link-map list contains a new TLS object.
	 */
	PTTLS(lmp) = phdr;
	TLSMODID(lmp) = tls_getmodid();

	/*
	 * Now that we have a TLS module id, generate any static TLS reservation
	 * diagnostic.
	 */
	if (resv != tls_static_resv)
		DBG_CALL(Dbg_tls_static_resv(lmp, memsz, tls_static_resv));

	return (++lml->lm_tls);
}
/*
 * Insert an item into an Alist at a specified index.
 *
 * entry:
 *	lpp - Address of a pointer to an Alist.  *lpp should be NULL
 *		before the first insertion; this routine allocates and
 *		grows the list, updating *lpp as the list moves.
 *	datap - If non-NULL, points to data that is copied into the new
 *		item.  If NULL, the new item is zero filled and further
 *		initialization is left to the caller.
 *	size - Size of one list item, in bytes.  Every call for a given
 *		list must supply the same size.
 *	init_arritems - Number of items for which room is allocated on
 *		the first insertion into the list.
 *	idx - Index at which to insert the new item.  Must lie within
 *		the existing list, or be the next index following it.
 *
 * exit:
 *	On success, a pointer to the inserted item is returned;
 *	on allocation failure, NULL is returned and the existing list
 *	(if any) is left intact.  The returned pointer is only valid
 *	until the next call into this module for the given list, since
 *	any allocation or reallocation can move the list.
 */
void *
alist_insert(Alist **lpp, const void *datap, size_t size,
    Aliste init_arritems, Aliste idx)
{
	Alist	*alp = *lpp;
	char	*item;

	/* A zero item size or initial reservation makes no sense. */
	ASSERT(size != 0);
	ASSERT(init_arritems != 0);

	if (alp == NULL) {
		/*
		 * First insertion: allocate the list header plus room for
		 * init_arritems items, rounded up for pointer alignment.
		 * Alist declares its al_desc[] entry with one element, but
		 * we allocate however many we actually need.
		 */
		Aliste	nbytes = ALIST_OFF_DATA +
		    S_ROUND(size * init_arritems, sizeof (void *));

		if ((alp = malloc((size_t)nbytes)) == NULL)
			return (NULL);
		alp->al_size = size;
		alp->al_arritems = init_arritems;
		alp->al_nitems = 0;
		alp->al_next = ALIST_OFF_DATA;
		*lpp = alp;
	} else {
		/* The item size must agree with every earlier call. */
		ASSERT(size == alp->al_size);

		if (alp->al_nitems >= alp->al_arritems) {
			/*
			 * The list is full: double the allocation.  A
			 * temporary pointer keeps the original list valid
			 * if realloc() fails.
			 */
			Alist	*nlp;
			Aliste	nbytes = ALIST_OFF_DATA + S_ROUND(
			    alp->al_size * alp->al_arritems * 2,
			    sizeof (void *));

			if ((nlp = realloc((void *)alp,
			    (size_t)nbytes)) == NULL)
				return (NULL);
			alp = nlp;
			alp->al_arritems *= 2;
			*lpp = alp;
		}
	}

	/*
	 * The caller is not supposed to use an index that would
	 * introduce a "hole" in the array.
	 */
	ASSERT(idx <= alp->al_nitems);

	item = (char *)alp->al_data + (idx * alp->al_size);

	/*
	 * An append lands in the next available slot; an insert anywhere
	 * else must first shift the items at and beyond idx down one slot
	 * to open a gap.
	 */
	if (idx < alp->al_nitems)
		(void) memmove(item + alp->al_size, item,
		    (alp->al_nitems - idx) * alp->al_size);

	alp->al_nitems++;
	alp->al_next += alp->al_size;

	if (datap == NULL)
		(void) memset(item, 0, alp->al_size);
	else
		(void) memcpy(item, datap, alp->al_size);

	return (item);
}