Example No. 1
/*
 * la_activity() caller.  Traverse through all audit libraries and call any
 * la_activity() entry points found.
 */
static void
_audit_activity(APlist *list, Rt_map *clmp, uint_t flags, Boolean client)
{
	Audit_list	*alp;
	Aliste		idx;
	Lm_list		*clml = LIST(clmp);

	for (APLIST_TRAVERSE(list, idx, alp)) {
		Audit_client	*acp;
		Rt_map		*almp = alp->al_lmp;
		Lm_list		*alml = LIST(almp);
		uintptr_t	*cookie;

		if (alp->al_activity == 0)
			continue;

		/*
		 * Determine what cookie is required.  Any auditing that
		 * originates from the object that heads the link-map list has
		 * its own cookie.  Local auditors must obtain the cookie that
		 * represents the object that heads the link-map list.
		 */
		if (client)
			acp = _audit_client(AUDINFO(clmp), almp);
		else
			acp = _audit_get_head_client(clml->lm_head, almp);

		if (acp == NULL)
			continue;
		cookie = &(acp->ac_cookie);

		/*
		 * Make sure the audit library only sees one addition/deletion
		 * at a time.  This ensures the library doesn't see numerous
		 * events from lazy loading a series of libraries.  Keep track
		 * of this caller having called an auditor, so that the
		 * appropriate "consistent" event can be supplied on leaving
		 * ld.so.1.
		 */
		if ((flags == LA_ACT_ADD) || (flags == LA_ACT_DELETE)) {
			if (alml->lm_flags & LML_FLG_AUDITNOTIFY)
				continue;

			alml->lm_flags |= LML_FLG_AUDITNOTIFY;
			clml->lm_flags |= LML_FLG_ACTAUDIT;
		} else {
			if ((alml->lm_flags & LML_FLG_AUDITNOTIFY) == 0)
				continue;

			alml->lm_flags &= ~LML_FLG_AUDITNOTIFY;
		}

		DBG_CALL(Dbg_audit_activity(clml, alp->al_libname,
		    NAME(clml->lm_head), flags));

		leave(alml, thr_flg_reenter);
		(*alp->al_activity)(cookie, flags);
		(void) enter(thr_flg_reenter);
	}
}
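The APLIST_TRAVERSE() idiom used throughout these examples expands into the three clauses of a for statement and walks an append-only array of pointers. The following is a minimal, self-contained sketch of that idea; the names my_aplist_t, MY_APLIST_TRAVERSE, and count_nonnull are invented for illustration, and the real APlist/Aliste/APLIST_TRAVERSE definitions in the sgs alist headers differ in detail.

#include <stddef.h>

typedef struct {
	size_t	apl_nitems;	/* pointers currently in use */
	size_t	apl_arritems;	/* pointers allocated */
	void	*apl_data[];	/* the pointer array itself */
} my_aplist_t;

/*
 * Expand into the three clauses of a for(), binding each stored pointer to
 * PTR in turn.  A NULL list is treated as empty.
 */
#define	MY_APLIST_TRAVERSE(LIST, IDX, PTR) \
	(IDX) = 0; \
	((LIST) != NULL) && ((IDX) < (LIST)->apl_nitems) && \
	    (((PTR) = (LIST)->apl_data[(IDX)]), 1); \
	(IDX)++

static int
count_nonnull(my_aplist_t *list)
{
	size_t	idx;
	void	*item;
	int	n = 0;

	for (MY_APLIST_TRAVERSE(list, idx, item)) {
		if (item != NULL)
			n++;
	}
	return (n);
}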
Example No. 2
void
audit_activity(Rt_map *clmp, uint_t flags)
{
	Rt_map	*lmp;
	Aliste	idx;
	uint_t	rtldflags;

	if (rt_critical())
		return;

	APPLICATION_ENTER(rtldflags);

	if (auditors && (auditors->ad_flags & LML_TFLG_AUD_ACTIVITY))
		_audit_activity(auditors->ad_list, clmp, flags, TRUE);
	if (AUDITORS(clmp) &&
	    (AUDITORS(clmp)->ad_flags & LML_TFLG_AUD_ACTIVITY))
		_audit_activity(AUDITORS(clmp)->ad_list, clmp, flags, TRUE);

	for (APLIST_TRAVERSE(aud_activity, idx, lmp)) {
		if ((clmp != lmp) && AUDITORS(lmp) &&
		    (AUDITORS(lmp)->ad_flags & LML_TFLG_AUD_ACTIVITY)) {
			_audit_activity(AUDITORS(lmp)->ad_list, lmp, flags,
			    FALSE);
		}
	}

	APPLICATION_RETURN(rtldflags);
}
Example No. 3
/*
 * Return the PT_SUNWSTACK segment descriptor from the ofl_segs list.
 * This segment is part of the default set and cannot be removed, so
 * this routine will always succeed.
 *
 * exit:
 *	The descriptor is located, a DBG_STATE_MOD_BEFORE debug
 *	message issued, the FLG_SG_DISABLED flag is cleared, and the
 *	descriptor pointer returned.
 */
Sg_desc *
ld_map_seg_stack(Mapfile *mf)
{
	Ofl_desc	*ofl = mf->mf_ofl;
	Sg_desc		*sgp;
	size_t		idx;

	/*
	 * The stack is established by exec(), using the executable's program
	 * headers, before any sharable objects are loaded. If there is a
	 * PT_SUNWSTACK program header, exec() will act on it. As such, stack
	 * program headers are normally only applicable to executables.
	 *
	 * However, ELF allows a sharable object with an interpreter to
	 * be executed directly, and in this extremely rare case, the
	 * PT_SUNWSTACK program header would have meaning. Rather than
	 * second guess user intent, we simply create it on demand for any
	 * dynamic object, trusting that the user has a good reason for it.
	 */
	for (APLIST_TRAVERSE(ofl->ofl_segs, idx, sgp))
		if (sgp->sg_phdr.p_type == PT_SUNWSTACK) {
			//DBG_CALL(Dbg_map_seg(mf->mf_ofl, DBG_STATE_MOD_BEFORE,
			//    idx, sgp, mf->mf_lineno));
			sgp->sg_flags &= ~FLG_SG_DISABLED;
			return (sgp);
		}

	/*NOTREACHED*/
	return (NULL);
}
Example No. 4
static Ver_index *
vers_index(Ofl_desc *ofl, Ifl_desc *ifl, int avail)
{
	size_t		idx1;
	Ver_desc	*vdp;
	Ver_index	*vip;
	Sdf_desc	*sdf = ifl->ifl_sdfdesc;
	Elf64_Word		count = ifl->ifl_vercnt;
	Sdv_desc	*sdv;

	/*
	 * Allocate an index array large enough to hold all of the file's
	 * version descriptors.
	 */
	if ((vip = libld_calloc(sizeof (Ver_index), (count + 1))) == NULL)
		return ((Ver_index *)S_ERROR);

	for (APLIST_TRAVERSE(ifl->ifl_verdesc, idx1, vdp)) {
		int	ndx = vdp->vd_ndx;

		vip[ndx].vi_name = vdp->vd_name;
		vip[ndx].vi_desc = vdp;

		/*
		 * Any relocatable object versions, and the `base' version are
		 * always available.
		 */
		if (avail || (vdp->vd_flags & VER_FLG_BASE))
			vip[ndx].vi_flags |= FLG_VER_AVAIL;

		/*
		 * If this is a weak version mark it as such.  Weak versions
		 * are always dragged into any version dependencies created,
		 * and if a weak version is referenced it will be promoted to
		 * a non-weak version dependency.
		 */
		if (vdp->vd_flags & VER_FLG_WEAK)
			vip[ndx].vi_flags |= VER_FLG_WEAK;
		/*
		 * If this version is mentioned in a mapfile using ADDVERS
		 * syntax then check to see if it corresponds to an actual
		 * version in the file.
		 */
		if (sdf && (sdf->sdf_flags & FLG_SDF_ADDVER)) {
			size_t	idx2;

			for (ALIST_TRAVERSE(sdf->sdf_verneed, idx2, sdv)) {
				if (strcmp(vip[ndx].vi_name, sdv->sdv_name))
					continue;

				vip[ndx].vi_flags |= FLG_VER_REFER;
				sdv->sdv_flags |= FLG_SDV_MATCHED;
				break;
			}
		}
	}
Example No. 5
/*
 * Scan all partially initialized symbols to determine what output Elf64_Move
 * sections or partially expanded data sections must be created.
 */
static uintptr_t
make_mvsections(Ofl_desc *ofl)
{
	size_t		idx;
	Sym_desc	*sdp;
	Elf64_Word 	mv_nums = 0;
	Elf64_Xword	align_parexpn = 0;	/* for -z nopartial .data sec */
	size_t		size_parexpn = 0;	/* size of parexpn section */

	/*
	 * Compute the size of the output move section
	 */
	for (APLIST_TRAVERSE(ofl->ofl_parsyms, idx, sdp)) {
		if (sdp->sd_flags & FLG_SY_PAREXPN) {
			Elf64_Sym *sym = sdp->sd_sym;
			Elf64_Xword align_val;

			if (sym->st_shndx == SHN_COMMON)
				align_val = sym->st_value;
			else
				align_val = 8;

			/*
			 * This global symbol is redirected to the special
			 * partial initialization .data section.
			 */
			size_parexpn = (size_t)S_ROUND(size_parexpn,
			    sym->st_value) + sym->st_size;
			if (align_val > align_parexpn)
				align_parexpn = align_val;

		} else {
			mv_nums += alist_nitems(sdp->sd_move);
		}
	}

	/*
	 * Generate a new Elf64_Move section.
	 */
	if (mv_nums && (ld_make_sunwmove(ofl, mv_nums) == S_ERROR))
		return (S_ERROR);

	/*
	 * Add empty area for partially initialized symbols.
	 *
	 * A special .data section is created when the '-z nopartial'
	 * option is in effect in order to receive the expanded data.
	 */
	if (size_parexpn) {
		if (ld_make_parexpn_data(ofl, size_parexpn,
		    align_parexpn) == S_ERROR)
			return (S_ERROR);
	}
	return (1);
}
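The expansion-section sizing above pads the running size (via S_ROUND) before adding each symbol's size. A minimal round-up helper of the kind that arithmetic relies on, assuming power-of-two alignments, is sketched below; it is an illustration, not the actual sgs S_ROUND macro.

#include <stddef.h>

/*
 * Round value up to the next multiple of align, where align is assumed to
 * be a power of two (0 or 1 meaning no alignment constraint).
 */
static size_t
round_up(size_t value, size_t align)
{
	if (align <= 1)
		return (value);
	return ((value + align - 1) & ~(align - 1));
}

For example, with a running size of 10 bytes and an 8-byte aligned, 16-byte symbol, the section grows to round_up(10, 8) + 16 = 32 bytes.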
Example No. 6
/*
 * Determine whether an auditor is in use by the head link-map object.
 */
static int
_audit_used_by_head(Rt_map *hlmp, Rt_map *almp)
{
	Audit_list	*alp;
	Aliste		idx;

	for (APLIST_TRAVERSE(AUDITORS(hlmp)->ad_list, idx, alp)) {
		if (alp->al_lmp == almp)
			return (1);
	}
	return (0);
}
Example No. 7
/*
 * Determine any la_objclose() requirements.  An object that is about to be
 * deleted needs to trigger an la_objclose() event to any associated auditors.
 * In the case of local auditing, a deleted object may have a number of callers,
 * and each of these callers may have their own auditing requirements.  To
 * ensure only one la_objclose() event is sent to each auditor, collect the
 * auditors from any callers and make sure there's no duplication.
 */
inline static void
add_objclose_list(Rt_map *lmp, APlist **alpp)
{
	if (AFLAGS(lmp) & LML_TFLG_AUD_OBJCLOSE) {
		Audit_list	*alp;
		Aliste		idx;

		for (APLIST_TRAVERSE(AUDITORS(lmp)->ad_list, idx, alp)) {
			if (aplist_test(alpp, alp, AL_CNT_AUDITORS) == 0)
				return;
		}
	}
Example No. 8
/*
 * Determine whether an address is the recipient of a move record.  The
 * candidate addresses are taken from the file-scope move-address list (alp),
 * which is maintained elsewhere and not shown in this extract.
 * Returns 1 if the address matches a move symbol, 0 otherwise.
 */
int
is_move_data(caddr_t addr)
{
	caddr_t	maddr;
	Aliste	idx;

	for (APLIST_TRAVERSE(alp, idx, maddr)) {
		if (addr == maddr)
			return (1);
	}
	return (0);
}
Example No. 9
/*
 * Given a segment descriptor, return its index.
 *
 * entry:
 *	mf - Mapfile descriptor
 *	sgp - Segment for which index is desired
 *
 * exit:
 *	Index of segment is returned.
 */
Elf64_Xword
ld_map_seg_index(Mapfile *mf, Sg_desc *sgp)
{
	size_t		idx;
	Sg_desc		*sgp2;
	Ofl_desc	*ofl = mf->mf_ofl;

	for (APLIST_TRAVERSE(ofl->ofl_segs, idx, sgp2))
		if (sgp == sgp2)
			break;

	return (idx);
}
Example No. 10
/*
 * Locate a version descriptor.
 */
Ver_desc *
ld_vers_find(const char *name, Elf64_Word hash, APlist *alp)
{
	size_t		idx;
	Ver_desc	*vdp;

	for (APLIST_TRAVERSE(alp, idx, vdp)) {
		if (vdp->vd_hash != hash)
			continue;
		if (strcmp(vdp->vd_name, name) == 0)
			return (vdp);
	}
	return (NULL);
}
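Callers supply both the version name and its precomputed ELF hash, so the list is only string-compared when the hashes match. A hedged usage sketch follows, assuming the libld types used above and libelf's standard elf_hash() function; lookup_version() is a hypothetical helper.

#include <libelf.h>	/* elf_hash() */

/*
 * Illustration only: look up a named version descriptor on a previously
 * built descriptor list.
 */
static Ver_desc *
lookup_version(APlist *verdesc_list, const char *name)
{
	return (ld_vers_find(name, (Elf64_Word)elf_hash(name),
	    verdesc_list));
}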
Example No. 11
/*
 * Indicate dependency selection (allows recursion).
 */
static void
vers_select(Ofl_desc *ofl, Ifl_desc *ifl, Ver_desc *vdp, const char *ref)
{
	size_t		idx;
	Ver_desc	*_vdp;
	Ver_index	*vip = &ifl->ifl_verndx[vdp->vd_ndx];

	vip->vi_flags |= FLG_VER_AVAIL;
	
	//DBG_CALL(Dbg_ver_avail_entry(ofl->ofl_lml, vip, ref));

	for (APLIST_TRAVERSE(vdp->vd_deps, idx, _vdp))
		vers_select(ofl, ifl, _vdp, ref);
}
Example No. 12
void
Dbg_statistics_ar(Ofl_desc *ofl)
{
	Aliste		idx;
	Ar_desc		*adp;
	Elf_Arsym	*arsym;
	Ar_aux		*aux;
	Lm_list		*lml = ofl->ofl_lml;

	if (DBG_NOTCLASS(DBG_C_STATS | DBG_C_UNUSED))
		return;

	Dbg_util_nl(lml, DBG_NL_STD);
	for (APLIST_TRAVERSE(ofl->ofl_ars, idx, adp)) {
		size_t	poffset = 0;
		uint_t	count = 0, used = 0;

		if ((adp->ad_flags & FLG_ARD_EXTRACT) == 0) {
			Dbg_unused_file(lml, adp->ad_name, 0, 0);
			continue;
		}

		if (DBG_NOTCLASS(DBG_C_STATS))
			continue;

		arsym = adp->ad_start;
		aux = adp->ad_aux;
		while ((arsym != NULL) && (arsym->as_off != 0)) {
			/*
			 * Assume that symbols from the same member file are
			 * adjacent within the archive symbol table.
			 */
			if (poffset != arsym->as_off) {
				count++;
				poffset = arsym->as_off;
				if (aux->au_mem == FLG_ARMEM_PROC)
					used++;
			}
			aux++, arsym++;
		}
		if ((count == 0) || (used == 0))
			continue;

		dbg_print(lml, MSG_INTL(MSG_STATS_AR), adp->ad_name, count,
		    used, ((used * 100) / count));
	}
	Dbg_util_nl(lml, DBG_NL_STD);
}
Example No. 13
/*
 * la_objsearch() caller.  Traverse through all audit libraries and call any
 * la_objsearch() entry points found.
 *
 * Effectively any audit library can change the name we're working with, so we
 * continue to propagate the new name to each audit library.  A NULL (or
 * empty) return terminates the search.
 */
static char *
_audit_objsearch(APlist *list, char *oname, Rt_map *clmp, uint_t flags)
{
	Audit_list	*alp;
	Aliste		idx;
	Lm_list		*clml = LIST(clmp);

	for (APLIST_TRAVERSE(list, idx, alp)) {
		Audit_client	*acp;
		Rt_map		*almp = alp->al_lmp;
		Lm_list		*alml = LIST(almp);
		char		*nname = oname;

		if (alp->al_objsearch == NULL)
			continue;
		if ((acp = _audit_client(AUDINFO(clmp), almp)) == NULL)
			continue;

		DBG_CALL(Dbg_audit_objsearch(clml, DBG_AUD_CALL,
		    alp->al_libname, nname, flags, NULL));

		leave(alml, thr_flg_reenter);
		nname = (*alp->al_objsearch)(nname, &(acp->ac_cookie), flags);
		(void) enter(thr_flg_reenter);

		/*
		 * Diagnose any return name that differs from the original name
		 * passed to the auditor.
		 */
		if (nname && (nname[0] == '\0'))
			nname = NULL;
		if ((nname == NULL) || strcmp(nname, oname))
			DBG_CALL(Dbg_audit_objsearch(clml, DBG_AUD_RET,
			    alp->al_libname, oname, flags, nname));

		if ((oname = nname) == NULL)
			break;
	}
	return (oname);
}
Example No. 14
/*
 * Attach an input section to an output section
 *
 * entry:
 *	ofl - File descriptor
 *	osp - Output section descriptor
 *	isp - Input section descriptor
 *	mapfile_sort - True (1) if segment supports mapfile specified ordering
 *		of otherwise unordered input sections, and False (0) otherwise.
 *
 * exit:
 *	- The input section has been attached to the output section
 *	- If the input section is a candidate for string table merging,
 *		then it is appended to the output section's list of merge
 *		candidates (os_mstridescs).
 *
 *	On success, returns True (1). On failure, False (0).
 */
static int
os_attach_isp(Ofl_desc *ofl, Os_desc *osp, Is_desc *isp, int mapfile_sort)
{
	Aliste	init_arritems;
	int	os_isdescs_idx, do_append = 1;

	if ((isp->is_flags & FLG_IS_ORDERED) == 0) {
		init_arritems = AL_CNT_OS_ISDESCS;
		os_isdescs_idx = OS_ISD_DEFAULT;

		/*
		 * If section ordering was specified for an unordered section
		 * via the mapfile, then search in the OS_ISD_DEFAULT list
		 * and insert it in the specified position. Ordered sections
		 * are placed in ascending order before unordered sections
		 * (sections with an is_ordndx value of zero).
		 *
		 * If no mapfile ordering was specified, we append it in
		 * the usual way below.
		 */
		if (mapfile_sort && (isp->is_ordndx > 0)) {
			APlist *ap_isdesc = osp->os_isdescs[OS_ISD_DEFAULT];
			Aliste	idx2;
			Is_desc	*isp2;

			for (APLIST_TRAVERSE(ap_isdesc, idx2, isp2)) {
				if (isp2->is_ordndx &&
				    (isp2->is_ordndx <= isp->is_ordndx))
					continue;

				if (aplist_insert(
				    &osp->os_isdescs[OS_ISD_DEFAULT],
				    isp, init_arritems, idx2) == NULL)
					return (0);
				do_append = 0;
				break;
			}
		}
Example No. 15
/*
 * Dereference dependencies as a part of normalizing (allows recursion).
 */
static void
vers_derefer(Ifl_desc *ifl, Ver_desc *vdp, int weak)
{
	size_t		idx;
	Ver_desc	*_vdp;
	Ver_index	*vip = &ifl->ifl_verndx[vdp->vd_ndx];

	/*
	 * Set the INFO bit on all dependencies that ld.so.1
	 * can skip verification for. These are the dependencies
	 * that are inherited by others -- verifying the inheriting
	 * version implicitly covers this one.
	 *
	 * If the head of the list was weak then we only mark
	 * weak dependencies, but if the head of the list was 'strong'
	 * we set INFO on all dependencies.
	 */
	if ((weak && (vdp->vd_flags & VER_FLG_WEAK)) || (!weak))
		vip->vi_flags |= VER_FLG_INFO;

	for (APLIST_TRAVERSE(vdp->vd_deps, idx, _vdp))
		vers_derefer(ifl, _vdp, weak);
}
Example No. 16
/*
 * Generate a statistics line for the auxiliary relocation descriptor cache.
 *
 * entry:
 *	ofl - output file descriptor
 */
static void
rel_aux_cache_statistics(Ofl_desc *ofl)
{
	Rel_aux_cachebuf	*racp;
	Lm_list	*lml = ofl->ofl_lml;
	size_t	desc_cnt = 0, desc_used = 0, bytes;
	Aliste	idx;
	char	unit_buf[CONV_INV_BUFSIZE + 10];

	/* Sum the total memory allocated across all the buffers */
	for (APLIST_TRAVERSE(ofl->ofl_relaux, idx, racp)) {
		desc_cnt += racp->rac_end - racp->rac_arr;
		desc_used += racp->rac_free - racp->rac_arr;
	}
	bytes = desc_cnt * sizeof (Rel_desc);

	dbg_print(lml, MSG_INTL(MSG_STATS_REL_ACACHE),
	    EC_WORD(aplist_nitems(ofl->ofl_relaux)),
	    EC_XWORD(desc_used), EC_XWORD(desc_cnt),
	    (desc_cnt == 0) ? 100 : EC_WORD((desc_used * 100) / desc_cnt),
	    EC_XWORD(bytes),
	    fmt_human_units(bytes, unit_buf, sizeof (unit_buf)));
}
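The statistics fall out of pointer arithmetic: each cache buffer records the start of its descriptor array, the next free slot, and the end, so capacity and usage are simple pointer differences. The struct below is a hypothetical, simplified stand-in used only to illustrate that bookkeeping; the real Rel_aux_cachebuf and Rel_aux types are defined in the libld headers.

#include <stddef.h>

typedef struct {
	int	ra_dummy;	/* stand-in for the real Rel_aux payload */
} sketch_rel_aux_t;

typedef struct {
	sketch_rel_aux_t	*rac_arr;	/* start of the array */
	sketch_rel_aux_t	*rac_free;	/* next unused slot */
	sketch_rel_aux_t	*rac_end;	/* one past the last slot */
} sketch_cachebuf_t;

static void
cachebuf_usage(const sketch_cachebuf_t *racp, size_t *capacity, size_t *used)
{
	*capacity = (size_t)(racp->rac_end - racp->rac_arr);
	*used = (size_t)(racp->rac_free - racp->rac_arr);
}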
Example No. 17
/*
 * la_objfilter() caller.  Traverse through all audit libraries and call any
 * la_objfilter() entry points found.  A zero return from an auditor indicates
 * that the filtee should be ignored.
 */
static int
_audit_objfilter(APlist *list, Rt_map *frlmp, const char *ref, Rt_map *felmp,
    uint_t flags)
{
	Audit_list	*alp;
	Aliste		idx;
	Lm_list		*frlml = LIST(frlmp);

	for (APLIST_TRAVERSE(list, idx, alp)) {
		Audit_client	*fracp, *feacp;
		Rt_map		*almp = alp->al_lmp;
		Lm_list		*alml = LIST(almp);
		int		ret;

		if (alp->al_objfilter == NULL)
			continue;
		if ((fracp = _audit_client(AUDINFO(frlmp), almp)) == NULL)
			continue;
		if ((feacp = _audit_client(AUDINFO(felmp), almp)) == NULL)
			continue;

		DBG_CALL(Dbg_audit_objfilter(frlml, DBG_AUD_CALL,
		    alp->al_libname, NAME(frlmp), NAME(felmp), ref));

		leave(alml, thr_flg_reenter);
		ret = (*alp->al_objfilter)(&(fracp->ac_cookie), ref,
		    &(feacp->ac_cookie), flags);
		(void) enter(thr_flg_reenter);

		if (ret == 0) {
			DBG_CALL(Dbg_audit_objfilter(frlml, DBG_AUD_RET,
			    alp->al_libname, NAME(frlmp), NULL, NULL));
			return (0);
		}
	}
	return (1);
}
Example No. 18
/*
 * la_objclose() caller.  Traverse through all audit libraries and call any
 * la_objclose() entry points found.
 */
void
_audit_objclose(APlist *list, Rt_map *lmp)
{
	Audit_list	*alp;
	Aliste		idx;
	Lm_list		*lml = LIST(lmp);

	for (APLIST_TRAVERSE(list, idx, alp)) {
		Audit_client	*acp;
		Rt_map		*almp = alp->al_lmp;
		Lm_list		*alml = LIST(almp);

		if (alp->al_objclose == NULL)
			continue;
		if ((acp = _audit_client(AUDINFO(lmp), almp)) == NULL)
			continue;

		DBG_CALL(Dbg_audit_objclose(lml, alp->al_libname, NAME(lmp)));

		leave(alml, thr_flg_reenter);
		(*alp->al_objclose)(&(acp->ac_cookie));
		(void) enter(thr_flg_reenter);
	}
}
Example No. 19
static uintptr_t
vers_visit_children(Ofl_desc *ofl, Ver_desc *vp, int flag)
{
	size_t			idx;
	Ver_desc		*vdp;
	static int		err = 0;
	static Ver_Stack	ver_stk = {0, 0, 0};
	int			tmp_sp;

	/*
	 * If there was any fatal error,
	 * just return.
	 */
	if (err == S_ERROR)
		return (err);

	/*
	 * If this is called from ld_vers_check_defs(), initialize sp.
	 */
	if (flag == 0)
		ver_stk.ver_sp = 0;

	/*
	 * Check if passed version pointer vp is already in the stack.
	 */
	for (tmp_sp = 0; tmp_sp < ver_stk.ver_sp; tmp_sp++) {
		Ver_desc *v;

		v = ver_stk.ver_stk[tmp_sp];
		if (v == vp) {
			/*
			 * cyclic dependency.
			 */
			if (err == 0) {
				ld_eprintf(ofl, ERR_FATAL,
				    (MSG_VER_CYCLIC));
				err = 1;
			}
			for (tmp_sp = 0; tmp_sp < ver_stk.ver_sp; tmp_sp++) {
				v = ver_stk.ver_stk[tmp_sp];
				if ((v->vd_flags & FLG_VER_CYCLIC) == 0) {
					v->vd_flags |= FLG_VER_CYCLIC;
					ld_eprintf(ofl, ERR_NONE,
					    (MSG_VER_ADDVER),
					    v->vd_name);
				}
			}
			if ((vp->vd_flags & FLG_VER_CYCLIC) == 0) {
				vp->vd_flags |= FLG_VER_CYCLIC;
				ld_eprintf(ofl, ERR_NONE,
				    (MSG_VER_ADDVER), vp->vd_name);
			}
			return (err);
		}
	}

	/*
	 * Push version on the stack.
	 */
	if (ver_stk.ver_sp >= ver_stk.ver_lmt) {
		ver_stk.ver_lmt += _NUM_OF_VERS_;
		if ((ver_stk.ver_stk = (Ver_desc **)
		    libld_realloc((void *)ver_stk.ver_stk,
		    ver_stk.ver_lmt * sizeof (Ver_desc *))) == NULL)
			return (S_ERROR);
	}
	ver_stk.ver_stk[(ver_stk.ver_sp)++] = vp;

	/*
	 * Now visit children.
	 */
	for (APLIST_TRAVERSE(vp->vd_deps, idx, vdp))
		if (vers_visit_children(ofl, vdp, 1) == S_ERROR)
			return (S_ERROR);

	/*
	 * Pop version from the stack.
	 */
	(ver_stk.ver_sp)--;

	return (err);
}
Example No. 20
/*
 * Finish the initialization of a new segment descriptor allocated by
 * ld_map_seg_alloc(), and enter it into the segment list.
 *
 * entry:
 *	mf - Mapfile descriptor
 *	dbg_state - Debug state for the new segment, one of DBG_SEG_NEW
 *		or DBG_SEG_NEW_IMPLICIT
 *	sgp - Segment descriptor to enter.
 *	where - Insertion point, initialized by a previous (failed) call to
 *		ld_seg_lookup(). Ignored if the segment has a NULL sg_name.
 *
 * exit:
 *	On success, returns SEG_INS_OK. A non-fatal error is indicated with
 *	a return value of SEG_INS_SKIP, in which case the descriptor is
 *	not entered, but the user is expected to discard it and continue
 *	running. On failure, returns SEG_INS_FAIL.
 *
 * note:
 *	This routine may modify the contents of the descriptor referenced
 *	by sgp before entering it. The caller must not expect it to be
 *	unmodified.
 */
ld_map_seg_ins_t
ld_map_seg_insert(Mapfile *mf, dbg_state_t dbg_state, Sg_desc *sgp,
    avl_index_t where)
{
	Ofl_desc	*ofl = mf->mf_ofl;
	size_t		idx;
	Sg_desc		*sgp2;		/* temp segment descriptor pointer */
	int		ins_head;
	Elf64_Xword	sg_ndx;

	/*
	 * If specific fields have not been supplied via
	 * map_equal(), make sure defaults are supplied.
	 */
	if (((sgp->sg_flags & FLG_SG_P_TYPE) == 0) &&
	    (sgp->sg_phdr.p_type == PT_NULL)) {
		/*
		 * Default to a loadable segment.
		 */
		sgp->sg_phdr.p_type = PT_LOAD;
		sgp->sg_flags |= FLG_SG_P_TYPE;
	}
	if (sgp->sg_phdr.p_type == PT_LOAD) {
		if ((sgp->sg_flags & FLG_SG_P_FLAGS) == 0) {
			/*
			 * Default to read/write and execute.
			 */
			sgp->sg_phdr.p_flags = PF_R + PF_W + PF_X;
			sgp->sg_flags |= FLG_SG_P_FLAGS;
		}
		if ((sgp->sg_flags & FLG_SG_P_ALIGN) == 0) {
			/*
			 * Default to segment alignment
			 */
			sgp->sg_phdr.p_align = ld_targ.t_m.m_segm_align;
			sgp->sg_flags |= FLG_SG_P_ALIGN;
		}
	}

	/*
	 * Determine where the new item should be inserted in
	 * the segment descriptor list.
	 */
	switch (sgp->sg_phdr.p_type) {
	case PT_LOAD:
		if (sgp->sg_flags & FLG_SG_EMPTY)
			sgp->sg_id = SGID_TEXT_EMPTY;
		else
			sgp->sg_id = SGID_TEXT;
		break;
	case PT_NULL:
		if (sgp->sg_flags & FLG_SG_EMPTY)
			sgp->sg_id = SGID_NULL_EMPTY;
		else
			sgp->sg_id = SGID_NULL;
		break;
	case PT_NOTE:
		sgp->sg_id = SGID_NOTE;
		break;
	default:
		mf_fatal(mf, (MSG_MAP_UNKSEGTYP),
		    EC_WORD(sgp->sg_phdr.p_type));
		return (SEG_INS_FAIL);
	}

	/*
	 * Add the descriptor to the segment list. In the v1 syntax,
	 * new segments are added at the head of their type, while in
	 * the newer syntax, they go at the end of their type.
	 */
	sg_ndx = 0;
	ins_head = (mf->mf_version == MFV_SYSV);
	for (APLIST_TRAVERSE(ofl->ofl_segs, idx, sgp2)) {
		if (ins_head) {	/* Insert before the others of its type */
			if (sgp->sg_id > sgp2->sg_id) {
				sg_ndx++;
				continue;
			}
		} else {	/* Insert after the others of its type */
			if (sgp->sg_id >= sgp2->sg_id) {
				sg_ndx++;
				continue;
			}
		}
		break;
	}
	if (aplist_insert(&ofl->ofl_segs, sgp, AL_CNT_SEGMENTS, idx) == NULL)
		return (SEG_INS_FAIL);
	if (sgp->sg_name != NULL)
		avl_insert(&ofl->ofl_segs_avl, sgp, where);

	//DBG_CALL(Dbg_map_seg(ofl, dbg_state, sg_ndx, sgp, mf->mf_lineno));
	return (SEG_INS_OK);
}
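The head-versus-tail placement above hinges on `>` versus `>=` while walking entries whose sg_id values are already in sorted order. The standalone sketch below demonstrates the comparison with plain integers in place of segment descriptors; find_insert_idx() is hypothetical and exists only to illustrate the rule.

#include <stddef.h>

/*
 * With existing ids {1, 1, 2, 3} and a new id of 2:
 *   - ins_head (v1 mapfile syntax): stop at the first id that is not
 *     smaller, i.e. insert before the existing 2  -> {1, 1, [2], 2, 3}
 *   - !ins_head (newer syntax): also walk past equal ids, i.e. insert
 *     after the existing 2                        -> {1, 1, 2, [2], 3}
 */
static size_t
find_insert_idx(const int *ids, size_t nids, int new_id, int ins_head)
{
	size_t	idx;

	for (idx = 0; idx < nids; idx++) {
		if (ins_head) {
			if (new_id > ids[idx])	/* keep walking smaller ids */
				continue;
		} else {
			if (new_id >= ids[idx])	/* also walk past equal ids */
				continue;
		}
		break;
	}
	return (idx);
}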
Example No. 21
void
Dbg_sec_order_list(Ofl_desc *ofl, int flag)
{
	Os_desc		*osp;
	Is_desc		*isp1;
	Aliste		idx1;
	Lm_list		*lml = ofl->ofl_lml;
	const char	*str;

	if (DBG_NOTCLASS(DBG_C_SECTIONS))
		return;
	if (DBG_NOTDETAIL())
		return;

	Dbg_util_nl(lml, DBG_NL_STD);

	/*
	 * If the flag == 0, then the routine is called before sorting.
	 */
	if (flag == 0)
		str = MSG_INTL(MSG_ORD_SORT_BEFORE);
	else
		str = MSG_INTL(MSG_ORD_SORT_AFTER);

	for (APLIST_TRAVERSE(ofl->ofl_ordered, idx1, osp)) {
		int		os_isdescs_idx;
		Aliste		idx2;

		Dbg_util_nl(lml, DBG_NL_STD);
		dbg_print(lml, str, osp->os_name);
		dbg_print(lml, MSG_INTL(MSG_ORD_HDR_1),
		    EC_WORD(aplist_nitems(osp->os_isdescs[OS_ISD_BEFORE])),
		    EC_WORD(aplist_nitems(osp->os_isdescs[OS_ISD_ORDERED])),
		    EC_WORD(aplist_nitems(osp->os_isdescs[OS_ISD_DEFAULT])),
		    EC_WORD(aplist_nitems(osp->os_isdescs[OS_ISD_AFTER])));

		OS_ISDESCS_TRAVERSE(os_isdescs_idx, osp, idx2, isp1) {
			dbg_isec_name_buf_t	buf;
			char			*alloc_mem;
			const char		*isp1_str;
			Word			link;
			Ifl_desc		*ifl = isp1->is_file;
			Is_desc			*isp2;
			const char		*msg;

			/*
			 * An output section that requires ordering might have
			 * as few as two sorted input sections.  For example,
			 * the crt's can provide a SHN_BEGIN and SHN_AFTER, and
			 * only these two sections must be processed.  Thus, if
			 * an input section is unordered, move on.  Diagnosing
			 * every unsorted section would produce far too much
			 * noise.
			 */
			if ((isp1->is_flags & FLG_IS_ORDERED) == 0)
				continue;

			if (isp1->is_shdr->sh_flags & SHF_ORDERED) {
				link = isp1->is_shdr->sh_info;
				msg = MSG_ORIG(MSG_SH_INFO);
			} else {	/* SHF_LINK_ORDER */
				link = isp1->is_shdr->sh_link;
				msg = MSG_ORIG(MSG_SH_LINK);
			}

			isp1_str = dbg_fmt_isec_name(isp1, buf, &alloc_mem);

			if (link == SHN_BEFORE) {
				dbg_print(lml, MSG_INTL(MSG_ORD_TITLE_1), msg,
				    isp1_str, isp1->is_file->ifl_name);
			} else if (link == SHN_AFTER) {
				dbg_print(lml, MSG_INTL(MSG_ORD_TITLE_2), msg,
				    isp1_str, isp1->is_file->ifl_name);
			} else {
				isp2 = ifl->ifl_isdesc[link];
				dbg_print(lml, MSG_INTL(MSG_ORD_TITLE_3),
				    EC_WORD(isp2->is_keyident), isp1_str,
				    ifl->ifl_name, msg, isp2->is_name);
			}
			if (alloc_mem != NULL)
				free(alloc_mem);
		}
	}
Example No. 22
/*
 * If we need to record the versions of any needed dependencies, traverse the
 * shared object dependency list and calculate what version needed entries are
 * required.
 */
uintptr_t
ld_vers_check_need(Ofl_desc *ofl)
{
	size_t		idx1;
	Ifl_desc	*ifl;
	Elf64_Half		needndx;
	Str_tbl		*strtbl;


	/*
	 * Determine which string table is appropriate.
	 */
	strtbl = (OFL_IS_STATIC_OBJ(ofl)) ? ofl->ofl_strtab :
	    ofl->ofl_dynstrtab;

	/*
	 * Elf64_Versym indexes for needed versions start with the next
	 * available version after the final defined version.
	 * However, it can never be less than 2. 0 is always for local
	 * scope, and 1 is always the first global definition.
	 */
	needndx = (ofl->ofl_vercnt > 0) ? (ofl->ofl_vercnt + 1) : 2;

	/*
	 * Traverse the shared object list looking for dependencies.
	 */
	for (APLIST_TRAVERSE(ofl->ofl_sos, idx1, ifl)) {
		size_t		idx2;
		Ver_index	*vip;
		Ver_desc	*vdp;
		Elf64_Half	cnt, need = 0;

		if (!(ifl->ifl_flags & FLG_IF_NEEDED))
			continue;

		if (ifl->ifl_vercnt <= VER_NDX_GLOBAL)
			continue;

		/*
		 * Scan the version index list and if any weak version
		 * definition has been referenced by the user promote the
		 * dependency to be non-weak.  Weak version dependencies do not
		 * cause fatal errors from the runtime linker, non-weak
		 * dependencies do.
		 */
		for (cnt = 0; cnt <= ifl->ifl_vercnt; cnt++) {
			vip = &ifl->ifl_verndx[cnt];
			vdp = vip->vi_desc;

			if ((vip->vi_flags & (FLG_VER_REFER | VER_FLG_WEAK)) ==
			    (FLG_VER_REFER | VER_FLG_WEAK))
				vdp->vd_flags &= ~VER_FLG_WEAK;

			/*
			 * Mark any weak reference as referred to so as to
			 * simplify normalization and later version dependency
			 * manipulation.
			 */
			if (vip->vi_flags & VER_FLG_WEAK)
				vip->vi_flags |= FLG_VER_REFER;
		}

		/*
		 * Scan the version dependency list to normalize the referenced
		 * dependencies.  Any needed version that is inherited by
		 * another like version is dereferenced as it is not necessary
		 * to make this part of the version dependencies.
		 */
		for (APLIST_TRAVERSE(ifl->ifl_verdesc, idx2, vdp)) {
			size_t		idx3;
			Ver_desc	*_vdp;
			int		type;

			vip = &ifl->ifl_verndx[vdp->vd_ndx];

			if (!(vip->vi_flags & FLG_VER_REFER))
				continue;

			type = vdp->vd_flags & VER_FLG_WEAK;
			for (APLIST_TRAVERSE(vdp->vd_deps, idx3, _vdp))
				vers_derefer(ifl, _vdp, type);
		}

		/*
		 * Finally, determine how many of the version dependencies need
		 * to be recorded.
		 */
		for (cnt = 0; cnt <= ifl->ifl_vercnt; cnt++) {
			vip = &ifl->ifl_verndx[cnt];

			/*
			 * If a version has been referenced then record it as a
			 * version dependency.
			 */
			if (vip->vi_flags & FLG_VER_REFER) {
				/* Assign a VERSYM index for it */
				vip->vi_overndx = needndx++;

				ofl->ofl_verneedsz += sizeof (Elf64_Vernaux);
				if (st_insert(strtbl, vip->vi_name) == -1)
					return (S_ERROR);
				need++;
			}
		}

		if (need) {
			ifl->ifl_flags |= FLG_IF_VERNEED;
			ofl->ofl_verneedsz += sizeof (Elf64_Verneed);
			if (st_insert(strtbl, ifl->ifl_soname) == -1)
				return (S_ERROR);
		}
	}

	/*
	 * If no version needed information is required unset the output file
	 * flag.
	 */
	if (ofl->ofl_verneedsz == 0)
		ofl->ofl_flags &= ~FLG_OF_VERNEED;

	return (1);
}
Example No. 23
/*
 * Create an unwind header (.eh_frame_hdr) output section.
 * The section is created and space reserved, but the data
 * is not copied into place. That is done by a later call
 * to ld_unwind_populate(), after active relocations have been
 * processed.
 *
 * When GNU linkonce processing is in effect, we can end up in a situation
 * where the FDEs related to discarded sections remain in the eh_frame
 * section. Ideally, we would remove these dead entries from eh_frame.
 * However, that optimization has not yet been implemented. In the current
 * implementation, the number of dead FDEs cannot be determined until
 * active relocations are processed, and that processing follows the
 * call to this function. This means that we are unable to detect dead FDEs
 * here, and the section created by this routine is sized for the maximum
 * case, in which all FDEs are valid.
 */
uintptr_t
ld_unwind_make_hdr(Ofl_desc *ofl)
{
	int		bswap = (ofl->ofl_flags1 & FLG_OF1_ENCDIFF) != 0;
	Shdr		*shdr;
	Elf_Data	*elfdata;
	Is_desc		*isp;
	size_t		size;
	Xword		fde_cnt;
	Aliste		idx1;
	Os_desc		*osp;

	/*
	 * We only build an unwind header if we have some unwind
	 * information in the file.
	 */
	if (ofl->ofl_unwind == NULL)
		return (1);

	/*
	 * Allocate and initialize the Elf_Data structure.
	 */
	if ((elfdata = libld_calloc(sizeof (Elf_Data), 1)) == NULL)
		return (S_ERROR);
	elfdata->d_type = ELF_T_BYTE;
	elfdata->d_align = ld_targ.t_m.m_word_align;
	elfdata->d_version = ofl->ofl_dehdr->e_version;

	/*
	 * Allocate and initialize the Shdr structure.
	 */
	if ((shdr = libld_calloc(sizeof (Shdr), 1)) == NULL)
		return (S_ERROR);
	shdr->sh_type = ld_targ.t_m.m_sht_unwind;
	shdr->sh_flags = SHF_ALLOC;
	shdr->sh_addralign = ld_targ.t_m.m_word_align;
	shdr->sh_entsize = 0;

	/*
	 * Allocate and initialize the Is_desc structure.
	 */
	if ((isp = libld_calloc(1, sizeof (Is_desc))) == NULL)
		return (S_ERROR);
	isp->is_name = MSG_ORIG(MSG_SCN_UNWINDHDR);
	isp->is_shdr = shdr;
	isp->is_indata = elfdata;

	if ((ofl->ofl_unwindhdr = ld_place_section(ofl, isp, NULL,
	    ld_targ.t_id.id_unwindhdr, NULL)) == (Os_desc *)S_ERROR)
		return (S_ERROR);

	/*
	 * Scan through all of the input Frame information, counting each FDE
	 * that requires an index.  Each fde_entry gets a corresponding entry
	 * in the binary search table.
	 */
	fde_cnt = 0;
	for (APLIST_TRAVERSE(ofl->ofl_unwind, idx1, osp)) {
		Aliste	idx2;
		int	os_isdescs_idx;

		OS_ISDESCS_TRAVERSE(os_isdescs_idx, osp, idx2, isp) {
			uchar_t		*data;
			uint64_t	off = 0;

			data = isp->is_indata->d_buf;
			size = isp->is_indata->d_size;

			while (off < size) {
				uint_t		length, id;
				uint64_t	ndx = 0;

				/*
				 * Extract length in lsb format.  A zero length
				 * indicates that this CIE is a terminator and
				 * that processing for unwind information is
				 * complete.
				 */
				length = extract_uint(data + off, &ndx, bswap);
				if (length == 0)
					break;

				/*
				 * Extract CIE id in lsb format.
				 */
				id = extract_uint(data + off, &ndx, bswap);

				/*
				 * A CIE record has an id of '0'; otherwise
				 * this is an FDE entry and the 'id' is the
				 * CIE pointer.
				 */
				if (id == 0) {
					uint_t	cieversion;
					/*
					 * The only CIE version supported
					 * is '1' - quick sanity check
					 * here.
					 */
					cieversion = data[off + ndx];
					ndx += 1;
					/* BEGIN CSTYLED */
					if (cieversion != 1) {
					    ld_eprintf(ofl, ERR_FATAL,
						MSG_INTL(MSG_UNW_BADCIEVERS),
						isp->is_file->ifl_name,
						isp->is_name, off);
					    return (S_ERROR);
					}
					/* END CSTYLED */
				} else {
					fde_cnt++;
				}
				off += length + 4;
			}
		}
	}
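The record walk above relies on an extract_uint() helper that is not shown in this extract. Judging from how it is called (ndx is treated as a byte offset that advances by four per call), one plausible shape for it is sketched below; this is an assumption about its behavior, not the actual libld implementation.

#include <stdint.h>
#include <string.h>

/*
 * Sketch: read a 4-byte value at data[*ndx], optionally byte-swapping it,
 * and advance *ndx past it.
 */
static uint32_t
sketch_extract_uint(const unsigned char *data, uint64_t *ndx, int do_bswap)
{
	uint32_t	val;

	(void) memcpy(&val, data + *ndx, sizeof (val));
	if (do_bswap)
		val = ((val & 0x000000ffU) << 24) |
		    ((val & 0x0000ff00U) << 8) |
		    ((val & 0x00ff0000U) >> 8) |
		    ((val & 0xff000000U) >> 24);
	*ndx += sizeof (val);
	return (val);
}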
Example No. 24
/*
 * Validate a SHT_SUNW_move section.  These are only processed from input
 * relocatable objects.  The move section entries are validated and any data
 * structures required for later processing are created.
 */
uintptr_t
ld_process_move(Ofl_desc *ofl)
{
	size_t		idx;
	Is_desc		*isp;
	int 		errcnt = 0;

	for (APLIST_TRAVERSE(ofl->ofl_ismove, idx, isp)) {
		Ifl_desc	*ifile = isp->is_file;
		Elf64_Move		*mvp;
		Elf64_Xword		i, num;

		mvp = (Elf64_Move *)isp->is_indata->d_buf;

		if (isp->is_shdr->sh_entsize == 0) {
			ld_eprintf(ofl, ERR_FATAL,
			    MSG_FIL_INVSHENTSIZE,
			    isp->is_file->ifl_name, EC_WORD(isp->is_scnndx),
			    isp->is_name, EC_XWORD(0));
			return (S_ERROR);
		}
		num = isp->is_shdr->sh_size / isp->is_shdr->sh_entsize;

		for (i = 0; i < num; i++) {
			Elf64_Xword 		ndx = ELF_M_SYM(mvp->m_info);
			Sym_desc	*sdp;
			Elf64_Sym	*sym;

			if ((ndx >= (Elf64_Xword) isp->is_file->ifl_symscnt) ||
			    (ndx == 0)) {
				ld_eprintf(ofl, ERR_FATAL,
				    MSG_PSYM_INVMINFO1,
				    isp->is_file->ifl_name,
				    EC_WORD(isp->is_scnndx), isp->is_name, i,
				    EC_XWORD(mvp->m_info));
				return (S_ERROR);
			}
			if (mvp->m_repeat == 0) {
				ld_eprintf(ofl, ERR_FATAL,
				    MSG_PSYM_INVMREPEAT,
				    isp->is_file->ifl_name,
				    EC_WORD(isp->is_scnndx), isp->is_name, i,
				    EC_XWORD(mvp->m_repeat));
				return (S_ERROR);
			}

			sdp = isp->is_file->ifl_oldndx[ndx];

			/*
			 * Validate that this entry has a valid size.
			 */
			/* LINTED */
			switch (ELF_M_SIZE(mvp->m_info)) {
			case 1: case 2: case 4: case 8:
				break;
			default:
				ld_eprintf(ofl, ERR_FATAL,
				    MSG_PSYM_INVMINFO2,
				    isp->is_file->ifl_name,
				    EC_WORD(isp->is_scnndx), isp->is_name, i,
				    EC_XWORD(mvp->m_info));
				return (S_ERROR);
			}

			/*
			 * If this is a global symbol, adjust the visibility.
			 */
			if (sdp->sd_aux &&
			    ((sdp->sd_flags & FLG_SY_VISIBLE) == 0))
				ld_sym_adjust_vis(sdp, ofl);

			sym = sdp->sd_sym;

			if (sdp->sd_move == NULL) {
				/*
				 * If this is the first move entry associated
				 * with this symbol, save the symbol on the
				 * partial symbol list, and initialize various
				 * state regarding this symbol.
				 */
				if (aplist_append(&ofl->ofl_parsyms, sdp,
				    AL_CNT_OFL_PARSYMS) == NULL)
					return (S_ERROR);

				/*
				 * Even if -zredlocsym is in effect, the local
				 * symbol used for partial initialization is
				 * kept.
				 */
				if ((ofl->ofl_flags & FLG_OF_REDLSYM) &&
				    (ELF_ST_BIND(sym->st_info) == STB_LOCAL) &&
				    (ELF_ST_TYPE(sym->st_info) == STT_OBJECT)) {
					ofl->ofl_locscnt++;
					if (st_insert(ofl->ofl_strtab,
					    sdp->sd_name) == -1)
						return (S_ERROR);
				}

				/*
				 * Mark the input section associated with this
				 * partially initialized symbol.  This is
				 * needed when a relocation entry uses symbol
				 * information that does not come from the
				 * symbol entry itself.
				 *
				 * For an executable, this is only needed for
				 * expanded symbols.  However, for a shared
				 * object, any partially initialized symbols
				 * that are not expanded are moved from
				 * .bss/COMMON to .sunwbss, so the marking is
				 * always needed.
				 */
				if ((sym->st_shndx != SHN_UNDEF) &&
				    (sym->st_shndx < SHN_LOPROC)) {
					Is_desc	*isc;

					isc = ifile->ifl_isdesc[ sym->st_shndx];
					isc->is_flags |= FLG_IS_RELUPD;

					if (sdp->sd_osym == NULL) {
						if ((sdp->sd_osym =
						    libld_calloc(sizeof(Elf64_Sym),
						    1)) == NULL)
							return (S_ERROR);
						*(sdp->sd_osym) =
						    *(sdp->sd_sym);
					}
				}
			}

			if (append_move_desc(ofl, sdp, mvp, isp) == S_ERROR)
				return (S_ERROR);

			if (sdp->sd_flags & FLG_SY_OVERLAP)
				errcnt++;

			/*
			 * If this symbol is marked to be expanded, go to the
			 * next move entry.
			 */
			if (sdp->sd_flags & FLG_SY_PAREXPN) {
				mvp++;
				continue;
			}

			/*
			 * Decide whether this partial symbol is to be expanded
			 * or not.
			 *
			 * The symbol will be expanded if:
			 *	a) '-z nopartial' is specified, or
			 *	b) the move entries cover the entire symbol.
			 *
			 * To expand a move entry, the size of the symbol to be
			 * expanded must be known in order to generate file
			 * space for it (see make_mvsections()).
			 *
			 * Therefore a move entry can not be expanded if the
			 * partial symbol is a section symbol, as the size of
			 * the symbol may be unknown.  This can happen, for
			 * example, when a local symbol is reduced by
			 * -zredlocsym.
			 *
			 * The following if statements check whether the move
			 * entry can be expanded or not.
			 */
			if (OFL_IS_STATIC_EXEC(ofl)) {
				if (ELF_ST_TYPE(sym->st_info) == STT_SECTION) {
					errcnt++;
					ld_eprintf(ofl, ERR_FATAL,
					    MSG_PSYM_CANNOTEXPND,
					    sdp->sd_file->ifl_name,
					    EC_WORD(isp->is_scnndx),
					    isp->is_name, i,
					    MSG_PSYM_NOSTATIC);
				} else {
					sdp->sd_flags |= FLG_SY_PAREXPN;
				}
			} else if ((ofl->ofl_flags1 & FLG_OF1_NOPARTI) != 0) {
				if (ELF_ST_TYPE(sym->st_info) == STT_SECTION) {
					ld_eprintf(ofl, ERR_WARNING,
					    MSG_PSYM_CANNOTEXPND,
					    sdp->sd_file->ifl_name,
					    EC_WORD(isp->is_scnndx),
					    isp->is_name, i,
					    MSG_STR_EMPTY);
				} else {
					sdp->sd_flags |= FLG_SY_PAREXPN;
				}
			} else if (((Elf64_Xword)((sizeof (Elf64_Move)) *
			    alist_nitems(sdp->sd_move)) > sym->st_size) &&
			    (ELF_ST_TYPE(sym->st_info) == STT_OBJECT)) {
				sdp->sd_flags |= FLG_SY_PAREXPN;
			}

			/*
			 * If a move entry exists that references a local
			 * symbol, and this symbol reference will eventually
			 * be assigned to the associated section, make sure the
			 * section symbol is available for relocating against
			 * at runtime.
			 */
			if ((ELF_ST_BIND(sym->st_info) == STB_LOCAL) &&
			    (((ofl->ofl_flags & FLG_OF_RELOBJ) == 0) ||
			    (ofl->ofl_flags & FLG_OF_REDLSYM))) {
				Os_desc *osp = sdp->sd_isc->is_osdesc;

				if (osp &&
				    ((osp->os_flags & FLG_OS_OUTREL) == 0)) {
					ofl->ofl_dynshdrcnt++;
					osp->os_flags |= FLG_OS_OUTREL;
				} else if ((sdp->sd_flags &
				    FLG_SY_PAREXPN) == 0)
					ofl->ofl_flags1 |= FLG_OF1_BSSOREL;
			}
			mvp++;
		}
	}

	if (errcnt != 0)
		return (S_ERROR);
	if (make_mvsections(ofl) == S_ERROR)
		return (S_ERROR);

	return (1);
}
Example No. 25
/*
 * la_objopen() caller for the head link-map.  Global auditors, or an auditor
 * started from the object that heads a link-map list (typically the dynamic
 * executable), are passed to la_objopen().  However, local auditors can
 * provide activity and preinit events, and for these events, a cookie
 * representing the head link-map list object is expected.  This routine obtains
 * these cookies from the link-map list lm_cookies element.  This element
 * ensures all clients of the same auditor use the same cookie.
 *
 * Although a local auditor will get an la_objopen() call for the object that
 * heads the link-map list of the object being audited, the auditor is not
 * permitted to request binding information for this head object.  The head
 * object already exists, and bindings may already have been resolved against
 * it.  This local auditor is coming into existence too late, so we don't
 * allow it to catch any bindings involving the head object at all.
 */
static int
_audit_add_head(Rt_map *clmp, Rt_map *hlmp, int preinit, int activity)
{
	Lm_list		*clml = LIST(clmp);
	Lmid_t		lmid = get_linkmap_id(clml);
	Audit_list	*alp;
	Aliste		idx;
	int		save = 0;

	for (APLIST_TRAVERSE(AUDITORS(clmp)->ad_list, idx, alp)) {
		Audit_client	*acp;
		Rt_map		*almp = alp->al_lmp;
		Lm_list		*alml = LIST(almp);
		uintptr_t	*cookie;
		uint_t		rtldflags;

		/*
		 * Ensure this local auditor isn't already in existence as an
		 * auditor for the head of the link-map list.  If it is, then
		 * this auditor will already have received preinit and activity
		 * events.
		 */
		if (AUDITORS(hlmp) && _audit_used_by_head(hlmp, almp))
			continue;

		/*
		 * Create a cookie that represents the object that heads the
		 * link-map list.  If the cookie already exists, then this
		 * auditor has already been established for another object's
		 * local auditing.  In this case, do not issue a la_objopen()
		 * or la_activity() event, as these will have already occurred.
		 */
		if ((acp = _audit_get_head_client(clml->lm_head, almp)) != NULL)
			continue;
		if ((acp =
		    _audit_create_head_client(clml->lm_head, almp)) == NULL)
			return (0);

		cookie = &(acp->ac_cookie);
		save++;

		/*
		 * Call the la_objopen() if available.
		 */
		if (alp->al_objopen) {
			uint_t	flags;

			DBG_CALL(Dbg_audit_objopen(clml, DBG_AUD_CALL,
			    alp->al_libname, NAME(hlmp), 0, FALSE));

			APPLICATION_ENTER(rtldflags);
			leave(alml, thr_flg_reenter);
			flags = (*alp->al_objopen)((Link_map *)hlmp, lmid,
			    cookie);
			(void) enter(thr_flg_reenter);
			APPLICATION_RETURN(rtldflags);

			if (flags) {
				DBG_CALL(Dbg_audit_objopen(clml, DBG_AUD_RET,
				    alp->al_libname, NAME(hlmp), flags, TRUE));
			}
		}

		/*
		 * Call the la_activity() if available.
		 */
		if (alp->al_activity) {
			alml->lm_flags |= LML_FLG_AUDITNOTIFY;
			clml->lm_flags |= LML_FLG_ACTAUDIT;

			DBG_CALL(Dbg_audit_activity(clml, alp->al_libname,
			    NAME(clml->lm_head), LA_ACT_ADD));

			APPLICATION_ENTER(rtldflags);
			leave(alml, thr_flg_reenter);
			(*alp->al_activity)(cookie, LA_ACT_ADD);
			(void) enter(thr_flg_reenter);
			APPLICATION_RETURN(rtldflags);
		}
	}

	/*
	 * If new head link-map cookies have been generated, then maintain
	 * any preinit and/or activity requirements.
	 */
	if (save) {
		if (preinit && (aplist_append(&aud_preinit, clmp,
		    AL_CNT_AUDITORS) == NULL))
			return (0);
		if (activity && (aplist_append(&aud_activity, clmp,
		    AL_CNT_AUDITORS) == NULL))
			return (0);
	}
	return (1);
}
Example No. 26
/*
 * la_objopen() caller.  Create an audit information structure for the indicated
 * link-map, regardless of an la_objopen() entry point.  This structure is used
 * to supply information to various audit interfaces (see LML_MSK_AUDINFO).
 * Traverse through all audit libraries and call any la_objopen() entry points
 * found.
 */
static int
_audit_objopen(APlist *list, Rt_map *nlmp, Lmid_t lmid, Audit_info *aip,
    int *ndx)
{
	Lm_list		*nlml = LIST(nlmp);
	Audit_list	*alp;
	Aliste		idx;

	for (APLIST_TRAVERSE(list, idx, alp)) {
		uint_t		flags;
		Audit_client	*acp;
		Rt_map		*almp = alp->al_lmp;
		Lm_list		*alml = LIST(almp);

		/*
		 * Associate a cookie with the audit library, and assign the
		 * initial cookie as the present link-map.
		 */
		acp = &aip->ai_clients[(*ndx)++];
		acp->ac_lmp = alp->al_lmp;
		acp->ac_cookie = (uintptr_t)nlmp;

		if (alp->al_objopen == NULL)
			continue;

		DBG_CALL(Dbg_audit_objopen(nlml, DBG_AUD_CALL, alp->al_libname,
		    NAME(nlmp), 0, FALSE));

		leave(alml, thr_flg_reenter);
		flags = (*alp->al_objopen)((Link_map *)nlmp, lmid,
		    &(acp->ac_cookie));
		(void) enter(thr_flg_reenter);

		/*
		 * Diagnose any flags returned by the auditor.
		 */
		if (flags) {
			DBG_CALL(Dbg_audit_objopen(nlml, DBG_AUD_RET,
			    alp->al_libname, NAME(nlmp), flags, FALSE));
		}

		if (flags & LA_FLG_BINDTO)
			acp->ac_flags |= FLG_AC_BINDTO;

		if (flags & LA_FLG_BINDFROM) {
			ulong_t		pltcnt;

			acp->ac_flags |= FLG_AC_BINDFROM;

			/*
			 * We only need dynamic plt's if an la_pltenter()
			 * and/or la_pltexit() entry point exists in one of
			 * our auditing libraries.
			 */
			if (aip->ai_dynplts || (JMPREL(nlmp) == 0) ||
			    ((audit_flags & (AF_PLTENTER | AF_PLTEXIT)) == 0))
				continue;

			/*
			 * Create one dynplt for every 'PLT' that exists in the
			 * object.
			 */
			pltcnt = PLTRELSZ(nlmp) / RELENT(nlmp);
			if ((aip->ai_dynplts = calloc(pltcnt,
			    dyn_plt_ent_size)) == NULL)
				return (0);
		}
	}
	return (1);
}
Example No. 27
uintptr_t
ld_vers_check_defs(Ofl_desc *ofl)
{
	size_t		idx1;
	Ver_desc	*vdp;
	uintptr_t 	is_cyclic = 0;

	//DBG_CALL(Dbg_ver_def_title(ofl->ofl_lml, ofl->ofl_name));

	/*
	 * First check whether there are any cyclic dependencies.
	 */
	for (APLIST_TRAVERSE(ofl->ofl_verdesc, idx1, vdp))
		if ((is_cyclic = vers_visit_children(ofl, vdp, 0)) == S_ERROR)
			return (S_ERROR);

	if (is_cyclic)
		ofl->ofl_flags |= FLG_OF_FATAL;

	for (APLIST_TRAVERSE(ofl->ofl_verdesc, idx1, vdp)) {
		uint8_t	cnt;
		Elf64_Sym	*sym;
		Sym_desc	*sdp;
		const char	*name = vdp->vd_name;
		unsigned char	bind;
		Ver_desc	*_vdp;
		avl_index_t	where;
		size_t		idx2;

		if (vdp->vd_ndx == 0) {
			ld_eprintf(ofl, ERR_FATAL,
			    (MSG_VER_UNDEF), name, vdp->vd_ref->vd_name,
			    vdp->vd_ref->vd_file->ifl_name);
			continue;
		}

		//DBG_CALL(Dbg_ver_desc_entry(ofl->ofl_lml, vdp));

		/*
		 * If a version definition contains no symbols this is possibly
		 * a mapfile error.
		 */
#if 0
		if ((vdp->vd_flags &
		    (VER_FLG_BASE | VER_FLG_WEAK | FLG_VER_REFER)) == 0)
			DBG_CALL(Dbg_ver_nointerface(ofl->ofl_lml,
			    vdp->vd_name));
#endif

		/*
		 * Update the version entry count to account for this new
		 * version descriptor (the count is the size in bytes).
		 */
		ofl->ofl_verdefsz += sizeof (Elf64_Verdef);

		/*
		 * Traverse this version's dependency list to determine what
		 * additional version dependencies we must account for against
		 * this descriptor.
		 */
		cnt = 1;
		for (APLIST_TRAVERSE(vdp->vd_deps, idx2, _vdp)) {
#if	defined(__lint)
			/* get lint to think `_vdp' is used... */
			vdp = _vdp;
#endif
			cnt++;
		}
		ofl->ofl_verdefsz += (cnt * sizeof (Elf64_Verdaux));

		/*
		 * Except for the base version descriptor, generate an absolute
		 * symbol to reflect this version.
		 */
		if (vdp->vd_flags & VER_FLG_BASE)
			continue;

		if (vdp->vd_flags & VER_FLG_WEAK)
			bind = STB_WEAK;
		else
			bind = STB_GLOBAL;

		if (sdp = ld_sym_find(name, vdp->vd_hash, &where, ofl)) {
			/*
			 * If the symbol already exists and is undefined or was
			 * defined in a shared library, convert it to an
			 * absolute.
			 */
			if ((sdp->sd_sym->st_shndx == SHN_UNDEF) ||
			    (sdp->sd_ref != REF_REL_NEED)) {
				sdp->sd_shndx = sdp->sd_sym->st_shndx = SHN_ABS;
				sdp->sd_sym->st_info =
				    ELF_ST_INFO(bind, STT_OBJECT);
				sdp->sd_ref = REF_REL_NEED;
				sdp->sd_flags |= (FLG_SY_SPECSEC |
				    FLG_SY_DEFAULT | FLG_SY_EXPDEF);
				sdp->sd_aux->sa_overndx = vdp->vd_ndx;

				/*
				 * If the reference originated from a mapfile,
				 * ensure we mark the symbol as used.
				 */
				if (sdp->sd_flags & FLG_SY_MAPREF)
					sdp->sd_flags |= FLG_SY_MAPUSED;

			} else if ((sdp->sd_flags & FLG_SY_SPECSEC) &&
			    (sdp->sd_sym->st_shndx != SHN_ABS) &&
			    (sdp->sd_ref == REF_REL_NEED)) {
				ld_eprintf(ofl, ERR_WARNING,
				    (MSG_VER_DEFINED), name,
				    sdp->sd_file->ifl_name);
			}
		} else {
			/*
			 * If the symbol does not exist create it.
			 */
			if ((sym = libld_calloc(sizeof (Elf64_Sym), 1)) == NULL)
				return (S_ERROR);

			sym->st_shndx = SHN_ABS;
			sym->st_info = ELF_ST_INFO(bind, STT_OBJECT);
			
			//DBG_CALL(Dbg_ver_symbol(ofl->ofl_lml, name));

			if ((sdp = ld_sym_enter(name, sym, vdp->vd_hash,
			    vdp->vd_file, ofl, 0, SHN_ABS,
			    (FLG_SY_SPECSEC | FLG_SY_DEFAULT | FLG_SY_EXPDEF),
			    &where)) == (Sym_desc *)S_ERROR)
				return (S_ERROR);

			sdp->sd_ref = REF_REL_NEED;
			sdp->sd_aux->sa_overndx = vdp->vd_ndx;
		}
	}
	return (1);
}