Example #1
/*
 * Resolve the type down to a base type node, and then return the alignment
 * needed for the type storage in bytes.
 */
ssize_t
ctf_type_align(ctf_file_t *fp, ctf_id_t type)
{
	const ctf_type_t *tp;
	ctf_arinfo_t r;

	if ((type = ctf_type_resolve(fp, type)) == CTF_ERR)
		return (-1); /* errno is set for us */

	if ((tp = ctf_lookup_by_id(&fp, type)) == NULL)
		return (-1); /* errno is set for us */

	switch (LCTF_INFO_KIND(fp, tp->ctt_info)) {
	case CTF_K_POINTER:
	case CTF_K_FUNCTION:
		return (fp->ctf_dmodel->ctd_pointer);

	case CTF_K_ARRAY:
		if (ctf_array_info(fp, type, &r) == CTF_ERR)
			return (-1); /* errno is set for us */
		return (ctf_type_align(fp, r.ctr_contents));

	case CTF_K_STRUCT:
	case CTF_K_UNION: {
		uint_t n = LCTF_INFO_VLEN(fp, tp->ctt_info);
		ssize_t size, increment;
		size_t align = 0;
		const void *vmp;

		(void) ctf_get_ctt_size(fp, tp, &size, &increment);
		vmp = (uchar_t *)tp + increment;

		if (LCTF_INFO_KIND(fp, tp->ctt_info) == CTF_K_STRUCT)
			n = MIN(n, 1); /* only use first member for structs */

		if (fp->ctf_version == CTF_VERSION_1 ||
		    size < CTF_LSTRUCT_THRESH) {
			const ctf_member_t *mp = vmp;
			for (; n != 0; n--, mp++) {
				ssize_t am = ctf_type_align(fp, mp->ctm_type);
				align = MAX(align, am);
			}
		} else {
			const ctf_lmember_t *lmp = vmp;
			for (; n != 0; n--, lmp++) {
				ssize_t am = ctf_type_align(fp, lmp->ctlm_type);
				align = MAX(align, am);
			}
		}

		return (align);
	}

	case CTF_K_ENUM:
		return (fp->ctf_dmodel->ctd_int);

	default:
		return (ctf_get_ctt_size(fp, tp, NULL, NULL));
	}
}
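A minimal caller sketch (not part of the library): ctf_type_size() appears later in this collection, while ctf_errno() and ctf_errmsg() are assumed to be the usual libctf error accessors; print_layout() is a hypothetical helper name.

#include <libctf.h>
#include <stdio.h>

/* Hypothetical helper: report the storage size and alignment of a type. */
static int
print_layout(ctf_file_t *fp, ctf_id_t id)
{
	ssize_t size = ctf_type_size(fp, id);
	ssize_t align = ctf_type_align(fp, id);

	if (size == -1 || align == -1) {
		(void) fprintf(stderr, "layout query failed: %s\n",
		    ctf_errmsg(ctf_errno(fp)));
		return (-1);
	}

	(void) printf("size=%zd bytes, align=%zd bytes\n", size, align);
	return (0);
}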
Example #2
/*
 * Convert the specified enum tag name to the corresponding value, if a
 * matching name can be found.  Otherwise CTF_ERR is returned.
 */
int
ctf_enum_value(ctf_file_t *fp, ctf_id_t type, const char *name, int *valp)
{
	ctf_file_t *ofp = fp;
	const ctf_type_t *tp;
	const ctf_enum_t *ep;
	ssize_t size, increment;
	uint_t n;

	if ((type = ctf_type_resolve(fp, type)) == CTF_ERR)
		return (CTF_ERR); /* errno is set for us */

	if ((tp = ctf_lookup_by_id(&fp, type)) == NULL)
		return (CTF_ERR); /* errno is set for us */

	if (LCTF_INFO_KIND(fp, tp->ctt_info) != CTF_K_ENUM) {
		(void) ctf_set_errno(ofp, ECTF_NOTENUM);
		return (CTF_ERR);
	}

	(void) ctf_get_ctt_size(fp, tp, &size, &increment);

	ep = (const ctf_enum_t *)((uintptr_t)tp + increment);

	for (n = LCTF_INFO_VLEN(fp, tp->ctt_info); n != 0; n--, ep++) {
		if (strcmp(ctf_strptr(fp, ep->cte_name), name) == 0) {
			if (valp != NULL)
				*valp = ep->cte_value;
			return (0);
		}
	}

	(void) ctf_set_errno(ofp, ECTF_NOENUMNAM);
	return (CTF_ERR);
}
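A hedged usage sketch: lookup_enumerator() is a hypothetical caller, and ctf_errmsg()/ctf_errno() are assumed from the public libctf API.

#include <libctf.h>
#include <stdio.h>

/* Hypothetical caller: translate an enumerator name into its value. */
static int
lookup_enumerator(ctf_file_t *fp, ctf_id_t etype, const char *name)
{
	int val;

	if (ctf_enum_value(fp, etype, name, &val) == CTF_ERR) {
		(void) fprintf(stderr, "%s: %s\n", name,
		    ctf_errmsg(ctf_errno(fp)));
		return (CTF_ERR);
	}

	(void) printf("%s = %d\n", name, val);
	return (0);
}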
Example #3
/*
 * Return the encoding for the specified INTEGER or FLOAT.
 */
int
ctf_type_encoding(ctf_file_t *fp, ctf_id_t type, ctf_encoding_t *ep)
{
	ctf_file_t *ofp = fp;
	const ctf_type_t *tp;
	ssize_t increment;
	uint_t data;

	if ((tp = ctf_lookup_by_id(&fp, type)) == NULL)
		return (CTF_ERR); /* errno is set for us */

	(void) ctf_get_ctt_size(fp, tp, NULL, &increment);

	switch (LCTF_INFO_KIND(fp, tp->ctt_info)) {
	case CTF_K_INTEGER:
		data = *(const uint_t *)((uintptr_t)tp + increment);
		ep->cte_format = CTF_INT_ENCODING(data);
		ep->cte_offset = CTF_INT_OFFSET(data);
		ep->cte_bits = CTF_INT_BITS(data);
		break;
	case CTF_K_FLOAT:
		data = *(const uint_t *)((uintptr_t)tp + increment);
		ep->cte_format = CTF_FP_ENCODING(data);
		ep->cte_offset = CTF_FP_OFFSET(data);
		ep->cte_bits = CTF_FP_BITS(data);
		break;
	default:
		return (ctf_set_errno(ofp, ECTF_NOTINTFP));
	}

	return (0);
}
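A sketch of how the encoding is typically consumed, assuming CTF_INT_SIGNED and the cte_* fields are as defined in the CTF headers; describe_int() is a hypothetical name and only handles CTF_K_INTEGER.

#include <libctf.h>
#include <stdio.h>

/* Hypothetical caller: report the width and signedness of an integer type. */
static void
describe_int(ctf_file_t *fp, ctf_id_t id)
{
	ctf_encoding_t e;

	if (ctf_type_kind(fp, id) != CTF_K_INTEGER ||
	    ctf_type_encoding(fp, id, &e) == CTF_ERR)
		return; /* only plain integers are handled in this sketch */

	(void) printf("%u bits at bit offset %u, %ssigned\n", e.cte_bits,
	    e.cte_offset, (e.cte_format & CTF_INT_SIGNED) ? "" : "un");
}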
Example #4
/*
 * Iterate over the members of an ENUM.  We pass the string name and associated
 * integer value of each enum element to the specified callback function.
 */
int
ctf_enum_iter(ctf_file_t *fp, ctf_id_t type, ctf_enum_f *func, void *arg)
{
	ctf_file_t *ofp = fp;
	const ctf_type_t *tp;
	const ctf_enum_t *ep;
	ssize_t increment;
	uint_t n;
	int rc;

	if ((type = ctf_type_resolve(fp, type)) == CTF_ERR)
		return (CTF_ERR); /* errno is set for us */

	if ((tp = ctf_lookup_by_id(&fp, type)) == NULL)
		return (CTF_ERR); /* errno is set for us */

	if (LCTF_INFO_KIND(fp, tp->ctt_info) != CTF_K_ENUM)
		return (ctf_set_errno(ofp, ECTF_NOTENUM));

	(void) ctf_get_ctt_size(fp, tp, NULL, &increment);

	ep = (const ctf_enum_t *)((uintptr_t)tp + increment);

	for (n = LCTF_INFO_VLEN(fp, tp->ctt_info); n != 0; n--, ep++) {
		const char *name = ctf_strptr(fp, ep->cte_name);
		if ((rc = func(name, ep->cte_value, arg)) != 0)
			return (rc);
	}

	return (0);
}
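A sketch of a ctf_enum_f callback for the iterator above; print_enumerator() is a hypothetical name.

#include <libctf.h>
#include <stdio.h>

/*
 * Hypothetical ctf_enum_f callback: print each enumerator.  Returning
 * non-zero from the callback stops the iteration early.
 */
static int
print_enumerator(const char *name, int value, void *arg)
{
	(void) arg;
	(void) printf("\t%s = %d\n", name, value);
	return (0);
}

/* usage sketch: (void) ctf_enum_iter(fp, etype, print_enumerator, NULL); */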
Example #5
/*
 * Follow a given type through the graph for TYPEDEF, VOLATILE, CONST, and
 * RESTRICT nodes until we reach a "base" type node.  This is useful when
 * we want to follow a type ID to a node that has members or a size.  To guard
 * against infinite loops, we implement simplified cycle detection and check
 * each link against itself, the previous node, and the topmost node.
 */
ctf_id_t
ctf_type_resolve(ctf_file_t *fp, ctf_id_t type)
{
	ctf_id_t prev = type, otype = type;
	ctf_file_t *ofp = fp;
	const ctf_type_t *tp;

	while ((tp = ctf_lookup_by_id(&fp, type)) != NULL) {
		switch (LCTF_INFO_KIND(fp, tp->ctt_info)) {
		case CTF_K_TYPEDEF:
		case CTF_K_VOLATILE:
		case CTF_K_CONST:
		case CTF_K_RESTRICT:
			if (tp->ctt_type == type || tp->ctt_type == otype ||
			    tp->ctt_type == prev) {
				ctf_dprintf("type %ld cycle detected\n", otype);
				return (ctf_set_errno(ofp, ECTF_CORRUPT));
			}
			prev = type;
			type = tp->ctt_type;
			break;
		default:
			return (type);
		}
	}

	return (CTF_ERR); /* errno is set for us */
}
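A short sketch of the common resolve-then-inspect pattern; base_kind() is a hypothetical helper built only on ctf_type_resolve() and ctf_type_kind() (shown below).

#include <libctf.h>

/*
 * Hypothetical helper: strip typedefs and cv-qualifiers, then report the
 * kind of the underlying base type.
 */
static int
base_kind(ctf_file_t *fp, ctf_id_t id)
{
	ctf_id_t base = ctf_type_resolve(fp, id);

	if (base == CTF_ERR)
		return (CTF_ERR); /* errno is set for us */

	return (ctf_type_kind(fp, base));
}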
Example #6
/*
 * Return the kind (CTF_K_* constant) for the specified type ID.
 */
int
ctf_type_kind(ctf_file_t *fp, ctf_id_t type)
{
	const ctf_type_t *tp;

	if ((tp = ctf_lookup_by_id(&fp, type)) == NULL)
		return (CTF_ERR); /* errno is set for us */

	return (LCTF_INFO_KIND(fp, tp->ctt_info));
}
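A sketch of the usual dispatch on the returned kind; kind_name() is a hypothetical helper and the CTF_K_* constants are assumed to come from the CTF headers.

#include <libctf.h>

/* Hypothetical helper: map a type ID to a human-readable kind string. */
static const char *
kind_name(ctf_file_t *fp, ctf_id_t id)
{
	switch (ctf_type_kind(fp, id)) {
	case CTF_K_INTEGER:	return ("integer");
	case CTF_K_FLOAT:	return ("float");
	case CTF_K_POINTER:	return ("pointer");
	case CTF_K_ARRAY:	return ("array");
	case CTF_K_FUNCTION:	return ("function");
	case CTF_K_STRUCT:	return ("struct");
	case CTF_K_UNION:	return ("union");
	case CTF_K_ENUM:	return ("enum");
	case CTF_K_FORWARD:	return ("forward");
	case CTF_K_TYPEDEF:	return ("typedef");
	case CTF_K_VOLATILE:	return ("volatile");
	case CTF_K_CONST:	return ("const");
	case CTF_K_RESTRICT:	return ("restrict");
	default:		return ("unknown");
	}
}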
Example #7
/*
 * Recursively visit the members of any type.  This function is used as the
 * engine for ctf_type_visit, below.  We resolve the input type, recursively
 * invoke ourselves for each type member if the type is a struct or union, and
 * then invoke the callback function on the current type.  If any callback
 * returns non-zero, we abort and percolate the error code back up to the top.
 */
static int
ctf_type_rvisit(ctf_file_t *fp, ctf_id_t type, ctf_visit_f *func, void *arg,
    const char *name, ulong_t offset, int depth)
{
	ctf_id_t otype = type;
	const ctf_type_t *tp;
	ssize_t size, increment;
	uint_t kind, n;
	int rc;

	if ((type = ctf_type_resolve(fp, type)) == CTF_ERR)
		return (CTF_ERR); /* errno is set for us */

	if ((tp = ctf_lookup_by_id(&fp, type)) == NULL)
		return (CTF_ERR); /* errno is set for us */

	if ((rc = func(name, otype, offset, depth, arg)) != 0)
		return (rc);

	kind = LCTF_INFO_KIND(fp, tp->ctt_info);

	if (kind != CTF_K_STRUCT && kind != CTF_K_UNION)
		return (0);

	(void) ctf_get_ctt_size(fp, tp, &size, &increment);

	if (fp->ctf_version == CTF_VERSION_1 || size < CTF_LSTRUCT_THRESH) {
		const ctf_member_t *mp = (const ctf_member_t *)
		    ((uintptr_t)tp + increment);

		for (n = LCTF_INFO_VLEN(fp, tp->ctt_info); n != 0; n--, mp++) {
			if ((rc = ctf_type_rvisit(fp, mp->ctm_type,
			    func, arg, ctf_strptr(fp, mp->ctm_name),
			    offset + mp->ctm_offset, depth + 1)) != 0)
				return (rc);
		}

	} else {
		const ctf_lmember_t *lmp = (const ctf_lmember_t *)
		    ((uintptr_t)tp + increment);

		for (n = LCTF_INFO_VLEN(fp, tp->ctt_info); n != 0; n--, lmp++) {
			if ((rc = ctf_type_rvisit(fp, lmp->ctlm_type,
			    func, arg, ctf_strptr(fp, lmp->ctlm_name),
			    offset + (ulong_t)CTF_LMEM_OFFSET(lmp),
			    depth + 1)) != 0)
				return (rc);
		}
	}

	return (0);
}
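A sketch of a ctf_visit_f callback for the public ctf_type_visit() wrapper that drives the engine above; print_member() is a hypothetical name, and offsets are printed in bits per the ctm_offset convention.

#include <sys/types.h>
#include <libctf.h>
#include <stdio.h>

/* Hypothetical visitor: print each member indented by its nesting depth. */
static int
print_member(const char *name, ctf_id_t type, ulong_t offset, int depth,
    void *arg)
{
	(void) arg;
	(void) printf("%*s%s (type %ld) at bit offset %lu\n", depth * 2, "",
	    name, (long)type, offset);
	return (0);
}

/* usage sketch: (void) ctf_type_visit(fp, id, print_member, NULL); */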
Example #8
/*
 * Given a symbol table index, return the info for the function described
 * by the corresponding entry in the symbol table.
 */
int
ctf_func_info(ctf_file_t *fp, ulong_t symidx, ctf_funcinfo_t *fip)
{
	const ctf_sect_t *sp = &fp->ctf_symtab;
	const ushort_t *dp;
	ushort_t info, kind, n;

	if (sp->cts_data == NULL)
		return (ctf_set_errno(fp, ECTF_NOSYMTAB));

	if (symidx >= fp->ctf_nsyms)
		return (ctf_set_errno(fp, EINVAL));

	if (sp->cts_entsize == sizeof (Elf32_Sym)) {
		const Elf32_Sym *symp = (Elf32_Sym *)sp->cts_data + symidx;
		if (ELF32_ST_TYPE(symp->st_info) != STT_FUNC)
			return (ctf_set_errno(fp, ECTF_NOTFUNC));
	} else {
		const Elf64_Sym *symp = (Elf64_Sym *)sp->cts_data + symidx;
		if (ELF64_ST_TYPE(symp->st_info) != STT_FUNC)
			return (ctf_set_errno(fp, ECTF_NOTFUNC));
	}

	if (fp->ctf_sxlate[symidx] == -1u)
		return (ctf_set_errno(fp, ECTF_NOFUNCDAT));

	dp = (ushort_t *)((uintptr_t)fp->ctf_buf + fp->ctf_sxlate[symidx]);

	info = *dp++;
	kind = LCTF_INFO_KIND(fp, info);
	n = LCTF_INFO_VLEN(fp, info);

	if (kind == CTF_K_UNKNOWN && n == 0)
		return (ctf_set_errno(fp, ECTF_NOFUNCDAT));

	if (kind != CTF_K_FUNCTION)
		return (ctf_set_errno(fp, ECTF_CORRUPT));

	fip->ctc_return = *dp++;
	fip->ctc_argc = n;
	fip->ctc_flags = 0;

	if (n != 0 && dp[n - 1] == 0) {
		fip->ctc_flags |= CTF_FUNC_VARARG;
		fip->ctc_argc--;
	}

	return (0);
}
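A hedged caller sketch: print_func_proto() is hypothetical, and ctf_func_args() is assumed to be the companion routine that fills in the argument type IDs once ctc_argc is known.

#include <sys/types.h>
#include <libctf.h>
#include <stdio.h>

/* Hypothetical caller: print the prototype info for a function symbol. */
static int
print_func_proto(ctf_file_t *fp, ulong_t symidx)
{
	ctf_funcinfo_t fi;
	ctf_id_t args[32];
	uint_t i, argc;

	if (ctf_func_info(fp, symidx, &fi) == CTF_ERR)
		return (CTF_ERR); /* errno is set for us */

	(void) printf("returns type %ld, %u args%s\n", (long)fi.ctc_return,
	    fi.ctc_argc, (fi.ctc_flags & CTF_FUNC_VARARG) ? ", ..." : "");

	argc = fi.ctc_argc > 32 ? 32 : fi.ctc_argc; /* truncate for sketch */

	if (ctf_func_args(fp, symidx, argc, args) == CTF_ERR)
		return (CTF_ERR); /* errno is set for us */

	for (i = 0; i < argc; i++)
		(void) printf("\targ %u: type %ld\n", i, (long)args[i]);

	return (0);
}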
Example #9
/*
 * Return the type and offset for a given member of a STRUCT or UNION.
 */
int
ctf_member_info(ctf_file_t *fp, ctf_id_t type, const char *name,
    ctf_membinfo_t *mip)
{
	ctf_file_t *ofp = fp;
	const ctf_type_t *tp;
	ssize_t size, increment;
	uint_t kind, n;

	if ((type = ctf_type_resolve(fp, type)) == CTF_ERR)
		return (CTF_ERR); /* errno is set for us */

	if ((tp = ctf_lookup_by_id(&fp, type)) == NULL)
		return (CTF_ERR); /* errno is set for us */

	(void) ctf_get_ctt_size(fp, tp, &size, &increment);
	kind = LCTF_INFO_KIND(fp, tp->ctt_info);

	if (kind != CTF_K_STRUCT && kind != CTF_K_UNION)
		return (ctf_set_errno(ofp, ECTF_NOTSOU));

	if (fp->ctf_version == CTF_VERSION_1 || size < CTF_LSTRUCT_THRESH) {
		const ctf_member_t *mp = (const ctf_member_t *)
		    ((uintptr_t)tp + increment);

		for (n = LCTF_INFO_VLEN(fp, tp->ctt_info); n != 0; n--, mp++) {
			if (strcmp(ctf_strptr(fp, mp->ctm_name), name) == 0) {
				mip->ctm_type = mp->ctm_type;
				mip->ctm_offset = mp->ctm_offset;
				return (0);
			}
		}
	} else {
		const ctf_lmember_t *lmp = (const ctf_lmember_t *)
		    ((uintptr_t)tp + increment);

		for (n = LCTF_INFO_VLEN(fp, tp->ctt_info); n != 0; n--, lmp++) {
			if (strcmp(ctf_strptr(fp, lmp->ctlm_name), name) == 0) {
				mip->ctm_type = lmp->ctlm_type;
				mip->ctm_offset = (ulong_t)CTF_LMEM_OFFSET(lmp);
				return (0);
			}
		}
	}

	return (ctf_set_errno(ofp, ECTF_NOMEMBNAM));
}
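A caller sketch for the lookup above; member_byte_offset() is a hypothetical helper, and it relies on ctm_offset being expressed in bits.

#include <sys/types.h>
#include <libctf.h>

/*
 * Hypothetical helper: return the byte offset of a named member, rounding
 * down; bit-fields need not start on a byte boundary.
 */
static ssize_t
member_byte_offset(ctf_file_t *fp, ctf_id_t stype, const char *member)
{
	ctf_membinfo_t mi;

	if (ctf_member_info(fp, stype, member, &mi) == CTF_ERR)
		return (-1); /* errno is set for us */

	return ((ssize_t)(mi.ctm_offset / 8));
}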
Example #10
/*
 * Resolve the type down to a base type node, and then return the size
 * of the type storage in bytes.
 */
ssize_t
ctf_type_size(ctf_file_t *fp, ctf_id_t type)
{
	const ctf_type_t *tp;
	ssize_t size;
	ctf_arinfo_t ar;

	if ((type = ctf_type_resolve(fp, type)) == CTF_ERR)
		return (-1); /* errno is set for us */

	if ((tp = ctf_lookup_by_id(&fp, type)) == NULL)
		return (-1); /* errno is set for us */

	switch (LCTF_INFO_KIND(fp, tp->ctt_info)) {
	case CTF_K_POINTER:
		return (fp->ctf_dmodel->ctd_pointer);

	case CTF_K_FUNCTION:
		return (0); /* function size is only known by symtab */

	case CTF_K_ENUM:
		return (fp->ctf_dmodel->ctd_int);

	case CTF_K_ARRAY:
		/*
		 * Array size is not directly returned by stabs data.  Instead,
		 * it defines the element type and requires the user to perform
		 * the multiplication.  If ctf_get_ctt_size() returns zero, the
		 * current version of ctfconvert does not compute member sizes
		 * and we compute the size here on its behalf.
		 */
		if ((size = ctf_get_ctt_size(fp, tp, NULL, NULL)) > 0)
			return (size);

		if (ctf_array_info(fp, type, &ar) == CTF_ERR ||
		    (size = ctf_type_size(fp, ar.ctr_contents)) == CTF_ERR)
			return (-1); /* errno is set for us */

		return (size * ar.ctr_nelems);

	default:
		return (ctf_get_ctt_size(fp, tp, NULL, NULL));
	}
}
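A small usage sketch; alloc_for_type() is a hypothetical helper that sizes a buffer from the resolved type size.

#include <libctf.h>
#include <stdlib.h>

/* Hypothetical helper: allocate zeroed storage for one instance of a type. */
static void *
alloc_for_type(ctf_file_t *fp, ctf_id_t id)
{
	ssize_t size = ctf_type_size(fp, id);

	if (size <= 0)
		return (NULL); /* error, or a zero-sized/function type */

	return (calloc(1, (size_t)size));
}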
Example #11
/*
 * If the type is one that directly references another type (such as POINTER),
 * then return the ID of the type to which it refers.
 */
ctf_id_t
ctf_type_reference(ctf_file_t *fp, ctf_id_t type)
{
	ctf_file_t *ofp = fp;
	const ctf_type_t *tp;

	if ((tp = ctf_lookup_by_id(&fp, type)) == NULL)
		return (CTF_ERR); /* errno is set for us */

	switch (LCTF_INFO_KIND(fp, tp->ctt_info)) {
	case CTF_K_POINTER:
	case CTF_K_TYPEDEF:
	case CTF_K_VOLATILE:
	case CTF_K_CONST:
	case CTF_K_RESTRICT:
		return (tp->ctt_type);
	default:
		return (ctf_set_errno(ofp, ECTF_NOTREF));
	}
}
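A sketch of the complementary walk: unlike ctf_type_resolve(), this also follows pointers. strip_refs() is hypothetical and assumes well-formed (acyclic) CTF data.

#include <libctf.h>

/*
 * Hypothetical helper: follow POINTER, TYPEDEF, and qualifier nodes until
 * a non-reference type is reached.
 */
static ctf_id_t
strip_refs(ctf_file_t *fp, ctf_id_t id)
{
	ctf_id_t ref;

	while ((ref = ctf_type_reference(fp, id)) != CTF_ERR)
		id = ref;

	return (id); /* the final ECTF_NOTREF simply ends the walk */
}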
Example #12
/*
 * Return the array type, index, and size information for the specified ARRAY.
 */
int
ctf_array_info(ctf_file_t *fp, ctf_id_t type, ctf_arinfo_t *arp)
{
	ctf_file_t *ofp = fp;
	const ctf_type_t *tp;
	const ctf_array_t *ap;
	ssize_t increment;

	if ((tp = ctf_lookup_by_id(&fp, type)) == NULL)
		return (CTF_ERR); /* errno is set for us */

	if (LCTF_INFO_KIND(fp, tp->ctt_info) != CTF_K_ARRAY)
		return (ctf_set_errno(ofp, ECTF_NOTARRAY));

	(void) ctf_get_ctt_size(fp, tp, NULL, &increment);

	ap = (const ctf_array_t *)((uintptr_t)tp + increment);
	arp->ctr_contents = ap->cta_contents;
	arp->ctr_index = ap->cta_index;
	arp->ctr_nelems = ap->cta_nelems;

	return (0);
}
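A sketch that multiplies out element counts, assuming nested arrays are represented as arrays of arrays; total_elements() is a hypothetical helper.

#include <sys/types.h>
#include <libctf.h>

/* Hypothetical helper: count the total elements of a (nested) array type. */
static ssize_t
total_elements(ctf_file_t *fp, ctf_id_t id)
{
	ctf_arinfo_t ar;
	ssize_t n = 1;

	while (ctf_type_kind(fp, id) == CTF_K_ARRAY) {
		if (ctf_array_info(fp, id, &ar) == CTF_ERR)
			return (-1); /* errno is set for us */
		n *= ar.ctr_nelems;
		if ((id = ctf_type_resolve(fp, ar.ctr_contents)) == CTF_ERR)
			return (-1); /* errno is set for us */
	}

	return (n);
}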
Example #13
/*
 * Initialize the type ID translation table with the byte offset of each type,
 * and initialize the hash tables of each named type.
 */
static int
init_types(ctf_file_t *fp, const ctf_header_t *cth)
{
	/* LINTED - pointer alignment */
	const ctf_type_t *tbuf = (ctf_type_t *)(fp->ctf_buf + cth->cth_typeoff);
	/* LINTED - pointer alignment */
	const ctf_type_t *tend = (ctf_type_t *)(fp->ctf_buf + cth->cth_stroff);

	ulong_t pop[CTF_K_MAX + 1] = { 0 };
	const ctf_type_t *tp;
	ctf_hash_t *hp;
	ushort_t dst;
	ctf_id_t id;
	uint_t *xp;

	/*
	 * We initially determine whether the container is a child or a parent
	 * based on the value of cth_parname.  To support containers that pre-
	 * date cth_parname, we also scan the types themselves for references
	 * to values in the range reserved for child types in our first pass.
	 */
	int child = cth->cth_parname != 0;
	int nlstructs = 0, nlunions = 0;
	int err;

	/*
	 * We make two passes through the entire type section.  In this first
	 * pass, we count the number of each type and the total number of types.
	 */
	for (tp = tbuf; tp < tend; fp->ctf_typemax++) {
		ushort_t kind = LCTF_INFO_KIND(fp, tp->ctt_info);
		ulong_t vlen = LCTF_INFO_VLEN(fp, tp->ctt_info);
		ssize_t size, increment;

		size_t vbytes;
		uint_t n;

		(void) ctf_get_ctt_size(fp, tp, &size, &increment);

		switch (kind) {
		case CTF_K_INTEGER:
		case CTF_K_FLOAT:
			vbytes = sizeof (uint_t);
			break;
		case CTF_K_ARRAY:
			vbytes = sizeof (ctf_array_t);
			break;
		case CTF_K_FUNCTION:
			vbytes = sizeof (ushort_t) * (vlen + (vlen & 1));
			break;
		case CTF_K_STRUCT:
		case CTF_K_UNION:
			if (fp->ctf_version == CTF_VERSION_1 ||
			    size < CTF_LSTRUCT_THRESH) {
				ctf_member_t *mp = (ctf_member_t *)
				    ((uintptr_t)tp + increment);

				vbytes = sizeof (ctf_member_t) * vlen;
				for (n = vlen; n != 0; n--, mp++)
					child |= CTF_TYPE_ISCHILD(mp->ctm_type);
			} else {
				ctf_lmember_t *lmp = (ctf_lmember_t *)
				    ((uintptr_t)tp + increment);

				vbytes = sizeof (ctf_lmember_t) * vlen;
				for (n = vlen; n != 0; n--, lmp++)
					child |=
					    CTF_TYPE_ISCHILD(lmp->ctlm_type);
			}
			break;
		case CTF_K_ENUM:
			vbytes = sizeof (ctf_enum_t) * vlen;
			break;
		case CTF_K_FORWARD:
			/*
			 * For forward declarations, ctt_type is the CTF_K_*
			 * kind for the tag, so bump that population count too.
			 * If ctt_type is unknown, treat the tag as a struct.
			 */
			if (tp->ctt_type == CTF_K_UNKNOWN ||
			    tp->ctt_type >= CTF_K_MAX)
				pop[CTF_K_STRUCT]++;
			else
				pop[tp->ctt_type]++;
			/*FALLTHRU*/
		case CTF_K_UNKNOWN:
			vbytes = 0;
			break;
		case CTF_K_POINTER:
		case CTF_K_TYPEDEF:
		case CTF_K_VOLATILE:
		case CTF_K_CONST:
		case CTF_K_RESTRICT:
			child |= CTF_TYPE_ISCHILD(tp->ctt_type);
			vbytes = 0;
			break;
		default:
			ctf_dprintf("detected invalid CTF kind -- %u\n", kind);
			return (ECTF_CORRUPT);
		}
		tp = (ctf_type_t *)((uintptr_t)tp + increment + vbytes);
		pop[kind]++;
	}

	/*
	 * If we detected a reference to a child type ID, then we know this
	 * container is a child and may have a parent's types imported later.
	 */
	if (child) {
		ctf_dprintf("CTF container %p is a child\n", (void *)fp);
		fp->ctf_flags |= LCTF_CHILD;
	} else
		ctf_dprintf("CTF container %p is a parent\n", (void *)fp);

	/*
	 * Now that we've counted up the number of each type, we can allocate
	 * the hash tables, type translation table, and pointer table.
	 */
	if ((err = ctf_hash_create(&fp->ctf_structs, pop[CTF_K_STRUCT])) != 0)
		return (err);

	if ((err = ctf_hash_create(&fp->ctf_unions, pop[CTF_K_UNION])) != 0)
		return (err);

	if ((err = ctf_hash_create(&fp->ctf_enums, pop[CTF_K_ENUM])) != 0)
		return (err);

	if ((err = ctf_hash_create(&fp->ctf_names,
	    pop[CTF_K_INTEGER] + pop[CTF_K_FLOAT] + pop[CTF_K_FUNCTION] +
	    pop[CTF_K_TYPEDEF] + pop[CTF_K_POINTER] + pop[CTF_K_VOLATILE] +
	    pop[CTF_K_CONST] + pop[CTF_K_RESTRICT])) != 0)
		return (err);

	fp->ctf_txlate = ctf_alloc(sizeof (uint_t) * (fp->ctf_typemax + 1));
	fp->ctf_ptrtab = ctf_alloc(sizeof (ushort_t) * (fp->ctf_typemax + 1));

	if (fp->ctf_txlate == NULL || fp->ctf_ptrtab == NULL)
		return (EAGAIN); /* memory allocation failed */

	xp = fp->ctf_txlate;
	*xp++ = 0; /* type id 0 is used as a sentinel value */

	bzero(fp->ctf_txlate, sizeof (uint_t) * (fp->ctf_typemax + 1));
	bzero(fp->ctf_ptrtab, sizeof (ushort_t) * (fp->ctf_typemax + 1));

	/*
	 * In the second pass through the types, we fill in each entry of the
	 * type and pointer tables and add names to the appropriate hashes.
	 */
	for (id = 1, tp = tbuf; tp < tend; xp++, id++) {
		ushort_t kind = LCTF_INFO_KIND(fp, tp->ctt_info);
		ulong_t vlen = LCTF_INFO_VLEN(fp, tp->ctt_info);
		ssize_t size, increment;

		const char *name;
		size_t vbytes;
		ctf_helem_t *hep;
		ctf_encoding_t cte;

		(void) ctf_get_ctt_size(fp, tp, &size, &increment);
		name = ctf_strptr(fp, tp->ctt_name);

		switch (kind) {
		case CTF_K_INTEGER:
		case CTF_K_FLOAT:
			/*
			 * Only insert a new integer base type definition if
			 * this type name has not been defined yet.  We re-use
			 * the names with different encodings for bit-fields.
			 */
			if ((hep = ctf_hash_lookup(&fp->ctf_names, fp,
			    name, strlen(name))) == NULL) {
				err = ctf_hash_insert(&fp->ctf_names, fp,
				    CTF_INDEX_TO_TYPE(id, child), tp->ctt_name);
				if (err != 0 && err != ECTF_STRTAB)
					return (err);
			} else if (ctf_type_encoding(fp, hep->h_type,
			    &cte) == 0 && cte.cte_bits == 0) {
				/*
				 * Work-around SOS8 stabs bug: replace existing
				 * intrinsic w/ same name if it was zero bits.
				 */
				hep->h_type = CTF_INDEX_TO_TYPE(id, child);
			}
			vbytes = sizeof (uint_t);
			break;

		case CTF_K_ARRAY:
			vbytes = sizeof (ctf_array_t);
			break;

		case CTF_K_FUNCTION:
			err = ctf_hash_insert(&fp->ctf_names, fp,
			    CTF_INDEX_TO_TYPE(id, child), tp->ctt_name);
			if (err != 0 && err != ECTF_STRTAB)
				return (err);
			vbytes = sizeof (ushort_t) * (vlen + (vlen & 1));
			break;

		case CTF_K_STRUCT:
			err = ctf_hash_define(&fp->ctf_structs, fp,
			    CTF_INDEX_TO_TYPE(id, child), tp->ctt_name);

			if (err != 0 && err != ECTF_STRTAB)
				return (err);

			if (fp->ctf_version == CTF_VERSION_1 ||
			    size < CTF_LSTRUCT_THRESH)
				vbytes = sizeof (ctf_member_t) * vlen;
			else {
				vbytes = sizeof (ctf_lmember_t) * vlen;
				nlstructs++;
			}
			break;

		case CTF_K_UNION:
			err = ctf_hash_define(&fp->ctf_unions, fp,
			    CTF_INDEX_TO_TYPE(id, child), tp->ctt_name);

			if (err != 0 && err != ECTF_STRTAB)
				return (err);

			if (fp->ctf_version == CTF_VERSION_1 ||
			    size < CTF_LSTRUCT_THRESH)
				vbytes = sizeof (ctf_member_t) * vlen;
			else {
				vbytes = sizeof (ctf_lmember_t) * vlen;
				nlunions++;
			}
			break;

		case CTF_K_ENUM:
			err = ctf_hash_define(&fp->ctf_enums, fp,
			    CTF_INDEX_TO_TYPE(id, child), tp->ctt_name);

			if (err != 0 && err != ECTF_STRTAB)
				return (err);

			vbytes = sizeof (ctf_enum_t) * vlen;
			break;

		case CTF_K_TYPEDEF:
			err = ctf_hash_insert(&fp->ctf_names, fp,
			    CTF_INDEX_TO_TYPE(id, child), tp->ctt_name);
			if (err != 0 && err != ECTF_STRTAB)
				return (err);
			vbytes = 0;
			break;

		case CTF_K_FORWARD:
			/*
			 * Only insert forward tags into the given hash if the
			 * type or tag name is not already present.
			 */
			switch (tp->ctt_type) {
			case CTF_K_STRUCT:
				hp = &fp->ctf_structs;
				break;
			case CTF_K_UNION:
				hp = &fp->ctf_unions;
				break;
			case CTF_K_ENUM:
				hp = &fp->ctf_enums;
				break;
			default:
				hp = &fp->ctf_structs;
			}

			if (ctf_hash_lookup(hp, fp,
			    name, strlen(name)) == NULL) {
				err = ctf_hash_insert(hp, fp,
				    CTF_INDEX_TO_TYPE(id, child), tp->ctt_name);
				if (err != 0 && err != ECTF_STRTAB)
					return (err);
			}
			vbytes = 0;
			break;

		case CTF_K_POINTER:
			/*
			 * If the type referenced by the pointer is in this CTF
			 * container, then store the index of the pointer type
			 * in fp->ctf_ptrtab[ index of referenced type ].
			 */
			if (CTF_TYPE_ISCHILD(tp->ctt_type) == child &&
			    CTF_TYPE_TO_INDEX(tp->ctt_type) <= fp->ctf_typemax)
				fp->ctf_ptrtab[
				    CTF_TYPE_TO_INDEX(tp->ctt_type)] = id;
			/*FALLTHRU*/

		case CTF_K_VOLATILE:
		case CTF_K_CONST:
		case CTF_K_RESTRICT:
			err = ctf_hash_insert(&fp->ctf_names, fp,
			    CTF_INDEX_TO_TYPE(id, child), tp->ctt_name);
			if (err != 0 && err != ECTF_STRTAB)
				return (err);
			/*FALLTHRU*/

		default:
			vbytes = 0;
			break;
		}

		*xp = (uint_t)((uintptr_t)tp - (uintptr_t)fp->ctf_buf);
		tp = (ctf_type_t *)((uintptr_t)tp + increment + vbytes);
	}

	ctf_dprintf("%lu total types processed\n", fp->ctf_typemax);
	ctf_dprintf("%u enum names hashed\n", ctf_hash_size(&fp->ctf_enums));
	ctf_dprintf("%u struct names hashed (%d long)\n",
	    ctf_hash_size(&fp->ctf_structs), nlstructs);
	ctf_dprintf("%u union names hashed (%d long)\n",
	    ctf_hash_size(&fp->ctf_unions), nlunions);
	ctf_dprintf("%u base type names hashed\n",
	    ctf_hash_size(&fp->ctf_names));

	/*
	 * Make an additional pass through the pointer table to find pointers
	 * that point to anonymous typedef nodes.  If we find one, modify the
	 * pointer table so that the pointer is also known to point to the
	 * node that is referenced by the anonymous typedef node.
	 */
	for (id = 1; id <= fp->ctf_typemax; id++) {
		if ((dst = fp->ctf_ptrtab[id]) != 0) {
			tp = LCTF_INDEX_TO_TYPEPTR(fp, id);

			if (LCTF_INFO_KIND(fp, tp->ctt_info) == CTF_K_TYPEDEF &&
			    strcmp(ctf_strptr(fp, tp->ctt_name), "") == 0 &&
			    CTF_TYPE_ISCHILD(tp->ctt_type) == child &&
			    CTF_TYPE_TO_INDEX(tp->ctt_type) <= fp->ctf_typemax)
				fp->ctf_ptrtab[
				    CTF_TYPE_TO_INDEX(tp->ctt_type)] = dst;
		}
	}

	return (0);
}
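A simplified sketch (not the verbatim library routine) of how the pointer table built above might later be consulted from inside the library; it reuses the internal CTF_TYPE_TO_INDEX/CTF_INDEX_TO_TYPE macros and LCTF_CHILD flag seen in this function, and skips the typedef-resolution retry a real lookup would perform.

/* Hypothetical internal helper: find a POINTER type that refers to 'type'. */
static ctf_id_t
lookup_pointer_to(ctf_file_t *fp, ctf_id_t type)
{
	ushort_t idx;

	if (CTF_TYPE_TO_INDEX(type) > fp->ctf_typemax)
		return (CTF_ERR);

	if ((idx = fp->ctf_ptrtab[CTF_TYPE_TO_INDEX(type)]) == 0)
		return (CTF_ERR); /* no pointer to this type was recorded */

	return (CTF_INDEX_TO_TYPE(idx, (fp->ctf_flags & LCTF_CHILD)));
}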
Example #14
/*
 * Initialize the symtab translation table by filling each entry with the
 * offset of the CTF type or function data corresponding to each STT_FUNC or
 * STT_OBJECT entry in the symbol table.
 */
static int
init_symtab(ctf_file_t *fp, const ctf_header_t *hp,
    const ctf_sect_t *sp, const ctf_sect_t *strp)
{
	const uchar_t *symp = sp->cts_data;
	uint_t *xp = fp->ctf_sxlate;
	uint_t *xend = xp + fp->ctf_nsyms;

	uint_t objtoff = hp->cth_objtoff;
	uint_t funcoff = hp->cth_funcoff;

	ushort_t info, vlen;
	Elf64_Sym sym, *gsp;
	const char *name;

	/*
	 * The CTF data object and function type sections are ordered to match
	 * the relative order of the respective symbol types in the symtab.
	 * If no type information is available for a symbol table entry, a
	 * pad is inserted in the CTF section.  As a further optimization,
	 * anonymous or undefined symbols are omitted from the CTF data.
	 */
	for (; xp < xend; xp++, symp += sp->cts_entsize) {
		if (sp->cts_entsize == sizeof (struct nlist)) {
			gsp = sym_to_gelf_macho(sp,
			    (Elf32_Sym *)(uintptr_t)symp, &sym,
			    (const char *)strp->cts_data);
		} else if (sp->cts_entsize == sizeof (struct nlist_64)) {
			gsp = sym_to_gelf_macho_64(sp,
			    (Elf32_Sym *)(uintptr_t)symp, &sym,
			    (const char *)strp->cts_data);
		} else if (sp->cts_entsize == sizeof (Elf32_Sym))
			gsp = sym_to_gelf((Elf32_Sym *)(uintptr_t)symp, &sym);
		else
			gsp = (Elf64_Sym *)(uintptr_t)symp;

		if (gsp->st_name < strp->cts_size)
			name = (const char *)strp->cts_data + gsp->st_name;
		else
			name = _CTF_NULLSTR;

		if (gsp->st_name == 0 || gsp->st_shndx == SHN_UNDEF ||
		    strcmp(name, "_START_") == 0 ||
		    strcmp(name, "_END_") == 0) {
			*xp = -1u;
			continue;
		}

		switch (ELF64_ST_TYPE(gsp->st_info)) {
		case STT_OBJECT:
			if (objtoff >= hp->cth_funcoff ||
			    (gsp->st_shndx == SHN_ABS && gsp->st_value == 0)) {
				*xp = -1u;
				break;
			}

			*xp = objtoff;
			objtoff += sizeof (ushort_t);
			break;

		case STT_FUNC:
			if (funcoff >= hp->cth_typeoff) {
				*xp = -1u;
				break;
			}

			*xp = funcoff;

			info = *(ushort_t *)((uintptr_t)fp->ctf_buf + funcoff);
			vlen = LCTF_INFO_VLEN(fp, info);

			/*
			 * If we encounter a zero pad at the end, just skip it.
			 * Otherwise skip over the function and its return type
			 * (+2) and the argument list (vlen).
			 */
			if (LCTF_INFO_KIND(fp, info) == CTF_K_UNKNOWN &&
			    vlen == 0)
				funcoff += sizeof (ushort_t); /* skip pad */
			else
				funcoff += sizeof (ushort_t) * (vlen + 2);
			break;

		default:
			*xp = -1u;
			break;
		}
	}

	ctf_dprintf("loaded %lu symtab entries\n", fp->ctf_nsyms);
	return (0);
}
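A hedged consumer sketch: print_object_type() is hypothetical; ctf_lookup_by_symbol(), ctf_type_name(), ctf_errno(), and ctf_errmsg() are assumed to be the usual public entry points that rely on the translation table built above.

#include <sys/types.h>
#include <libctf.h>
#include <stdio.h>

/* Hypothetical caller: map an STT_OBJECT symbol index to its type name. */
static void
print_object_type(ctf_file_t *fp, ulong_t symidx)
{
	char buf[256];
	ctf_id_t id;

	if ((id = ctf_lookup_by_symbol(fp, symidx)) == CTF_ERR) {
		(void) fprintf(stderr, "symbol %lu: %s\n", symidx,
		    ctf_errmsg(ctf_errno(fp)));
		return;
	}

	if (ctf_type_name(fp, id, buf, sizeof (buf)) != NULL)
		(void) printf("symbol %lu has type %s\n", symidx, buf);
}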
Example #15
/*
 * The ctf_add_type routine is used to copy a type from a source CTF container
 * to a dynamic destination container.  This routine operates recursively by
 * following the source type's links and embedded member types.  If the
 * destination container already contains a named type which has the same
 * attributes, then we succeed and return this type but no changes occur.
 */
ctf_id_t
ctf_add_type(ctf_file_t *dst_fp, ctf_file_t *src_fp, ctf_id_t src_type)
{
	ctf_id_t dst_type = CTF_ERR;
	uint_t dst_kind = CTF_K_UNKNOWN;

	const ctf_type_t *tp;
	const char *name;
	uint_t kind, flag, vlen;

	ctf_bundle_t src, dst;
	ctf_encoding_t src_en, dst_en;
	ctf_arinfo_t src_ar, dst_ar;

	ctf_dtdef_t *dtd;
	ctf_funcinfo_t ctc;
	ssize_t size;

	ctf_hash_t *hp;
	ctf_helem_t *hep;

	if (dst_fp == src_fp)
		return (src_type);

	if (!(dst_fp->ctf_flags & LCTF_RDWR))
		return (ctf_set_errno(dst_fp, ECTF_RDONLY));

	if ((tp = ctf_lookup_by_id(&src_fp, src_type)) == NULL)
		return (ctf_set_errno(dst_fp, ctf_errno(src_fp)));

	name = ctf_strptr(src_fp, tp->ctt_name);
	kind = LCTF_INFO_KIND(src_fp, tp->ctt_info);
	flag = LCTF_INFO_ROOT(src_fp, tp->ctt_info);
	vlen = LCTF_INFO_VLEN(src_fp, tp->ctt_info);

	switch (kind) {
	case CTF_K_STRUCT:
		hp = &dst_fp->ctf_structs;
		break;
	case CTF_K_UNION:
		hp = &dst_fp->ctf_unions;
		break;
	case CTF_K_ENUM:
		hp = &dst_fp->ctf_enums;
		break;
	default:
		hp = &dst_fp->ctf_names;
		break;
	}

	/*
	 * If the source type has a name and is a root type (visible at the
	 * top-level scope), lookup the name in the destination container and
	 * verify that it is of the same kind before we do anything else.
	 */
	if ((flag & CTF_ADD_ROOT) && name[0] != '\0' &&
	    (hep = ctf_hash_lookup(hp, dst_fp, name, strlen(name))) != NULL) {
		dst_type = (ctf_id_t)hep->h_type;
		dst_kind = ctf_type_kind(dst_fp, dst_type);
	}

	/*
	 * If an identically named dst_type exists, fail with ECTF_CONFLICT
	 * unless dst_type is a forward declaration and src_type is a struct,
	 * union, or enum (i.e. the definition of the previous forward decl).
	 */
	if (dst_type != CTF_ERR && dst_kind != kind) {
		if (dst_kind != CTF_K_FORWARD || (kind != CTF_K_ENUM &&
		    kind != CTF_K_STRUCT && kind != CTF_K_UNION))
			return (ctf_set_errno(dst_fp, ECTF_CONFLICT));
		else
			dst_type = CTF_ERR;
	}

	/*
	 * If the non-empty name was not found in the appropriate hash, search
	 * the list of pending dynamic definitions that are not yet committed.
	 * If a matching name and kind are found, assume this is the type that
	 * we are looking for.  This is necessary to permit ctf_add_type() to
	 * operate recursively on entities such as a struct that contains a
	 * pointer member that refers to the same struct type.
	 *
	 * In the case of integer and floating point types, we match using the
	 * type encoding as well - else we may incorrectly return a bitfield
	 * type, for instance.
	 */
	if (dst_type == CTF_ERR && name[0] != '\0') {
		for (dtd = ctf_list_prev(&dst_fp->ctf_dtdefs); dtd != NULL &&
		    CTF_TYPE_TO_INDEX(dtd->dtd_type) > dst_fp->ctf_dtoldid;
		    dtd = ctf_list_prev(dtd)) {
			if (CTF_INFO_KIND(dtd->dtd_data.ctt_info) != kind ||
			    dtd->dtd_name == NULL ||
			    strcmp(dtd->dtd_name, name) != 0)
				continue;
			if (kind == CTF_K_INTEGER || kind == CTF_K_FLOAT) {
				if (ctf_type_encoding(src_fp, src_type,
				    &src_en) != 0)
					continue;
				if (bcmp(&src_en, &dtd->dtd_u.dtu_enc,
				    sizeof (ctf_encoding_t)) != 0)
					continue;
			}
			return (dtd->dtd_type);
		}
	}

	src.ctb_file = src_fp;
	src.ctb_type = src_type;
	src.ctb_dtd = NULL;

	dst.ctb_file = dst_fp;
	dst.ctb_type = dst_type;
	dst.ctb_dtd = NULL;

	/*
	 * Now perform kind-specific processing.  If dst_type is CTF_ERR, then
	 * we add a new type with the same properties as src_type to dst_fp.
	 * If dst_type is not CTF_ERR, then we verify that dst_type has the
	 * same attributes as src_type.  We recurse for embedded references.
	 */
	switch (kind) {
	case CTF_K_INTEGER:
	case CTF_K_FLOAT:
		if (ctf_type_encoding(src_fp, src_type, &src_en) != 0)
			return (ctf_set_errno(dst_fp, ctf_errno(src_fp)));

		if (dst_type != CTF_ERR) {
			if (ctf_type_encoding(dst_fp, dst_type, &dst_en) != 0)
				return (CTF_ERR); /* errno is set for us */

			if (bcmp(&src_en, &dst_en, sizeof (ctf_encoding_t)))
				return (ctf_set_errno(dst_fp, ECTF_CONFLICT));

		} else if (kind == CTF_K_INTEGER) {
			dst_type = ctf_add_integer(dst_fp, flag, name, &src_en);
		} else
			dst_type = ctf_add_float(dst_fp, flag, name, &src_en);
		break;

	case CTF_K_POINTER:
	case CTF_K_VOLATILE:
	case CTF_K_CONST:
	case CTF_K_RESTRICT:
		src_type = ctf_type_reference(src_fp, src_type);
		src_type = ctf_add_type(dst_fp, src_fp, src_type);

		if (src_type == CTF_ERR)
			return (CTF_ERR); /* errno is set for us */

		dst_type = ctf_add_reftype(dst_fp, flag, src_type, kind);
		break;

	case CTF_K_ARRAY:
		if (ctf_array_info(src_fp, src_type, &src_ar) == CTF_ERR)
			return (ctf_set_errno(dst_fp, ctf_errno(src_fp)));

		src_ar.ctr_contents =
		    ctf_add_type(dst_fp, src_fp, src_ar.ctr_contents);
		src_ar.ctr_index =
		    ctf_add_type(dst_fp, src_fp, src_ar.ctr_index);
		src_ar.ctr_nelems = src_ar.ctr_nelems;

		if (src_ar.ctr_contents == CTF_ERR ||
		    src_ar.ctr_index == CTF_ERR)
			return (CTF_ERR); /* errno is set for us */

		if (dst_type != CTF_ERR) {
			if (ctf_array_info(dst_fp, dst_type, &dst_ar) != 0)
				return (CTF_ERR); /* errno is set for us */

			if (bcmp(&src_ar, &dst_ar, sizeof (ctf_arinfo_t)))
				return (ctf_set_errno(dst_fp, ECTF_CONFLICT));
		} else
			dst_type = ctf_add_array(dst_fp, flag, &src_ar);
		break;

	case CTF_K_FUNCTION:
		ctc.ctc_return = ctf_add_type(dst_fp, src_fp, tp->ctt_type);
		ctc.ctc_argc = 0;
		ctc.ctc_flags = 0;

		if (ctc.ctc_return == CTF_ERR)
			return (CTF_ERR); /* errno is set for us */

		dst_type = ctf_add_function(dst_fp, flag, &ctc, NULL);
		break;

	case CTF_K_STRUCT:
	case CTF_K_UNION: {
		ctf_dmdef_t *dmd;
		int errs = 0;

		/*
		 * Technically to match a struct or union we need to check both
		 * ways (src members vs. dst, dst members vs. src) but we make
		 * this more efficient by only checking src vs. dst and comparing
		 * the total size of the structure (which we must do anyway)
		 * which covers the possibility of dst members not in src.
		 * This optimization can be defeated for unions, but is so
		 * pathological as to render it irrelevant for our purposes.
		 */
		if (dst_type != CTF_ERR && dst_kind != CTF_K_FORWARD) {
			if (ctf_type_size(src_fp, src_type) !=
			    ctf_type_size(dst_fp, dst_type))
				return (ctf_set_errno(dst_fp, ECTF_CONFLICT));

			if (ctf_member_iter(src_fp, src_type, membcmp, &dst))
				return (ctf_set_errno(dst_fp, ECTF_CONFLICT));

			break;
		}

		/*
		 * Unlike the other cases, copying structs and unions is done
		 * manually so as to avoid repeated lookups in ctf_add_member
		 * and to ensure the exact same member offsets as in src_type.
		 */
		dst_type = ctf_add_generic(dst_fp, flag, name, &dtd);
		if (dst_type == CTF_ERR)
			return (CTF_ERR); /* errno is set for us */

		dst.ctb_type = dst_type;
		dst.ctb_dtd = dtd;

		if (ctf_member_iter(src_fp, src_type, membadd, &dst) != 0)
			errs++; /* increment errs and fail at bottom of case */

		if ((size = ctf_type_size(src_fp, src_type)) > CTF_MAX_SIZE) {
			dtd->dtd_data.ctt_size = CTF_LSIZE_SENT;
			dtd->dtd_data.ctt_lsizehi = CTF_SIZE_TO_LSIZE_HI(size);
			dtd->dtd_data.ctt_lsizelo = CTF_SIZE_TO_LSIZE_LO(size);
		} else
			dtd->dtd_data.ctt_size = (ushort_t)size;

		dtd->dtd_data.ctt_info = CTF_TYPE_INFO(kind, flag, vlen);

		/*
		 * Make a final pass through the members changing each dmd_type
		 * (a src_fp type) to an equivalent type in dst_fp.  We pass
		 * through all members, leaving any that fail set to CTF_ERR.
		 */
		for (dmd = ctf_list_next(&dtd->dtd_u.dtu_members);
		    dmd != NULL; dmd = ctf_list_next(dmd)) {
			if ((dmd->dmd_type = ctf_add_type(dst_fp, src_fp,
			    dmd->dmd_type)) == CTF_ERR)
				errs++;
		}

		if (errs)
			return (CTF_ERR); /* errno is set for us */

		/*
		 * Now that we know that we can't fail, we go through and bump
		 * all the reference counts on the member types.
		 */
		for (dmd = ctf_list_next(&dtd->dtd_u.dtu_members);
		    dmd != NULL; dmd = ctf_list_next(dmd))
			ctf_ref_inc(dst_fp, dmd->dmd_type);
		break;
	}

	case CTF_K_ENUM:
		if (dst_type != CTF_ERR && dst_kind != CTF_K_FORWARD) {
			if (ctf_enum_iter(src_fp, src_type, enumcmp, &dst) ||
			    ctf_enum_iter(dst_fp, dst_type, enumcmp, &src))
				return (ctf_set_errno(dst_fp, ECTF_CONFLICT));
		} else {
			dst_type = ctf_add_enum(dst_fp, flag, name);
			if ((dst.ctb_type = dst_type) == CTF_ERR ||
			    ctf_enum_iter(src_fp, src_type, enumadd, &dst))
				return (CTF_ERR); /* errno is set for us */
		}
		break;

	case CTF_K_FORWARD:
		if (dst_type == CTF_ERR) {
			dst_type = ctf_add_forward(dst_fp,
			    flag, name, CTF_K_STRUCT); /* assume STRUCT */
		}
		break;

	case CTF_K_TYPEDEF:
		src_type = ctf_type_reference(src_fp, src_type);
		src_type = ctf_add_type(dst_fp, src_fp, src_type);

		if (src_type == CTF_ERR)
			return (CTF_ERR); /* errno is set for us */

		/*
		 * If dst_type is not CTF_ERR at this point, we should check if
		 * ctf_type_reference(dst_fp, dst_type) != src_type and if so
		 * fail with ECTF_CONFLICT.  However, this causes problems with
		 * <sys/types.h> typedefs that vary based on things like if
		 * _ILP32x then pid_t is int otherwise long.  We therefore omit
		 * this check and assume that if the identically named typedef
		 * already exists in dst_fp, it is correct or equivalent.
		 */
		if (dst_type == CTF_ERR) {
			dst_type = ctf_add_typedef(dst_fp, flag,
			    name, src_type);
		}
		break;

	default:
		return (ctf_set_errno(dst_fp, ECTF_CORRUPT));
	}

	return (dst_type);
}
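A merge sketch built around the routine above; copy_one_type() is hypothetical, and ctf_create(), ctf_update(), ctf_close(), and ctf_errmsg() are assumed to behave as in the usual libctf API.

#include <libctf.h>
#include <stdio.h>

/*
 * Hypothetical caller: copy one type (and everything it references) from an
 * existing container into a new dynamic container, then commit it.
 */
static ctf_file_t *
copy_one_type(ctf_file_t *src_fp, ctf_id_t src_type)
{
	ctf_file_t *dst_fp;
	ctf_id_t dst_type;
	int err;

	if ((dst_fp = ctf_create(&err)) == NULL) {
		(void) fprintf(stderr, "ctf_create: %s\n", ctf_errmsg(err));
		return (NULL);
	}

	if ((dst_type = ctf_add_type(dst_fp, src_fp, src_type)) == CTF_ERR ||
	    ctf_update(dst_fp) == CTF_ERR) {
		(void) fprintf(stderr, "copy failed: %s\n",
		    ctf_errmsg(ctf_errno(dst_fp)));
		ctf_close(dst_fp);
		return (NULL);
	}

	(void) printf("copied as type %ld\n", (long)dst_type);
	return (dst_fp);
}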
Example #16
void
ctf_decl_push(ctf_decl_t *cd, ctf_file_t *fp, ctf_id_t type)
{
	ctf_decl_node_t *cdp;
	ctf_decl_prec_t prec;
	uint_t kind, n = 1;
	int is_qual = 0;

	const ctf_type_t *tp;
	ctf_arinfo_t ar;

	if ((tp = ctf_lookup_by_id(&fp, type)) == NULL) {
		cd->cd_err = fp->ctf_errno;
		return;
	}

	switch (kind = LCTF_INFO_KIND(fp, tp->ctt_info)) {
	case CTF_K_ARRAY:
		(void) ctf_array_info(fp, type, &ar);
		ctf_decl_push(cd, fp, ar.ctr_contents);
		n = ar.ctr_nelems;
		prec = CTF_PREC_ARRAY;
		break;

	case CTF_K_TYPEDEF:
		if (ctf_strptr(fp, tp->ctt_name)[0] == '\0') {
			ctf_decl_push(cd, fp, tp->ctt_type);
			return;
		}
		prec = CTF_PREC_BASE;
		break;

	case CTF_K_FUNCTION:
		ctf_decl_push(cd, fp, tp->ctt_type);
		prec = CTF_PREC_FUNCTION;
		break;

	case CTF_K_POINTER:
		ctf_decl_push(cd, fp, tp->ctt_type);
		prec = CTF_PREC_POINTER;
		break;

	case CTF_K_VOLATILE:
	case CTF_K_CONST:
	case CTF_K_RESTRICT:
		ctf_decl_push(cd, fp, tp->ctt_type);
		prec = cd->cd_qualp;
		is_qual++;
		break;

	default:
		prec = CTF_PREC_BASE;
	}

	if ((cdp = ctf_alloc(sizeof (ctf_decl_node_t))) == NULL) {
		cd->cd_err = EAGAIN;
		return;
	}

	cdp->cd_type = type;
	cdp->cd_kind = kind;
	cdp->cd_n = n;

	if (ctf_list_next(&cd->cd_nodes[prec]) == NULL)
		cd->cd_order[prec] = cd->cd_ordp++;

	/*
	 * Reset cd_qualp to the highest precedence level that we've seen so
	 * far that can be qualified (CTF_PREC_BASE or CTF_PREC_POINTER).
	 */
	if (prec > cd->cd_qualp && prec < CTF_PREC_ARRAY)
		cd->cd_qualp = prec;

	/*
	 * C array declarators are ordered inside out so prepend them.  Also by
	 * convention qualifiers of base types precede the type specifier (e.g.
	 * const int vs. int const) even though the two forms are equivalent.
	 */
	if (kind == CTF_K_ARRAY || (is_qual && prec == CTF_PREC_BASE))
		ctf_list_prepend(&cd->cd_nodes[prec], cdp);
	else
		ctf_list_append(&cd->cd_nodes[prec], cdp);
}
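The declaration stack above is the engine behind the public type-name formatting routines. A usage sketch, assuming the usual ctf_type_name() entry point; print_type_name() is a hypothetical helper.

#include <libctf.h>
#include <stdio.h>

/* Hypothetical caller: format and print the C declaration for a type. */
static void
print_type_name(ctf_file_t *fp, ctf_id_t id)
{
	char buf[512];

	if (ctf_type_name(fp, id, buf, sizeof (buf)) != NULL)
		(void) printf("%s\n", buf);
	else
		(void) fprintf(stderr, "cannot format type %ld: %s\n",
		    (long)id, ctf_errmsg(ctf_errno(fp)));
}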