Esempio n. 1
0
/*
 * void *exacct_create_header(size_t *)
 *
 * Overview
 *   exacct_create_header() constructs an exacct file header identifying the
 *   accounting file as the output of the kernel.  exacct_create_header() and
 *   the static write_header() and verify_header() routines in libexacct must
 *   remain synchronized.
 *
 * Return values
 *   A pointer to a packed exacct buffer containing the appropriate header is
 *   returned; the size of the buffer is placed in the location indicated by
 *   sizep.
 *
 * Caller's context
 *   Suitable for KM_SLEEP allocations.
 */
void *
exacct_create_header(size_t *sizep)
{
	ea_object_t *header;
	uint32_t zero_bskip;
	void *packed;
	size_t packed_size;

	/*
	 * Build the header group: version, file type, creator and the
	 * node name of the host that produced the file.
	 */
	header = ea_alloc_group(EXT_GROUP | EXC_DEFAULT | EXD_GROUP_HEADER);
	(void) ea_attach_item(header, (void *)&exacct_version, 0,
	    EXT_UINT32 | EXC_DEFAULT | EXD_VERSION);
	(void) ea_attach_item(header, (void *)exacct_header, 0,
	    EXT_STRING | EXC_DEFAULT | EXD_FILETYPE);
	(void) ea_attach_item(header, (void *)exacct_creator, 0,
	    EXT_STRING | EXC_DEFAULT | EXD_CREATOR);
	(void) ea_attach_item(header, uts_nodename(), 0,
	    EXT_STRING | EXC_DEFAULT | EXD_HOSTNAME);

	/* Size the buffer with a dry-run pack, then pack for real. */
	packed_size = ea_pack_object(header, NULL, 0);
	packed = kmem_alloc(packed_size, KM_SLEEP);
	(void) ea_pack_object(header, packed, packed_size);
	ea_free_object(header, EUP_ALLOC);

	/*
	 * To prevent reading the header when reading the file backwards,
	 * set the large backskip of the header group to 0 (last 4 bytes).
	 */
	zero_bskip = 0;
	exacct_order32(&zero_bskip);
	bcopy(&zero_bskip,
	    (char *)packed + packed_size - sizeof (zero_bskip),
	    sizeof (zero_bskip));

	*sizep = packed_size;
	return (packed);
}
Esempio n. 2
0
/*
 * Read in the specified number of objects, returning the same data
 * structure that would have originally been passed to ea_write().
 */
/*
 * Read in the specified number of objects, returning the same data
 * structure that would have originally been passed to ea_write().
 */
ea_object_t *
ea_get_object_tree(ea_file_t *ef, uint32_t nobj)
{
	ea_object_t *head = NULL;
	ea_object_t *tail = NULL;
	ea_object_t *cur;

	for (; nobj != 0; nobj--) {
		/* Allocate and zero a fresh object for this read. */
		cur = ea_alloc(sizeof (ea_object_t));
		bzero(cur, sizeof (*cur));

		/* Read it in; on failure unwind everything read so far. */
		if (ea_get_object(ef, cur) == -1) {
			ea_free(cur, sizeof (ea_object_t));
			if (head != NULL) {
				ea_free_object(head, EUP_ALLOC);
			}
			return (NULL);
		}

		/* Append to the chain before any recursion. */
		if (head == NULL) {
			head = cur;
		}
		if (tail != NULL) {
			tail->eo_next = cur;
		}
		tail = cur;

		/* A group with members pulls in its contents recursively. */
		if (cur->eo_type == EO_GROUP && cur->eo_group.eg_nobjs > 0) {
			cur->eo_group.eg_objs = ea_get_object_tree(ef,
			    cur->eo_group.eg_nobjs);
			if (cur->eo_group.eg_objs == NULL) {
				/*
				 * exacct_error set above; cur is already
				 * linked into the chain, so freeing head
				 * releases it too.
				 */
				ea_free_object(head, EUP_ALLOC);
				return (NULL);
			}
		}
	}
	EXACCT_SET_ERR(EXR_OK);
	return (head);
}
Esempio n. 3
0
/*
 * int exacct_assemble_proc_usage(proc_usage_t *, int (*)(void *, size_t, void
 *	*, size_t, size_t *), void *, size_t, size_t *)
 *
 * Overview
 *   Assemble record with miscellaneous accounting information about the process
 *   and execute the callback on it. It is the callback's job to set "actual" to
 *   the size of record.
 *
 * Return values
 *   The result of the callback function, unless the extended process accounting
 *   feature is not active, in which case ENOTACTIVE is returned.
 *
 * Caller's context
 *   Suitable for KM_SLEEP allocations.
 */
int
exacct_assemble_proc_usage(ac_info_t *ac_proc, proc_usage_t *pu,
    int (*callback)(ac_info_t *, void *, size_t, void *, size_t, size_t *),
    void *ubuf, size_t ubufsize, size_t *actual, int flag)
{
	ulong_t mask[AC_MASK_SZ];
	ea_object_t *proc_record;
	ea_catalog_t record_type;
	void *buf;
	size_t bufsize;
	int ret;

	ASSERT(flag == EW_FINAL || flag == EW_PARTIAL);

	/* Snapshot the resource mask while holding the accounting lock. */
	mutex_enter(&ac_proc->ac_lock);
	if (ac_proc->ac_state == AC_OFF) {
		mutex_exit(&ac_proc->ac_lock);
		return (ENOTACTIVE);
	}
	bt_copy(&ac_proc->ac_mask[0], mask, AC_MASK_SZ);
	mutex_exit(&ac_proc->ac_lock);

	switch (flag) {
	case EW_FINAL:
		record_type = EXD_GROUP_PROC;
		break;
	case EW_PARTIAL:
		record_type = EXD_GROUP_PROC_PARTIAL;
		break;
	default:
		/*
		 * Unreachable for correct callers (see the ASSERT above),
		 * but without this a non-DEBUG build, where the ASSERT
		 * compiles away, would continue with record_type
		 * uninitialized.
		 */
		return (EINVAL);
	}

	proc_record = exacct_assemble_proc_record(pu, mask, record_type);
	if (proc_record == NULL) {
		/* An empty record is not an error; there is nothing to do. */
		return (0);
	}

	/*
	 * Pack object into buffer and pass to callback.
	 */
	bufsize = ea_pack_object(proc_record, NULL, 0);
	buf = kmem_alloc(bufsize, KM_SLEEP);
	(void) ea_pack_object(proc_record, buf, bufsize);

	ret = callback(ac_proc, ubuf, ubufsize, buf, bufsize, actual);

	/*
	 * Free all previously allocated structures.
	 */
	kmem_free(buf, bufsize);
	ea_free_object(proc_record, EUP_ALLOC);
	return (ret);
}
Esempio n. 4
0
/*
 * Recursively copy a list of ea_object_t.  All the elements in the eo_next
 * list will be copied, and any group objects will be recursively copied.
 */
ea_object_t *
ea_copy_object_tree(const ea_object_t *src)
{
	ea_object_t *ret_obj, *dst, *last;

	for (ret_obj = last = NULL; src != NULL;
	    last = dst, src = src->eo_next) {

		/* Allocate a new object and copy to it. */
		if ((dst = ea_copy_object(src)) == NULL) {
			ea_free_object(ret_obj, EUP_ALLOC);
			return (NULL);
		}

		/*
		 * Groups need their object list copied as well.  Only
		 * recurse for groups that actually have members: for an
		 * empty group, ea_copy_object_tree(NULL) returns NULL
		 * with no error, which would be misread here as a copy
		 * failure and cause the whole tree to be discarded.
		 */
		if (src->eo_type == EO_GROUP &&
		    src->eo_group.eg_objs != NULL) {
			dst->eo_group.eg_objs =
			    ea_copy_object_tree(src->eo_group.eg_objs);
			if (dst->eo_group.eg_objs == NULL) {
				/* exacct_error set in the recursion. */
				ea_free_object(ret_obj, EUP_ALLOC);
				return (NULL);
			}
			dst->eo_group.eg_nobjs = src->eo_group.eg_nobjs;
		}

		/* Remember the list head the first time round. */
		if (ret_obj == NULL) {
			ret_obj = dst;
		}

		/* Link together if not at the list head. */
		if (last != NULL) {
			last->eo_next = dst;
		}
	}
	EXACCT_SET_ERR(EXR_OK);
	return (ret_obj);
}
Esempio n. 5
0
/*
 * int exacct_assemble_flow_usage(ac_info_t *, flow_usage_t *,
 *	int (*)(ac_info_t *, void *, size_t, void *, size_t, size_t *),
 *	void *, size_t, size_t *)
 *
 * Overview
 *   Assemble a flow accounting record from the given usage data and execute
 *   the callback on the packed buffer.  It is the callback's job to set
 *   "actual" to the size of the record.
 *
 * Return values
 *   The result of the callback function, unless flow accounting is not
 *   active (ENOTACTIVE) or the packed buffer cannot be allocated (ENOMEM).
 *
 * Caller's context
 *   Uses KM_NOSLEEP allocation, so it does not block on memory.
 */
int
exacct_assemble_flow_usage(ac_info_t *ac_flow, flow_usage_t *fu,
    int (*callback)(ac_info_t *, void *, size_t, void *, size_t, size_t *),
    void *ubuf, size_t ubufsize, size_t *actual)
{
	ulong_t mask[AC_MASK_SZ];
	ea_object_t *flow_usage;
	ea_catalog_t record_type;
	void *buf;
	size_t bufsize;
	int ret;

	/* Snapshot the resource mask while holding the accounting lock. */
	mutex_enter(&ac_flow->ac_lock);
	if (ac_flow->ac_state == AC_OFF) {
		mutex_exit(&ac_flow->ac_lock);
		return (ENOTACTIVE);
	}
	bt_copy(&ac_flow->ac_mask[0], mask, AC_MASK_SZ);
	mutex_exit(&ac_flow->ac_lock);

	record_type = EXD_GROUP_FLOW;

	flow_usage = exacct_assemble_flow_record(fu, mask, record_type);
	if (flow_usage == NULL) {
		/* An empty record is not an error; there is nothing to do. */
		return (0);
	}

	/*
	 * Pack object into buffer and pass to callback.
	 */
	bufsize = ea_pack_object(flow_usage, NULL, 0);
	buf = kmem_alloc(bufsize, KM_NOSLEEP);
	if (buf == NULL) {
		/*
		 * Don't leak the assembled record when the non-blocking
		 * allocation fails.
		 */
		ea_free_object(flow_usage, EUP_ALLOC);
		return (ENOMEM);
	}

	(void) ea_pack_object(flow_usage, buf, bufsize);

	ret = callback(ac_flow, ubuf, ubufsize, buf, bufsize, actual);

	/*
	 * Free all previously allocated structures.
	 */
	kmem_free(buf, bufsize);
	ea_free_object(flow_usage, EUP_ALLOC);
	return (ret);
}
Esempio n. 6
0
/*
 * Build a process accounting group of the given record type, attaching an
 * item for every resource enabled in the mask.  Returns NULL if no items
 * were attached (i.e. the record would carry no data).
 */
static ea_object_t *
exacct_assemble_proc_record(proc_usage_t *pu, ulong_t *mask,
    ea_catalog_t record_type)
{
	ea_object_t *grp;
	int attached = 0;
	int res;

	grp = ea_alloc_group(EXT_GROUP | EXC_DEFAULT | record_type);
	for (res = 1; res <= AC_PROC_MAX_RES; res++) {
		if (BT_TEST(mask, res)) {
			attached += exacct_attach_proc_item(pu, grp, res);
		}
	}

	/* A record with no data is useless; discard it. */
	if (attached == 0) {
		ea_free_object(grp, EUP_ALLOC);
		return (NULL);
	}
	return (grp);
}
Esempio n. 7
0
/*
 * exacct_tag_proc(pid_t, taskid_t, void *, size_t, int, char *)
 *
 * Overview
 *   exacct_tag_proc() provides the exacct record construction and writing
 *   support required by putacct(2) for processes.
 *
 * Return values
 *   The result of the write operation is returned, unless the extended
 *   accounting facility is not active, in which case ENOTACTIVE is returned.
 *
 * Caller's context
 *   Suitable for KM_SLEEP allocations.
 */
int
exacct_tag_proc(ac_info_t *ac_proc, pid_t pid, taskid_t tkid, void *ubuf,
    size_t ubufsz, int flags, const char *hostname)
{
	ea_object_t *tag_grp;
	ea_catalog_t tag_cat;
	void *packed;
	size_t packed_sz;
	int err;

	/* Process accounting must be active and have an output vnode. */
	mutex_enter(&ac_proc->ac_lock);
	if (ac_proc->ac_state == AC_OFF || ac_proc->ac_vnode == NULL) {
		mutex_exit(&ac_proc->ac_lock);
		return (ENOTACTIVE);
	}
	mutex_exit(&ac_proc->ac_lock);

	/* Build the tag group: pid, task id, hostname, then user payload. */
	tag_grp = ea_alloc_group(EXT_GROUP | EXC_DEFAULT | EXD_GROUP_PROC_TAG);
	(void) ea_attach_item(tag_grp, &pid, sizeof (uint32_t),
	    EXT_UINT32 | EXC_DEFAULT | EXD_PROC_PID);
	(void) ea_attach_item(tag_grp, &tkid, 0,
	    EXT_UINT32 | EXC_DEFAULT | EXD_TASK_TASKID);
	(void) ea_attach_item(tag_grp, (void *)hostname, 0,
	    EXT_STRING | EXC_DEFAULT | EXD_TASK_HOSTNAME);
	tag_cat = (flags == EP_RAW) ?
	    (EXT_RAW | EXC_DEFAULT | EXD_PROC_TAG) :
	    (EXT_EXACCT_OBJECT | EXC_DEFAULT | EXD_PROC_TAG);
	(void) ea_attach_item(tag_grp, ubuf, ubufsz, tag_cat);

	/* Pack the group and append it to the accounting file. */
	packed_sz = ea_pack_object(tag_grp, NULL, 0);
	packed = kmem_alloc(packed_sz, KM_SLEEP);
	(void) ea_pack_object(tag_grp, packed, packed_sz);
	err = exacct_vn_write(ac_proc, packed, packed_sz);
	kmem_free(packed, packed_sz);
	ea_free_object(tag_grp, EUP_ALLOC);
	return (err);
}
Esempio n. 8
0
/*
 * int exacct_tag_task(task_t *, void *, size_t, int)
 *
 * Overview
 *   exacct_tag_task() provides the exacct record construction and writing
 *   support required by putacct(2) for task entities.
 *
 * Return values
 *   The result of the write operation is returned, unless the extended
 *   accounting facility is not active, in which case ENOTACTIVE is returned.
 *
 * Caller's context
 *   Suitable for KM_SLEEP allocations.
 */
int
exacct_tag_task(ac_info_t *ac_task, task_t *tk, void *ubuf, size_t ubufsz,
    int flags)
{
	ea_object_t *tag_grp;
	ea_catalog_t tag_cat;
	void *packed;
	size_t packed_sz;
	int err;

	/* Task accounting must be active and have an output vnode. */
	mutex_enter(&ac_task->ac_lock);
	if (ac_task->ac_state == AC_OFF || ac_task->ac_vnode == NULL) {
		mutex_exit(&ac_task->ac_lock);
		return (ENOTACTIVE);
	}
	mutex_exit(&ac_task->ac_lock);

	/* Build the tag group: task id, hostname, then the user payload. */
	tag_grp = ea_alloc_group(EXT_GROUP | EXC_DEFAULT | EXD_GROUP_TASK_TAG);
	(void) ea_attach_item(tag_grp, &tk->tk_tkid, 0,
	    EXT_UINT32 | EXC_DEFAULT | EXD_TASK_TASKID);
	(void) ea_attach_item(tag_grp, tk->tk_zone->zone_nodename, 0,
	    EXT_STRING | EXC_DEFAULT | EXD_TASK_HOSTNAME);
	tag_cat = (flags == EP_RAW) ?
	    (EXT_RAW | EXC_DEFAULT | EXD_TASK_TAG) :
	    (EXT_EXACCT_OBJECT | EXC_DEFAULT | EXD_TASK_TAG);
	(void) ea_attach_item(tag_grp, ubuf, ubufsz, tag_cat);

	/* Pack the group and append it to the accounting file. */
	packed_sz = ea_pack_object(tag_grp, NULL, 0);
	packed = kmem_alloc(packed_sz, KM_SLEEP);
	(void) ea_pack_object(tag_grp, packed, packed_sz);
	err = exacct_vn_write(ac_task, packed, packed_sz);
	kmem_free(packed, packed_sz);
	ea_free_object(tag_grp, EUP_ALLOC);
	return (err);
}
Esempio n. 9
0
/*
 * ea_unpack_object() can be considered as a finite series of get operations on
 * a given buffer, that rebuilds the hierarchy of objects compacted by a pack
 * operation.  Because there is complex state associated with the group depth,
 * ea_unpack_object() must complete as one operation on a given buffer.
 *
 * On success, *objp holds the chain of all top-level objects found in the
 * buffer and the type of the first object is returned.  On failure, *objp is
 * NULL, EO_ERROR is returned and the exacct error code is set.
 */
ea_object_type_t
ea_unpack_object(ea_object_t **objp, int flag, void *buf, size_t bufsize)
{
	ea_file_impl_t fake;	/* in-memory stand-in for a real ea_file */
	ea_object_t *obj;
	ea_object_type_t first_obj_type;

	*objp = NULL;
	if (buf == NULL) {
		EXACCT_SET_ERR(EXR_INVALID_BUF);
		return (EO_ERROR);
	}

	/* Set up the structures needed for unpacking */
	bzero(&fake, sizeof (ea_file_impl_t));
	if (stack_check(&fake) == -1) {
		/* exacct_errno set above. */
		return (EO_ERROR);
	}
	/* Point the fake file at the caller's buffer. */
	fake.ef_buf = buf;
	fake.ef_bufsize = bufsize;

	/* Unpack the first object in the buffer - this should succeed. */
	if ((obj = ea_alloc(sizeof (ea_object_t))) == NULL) {
		stack_free(&fake);
		/* exacct_errno set above. */
		return (EO_ERROR);
	}
	obj->eo_next = NULL;
	if ((first_obj_type = xget_object(&fake, obj, bufread_wrapper,
	    bufseek_wrapper, bufpos_wrapper, flag)) == -1) {
		stack_free(&fake);
		ea_free(obj, sizeof (ea_object_t));
		/* exacct_errno set above. */
		return (EO_ERROR);
	}

	/* A group object carries members; unpack them recursively. */
	if (obj->eo_type == EO_GROUP && unpack_group(&fake, obj, flag) == -1) {
		stack_free(&fake);
		ea_free_object(obj, flag);
		/* exacct_errno set above. */
		return (EO_ERROR);
	}
	*objp = obj;

	/*
	 * There may be other objects in the buffer - if so, chain them onto
	 * the end of the list.  We have reached the end of the list when
	 * xget_object() returns -1 with exacct_error set to EXR_EOF.
	 */
	for (;;) {
		if ((obj = ea_alloc(sizeof (ea_object_t))) == NULL) {
			stack_free(&fake);
			ea_free_object(*objp, flag);
			*objp = NULL;
			/* exacct_errno set above. */
			return (EO_ERROR);
		}
		obj->eo_next = NULL;
		if (xget_object(&fake, obj, bufread_wrapper, bufseek_wrapper,
			    bufpos_wrapper, flag) == -1) {
			stack_free(&fake);
			ea_free(obj, sizeof (ea_object_t));
			if (ea_error() == EXR_EOF) {
				/* Normal termination: clean end of buffer. */
				EXACCT_SET_ERR(EXR_OK);
				return (first_obj_type);
			} else {
				ea_free_object(*objp, flag);
				*objp = NULL;
				/* exacct_error set above. */
				return (EO_ERROR);
			}
		}

		/* Append this object to the chain headed by *objp. */
		(void) ea_attach_to_object(*objp, obj);

		if (obj->eo_type == EO_GROUP &&
		    unpack_group(&fake, obj, flag) == -1) {
			stack_free(&fake);
			ea_free(obj, sizeof (ea_object_t));
			ea_free_object(*objp, flag);
			*objp = NULL;
			/* exacct_errno set above. */
			return (EO_ERROR);
		}
	}
}
Esempio n. 10
0
/*
 * Copy an ea_object_t.  Note that in the case of a group, just the group
 * object will be copied, and not its list of members.  To recursively copy
 * a group or a list of items use ea_copy_object_tree().
 */
ea_object_t *
ea_copy_object(const ea_object_t *src)
{
	ea_object_t *dst;

	/* Allocate a new object and copy to it. */
	if ((dst = ea_alloc(sizeof (ea_object_t))) == NULL) {
		/* exacct_error set above. */
		return (NULL);
	}
	bcopy(src, dst, sizeof (ea_object_t));
	dst->eo_next = NULL;

	switch (src->eo_type) {
	case EO_GROUP:
		/* Only the group object itself is copied, not its members. */
		dst->eo_group.eg_nobjs = 0;
		dst->eo_group.eg_objs = NULL;
		break;
	case EO_ITEM:
		/* Items containing pointers need special treatment. */
		switch (src->eo_catalog & EXT_TYPE_MASK) {
		case EXT_STRING:
			if (src->eo_item.ei_string != NULL) {
				dst->eo_item.ei_string =
				    ea_strdup(src->eo_item.ei_string);
				if (dst->eo_item.ei_string == NULL) {
					ea_free_object(dst, EUP_ALLOC);
					return (NULL);
				}
			}
			break;
		case EXT_RAW:
			if (src->eo_item.ei_raw != NULL) {
				dst->eo_item.ei_raw =
				    ea_alloc(src->eo_item.ei_size);
				if (dst->eo_item.ei_raw == NULL) {
					ea_free_object(dst, EUP_ALLOC);
					return (NULL);
				}
				bcopy(src->eo_item.ei_raw, dst->eo_item.ei_raw,
				    (size_t)src->eo_item.ei_size);
			}
			break;
		case EXT_EXACCT_OBJECT:
			if (src->eo_item.ei_object != NULL) {
				dst->eo_item.ei_object =
				    ea_alloc(src->eo_item.ei_size);
				if (dst->eo_item.ei_object == NULL) {
					ea_free_object(dst, EUP_ALLOC);
					return (NULL);
				}
				/*
				 * Copy through ei_object, the member that
				 * was actually allocated above.  (The
				 * original copied through ei_raw, which
				 * presumably aliases ei_object in the item
				 * union -- verify against the ea_item_t
				 * definition.)
				 */
				bcopy(src->eo_item.ei_object,
				    dst->eo_item.ei_object,
				    (size_t)src->eo_item.ei_size);
			}
			break;
		default:
			/* Other item types require no special handling. */
			break;
		}
		break;
	default:
		/* Neither a group nor an item: reject the object. */
		ea_free_object(dst, EUP_ALLOC);
		EXACCT_SET_ERR(EXR_INVALID_OBJ);
		return (NULL);
	}
	EXACCT_SET_ERR(EXR_OK);
	return (dst);
}
Esempio n. 11
0
/*
 * int exacct_assemble_task_usage(task_t *, int (*)(void *, size_t, void *,
 *	size_t, size_t *), void *, size_t, size_t *, int)
 *
 * Overview
 *   exacct_assemble_task_usage() builds the packed exacct buffer for the
 *   indicated task, executes the given callback function, and free the packed
 *   buffer.
 *
 * Return values
 *   Returns 0 on success; otherwise the appropriate error code is returned.
 *
 * Caller's context
 *   Suitable for KM_SLEEP allocations.
 */
int
exacct_assemble_task_usage(ac_info_t *ac_task, task_t *tk,
    int (*callback)(ac_info_t *, void *, size_t, void *, size_t, size_t *),
    void *ubuf, size_t ubufsize, size_t *actual, int flag)
{
	ulong_t mask[AC_MASK_SZ];
	ea_object_t *task_record;
	ea_catalog_t record_type;
	task_usage_t *tu;
	void *buf;
	size_t bufsize;
	int ret;

	ASSERT(flag == EW_FINAL || flag == EW_PARTIAL || flag == EW_INTERVAL);

	/* Snapshot the resource mask while holding the accounting lock. */
	mutex_enter(&ac_task->ac_lock);
	if (ac_task->ac_state == AC_OFF) {
		mutex_exit(&ac_task->ac_lock);
		return (ENOTACTIVE);
	}
	bt_copy(ac_task->ac_mask, mask, AC_MASK_SZ);
	mutex_exit(&ac_task->ac_lock);

	switch (flag) {
	case EW_FINAL:
		record_type = EXD_GROUP_TASK;
		break;
	case EW_PARTIAL:
		record_type = EXD_GROUP_TASK_PARTIAL;
		break;
	case EW_INTERVAL:
		record_type = EXD_GROUP_TASK_INTERVAL;
		break;
	default:
		/*
		 * Unreachable for correct callers (see the ASSERT above),
		 * but without this a non-DEBUG build, where the ASSERT
		 * compiles away, would continue with record_type
		 * uninitialized.
		 */
		return (EINVAL);
	}

	/*
	 * Calculate task usage and assemble it into the task record.
	 */
	tu = kmem_zalloc(sizeof (task_usage_t), KM_SLEEP);
	exacct_calculate_task_usage(tk, tu, flag);
	task_record = exacct_assemble_task_record(tk, tu, mask, record_type);
	if (task_record == NULL) {
		/*
		 * The current configuration of the accounting system has
		 * resulted in records with no data; accordingly, we don't write
		 * these, but we return success.
		 */
		kmem_free(tu, sizeof (task_usage_t));
		return (0);
	}

	/*
	 * Pack object into buffer and run callback on it.
	 */
	bufsize = ea_pack_object(task_record, NULL, 0);
	buf = kmem_alloc(bufsize, KM_SLEEP);
	(void) ea_pack_object(task_record, buf, bufsize);
	ret = callback(ac_task, ubuf, ubufsize, buf, bufsize, actual);

	/*
	 * Free all previously allocated structures.
	 */
	kmem_free(buf, bufsize);
	ea_free_object(task_record, EUP_ALLOC);
	kmem_free(tu, sizeof (task_usage_t));
	return (ret);
}