/*
 * void *exacct_create_header(size_t *)
 *
 * Overview
 *   exacct_create_header() constructs an exacct file header identifying the
 *   accounting file as the output of the kernel.  exacct_create_header() and
 *   the static write_header() and verify_header() routines in libexacct must
 *   remain synchronized.
 *
 * Return values
 *   A pointer to a packed exacct buffer containing the appropriate header is
 *   returned; the size of the buffer is placed in the location indicated by
 *   sizep.
 *
 * Caller's context
 *   Suitable for KM_SLEEP allocations.
 */
void *
exacct_create_header(size_t *sizep)
{
	ea_object_t *grp;
	uint32_t backskip;
	void *packed;
	size_t packed_sz;

	/*
	 * Build the header group: file format version, file type string,
	 * creator string, and the originating host's nodename.
	 */
	grp = ea_alloc_group(EXT_GROUP | EXC_DEFAULT | EXD_GROUP_HEADER);
	(void) ea_attach_item(grp, (void *)&exacct_version, 0,
	    EXT_UINT32 | EXC_DEFAULT | EXD_VERSION);
	(void) ea_attach_item(grp, (void *)exacct_header, 0,
	    EXT_STRING | EXC_DEFAULT | EXD_FILETYPE);
	(void) ea_attach_item(grp, (void *)exacct_creator, 0,
	    EXT_STRING | EXC_DEFAULT | EXD_CREATOR);
	(void) ea_attach_item(grp, uts_nodename(), 0,
	    EXT_STRING | EXC_DEFAULT | EXD_HOSTNAME);

	/* First pass sizes the packed object, second pass fills the buffer. */
	packed_sz = ea_pack_object(grp, NULL, 0);
	packed = kmem_alloc(packed_sz, KM_SLEEP);
	(void) ea_pack_object(grp, packed, packed_sz);
	ea_free_object(grp, EUP_ALLOC);

	/*
	 * To prevent reading the header when reading the file backwards,
	 * set the large backskip of the header group to 0 (last 4 bytes).
	 */
	backskip = 0;
	exacct_order32(&backskip);
	bcopy(&backskip, (char *)packed + packed_sz - sizeof (backskip),
	    sizeof (backskip));

	*sizep = packed_sz;
	return (packed);
}
/*
 * Assemble the requested per-process usage values (selected by mask) into
 * a new exacct group of the given record type.  Returns NULL if none of
 * the masked resources produced an item, in which case the empty group is
 * released rather than handed back to the caller.
 */
static ea_object_t *
exacct_assemble_proc_record(proc_usage_t *pu, ulong_t *mask,
    ea_catalog_t record_type)
{
	ea_object_t *grp;
	int resource;
	int nitems = 0;

	grp = ea_alloc_group(EXT_GROUP | EXC_DEFAULT | record_type);
	for (resource = 1; resource <= AC_PROC_MAX_RES; resource++) {
		if (BT_TEST(mask, resource))
			nitems += exacct_attach_proc_item(pu, grp, resource);
	}

	/* An empty group is useless to the caller; discard it. */
	if (nitems == 0) {
		ea_free_object(grp, EUP_ALLOC);
		return (NULL);
	}
	return (grp);
}
/*
 * int exacct_tag_proc(ac_info_t *, pid_t, taskid_t, void *, size_t, int,
 *     const char *)
 *
 * Overview
 *   exacct_tag_proc() provides the exacct record construction and writing
 *   support required by putacct(2) for processes.
 *
 * Return values
 *   The result of the write operation is returned, unless the extended
 *   accounting facility is not active, in which case ENOTACTIVE is returned.
 *
 * Caller's context
 *   Suitable for KM_SLEEP allocations.
 */
int
exacct_tag_proc(ac_info_t *ac_proc, pid_t pid, taskid_t tkid, void *ubuf,
    size_t ubufsz, int flags, const char *hostname)
{
	int error = 0;
	void *buf;
	size_t bufsize;
	ea_catalog_t cat;
	ea_object_t *tag;

	/* Bail out early if process accounting is not currently active. */
	mutex_enter(&ac_proc->ac_lock);
	if (ac_proc->ac_state == AC_OFF || ac_proc->ac_vnode == NULL) {
		mutex_exit(&ac_proc->ac_lock);
		return (ENOTACTIVE);
	}
	mutex_exit(&ac_proc->ac_lock);

	/* Assemble the tag group: pid, task id, hostname, then the payload. */
	tag = ea_alloc_group(EXT_GROUP | EXC_DEFAULT | EXD_GROUP_PROC_TAG);
	(void) ea_attach_item(tag, &pid, sizeof (uint32_t),
	    EXT_UINT32 | EXC_DEFAULT | EXD_PROC_PID);
	(void) ea_attach_item(tag, &tkid, 0,
	    EXT_UINT32 | EXC_DEFAULT | EXD_TASK_TASKID);
	(void) ea_attach_item(tag, (void *)hostname, 0,
	    EXT_STRING | EXC_DEFAULT | EXD_TASK_HOSTNAME);
	/*
	 * The caller's buffer is attached either as raw bytes or as a
	 * pre-packed exacct object, depending on the putacct(2) flags.
	 */
	if (flags == EP_RAW)
		cat = EXT_RAW | EXC_DEFAULT | EXD_PROC_TAG;
	else
		cat = EXT_EXACCT_OBJECT | EXC_DEFAULT | EXD_PROC_TAG;
	(void) ea_attach_item(tag, ubuf, ubufsz, cat);

	/* Size, pack, and write the record; then release all resources. */
	bufsize = ea_pack_object(tag, NULL, 0);
	buf = kmem_alloc(bufsize, KM_SLEEP);
	(void) ea_pack_object(tag, buf, bufsize);
	error = exacct_vn_write(ac_proc, buf, bufsize);
	kmem_free(buf, bufsize);
	ea_free_object(tag, EUP_ALLOC);
	return (error);
}
/*
 * int exacct_tag_task(ac_info_t *, task_t *, void *, size_t, int)
 *
 * Overview
 *   exacct_tag_task() provides the exacct record construction and writing
 *   support required by putacct(2) for task entities.
 *
 * Return values
 *   The result of the write operation is returned, unless the extended
 *   accounting facility is not active, in which case ENOTACTIVE is returned.
 *
 * Caller's context
 *   Suitable for KM_SLEEP allocations.
 */
int
exacct_tag_task(ac_info_t *ac_task, task_t *tk, void *ubuf, size_t ubufsz,
    int flags)
{
	int error = 0;
	void *buf;
	size_t bufsize;
	ea_catalog_t cat;
	ea_object_t *tag;

	/* Bail out early if task accounting is not currently active. */
	mutex_enter(&ac_task->ac_lock);
	if (ac_task->ac_state == AC_OFF || ac_task->ac_vnode == NULL) {
		mutex_exit(&ac_task->ac_lock);
		return (ENOTACTIVE);
	}
	mutex_exit(&ac_task->ac_lock);

	/* Assemble the tag group: task id, zone nodename, then the payload. */
	tag = ea_alloc_group(EXT_GROUP | EXC_DEFAULT | EXD_GROUP_TASK_TAG);
	(void) ea_attach_item(tag, &tk->tk_tkid, 0,
	    EXT_UINT32 | EXC_DEFAULT | EXD_TASK_TASKID);
	(void) ea_attach_item(tag, tk->tk_zone->zone_nodename, 0,
	    EXT_STRING | EXC_DEFAULT | EXD_TASK_HOSTNAME);
	/*
	 * The caller's buffer is attached either as raw bytes or as a
	 * pre-packed exacct object, depending on the putacct(2) flags.
	 */
	if (flags == EP_RAW)
		cat = EXT_RAW | EXC_DEFAULT | EXD_TASK_TAG;
	else
		cat = EXT_EXACCT_OBJECT | EXC_DEFAULT | EXD_TASK_TAG;
	(void) ea_attach_item(tag, ubuf, ubufsz, cat);

	/* Size, pack, and write the record; then release all resources. */
	bufsize = ea_pack_object(tag, NULL, 0);
	buf = kmem_alloc(bufsize, KM_SLEEP);
	(void) ea_pack_object(tag, buf, bufsize);
	error = exacct_vn_write(ac_task, buf, bufsize);
	kmem_free(buf, bufsize);
	ea_free_object(tag, EUP_ALLOC);
	return (error);
}
/*
 * Take a snapshot of the current state of processor sets and CPUs,
 * pack it in the exacct format, and attach it to specified exacct record.
 *
 * For each visible pset, one EXD_GROUP_PSET group is attached to eo_system,
 * containing the pset id, a nested EXD_GROUP_CPU group per member CPU (id
 * plus a packed nvlist of its properties), and a packed nvlist of the pset's
 * own properties augmented with its size and load.  A non-global zone sees
 * only the pset it is bound to.  Returns 0; the caller must hold pool_lock.
 */
int
pool_pset_pack(ea_object_t *eo_system)
{
	ea_object_t *eo_pset, *eo_cpu;
	cpupart_t *cpupart;
	psetid_t mypsetid;
	pool_pset_t *pset;
	nvlist_t *nvl;
	size_t bufsz;
	cpu_t *cpu;
	char *buf;
	int ncpu;

	ASSERT(pool_lock_held());

	/* cpu_lock keeps the cpu list and partitions stable while we walk. */
	mutex_enter(&cpu_lock);
	mypsetid = zone_pset_get(curproc->p_zone);
	for (pset = list_head(&pool_pset_list); pset;
	    pset = list_next(&pool_pset_list, pset)) {
		psetid_t psetid = pset->pset_id;

		/* Non-global zones may only observe their own pset. */
		if (!INGLOBALZONE(curproc) && mypsetid != psetid)
			continue;
		cpupart = cpupart_find(psetid);
		ASSERT(cpupart != NULL);
		eo_pset = ea_alloc_group(EXT_GROUP | EXC_LOCAL |
		    EXD_GROUP_PSET);
		(void) ea_attach_item(eo_pset, &psetid, sizeof (id_t),
		    EXC_LOCAL | EXD_PSET_PSETID | EXT_UINT32);
		/*
		 * Pack info for all CPUs in this processor set.
		 * cpu_list is circular, hence the do/while termination test;
		 * "continue" re-evaluates the while condition, advancing cpu.
		 */
		ncpu = 0;
		cpu = cpu_list;
		do {
			if (cpu->cpu_part != cpupart)	/* not our pset */
				continue;
			ncpu++;
			eo_cpu = ea_alloc_group(EXT_GROUP | EXC_LOCAL |
			    EXD_GROUP_CPU);
			(void) ea_attach_item(eo_cpu, &cpu->cpu_id,
			    sizeof (processorid_t),
			    EXC_LOCAL | EXD_CPU_CPUID | EXT_UINT32);
			/*
			 * Lazily create a property list for CPUs that have
			 * never had one, so the dup below always succeeds.
			 */
			if (cpu->cpu_props == NULL) {
				(void) nvlist_alloc(&cpu->cpu_props,
				    NV_UNIQUE_NAME, KM_SLEEP);
				(void) nvlist_add_string(cpu->cpu_props,
				    "cpu.comment", "");
			}
			/*
			 * Work on a duplicate so the transient sys_id/status
			 * entries never pollute the CPU's persistent list.
			 */
			(void) nvlist_dup(cpu->cpu_props, &nvl, KM_SLEEP);
			(void) nvlist_add_int64(nvl, "cpu.sys_id",
			    cpu->cpu_id);
			(void) nvlist_add_string(nvl, "cpu.status",
			    (char *)cpu_get_state_str(cpu));
			/* nvlist_pack allocates buf for us (buf == NULL). */
			buf = NULL;
			bufsz = 0;
			(void) nvlist_pack(nvl, &buf, &bufsz,
			    NV_ENCODE_NATIVE, 0);
			(void) ea_attach_item(eo_cpu, buf, bufsz,
			    EXC_LOCAL | EXD_CPU_PROP | EXT_RAW);
			(void) nvlist_free(nvl);
			kmem_free(buf, bufsz);
			(void) ea_attach_to_group(eo_pset, eo_cpu);
		} while ((cpu = cpu->cpu_next) != cpu_list);

		/*
		 * Same pattern for the pset itself: duplicate its property
		 * list, add the computed size and load, pack, and attach.
		 */
		(void) nvlist_dup(pset->pset_props, &nvl, KM_SLEEP);
		(void) nvlist_add_uint64(nvl, "pset.size", ncpu);
		(void) nvlist_add_uint64(nvl, "pset.load",
		    (uint64_t)PSET_LOAD(cpupart->cp_hp_avenrun[0]));

		buf = NULL;
		bufsz = 0;
		(void) nvlist_pack(nvl, &buf, &bufsz, NV_ENCODE_NATIVE, 0);
		(void) ea_attach_item(eo_pset, buf, bufsz,
		    EXC_LOCAL | EXD_PSET_PROP | EXT_RAW);
		(void) nvlist_free(nvl);
		kmem_free(buf, bufsz);
		(void) ea_attach_to_group(eo_system, eo_pset);
	}
	mutex_exit(&cpu_lock);
	return (0);
}