/*
 * Allocate a new item object for the given catalog tag and data buffer,
 * attach it to the group grp, and return the new item.
 * NOTE(review): the result of ea_alloc_item() is passed straight to
 * ea_attach_to_group() without a NULL check — presumably allocation
 * cannot fail in this environment; confirm against ea_alloc_item().
 */
ea_object_t *
ea_attach_item(ea_object_t *grp, void *buf, size_t bufsz, ea_catalog_t catalog)
{
	ea_object_t *new_item = ea_alloc_item(catalog, buf, bufsz);

	(void) ea_attach_to_group(grp, new_item);
	return (new_item);
}
/*
 * unpack_group() recursively unpacks record groups from the buffer tucked
 * within the passed ea_file, and attaches them to grp.  Returns 0 on
 * success, or -1 with exacct_errno set (by the failing callee, or to
 * EXR_CORRUPT_FILE if the rebuilt object count disagrees with the count
 * recorded in the group).
 */
static int
unpack_group(ea_file_impl_t *f, ea_object_t *grp, int flag)
{
	ea_object_t *obj;
	uint_t nobjs = grp->eo_group.eg_nobjs;
	uint_t i;	/* unsigned to match eg_nobjs; avoids signed overflow */

	/*
	 * Set the group's object count to zero, as we will rebuild it via the
	 * individual object attachments.
	 */
	grp->eo_group.eg_nobjs = 0;
	grp->eo_group.eg_objs = NULL;

	for (i = 0; i < nobjs; i++) {
		if ((obj = ea_alloc(sizeof (ea_object_t))) == NULL) {
			/* exacct_errno set above. */
			return (-1);
		}
		obj->eo_next = NULL;
		if (xget_object(f, obj, bufread_wrapper, bufseek_wrapper,
		    bufpos_wrapper, flag) == -1) {
			ea_free(obj, sizeof (ea_object_t));
			/* exacct_errno set above. */
			return (-1);
		}
		(void) ea_attach_to_group(grp, obj);
		/* Nested groups are unpacked recursively. */
		if (obj->eo_type == EO_GROUP &&
		    unpack_group(f, obj, flag) == -1) {
			/* exacct_errno set above. */
			return (-1);
		}
	}

	/*
	 * The attachments above must have rebuilt exactly the recorded
	 * object count; any disagreement means the file is corrupt.
	 */
	if (nobjs != grp->eo_group.eg_nobjs) {
		EXACCT_SET_ERR(EXR_CORRUPT_FILE);
		return (-1);
	}
	EXACCT_SET_ERR(EXR_OK);
	return (0);
}
/*
 * write_header() constructs the exacct file header group — holding the
 * exacct version, creator, file type, and hostname items — packs it, and
 * writes it to the file at the current position.  The large backskip of
 * the header group (its final four bytes) is forced to zero so that a
 * backwards traversal of the file stops before re-reading the header.
 * Returns 0 on success, or -1 with exacct_errno set.
 */
static int
write_header(ea_file_t *ef)
{
	ea_object_t hdr_grp;
	ea_object_t vers_obj;
	ea_object_t creator_obj;
	ea_object_t filetype_obj;
	ea_object_t hostname_obj;
	uint32_t bskip;
	const uint32_t version = EXACCT_VERSION;
	ea_file_impl_t *f = (ea_file_impl_t *)ef;
	void *buf;
	size_t bufsize;
	char hostbuf[SYSINFO_BUFSIZE];
	int error = EXR_OK;

	bzero(&hdr_grp, sizeof (ea_object_t));
	bzero(&vers_obj, sizeof (ea_object_t));
	bzero(&creator_obj, sizeof (ea_object_t));
	bzero(&filetype_obj, sizeof (ea_object_t));
	bzero(&hostname_obj, sizeof (ea_object_t));
	bzero(hostbuf, SYSINFO_BUFSIZE);
	(void) sysinfo(SI_HOSTNAME, hostbuf, SYSINFO_BUFSIZE);

	if (ea_set_item(&vers_obj, EXT_UINT32 | EXC_DEFAULT | EXD_VERSION,
	    (void *)&version, 0) == -1 ||
	    ea_set_item(&creator_obj, EXT_STRING | EXC_DEFAULT | EXD_CREATOR,
	    f->ef_creator, strlen(f->ef_creator)) == -1 ||
	    ea_set_item(&filetype_obj, EXT_STRING | EXC_DEFAULT | EXD_FILETYPE,
	    EXACCT_HDR_STR, strlen(EXACCT_HDR_STR)) == -1 ||
	    ea_set_item(&hostname_obj, EXT_STRING | EXC_DEFAULT | EXD_HOSTNAME,
	    hostbuf, strlen(hostbuf)) == -1) {
		error = ea_error();
		goto cleanup1;
	}

	(void) ea_set_group(&hdr_grp,
	    EXT_GROUP | EXC_DEFAULT | EXD_GROUP_HEADER);
	(void) ea_attach_to_group(&hdr_grp, &vers_obj);
	(void) ea_attach_to_group(&hdr_grp, &creator_obj);
	(void) ea_attach_to_group(&hdr_grp, &filetype_obj);
	(void) ea_attach_to_group(&hdr_grp, &hostname_obj);

	/*
	 * Get the required size by passing a null buffer.  The sizing call
	 * itself can fail, in which case it returns (size_t)-1; check for
	 * that before handing the value to ea_alloc().
	 */
	bufsize = ea_pack_object(&hdr_grp, NULL, 0);
	if (bufsize == (size_t)-1) {
		error = ea_error();
		goto cleanup1;
	}
	if ((buf = ea_alloc(bufsize)) == NULL) {
		error = ea_error();
		goto cleanup1;
	}

	if (ea_pack_object(&hdr_grp, buf, bufsize) == (size_t)-1) {
		error = ea_error();
		goto cleanup2;
	}

	/*
	 * To prevent reading the header when reading the file backwards,
	 * set the large backskip of the header group to 0 (last 4 bytes).
	 */
	bskip = 0;
	exacct_order32(&bskip);
	bcopy(&bskip, (char *)buf + bufsize - sizeof (bskip),
	    sizeof (bskip));

	if (fwrite(buf, sizeof (char), bufsize, f->ef_fp) != bufsize ||
	    fflush(f->ef_fp) == EOF) {
		error = EXR_SYSCALL_FAIL;
		goto cleanup2;
	}

cleanup2:
	ea_free(buf, bufsize);
cleanup1:
	(void) ea_free_item(&vers_obj, EUP_ALLOC);
	(void) ea_free_item(&creator_obj, EUP_ALLOC);
	(void) ea_free_item(&filetype_obj, EUP_ALLOC);
	(void) ea_free_item(&hostname_obj, EUP_ALLOC);
	EXACCT_SET_ERR(error);
	return (error == EXR_OK ? 0 : -1);
}
/*
 * Take a snapshot of the current state of processor sets and CPUs,
 * pack it in the exacct format, and attach it to specified exacct record.
 *
 * eo_system: exacct group to which one EXD_GROUP_PSET group per visible
 * processor set is attached.  Caller must hold the pool lock (asserted
 * below); cpu_lock is taken here to walk the CPU list.  Always returns 0.
 */
int
pool_pset_pack(ea_object_t *eo_system)
{
	ea_object_t *eo_pset, *eo_cpu;
	cpupart_t *cpupart;
	psetid_t mypsetid;
	pool_pset_t *pset;
	nvlist_t *nvl;
	size_t bufsz;
	cpu_t *cpu;
	char *buf;
	int ncpu;

	ASSERT(pool_lock_held());

	mutex_enter(&cpu_lock);
	mypsetid = zone_pset_get(curproc->p_zone);
	for (pset = list_head(&pool_pset_list); pset;
	    pset = list_next(&pool_pset_list, pset)) {
		psetid_t psetid = pset->pset_id;

		/* A non-global zone only sees its own bound pset. */
		if (!INGLOBALZONE(curproc) && mypsetid != psetid)
			continue;
		cpupart = cpupart_find(psetid);
		ASSERT(cpupart != NULL);
		eo_pset = ea_alloc_group(EXT_GROUP |
		    EXC_LOCAL | EXD_GROUP_PSET);
		(void) ea_attach_item(eo_pset, &psetid, sizeof (id_t),
		    EXC_LOCAL | EXD_PSET_PSETID | EXT_UINT32);
		/*
		 * Pack info for all CPUs in this processor set.
		 */
		ncpu = 0;
		cpu = cpu_list;
		/* cpu_list is circular: walk until we wrap back around. */
		do {
			if (cpu->cpu_part != cpupart)	/* not our pset */
				continue;
			ncpu++;
			eo_cpu = ea_alloc_group(EXT_GROUP
			    | EXC_LOCAL | EXD_GROUP_CPU);
			(void) ea_attach_item(eo_cpu, &cpu->cpu_id,
			    sizeof (processorid_t),
			    EXC_LOCAL | EXD_CPU_CPUID | EXT_UINT32);
			/*
			 * Lazily create the CPU's property nvlist so there
			 * is always something to duplicate below.
			 */
			if (cpu->cpu_props == NULL) {
				(void) nvlist_alloc(&cpu->cpu_props,
				    NV_UNIQUE_NAME, KM_SLEEP);
				(void) nvlist_add_string(cpu->cpu_props,
				    "cpu.comment", "");
			}
			/*
			 * Work on a duplicate so the per-snapshot entries
			 * (sys_id, status) don't accumulate in cpu_props.
			 */
			(void) nvlist_dup(cpu->cpu_props, &nvl, KM_SLEEP);
			(void) nvlist_add_int64(nvl, "cpu.sys_id",
			    cpu->cpu_id);
			(void) nvlist_add_string(nvl, "cpu.status",
			    (char *)cpu_get_state_str(cpu));
			buf = NULL;
			bufsz = 0;
			/* Pack the nvlist and attach it as a raw item. */
			(void) nvlist_pack(nvl, &buf, &bufsz,
			    NV_ENCODE_NATIVE, 0);
			(void) ea_attach_item(eo_cpu, buf, bufsz,
			    EXC_LOCAL | EXD_CPU_PROP | EXT_RAW);
			(void) nvlist_free(nvl);
			kmem_free(buf, bufsz);
			(void) ea_attach_to_group(eo_pset, eo_cpu);
		} while ((cpu = cpu->cpu_next) != cpu_list);
		/*
		 * Duplicate the pset's properties and add the snapshot-time
		 * size (CPU count) and load before packing.
		 */
		(void) nvlist_dup(pset->pset_props, &nvl, KM_SLEEP);
		(void) nvlist_add_uint64(nvl, "pset.size", ncpu);
		(void) nvlist_add_uint64(nvl, "pset.load",
		    (uint64_t)PSET_LOAD(cpupart->cp_hp_avenrun[0]));
		buf = NULL;
		bufsz = 0;
		(void) nvlist_pack(nvl, &buf, &bufsz, NV_ENCODE_NATIVE, 0);
		(void) ea_attach_item(eo_pset, buf, bufsz,
		    EXC_LOCAL | EXD_PSET_PROP | EXT_RAW);
		(void) nvlist_free(nvl);
		kmem_free(buf, bufsz);
		(void) ea_attach_to_group(eo_system, eo_pset);
	}
	mutex_exit(&cpu_lock);
	return (0);
}