/*
 * static void exacct_do_commit_proc(ac_info_t *, proc_t *, int)
 *
 * Overview
 *   Calculate the final usage of an exiting process and hand the assembled
 *   record to exacct_commit_callback() via exacct_assemble_proc_usage().
 *   If process accounting is not AC_ON, return without doing any work.
 *
 * Return values
 *   None.
 *
 * Caller's context
 *   Suitable for KM_SLEEP allocations; ac_lock and p->p_lock are acquired
 *   and dropped internally.
 */
static void
exacct_do_commit_proc(ac_info_t *ac_proc, proc_t *p, int wstat)
{
	size_t size;
	size_t cmdsize;
	proc_usage_t *pu;
	ulong_t mask[AC_MASK_SZ];

	mutex_enter(&ac_proc->ac_lock);
	if (ac_proc->ac_state == AC_ON) {
		bt_copy(&ac_proc->ac_mask[0], mask, AC_MASK_SZ);
		mutex_exit(&ac_proc->ac_lock);
	} else {
		mutex_exit(&ac_proc->ac_lock);
		return;
	}

	mutex_enter(&p->p_lock);
	size = strlen(p->p_user.u_comm) + 1;
	mutex_exit(&p->p_lock);

	/*
	 * Remember the command buffer's allocation size separately: "size"
	 * is reused below as the out-parameter of
	 * exacct_assemble_proc_usage(), so it cannot be trusted at free
	 * time.  Freeing with strlen(pu->pu_command) + 1 (as was done
	 * previously) would pass the wrong size to kmem_free() whenever the
	 * copied command string is shorter than the buffer allocated here,
	 * corrupting the kmem caches.  kmem_free() requires the exact size
	 * given to kmem_alloc().
	 */
	cmdsize = size;
	pu = kmem_alloc(sizeof (proc_usage_t), KM_SLEEP);
	pu->pu_command = kmem_alloc(cmdsize, KM_SLEEP);

	mutex_enter(&p->p_lock);
	exacct_calculate_proc_usage(p, pu, mask, EW_FINAL, wstat);
	mutex_exit(&p->p_lock);

	(void) exacct_assemble_proc_usage(ac_proc, pu,
	    exacct_commit_callback, NULL, 0, &size, EW_FINAL);

	kmem_free(pu->pu_command, cmdsize);
	kmem_free(pu, sizeof (proc_usage_t));
}
/* * int exacct_assemble_proc_usage(proc_usage_t *, int (*)(void *, size_t, void * *, size_t, size_t *), void *, size_t, size_t *) * * Overview * Assemble record with miscellaneous accounting information about the process * and execute the callback on it. It is the callback's job to set "actual" to * the size of record. * * Return values * The result of the callback function, unless the extended process accounting * feature is not active, in which case ENOTACTIVE is returned. * * Caller's context * Suitable for KM_SLEEP allocations. */ int exacct_assemble_proc_usage(ac_info_t *ac_proc, proc_usage_t *pu, int (*callback)(ac_info_t *, void *, size_t, void *, size_t, size_t *), void *ubuf, size_t ubufsize, size_t *actual, int flag) { ulong_t mask[AC_MASK_SZ]; ea_object_t *proc_record; ea_catalog_t record_type; void *buf; size_t bufsize; int ret; ASSERT(flag == EW_FINAL || flag == EW_PARTIAL); mutex_enter(&ac_proc->ac_lock); if (ac_proc->ac_state == AC_OFF) { mutex_exit(&ac_proc->ac_lock); return (ENOTACTIVE); } bt_copy(&ac_proc->ac_mask[0], mask, AC_MASK_SZ); mutex_exit(&ac_proc->ac_lock); switch (flag) { case EW_FINAL: record_type = EXD_GROUP_PROC; break; case EW_PARTIAL: record_type = EXD_GROUP_PROC_PARTIAL; break; } proc_record = exacct_assemble_proc_record(pu, mask, record_type); if (proc_record == NULL) return (0); /* * Pack object into buffer and pass to callback. */ bufsize = ea_pack_object(proc_record, NULL, 0); buf = kmem_alloc(bufsize, KM_SLEEP); (void) ea_pack_object(proc_record, buf, bufsize); ret = callback(ac_proc, ubuf, ubufsize, buf, bufsize, actual); /* * Free all previously allocations. */ kmem_free(buf, bufsize); ea_free_object(proc_record, EUP_ALLOC); return (ret); }
int exacct_assemble_flow_usage(ac_info_t *ac_flow, flow_usage_t *fu, int (*callback)(ac_info_t *, void *, size_t, void *, size_t, size_t *), void *ubuf, size_t ubufsize, size_t *actual) { ulong_t mask[AC_MASK_SZ]; ea_object_t *flow_usage; ea_catalog_t record_type; void *buf; size_t bufsize; int ret; mutex_enter(&ac_flow->ac_lock); if (ac_flow->ac_state == AC_OFF) { mutex_exit(&ac_flow->ac_lock); return (ENOTACTIVE); } bt_copy(&ac_flow->ac_mask[0], mask, AC_MASK_SZ); mutex_exit(&ac_flow->ac_lock); record_type = EXD_GROUP_FLOW; flow_usage = exacct_assemble_flow_record(fu, mask, record_type); if (flow_usage == NULL) { return (0); } /* * Pack object into buffer and pass to callback. */ bufsize = ea_pack_object(flow_usage, NULL, 0); buf = kmem_alloc(bufsize, KM_NOSLEEP); if (buf == NULL) { return (ENOMEM); } (void) ea_pack_object(flow_usage, buf, bufsize); ret = callback(ac_flow, ubuf, ubufsize, buf, bufsize, actual); /* * Free all previously allocations. */ kmem_free(buf, bufsize); ea_free_object(flow_usage, EUP_ALLOC); return (ret); }
/*
 * static int getacct_proc(ac_info_t *, pid_t, void *, size_t, size_t *)
 *
 * Overview
 *   Assemble a partial (EW_PARTIAL) accounting record for the process with
 *   the given pid and deliver it via getacct_callback() into the supplied
 *   buffer, storing the packed size in *sizep.
 *
 * Return values
 *   ENOTACTIVE if process accounting is off, ESRCH if no process with the
 *   given pid exists, otherwise the result of exacct_assemble_proc_usage().
 *
 * Caller's context
 *   Suitable for KM_SLEEP allocations; acquires pidlock and p_lock.
 */
static int getacct_proc(ac_info_t *ac_proc, pid_t pid, void *buf,
    size_t bufsize, size_t *sizep)
{
	proc_t *p;
	proc_usage_t *pu;
	ulong_t mask[AC_MASK_SZ];
	ulong_t *ac_mask = &mask[0];
	int error;

	/* Snapshot the accounting mask under ac_lock; bail if accounting off. */
	mutex_enter(&ac_proc->ac_lock);
	if (ac_proc->ac_state == AC_OFF) {
		mutex_exit(&ac_proc->ac_lock);
		return (ENOTACTIVE);
	}
	bt_copy(&ac_proc->ac_mask[0], ac_mask, AC_MASK_SZ);
	mutex_exit(&ac_proc->ac_lock);

	/*
	 * Allocate before taking pidlock so no allocation happens while
	 * holding it; pu_command is sized for the largest command name.
	 */
	pu = kmem_zalloc(sizeof (proc_usage_t), KM_SLEEP);
	pu->pu_command = kmem_zalloc(MAXCOMLEN + 1, KM_SLEEP);

	mutex_enter(&pidlock);
	if ((p = prfind(pid)) == NULL) {
		mutex_exit(&pidlock);
		kmem_free(pu->pu_command, MAXCOMLEN + 1);
		kmem_free(pu, sizeof (proc_usage_t));
		return (ESRCH);
	}

	/*
	 * Lock hand-off: p_lock must be acquired BEFORE pidlock is dropped,
	 * so the process cannot disappear between lookup and use.  Do not
	 * reorder these two statements.
	 */
	mutex_enter(&p->p_lock);
	mutex_exit(&pidlock);
	exacct_calculate_proc_usage(p, pu, ac_mask, EW_PARTIAL, 0);
	mutex_exit(&p->p_lock);

	error = exacct_assemble_proc_usage(ac_proc, pu,
	    getacct_callback, buf, bufsize, sizep, EW_PARTIAL);

	kmem_free(pu->pu_command, MAXCOMLEN + 1);
	kmem_free(pu, sizeof (proc_usage_t));
	return (error);
}
void exacct_commit_flow(void *arg) { flow_usage_t *f = (flow_usage_t *)arg; size_t size; ulong_t mask[AC_MASK_SZ]; struct exacct_globals *acg; ac_info_t *ac_flow; if (exacct_zone_key == ZONE_KEY_UNINITIALIZED) { /* * acctctl module not loaded. Nothing to do. */ return; } /* * Even though each zone nominally has its own flow accounting settings * (ac_flow), these are only maintained by and for the global zone. * * If this were to change in the future, this function should grow a * second zoneid (or zone) argument, and use the corresponding zone's * settings rather than always using those of the global zone. */ acg = zone_getspecific(exacct_zone_key, global_zone); ac_flow = &acg->ac_flow; mutex_enter(&ac_flow->ac_lock); if (ac_flow->ac_state == AC_OFF) { mutex_exit(&ac_flow->ac_lock); return; } bt_copy(&ac_flow->ac_mask[0], mask, AC_MASK_SZ); mutex_exit(&ac_flow->ac_lock); (void) exacct_assemble_flow_usage(ac_flow, f, exacct_commit_callback, NULL, 0, &size); }
/* * int exacct_assemble_task_usage(task_t *, int (*)(void *, size_t, void *, * size_t, size_t *), void *, size_t, size_t *, int) * * Overview * exacct_assemble_task_usage() builds the packed exacct buffer for the * indicated task, executes the given callback function, and free the packed * buffer. * * Return values * Returns 0 on success; otherwise the appropriate error code is returned. * * Caller's context * Suitable for KM_SLEEP allocations. */ int exacct_assemble_task_usage(ac_info_t *ac_task, task_t *tk, int (*callback)(ac_info_t *, void *, size_t, void *, size_t, size_t *), void *ubuf, size_t ubufsize, size_t *actual, int flag) { ulong_t mask[AC_MASK_SZ]; ea_object_t *task_record; ea_catalog_t record_type; task_usage_t *tu; void *buf; size_t bufsize; int ret; ASSERT(flag == EW_FINAL || flag == EW_PARTIAL || flag == EW_INTERVAL); mutex_enter(&ac_task->ac_lock); if (ac_task->ac_state == AC_OFF) { mutex_exit(&ac_task->ac_lock); return (ENOTACTIVE); } bt_copy(ac_task->ac_mask, mask, AC_MASK_SZ); mutex_exit(&ac_task->ac_lock); switch (flag) { case EW_FINAL: record_type = EXD_GROUP_TASK; break; case EW_PARTIAL: record_type = EXD_GROUP_TASK_PARTIAL; break; case EW_INTERVAL: record_type = EXD_GROUP_TASK_INTERVAL; break; } /* * Calculate task usage and assemble it into the task record. */ tu = kmem_zalloc(sizeof (task_usage_t), KM_SLEEP); exacct_calculate_task_usage(tk, tu, flag); task_record = exacct_assemble_task_record(tk, tu, mask, record_type); if (task_record == NULL) { /* * The current configuration of the accounting system has * resulted in records with no data; accordingly, we don't write * these, but we return success. */ kmem_free(tu, sizeof (task_usage_t)); return (0); } /* * Pack object into buffer and run callback on it. 
*/ bufsize = ea_pack_object(task_record, NULL, 0); buf = kmem_alloc(bufsize, KM_SLEEP); (void) ea_pack_object(task_record, buf, bufsize); ret = callback(ac_task, ubuf, ubufsize, buf, bufsize, actual); /* * Free all previously allocated structures. */ kmem_free(buf, bufsize); ea_free_object(task_record, EUP_ALLOC); kmem_free(tu, sizeof (task_usage_t)); return (ret); }