/* Release a manager and every group it owns; a NULL manager is a no-op. */
static void mgr_free(struct mem_tpm_mgr *mgr)
{
	int idx;

	if (mgr == NULL)
		return;

	if (mgr->groups != NULL) {
		/* Free each group's payload before the array itself. */
		for (idx = 0; idx < mgr->nr_groups; idx++)
			group_free(mgr->groups[idx].v);
		free(mgr->groups);
	}

	free(mgr);
}
/*
 * Self-test for the DH group registry: for every registered group id,
 * run two independent key exchanges and verify both sides derive the
 * same shared secret.  Returns 0 when all groups agree, 1 on the first
 * mismatch.
 */
int main(void)
{
	int len, id;
	char buf[DH_MAXSZ], buf2[DH_MAXSZ];
	char sec[DH_MAXSZ], sec2[DH_MAXSZ];
	struct group *group, *group2;
	/* Indexed by group->spec->type; assumes type is 0..2 -- TODO confirm */
	const char *name[] = { "MODP", "EC2N", "ECP" };

	group_init();

	for (id = 0; id < 0xff; id++) {
		if ((group = group_get(id)) == NULL)
			continue;
		/* Fix: the original leaked 'group' when the second lookup
		 * failed. */
		if ((group2 = group_get(id)) == NULL) {
			group_free(group);
			continue;
		}

		printf("Testing group %d (%s%d): ", id,
		    name[group->spec->type], group->spec->bits);

		len = dh_getlen(group);

		dh_create_exchange(group, buf);
		dh_create_exchange(group2, buf2);
		dh_create_shared(group, sec, buf2);
		dh_create_shared(group2, sec2, buf);

		if (memcmp(sec, sec2, len)) {
			printf("FAILED\n");
			/* Fix: release both references before bailing out. */
			group_free(group);
			group_free(group2);
			return (1);
		} else
			printf("OKAY\n");

		group_free(group);
		group_free(group2);
	}
	return (0);
}
/*
 * Drop one reference on 'g'; when the count reaches zero, unlink it from
 * the tree, release the parent reference and free owned strings.
 * Returns the remaining reference count (0 once freed).
 *
 * Fix: tolerate a NULL argument.  Callers free an optional parent link
 * (group_set() passes g->next, and default_group's parent is
 * deliberately NULL per the code in group_set), and the recursive
 * group_free(g->next) below also eventually reaches a NULL link, which
 * previously dereferenced NULL via REFCOUNT_DEBUG/--g->refcnt.
 */
int group_free(struct group *g)
{
	if (g == NULL)
		return (0);

	REFCOUNT_DEBUG(g, g->name, g->refcnt);
	if (--g->refcnt == 0) {
		RB_REMOVE(group_tree, &groups, g);
		/* Recursively release the reference held on the parent. */
		group_free(g->next);
		free(g->filename);
		free(g->sname);
		free(g);
		return (0);
	}
	return (g->refcnt);
}
// Grow the pool: allocate one raw chunk, carve it into Chunksize
// fixed-size nodes, hand all but the first back to the free list, and
// return the first node to the caller.
// NOTE(review): the leading return type "nt_base_fixed_memory_pool"
// looks truncated in this extract -- confirm against the full source.
nt_base_fixed_memory_pool::node* general_static_fixed_memory_pool<FAST_HEAP_CHECK>::newchunk()
{
    cumulative_size += Chunksize;
    int blocksize= Recsize*Chunksize;
    int totalsize= blocksize + sizeof(chunk);
    //note: alignment not considered.
    byte* top= static_cast<byte*>(new byte[totalsize]);
    // The chunk bookkeeping header lives at the END of the allocation,
    // immediately after the node storage.
    chunk* p= reinterpret_cast<chunk*> (top + blocksize);
    p->top= reinterpret_cast<node*>(top);
    p->reccount= Chunksize;
    p->next= chunklist;
    chunklist= p;
    // now chop into individual nodes, threading each node's 'next' to the
    // record that follows it in memory.
    node* first= reinterpret_cast<node*>(top);
    node* n= first;
    for (int loop= 0; loop < Chunksize; loop++)
    {
        node* nextnode= n->addptr (Recsize);
        n->next= nextnode;
        n= nextnode;
    }
    // NOTE(review): after the loop the LAST node's 'next' points one
    // record past the storage area (at the chunk header itself);
    // presumably group_free() re-terminates the list -- confirm.
    // Hand nodes 2..Chunksize to the free list; the first node is the
    // caller's allocation.
    group_free (first->addptr(Recsize), first->addptr(Recsize, Chunksize-1));
    return first;
}
sGroup *group_parse(const char *groups,size_t *count) { sGroup *res = NULL; sGroup *g,*last = NULL; const char *p = groups; size_t cnt = 0; while(*p) { size_t i,size; int uid,gid; if(sscanf(p,"%u",&gid) != 1) goto error; /* to name */ while(*p && *p != ':') p++; if(!*p) goto error; p++; g = (sGroup*)malloc(sizeof(sGroup)); if(!g) goto error; g->next = NULL; if(!res) res = g; if(last) last->next = g; g->gid = gid; g->userCount = 0; size = 8; g->users = (uid_t*)malloc(size * sizeof(uid_t)); if(!g->users) goto error; /* read the name */ i = 0; while(*p && *p != '\n' && *p != ':' && i < MAX_GROUPNAME_LEN) g->name[i++] = *p++; /* empty name is not allowed */ if(i == 0) goto error; g->name[i] = '\0'; /* read user-ids */ while(p && sscanf(p,":%u",&uid) == 1) { if(g->userCount >= size) { uid_t *old = g->users; size *= 2; g->users = (uid_t*)realloc(g->users,size * sizeof(uid_t)); if(!g->users) { free(old); goto error; } } g->users[g->userCount++] = uid; p++; while(*p && *p != ':' && *p != '\n') p++; } /* to next line */ while(*p && *p != '\n') p++; while(*p == '\n') p++; cnt = cnt + 1; last = g; } if(count) *count = cnt; return res; error: group_free(res); return NULL; }
/*
 * Collect one sample for the cgroup 'path' under 'controller'.
 *
 * The Group is looked up in hashmap 'a' (current iteration); if absent
 * it is either moved over from 'b' (previous iteration, invalidating
 * derived fields) or freshly allocated and inserted into 'a'.
 * Depending on the controller, task counts, CPU usage, memory usage or
 * IO throughput are read from the cgroup filesystem into the Group.
 * Rates (CPU fraction, IO bps) are only computed when the previous
 * sample is exactly one iteration old.
 *
 * Returns 0 on success (a vanished cgroup, -ENOENT, is treated as
 * success), a negative errno-style value on failure.  If 'ret' is
 * non-NULL it receives the Group.
 */
static int process(
                const char *controller,
                const char *path,
                Hashmap *a,
                Hashmap *b,
                unsigned iteration,
                Group **ret) {

        Group *g;
        int r;

        assert(controller);
        assert(path);
        assert(a);

        g = hashmap_get(a, path);
        if (!g) {
                g = hashmap_get(b, path);
                if (!g) {
                        /* First time we see this cgroup: allocate and key the
                         * map entry by the Group's own copy of the path. */
                        g = new0(Group, 1);
                        if (!g)
                                return -ENOMEM;

                        g->path = strdup(path);
                        if (!g->path) {
                                group_free(g);
                                return -ENOMEM;
                        }

                        r = hashmap_put(a, g->path, g);
                        if (r < 0) {
                                group_free(g);
                                return r;
                        }
                } else {
                        /* Known from the previous iteration: move it into the
                         * current map and invalidate derived values. */
                        r = hashmap_move_one(a, b, path);
                        if (r < 0)
                                return r;

                        g->cpu_valid = g->memory_valid = g->io_valid = g->n_tasks_valid = false;
                }
        }

        if (streq(controller, SYSTEMD_CGROUP_CONTROLLER) &&
            IN_SET(arg_count, COUNT_ALL_PROCESSES, COUNT_USERSPACE_PROCESSES)) {
                _cleanup_fclose_ FILE *f = NULL;
                pid_t pid;

                /* Count processes by enumerating the cgroup's pids; a cgroup
                 * that disappeared underneath us is not an error. */
                r = cg_enumerate_processes(controller, path, &f);
                if (r == -ENOENT)
                        return 0;
                if (r < 0)
                        return r;

                g->n_tasks = 0;
                while (cg_read_pid(f, &pid) > 0) {
                        /* Optionally skip kernel threads. */
                        if (arg_count == COUNT_USERSPACE_PROCESSES && is_kernel_thread(pid) > 0)
                                continue;

                        g->n_tasks++;
                }

                if (g->n_tasks > 0)
                        g->n_tasks_valid = true;

        } else if (streq(controller, "pids") && arg_count == COUNT_PIDS) {
                _cleanup_free_ char *p = NULL, *v = NULL;

                /* Cheaper task count straight from pids.current. */
                r = cg_get_path(controller, path, "pids.current", &p);
                if (r < 0)
                        return r;

                r = read_one_line_file(p, &v);
                if (r == -ENOENT)
                        return 0;
                if (r < 0)
                        return r;

                r = safe_atou64(v, &g->n_tasks);
                if (r < 0)
                        return r;

                if (g->n_tasks > 0)
                        g->n_tasks_valid = true;

        } else if (streq(controller, "cpu") || streq(controller, "cpuacct")) {
                _cleanup_free_ char *p = NULL, *v = NULL;
                uint64_t new_usage;
                nsec_t timestamp;

                if (cg_all_unified() > 0) {
                        /* Unified hierarchy: usage_usec from cpu.stat,
                         * converted to ns below. */
                        const char *keys[] = { "usage_usec", NULL };
                        _cleanup_free_ char *val = NULL;

                        if (!streq(controller, "cpu"))
                                return 0;

                        r = cg_get_keyed_attribute("cpu", path, "cpu.stat", keys, &val);
                        if (r == -ENOENT)
                                return 0;
                        if (r < 0)
                                return r;

                        r = safe_atou64(val, &new_usage);
                        if (r < 0)
                                return r;

                        new_usage *= NSEC_PER_USEC;
                } else {
                        /* Legacy hierarchy: cpuacct.usage is already ns. */
                        if (!streq(controller, "cpuacct"))
                                return 0;

                        r = cg_get_path(controller, path, "cpuacct.usage", &p);
                        if (r < 0)
                                return r;

                        r = read_one_line_file(p, &v);
                        if (r == -ENOENT)
                                return 0;
                        if (r < 0)
                                return r;

                        r = safe_atou64(v, &new_usage);
                        if (r < 0)
                                return r;
                }

                timestamp = now_nsec(CLOCK_MONOTONIC);

                /* Only derive a usage fraction from two consecutive samples
                 * whose counter increased (guards against counter resets);
                 * clamp the interval so we never divide by zero. */
                if (g->cpu_iteration == iteration - 1 &&
                    (nsec_t) new_usage > g->cpu_usage) {

                        nsec_t x, y;

                        x = timestamp - g->cpu_timestamp;
                        if (x < 1)
                                x = 1;

                        y = (nsec_t) new_usage - g->cpu_usage;
                        g->cpu_fraction = (double) y / (double) x;
                        g->cpu_valid = true;
                }

                g->cpu_usage = (nsec_t) new_usage;
                g->cpu_timestamp = timestamp;
                g->cpu_iteration = iteration;

        } else if (streq(controller, "memory")) {
                _cleanup_free_ char *p = NULL, *v = NULL;

                /* Attribute name differs between legacy and unified. */
                if (cg_all_unified() <= 0)
                        r = cg_get_path(controller, path, "memory.usage_in_bytes", &p);
                else
                        r = cg_get_path(controller, path, "memory.current", &p);
                if (r < 0)
                        return r;

                r = read_one_line_file(p, &v);
                if (r == -ENOENT)
                        return 0;
                if (r < 0)
                        return r;

                r = safe_atou64(v, &g->memory);
                if (r < 0)
                        return r;

                if (g->memory > 0)
                        g->memory_valid = true;

        } else if ((streq(controller, "io") && cg_all_unified() > 0) ||
                   (streq(controller, "blkio") && cg_all_unified() <= 0)) {
                _cleanup_fclose_ FILE *f = NULL;
                _cleanup_free_ char *p = NULL;
                bool unified = cg_all_unified() > 0;
                uint64_t wr = 0, rd = 0;
                nsec_t timestamp;

                r = cg_get_path(controller, path, unified ? "io.stat" : "blkio.io_service_bytes", &p);
                if (r < 0)
                        return r;

                f = fopen(p, "re");
                if (!f) {
                        if (errno == ENOENT)
                                return 0;
                        return -errno;
                }

                for (;;) {
                        char line[LINE_MAX], *l;
                        uint64_t k, *q;

                        if (!fgets(line, sizeof(line), f))
                                break;

                        /* Trim and skip the device */
                        l = strstrip(line);
                        l += strcspn(l, WHITESPACE);
                        l += strspn(l, WHITESPACE);

                        if (unified) {
                                /* io.stat: whitespace-separated key=value
                                 * pairs; sum rbytes/wbytes over devices. */
                                while (!isempty(l)) {
                                        if (sscanf(l, "rbytes=%" SCNu64, &k))
                                                rd += k;
                                        else if (sscanf(l, "wbytes=%" SCNu64, &k))
                                                wr += k;

                                        l += strcspn(l, WHITESPACE);
                                        l += strspn(l, WHITESPACE);
                                }
                        } else {
                                /* blkio.io_service_bytes: "<dev> Read|Write <n>" */
                                if (first_word(l, "Read")) {
                                        l += 4;
                                        q = &rd;
                                } else if (first_word(l, "Write")) {
                                        l += 5;
                                        q = &wr;
                                } else
                                        continue;

                                l += strspn(l, WHITESPACE);
                                r = safe_atou64(l, &k);
                                if (r < 0)
                                        continue;

                                *q += k;
                        }
                }

                timestamp = now_nsec(CLOCK_MONOTONIC);

                if (g->io_iteration == iteration - 1) {
                        uint64_t x, yr, yw;

                        /* Clamp the interval so the divisions below can't
                         * divide by zero. */
                        x = (uint64_t) (timestamp - g->io_timestamp);
                        if (x < 1)
                                x = 1;

                        /* Clamp deltas at zero so counter resets don't
                         * underflow into huge bogus rates. */
                        if (rd > g->io_input)
                                yr = rd - g->io_input;
                        else
                                yr = 0;

                        if (wr > g->io_output)
                                yw = wr - g->io_output;
                        else
                                yw = 0;

                        if (yr > 0 || yw > 0) {
                                g->io_input_bps = (yr * 1000000000ULL) / x;
                                g->io_output_bps = (yw * 1000000000ULL) / x;
                                g->io_valid = true;
                        }
                }

                g->io_input = rd;
                g->io_output = wr;
                g->io_timestamp = timestamp;
                g->io_iteration = iteration;
        }

        if (ret)
                *ret = g;

        return 0;
}
/* Drain the hashmap, freeing every Group as it is stolen out. */
static void group_hashmap_clear(Hashmap *h) {
        for (;;) {
                Group *entry = hashmap_steal_first(h);
                if (!entry)
                        break;
                group_free(entry);
        }
}
/*
 * Sample one cgroup under 'controller' into the Group map 'a', reusing
 * state from the previous iteration's map 'b' where possible.
 *
 * Task counts are gathered for every controller; CPU usage is read for
 * "cpuacct", memory for "memory" and IO byte counters for "blkio".
 * Rates are only derived when the previous sample is exactly one
 * iteration old.  Returns 0 on success, negative errno on failure.
 */
static int process(const char *controller, const char *path, Hashmap *a, Hashmap *b, unsigned iteration) {
        Group *g;
        int r;
        FILE *f = NULL;
        pid_t pid;
        unsigned n;

        assert(controller);
        assert(path);
        assert(a);

        g = hashmap_get(a, path);
        if (!g) {
                g = hashmap_get(b, path);
                if (!g) {
                        /* New cgroup: allocate and key by its own path copy. */
                        g = new0(Group, 1);
                        if (!g)
                                return -ENOMEM;

                        g->path = strdup(path);
                        if (!g->path) {
                                group_free(g);
                                return -ENOMEM;
                        }

                        r = hashmap_put(a, g->path, g);
                        if (r < 0) {
                                group_free(g);
                                return r;
                        }
                } else {
                        /* Seen last iteration: move over, invalidate derived
                         * fields. */
                        assert_se(hashmap_move_one(a, b, path) == 0);
                        g->cpu_valid = g->memory_valid = g->io_valid = g->n_tasks_valid = false;
                }
        }

        /* Regardless which controller, let's find the maximum number
         * of processes in any of it */
        r = cg_enumerate_processes(controller, path, &f);
        if (r < 0)
                return r;

        n = 0;
        while (cg_read_pid(f, &pid) > 0)
                n++;
        fclose(f);

        if (n > 0) {
                if (g->n_tasks_valid)
                        g->n_tasks = MAX(g->n_tasks, n);
                else
                        g->n_tasks = n;

                g->n_tasks_valid = true;
        }

        if (streq(controller, "cpuacct")) {
                uint64_t new_usage;
                char *p, *v;
                struct timespec ts;

                r = cg_get_path(controller, path, "cpuacct.usage", &p);
                if (r < 0)
                        return r;

                r = read_one_line_file(p, &v);
                free(p);
                if (r < 0)
                        return r;

                r = safe_atou64(v, &new_usage);
                free(v);
                if (r < 0)
                        return r;

                assert_se(clock_gettime(CLOCK_MONOTONIC, &ts) == 0);

                if (g->cpu_iteration == iteration - 1) {
                        uint64_t x, y;

                        x = ((uint64_t) ts.tv_sec * 1000000000ULL + (uint64_t) ts.tv_nsec) -
                            ((uint64_t) g->cpu_timestamp.tv_sec * 1000000000ULL + (uint64_t) g->cpu_timestamp.tv_nsec);

                        /* Fix: clamp the interval so a zero-length delta
                         * cannot yield an infinite fraction. */
                        if (x < 1)
                                x = 1;

                        /* Fix: guard against counter resets; the unsigned
                         * subtraction previously underflowed to a huge
                         * value. */
                        y = new_usage > g->cpu_usage ? new_usage - g->cpu_usage : 0;

                        if (y > 0) {
                                g->cpu_fraction = (double) y / (double) x;
                                g->cpu_valid = true;
                        }
                }

                g->cpu_usage = new_usage;
                g->cpu_timestamp = ts;
                g->cpu_iteration = iteration;

        } else if (streq(controller, "memory")) {
                char *p, *v;

                r = cg_get_path(controller, path, "memory.usage_in_bytes", &p);
                if (r < 0)
                        return r;

                r = read_one_line_file(p, &v);
                free(p);
                if (r < 0)
                        return r;

                r = safe_atou64(v, &g->memory);
                free(v);
                if (r < 0)
                        return r;

                if (g->memory > 0)
                        g->memory_valid = true;

        } else if (streq(controller, "blkio")) {
                char *p;
                uint64_t wr = 0, rd = 0;
                struct timespec ts;

                r = cg_get_path(controller, path, "blkio.io_service_bytes", &p);
                if (r < 0)
                        return r;

                f = fopen(p, "re");
                free(p);
                if (!f)
                        return -errno;

                for (;;) {
                        char line[LINE_MAX], *l;
                        uint64_t k, *q;

                        if (!fgets(line, sizeof(line), f))
                                break;

                        /* Skip the device column, then dispatch on the
                         * operation name. */
                        l = strstrip(line);
                        l += strcspn(l, WHITESPACE);
                        l += strspn(l, WHITESPACE);

                        if (first_word(l, "Read")) {
                                l += 4;
                                q = &rd;
                        } else if (first_word(l, "Write")) {
                                l += 5;
                                q = &wr;
                        } else
                                continue;

                        l += strspn(l, WHITESPACE);
                        r = safe_atou64(l, &k);
                        if (r < 0)
                                continue;

                        *q += k;
                }

                fclose(f);

                assert_se(clock_gettime(CLOCK_MONOTONIC, &ts) == 0);

                if (g->io_iteration == iteration - 1) {
                        uint64_t x, yr, yw;

                        x = ((uint64_t) ts.tv_sec * 1000000000ULL + (uint64_t) ts.tv_nsec) -
                            ((uint64_t) g->io_timestamp.tv_sec * 1000000000ULL + (uint64_t) g->io_timestamp.tv_nsec);

                        /* Fix: x could be 0 on coarse clocks, which made the
                         * bps computation below divide by zero (UB). */
                        if (x < 1)
                                x = 1;

                        /* Fix: clamp at zero instead of underflowing when a
                         * counter went backwards. */
                        yr = rd > g->io_input ? rd - g->io_input : 0;
                        yw = wr > g->io_output ? wr - g->io_output : 0;

                        if (yr > 0 || yw > 0) {
                                g->io_input_bps = (yr * 1000000000ULL) / x;
                                g->io_output_bps = (yw * 1000000000ULL) / x;
                                g->io_valid = true;
                        }
                }

                g->io_input = rd;
                g->io_output = wr;
                g->io_timestamp = ts;
                g->io_iteration = iteration;
        }

        return 0;
}
/****************************************************************************
 * Name: group_allocate
 *
 * Description:
 *   Allocate a task group structure, attach it to 'tcb', assign it a
 *   group ID (when group members/address environments are configured)
 *   and duplicate the parent task's environment into it.  'ttype'
 *   carries the TCB_FLAG_TTYPE_* thread type bits and decides whether
 *   group resources are created with privileged access.
 *
 * Returned Value:
 *   OK on success; a negated errno value (-ENOMEM, or the env_dup()
 *   error) on failure, in which case all allocations are released and
 *   tcb->cmn.group is left NULL.
 *
 ****************************************************************************/

int group_allocate(FAR struct task_tcb_s *tcb, uint8_t ttype)
{
  FAR struct task_group_s *group;
  int ret;

  DEBUGASSERT(tcb && !tcb->cmn.group);

  /* Allocate the group structure and assign it to the TCB */

  group = (FAR struct task_group_s *)kmm_zalloc(sizeof(struct task_group_s));
  if (!group)
    {
      return -ENOMEM;
    }

#if CONFIG_NFILE_STREAMS > 0 && (defined(CONFIG_BUILD_PROTECTED) || \
    defined(CONFIG_BUILD_KERNEL)) && defined(CONFIG_MM_KERNEL_HEAP)
  /* If this group is being created for a privileged thread, then all
   * elements of the group must be created for privileged access.
   */

  if ((ttype & TCB_FLAG_TTYPE_MASK) == TCB_FLAG_TTYPE_KERNEL)
    {
      group->tg_flags |= GROUP_FLAG_PRIVILEGED;
    }

  /* In a flat, single-heap build. The stream list is allocated with the
   * group structure. But in a kernel build with a kernel allocator, it
   * must be separately allocated using a user-space allocator.
   */

  group->tg_streamlist = (FAR struct streamlist *)
    group_zalloc(group, sizeof(struct streamlist));

  if (!group->tg_streamlist)
    {
      kmm_free(group);
      return -ENOMEM;
    }
#endif

  /* Attach the group to the TCB */

  tcb->cmn.group = group;

#if defined(HAVE_GROUP_MEMBERS) || defined(CONFIG_ARCH_ADDRENV)
  /* Assign the group a unique ID.  If g_gidcounter were to wrap before
   * we finish with task creation, that would be a problem.
   */

  group_assigngid(group);
#endif

  /* Duplicate the parent tasks environment */

  ret = env_dup(group);
  if (ret < 0)
    {
      /* Unwind: release the separately allocated stream list (if any),
       * free the group and detach it from the TCB.
       */

#if CONFIG_NFILE_STREAMS > 0 && (defined(CONFIG_BUILD_PROTECTED) || \
    defined(CONFIG_BUILD_KERNEL)) && defined(CONFIG_MM_KERNEL_HEAP)
      group_free(group, group->tg_streamlist);
#endif
      kmm_free(group);
      tcb->cmn.group = NULL;
      return ret;
    }

  /* Initialize the pthread join semaphore */

#ifndef CONFIG_DISABLE_PTHREAD
  (void)sem_init(&group->tg_joinsem, 0, 1);
#endif

  return OK;
}
static void test_basics(void) { size_t count,oldFree; test_caseStart("Testing basics"); { sGroup *g; oldFree = heapspace(); g = group_parse("0:root:0",&count); test_assertTrue(g != NULL); test_assertSize(count,1); if(g) { test_assertTrue(g->next == NULL); test_assertUInt(g->gid,0); test_assertStr(g->name,"root"); test_assertSize(g->userCount,1); test_assertUInt(g->users[0],0); } group_free(g); test_assertSize(heapspace(),oldFree); } { sGroup *g; oldFree = heapspace(); g = group_parse("0:root\n",&count); test_assertTrue(g != NULL); test_assertSize(count,1); if(g) { test_assertTrue(g->next == NULL); test_assertUInt(g->gid,0); test_assertStr(g->name,"root"); test_assertSize(g->userCount,0); } group_free(g); test_assertSize(heapspace(),oldFree); } { sGroup *g; oldFree = heapspace(); g = group_parse("0:root:",&count); test_assertTrue(g != NULL); test_assertSize(count,1); if(g) { test_assertTrue(g->next == NULL); test_assertUInt(g->gid,0); test_assertStr(g->name,"root"); test_assertSize(g->userCount,0); } group_free(g); test_assertSize(heapspace(),oldFree); } { sGroup *g; oldFree = heapspace(); g = group_parse("2444:a:100:200",&count); test_assertTrue(g != NULL); test_assertSize(count,1); if(g) { test_assertTrue(g->next == NULL); test_assertUInt(g->gid,2444); test_assertStr(g->name,"a"); test_assertSize(g->userCount,2); test_assertUInt(g->users[0],100); test_assertUInt(g->users[1],200); } group_free(g); test_assertSize(heapspace(),oldFree); } { sGroup *g,*res; oldFree = heapspace(); res = group_parse("1:a:1:2\n\n2:b:4",&count); test_assertSize(count,2); g = res; test_assertTrue(g != NULL); if(g) { test_assertUInt(g->gid,1); test_assertStr(g->name,"a"); test_assertSize(g->userCount,2); test_assertUInt(g->users[0],1); test_assertUInt(g->users[1],2); g = g->next; } test_assertTrue(g != NULL); if(g) { test_assertUInt(g->gid,2); test_assertStr(g->name,"b"); test_assertSize(g->userCount,1); test_assertUInt(g->users[0],4); } group_free(res); 
test_assertSize(heapspace(),oldFree); } test_caseSucceeded(); }
/*
 * Apply the control-message settings 'gs' (total message length 'len')
 * to an existing group looked up by name.  Option payloads are first
 * copied to the heap in full, then committed, so a mid-copy allocation
 * failure leaves the group untouched.  Returns NULL on success or a
 * static error string describing the failure.
 */
char *
group_set(struct ctl_group_settings *gs, size_t len)
{
	struct group *g, *parent, fake;
	int i;

	/* Resolve parent and target group by name via the RB tree; 'fake'
	 * is only a lookup key here. */
	strlcpy(fake.name, gs->parent, sizeof fake.name);
	if ((parent = RB_FIND(group_tree, &groups, &fake)) == NULL)
		return "no such parent group";

	strlcpy(fake.name, gs->name, sizeof fake.name);
	if ((g = RB_FIND(group_tree, &groups, &fake)) == NULL)
		return "no such group";

	/* Reparenting needs to ensure we don't create a loop. */
	if (gs->flags & GROUP_WANT_PARENT) {
		struct group *temp = parent;

		if (strcmp(fake.name, "default") == 0)
			return "can't reparent default group";
		/* Walk up from the proposed parent; meeting 'g' would make
		 * a cycle. */
		do {
			if (temp == g)
				return "ouroboros loop detected";
			temp = temp->next;
			/* default_group's parent is deliberately NULL */
		} while (temp && temp != &default_group);
	}

	/* Parse the option payload that trails the fixed-size header;
	 * 'fake' is reused as scratch storage for the parsed pointers. */
	len -= offsetof(struct ctl_group_settings, options);
	memset(&fake, 0, sizeof fake);
	if (len > 2 && dhcp_options_parse(gs->options, len, fake.options) < 0)
		return "can't parse options";

	/* We need all of the options to be malloc'd, not just on stack. */
	for (i = 0; i < 256; ++i) {
		u_int8_t *temp;
		size_t optlen;

		if (fake.options[i] == NULL)
			continue;
		/* First byte is the option length; copy length byte + data. */
		optlen = fake.options[i][0] + 1;
		if ((temp = malloc(optlen)) == NULL)
			goto fail;
		memcpy(temp, fake.options[i], optlen);
		fake.options[i] = temp;
	}

	/* All have been allocated successfully, move them into production. */
	for (i = 0; i < 256; ++i)
		if (fake.options[i]) {
			free(g->options[i]);
			g->options[i] = fake.options[i];
		}

	/* Take care of configuration with special needs. */
	if (gs->flags & GROUP_WANT_PARENT) {
		struct group *tmp = group_use(parent);

		/* Drop the reference on the old parent before relinking. */
		group_free(g->next);
		g->next = tmp;
	}
	if (gs->flags & GROUP_WANT_NEXT_SERVER)
		g->next_server = gs->next_server;
	if (gs->flags & GROUP_WANT_FILENAME) {
		free(g->filename);
		g->filename = strndup(gs->filename, BOOTP_FILE);
	}
	if (gs->flags & GROUP_WANT_SNAME) {
		free(g->sname);
		g->sname = strndup(gs->sname, BOOTP_SNAME);
	}
	g->flags |= gs->flags | GROUP_MODIFIED;

	return NULL;
 fail:
	/* Index i failed its malloc, so heap copies exist only for [0, i);
	 * release those (NULL slots were skipped above, free(NULL) is ok). */
	while (i > 0)
		free(fake.options[--i]);
	return "out of memory for options";
}