whp new_walker(pp_pp* pp, mpz_t limit, int invsum) {
    whp wh;
    walker* w;
    walk_result* wr;
    int numsize = pp->valnumsize;

    /* Reserve space for the walker at the current end of the arena. */
    wh = arena_size;
    w = WP(wh);
    arena_size += walker_charsize(numsize);
    grow_arena(w, arena_size);

    w->heap = mbh_new(I2P(wh), &mbh_compare_wr);
    w->pp = pp;
    w->numsize = numsize;
    w->adder = pp->adder;
    w->cmper = pp->cmper;
    w->invsum = invsum;
    w->vecsize = (pp->valsize + 31) >> 5;   /* 32-bit words needed to hold valsize bits */
    w->arenanext = (wrhp)0;
    w->have_previous = 0;
    mpx_set_z(w_limit(w), numsize, limit);

    /* Seed the heap with an empty walk_result: nothing discarded yet. */
    w_pick_arena(w, wr);
    wr->invsum = 0;
    wr->nextbit = pp->valsize;
    mpx_set_ui(wr_next_discard(w, wr), numsize, 0);
    mpx_set_ui(wr_discard_direct(w, wr), numsize, 0);
    memset(wr_vec_direct(w, wr), 0, w->vecsize * sizeof(int));
    push_heap(w, wr);
    return wh;
}
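/*
 * Usage sketch (an assumption, not taken from this file): judging by the
 * WP(wh) call above, the returned whp is an arena offset rather than a raw
 * pointer, so a caller would re-resolve it with WP() after any operation
 * that may grow the arena.  A minimal call, assuming a pp_pp* that has
 * already been initialised elsewhere and an invsum of 0, might look like:
 *
 *     mpz_t limit;
 *     mpz_init_set_ui(limit, 1000000UL);
 *     whp wh = new_walker(pp, limit, 0);
 *     walker* w = WP(wh);   // re-derive the pointer after arena growth
 *     ...
 *     mpz_clear(limit);
 */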
static int rebuild_gid_cache() {
    /* We're holding the lock, so we have mutual exclusion on getpwent and
     * getgrent too. */
    struct group *gr;
    struct gid_cache_entry *ent;
    int i;
    struct uid_cache_entry *uid_ent;

    gid_cache_size = 0;

    /* Sort the uid cache by name so group members can be resolved with bsearch. */
    qsort(uid_cache, uid_cache_size, sizeof(struct uid_cache_entry), uid_cache_name_sortcmp);

    while (1) {
        errno = 0;
        gr = getgrent();
        if (gr == NULL) {
            if (errno == 0) {
                break;
            } else {
                goto error;
            }
        }

        if (gid_cache_size == gid_cache_capacity) {
            grow_array(&gid_cache, &gid_cache_capacity, sizeof(struct gid_cache_entry));
        }
        ent = &gid_cache[gid_cache_size++];
        ent->gid = gr->gr_gid;
        ent->uid_count = 0;
        ent->uids_offset = cache_arena.size;

        /* Resolve each member name to a uid via the sorted uid cache. */
        for (i = 0; gr->gr_mem[i] != NULL; ++i) {
            uid_ent = (struct uid_cache_entry *)bsearch(
                gr->gr_mem[i], uid_cache, uid_cache_size,
                sizeof(struct uid_cache_entry), uid_cache_name_searchcmp
            );
            if (uid_ent != NULL) {
                grow_arena(&cache_arena, sizeof(uid_t));
                ((uid_t *)ARENA_GET(cache_arena, ent->uids_offset))[ent->uid_count++] = uid_ent->uid;
            }
        }
    }

    endgrent();
    return 1;

error:
    endgrent();
    clear_gid_cache();
    DPRINTF("Failed to rebuild gid cache");
    return 0;
}
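/*
 * Usage sketch (hypothetical; the lock and uid-cache helpers named below are
 * assumptions based on the comment at the top of the function, not code shown
 * here): the gid cache is only meaningful once the uid cache is populated,
 * since group members are matched against it by name, and the rebuild must
 * run under the lock that serialises getpwent()/getgrent():
 *
 *     lock_cache();                                      // placeholder for the real lock
 *     if (rebuild_uid_cache() && rebuild_gid_cache()) {  // uid cache first
 *         // caches are consistent and ready for lookups
 *     } else {
 *         // on failure the gid cache has been cleared; fall back to
 *         // uncached getgrgid()/getgrnam() lookups
 *     }
 *     unlock_cache();                                    // placeholder
 */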