/*!
 * \brief Create a new domain structure
 * \param _n pointer to str with the domain name; the string is not
 *        copied and must point into the str stored in the domain list
 * \param _s hash table size
 * \param _d receives the newly created domain
 * \return 0 on success, -1 on failure
 */
int new_udomain(str* _n, int _s, udomain_t** _d)
{
	int idx;

	/* Allocated in shared memory: the cache is read by the timer,
	 * which runs in a separate process. */
	*_d = (udomain_t*)shm_malloc(sizeof(udomain_t));
	if (*_d == NULL) {
		LM_ERR("new_udomain(): No memory left\n");
		goto err_domain;
	}
	memset(*_d, 0, sizeof(udomain_t));

	(*_d)->table = (hslot_t*)shm_malloc(sizeof(hslot_t) * _s);
	if ((*_d)->table == NULL) {
		LM_ERR("no memory left 2\n");
		goto err_table;
	}

	(*_d)->name = _n;
	for (idx = 0; idx < _s; idx++)
		init_slot(*_d, &(*_d)->table[idx], idx);
	(*_d)->size = _s;

	return 0;

err_table:
	shm_free(*_d);
err_domain:
	return -1;
}
/*
 * ExecCreateTupleTable
 *		Allocate and initialize a tuple table able to hold 'tableSize'
 *		slots, with every slot marked empty.
 */
TupleTable
ExecCreateTupleTable(int tableSize)
{
	TupleTable	table;
	int			slotno;

	/* sanity checks */
	Assert(tableSize >= 1);

	/* header already contains one slot, hence the (tableSize - 1) */
	table = (TupleTable) palloc(sizeof(TupleTableData) +
								(tableSize - 1) * sizeof(TupleTableSlot));
	table->size = tableSize;
	table->next = 0;

	/* put every slot into its empty state */
	for (slotno = 0; slotno < tableSize; slotno++)
		init_slot(&table->array[slotno], NULL);

	return table;
}
/*
 * Allocate a new CPPackage with one reference, duplicating the string
 * arguments and taking a reference on 'version'.  The printable form
 * "category/name-version" is precomputed into self->str.
 */
CPPackage
cp_package_new(
    const char *category,
    const char *name,
    CPVersion version,
    const char *slot,
    const char *repo
) {
    CPPackage pkg = g_new0(struct CPPackageS, 1);

    pkg->refs = (unsigned int)1;

    /* TODO: validate args or make function private */
    g_assert(pkg->category == NULL);
    pkg->category = g_strdup(category);

    g_assert(pkg->name == NULL);
    pkg->name = g_strdup(name);

    g_assert(pkg->version == NULL);
    pkg->version = cp_version_ref(version);

    init_slot(pkg, slot);

    g_assert(pkg->repo == NULL);
    pkg->repo = g_strdup(repo);

    g_assert(pkg->str == NULL);
    pkg->str = g_strdup_printf("%s/%s-%s",
        category, name, cp_version_str(version));

    return pkg;
}
/*
 * MakeTupleTableSlot
 *		Construct a single, empty TupleTableSlot node.
 */
TupleTableSlot *
MakeTupleTableSlot(void)
{
	TupleTableSlot *new_slot = makeNode(TupleTableSlot);

	init_slot(new_slot, NULL);
	return new_slot;
}
/*
 * Create a new usrloc domain in shared memory.
 *
 * _n - domain name; the str is NOT copied, it must point at a str that
 *      stays alive inside the domain list.
 * _s - number of hash-table slots to allocate.
 * _d - out parameter receiving the new domain.
 *
 * Returns 0 on success, -1 on failure (shared memory exhausted or,
 * when built with STATISTICS, a stat counter could not be registered).
 */
int new_udomain(str* _n, int _s, udomain_t** _d)
{
	int i;
#ifdef STATISTICS
	char *name;
#endif

	/* Must be always in shared memory, since
	 * the cache is accessed from timer which
	 * lives in a separate process */
	*_d = (udomain_t*)shm_malloc(sizeof(udomain_t));
	if (!(*_d)) {
		LM_ERR("new_udomain(): No memory left\n");
		goto error0;
	}
	memset(*_d, 0, sizeof(udomain_t));

	(*_d)->table = (hslot_t*)shm_malloc(sizeof(hslot_t) * _s);
	if (!(*_d)->table) {
		LM_ERR("no memory left 2\n");
		goto error1;
	}

	(*_d)->name = _n;

	for(i = 0; i < _s; i++) {
		init_slot(*_d, &((*_d)->table[i]), i);
	}

	(*_d)->size = _s;

#ifdef STATISTICS
	/* register the statistics */
	/* NOTE(review): 'name' is not freed on either path below;
	 * presumably register_stat takes ownership because of
	 * STAT_SHM_NAME -- confirm against the stats API */
	if ( (name=build_stat_name(_n,"contacts"))==0
	|| register_stat("usrloc", name, &(*_d)->contacts,
		STAT_NO_RESET|STAT_SHM_NAME)!=0 ) {
		LM_ERR("failed to add stat variable\n");
		goto error2;
	}
	if ( (name=build_stat_name(_n,"expires"))==0
	|| register_stat("usrloc", name, &(*_d)->expired,
		STAT_SHM_NAME)!=0 ) {
		LM_ERR("failed to add stat variable\n");
		goto error2;
	}
#endif

	return 0;

	/* unwind in reverse allocation order */
#ifdef STATISTICS
error2:
	shm_free((*_d)->table);
#endif
error1:
	shm_free(*_d);
error0:
	return -1;
}
/* --------------------------------
 * MakeSingleTupleTableSlot
 *
 * Convenience routine: build one standalone slot (not tracked by the
 * main executor tuple table) and attach the given tuple descriptor,
 * exactly as ExecSetSlotDescriptor(slot, tupdesc) would.
 * --------------------------------
 */
TupleTableSlot *
MakeSingleTupleTableSlot(TupleDesc tupdesc)
{
	TupleTableSlot *standalone = palloc(sizeof(*standalone));

	init_slot(standalone, NULL);
	ExecSetSlotDescriptor(standalone, tupdesc);
	return standalone;
}
/*
 * Read a count from stdin, run golla() that many times, then print the
 * slot array.
 *
 * Returns 0 on success, 1 when no valid integer could be read.
 */
int main()
{
	int n, i;

	printf("input n: ");
	/* The scanf result was previously ignored: on malformed input 'n'
	 * stayed uninitialized and the loop bound was undefined behavior. */
	if (scanf("%d", &n) != 1) {
		fprintf(stderr, "invalid input\n");
		return 1;
	}
	init_slot();
	for (i = 0; i < n; i++)
		golla(i);
	print_arr(slot, SLOT_SIZE);
	return 0;
}
/*
 * Return a child's slot to the open-slot free list and reinitialize
 * it.  Caller is responsible for holding the required lock.
 */
static void
return_cslot_nolock(child_t *ch)
{
	int	freed = ch->child_slot;

	if (open_tail == -1) {
		/* free list is empty: this slot becomes head and tail */
		open_head = open_tail = freed;
	} else {
		/* append to the free list and advance the tail */
		child[open_tail]->next_open = freed;
		open_tail = freed;
	}

	(void) init_slot(ch->child_slot);
}
/*
 * Allocate a domain_list_item named 'name', initialize its embedded
 * udomain (hash table with ul_hash_size slots, db type 'type') and
 * push it onto the global domain_list.
 *
 * Returns the new item, or NULL when pkg memory is exhausted.
 */
static inline struct domain_list_item *
add_to_dlist(str *name, int type)
{
	struct domain_list_item *entry;
	int slot;

	entry = (struct domain_list_item *)
			pkg_malloc(sizeof(struct domain_list_item));
	if (entry == NULL) {
		LM_ERR("Out of pkg memory.\n");
		return NULL;
	}

	/* private, NUL-terminated copy of the domain name */
	entry->name.s = (char *)pkg_malloc(name->len + 1);
	if (entry->name.s == NULL) {
		LM_ERR("Out of pkg memory (1).\n");
		pkg_free(entry);
		return NULL;
	}
	memcpy(entry->name.s, name->s, name->len);
	entry->name.s[name->len] = '\0';
	entry->name.len = name->len;

	memset(&entry->domain, 0, sizeof(struct udomain));
	entry->domain.name = &entry->name;
	entry->domain.dbt = type;

	entry->domain.table =
			(hslot_t *)pkg_malloc(sizeof(hslot_t) * ul_hash_size);
	if (entry->domain.table == NULL) {
		LM_ERR("Out of pkg memory (2)\n");
		pkg_free(entry->name.s);
		pkg_free(entry);
		return NULL;
	}
	for (slot = 0; slot < ul_hash_size; slot++)
		init_slot(&entry->domain, &entry->domain.table[slot], slot);
	entry->domain.size = ul_hash_size;

	/* Everything else is not useful for now. */
	entry->next = domain_list;
	domain_list = entry;
	return entry;
}
/*
 * Parse one request packet from connection 'c' and launch a job slot.
 *
 * Packet layout (NUL-separated fields):
 *   id NUL runas NUL command [NUL environment-block]
 *
 * On any malformed field FAIL reports an error keyed by 'id'
 * (presumably aborting the handler -- confirm FAIL's control flow).
 */
static void handle_packet(struct connection* c)
{
  str* packet;
  const char* id;
  const char* runas;
  const char* command;
  const char* envstart;
  long slot;
  unsigned int i;
  struct passwd* pw;

  packet = &c->packet;
  /* the packet starts with the request ID; it must be non-empty */
  id = packet->s;
  if (*id == NUL) FAIL(id, "DInvalid ID");
  if ((slot = pick_slot()) < 0) FAIL(id, "DCould not allocate a slot");
  wrap_str(str_copys(&slots[slot].id, id));
  /* advance past the ID to the username field */
  if ((i = str_findfirst(packet, NUL) + 1) >= packet->len)
    FAIL(id, "DInvalid packet");
  runas = packet->s + i;
  if (*runas == NUL || (pw = getpwnam(runas)) == 0)
    FAIL(id, "DInvalid username");
  /* advance to the command field */
  if ((i = str_findnext(packet, NUL, i) + 1) >= packet->len)
    FAIL(id, "DInvalid packet");
  command = packet->s + i;
  /* optional environment block; when present, NUL-terminate it */
  if ((i = str_findnext(packet, NUL, i) + 1) >= packet->len)
    envstart = 0;
  else {
    envstart = packet->s + i;
    wrap_str(str_catc(packet, 0));
  }
  if (!init_slot(slot, pw)) FAIL(id, "ZOut of memory");
  start_slot(slot, command, envstart);
}
/*
 * Create a new domain structure.
 *
 * _n points to the str holding the domain name; it is NOT copied and
 * must point into the str stored in the domain list.  On success *_d
 * receives the new domain and 0 is returned; negative codes identify
 * the step that failed.
 */
int new_udomain(str* _n, udomain_t** _d)
{
	int slot;

	/* Must live in shared memory: the timer process, which runs
	 * separately, reads this cache. */
	*_d = (udomain_t*)shm_malloc(sizeof(udomain_t));
	if (*_d == NULL) {
		LOG(L_ERR, "new_udomain(): No memory left\n");
		return -1;
	}
	memset(*_d, 0, sizeof(udomain_t));

	(*_d)->table = (hslot_t*)shm_malloc(sizeof(hslot_t) * UDOMAIN_HASH_SIZE);
	if ((*_d)->table == NULL) {
		LOG(L_ERR, "new_udomain(): No memory left 2\n");
		shm_free(*_d);
		return -2;
	}

	(*_d)->name = _n;

	for (slot = 0; slot < UDOMAIN_HASH_SIZE; slot++) {
		if (init_slot(*_d, &(*_d)->table[slot]) < 0) {
			LOG(L_ERR, "new_udomain(): Error while initializing hash table\n");
			shm_free((*_d)->table);
			shm_free(*_d);
			return -3;
		}
	}

	lock_init(&(*_d)->lock);
	(*_d)->users = 0;
	(*_d)->expired = 0;
	return 0;
}
/*
 * Allocate and initialize a presence domain.
 *
 * _n is the domain name (str not copied; must point into the domain
 * list), _s the hash table size, _r/_u the register/unregister watcher
 * callbacks.  *_d receives the result.  Returns 0 on success, negative
 * on allocation failure (paerrno is set to PA_NO_MEMORY).
 */
int new_pdomain(str* _n, int _s, pdomain_t** _d,
		register_watcher_t _r, unregister_watcher_t _u)
{
	pdomain_t* dom;
	int slot;

	dom = (pdomain_t*)mem_alloc(sizeof(pdomain_t));
	if (dom == NULL) {
		paerrno = PA_NO_MEMORY;
		LOG(L_ERR, "new_pdomain(): No memory left\n");
		return -1;
	}
	memset(dom, 0, sizeof(pdomain_t));

	dom->table = (hslot_t*)mem_alloc(sizeof(hslot_t) * _s);
	if (dom->table == NULL) {
		paerrno = PA_NO_MEMORY;
		LOG(L_ERR, "new_pdomain(): No memory left 2\n");
		mem_free(dom);
		return -2;
	}

	dom->name = _n;
	for (slot = 0; slot < _s; slot++)
		init_slot(dom, &dom->table[slot]);
	dom->size = _s;

	lock_init(&dom->lock);
	dom->users = 0;
	dom->expired = 0;
	dom->reg = _r;
	dom->unreg = _u;

	*_d = dom;
	return 0;
}
int eisa_enumerator(unsigned long eeprom_addr, struct resource *io_parent, struct resource *mem_parent) { int i; struct eeprom_header *eh; static char eeprom_buf[HPEE_MAX_LENGTH]; for (i=0; i < HPEE_MAX_LENGTH; i++) { eeprom_buf[i] = gsc_readb(eeprom_addr+i); } printk(KERN_INFO "Enumerating EISA bus\n"); eh = (struct eeprom_header*)(eeprom_buf); for (i=0;i<eh->num_slots;i++) { struct eeprom_eisa_slot_info *es; es = (struct eeprom_eisa_slot_info*) (&eeprom_buf[HPEE_SLOT_INFO(i)]); if (-1==init_slot(i+1, es)) { return -1; } if (es->config_data_offset < HPEE_MAX_LENGTH) { if (parse_slot_config(i+1, &eeprom_buf[es->config_data_offset], es, io_parent, mem_parent)) { return -1; } } else { printk (KERN_WARNING "EISA EEPROM offset 0x%x out of range\n",es->config_data_offset); return -1; } } return 0; }
/*
 * vacuum_one_database
 *
 * Process tables in the given database.  If the 'tables' list is empty,
 * process all tables in the database.
 *
 * Note that this function is only concerned with running exactly one stage
 * when in analyze-in-stages mode; caller must iterate on us if necessary.
 *
 * If concurrentCons is > 1, multiple connections are used to vacuum tables
 * in parallel.  In this case and if the table list is empty, we first obtain
 * a list of tables from the database.
 *
 * BUGFIX: the failure flag used to be "bool result = 0" set to -1 and
 * compared with "result == -1".  Assigning -1 to a C99 bool stores true
 * (1), so "result == -1" was always false and the process never exited
 * with status 1 on failure.  Replaced with a plain bool 'failed'.
 */
static void
vacuum_one_database(const char *dbname, vacuumingOptions *vacopts,
					int stage,
					SimpleStringList *tables,
					const char *host, const char *port,
					const char *username, enum trivalue prompt_password,
					int concurrentCons,
					const char *progname, bool echo, bool quiet)
{
	PQExpBufferData sql;
	PGconn	   *conn;
	SimpleStringListCell *cell;
	ParallelSlot *slots = NULL;
	SimpleStringList dbtables = {NULL, NULL};
	int			i;
	bool		failed = false;
	bool		parallel = concurrentCons > 1;
	const char *stage_commands[] = {
		"SET default_statistics_target=1; SET vacuum_cost_delay=0;",
		"SET default_statistics_target=10; RESET vacuum_cost_delay;",
		"RESET default_statistics_target;"
	};
	const char *stage_messages[] = {
		gettext_noop("Generating minimal optimizer statistics (1 target)"),
		gettext_noop("Generating medium optimizer statistics (10 targets)"),
		gettext_noop("Generating default (full) optimizer statistics")
	};

	Assert(stage == ANALYZE_NO_STAGE ||
		   (stage >= 0 && stage < ANALYZE_NUM_STAGES));

	if (!quiet)
	{
		if (stage != ANALYZE_NO_STAGE)
			printf(_("%s: processing database \"%s\": %s\n"),
				   progname, dbname, stage_messages[stage]);
		else
			printf(_("%s: vacuuming database \"%s\"\n"),
				   progname, dbname);
		fflush(stdout);
	}

	conn = connectDatabase(dbname, host, port, username, prompt_password,
						   progname, false);

	initPQExpBuffer(&sql);

	/*
	 * If a table list is not provided and we're using multiple connections,
	 * prepare the list of tables by querying the catalogs.
	 */
	if (parallel && (!tables || !tables->head))
	{
		PQExpBufferData buf;
		PGresult   *res;
		int			ntups;
		int			i;

		initPQExpBuffer(&buf);
		res = executeQuery(conn,
						   "SELECT c.relname, ns.nspname FROM pg_class c, pg_namespace ns\n"
						   " WHERE relkind IN (\'r\', \'m\') AND c.relnamespace = ns.oid\n"
						   " ORDER BY c.relpages DESC;",
						   progname, echo);
		ntups = PQntuples(res);
		for (i = 0; i < ntups; i++)
		{
			appendPQExpBuffer(&buf, "%s",
							  fmtQualifiedId(PQserverVersion(conn),
											 PQgetvalue(res, i, 1),
											 PQgetvalue(res, i, 0)));
			simple_string_list_append(&dbtables, buf.data);
			resetPQExpBuffer(&buf);
		}
		termPQExpBuffer(&buf);
		tables = &dbtables;

		/*
		 * If there are more connections than vacuumable relations, we don't
		 * need to use them all.
		 */
		if (concurrentCons > ntups)
			concurrentCons = ntups;
		if (concurrentCons <= 1)
			parallel = false;
	}

	/*
	 * Setup the database connections. We reuse the connection we already have
	 * for the first slot. If not in parallel mode, the first slot in the
	 * array contains the connection.  Guard against a non-positive count so
	 * pg_malloc never gets a zero/negative size.
	 */
	if (concurrentCons <= 0)
		concurrentCons = 1;
	slots = (ParallelSlot *) pg_malloc(sizeof(ParallelSlot) * concurrentCons);
	init_slot(slots, conn);
	if (parallel)
	{
		for (i = 1; i < concurrentCons; i++)
		{
			conn = connectDatabase(dbname, host, port, username,
								   prompt_password, progname, false);
			init_slot(slots + i, conn);
		}
	}

	/*
	 * Prepare all the connections to run the appropriate analyze stage, if
	 * caller requested that mode.
	 */
	if (stage != ANALYZE_NO_STAGE)
	{
		int			j;

		/* We already emitted the message above */
		for (j = 0; j < concurrentCons; j++)
			executeCommand((slots + j)->connection,
						   stage_commands[stage], progname, echo);
	}

	cell = tables ? tables->head : NULL;
	do
	{
		ParallelSlot *free_slot;
		const char *tabname = cell ? cell->val : NULL;

		prepare_vacuum_command(&sql, conn, vacopts, tabname);

		if (CancelRequested)
		{
			failed = true;
			goto finish;
		}

		/*
		 * Get the connection slot to use. If in parallel mode, here we wait
		 * for one connection to become available if none already is. In
		 * non-parallel mode we simply use the only slot we have, which we
		 * know to be free.
		 */
		if (parallel)
		{
			/*
			 * Get a free slot, waiting until one becomes free if none
			 * currently is.
			 */
			free_slot = GetIdleSlot(slots, concurrentCons, dbname, progname);
			if (!free_slot)
			{
				failed = true;
				goto finish;
			}

			free_slot->isFree = false;
		}
		else
			free_slot = slots;

		run_vacuum_command(free_slot->connection, sql.data,
						   echo, dbname, tabname, progname, parallel);

		if (cell)
			cell = cell->next;
	} while (cell != NULL);

	if (parallel)
	{
		int			j;

		for (j = 0; j < concurrentCons; j++)
		{
			/* wait for all connection to return the results */
			if (!GetQueryResult((slots + j)->connection, dbname, progname))
				goto finish;

			(slots + j)->isFree = true;
		}
	}

finish:
	for (i = 0; i < concurrentCons; i++)
		DisconnectDatabase(slots + i);
	pfree(slots);

	termPQExpBuffer(&sql);

	if (failed)
		exit(1);
}
int handle_op_records_put(RecordsPutOp * const put_op, HttpHandlerContext * const context) { yajl_gen json_gen; PanDB *pan_db; if (get_pan_db_by_layer_name(context, put_op->layer_name->val, AUTOMATICALLY_CREATE_LAYERS, &pan_db) < 0) { release_key(put_op->layer_name); release_key(put_op->key); free_slip_map(&put_op->properties); free_slip_map(&put_op->special_properties); return HTTP_NOTFOUND; } release_key(put_op->layer_name); int status; KeyNode *key_node; status = get_key_node_from_key(pan_db, put_op->key, 1, &key_node); release_key(put_op->key); if (key_node == NULL) { assert(status <= 0); free_slip_map(&put_op->properties); free_slip_map(&put_op->special_properties); return HTTP_NOTFOUND; } assert(status > 0); const Rectangle2D * const qbounds = &pan_db->qbounds; if (put_op->position_set != 0 && !(put_op->position.latitude >= qbounds->edge0.latitude && put_op->position.longitude >= qbounds->edge0.longitude && put_op->position.latitude < qbounds->edge1.latitude && put_op->position.longitude < qbounds->edge1.longitude)) { put_op->position_set = 0; } if (status > 0 && put_op->position_set != 0 && key_node->slot != NULL) { #if PROJECTION const Position2D * const previous_position = &key_node->slot->real_position; #else const Position2D * const previous_position = &key_node->slot->position; #endif if (previous_position->latitude == put_op->position.latitude && previous_position->longitude == put_op->position.longitude) { put_op->position_set = 0; } else { remove_entry_from_key_node(pan_db, key_node, 0); assert(key_node->slot != NULL); key_node->slot = NULL; } } if (put_op->position_set != 0) { Slot slot; Slot *new_slot; init_slot(&slot); slot = (Slot) { #if PROJECTION .real_position = put_op->position, #endif .position = put_op->position, .key_node = key_node }; if (add_slot(pan_db, &slot, &new_slot) != 0) { RB_REMOVE(KeyNodes_, &pan_db->key_nodes, key_node); key_node->slot = NULL; free_key_node(pan_db, key_node); free_slip_map(&put_op->properties); 
free_slip_map(&put_op->special_properties); return HTTP_SERVUNAVAIL; } key_node->slot = new_slot; assert(new_slot != NULL); } if (put_op->special_properties != NULL) { RecordsPutApplySpecialPropertiesCBContext cb_context = { .target_map_pnt = &key_node->properties }; slip_map_foreach(&put_op->special_properties, records_put_apply_special_properties_cb, &cb_context); free_slip_map(&put_op->special_properties); } if (key_node->properties == NULL) { key_node->properties = put_op->properties; } else { RecordsPutMergePropertiesCBContext cb_context = { .target_map_pnt = &key_node->properties }; slip_map_foreach(&put_op->properties, records_put_merge_properties_cb, &cb_context); free_slip_map(&put_op->properties); } Expirable *expirable = key_node->expirable; if (put_op->expires_at != (time_t) 0) { if (expirable == NULL) { Expirable new_expirable = { .ts = put_op->expires_at, .key_node = key_node }; expirable = add_entry_to_slab(&context->expirables_slab, &new_expirable); key_node->expirable = expirable; add_expirable_to_tree(pan_db, expirable); } else { if (expirable->ts != put_op->expires_at) { remove_expirable_from_tree(pan_db, expirable); expirable->ts = put_op->expires_at; add_expirable_to_tree(pan_db, expirable); } } assert(expirable->key_node == key_node); } else if (expirable != NULL) {
/*
 * vacuum_one_database
 *
 * Process tables in the given database. If the 'tables' list is empty,
 * process all tables in the database.
 *
 * Note that this function is only concerned with running exactly one stage
 * when in analyze-in-stages mode; caller must iterate on us if necessary.
 *
 * If concurrentCons is > 1, multiple connections are used to vacuum tables
 * in parallel. In this case and if the table list is empty, we first obtain
 * a list of tables from the database.
 */
static void
vacuum_one_database(const char *dbname, vacuumingOptions *vacopts,
					int stage,
					SimpleStringList *tables,
					const char *host, const char *port,
					const char *username, enum trivalue prompt_password,
					int concurrentCons,
					const char *progname, bool echo, bool quiet)
{
	PQExpBufferData sql;
	PQExpBufferData buf;
	PQExpBufferData catalog_query;
	PGresult   *res;
	PGconn	   *conn;
	SimpleStringListCell *cell;
	ParallelSlot *slots;
	SimpleStringList dbtables = {NULL, NULL};
	int			i;
	int			ntups;
	bool		failed = false;
	bool		parallel = concurrentCons > 1;
	bool		tables_listed = false;
	bool		has_where = false;
	const char *stage_commands[] = {
		"SET default_statistics_target=1; SET vacuum_cost_delay=0;",
		"SET default_statistics_target=10; RESET vacuum_cost_delay;",
		"RESET default_statistics_target;"
	};
	const char *stage_messages[] = {
		gettext_noop("Generating minimal optimizer statistics (1 target)"),
		gettext_noop("Generating medium optimizer statistics (10 targets)"),
		gettext_noop("Generating default (full) optimizer statistics")
	};

	Assert(stage == ANALYZE_NO_STAGE ||
		   (stage >= 0 && stage < ANALYZE_NUM_STAGES));

	conn = connectDatabase(dbname, host, port, username, prompt_password,
						   progname, echo, false, true);

	/* Reject options the connected server version cannot honor. */
	if (vacopts->disable_page_skipping && PQserverVersion(conn) < 90600)
	{
		PQfinish(conn);
		fprintf(stderr,
				_("%s: cannot use the \"%s\" option on server versions older than PostgreSQL 9.6\n"),
				progname, "disable-page-skipping");
		exit(1);
	}

	if (vacopts->skip_locked && PQserverVersion(conn) < 120000)
	{
		PQfinish(conn);
		fprintf(stderr,
				_("%s: cannot use the \"%s\" option on server versions older than PostgreSQL 12\n"),
				progname, "skip-locked");
		exit(1);
	}

	/* NOTE(review): the two checks below exit without PQfinish(conn);
	 * harmless since exit() follows, but inconsistent with the two
	 * checks above -- confirm intent. */
	if (vacopts->min_xid_age != 0 && PQserverVersion(conn) < 90600)
	{
		fprintf(stderr,
				_("%s: cannot use the \"%s\" option on server versions older than PostgreSQL 9.6\n"),
				progname, "--min-xid-age");
		exit(1);
	}

	if (vacopts->min_mxid_age != 0 && PQserverVersion(conn) < 90600)
	{
		fprintf(stderr,
				_("%s: cannot use the \"%s\" option on server versions older than PostgreSQL 9.6\n"),
				progname, "--min-mxid-age");
		exit(1);
	}

	if (!quiet)
	{
		if (stage != ANALYZE_NO_STAGE)
			printf(_("%s: processing database \"%s\": %s\n"),
				   progname, PQdb(conn), _(stage_messages[stage]));
		else
			printf(_("%s: vacuuming database \"%s\"\n"),
				   progname, PQdb(conn));
		fflush(stdout);
	}

	/*
	 * Prepare the list of tables to process by querying the catalogs.
	 *
	 * Since we execute the constructed query with the default search_path
	 * (which could be unsafe), everything in this query MUST be fully
	 * qualified.
	 *
	 * First, build a WITH clause for the catalog query if any tables were
	 * specified, with a set of values made of relation names and their
	 * optional set of columns. This is used to match any provided column
	 * lists with the generated qualified identifiers and to filter for the
	 * tables provided via --table. If a listed table does not exist, the
	 * catalog query will fail.
	 */
	initPQExpBuffer(&catalog_query);
	for (cell = tables ? tables->head : NULL; cell; cell = cell->next)
	{
		char	   *just_table;
		const char *just_columns;

		/*
		 * Split relation and column names given by the user, this is used to
		 * feed the CTE with values on which are performed pre-run validity
		 * checks as well. For now these happen only on the relation name.
		 */
		splitTableColumnsSpec(cell->val, PQclientEncoding(conn),
							  &just_table, &just_columns);

		if (!tables_listed)
		{
			appendPQExpBuffer(&catalog_query,
							  "WITH listed_tables (table_oid, column_list) "
							  "AS (\n VALUES (");
			tables_listed = true;
		}
		else
			appendPQExpBuffer(&catalog_query, ",\n (");

		appendStringLiteralConn(&catalog_query, just_table, conn);
		appendPQExpBuffer(&catalog_query, "::pg_catalog.regclass, ");

		if (just_columns && just_columns[0] != '\0')
			appendStringLiteralConn(&catalog_query, just_columns, conn);
		else
			appendPQExpBufferStr(&catalog_query, "NULL");

		appendPQExpBufferStr(&catalog_query, "::pg_catalog.text)");

		pg_free(just_table);
	}

	/* Finish formatting the CTE */
	if (tables_listed)
		appendPQExpBuffer(&catalog_query, "\n)\n");

	appendPQExpBuffer(&catalog_query, "SELECT c.relname, ns.nspname");

	if (tables_listed)
		appendPQExpBuffer(&catalog_query, ", listed_tables.column_list");

	appendPQExpBuffer(&catalog_query,
					  " FROM pg_catalog.pg_class c\n"
					  " JOIN pg_catalog.pg_namespace ns"
					  " ON c.relnamespace OPERATOR(pg_catalog.=) ns.oid\n"
					  " LEFT JOIN pg_catalog.pg_class t"
					  " ON c.reltoastrelid OPERATOR(pg_catalog.=) t.oid\n");

	/* Used to match the tables listed by the user */
	if (tables_listed)
		appendPQExpBuffer(&catalog_query, " JOIN listed_tables"
						  " ON listed_tables.table_oid OPERATOR(pg_catalog.=) c.oid\n");

	/*
	 * If no tables were listed, filter for the relevant relation types. If
	 * tables were given via --table, don't bother filtering by relation type.
	 * Instead, let the server decide whether a given relation can be
	 * processed in which case the user will know about it.
	 */
	if (!tables_listed)
	{
		appendPQExpBuffer(&catalog_query, " WHERE c.relkind OPERATOR(pg_catalog.=) ANY (array["
						  CppAsString2(RELKIND_RELATION) ", "
						  CppAsString2(RELKIND_MATVIEW) "])\n");
		has_where = true;
	}

	/*
	 * For --min-xid-age and --min-mxid-age, the age of the relation is the
	 * greatest of the ages of the main relation and its associated TOAST
	 * table. The commands generated by vacuumdb will also process the TOAST
	 * table for the relation if necessary, so it does not need to be
	 * considered separately.
	 */
	if (vacopts->min_xid_age != 0)
	{
		appendPQExpBuffer(&catalog_query,
						  " %s GREATEST(pg_catalog.age(c.relfrozenxid),"
						  " pg_catalog.age(t.relfrozenxid)) "
						  " OPERATOR(pg_catalog.>=) '%d'::pg_catalog.int4\n"
						  " AND c.relfrozenxid OPERATOR(pg_catalog.!=)"
						  " '0'::pg_catalog.xid\n",
						  has_where ? "AND" : "WHERE", vacopts->min_xid_age);
		has_where = true;
	}

	if (vacopts->min_mxid_age != 0)
	{
		appendPQExpBuffer(&catalog_query,
						  " %s GREATEST(pg_catalog.mxid_age(c.relminmxid),"
						  " pg_catalog.mxid_age(t.relminmxid)) OPERATOR(pg_catalog.>=)"
						  " '%d'::pg_catalog.int4\n"
						  " AND c.relminmxid OPERATOR(pg_catalog.!=)"
						  " '0'::pg_catalog.xid\n",
						  has_where ? "AND" : "WHERE", vacopts->min_mxid_age);
		has_where = true;
	}

	/*
	 * Execute the catalog query. We use the default search_path for this
	 * query for consistency with table lookups done elsewhere by the user.
	 */
	appendPQExpBuffer(&catalog_query, " ORDER BY c.relpages DESC;");
	executeCommand(conn, "RESET search_path;", progname, echo);
	res = executeQuery(conn, catalog_query.data, progname, echo);
	termPQExpBuffer(&catalog_query);
	PQclear(executeQuery(conn, ALWAYS_SECURE_SEARCH_PATH_SQL,
						 progname, echo));

	/*
	 * If no rows are returned, there are no matching tables, so we are done.
	 */
	ntups = PQntuples(res);
	if (ntups == 0)
	{
		PQclear(res);
		PQfinish(conn);
		return;
	}

	/*
	 * Build qualified identifiers for each table, including the column list
	 * if given.
	 */
	initPQExpBuffer(&buf);
	for (i = 0; i < ntups; i++)
	{
		appendPQExpBufferStr(&buf,
							 fmtQualifiedId(PQgetvalue(res, i, 1),
											PQgetvalue(res, i, 0)));

		if (tables_listed && !PQgetisnull(res, i, 2))
			appendPQExpBufferStr(&buf, PQgetvalue(res, i, 2));

		simple_string_list_append(&dbtables, buf.data);
		resetPQExpBuffer(&buf);
	}
	termPQExpBuffer(&buf);
	PQclear(res);

	/*
	 * If there are more connections than vacuumable relations, we don't need
	 * to use them all.
	 */
	if (parallel)
	{
		if (concurrentCons > ntups)
			concurrentCons = ntups;
		if (concurrentCons <= 1)
			parallel = false;
	}

	/*
	 * Setup the database connections. We reuse the connection we already have
	 * for the first slot. If not in parallel mode, the first slot in the
	 * array contains the connection.
	 */
	if (concurrentCons <= 0)
		concurrentCons = 1;
	slots = (ParallelSlot *) pg_malloc(sizeof(ParallelSlot) * concurrentCons);
	init_slot(slots, conn);
	if (parallel)
	{
		for (i = 1; i < concurrentCons; i++)
		{
			conn = connectDatabase(dbname, host, port, username,
								   prompt_password, progname, echo, false, true);
			init_slot(slots + i, conn);
		}
	}

	/*
	 * Prepare all the connections to run the appropriate analyze stage, if
	 * caller requested that mode.
	 */
	if (stage != ANALYZE_NO_STAGE)
	{
		int			j;

		/* We already emitted the message above */

		for (j = 0; j < concurrentCons; j++)
			executeCommand((slots + j)->connection,
						   stage_commands[stage], progname, echo);
	}

	initPQExpBuffer(&sql);

	cell = dbtables.head;
	do
	{
		const char *tabname = cell->val;
		ParallelSlot *free_slot;

		if (CancelRequested)
		{
			failed = true;
			goto finish;
		}

		/*
		 * Get the connection slot to use. If in parallel mode, here we wait
		 * for one connection to become available if none already is. In
		 * non-parallel mode we simply use the only slot we have, which we
		 * know to be free.
		 */
		if (parallel)
		{
			/*
			 * Get a free slot, waiting until one becomes free if none
			 * currently is.
			 */
			free_slot = GetIdleSlot(slots, concurrentCons, progname);
			if (!free_slot)
			{
				failed = true;
				goto finish;
			}

			free_slot->isFree = false;
		}
		else
			free_slot = slots;

		prepare_vacuum_command(&sql, PQserverVersion(free_slot->connection),
							   vacopts, tabname);

		/*
		 * Execute the vacuum. If not in parallel mode, this terminates the
		 * program in case of an error. (The parallel case handles query
		 * errors in ProcessQueryResult through GetIdleSlot.)
		 */
		run_vacuum_command(free_slot->connection, sql.data,
						   echo, tabname, progname, parallel);

		cell = cell->next;
	} while (cell != NULL);

	if (parallel)
	{
		int			j;

		/* wait for all connections to finish */
		for (j = 0; j < concurrentCons; j++)
		{
			if (!GetQueryResult((slots + j)->connection, progname))
				goto finish;
		}
	}

finish:
	for (i = 0; i < concurrentCons; i++)
		DisconnectDatabase(slots + i);
	pfree(slots);

	termPQExpBuffer(&sql);

	if (failed)
		exit(1);
}
/*
 * Find (or allocate) the child slot associated with 'uid'.
 *
 * Scans existing slots for a matching uid.  If none is found and
 * no_alloc is 0, a slot is taken from the open-slot free list; when
 * the list is empty the slot pointer array is grown by _NSCD_PUN_BLOCK
 * entries.  Returns NULL when no slot is found or allocatable.  All
 * slot bookkeeping happens under child_lock.
 *
 * BUGFIX: the grow path copied the old pointer array with
 * memcpy(..., sizeof (child_t) * max_pu_nscd).  'child' is an array of
 * child_t POINTERS, so using sizeof (child_t) overreads the old array
 * (and can overrun the new one).  The element size must be
 * sizeof (child_t *).
 */
static child_t *
get_cslot(
	uid_t		uid,
	int		no_alloc)
{
	int	i;
	child_t	*ch, *ret = NULL;
	char	*me = "get_cslot";

	(void) mutex_lock(&child_lock);

	_NSCD_LOG(NSCD_LOG_SELF_CRED, NSCD_LOG_LEVEL_DEBUG)
	(me, "looking for uid %d (slot used = %d)\n", uid, used_slot);

	/* first find the slot with a matching uid */
	for (i = 0; i <= used_slot; i++) {
		ch = child[i];
		if (ch->child_state >= CHILD_STATE_UIDKNOWN &&
		    ch->child_uid == uid) {
			ret = ch;
			(void) mutex_unlock(&child_lock);

			_NSCD_LOG(NSCD_LOG_SELF_CRED, NSCD_LOG_LEVEL_DEBUG)
			(me, "slot %d found with uid %d\n",
			    ret->child_slot, ret->child_uid);

			return (ret);
		}
	}

	/* if no need to allocate a new slot, return NULL */
	if (no_alloc == 1) {
		(void) mutex_unlock(&child_lock);
		return (ret);
	}

	/* no open slot ? get a new one */
	if (open_head == -1) {

		/* if no slot available, allocate more */
		if (used_slot >= max_pu_nscd - 1) {
			child_t	**tmp;
			int	newmax = max_pu_nscd + _NSCD_PUN_BLOCK;

			tmp = (child_t **)calloc(newmax, sizeof (child_t *));
			if (tmp == NULL) {
				(void) mutex_unlock(&child_lock);
				return (ret);
			}
			/* copy the old POINTER array (element size is a
			 * pointer, not a child_t) and swap it in */
			(void) memcpy(tmp, child, sizeof (child_t *) *
			    max_pu_nscd);
			free(child);
			child = tmp;
			max_pu_nscd = newmax;
		}
		used_slot++;
		if (init_slot(used_slot) == -1) {
			/* init failed: roll back the slot count */
			used_slot--;
			(void) mutex_unlock(&child_lock);
			return (ret);
		}
		ch = child[used_slot];
	} else {
		/* pop the head of the open-slot free list */
		ch = child[open_head];
		open_head = ch->next_open;
		/* got last one ? reset tail */
		if (open_head == -1)
			open_tail = -1;
		ch->next_open = -1;
	}

	ch->child_uid = uid;
	ch->child_state = CHILD_STATE_UIDKNOWN;
	ret = ch;

	(void) mutex_unlock(&child_lock);
	return (ret);
}