/* read the command-line options and set the global_config_filename and
 * local_config_filename fields accordingly */
int libconf_phase1(libconf_t *handle)
{
    int argc = handle->argc;
    char **argv = handle->argv;
    libconf_phase_error_t have_error;
    libconf_optparam_t *param;
    libconf_do_on_error_t error_action = DOE_ERROR;
    int have_param;
    char *param_name;
    char *tmp_str;
    libconf_opt_t *option;
    libconf_opt_t t_option;

    handle->argv0 = *argv;
    argv++;
    argc--;
    while (argc > 0) {
        have_error = ET_NONE; /* was PT_NONE, a param-type constant; the error enum is ET_* */
        param = NULL;
        have_param = 0;
        tmp_str = NULL;
        switch (argv[0][0]) {
        case '-':
            switch (argv[0][1]) {
            case '-':
                /* we have a long option */
                option = hash_get(handle->options, argv[0] + 2);
                if (option != NULL) {
                    /* we've found the option */
                    param_name = option->co_name;
                    error_action = option->do_on_error;
                    if (option->co_long_takes_param != TP_NO) {
                        if (argc > 1 && argv[1][0] && argv[1][0] != '-') {
                            /* we have our option's param */
                            switch (option->co_long_param_type) {
                            case PT_NUMERIC_LIST:
                            case PT_STRING_LIST:
                            case PT_FILENAME_LIST:
                                param = hash_get(handle->tmp_hash, option->co_name);
                                if (param != NULL)
                                    vector_push_back(param->val.vector_val, strdup(argv[1]));
                                /* fall through */
                            default:
                                if (param == NULL)
                                    param = libconf_optparam_new(option->co_name,
                                            option->co_long_param_type, argv[1]);
                            }
                            if (param == NULL)
                                have_error = ET_PARAM_MALFORMED;
                            else if (param->have_error)
                                have_error = ET_PARAM_MALFORMED;
                            have_param = 1;
                        } else if (option->co_long_takes_param == TP_YES) {
                            /* we should have had a param, but we don't */
                            have_error = ET_EXPECTED_PARAM_TP_NOT_FOUND;
                        }
                    }
                    hash_put(handle->tmp_hash, strdup(option->co_name), param);
                } else {
                    have_error = ET_UNKNOWN_OPTION;
                }
                break;
            case 0:
                /* we should stop treating command-line options and
                 * concatenate the rest of the command-line, separated by
                 * spaces. */
                argv++;
                argc--;
                while (argc) {
                    /* consume the argument before advancing: the original
                     * advanced first, which skipped one argument and read
                     * one past the end of argv on the last iteration */
                    tmp_str = catstr(tmp_str, argv[0]);
                    argv++;
                    argc--;
                }
                param = libconf_optparam_new("-", PT_STRING, tmp_str);
                free(tmp_str);
                hash_put(handle->tmp_hash, strdup("-"), param);
                break;
            default:
                /* we have a short option */
                t_option.co_short_opt = argv[0][1];
                option = hash_search(handle->options, &t_option, libconf_phase1_helper1);
                if (option != NULL) {
                    /* we have our option */
                    param_name = option->co_name;
                    error_action = option->do_on_error;
                    if (option->co_short_takes_param != TP_NO) {
                        if (argv[0][2]) {
                            /* we have a parameter directly following */
                            switch (option->co_short_param_type) {
                            case PT_NUMERIC_LIST:
                            case PT_STRING_LIST:
                            case PT_FILENAME_LIST:
                                param = hash_get(handle->tmp_hash, option->co_name);
                                if (param != NULL)
                                    vector_push_back(param->val.vector_val, strdup(argv[0] + 2));
                                /* fall through */
                            default:
                                if (param == NULL)
                                    param = libconf_optparam_new(option->co_name,
                                            option->co_short_param_type, argv[0] + 2);
                            }
                            if (param == NULL)
                                have_error = ET_PARAM_MALFORMED;
                            else if (param->have_error)
                                have_error = ET_PARAM_MALFORMED;
                            /* have_param stays 0: an attached param doesn't consume argv[1] */
                        } else if (argc > 1 && argv[1][0] && argv[1][0] != '-') {
                            /* we have a parameter */
                            switch (option->co_short_param_type) {
                            case PT_NUMERIC_LIST:
                            case PT_STRING_LIST:
                            case PT_FILENAME_LIST:
                                param = hash_get(handle->tmp_hash, option->co_name);
                                if (param != NULL)
                                    vector_push_back(param->val.vector_val, strdup(argv[1]));
                                /* fall through */
                            default:
                                if (param == NULL)
                                    param = libconf_optparam_new(option->co_name,
                                            option->co_short_param_type, argv[1]);
                            }
                            if (param == NULL)
                                have_error = ET_PARAM_MALFORMED;
                            else if (param->have_error)
                                have_error = ET_PARAM_MALFORMED;
                            have_param = 1;
                        } else if (option->co_short_takes_param == TP_YES) {
                            /* we should have had a param, but we don't */
                            have_error = ET_EXPECTED_PARAM_TP_NOT_FOUND;
                        }
                    }
                    hash_put(handle->tmp_hash, strdup(option->co_name), param);
                } else {
                    have_error = ET_UNKNOWN_OPTION;
                }
                break;
            }
            break;
        default:
            /* no option? */
            have_error = ET_EXPECTED_OPTION_TP_NOT_FOUND;
            break;
        }
        if ((have_error != ET_NONE) && (error_action != DOE_NOTHING)) {
            switch (error_action) {
            case DOE_WARNING:
                fprintf(stderr, "%s: Warning: ", handle->argv0);
                break;
            case DOE_ERROR:
                fprintf(stderr, "%s: Error: ", handle->argv0);
                break;
            default:
                break;
            }
            switch (have_error) {
            case ET_EXPECTED_PARAM_TP_NOT_FOUND:
                fprintf(stderr, "expected parameter not found for option %s\n", argv[0]);
                break;
            case ET_PARAM_MALFORMED:
                fprintf(stderr, "parameter malformed for option %s\n", argv[0]);
                break;
            case ET_EXPECTED_OPTION_TP_NOT_FOUND:
                fprintf(stderr, "not an option %s\n", argv[0]);
                break;
            case ET_UNKNOWN_OPTION:
                fprintf(stderr, "unknown option %s\n", argv[0]);
                break;
            default:
                break;
            }
            if (error_action == DOE_ERROR)
                exit(1);
        }
        if (have_param) {
            argv++;
            argc--;
        }
        argv++;
        argc--;
    }
    return 0;
}
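/* The short-option branch above passes a predicate to hash_search() that is
 * not shown in this file. A minimal sketch of what such a helper could look
 * like follows; the signature and semantics are assumptions, inferred from
 * the call site, which probes with a libconf_opt_t whose co_short_opt is the
 * character being matched. */
static int libconf_phase1_helper1(const void *entry, const void *probe)
{
    const libconf_opt_t *opt = entry;  /* stored option */
    const libconf_opt_t *key = probe;  /* t_option probe from libconf_phase1 */

    return opt->co_short_opt == key->co_short_opt;
}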
/*
 * CompactCheckpointerRequestQueue
 *      Remove duplicates from the request queue to avoid backend fsyncs.
 *      Returns "true" if any entries were removed.
 *
 * Although a full fsync request queue is not common, it can lead to severe
 * performance problems when it does happen. So far, this situation has
 * only been observed to occur when the system is under heavy write load,
 * and especially during the "sync" phase of a checkpoint. Without this
 * logic, each backend begins doing an fsync for every block written, which
 * gets very expensive and can slow down the whole system.
 *
 * Trying to do this every time the queue is full could lose if there
 * aren't any removable entries. But that should be vanishingly rare in
 * practice: there's one queue entry per shared buffer.
 */
static bool
CompactCheckpointerRequestQueue(void)
{
    struct CheckpointerSlotMapping
    {
        CheckpointerRequest request;
        int slot;
    };

    int n, preserve_count;
    int num_skipped = 0;
    HASHCTL ctl;
    HTAB *htab;
    bool *skip_slot;

    /* must hold CheckpointerCommLock in exclusive mode */
    Assert(LWLockHeldByMe(CheckpointerCommLock));

    /* Initialize skip_slot array */
    skip_slot = palloc0(sizeof(bool) * CheckpointerShmem->num_requests);

    /* Initialize temporary hash table */
    MemSet(&ctl, 0, sizeof(ctl));
    ctl.keysize = sizeof(CheckpointerRequest);
    ctl.entrysize = sizeof(struct CheckpointerSlotMapping);
    ctl.hcxt = CurrentMemoryContext;

    htab = hash_create("CompactCheckpointerRequestQueue",
                       CheckpointerShmem->num_requests,
                       &ctl,
                       HASH_ELEM | HASH_BLOBS | HASH_CONTEXT);

    /*
     * The basic idea here is that a request can be skipped if it's followed
     * by a later, identical request. It might seem more sensible to work
     * backwards from the end of the queue and check whether a request is
     * *preceded* by an earlier, identical request, in the hopes of doing less
     * copying. But that might change the semantics, if there's an
     * intervening FORGET_RELATION_FSYNC or FORGET_DATABASE_FSYNC request, so
     * we do it this way. It would be possible to be even smarter if we made
     * the code below understand the specific semantics of such requests (it
     * could blow away preceding entries that would end up being canceled
     * anyhow), but it's not clear that the extra complexity would buy us
     * anything.
     */
    for (n = 0; n < CheckpointerShmem->num_requests; n++)
    {
        CheckpointerRequest *request;
        struct CheckpointerSlotMapping *slotmap;
        bool found;

        /*
         * We use the request struct directly as a hashtable key. This
         * assumes that any padding bytes in the structs are consistently the
         * same, which should be okay because we zeroed them in
         * CheckpointerShmemInit. Note also that RelFileNode had better
         * contain no pad bytes.
         */
        request = &CheckpointerShmem->requests[n];
        slotmap = hash_search(htab, request, HASH_ENTER, &found);
        if (found)
        {
            /* Duplicate, so mark the previous occurrence as skippable */
            skip_slot[slotmap->slot] = true;
            num_skipped++;
        }
        /* Remember slot containing latest occurrence of this request value */
        slotmap->slot = n;
    }

    /* Done with the hash table. */
    hash_destroy(htab);

    /* If no duplicates, we're out of luck. */
    if (!num_skipped)
    {
        pfree(skip_slot);
        return false;
    }

    /* We found some duplicates; remove them. */
    preserve_count = 0;
    for (n = 0; n < CheckpointerShmem->num_requests; n++)
    {
        if (skip_slot[n])
            continue;
        CheckpointerShmem->requests[preserve_count++] = CheckpointerShmem->requests[n];
    }
    ereport(DEBUG1,
            (errmsg("compacted fsync request queue from %d entries to %d entries",
                    CheckpointerShmem->num_requests, preserve_count)));
    CheckpointerShmem->num_requests = preserve_count;

    /* Cleanup. */
    pfree(skip_slot);
    return true;
}
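/* The comment above leans on a general rule for raw-struct hash keys: pad
 * bytes must be byte-wise identical for logically equal keys, or a byte-wise
 * hash/compare sees them as different. A minimal standalone illustration of
 * that rule (hypothetical types, not from this file): */
#include <string.h>

struct demo_key
{
    char tag;   /* on typical ABIs, 3 pad bytes follow before 'id' */
    int id;
};

static void
make_demo_key(struct demo_key *k, char tag, int id)
{
    memset(k, 0, sizeof(*k));   /* zero the padding so memcmp/hashing see equal bytes */
    k->tag = tag;
    k->id = id;
}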
/* Resolve dependencies for a given package
 * @param curl curl handle
 * @param hashdb hash database
 * @param curpkg current package we are resolving
 * @param dep_list pointer to list to store resulting dependencies
 * @param resolve_lvl level of dep resolution. RESOLVE_THOROUGH forces
 *        downloading of AUR PKGBUILDs
 *
 * returns -1 on error, 0 on success
 */
static int crawl_resolve(CURL *curl, struct pw_hashdb *hashdb, struct pkgpair *curpkg,
                         alpm_list_t **dep_list, int resolve_lvl)
{
    alpm_list_t *i, *depmod_list, *deps = NULL;
    struct pkgpair *pkgpair;
    struct pkgpair tmppkg;
    void *pkg_provides;
    void *memlist_ptr;
    const char *cache_result;
    const char *depname, *final_pkgname;
    char cwd[PATH_MAX];
    char buf[PATH_MAX];

    /* Normalize package before doing anything else */
    final_pkgname = normalize_package(curl, hashdb, curpkg->pkgname, resolve_lvl);
    if (!final_pkgname) {
        return -1;
    }

    enum pkgfrom_t *from = hashmap_search(hashdb->pkg_from, (void *) final_pkgname);
    if (!from) {
        die("Failed to find out where package \"%s\" is from!\n", final_pkgname);
    }

    switch (*from) {
    case PKG_FROM_LOCAL:
        tmppkg.pkgname = final_pkgname;
        pkgpair = hash_search(hashdb->local, &tmppkg);
        goto get_deps;
    case PKG_FROM_SYNC:
        tmppkg.pkgname = final_pkgname;
        pkgpair = hash_search(hashdb->sync, &tmppkg);
        goto get_deps;
    default:
        goto aur_deps;
    }

aur_uptodate:
    tmppkg.pkgname = final_pkgname;
    tmppkg.pkg = NULL;
    pkgpair = hash_search(hashdb->aur, &tmppkg);

get_deps:
    if (!pkgpair) {
        /* Shouldn't happen */
        die("Unable to find package \"%s\" in local/sync db!", final_pkgname);
    }

    depmod_list = alpm_pkg_get_depends(pkgpair->pkg);
    for (i = depmod_list; i; i = i->next) {
        char *s = alpm_dep_compute_string(i->data);
        strncpy(buf, s, sizeof(buf) - 1);
        buf[sizeof(buf) - 1] = '\0'; /* strncpy does not terminate on truncation */
        free(s);
        chompversion(buf);
        depname = normalize_package(curl, hashdb, buf, resolve_lvl);
        /* normalize_package may fail if the AUR download fails */
        if (!depname) {
            alpm_list_free(deps);
            return -1;
        }
        deps = alpm_list_add(deps, (void *) depname);
    }

    if (dep_list) {
        *dep_list = deps;
    } else {
        alpm_list_free(deps);
    }
    return 0;

aur_deps:
    tmppkg.pkgname = final_pkgname;
    tmppkg.pkg = NULL;

    /* For installed AUR packages which are up to date */
    if (resolve_lvl != RESOLVE_THOROUGH) {
        if (hash_search(hashdb->aur, &tmppkg) &&
            !hash_search(hashdb->aur_outdated, (void *) final_pkgname)) {
            /* NOTE: top goto ! */
            goto aur_uptodate;
        }
    }

    /* RESOLVE_THOROUGH / out of date AUR package.
     * Download PKGBUILD and extract deps */
    if (!getcwd(cwd, PATH_MAX)) {
        return error(PW_ERR_GETCWD);
    }

    if (chdir(final_pkgname)) {
        return error(PW_ERR_CHDIR);
    }

    deps = grab_dependencies("PKGBUILD");
    if (chdir(cwd)) {
        alpm_list_free(deps);
        return error(PW_ERR_RESTORECWD);
    }

    if (dep_list) {
        const char *normdep;
        alpm_list_t *new_deps = NULL;

        /* Transfer control to memlist and normalize packages */
        for (i = deps; i; i = i->next) {
            memlist_ptr = memlist_add(hashdb->strpool, &i->data);
            normdep = normalize_package(curl, hashdb, memlist_ptr, resolve_lvl);
            new_deps = alpm_list_add(new_deps, (void *) normdep);
        }

        *dep_list = new_deps;
    }

    alpm_list_free(deps);
    return 0;
}
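/* chompversion() is called above but not defined in this file. A sketch of
 * its assumed behavior follows: truncate a libalpm dependency string such as
 * "glibc>=2.35" at the first version comparator, leaving only the package
 * name. The implementation here is hypothetical. */
#include <string.h>

static void chompversion(char *buf)
{
    char *p = strpbrk(buf, "<>=");

    if (p) {
        *p = '\0';
    }
}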
/*
 * find_all_inheritors -
 *      Returns a list of relation OIDs including the given rel plus
 *      all relations that inherit from it, directly or indirectly.
 *      Optionally, it also returns the number of parents found for
 *      each such relation within the inheritance tree rooted at the
 *      given rel.
 *
 * The specified lock type is acquired on all child relations (but not on the
 * given rel; caller should already have locked it). If lockmode is NoLock
 * then no locks are acquired, but caller must beware of race conditions
 * against possible DROPs of child relations.
 */
List *
find_all_inheritors(Oid parentrelId, LOCKMODE lockmode, List **numparents)
{
    /* hash table for O(1) rel_oid -> rel_numparents cell lookup */
    HTAB *seen_rels;
    HASHCTL ctl;
    List *rels_list, *rel_numparents;
    ListCell *l;

    memset(&ctl, 0, sizeof(ctl));
    ctl.keysize = sizeof(Oid);
    ctl.entrysize = sizeof(SeenRelsEntry);
    ctl.hcxt = CurrentMemoryContext;

    seen_rels = hash_create("find_all_inheritors temporary table",
                            32, /* start small and extend */
                            &ctl,
                            HASH_ELEM | HASH_BLOBS | HASH_CONTEXT);

    /*
     * We build a list starting with the given rel and adding all direct and
     * indirect children. We can use a single list as both the record of
     * already-found rels and the agenda of rels yet to be scanned for more
     * children. This is a bit tricky but works because the foreach() macro
     * doesn't fetch the next list element until the bottom of the loop.
     */
    rels_list = list_make1_oid(parentrelId);
    rel_numparents = list_make1_int(0);

    foreach(l, rels_list)
    {
        Oid currentrel = lfirst_oid(l);
        List *currentchildren;
        ListCell *lc;

        /* Get the direct children of this rel */
        currentchildren = find_inheritance_children(currentrel, lockmode);

        /*
         * Add to the queue only those children not already seen. This avoids
         * making duplicate entries in case of multiple inheritance paths from
         * the same parent. (It'll also keep us from getting into an infinite
         * loop, though theoretically there can't be any cycles in the
         * inheritance graph anyway.)
         */
        foreach(lc, currentchildren)
        {
            Oid child_oid = lfirst_oid(lc);
            bool found;
            SeenRelsEntry *hash_entry;

            hash_entry = hash_search(seen_rels, &child_oid, HASH_ENTER, &found);
            if (found)
            {
                /* if the rel is already there, bump number-of-parents counter */
                lfirst_int(hash_entry->numparents_cell)++;
            }
            else
            {
                /* if it's not there, add it. expect 1 parent, initially. */
                rels_list = lappend_oid(rels_list, child_oid);
                rel_numparents = lappend_int(rel_numparents, 1);
                hash_entry->numparents_cell = rel_numparents->tail;
            }
        }
    }

    /* hand back or discard the parent counts, then clean up and return */
    if (numparents)
        *numparents = rel_numparents;
    else
        list_free(rel_numparents);

    hash_destroy(seen_rels);

    return rels_list;
}
/*
 * Load a timezone from file or from cache.
 * Does not verify that the timezone is acceptable!
 *
 * "GMT" is always interpreted as the tzparse() definition, without attempting
 * to load a definition from the filesystem. This has a number of benefits:
 * 1. It's guaranteed to succeed, so we don't have the failure mode wherein
 * the bootstrap default timezone setting doesn't work (as could happen if
 * the OS attempts to supply a leap-second-aware version of "GMT").
 * 2. Because we aren't accessing the filesystem, we can safely initialize
 * the "GMT" zone definition before my_exec_path is known.
 * 3. It's quick enough that we don't waste much time when the bootstrap
 * default timezone setting is later overridden from postgresql.conf.
 */
pg_tz *
pg_tzset(const char *name)
{
    pg_tz_cache *tzp;
    struct state tzstate;
    char uppername[TZ_STRLEN_MAX + 1];
    char canonname[TZ_STRLEN_MAX + 1];
    char *p;

    if (strlen(name) > TZ_STRLEN_MAX)
        return NULL;            /* not going to fit */

    if (!timezone_cache)
        if (!init_timezone_hashtable())
            return NULL;

    /*
     * Upcase the given name to perform a case-insensitive hashtable search.
     * (We could alternatively downcase it, but we prefer upcase so that we
     * can get consistently upcased results from tzparse() in case the name is
     * a POSIX-style timezone spec.)
     */
    p = uppername;
    while (*name)
        *p++ = pg_toupper((unsigned char) *name++);
    *p = '\0';

    tzp = (pg_tz_cache *) hash_search(timezone_cache, uppername,
                                      HASH_FIND, NULL);
    if (tzp)
    {
        /* Timezone found in cache, nothing more to do */
        return &tzp->tz;
    }

    /*
     * "GMT" is always sent to tzparse(), as per discussion above.
     */
    if (strcmp(uppername, "GMT") == 0)
    {
        if (!tzparse(uppername, &tzstate, true))
        {
            /* This really, really should not happen ... */
            elog(ERROR, "could not initialize GMT time zone");
        }
        /* Use uppercase name as canonical */
        strcpy(canonname, uppername);
    }
    else if (tzload(uppername, canonname, &tzstate, true) != 0)
    {
        if (uppername[0] == ':' || !tzparse(uppername, &tzstate, false))
        {
            /* Unknown timezone. Fail our call instead of loading GMT! */
            return NULL;
        }
        /* For POSIX timezone specs, use uppercase name as canonical */
        strcpy(canonname, uppername);
    }

    /* Save timezone in the cache */
    tzp = (pg_tz_cache *) hash_search(timezone_cache, uppername,
                                      HASH_ENTER, NULL);

    /* hash_search already copied uppername into the hash key */
    strcpy(tzp->tz.TZname, canonname);
    memcpy(&tzp->tz.state, &tzstate, sizeof(tzstate));

    return &tzp->tz;
}
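/* The case-insensitive lookup above works by canonicalizing the key before
 * every cache probe, so "utc", "UTC" and "Utc" all hash to the same slot.
 * A standalone sketch of the same normalize-then-hash pattern follows; the
 * helper name and API are hypothetical, not part of pg_tzset(). */
#include <ctype.h>
#include <stddef.h>

static int
normalize_tz_key(char *dst, size_t dstlen, const char *name)
{
    size_t i;

    for (i = 0; name[i] != '\0'; i++)
    {
        if (i + 1 >= dstlen)
            return -1;          /* not going to fit; mirror pg_tzset()'s early-out */
        dst[i] = (char) toupper((unsigned char) name[i]);
    }
    dst[i] = '\0';
    return 0;
}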
/* Log a reference to an invalid page */
static void
log_invalid_page(RelFileNode node, BlockNumber blkno, bool present)
{
    xl_invalid_page_key key;
    xl_invalid_page *hentry;
    bool found;

    /*
     * Log references to invalid pages at DEBUG1 level. This allows some
     * tracing of the cause (note the elog context mechanism will tell us
     * something about the XLOG record that generated the reference).
     */
    if (present)
    {
        elog(DEBUG1, "page %u of relation %u/%u/%u is uninitialized",
             blkno, node.spcNode, node.dbNode, node.relNode);
        if (Debug_persistent_recovery_print)
            elog(PersistentRecovery_DebugPrintLevel(),
                 "log_invalid_page: page %u of relation %u/%u/%u is uninitialized",
                 blkno, node.spcNode, node.dbNode, node.relNode);
    }
    else
    {
        elog(DEBUG1, "page %u of relation %u/%u/%u does not exist",
             blkno, node.spcNode, node.dbNode, node.relNode);
        if (Debug_persistent_recovery_print)
            elog(PersistentRecovery_DebugPrintLevel(),
                 "log_invalid_page: page %u of relation %u/%u/%u does not exist",
                 blkno, node.spcNode, node.dbNode, node.relNode);
    }

    if (invalid_page_tab == NULL)
    {
        /* create hash table when first needed */
        HASHCTL ctl;

        memset(&ctl, 0, sizeof(ctl));
        ctl.keysize = sizeof(xl_invalid_page_key);
        ctl.entrysize = sizeof(xl_invalid_page);
        ctl.hash = tag_hash;

        invalid_page_tab = hash_create("XLOG invalid-page table",
                                       100, &ctl,
                                       HASH_ELEM | HASH_FUNCTION);
    }

    /* we currently assume xl_invalid_page_key contains no padding */
    key.node = node;
    key.blkno = blkno;
    hentry = (xl_invalid_page *)
        hash_search(invalid_page_tab, (void *) &key, HASH_ENTER, &found);

    if (!found)
    {
        /* hash_search already filled in the key */
        hentry->present = present;
    }
    else
    {
        /* repeat reference ... leave "present" as it was */
    }
}
/*
 * Get a PGconn which can be used to execute queries on the remote PostgreSQL
 * server with the user's authorization. A new connection is established
 * if we don't already have a suitable one, and a transaction is opened at
 * the right subtransaction nesting depth if we didn't do that already.
 *
 * will_prep_stmt must be true if caller intends to create any prepared
 * statements. Since those don't go away automatically at transaction end
 * (not even on error), we need this flag to cue manual cleanup.
 *
 * XXX Note that caching connections theoretically requires a mechanism to
 * detect change of FDW objects to invalidate already established connections.
 * We could manage that by watching for invalidation events on the relevant
 * syscaches. For the moment, though, it's not clear that this would really
 * be useful and not mere pedantry. We could not flush any active connections
 * mid-transaction anyway.
 */
PGconn *
GetConnection(ForeignServer *server, UserMapping *user, bool will_prep_stmt)
{
    bool found;
    ConnCacheEntry *entry;
    ConnCacheKey key;

    /* First time through, initialize connection cache hashtable */
    if (ConnectionHash == NULL)
    {
        HASHCTL ctl;

        MemSet(&ctl, 0, sizeof(ctl));
        ctl.keysize = sizeof(ConnCacheKey);
        ctl.entrysize = sizeof(ConnCacheEntry);
        ctl.hash = tag_hash;
        /* allocate ConnectionHash in the cache context */
        ctl.hcxt = CacheMemoryContext;
        ConnectionHash = hash_create("postgres_fdw connections", 8,
                                     &ctl,
                                     HASH_ELEM | HASH_FUNCTION | HASH_CONTEXT);

        /*
         * Register some callback functions that manage connection cleanup.
         * This should be done just once in each backend.
         */
        RegisterXactCallback(pgfdw_xact_callback, NULL);
        RegisterSubXactCallback(pgfdw_subxact_callback, NULL);
    }

    /* Set flag that we did GetConnection during the current transaction */
    xact_got_connection = true;

    /* Create hash key for the entry. Assume no pad bytes in key struct */
    key.serverid = server->serverid;
    key.userid = user->userid;

    /*
     * Find or create cached entry for requested connection.
     */
    entry = hash_search(ConnectionHash, &key, HASH_ENTER, &found);
    if (!found)
    {
        /* initialize new hashtable entry (key is already filled in) */
        entry->conn = NULL;
        entry->xact_depth = 0;
        entry->have_prep_stmt = false;
        entry->have_error = false;
    }

    /*
     * We don't check the health of cached connection here, because it would
     * require some overhead. Broken connection will be detected when the
     * connection is actually used.
     */

    /*
     * If cache entry doesn't have a connection, we have to establish a new
     * connection. (If connect_pg_server throws an error, the cache entry
     * will be left in a valid empty state.)
     */
    if (entry->conn == NULL)
    {
        entry->xact_depth = 0;  /* just to be sure */
        entry->have_prep_stmt = false;
        entry->have_error = false;
        entry->conn = connect_pg_server(server, user);
        elog(DEBUG3, "new postgres_fdw connection %p for server \"%s\"",
             entry->conn, server->servername);
    }

    /*
     * Start a new transaction or subtransaction if needed.
     */
    begin_remote_xact(entry);

    /* Remember if caller will prepare statements */
    entry->have_prep_stmt |= will_prep_stmt;

    return entry->conn;
}
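/* For reference, the shapes implied by the lookups above. This is a sketch
 * reconstructed from the field accesses in GetConnection(); the real
 * postgres_fdw declarations may carry extra fields. Two adjacent Oids leave
 * no padding, which is what lets the key be hashed byte-wise with tag_hash. */
typedef struct ConnCacheKey
{
    Oid serverid;               /* OID of foreign server */
    Oid userid;                 /* OID of local user */
} ConnCacheKey;

typedef struct ConnCacheEntry
{
    ConnCacheKey key;           /* hash key (must be first) */
    PGconn *conn;               /* connection to foreign server, or NULL */
    int xact_depth;             /* nesting depth of open (sub)transaction */
    bool have_prep_stmt;        /* any prepared statements this xact? */
    bool have_error;            /* error since last transaction start? */
} ConnCacheEntry;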
/* Returns a list of outdated AUR packages among targets or all AUR packages.
 * The list and the packages are to be freed by the caller.
 *
 * @param curl curl easy handle
 * @param targets list of strings (package names) that are _definitely_ AUR packages
 */
static alpm_list_t *get_outdated_pkgs(CURL *curl, struct pw_hashdb *hashdb,
                                      alpm_list_t *targets)
{
    alpm_list_t *i;
    alpm_list_t *outdated_pkgs = NULL;
    alpm_list_t *pkglist, *targs;
    struct pkgpair pkgpair;
    struct pkgpair *pkgpair_ptr;
    struct aurpkg_t *aurpkg;
    const char *pkgname, *pkgver;

    if (targets) {
        targs = targets;
    } else {
        targs = NULL;
        alpm_list_t *tmp_targs = hash_to_list(hashdb->aur);
        for (i = tmp_targs; i; i = i->next) {
            pkgpair_ptr = i->data;
            targs = alpm_list_add(targs, (void *) pkgpair_ptr->pkgname);
        }
        alpm_list_free(tmp_targs);
    }

    for (i = targs; i; i = i->next) {
        pkglist = query_aur(curl, i->data, AUR_QUERY_INFO);
        if (!pkglist) {
            continue;
        }

        pkgpair.pkgname = i->data;
        pkgpair_ptr = hash_search(hashdb->aur, &pkgpair);
        if (!pkgpair_ptr) {
            /* Shouldn't happen; skip rather than dereference NULL below */
            pw_fprintf(PW_LOG_ERROR, stderr,
                       "Unable to find AUR package \"%s\" in hashdb!\n", i->data);
            alpm_list_free_inner(pkglist, (alpm_list_fn_free) aurpkg_free);
            alpm_list_free(pkglist);
            continue;
        }

        aurpkg = pkglist->data;
        pkgver = alpm_pkg_get_version(pkgpair_ptr->pkg);
        pkgname = i->data;

        if (alpm_pkg_vercmp(aurpkg->version, pkgver) > 0) {
            /* Just show outdated package for now */
            pw_printf(PW_LOG_INFO, "%s %s is outdated, %s%s%s%s is available\n",
                      pkgname, pkgver, color.bred, aurpkg->version,
                      color.nocolor, color.bold);

            /* Add to upgrade list */
            outdated_pkgs = alpm_list_add(outdated_pkgs, aurpkg);
            pkglist->data = NULL;
        } else if (config->verbose) {
            pw_printf(PW_LOG_INFO, "%s %s is up to date.\n", pkgname, pkgver);
        }

        alpm_list_free_inner(pkglist, (alpm_list_fn_free) aurpkg_free);
        alpm_list_free(pkglist);
    }

    if (!targets) {
        alpm_list_free(targs);
    }
    return outdated_pkgs;
}
/* -Su, checks AUR packages */
static int sync_upgrade(CURL *curl, alpm_list_t *targets)
{
    int ret = 0;
    int cnt = 0;
    int upgrade_all;
    struct pkgpair pkgpair;
    struct pw_hashdb *hashdb = build_hashdb();

    if (!hashdb) {
        pw_fprintf(PW_LOG_ERROR, stderr, "Failed to build hash database.\n");
        return -1;
    }

    /* Make sure that packages are from AUR */
    alpm_list_t *i, *new_targs = NULL;
    for (i = targets; i; i = i->next) {
        pkgpair.pkgname = i->data;
        if (!hash_search(hashdb->aur, &pkgpair)) {
            if (cnt++) {
                printf(", ");
            }
            pw_printf(PW_LOG_NORM, "%s", i->data);
        } else {
            new_targs = alpm_list_add(new_targs, i->data);
        }
    }

    if (cnt > 1) {
        printf(" are not AUR packages and will not be checked.\n");
    } else if (cnt == 1) {
        printf(" is not an AUR package and will not be checked.\n");
    }

    alpm_list_t *outdated_pkgs = NULL;
    if (!targets) {
        /* Check all AUR packages */
        outdated_pkgs = get_outdated_pkgs(curl, hashdb, NULL);
    } else {
        if (!new_targs) {
            goto cleanup;
        }
        outdated_pkgs = get_outdated_pkgs(curl, hashdb, new_targs);
    }

    if (!outdated_pkgs) {
        pw_printf(PW_LOG_INFO, "All AUR packages are up to date.\n");
        goto cleanup;
    }

    printf("\n");
    pw_printf(PW_LOG_INFO, "Targets:\n");
    print_aurpkg_list(outdated_pkgs);
    printf("\n");

    /* --check, don't upgrade */
    if (config->op_s_check) {
        goto cleanup;
    }

    upgrade_all = config->noconfirm ||
                  yesno("Do you wish to upgrade the above packages?");
    if (upgrade_all) {
        /* Experimental */
        alpm_list_t *final_targets = NULL;
        struct aurpkg_t *aurpkg;
        for (i = outdated_pkgs; i; i = i->next) {
            aurpkg = i->data;
            final_targets = alpm_list_add(final_targets, aurpkg->name);
        }
        ret = upgrade_pkgs(final_targets, hashdb);
        alpm_list_free(final_targets);
    }

cleanup:
    alpm_list_free_inner(outdated_pkgs, (alpm_list_fn_free) aurpkg_free);
    alpm_list_free(outdated_pkgs);
    alpm_list_free(new_targs);
    hashdb_free(hashdb);
    return ret;
}
static void
plx_result_cache_delete(FunctionCallInfo fcinfo)
{
    hash_search(plx_result_cache, &fcinfo, HASH_REMOVE, NULL);
}
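/* The call above keys the cache on the FunctionCallInfo pointer value itself:
 * because dynahash copies keysize bytes from the supplied address, a
 * pointer-keyed table is probed with the *address of* the pointer variable,
 * and HASH_REMOVE with a NULL foundPtr silently ignores a missing entry.
 * A sketch of the matching table setup (assumed, not from this file): */
typedef struct PtrCacheEntry
{
    void *key;                  /* hash key: the pointer value (must be first) */
    int payload;                /* cached data would live here */
} PtrCacheEntry;

static HTAB *
create_ptr_keyed_cache(void)
{
    HASHCTL ctl;

    MemSet(&ctl, 0, sizeof(ctl));
    ctl.keysize = sizeof(void *);
    ctl.entrysize = sizeof(PtrCacheEntry);
    return hash_create("ptr-keyed cache", 16, &ctl, HASH_ELEM | HASH_BLOBS);
}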
/*
 * Get a combo command id that maps to cmin and cmax.
 *
 * We try to reuse old combo command ids when possible.
 */
static CommandId
GetComboCommandId(CommandId cmin, CommandId cmax)
{
    CommandId combocid;
    ComboCidKeyData key;
    ComboCidEntry entry;
    bool found;

    /*
     * Create the hash table and array the first time we need to use combo
     * cids in the transaction.
     */
    if (comboHash == NULL)
    {
        HASHCTL hash_ctl;

        /* Make array first; existence of hash table asserts array exists */
        comboCids = (ComboCidKeyData *)
            MemoryContextAlloc(TopTransactionContext,
                               sizeof(ComboCidKeyData) * CCID_ARRAY_SIZE);
        sizeComboCids = CCID_ARRAY_SIZE;
        usedComboCids = 0;

        memset(&hash_ctl, 0, sizeof(hash_ctl));
        hash_ctl.keysize = sizeof(ComboCidKeyData);
        hash_ctl.entrysize = sizeof(ComboCidEntryData);
        hash_ctl.hcxt = TopTransactionContext;

        comboHash = hash_create("Combo CIDs",
                                CCID_HASH_SIZE,
                                &hash_ctl,
                                HASH_ELEM | HASH_BLOBS | HASH_CONTEXT);
    }

    /*
     * Grow the array if there's not at least one free slot. We must do this
     * before possibly entering a new hashtable entry, else failure to
     * repalloc would leave a corrupt hashtable entry behind.
     */
    if (usedComboCids >= sizeComboCids)
    {
        int newsize = sizeComboCids * 2;

        comboCids = (ComboCidKeyData *)
            repalloc(comboCids, sizeof(ComboCidKeyData) * newsize);
        sizeComboCids = newsize;
    }

    /* Lookup or create a hash entry with the desired cmin/cmax */

    /* We assume there is no struct padding in ComboCidKeyData! */
    key.cmin = cmin;
    key.cmax = cmax;
    entry = (ComboCidEntry) hash_search(comboHash, (void *) &key,
                                        HASH_ENTER, &found);

    if (found)
    {
        /* Reuse an existing combo cid */
        return entry->combocid;
    }

    /* We have to create a new combo cid; we already made room in the array */
    combocid = usedComboCids;

    comboCids[combocid].cmin = cmin;
    comboCids[combocid].cmax = cmax;
    usedComboCids++;

    entry->combocid = combocid;

    return combocid;
}
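/* The comboCids array exists to serve the reverse mapping: given a combo
 * command id, recover the original cmin/cmax. Upstream combocid.c provides
 * accessors along these lines; the exact form shown here is a sketch. */
static CommandId
GetRealCmin(CommandId combocid)
{
    Assert(combocid < usedComboCids);
    return comboCids[combocid].cmin;
}

static CommandId
GetRealCmax(CommandId combocid)
{
    Assert(combocid < usedComboCids);
    return comboCids[combocid].cmax;
}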
/* Log a reference to an invalid page */
static void
log_invalid_page(RelFileNode node, ForkNumber forkno, BlockNumber blkno,
                 bool present)
{
    xl_invalid_page_key key;
    xl_invalid_page *hentry;
    bool found;

    /*
     * Once recovery has reached a consistent state, the invalid-page table
     * should be empty and remain so. If a reference to an invalid page is
     * found after consistency is reached, PANIC immediately. This might seem
     * aggressive, but it's better than letting the invalid reference linger
     * in the hash table until the end of recovery and PANIC there, which
     * might come only much later if this is a standby server.
     */
    if (reachedConsistency)
    {
        report_invalid_page(WARNING, node, forkno, blkno, present);
        elog(PANIC, "WAL contains references to invalid pages");
    }

    /*
     * Log references to invalid pages at DEBUG1 level. This allows some
     * tracing of the cause (note the elog context mechanism will tell us
     * something about the XLOG record that generated the reference).
     */
    if (log_min_messages <= DEBUG1 || client_min_messages <= DEBUG1)
        report_invalid_page(DEBUG1, node, forkno, blkno, present);

    if (invalid_page_tab == NULL)
    {
        /* create hash table when first needed */
        HASHCTL ctl;

        memset(&ctl, 0, sizeof(ctl));
        ctl.keysize = sizeof(xl_invalid_page_key);
        ctl.entrysize = sizeof(xl_invalid_page);

        invalid_page_tab = hash_create("XLOG invalid-page table",
                                       100, &ctl,
                                       HASH_ELEM | HASH_BLOBS);
    }

    /* we currently assume xl_invalid_page_key contains no padding */
    key.node = node;
    key.forkno = forkno;
    key.blkno = blkno;
    hentry = (xl_invalid_page *)
        hash_search(invalid_page_tab, (void *) &key, HASH_ENTER, &found);

    if (!found)
    {
        /* hash_search already filled in the key */
        hentry->present = present;
    }
    else
    {
        /* repeat reference ... leave "present" as it was */
    }
}
/*
 *  mdsync() -- Sync previous writes to stable storage.
 */
void
mdsync(void)
{
    static bool mdsync_in_progress = false;

    HASH_SEQ_STATUS hstat;
    PendingOperationEntry *entry;
    int absorb_counter;

    /*
     * This is only called during checkpoints, and checkpoints should only
     * occur in processes that have created a pendingOpsTable.
     */
    if (!pendingOpsTable)
        elog(ERROR, "cannot sync without a pendingOpsTable");

    /*
     * If we are in the bgwriter, the sync had better include all fsync
     * requests that were queued by backends up to this point. The tightest
     * race condition that could occur is that a buffer that must be written
     * and fsync'd for the checkpoint could have been dumped by a backend just
     * before it was visited by BufferSync(). We know the backend will have
     * queued an fsync request before clearing the buffer's dirtybit, so we
     * are safe as long as we do an Absorb after completing BufferSync().
     */
    AbsorbFsyncRequests();

    /*
     * To avoid excess fsync'ing (in the worst case, maybe a never-terminating
     * checkpoint), we want to ignore fsync requests that are entered into the
     * hashtable after this point --- they should be processed next time,
     * instead. We use mdsync_cycle_ctr to tell old entries apart from new
     * ones: new ones will have cycle_ctr equal to the incremented value of
     * mdsync_cycle_ctr.
     *
     * In normal circumstances, all entries present in the table at this point
     * will have cycle_ctr exactly equal to the current (about to be old)
     * value of mdsync_cycle_ctr. However, if we fail partway through the
     * fsync'ing loop, then older values of cycle_ctr might remain when we
     * come back here to try again. Repeated checkpoint failures would
     * eventually wrap the counter around to the point where an old entry
     * might appear new, causing us to skip it, possibly allowing a checkpoint
     * to succeed that should not have. To forestall wraparound, any time the
     * previous mdsync() failed to complete, run through the table and
     * forcibly set cycle_ctr = mdsync_cycle_ctr.
     *
     * Think not to merge this loop with the main loop, as the problem is
     * exactly that that loop may fail before having visited all the entries.
     * From a performance point of view it doesn't matter anyway, as this path
     * will never be taken in a system that's functioning normally.
     */
    if (mdsync_in_progress)
    {
        /* prior try failed, so update any stale cycle_ctr values */
        hash_seq_init(&hstat, pendingOpsTable);
        while ((entry = (PendingOperationEntry *) hash_seq_search(&hstat)) != NULL)
        {
            entry->cycle_ctr = mdsync_cycle_ctr;
        }
    }

    /* Advance counter so that new hashtable entries are distinguishable */
    mdsync_cycle_ctr++;

    /* Set flag to detect failure if we don't reach the end of the loop */
    mdsync_in_progress = true;

    /* Now scan the hashtable for fsync requests to process */
    absorb_counter = FSYNCS_PER_ABSORB;
    hash_seq_init(&hstat, pendingOpsTable);
    while ((entry = (PendingOperationEntry *) hash_seq_search(&hstat)) != NULL)
    {
        /*
         * If the entry is new then don't process it this time. Note that
         * "continue" bypasses the hash-remove call at the bottom of the loop.
         */
        if (entry->cycle_ctr == mdsync_cycle_ctr)
            continue;

        /* Else assert we haven't missed it */
        Assert((CycleCtr) (entry->cycle_ctr + 1) == mdsync_cycle_ctr);

        /*
         * If fsync is off then we don't have to bother opening the file at
         * all. (We delay checking until this point so that changing fsync on
         * the fly behaves sensibly.) Also, if the entry is marked canceled,
         * fall through to delete it.
         */
        if (enableFsync && !entry->canceled)
        {
            int failures;

            /*
             * If in bgwriter, we want to absorb pending requests every so
             * often to prevent overflow of the fsync request queue. It is
             * unspecified whether newly-added entries will be visited by
             * hash_seq_search, but we don't care since we don't need to
             * process them anyway.
             */
            if (--absorb_counter <= 0)
            {
                AbsorbFsyncRequests();
                absorb_counter = FSYNCS_PER_ABSORB;
            }

            /*
             * The fsync table could contain requests to fsync segments that
             * have been deleted (unlinked) by the time we get to them. Rather
             * than just hoping an ENOENT (or EACCES on Windows) error can be
             * ignored, what we do on error is absorb pending requests and
             * then retry. Since mdunlink() queues a "revoke" message before
             * actually unlinking, the fsync request is guaranteed to be
             * marked canceled after the absorb if it really was this case.
             * DROP DATABASE likewise has to tell us to forget fsync requests
             * before it starts deletions.
             */
            for (failures = 0;; failures++)   /* loop exits at "break" */
            {
                SMgrRelation reln;
                MdfdVec *seg;
                char *path;

                /*
                 * Find or create an smgr hash entry for this relation. This
                 * may seem a bit unclean -- md calling smgr? But it's really
                 * the best solution. It ensures that the open file reference
                 * isn't permanently leaked if we get an error here. (You may
                 * say "but an unreferenced SMgrRelation is still a leak!" Not
                 * really, because the only case in which a checkpoint is done
                 * by a process that isn't about to shut down is in the
                 * bgwriter, and it will periodically do smgrcloseall(). This
                 * fact justifies our not closing the reln in the success path
                 * either, which is a good thing since in non-bgwriter cases
                 * we couldn't safely do that.) Furthermore, in many cases
                 * the relation will have been dirtied through this same smgr
                 * relation, and so we can save a file open/close cycle.
                 */
                reln = smgropen(entry->tag.rnode);

                /*
                 * It is possible that the relation has been dropped or
                 * truncated since the fsync request was entered. Therefore,
                 * allow ENOENT, but only if we didn't fail already on this
                 * file. This applies both during _mdfd_getseg() and during
                 * FileSync, since fd.c might have closed the file behind our
                 * back.
                 */
                seg = _mdfd_getseg(reln, entry->tag.forknum,
                                   entry->tag.segno * ((BlockNumber) RELSEG_SIZE),
                                   false, EXTENSION_RETURN_NULL);
                if (seg != NULL &&
                    FileSync(seg->mdfd_vfd) >= 0)
                    break;      /* success; break out of retry loop */

                /*
                 * XXX is there any point in allowing more than one retry?
                 * Don't see one at the moment, but easy to change the test
                 * here if so.
                 */
                path = _mdfd_segpath(reln, entry->tag.forknum, entry->tag.segno);
                if (!FILE_POSSIBLY_DELETED(errno) || failures > 0)
                    ereport(ERROR,
                            (errcode_for_file_access(),
                             errmsg("could not fsync file \"%s\": %m", path)));
                else
                    ereport(DEBUG1,
                            (errcode_for_file_access(),
                             errmsg("could not fsync file \"%s\" but retrying: %m",
                                    path)));
                pfree(path);

                /*
                 * Absorb incoming requests and check to see if canceled.
                 */
                AbsorbFsyncRequests();
                absorb_counter = FSYNCS_PER_ABSORB;   /* might as well... */

                if (entry->canceled)
                    break;
            }                   /* end retry loop */
        }

        /*
         * If we get here, either we fsync'd successfully, or we don't have to
         * because enableFsync is off, or the entry is (now) marked canceled.
         * Okay to delete it.
         */
        if (hash_search(pendingOpsTable, &entry->tag,
                        HASH_REMOVE, NULL) == NULL)
            elog(ERROR, "pendingOpsTable corrupted");
    }                           /* end loop over hashtable entries */

    /* Flag successful completion of mdsync */
    mdsync_in_progress = false;
}
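/* The Assert in the loop above relies on unsigned modular arithmetic: casting
 * (entry->cycle_ctr + 1) back to CycleCtr keeps the comparison valid across
 * counter wraparound. A tiny standalone check, assuming CycleCtr is a narrow
 * unsigned type (the width here is illustrative): */
#include <assert.h>
#include <stdint.h>

typedef uint16_t DemoCycleCtr;

int main(void)
{
    DemoCycleCtr entry_ctr = 65535;   /* about to wrap */
    DemoCycleCtr cur_ctr = 0;         /* counter after the increment wrapped */

    /* (entry_ctr + 1) is reduced mod 2^16, so the check holds across the wrap */
    assert((DemoCycleCtr) (entry_ctr + 1) == cur_ctr);
    return 0;
}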
int
hashitem(
    register struct hash *hp,
    HASHDATA **data,
    int enter )
{
    register ITEM *i;
    OBJECT *b = (*data)->key;
    unsigned int keyval = hash_keyval( b );

#ifdef HASH_DEBUG_PROFILE
    profile_frame prof[1];
    if ( DEBUG_PROFILE )
        profile_enter( 0, prof );
#endif

    if ( enter && !hp->items.more )
        hashrehash( hp );

    if ( !enter && !hp->items.nel )
    {
#ifdef HASH_DEBUG_PROFILE
        if ( DEBUG_PROFILE )
            profile_exit( prof );
#endif
        return 0;
    }

    i = hash_search( hp, keyval, (*data)->key, 0 );
    if ( i )
    {
        *data = &i->data;
#ifdef HASH_DEBUG_PROFILE
        if ( DEBUG_PROFILE )
            profile_exit( prof );
#endif
        return !0;
    }

    if ( enter )
    {
        ITEM **base = hash_bucket( hp, keyval );

        /* try to grab one from the free list */
        if ( hp->items.free )
        {
            i = hp->items.free;
            hp->items.free = i->hdr.next;
            assert( i->data.key == 0 );
        }
        else
        {
            i = (ITEM *)hp->items.next;
            hp->items.next += hp->items.size;
        }
        hp->items.more--;
        memcpy( (char *)&i->data, (char *)*data, hp->items.datalen );
        i->hdr.next = *base;
        *base = i;
        *data = &i->data;
#ifdef OPT_BOEHM_GC
        if ( sizeof(HASHDATA) == hp->items.datalen )
        {
            GC_REGISTER_FINALIZER( i->data.key, &hash_mem_finalizer, hp, 0, 0 );
        }
#endif
    }

#ifdef HASH_DEBUG_PROFILE
    if ( DEBUG_PROFILE )
        profile_exit( prof );
#endif
    return 0;
}
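/* hashitem() uses *data as both probe and result: callers pass a pointer to a
 * stack record, and on return *data points at the table-owned copy (whether
 * it was found or freshly entered; the return value says which). A hedged
 * usage sketch follows; real callers typically define a wider record whose
 * first member is the OBJECT *key, a bare HASHDATA is used here for brevity. */
static HASHDATA *
lookup_or_insert( struct hash *hp, OBJECT *key )
{
    HASHDATA probe;             /* probe record on the stack */
    HASHDATA *slot = &probe;

    probe.key = key;
    (void) hashitem( hp, &slot, !0 );   /* enter if missing */
    return slot;                /* now points at the table-owned copy */
}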
/*
 * Fetch parser cache entry
 */
TSParserCacheEntry *
lookup_ts_parser_cache(Oid prsId)
{
    TSParserCacheEntry *entry;

    if (TSParserCacheHash == NULL)
    {
        /* First time through: initialize the hash table */
        HASHCTL ctl;

        MemSet(&ctl, 0, sizeof(ctl));
        ctl.keysize = sizeof(Oid);
        ctl.entrysize = sizeof(TSParserCacheEntry);
        ctl.hash = oid_hash;
        TSParserCacheHash = hash_create("Tsearch parser cache", 4,
                                        &ctl, HASH_ELEM | HASH_FUNCTION);
        /* Flush cache on pg_ts_parser changes */
        CacheRegisterSyscacheCallback(TSPARSEROID, InvalidateTSCacheCallBack,
                                      PointerGetDatum(TSParserCacheHash));

        /* Also make sure CacheMemoryContext exists */
        if (!CacheMemoryContext)
            CreateCacheMemoryContext();
    }

    /* Check single-entry cache */
    if (lastUsedParser && lastUsedParser->prsId == prsId &&
        lastUsedParser->isvalid)
        return lastUsedParser;

    /* Try to look up an existing entry */
    entry = (TSParserCacheEntry *) hash_search(TSParserCacheHash,
                                               (void *) &prsId,
                                               HASH_FIND, NULL);
    if (entry == NULL || !entry->isvalid)
    {
        /*
         * If we didn't find one, we want to make one. But first look up the
         * object to be sure the OID is real.
         */
        HeapTuple tp;
        Form_pg_ts_parser prs;

        tp = SearchSysCache1(TSPARSEROID, ObjectIdGetDatum(prsId));
        if (!HeapTupleIsValid(tp))
            elog(ERROR, "cache lookup failed for text search parser %u",
                 prsId);
        prs = (Form_pg_ts_parser) GETSTRUCT(tp);

        /*
         * Sanity checks
         */
        if (!OidIsValid(prs->prsstart))
            elog(ERROR, "text search parser %u has no prsstart method", prsId);
        if (!OidIsValid(prs->prstoken))
            elog(ERROR, "text search parser %u has no prstoken method", prsId);
        if (!OidIsValid(prs->prsend))
            elog(ERROR, "text search parser %u has no prsend method", prsId);

        if (entry == NULL)
        {
            bool found;

            /* Now make the cache entry */
            entry = (TSParserCacheEntry *)
                hash_search(TSParserCacheHash, (void *) &prsId,
                            HASH_ENTER, &found);
            Assert(!found);     /* it wasn't there a moment ago */
        }

        MemSet(entry, 0, sizeof(TSParserCacheEntry));
        entry->prsId = prsId;
        entry->startOid = prs->prsstart;
        entry->tokenOid = prs->prstoken;
        entry->endOid = prs->prsend;
        entry->headlineOid = prs->prsheadline;
        entry->lextypeOid = prs->prslextype;

        ReleaseSysCache(tp);

        fmgr_info_cxt(entry->startOid, &entry->prsstart, CacheMemoryContext);
        fmgr_info_cxt(entry->tokenOid, &entry->prstoken, CacheMemoryContext);
        fmgr_info_cxt(entry->endOid, &entry->prsend, CacheMemoryContext);
        if (OidIsValid(entry->headlineOid))
            fmgr_info_cxt(entry->headlineOid, &entry->prsheadline,
                          CacheMemoryContext);

        entry->isvalid = true;
    }

    lastUsedParser = entry;

    return entry;
}
/* Normal -S, install packages from AUR
 * returns 0 on success, -1 on failure */
static int sync_targets(CURL *curl, alpm_list_t *targets)
{
    struct pw_hashdb *hashdb = build_hashdb();
    struct pkgpair pkgpair;
    struct pkgpair *pkgpair_ptr;
    struct aurpkg_t *aurpkg;
    alpm_pkg_t *lpkg;
    alpm_list_t *i;
    alpm_list_t *reinstall, *new_packages, *upgrade, *downgrade, *not_aur;
    alpm_list_t *aurpkg_list, *final_targets;
    int vercmp;
    int joined = 0, ret = 0;

    reinstall = new_packages = upgrade = downgrade = aurpkg_list = not_aur = NULL;
    final_targets = NULL;
    if (!hashdb) {
        pw_fprintf(PW_LOG_ERROR, stderr, "Failed to create hashdb\n");
        goto cleanup;
    }

    for (i = targets; i; i = i->next) {
        aurpkg_list = query_aur(curl, i->data, AUR_QUERY_INFO);
        if (!aurpkg_list) {
            not_aur = alpm_list_add(not_aur, i->data);
            goto free_aurpkg;
        }

        /* Check version string */
        pkgpair.pkgname = i->data;
        pkgpair_ptr = hash_search(hashdb->aur, &pkgpair);

        /* Locally installed AUR */
        if (pkgpair_ptr) {
            aurpkg = aurpkg_list->data;
            lpkg = pkgpair_ptr->pkg;
            vercmp = alpm_pkg_vercmp(aurpkg->version, alpm_pkg_get_version(lpkg));

            if (vercmp > 0) {
                upgrade = alpm_list_add(upgrade, i->data);
            } else if (vercmp == 0) {
                reinstall = alpm_list_add(reinstall, i->data);
            } else {
                downgrade = alpm_list_add(downgrade, i->data);
            }
        } else {
            new_packages = alpm_list_add(new_packages, i->data);
        }

free_aurpkg:
        alpm_list_free_inner(aurpkg_list, (alpm_list_fn_free) aurpkg_free);
        alpm_list_free(aurpkg_list);
    }

    if (not_aur) {
        printf("\n%sThese packages are not from the AUR:%s\n",
               color.bred, color.nocolor);
        print_list(not_aur);
    }

    if (downgrade) {
        printf("\n%sLocally installed but newer than AUR, ignoring:%s\n",
               color.bcyan, color.nocolor);
        print_list(downgrade);
    }

    if (reinstall) {
        printf("\n%sReinstalling:%s\n", color.byellow, color.nocolor);
        print_list(reinstall);
    }

    if (upgrade) {
        printf("\n%sUpgrading:%s\n", color.bblue, color.nocolor);
        print_list(upgrade);
    }

    if (new_packages) {
        printf("\n%sSyncing:%s\n", color.bmag, color.nocolor);
        print_list(new_packages);
    }

    printf("\n");
    if (config->noconfirm || yesno("Do you wish to proceed?")) {
        final_targets = alpm_list_join(reinstall, upgrade);
        final_targets = alpm_list_join(final_targets, new_packages);
        joined = 1;
        ret = upgrade_pkgs(final_targets, hashdb);
    }

cleanup:
    hashdb_free(hashdb);
    alpm_list_free(downgrade);
    alpm_list_free(not_aur);

    if (joined) {
        alpm_list_free(final_targets);
    } else {
        alpm_list_free(reinstall);
        alpm_list_free(new_packages);
        alpm_list_free(upgrade);
    }
    return ret;
}
/*
 * Open a relation during XLOG replay
 *
 * Note: this once had an API that allowed NULL return on failure, but it
 * no longer does; any failure results in elog().
 */
Relation
XLogOpenRelation(RelFileNode rnode)
{
    XLogRelDesc *res;
    XLogRelCacheEntry *hentry;
    bool found;

    hentry = (XLogRelCacheEntry *)
        hash_search(_xlrelcache, (void *) &rnode, HASH_FIND, NULL);

    if (hentry)
    {
        res = hentry->rdesc;

        res->lessRecently->moreRecently = res->moreRecently;
        res->moreRecently->lessRecently = res->lessRecently;
    }
    else
    {
        /*
         * We need to fault in the database directory on the standby.
         */
        if (rnode.spcNode != GLOBALTABLESPACE_OID && IsStandbyMode())
        {
            char *primaryFilespaceLocation = NULL;
            char *dbPath;

            if (IsBuiltinTablespace(rnode.spcNode))
            {
                /*
                 * No filespace to fetch.
                 */
            }
            else
            {
                char *mirrorFilespaceLocation = NULL;

                /*
                 * Investigate whether the containing directories exist to
                 * give more detail.
                 */
                PersistentTablespace_GetPrimaryAndMirrorFilespaces(
                    rnode.spcNode,
                    &primaryFilespaceLocation,
                    &mirrorFilespaceLocation);
                if (primaryFilespaceLocation == NULL ||
                    strlen(primaryFilespaceLocation) == 0)
                {
                    elog(ERROR, "Empty primary filespace directory location");
                }

                if (mirrorFilespaceLocation != NULL)
                {
                    pfree(mirrorFilespaceLocation);
                    mirrorFilespaceLocation = NULL;
                }
            }

            dbPath = (char *) palloc(MAXPGPATH + 1);

            FormDatabasePath(dbPath, primaryFilespaceLocation,
                             rnode.spcNode, rnode.dbNode);

            if (primaryFilespaceLocation != NULL)
            {
                pfree(primaryFilespaceLocation);
                primaryFilespaceLocation = NULL;
            }

            if (mkdir(dbPath, 0700) == 0)
            {
                if (Debug_persistent_recovery_print)
                {
                    elog(PersistentRecovery_DebugPrintLevel(),
                         "XLogOpenRelation: Re-created database directory \"%s\"",
                         dbPath);
                }
            }
            else
            {
                /*
                 * Allowed to already exist.
                 */
                if (errno != EEXIST)
                {
                    elog(ERROR, "could not create database directory \"%s\": %m",
                         dbPath);
                }
                else
                {
                    if (Debug_persistent_recovery_print)
                    {
                        elog(PersistentRecovery_DebugPrintLevel(),
                             "XLogOpenRelation: Database directory \"%s\" already exists",
                             dbPath);
                    }
                }
            }

            pfree(dbPath);
        }

        res = _xl_new_reldesc();

        sprintf(RelationGetRelationName(&(res->reldata)), "%u", rnode.relNode);

        res->reldata.rd_node = rnode;

        /*
         * We set up the lockRelId in case anything tries to lock the dummy
         * relation. Note that this is fairly bogus since relNode may be
         * different from the relation's OID. It shouldn't really matter
         * though, since we are presumably running by ourselves and can't have
         * any lock conflicts ...
         */
        res->reldata.rd_lockInfo.lockRelId.dbId = rnode.dbNode;
        res->reldata.rd_lockInfo.lockRelId.relId = rnode.relNode;

        hentry = (XLogRelCacheEntry *)
            hash_search(_xlrelcache, (void *) &rnode, HASH_ENTER, &found);

        if (found)
            elog(PANIC, "xlog relation already present on insert into cache");

        hentry->rdesc = res;

        res->reldata.rd_targblock = InvalidBlockNumber;
        res->reldata.rd_smgr = NULL;
        RelationOpenSmgr(&(res->reldata));

        /*
         * Create the target file if it doesn't already exist. This lets us
         * cope if the replay sequence contains writes to a relation that is
         * later deleted. (The original coding of this routine would instead
         * return NULL, causing the writes to be suppressed. But that seems
         * like it risks losing valuable data if the filesystem loses an inode
         * during a crash. Better to write the data until we are actually
         * told to delete the file.)
         *
         * NOTE: We no longer re-create files automatically because new
         * FileRep persistent objects will ensure files exist.
         *
         * UNDONE: Can't remove this block of code yet until boot time calls
         * to this routine are analyzed...
         */
        {
            MirrorDataLossTrackingState mirrorDataLossTrackingState;
            int64 mirrorDataLossTrackingSessionNum;
            bool mirrorDataLossOccurred;

            /* UNDONE: What about the persistent rel files table??? */
            /* UNDONE: This condition should not occur anymore. */
            /* UNDONE: segmentFileNum and AO? */
            mirrorDataLossTrackingState =
                FileRepPrimary_GetMirrorDataLossTrackingSessionNum(
                    &mirrorDataLossTrackingSessionNum);
            smgrcreate(res->reldata.rd_smgr,
                       res->reldata.rd_isLocalBuf,
                       /* relationName */ NULL, /* Ok to be NULL -- we don't know the name here. */
                       mirrorDataLossTrackingState,
                       mirrorDataLossTrackingSessionNum,
                       /* ignoreAlreadyExists */ true,
                       &mirrorDataLossOccurred);
        }
    }

    res->moreRecently = &(_xlrelarr[0]);
    res->lessRecently = _xlrelarr[0].lessRecently;
    _xlrelarr[0].lessRecently = res;
    res->lessRecently->moreRecently = res;

    /* Assert what it says in the interface -- we don't return NULL anymore. */
    Assert(&(res->reldata) != NULL);

    return &(res->reldata);
}
/*
 * LocalBufferAlloc -
 *    Find or create a local buffer for the given page of the given relation.
 *
 * API is similar to bufmgr.c's BufferAlloc, except that we do not need
 * to do any locking since this is all local. Also, IO_IN_PROGRESS
 * does not get set. Lastly, we support only default access strategy
 * (hence, usage_count is always advanced).
 */
BufferDesc *
LocalBufferAlloc(SMgrRelation smgr, ForkNumber forkNum, BlockNumber blockNum,
                 bool *foundPtr)
{
    BufferTag newTag;           /* identity of requested block */
    LocalBufferLookupEnt *hresult;
    BufferDesc *bufHdr;
    int b;
    int trycounter;
    bool found;

    INIT_BUFFERTAG(newTag, smgr->smgr_rnode.node, forkNum, blockNum);

    /* Initialize local buffers if first request in this session */
    if (LocalBufHash == NULL)
        InitLocalBuffers();

    /* See if the desired buffer already exists */
    hresult = (LocalBufferLookupEnt *)
        hash_search(LocalBufHash, (void *) &newTag, HASH_FIND, NULL);

    if (hresult)
    {
        b = hresult->id;
        bufHdr = &LocalBufferDescriptors[b];
        Assert(BUFFERTAGS_EQUAL(bufHdr->tag, newTag));
#ifdef LBDEBUG
        fprintf(stderr, "LB ALLOC (%u,%d,%d) %d\n",
                smgr->smgr_rnode.node.relNode, forkNum, blockNum, -b - 1);
#endif
        /* this part is equivalent to PinBuffer for a shared buffer */
        if (LocalRefCount[b] == 0)
        {
            if (bufHdr->usage_count < BM_MAX_USAGE_COUNT)
                bufHdr->usage_count++;
        }
        LocalRefCount[b]++;
        ResourceOwnerRememberBuffer(CurrentResourceOwner,
                                    BufferDescriptorGetBuffer(bufHdr));
        if (bufHdr->flags & BM_VALID)
            *foundPtr = TRUE;
        else
        {
            /* Previous read attempt must have failed; try again */
            *foundPtr = FALSE;
        }
        return bufHdr;
    }

#ifdef LBDEBUG
    fprintf(stderr, "LB ALLOC (%u,%d,%d) %d\n",
            smgr->smgr_rnode.node.relNode, forkNum, blockNum,
            -nextFreeLocalBuf - 1);
#endif

    /*
     * Need to get a new buffer. We use a clock sweep algorithm (essentially
     * the same as what freelist.c does now...)
     */
    trycounter = NLocBuffer;
    for (;;)
    {
        b = nextFreeLocalBuf;

        if (++nextFreeLocalBuf >= NLocBuffer)
            nextFreeLocalBuf = 0;

        bufHdr = &LocalBufferDescriptors[b];

        if (LocalRefCount[b] == 0)
        {
            if (bufHdr->usage_count > 0)
            {
                bufHdr->usage_count--;
                trycounter = NLocBuffer;
            }
            else
            {
                /* Found a usable buffer */
                LocalRefCount[b]++;
                ResourceOwnerRememberBuffer(CurrentResourceOwner,
                                            BufferDescriptorGetBuffer(bufHdr));
                break;
            }
        }
        else if (--trycounter == 0)
            ereport(ERROR,
                    (errcode(ERRCODE_INSUFFICIENT_RESOURCES),
                     errmsg("no empty local buffer available")));
    }

    /*
     * this buffer is not referenced but it might still be dirty. if that's
     * the case, write it out before reusing it!
     */
    if (bufHdr->flags & BM_DIRTY)
    {
        SMgrRelation oreln;

        /* Find smgr relation for buffer */
        oreln = smgropen(bufHdr->tag.rnode, MyBackendId);

        /* And write... */
        smgrwrite(oreln,
                  bufHdr->tag.forkNum,
                  bufHdr->tag.blockNum,
                  (char *) LocalBufHdrGetBlock(bufHdr),
                  false);

        /* Mark not-dirty now in case we error out below */
        bufHdr->flags &= ~BM_DIRTY;

        pgBufferUsage.local_blks_written++;
    }

    /*
     * lazy memory allocation: allocate space on first use of a buffer.
     */
    if (LocalBufHdrGetBlock(bufHdr) == NULL)
    {
        /* Set pointer for use by BufferGetBlock() macro */
        LocalBufHdrGetBlock(bufHdr) = GetLocalBufferStorage();
    }

    /*
     * Update the hash table: remove old entry, if any, and make new one.
     */
    if (bufHdr->flags & BM_TAG_VALID)
    {
        hresult = (LocalBufferLookupEnt *)
            hash_search(LocalBufHash, (void *) &bufHdr->tag,
                        HASH_REMOVE, NULL);
        if (!hresult)           /* shouldn't happen */
            elog(ERROR, "local buffer hash table corrupted");
        /* mark buffer invalid just in case hash insert fails */
        CLEAR_BUFFERTAG(bufHdr->tag);
        bufHdr->flags &= ~(BM_VALID | BM_TAG_VALID);
    }

    hresult = (LocalBufferLookupEnt *)
        hash_search(LocalBufHash, (void *) &newTag, HASH_ENTER, &found);
    if (found)                  /* shouldn't happen */
        elog(ERROR, "local buffer hash table corrupted");
    hresult->id = b;

    /*
     * it's all ours now.
     */
    bufHdr->tag = newTag;
    bufHdr->flags &= ~(BM_VALID | BM_DIRTY | BM_JUST_DIRTIED | BM_IO_ERROR);
    bufHdr->flags |= BM_TAG_VALID;
    bufHdr->usage_count = 1;

    *foundPtr = FALSE;
    return bufHdr;
}
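/* The victim-selection loop above is a classic clock sweep. A self-contained
 * miniature of the same algorithm follows; all types and names here are
 * illustrative, not PostgreSQL's. */
#define DEMO_NBUF 8

static int demo_usage[DEMO_NBUF];    /* usage_count per buffer */
static int demo_refcount[DEMO_NBUF]; /* pins per buffer */
static int demo_hand;                /* the clock hand */

/* Returns the index of a reusable buffer, or -1 if every buffer is pinned. */
static int
demo_clock_sweep_victim(void)
{
    int tries = DEMO_NBUF;

    for (;;)
    {
        int b = demo_hand;

        demo_hand = (demo_hand + 1) % DEMO_NBUF;
        if (demo_refcount[b] == 0)
        {
            if (demo_usage[b] > 0)
            {
                demo_usage[b]--;    /* give it another lap to prove itself */
                tries = DEMO_NBUF;  /* reset: we made progress */
            }
            else
            {
                return b;           /* unpinned and cold: evict this one */
            }
        }
        else if (--tries == 0)
            return -1;              /* a full lap found only pinned buffers */
    }
}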
/* * compute_tsvector_stats() -- compute statistics for a tsvector column * * This functions computes statistics that are useful for determining @@ * operations' selectivity, along with the fraction of non-null rows and * average width. * * Instead of finding the most common values, as we do for most datatypes, * we're looking for the most common lexemes. This is more useful, because * there most probably won't be any two rows with the same tsvector and thus * the notion of a MCV is a bit bogus with this datatype. With a list of the * most common lexemes we can do a better job at figuring out @@ selectivity. * * For the same reasons we assume that tsvector columns are unique when * determining the number of distinct values. * * The algorithm used is Lossy Counting, as proposed in the paper "Approximate * frequency counts over data streams" by G. S. Manku and R. Motwani, in * Proceedings of the 28th International Conference on Very Large Data Bases, * Hong Kong, China, August 2002, section 4.2. The paper is available at * http://www.vldb.org/conf/2002/S10P03.pdf * * The Lossy Counting (aka LC) algorithm goes like this: * Let s be the threshold frequency for an item (the minimum frequency we * are interested in) and epsilon the error margin for the frequency. Let D * be a set of triples (e, f, delta), where e is an element value, f is that * element's frequency (actually, its current occurrence count) and delta is * the maximum error in f. We start with D empty and process the elements in * batches of size w. (The batch size is also known as "bucket size" and is * equal to 1/epsilon.) Let the current batch number be b_current, starting * with 1. For each element e we either increment its f count, if it's * already in D, or insert a new triple into D with values (e, 1, b_current * - 1). After processing each batch we prune D, by removing from it all * elements with f + delta <= b_current. After the algorithm finishes we * suppress all elements from D that do not satisfy f >= (s - epsilon) * N, * where N is the total number of elements in the input. We emit the * remaining elements with estimated frequency f/N. The LC paper proves * that this algorithm finds all elements with true frequency at least s, * and that no frequency is overestimated or is underestimated by more than * epsilon. Furthermore, given reasonable assumptions about the input * distribution, the required table size is no more than about 7 times w. * * We set s to be the estimated frequency of the K'th word in a natural * language's frequency table, where K is the target number of entries in * the MCELEM array plus an arbitrary constant, meant to reflect the fact * that the most common words in any language would usually be stopwords * so we will not actually see them in the input. We assume that the * distribution of word frequencies (including the stopwords) follows Zipf's * law with an exponent of 1. * * Assuming Zipfian distribution, the frequency of the K'th word is equal * to 1/(K * H(W)) where H(n) is 1/2 + 1/3 + ... + 1/n and W is the number of * words in the language. Putting W as one million, we get roughly 0.07/K. * Assuming top 10 words are stopwords gives s = 0.07/(K + 10). We set * epsilon = s/10, which gives bucket width w = (K + 10)/0.007 and * maximum expected hashtable size of about 1000 * (K + 10). * * Note: in the above discussion, s, epsilon, and f/N are in terms of a * lexeme's frequency as a fraction of all lexemes seen in the input. 
* However, what we actually want to store in the finished pg_statistic * entry is each lexeme's frequency as a fraction of all rows that it occurs * in. Assuming that the input tsvectors are correctly constructed, no * lexeme occurs more than once per tsvector, so the final count f is a * correct estimate of the number of input tsvectors it occurs in, and we * need only change the divisor from N to nonnull_cnt to get the number we * want. */ static void compute_tsvector_stats(VacAttrStats *stats, AnalyzeAttrFetchFunc fetchfunc, int samplerows, double totalrows) { int num_mcelem; int null_cnt = 0; double total_width = 0; /* This is D from the LC algorithm. */ HTAB *lexemes_tab; HASHCTL hash_ctl; HASH_SEQ_STATUS scan_status; /* This is the current bucket number from the LC algorithm */ int b_current; /* This is 'w' from the LC algorithm */ int bucket_width; int vector_no, lexeme_no; LexemeHashKey hash_key; TrackItem *item; /* * We want statistics_target * 10 lexemes in the MCELEM array. This * multiplier is pretty arbitrary, but is meant to reflect the fact that * the number of individual lexeme values tracked in pg_statistic ought to * be more than the number of values for a simple scalar column. */ num_mcelem = stats->attr->attstattarget * 10; /* * We set bucket width equal to (num_mcelem + 10) / 0.007 as per the * comment above. */ bucket_width = (num_mcelem + 10) * 1000 / 7; /* * Create the hashtable. It will be in local memory, so we don't need to * worry about overflowing the initial size. Also we don't need to pay any * attention to locking and memory management. */ MemSet(&hash_ctl, 0, sizeof(hash_ctl)); hash_ctl.keysize = sizeof(LexemeHashKey); hash_ctl.entrysize = sizeof(TrackItem); hash_ctl.hash = lexeme_hash; hash_ctl.match = lexeme_match; hash_ctl.hcxt = CurrentMemoryContext; lexemes_tab = hash_create("Analyzed lexemes table", num_mcelem, &hash_ctl, HASH_ELEM | HASH_FUNCTION | HASH_COMPARE | HASH_CONTEXT); /* Initialize counters. */ b_current = 1; lexeme_no = 0; /* Loop over the tsvectors. */ for (vector_no = 0; vector_no < samplerows; vector_no++) { Datum value; bool isnull; TSVector vector; WordEntry *curentryptr; char *lexemesptr; int j; vacuum_delay_point(); value = fetchfunc(stats, vector_no, &isnull); /* * Check for null/nonnull. */ if (isnull) { null_cnt++; continue; } /* * Add up widths for average-width calculation. Since it's a * tsvector, we know it's varlena. As in the regular * compute_minimal_stats function, we use the toasted width for this * calculation. */ total_width += VARSIZE_ANY(DatumGetPointer(value)); /* * Now detoast the tsvector if needed. */ vector = DatumGetTSVector(value); /* * We loop through the lexemes in the tsvector and add them to our * tracking hashtable. */ lexemesptr = STRPTR(vector); curentryptr = ARRPTR(vector); for (j = 0; j < vector->size; j++) { bool found; /* * Construct a hash key. The key points into the (detoasted) * tsvector value at this point, but if a new entry is created, we * make a copy of it. This way we can free the tsvector value * once we've processed all its lexemes. 
*/ hash_key.lexeme = lexemesptr + curentryptr->pos; hash_key.length = curentryptr->len; /* Lookup current lexeme in hashtable, adding it if new */ item = (TrackItem *) hash_search(lexemes_tab, (const void *) &hash_key, HASH_ENTER, &found); if (found) { /* The lexeme is already on the tracking list */ item->frequency++; } else { /* Initialize new tracking list element */ item->frequency = 1; item->delta = b_current - 1; item->key.lexeme = palloc(hash_key.length); memcpy(item->key.lexeme, hash_key.lexeme, hash_key.length); } /* lexeme_no is the number of elements processed (ie N) */ lexeme_no++; /* We prune the D structure after processing each bucket */ if (lexeme_no % bucket_width == 0) { prune_lexemes_hashtable(lexemes_tab, b_current); b_current++; } /* Advance to the next WordEntry in the tsvector */ curentryptr++; } /* If the vector was toasted, free the detoasted copy. */ if (TSVectorGetDatum(vector) != value) pfree(vector); } /* We can only compute real stats if we found some non-null values. */ if (null_cnt < samplerows) { int nonnull_cnt = samplerows - null_cnt; int i; TrackItem **sort_table; int track_len; int cutoff_freq; int minfreq, maxfreq; stats->stats_valid = true; /* Do the simple null-frac and average width stats */ stats->stanullfrac = (double) null_cnt / (double) samplerows; stats->stawidth = total_width / (double) nonnull_cnt; /* Assume it's a unique column (see notes above) */ stats->stadistinct = -1.0 * (1.0 - stats->stanullfrac); /* * Construct an array of the interesting hashtable items, that is, * those meeting the cutoff frequency (s - epsilon)*N. Also identify * the minimum and maximum frequencies among these items. * * Since epsilon = s/10 and bucket_width = 1/epsilon, the cutoff * frequency is 9*N / bucket_width. */ cutoff_freq = 9 * lexeme_no / bucket_width; i = hash_get_num_entries(lexemes_tab); /* surely enough space */ sort_table = (TrackItem **) palloc(sizeof(TrackItem *) * i); hash_seq_init(&scan_status, lexemes_tab); track_len = 0; minfreq = lexeme_no; maxfreq = 0; while ((item = (TrackItem *) hash_seq_search(&scan_status)) != NULL) { if (item->frequency > cutoff_freq) { sort_table[track_len++] = item; minfreq = Min(minfreq, item->frequency); maxfreq = Max(maxfreq, item->frequency); } } Assert(track_len <= i); /* emit some statistics for debug purposes */ elog(DEBUG3, "tsvector_stats: target # mces = %d, bucket width = %d, " "# lexemes = %d, hashtable size = %d, usable entries = %d", num_mcelem, bucket_width, lexeme_no, i, track_len); /* * If we obtained more lexemes than we really want, get rid of those * with least frequencies. The easiest way is to qsort the array into * descending frequency order and truncate the array. */ if (num_mcelem < track_len) { qsort(sort_table, track_len, sizeof(TrackItem *), trackitem_compare_frequencies_desc); /* reset minfreq to the smallest frequency we're keeping */ minfreq = sort_table[num_mcelem - 1]->frequency; } else num_mcelem = track_len; /* Generate MCELEM slot entry */ if (num_mcelem > 0) { MemoryContext old_context; Datum *mcelem_values; float4 *mcelem_freqs; /* * We want to store statistics sorted on the lexeme value using * first length, then byte-for-byte comparison. The reason for * doing length comparison first is that we don't care about the * ordering so long as it's consistent, and comparing lengths * first gives us a chance to avoid a strncmp() call. * * This is different from what we do with scalar statistics -- * they get sorted on frequencies. 
The rationale is that we * usually search through most common elements looking for a * specific value, so we can grab its frequency. When values are * presorted we can employ binary search for that. See * ts_selfuncs.c for a real usage scenario. */ qsort(sort_table, num_mcelem, sizeof(TrackItem *), trackitem_compare_lexemes); /* Must copy the target values into anl_context */ old_context = MemoryContextSwitchTo(stats->anl_context); /* * We sorted statistics on the lexeme value, but we want to be * able to find out the minimal and maximal frequency without * going through all the values. We keep those two extra * frequencies in two extra cells in mcelem_freqs. * * (Note: the MCELEM statistics slot definition allows for a third * extra number containing the frequency of nulls, but we don't * create that for a tsvector column, since null elements aren't * possible.) */ mcelem_values = (Datum *) palloc(num_mcelem * sizeof(Datum)); mcelem_freqs = (float4 *) palloc((num_mcelem + 2) * sizeof(float4)); /* * See comments above about use of nonnull_cnt as the divisor for * the final frequency estimates. */ for (i = 0; i < num_mcelem; i++) { TrackItem *item = sort_table[i]; mcelem_values[i] = PointerGetDatum(cstring_to_text_with_len(item->key.lexeme, item->key.length)); mcelem_freqs[i] = (double) item->frequency / (double) nonnull_cnt; } mcelem_freqs[i++] = (double) minfreq / (double) nonnull_cnt; mcelem_freqs[i] = (double) maxfreq / (double) nonnull_cnt; MemoryContextSwitchTo(old_context); stats->stakind[0] = STATISTIC_KIND_MCELEM; stats->staop[0] = TextEqualOperator; stats->stacoll[0] = DEFAULT_COLLATION_OID; stats->stanumbers[0] = mcelem_freqs; /* See above comment about two extra frequency fields */ stats->numnumbers[0] = num_mcelem + 2; stats->stavalues[0] = mcelem_values; stats->numvalues[0] = num_mcelem; /* We are storing text values */ stats->statypid[0] = TEXTOID; stats->statyplen[0] = -1; /* typlen, -1 for varlena */ stats->statypbyval[0] = false; stats->statypalign[0] = 'i'; } } else { /* We found only nulls; assume the column is entirely null */ stats->stats_valid = true; stats->stanullfrac = 1.0; stats->stawidth = 0; /* "unknown" */ stats->stadistinct = 0.0; /* "unknown" */ } /* * We don't need to bother cleaning up any of our temporary palloc's. The * hashtable should also go away, as it used a child memory context. */ }
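/*
 * Editor's illustration -- not part of the original sources. A minimal,
 * self-contained sketch of the Lossy Counting bookkeeping that
 * compute_tsvector_stats() performs with dynahash: count stream elements,
 * prune entries whose frequency + delta falls at or below the current bucket
 * number at each bucket boundary, and keep only items above the cutoff at
 * the end. A fixed-size linear table stands in for the hash table; values
 * are plain ints rather than lexemes.
 */
#include <stdio.h>

#define TABLE_SIZE 64

struct lc_item { int value; int frequency; int delta; int used; };

static void lc_count(const int *stream, int n, int bucket_width)
{
    struct lc_item tab[TABLE_SIZE] = {0};
    int b_current = 1;

    for (int i = 0; i < n; i++)
    {
        int j, free_slot = -1;

        /* Find the item, or remember a free slot for it. */
        for (j = 0; j < TABLE_SIZE; j++)
        {
            if (tab[j].used && tab[j].value == stream[i])
                break;
            if (!tab[j].used && free_slot < 0)
                free_slot = j;
        }
        if (j < TABLE_SIZE)
            tab[j].frequency++;
        else if (free_slot >= 0)
        {
            tab[free_slot].used = 1;
            tab[free_slot].value = stream[i];
            tab[free_slot].frequency = 1;
            tab[free_slot].delta = b_current - 1;
        }

        /* Prune once per bucket, as the loop above does. */
        if ((i + 1) % bucket_width == 0)
        {
            for (j = 0; j < TABLE_SIZE; j++)
                if (tab[j].used && tab[j].frequency + tab[j].delta <= b_current)
                    tab[j].used = 0;
            b_current++;
        }
    }

    /* cutoff mirrors the 9*N/bucket_width rule used above. */
    int cutoff = 9 * n / bucket_width;
    for (int j = 0; j < TABLE_SIZE; j++)
        if (tab[j].used && tab[j].frequency > cutoff)
            printf("%d occurs ~%d times\n", tab[j].value, tab[j].frequency);
}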
/* Process one per-dbspace directory for ResetUnloggedRelations */ static void ResetUnloggedRelationsInDbspaceDir(const char *dbspacedirname, int op) { DIR *dbspace_dir; struct dirent *de; char rm_path[MAXPGPATH]; /* Caller must specify at least one operation. */ Assert((op & (UNLOGGED_RELATION_CLEANUP | UNLOGGED_RELATION_INIT)) != 0); /* * Cleanup is a two-pass operation. First, we go through and identify all * the files with init forks. Then, we go through again and nuke * everything with the same OID except the init fork. */ if ((op & UNLOGGED_RELATION_CLEANUP) != 0) { HTAB *hash = NULL; HASHCTL ctl; /* Open the directory. */ dbspace_dir = AllocateDir(dbspacedirname); if (dbspace_dir == NULL) { elog(LOG, "could not open dbspace directory \"%s\": %m", dbspacedirname); return; } /* * It's possible that someone could create a ton of unlogged relations * in the same database & tablespace, so we'd better use a hash table * rather than an array or linked list to keep track of which files * need to be reset. Otherwise, this cleanup operation would be * O(n^2). */ ctl.keysize = sizeof(unlogged_relation_entry); ctl.entrysize = sizeof(unlogged_relation_entry); hash = hash_create("unlogged hash", 32, &ctl, HASH_ELEM); /* Scan the directory. */ while ((de = ReadDir(dbspace_dir, dbspacedirname)) != NULL) { ForkNumber forkNum; int oidchars; unlogged_relation_entry ent; /* Skip anything that doesn't look like a relation data file. */ if (!parse_filename_for_nontemp_relation(de->d_name, &oidchars, &forkNum)) continue; /* Also skip it unless this is the init fork. */ if (forkNum != INIT_FORKNUM) continue; /* * Put the OID portion of the name into the hash table, if it * isn't already. */ memset(ent.oid, 0, sizeof(ent.oid)); memcpy(ent.oid, de->d_name, oidchars); hash_search(hash, &ent, HASH_ENTER, NULL); } /* Done with the first pass. */ FreeDir(dbspace_dir); /* * If we didn't find any init forks, there's no point in continuing; * we can bail out now. */ if (hash_get_num_entries(hash) == 0) { hash_destroy(hash); return; } /* * Now, make a second pass and remove anything that matches. First, * reopen the directory. */ dbspace_dir = AllocateDir(dbspacedirname); if (dbspace_dir == NULL) { elog(LOG, "could not open dbspace directory \"%s\": %m", dbspacedirname); hash_destroy(hash); return; } /* Scan the directory. */ while ((de = ReadDir(dbspace_dir, dbspacedirname)) != NULL) { ForkNumber forkNum; int oidchars; bool found; unlogged_relation_entry ent; /* Skip anything that doesn't look like a relation data file. */ if (!parse_filename_for_nontemp_relation(de->d_name, &oidchars, &forkNum)) continue; /* We never remove the init fork. */ if (forkNum == INIT_FORKNUM) continue; /* * See whether the OID portion of the name shows up in the hash * table. */ memset(ent.oid, 0, sizeof(ent.oid)); memcpy(ent.oid, de->d_name, oidchars); hash_search(hash, &ent, HASH_FIND, &found); /* If so, nuke it! */ if (found) { snprintf(rm_path, sizeof(rm_path), "%s/%s", dbspacedirname, de->d_name); /* * It's tempting to actually throw an error here, but since * this code gets run during database startup, that could * result in the database failing to start. (XXX Should we do * it anyway?) */ if (unlink(rm_path)) elog(LOG, "could not unlink file \"%s\": %m", rm_path); else elog(DEBUG2, "unlinked file \"%s\"", rm_path); } } /* Cleanup is complete. */ FreeDir(dbspace_dir); hash_destroy(hash); } /* * Initialization happens after cleanup is complete: we copy each init * fork file to the corresponding main fork file. 
Note that if we are * asked to do both cleanup and init, we may never get here: if the * cleanup code determines that there are no init forks in this dbspace, * it will return before we get to this point. */ if ((op & UNLOGGED_RELATION_INIT) != 0) { /* Open the directory. */ dbspace_dir = AllocateDir(dbspacedirname); if (dbspace_dir == NULL) { /* we just saw this directory, so it really ought to be there */ elog(LOG, "could not open dbspace directory \"%s\": %m", dbspacedirname); return; } /* Scan the directory. */ while ((de = ReadDir(dbspace_dir, dbspacedirname)) != NULL) { ForkNumber forkNum; int oidchars; char oidbuf[OIDCHARS + 1]; char srcpath[MAXPGPATH]; char dstpath[MAXPGPATH]; /* Skip anything that doesn't look like a relation data file. */ if (!parse_filename_for_nontemp_relation(de->d_name, &oidchars, &forkNum)) continue; /* Also skip it unless this is the init fork. */ if (forkNum != INIT_FORKNUM) continue; /* Construct source pathname. */ snprintf(srcpath, sizeof(srcpath), "%s/%s", dbspacedirname, de->d_name); /* Construct destination pathname. */ memcpy(oidbuf, de->d_name, oidchars); oidbuf[oidchars] = '\0'; snprintf(dstpath, sizeof(dstpath), "%s/%s%s", dbspacedirname, oidbuf, de->d_name + oidchars + 1 + strlen(forkNames[INIT_FORKNUM])); /* OK, we're ready to perform the actual copy. */ elog(DEBUG2, "copying %s to %s", srcpath, dstpath); copy_file(srcpath, dstpath); } /* Done with the first pass. */ FreeDir(dbspace_dir); } }
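/*
 * Editor's illustration -- not part of the original sources. The cleanup
 * branch above is an instance of a generic two-pass directory scan: pass one
 * collects the keys of interest (the init forks), pass two re-reads the
 * directory and acts on everything that matches a remembered key. A minimal
 * POSIX sketch of the same shape, with set membership reduced to caller
 * callbacks (all names here are hypothetical):
 */
#include <dirent.h>
#include <stdio.h>

static void two_pass_scan(const char *dirname,
                          int (*is_marker)(const char *),
                          int (*matches_any_marker)(const char *),
                          void (*remember)(const char *))
{
    DIR *dir;
    struct dirent *de;

    /* Pass 1: remember every marker file. */
    if ((dir = opendir(dirname)) == NULL)
        return;
    while ((de = readdir(dir)) != NULL)
        if (is_marker(de->d_name))
            remember(de->d_name);
    closedir(dir);

    /* Pass 2: reopen and act on matches, never touching the markers. */
    if ((dir = opendir(dirname)) == NULL)
        return;
    while ((de = readdir(dir)) != NULL)
        if (!is_marker(de->d_name) && matches_any_marker(de->d_name))
            printf("would remove %s/%s\n", dirname, de->d_name);
    closedir(dir);
}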
/* * Maintain a cache of names. * * The keys are all NAMEDATALEN long. */ static char * getDnsCachedAddress(char *name, int port, int elevel) { struct segment_ip_cache_entry *e; if (segment_ip_cache_htab == NULL) { HASHCTL hash_ctl; MemSet(&hash_ctl, 0, sizeof(hash_ctl)); hash_ctl.keysize = NAMEDATALEN + 1; hash_ctl.entrysize = sizeof(struct segment_ip_cache_entry); segment_ip_cache_htab = hash_create("segment_dns_cache", 256, &hash_ctl, HASH_ELEM); Assert(segment_ip_cache_htab != NULL); } e = (struct segment_ip_cache_entry *)hash_search(segment_ip_cache_htab, name, HASH_FIND, NULL); /* not in our cache, we've got to actually do the name lookup. */ if (e == NULL) { MemoryContext oldContext; int ret; char portNumberStr[32]; char *service; struct addrinfo *addrs = NULL, *addr; struct addrinfo hint; /* Initialize hint structure */ MemSet(&hint, 0, sizeof(hint)); hint.ai_socktype = SOCK_STREAM; hint.ai_family = AF_UNSPEC; snprintf(portNumberStr, sizeof(portNumberStr), "%d", port); service = portNumberStr; ret = pg_getaddrinfo_all(name, service, &hint, &addrs); if (ret || !addrs) { if (addrs) pg_freeaddrinfo_all(hint.ai_family, addrs); ereport(elevel, (errmsg("could not translate host name \"%s\", port \"%d\" to address: %s", name, port, gai_strerror(ret)))); return NULL; } /* save in the cache context */ oldContext = MemoryContextSwitchTo(TopMemoryContext); for (addr = addrs; addr; addr = addr->ai_next) { #ifdef HAVE_UNIX_SOCKETS /* Ignore AF_UNIX sockets, if any are returned. */ if (addr->ai_family == AF_UNIX) continue; #endif if (addr->ai_family == AF_INET) /* IPv4 address */ { char hostinfo[NI_MAXHOST]; pg_getnameinfo_all((struct sockaddr_storage *)addr->ai_addr, addr->ai_addrlen, hostinfo, sizeof(hostinfo), NULL, 0, NI_NUMERICHOST); /* INSERT INTO OUR CACHE HTAB HERE */ e = (struct segment_ip_cache_entry *)hash_search(segment_ip_cache_htab, name, HASH_ENTER, NULL); Assert(e != NULL); memcpy(e->hostinfo, hostinfo, sizeof(hostinfo)); break; } } #ifdef HAVE_IPV6 /* * IPv6 probably would work fine, we'd just need to make sure all the data structures are big enough for * the IPv6 address. And on some broken systems, you can get an IPv6 address, but not be able to bind to it * because IPv6 is disabled or missing in the kernel, so we'd only want to use the IPv6 address if there isn't * an IPv4 address. All we really need to do is test this. */ if (e == NULL && addrs->ai_family == AF_INET6) { char hostinfo[NI_MAXHOST]; addr = addrs; pg_getnameinfo_all((struct sockaddr_storage *)addr->ai_addr, addr->ai_addrlen, hostinfo, sizeof(hostinfo), NULL, 0, NI_NUMERICHOST); /* INSERT INTO OUR CACHE HTAB HERE */ e = (struct segment_ip_cache_entry *)hash_search(segment_ip_cache_htab, name, HASH_ENTER, NULL); Assert(e != NULL); memcpy(e->hostinfo, hostinfo, sizeof(hostinfo)); } #endif MemoryContextSwitchTo(oldContext); pg_freeaddrinfo_all(hint.ai_family, addrs); } /* return a pointer to our cache. */ return e->hostinfo; }
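/*
 * Editor's illustration -- not part of the original sources. The same
 * memoized-DNS idea in plain POSIX, with a tiny linear cache instead of a
 * dynahash table; a real cache would also need locking and eviction. Note
 * that callers must handle a NULL return here, just as they must for
 * getDnsCachedAddress() above, which returns NULL when the lookup fails and
 * elevel is below ERROR.
 */
#include <netdb.h>
#include <netinet/in.h>
#include <arpa/inet.h>
#include <sys/socket.h>
#include <stdio.h>
#include <string.h>

#define CACHE_SLOTS 8

struct dns_slot { char name[64]; char addr[INET6_ADDRSTRLEN]; };
static struct dns_slot dns_cache[CACHE_SLOTS];
static int dns_used;

static const char *cached_lookup(const char *name)
{
    for (int i = 0; i < dns_used; i++)
        if (strcmp(dns_cache[i].name, name) == 0)
            return dns_cache[i].addr;

    if (dns_used == CACHE_SLOTS)
        return NULL;                 /* cache full; a real cache would evict */

    struct addrinfo hint = {0}, *res;
    hint.ai_socktype = SOCK_STREAM;
    hint.ai_family = AF_UNSPEC;
    if (getaddrinfo(name, NULL, &hint, &res) != 0)
        return NULL;

    /* Prefer the first IPv4 result, mirroring the function above. */
    struct dns_slot *s = &dns_cache[dns_used];
    const char *ok = NULL;
    for (struct addrinfo *a = res; a; a = a->ai_next)
        if (a->ai_family == AF_INET)
        {
            struct sockaddr_in *in = (struct sockaddr_in *) a->ai_addr;
            ok = inet_ntop(AF_INET, &in->sin_addr, s->addr, sizeof(s->addr));
            break;
        }
    if (ok)
    {
        snprintf(s->name, sizeof(s->name), "%s", name);
        dns_used++;
    }
    freeaddrinfo(res);
    return ok ? s->addr : NULL;
}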
/*
 * Store some statistics for a statement.
 */
static void
pgss_store(const char *query, double total_time, uint64 rows,
           const BufferUsage *bufusage)
{
    pgssHashKey key;
    double      usage;
    pgssEntry  *entry;

    Assert(query != NULL);

    /* Safety check... */
    if (!pgss || !pgss_hash)
        return;

    /* Set up key for hashtable search */
    key.userid = GetUserId();
    key.dbid = MyDatabaseId;
    key.encoding = GetDatabaseEncoding();
    key.query_len = strlen(query);
    if (key.query_len >= pgss->query_size)
        key.query_len = pg_encoding_mbcliplen(key.encoding, query,
                                              key.query_len,
                                              pgss->query_size - 1);
    key.query_ptr = query;

    /* flat per-execution usage weight; pass the value actually in scope */
    usage = USAGE_EXEC(total_time);

    /* Lookup the hash table entry with shared lock. */
    LWLockAcquire(pgss->lock, LW_SHARED);

    entry = (pgssEntry *) hash_search(pgss_hash, &key, HASH_FIND, NULL);
    if (!entry)
    {
        /* Must acquire exclusive lock to add a new entry. */
        LWLockRelease(pgss->lock);
        LWLockAcquire(pgss->lock, LW_EXCLUSIVE);
        entry = entry_alloc(&key);
    }

    /* Grab the spinlock while updating the counters. */
    {
        volatile pgssEntry *e = (volatile pgssEntry *) entry;

        SpinLockAcquire(&e->mutex);
        e->counters.calls += 1;
        e->counters.total_time += total_time;
        e->counters.rows += rows;
        e->counters.shared_blks_hit += bufusage->shared_blks_hit;
        e->counters.shared_blks_read += bufusage->shared_blks_read;
        e->counters.shared_blks_written += bufusage->shared_blks_written;
        e->counters.local_blks_hit += bufusage->local_blks_hit;
        e->counters.local_blks_read += bufusage->local_blks_read;
        e->counters.local_blks_written += bufusage->local_blks_written;
        e->counters.temp_blks_read += bufusage->temp_blks_read;
        e->counters.temp_blks_written += bufusage->temp_blks_written;
        e->counters.usage += usage;
        SpinLockRelease(&e->mutex);
    }

    LWLockRelease(pgss->lock);
}
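/*
 * Editor's illustration -- not part of the original sources. The lock dance
 * in pgss_store() (look up under a shared lock; on a miss, release and
 * retake the lock exclusively) maps onto POSIX rwlocks as below. After
 * re-acquiring exclusively, the entry must be looked up again, since another
 * thread may have inserted it in the window; in the code above that re-check
 * happens inside entry_alloc(). The table API here is hypothetical.
 */
#include <pthread.h>

struct entry;                                  /* opaque table entry */
struct entry *table_find(const char *key);
struct entry *table_find_or_insert(const char *key);

static pthread_rwlock_t table_lock = PTHREAD_RWLOCK_INITIALIZER;

static struct entry *lookup_or_create(const char *key)
{
    struct entry *e;

    pthread_rwlock_rdlock(&table_lock);
    e = table_find(key);
    if (e == NULL)
    {
        pthread_rwlock_unlock(&table_lock);
        pthread_rwlock_wrlock(&table_lock);
        e = table_find_or_insert(key);         /* re-probe under write lock */
    }
    pthread_rwlock_unlock(&table_lock);
    return e;
}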
/** * Scan through a origin file, looking for sections that match * checksums from the generator, and transmit either literal or token * data. * * Also calculates the MD4 checksum of the whole file, using the md * accumulator. This is transmitted with the file as protection * against corruption on the wire. * * @param s Checksums received from the generator. If <tt>s->count == * 0</tt>, then there are actually no checksums for this file. * * @param len Length of the file to send. **/ void match_sums(int f, struct sum_struct *s, struct map_struct *buf, OFF_T len) { char file_sum[MD4_SUM_LENGTH]; last_match = 0; false_alarms = 0; hash_hits = 0; matches = 0; data_transfer = 0; sum_init(checksum_seed); if (append_mode > 0) { OFF_T j = 0; for (j = CHUNK_SIZE; j < s->flength; j += CHUNK_SIZE) { if (buf && do_progress) show_progress(last_match, buf->file_size); sum_update(map_ptr(buf, last_match, CHUNK_SIZE), CHUNK_SIZE); last_match = j; } if (last_match < s->flength) { int32 len = (int32)(s->flength - last_match); if (buf && do_progress) show_progress(last_match, buf->file_size); sum_update(map_ptr(buf, last_match, len), len); last_match = s->flength; } s->count = 0; } if (len > 0 && s->count > 0) { build_hash_table(s); if (verbose > 2) rprintf(FINFO,"built hash table\n"); hash_search(f,s,buf,len); if (verbose > 2) rprintf(FINFO,"done hash search\n"); } else { OFF_T j; /* by doing this in pieces we avoid too many seeks */ for (j = last_match + CHUNK_SIZE; j < len; j += CHUNK_SIZE) matched(f, s, buf, j, -2); matched(f, s, buf, len, -1); } sum_end(file_sum); /* If we had a read error, send a bad checksum. */ if (buf && buf->status != 0) file_sum[0]++; if (verbose > 2) rprintf(FINFO,"sending file_sum\n"); write_buf(f,file_sum,MD4_SUM_LENGTH); if (verbose > 2) rprintf(FINFO, "false_alarms=%d hash_hits=%d matches=%d\n", false_alarms, hash_hits, matches); total_hash_hits += hash_hits; total_false_alarms += false_alarms; total_matches += matches; stats.literal_data += data_transfer; }
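/*
 * Editor's illustration -- not part of the original sources. The append-mode
 * loop in match_sums() folds the already-transferred prefix into the running
 * whole-file checksum CHUNK_SIZE bytes at a time, so the mapped window stays
 * small. The same loop shape over an in-memory buffer, with a trivial
 * stand-in digest where rsync uses MD4:
 */
#include <stddef.h>
#include <stdint.h>

#define CHUNK 32768

static uint32_t digest_fold(uint32_t h, const unsigned char *p, size_t n)
{
    while (n--)
        h = h * 31 + *p++;
    return h;
}

static uint32_t checksum_in_chunks(const unsigned char *data, size_t len)
{
    uint32_t h = 0;
    size_t   off;

    for (off = 0; off + CHUNK <= len; off += CHUNK)
        h = digest_fold(h, data + off, CHUNK);      /* whole chunks */
    if (off < len)
        h = digest_fold(h, data + off, len - off);  /* the tail */
    return h;
}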
/*
 * SQL function json_populate_record
 *
 * set fields in a record from the argument json
 *
 * Code adapted shamelessly from hstore's populate_record
 * which is in turn partly adapted from record_out.
 *
 * The json is decomposed into a hash table, in which each
 * field in the record is then looked up by name.
 */
Datum
json_populate_record(PG_FUNCTION_ARGS)
{
    Oid         argtype = get_fn_expr_argtype(fcinfo->flinfo, 0);
    text       *json = PG_GETARG_TEXT_P(1);
    bool        use_json_as_text = PG_GETARG_BOOL(2);
    HTAB       *json_hash;
    HeapTupleHeader rec;
    Oid         tupType;
    int32       tupTypmod;
    TupleDesc   tupdesc;
    HeapTupleData tuple;
    HeapTuple   rettuple;
    RecordIOData *my_extra;
    int         ncolumns;
    int         i;
    Datum      *values;
    bool       *nulls;
    char        fname[NAMEDATALEN];
    JsonHashEntry *hashentry;   /* hash_search returns a pointer */

    if (!type_is_rowtype(argtype))
        ereport(ERROR,
                (errcode(ERRCODE_DATATYPE_MISMATCH),
                 errmsg("first argument must be a rowtype")));

    if (PG_ARGISNULL(0))
    {
        if (PG_ARGISNULL(1))
            PG_RETURN_NULL();

        rec = NULL;

        /*
         * have no tuple to look at, so the only source of type info is the
         * argtype. The lookup_rowtype_tupdesc call below will error out if
         * we don't have a known composite type oid here.
         */
        tupType = argtype;
        tupTypmod = -1;
    }
    else
    {
        rec = PG_GETARG_HEAPTUPLEHEADER(0);

        if (PG_ARGISNULL(1))
            PG_RETURN_POINTER(rec);

        /* Extract type info from the tuple itself */
        tupType = HeapTupleHeaderGetTypeId(rec);
        tupTypmod = HeapTupleHeaderGetTypMod(rec);
    }

    json_hash = get_json_object_as_hash(json, "json_populate_record",
                                        use_json_as_text);

    /*
     * if the input json is empty, we can only skip the rest if we were
     * passed in a non-null record, since otherwise there may be issues with
     * domain nulls.
     */
    if (hash_get_num_entries(json_hash) == 0 && rec)
        PG_RETURN_POINTER(rec);

    tupdesc = lookup_rowtype_tupdesc(tupType, tupTypmod);
    ncolumns = tupdesc->natts;

    if (rec)
    {
        /* Build a temporary HeapTuple control structure */
        tuple.t_len = HeapTupleHeaderGetDatumLength(rec);
        ItemPointerSetInvalid(&(tuple.t_self));
        tuple.t_data = rec;
    }

    /*
     * We arrange to look up the needed I/O info just once per series of
     * calls, assuming the record type doesn't change underneath us.
*/ my_extra = (RecordIOData *) fcinfo->flinfo->fn_extra; if (my_extra == NULL || my_extra->ncolumns != ncolumns) { fcinfo->flinfo->fn_extra = MemoryContextAlloc(fcinfo->flinfo->fn_mcxt, sizeof(RecordIOData) - sizeof(ColumnIOData) + ncolumns * sizeof(ColumnIOData)); my_extra = (RecordIOData *) fcinfo->flinfo->fn_extra; my_extra->record_type = InvalidOid; my_extra->record_typmod = 0; } if (my_extra->record_type != tupType || my_extra->record_typmod != tupTypmod) { MemSet(my_extra, 0, sizeof(RecordIOData) - sizeof(ColumnIOData) + ncolumns * sizeof(ColumnIOData)); my_extra->record_type = tupType; my_extra->record_typmod = tupTypmod; my_extra->ncolumns = ncolumns; } values = (Datum *) palloc(ncolumns * sizeof(Datum)); nulls = (bool *) palloc(ncolumns * sizeof(bool)); if (rec) { /* Break down the tuple into fields */ heap_deform_tuple(&tuple, tupdesc, values, nulls); } else { for (i = 0; i < ncolumns; ++i) { values[i] = (Datum) 0; nulls[i] = true; } } for (i = 0; i < ncolumns; ++i) { ColumnIOData *column_info = &my_extra->columns[i]; Oid column_type = tupdesc->attrs[i]->atttypid; char *value; /* Ignore dropped columns in datatype */ if (tupdesc->attrs[i]->attisdropped) { nulls[i] = true; continue; } memset(fname, 0, NAMEDATALEN); strncpy(fname, NameStr(tupdesc->attrs[i]->attname), NAMEDATALEN); hashentry = hash_search(json_hash, fname, HASH_FIND, NULL); /* * we can't just skip here if the key wasn't found since we might have * a domain to deal with. If we were passed in a non-null record * datum, we assume that the existing values are valid (if they're * not, then it's not our fault), but if we were passed in a null, * then every field which we don't populate needs to be run through * the input function just in case it's a domain type. */ if (hashentry == NULL && rec) continue; /* * Prepare to convert the column value from text */ if (column_info->column_type != column_type) { getTypeInputInfo(column_type, &column_info->typiofunc, &column_info->typioparam); fmgr_info_cxt(column_info->typiofunc, &column_info->proc, fcinfo->flinfo->fn_mcxt); column_info->column_type = column_type; } if (hashentry == NULL || hashentry->isnull) { /* * need InputFunctionCall to happen even for nulls, so that domain * checks are done */ values[i] = InputFunctionCall(&column_info->proc, NULL, column_info->typioparam, tupdesc->attrs[i]->atttypmod); nulls[i] = true; } else { value = hashentry->val; values[i] = InputFunctionCall(&column_info->proc, value, column_info->typioparam, tupdesc->attrs[i]->atttypmod); nulls[i] = false; } } rettuple = heap_form_tuple(tupdesc, values, nulls); ReleaseTupleDesc(tupdesc); PG_RETURN_DATUM(HeapTupleGetDatum(rettuple)); }
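/*
 * Editor's illustration -- not part of the original sources. Before probing
 * the hash table, the populate-record code above NUL-pads the column name
 * into a NAMEDATALEN buffer (memset, then strncpy). With fixed-size keys
 * that are compared bytewise, any garbage past the terminator would make
 * equal names compare unequal; zeroing first makes the key canonical either
 * way. The pattern in isolation (KEYLEN is a stand-in for NAMEDATALEN):
 */
#include <string.h>

#define KEYLEN 64

static void make_fixed_key(char key[KEYLEN], const char *name)
{
    memset(key, 0, KEYLEN);          /* zero the whole key first */
    strncpy(key, name, KEYLEN - 1);  /* then copy, always keeping a NUL */
}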
void AppendOnlyMirrorResyncEofs_Merge(RelFileNode *relFileNode, int32 segmentFileNum, int nestLevel, /* Transaction nesting level. */ char *relationName, ItemPointer persistentTid, int64 persistentSerialNum, bool mirrorCatchupRequired, MirrorDataLossTrackingState mirrorDataLossTrackingState, int64 mirrorDataLossTrackingSessionNum, int64 mirrorNewEof) { int64 previousMirrorNewEof = 0; AppendOnlyMirrorResyncEofsKey key; AppendOnlyMirrorResyncEofs *entry; bool found; if (AppendOnlyMirrorResyncEofsTable == NULL) AppendOnlyMirrorResyncEofs_HashTableInit(); AppendOnlyMirrorResyncEofs_InitKey( &key, relFileNode, segmentFileNum, nestLevel); entry = (AppendOnlyMirrorResyncEofs *) hash_search(AppendOnlyMirrorResyncEofsTable, (void *) &key, HASH_ENTER, &found); if (!found) { entry->relationName = MemoryContextStrdup(TopMemoryContext, relationName); entry->persistentSerialNum = persistentSerialNum; entry->persistentTid = *persistentTid; entry->didIncrementCommitCount = false; entry->isDistributedTransaction = false; entry->gid[0] = '\0'; entry->mirrorCatchupRequired = mirrorCatchupRequired; entry->mirrorDataLossTrackingState = mirrorDataLossTrackingState; entry->mirrorDataLossTrackingSessionNum = mirrorDataLossTrackingSessionNum; entry->mirrorNewEof = mirrorNewEof; } else { previousMirrorNewEof = entry->mirrorNewEof; /* * UNDONE: What is the purpose of this IF stmt? Shouldn't we always * set the new EOF? */ if (mirrorNewEof > entry->mirrorNewEof) entry->mirrorNewEof = mirrorNewEof; /* * We adopt the newer FileRep state because we accurately track the * state of mirror data. For example, the first write session might * have had loss because the mirror was down. But then the second * write session discovered we were in sync and copied both the first * and second write session to the mirror and flushed it. */ entry->mirrorCatchupRequired = mirrorCatchupRequired; entry->mirrorDataLossTrackingState = mirrorDataLossTrackingState; entry->mirrorDataLossTrackingSessionNum = mirrorDataLossTrackingSessionNum; } if (Debug_persistent_print || Debug_persistent_appendonly_commit_count_print) elog(Persistent_DebugPrintLevel(), "Storage Manager: %s Append-Only mirror resync eofs entry: %u/%u/%u, segment file #%d, relation name '%s' (transaction nest level %d, persistent TID %s, persistent serial number " INT64_FORMAT ", " "mirror data loss tracking (state '%s', session num " INT64_FORMAT "), " "previous mirror new EOF " INT64_FORMAT ", input mirror new EOF " INT64_FORMAT ", saved mirror new EOF " INT64_FORMAT ")", (found ? "Merge" : "New"), entry->key.relFileNode.spcNode, entry->key.relFileNode.dbNode, entry->key.relFileNode.relNode, entry->key.segmentFileNum, (entry->relationName == NULL ? "<null>" : entry->relationName), entry->key.nestLevel, ItemPointerToString(&entry->persistentTid), entry->persistentSerialNum, MirrorDataLossTrackingState_Name(mirrorDataLossTrackingState), mirrorDataLossTrackingSessionNum, previousMirrorNewEof, mirrorNewEof, entry->mirrorNewEof); }
static void
populate_recordset_object_end(void *state)
{
    PopulateRecordsetState *_state = (PopulateRecordsetState *) state;
    HTAB       *json_hash = _state->json_hash;
    Datum      *values;
    bool       *nulls;
    char        fname[NAMEDATALEN];
    int         i;
    RecordIOData *my_extra = _state->my_extra;
    int         ncolumns = my_extra->ncolumns;
    TupleDesc   tupdesc = _state->ret_tdesc;
    JsonHashEntry *hashentry;   /* hash_search returns a pointer */
    HeapTupleHeader rec = _state->rec;
    HeapTuple   rettuple;

    if (_state->lex->lex_level > 1)
        return;

    values = (Datum *) palloc(ncolumns * sizeof(Datum));
    nulls = (bool *) palloc(ncolumns * sizeof(bool));

    if (_state->rec)
    {
        HeapTupleData tuple;

        /* Build a temporary HeapTuple control structure */
        tuple.t_len = HeapTupleHeaderGetDatumLength(_state->rec);
        ItemPointerSetInvalid(&(tuple.t_self));
        tuple.t_data = _state->rec;

        /* Break down the tuple into fields */
        heap_deform_tuple(&tuple, tupdesc, values, nulls);
    }
    else
    {
        for (i = 0; i < ncolumns; ++i)
        {
            values[i] = (Datum) 0;
            nulls[i] = true;
        }
    }

    for (i = 0; i < ncolumns; ++i)
    {
        ColumnIOData *column_info = &my_extra->columns[i];
        Oid         column_type = tupdesc->attrs[i]->atttypid;
        char       *value;

        /* Ignore dropped columns in datatype */
        if (tupdesc->attrs[i]->attisdropped)
        {
            nulls[i] = true;
            continue;
        }

        memset(fname, 0, NAMEDATALEN);
        strncpy(fname, NameStr(tupdesc->attrs[i]->attname), NAMEDATALEN);
        hashentry = hash_search(json_hash, fname, HASH_FIND, NULL);

        /*
         * we can't just skip here if the key wasn't found since we might
         * have a domain to deal with. If we were passed in a non-null record
         * datum, we assume that the existing values are valid (if they're
         * not, then it's not our fault), but if we were passed in a null,
         * then every field which we don't populate needs to be run through
         * the input function just in case it's a domain type.
         */
        if (hashentry == NULL && rec)
            continue;

        /*
         * Prepare to convert the column value from text
         */
        if (column_info->column_type != column_type)
        {
            getTypeInputInfo(column_type,
                             &column_info->typiofunc,
                             &column_info->typioparam);
            fmgr_info_cxt(column_info->typiofunc, &column_info->proc,
                          _state->fn_mcxt);
            column_info->column_type = column_type;
        }

        if (hashentry == NULL || hashentry->isnull)
        {
            /*
             * need InputFunctionCall to happen even for nulls, so that
             * domain checks are done
             */
            values[i] = InputFunctionCall(&column_info->proc, NULL,
                                          column_info->typioparam,
                                          tupdesc->attrs[i]->atttypmod);
            nulls[i] = true;
        }
        else
        {
            value = hashentry->val;
            values[i] = InputFunctionCall(&column_info->proc, value,
                                          column_info->typioparam,
                                          tupdesc->attrs[i]->atttypmod);
            nulls[i] = false;
        }
    }

    rettuple = heap_form_tuple(tupdesc, values, nulls);

    tuplestore_puttuple(_state->tuple_store, rettuple);

    hash_destroy(json_hash);
}
/* Change provided package to a package which provides it. * For AUR packages, this also downloads and extracts PKGBUILD in cwd. * In addition, the "normalized" packages will be cached in hashdb->pkg_from * * @param curl curl handle * @param hashdb hash database * @param pkg package name * @param resolve_lvl level of dep resolution. RESOLVE_THOROUGH forces * downloading of AUR PKGBUILDs * * returns the "normalized" package if present, NULL on failure */ static const char *normalize_package(CURL *curl, struct pw_hashdb *hashdb, const char *pkgname, int resolve_lvl) { const char *provided = NULL; struct pkgpair pkgpair; struct pkgpair *pkgptr; enum pkgfrom_t *pkgfrom; pkgpair.pkgname = pkgname; pkgpair.pkg = NULL; /* If we know where pkg is from and it's not AUR / it's from AUR and * already downloaded, done */ pkgfrom = hashmap_search(hashdb->pkg_from, (void *) pkgname); if (pkgfrom) { if (*pkgfrom != PKG_FROM_AUR || hash_search(hashdb->aur_downloaded, (void *) pkgname)) { return pkgname; } goto search_aur; } /* If it's in local db and not AUR, done */ if (hash_search(hashdb->local, &pkgpair)) { if (hash_search(hashdb->aur, &pkgpair)) { goto search_aur; } hashmap_insert(hashdb->pkg_from, (void *) pkgname, &hashdb->pkg_from_local); return pkgname; } /* Search provides cache */ provided = hashmap_search(hashdb->provides_cache, (void *) pkgname); if (provided) { return provided; } /* Search local provides */ pkgptr = hashbst_tree_search(hashdb->local_provides, (void *) pkgname, hashdb->local, provides_search); if (pkgptr) { /* Cache in provides and pkg_from */ hashmap_insert(hashdb->provides_cache, (void *) pkgname, (void *) pkgptr->pkgname); hashmap_insert(hashdb->pkg_from, (void *) pkgptr->pkgname, &hashdb->pkg_from_local); return pkgptr->pkgname; } /* Search sync provides tree in local db * TODO: Is there a meaning to this? * local provides are obtained from local packages. * sync provides are obtained from sync packages. * So searching for sync provides in local database is kind of... */ pkgptr = hashbst_tree_search(hashdb->sync_provides, (void *) pkgname, hashdb->local, provides_search); if (pkgptr) { /* Cache in pkg_from */ hashmap_insert(hashdb->pkg_from, (void *) pkgptr->pkgname, &hashdb->pkg_from_local); return pkgptr->pkgname; } /* Search sync db */ if (hash_search(hashdb->sync, &pkgpair)) { hashmap_insert(hashdb->pkg_from, (void *) pkgname, &hashdb->pkg_from_sync); return pkgname; } /* Sync provides */ pkgptr = hashbst_tree_search(hashdb->sync_provides, (void *) pkgname, hashdb->sync, provides_search); if (pkgptr) { hashmap_insert(hashdb->pkg_from, (void *) pkgptr->pkgname, &hashdb->pkg_from_sync); hashmap_insert(hashdb->provides_cache, (void *) pkgname, (void *) pkgptr->pkgname); return pkgptr->pkgname; } search_aur: pkgpair.pkgname = pkgname; pkgpair.pkg = NULL; /* For non RESOLVE_THOROUGH, don't bother downloading PKGBUILD of updated * AUR packages */ if (resolve_lvl != RESOLVE_THOROUGH) { if (hash_search(hashdb->aur, &pkgpair) && !hash_search(hashdb->aur_outdated, (void *) pkgname)) { goto done; } } /* Download and extract from AUR */ if (dl_extract_single_package(curl, pkgname, NULL, 0)) { return NULL; } hash_insert(hashdb->aur_downloaded, (void *) pkgname); hashmap_insert(hashdb->pkg_from, (void *) pkgname, &hashdb->pkg_from_aur); done: return pkgname; }
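/*
 * Editor's illustration -- not part of the original sources.
 * normalize_package() is a memoized resolver: every successful answer is
 * written back into pkg_from / provides_cache so that later queries
 * short-circuit before the expensive searches. The shape of the pattern,
 * reduced to a string-keyed memo table (the API below is hypothetical):
 */
struct memo;                                   /* maps name -> answer */
const char *memo_get(struct memo *m, const char *key);
void memo_put(struct memo *m, const char *key, const char *val);
const char *resolve_uncached(const char *key); /* the expensive path */

static const char *resolve(struct memo *m, const char *key)
{
    const char *hit = memo_get(m, key);
    if (hit)
        return hit;
    hit = resolve_uncached(key);
    if (hit)
        memo_put(m, key, hit);                 /* cache only successes */
    return hit;
}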
/* * ShmemInitStruct -- Create/attach to a structure in shared memory. * * This is called during initialization to find or allocate * a data structure in shared memory. If no other process * has created the structure, this routine allocates space * for it. If it exists already, a pointer to the existing * structure is returned. * * Returns: pointer to the object. *foundPtr is set TRUE if the object was * already in the shmem index (hence, already initialized). * * Note: before Postgres 9.0, this function returned NULL for some failure * cases. Now, it always throws error instead, so callers need not check * for NULL. */ void * ShmemInitStruct(const char *name, Size size, bool *foundPtr) { ShmemIndexEnt *result; void *structPtr; LWLockAcquire(ShmemIndexLock, LW_EXCLUSIVE); if (!ShmemIndex) { PGShmemHeader *shmemseghdr = ShmemSegHdr; /* Must be trying to create/attach to ShmemIndex itself */ Assert(strcmp(name, "ShmemIndex") == 0); if (IsUnderPostmaster) { /* Must be initializing a (non-standalone) backend */ Assert(shmemseghdr->index != NULL); structPtr = shmemseghdr->index; *foundPtr = TRUE; } else { /* * If the shmem index doesn't exist, we are bootstrapping: we must * be trying to init the shmem index itself. * * Notice that the ShmemIndexLock is released before the shmem * index has been initialized. This should be OK because no other * process can be accessing shared memory yet. */ Assert(shmemseghdr->index == NULL); structPtr = ShmemAlloc(size); shmemseghdr->index = structPtr; *foundPtr = FALSE; } LWLockRelease(ShmemIndexLock); return structPtr; } /* look it up in the shmem index */ result = (ShmemIndexEnt *) hash_search(ShmemIndex, name, HASH_ENTER_NULL, foundPtr); if (!result) { LWLockRelease(ShmemIndexLock); ereport(ERROR, (errcode(ERRCODE_OUT_OF_MEMORY), errmsg("could not create ShmemIndex entry for data structure \"%s\"", name))); } if (*foundPtr) { /* * Structure is in the shmem index so someone else has allocated it * already. The size better be the same as the size we are trying to * initialize to, or there is a name conflict (or worse). */ if (result->size != size) { LWLockRelease(ShmemIndexLock); ereport(ERROR, (errmsg("ShmemIndex entry size is wrong for data structure" " \"%s\": expected %zu, actual %zu", name, size, result->size))); } structPtr = result->location; } else { /* It isn't in the table yet. allocate and initialize it */ structPtr = ShmemAllocNoError(size); if (structPtr == NULL) { /* out of memory; remove the failed ShmemIndex entry */ hash_search(ShmemIndex, name, HASH_REMOVE, NULL); LWLockRelease(ShmemIndexLock); ereport(ERROR, (errcode(ERRCODE_OUT_OF_MEMORY), errmsg("not enough shared memory for data structure" " \"%s\" (%zu bytes requested)", name, size))); } result->size = size; result->location = structPtr; } LWLockRelease(ShmemIndexLock); Assert(ShmemAddrIsValid(structPtr)); Assert(structPtr == (void *) CACHELINEALIGN(structPtr)); return structPtr; }
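/*
 * Editor's illustration of the usual caller pattern for ShmemInitStruct():
 * every process calls it with the same name and size; exactly one caller
 * sees found == false and must initialize the structure's contents, while
 * the rest simply attach. (Struct and field names here are hypothetical;
 * extension code would additionally hold AddinShmemInitLock around this.)
 */
typedef struct MySharedState
{
    int         counter;
    /* ... locks, arrays, etc. ... */
} MySharedState;

static MySharedState *MyState = NULL;

static void
MySharedStateInit(void)
{
    bool        found;

    MyState = (MySharedState *)
        ShmemInitStruct("My Shared State", sizeof(MySharedState), &found);
    if (!found)
    {
        /* First time through: we won the race, set up initial contents. */
        MyState->counter = 0;
    }
    /* If found, another process already initialized it; just attach. */
}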
void build_dep_graph(struct graph **graph, struct pw_hashdb *hashdb, alpm_list_t *targets, int resolve_lvl) { if (!graph) { return; } if (!*graph) { *graph = graph_new((pw_hash_fn) sdbm, (pw_hashcmp_fn) strcmp); } struct stack *st = stack_new(sizeof(struct pkgpair)); struct hash_table *resolved = hash_new(HASH_TABLE, (pw_hash_fn) sdbm, (pw_hashcmp_fn) strcmp); struct hash_table *immediate = hash_new(HASH_TABLE, (pw_hash_fn) sdbm, (pw_hashcmp_fn) strcmp); int ret; struct pkgpair pkgpair, deppkg; alpm_list_t *i; alpm_list_t *deps; CURL *curl; curl = curl_easy_new(); if (!curl) { error(PW_ERR_CURL_INIT); return; } /* Push all packages down stack */ for (i = targets; i; i = i->next) { pkgpair.pkgname = i->data; pkgpair.pkg = NULL; stack_push(st, &pkgpair); } while (!stack_empty(st)) { stack_pop(st, &pkgpair); deps = NULL; if (hash_search(resolved, (void *) pkgpair.pkgname)) { goto cleanup_deps; } ret = crawl_resolve(curl, hashdb, &pkgpair, &deps, resolve_lvl); if (ret) { pw_fprintf(PW_LOG_ERROR, stderr, "Error in resolving packages.\n"); goto cleanup; } for (i = deps; i; i = i->next) { deppkg.pkgname = i->data; deppkg.pkg = NULL; /* immediate vs thorough resolve */ should_we_continue_resolving(curl, hashdb, st, &deppkg, resolve_lvl); /* dep --> current */ graph_add_edge(*graph, i->data, (void *) pkgpair.pkgname); } hash_insert(resolved, (void *) pkgpair.pkgname); /* Add immediate dependencies, for pretty printing purposes */ add_immediate_deps(hashdb, pkgpair.pkgname, deps, immediate); cleanup_deps: alpm_list_free(deps); } cleanup: hash_free(resolved); hash_free(immediate); stack_free(st); curl_easy_cleanup(curl); }
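/*
 * Editor's illustration -- not part of the original sources.
 * build_dep_graph() is an iterative depth-first traversal: a work stack of
 * unresolved packages plus a "resolved" set guarantee that each node is
 * expanded only once, even when dependency lists share entries. The control
 * flow, reduced to integer node ids and a caller-supplied edge callback:
 */
#include <stdbool.h>

#define MAX_NODES 128

/* edges(n, out) fills out[] with n's neighbours and returns the count. */
static void visit_all(int start, int (*edges)(int, int out[MAX_NODES]))
{
    bool seen[MAX_NODES] = {false};
    int  stack[MAX_NODES], top = 0;
    int  out[MAX_NODES];

    stack[top++] = start;
    while (top > 0)
    {
        int n = stack[--top];

        if (seen[n])
            continue;               /* like the resolved-hash check above */
        seen[n] = true;

        int cnt = edges(n, out);
        for (int i = 0; i < cnt && top < MAX_NODES; i++)
            stack[top++] = out[i];  /* push deps for later expansion */
    }
}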
/*
 * Handles protocol-level commands that store data
 *
 * Returns false to signify a protocol-level error condition, true otherwise
 */
bool
cache_store(command_action_t command, network_buffer_t buffer)
{
    command_t       cmd = command->command;
    hash_entry_t    he;
    memory_zone_t   zone;
    memory_bucket_t bucket;
    cache_object_t  co;
    int             offset;
    uint32_t        i;

    he = hash_search(command->action.store.hash, command->action.store.key,
        command->action.store.keylen);
    if (he != NULL && cmd == COMMAND_ADD) {
        /* If key is currently in use, add fails */
        command->response.store.response = COMMAND_RESPONSE_NOT_STORED;
        return (true);
    }

    if ((zone = memory_get_zone(command->action.store.size)) == NULL)
        return (false);

    for (i = 0; i < zone->bucket_count; ++i) {
        bucket = zone->buckets[i];

        /* No free entries, continue to next bucket */
        if (bucket->mask == 0)
            continue;

        /* Object fits in a single bucket */
        if ((co = (cache_object_t)malloc(sizeof(*co) +
            sizeof(cache_object_bucket_t))) == NULL)
            return (false);

        /* ffs() is 1-based; convert before clearing the bit or scaling */
        offset = ffs(bucket->mask) - 1;
        bucket->mask &= ~(1u << offset);

        /* Copy the payload out of the network buffer into the slot */
        memcpy((char *)&bucket->bucket + offset * zone->quantum,
            &buffer->buffer[buffer->offset],
            command->action.store.size);

        co->size = command->action.store.size;
        co->flags = command->action.store.flags;
        co->refcnt = 0;
        co->buckets = 1;
        co->data[0].fd = zone->zone_fd;
        co->data[0].offset = bucket->offset + (offset * zone->quantum);

        if (hash_insert(command->action.store.hash,
            command->action.store.key, command->action.store.keylen, co)) {
            command->response.store.response = COMMAND_RESPONSE_STORED;
            print_storage();
            return (true);
        }

        /* Insert failed: release the slot bit and the object */
        bucket->mask |= (1u << offset);
        free(co);
        command->response.store.response = COMMAND_RESPONSE_NOT_STORED;
        return (true);
    }

    return (false);
}
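/*
 * Editor's note -- not part of the original sources. ffs(3) returns a
 * 1-based bit position (0 when no bit is set), which is easy to get wrong
 * when the same value is used both to clear the bit and to scale an offset,
 * as in cache_store() above. A self-contained allocator over a 32-slot
 * bitmap showing the correct conversion:
 */
#include <strings.h>   /* ffs() */
#include <stdint.h>

/* Returns a slot index in [0,31], or -1 if no slot is free. */
static int bitmap_alloc(uint32_t *mask)
{
    int pos = ffs((int) *mask);

    if (pos == 0)
        return -1;                 /* no free slot */
    *mask &= ~(1u << (pos - 1));   /* convert to 0-based before clearing */
    return pos - 1;
}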