/*
 * Ensure owner->stmts has room for at least one more prepared-statement
 * name.
 *
 * Kept separate from the actual insertion so that an out-of-memory
 * failure happens *before* the resource is acquired.
 */
void
ResourceOwnerEnlargePreparedStmts(ResourceOwner owner)
{
	int			newcap;

	if (owner->nstmts < owner->maxstmts)
		return;					/* still room; nothing to do */

	if (owner->stmts == NULL)
	{
		/* first use: start with a small fixed capacity */
		newcap = 16;
		owner->stmts = (char *)
			MemoryContextAlloc(TopMemoryContext, newcap * CNAME_MAXLEN);
	}
	else
	{
		/* grow geometrically */
		newcap = owner->maxstmts * 2;
		owner->stmts = (char *)
			repalloc(owner->stmts, newcap * CNAME_MAXLEN);
	}
	owner->maxstmts = newcap;
}
/*
 * Ensure owner->tupdescs has room for at least one more TupleDesc
 * reference.
 *
 * Kept separate from the actual insertion so that an out-of-memory
 * failure happens *before* the resource is acquired.
 */
void
ResourceOwnerEnlargeTupleDescs(ResourceOwner owner)
{
	int			newcap;

	if (owner->ntupdescs < owner->maxtupdescs)
		return;					/* still room; nothing to do */

	if (owner->tupdescs == NULL)
	{
		/* first use: start with a small fixed capacity */
		newcap = 16;
		owner->tupdescs = (TupleDesc *)
			MemoryContextAlloc(TopMemoryContext, newcap * sizeof(TupleDesc));
	}
	else
	{
		/* grow geometrically */
		newcap = owner->maxtupdescs * 2;
		owner->tupdescs = (TupleDesc *)
			repalloc(owner->tupdescs, newcap * sizeof(TupleDesc));
	}
	owner->maxtupdescs = newcap;
}
/*
 * Ensure owner->catlistrefs has room for at least one more catcache-list
 * reference.
 *
 * Kept separate from the actual insertion so that an out-of-memory
 * failure happens *before* the resource is acquired.
 */
void
ResourceOwnerEnlargeCatCacheListRefs(ResourceOwner owner)
{
	int			newcap;

	if (owner->ncatlistrefs < owner->maxcatlistrefs)
		return;					/* still room; nothing to do */

	if (owner->catlistrefs == NULL)
	{
		/* first use: start with a small fixed capacity */
		newcap = 16;
		owner->catlistrefs = (CatCList **)
			MemoryContextAlloc(TopMemoryContext, newcap * sizeof(CatCList *));
	}
	else
	{
		/* grow geometrically */
		newcap = owner->maxcatlistrefs * 2;
		owner->catlistrefs = (CatCList **)
			repalloc(owner->catlistrefs, newcap * sizeof(CatCList *));
	}
	owner->maxcatlistrefs = newcap;
}
/*
 * Ensure owner->relrefs has room for at least one more relcache
 * reference.
 *
 * Kept separate from the actual insertion so that an out-of-memory
 * failure happens *before* the resource is acquired.
 */
void
ResourceOwnerEnlargeRelationRefs(ResourceOwner owner)
{
	int			newcap;

	if (owner->nrelrefs < owner->maxrelrefs)
		return;					/* still room; nothing to do */

	if (owner->relrefs == NULL)
	{
		/* first use: start with a small fixed capacity */
		newcap = 16;
		owner->relrefs = (Relation *)
			MemoryContextAlloc(TopMemoryContext, newcap * sizeof(Relation));
	}
	else
	{
		/* grow geometrically */
		newcap = owner->maxrelrefs * 2;
		owner->relrefs = (Relation *)
			repalloc(owner->relrefs, newcap * sizeof(Relation));
	}
	owner->maxrelrefs = newcap;
}
/*
 * Ensure owner->files has room for at least one more File reference.
 *
 * Kept separate from the actual insertion so that an out-of-memory
 * failure happens *before* the resource is acquired.
 */
void
ResourceOwnerEnlargeFiles(ResourceOwner owner)
{
	int			newcap;

	if (owner->nfiles < owner->maxfiles)
		return;					/* still room; nothing to do */

	if (owner->files == NULL)
	{
		/* first use: start with a small fixed capacity */
		newcap = 16;
		owner->files = (File *)
			MemoryContextAlloc(TopMemoryContext, newcap * sizeof(File));
	}
	else
	{
		/* grow geometrically */
		newcap = owner->maxfiles * 2;
		owner->files = (File *)
			repalloc(owner->files, newcap * sizeof(File));
	}
	owner->maxfiles = newcap;
}
/*
 * Ensure owner->planrefs has room for at least one more cached-plan
 * reference.
 *
 * Kept separate from the actual insertion so that an out-of-memory
 * failure happens *before* the resource is acquired.
 */
void
ResourceOwnerEnlargePlanCacheRefs(ResourceOwner owner)
{
	int			newcap;

	if (owner->nplanrefs < owner->maxplanrefs)
		return;					/* still room; nothing to do */

	if (owner->planrefs == NULL)
	{
		/* first use: start with a small fixed capacity */
		newcap = 16;
		owner->planrefs = (CachedPlan **)
			MemoryContextAlloc(TopMemoryContext, newcap * sizeof(CachedPlan *));
	}
	else
	{
		/* grow geometrically */
		newcap = owner->maxplanrefs * 2;
		owner->planrefs = (CachedPlan **)
			repalloc(owner->planrefs, newcap * sizeof(CachedPlan *));
	}
	owner->maxplanrefs = newcap;
}
/*
 * Ensure owner->snapshots has room for at least one more snapshot
 * reference.
 *
 * Kept separate from the actual insertion so that an out-of-memory
 * failure happens *before* the resource is acquired.
 */
void
ResourceOwnerEnlargeSnapshots(ResourceOwner owner)
{
	int			newcap;

	if (owner->nsnapshots < owner->maxsnapshots)
		return;					/* still room; nothing to do */

	if (owner->snapshots == NULL)
	{
		/* first use: start with a small fixed capacity */
		newcap = 16;
		owner->snapshots = (Snapshot *)
			MemoryContextAlloc(TopMemoryContext, newcap * sizeof(Snapshot));
	}
	else
	{
		/* grow geometrically */
		newcap = owner->maxsnapshots * 2;
		owner->snapshots = (Snapshot *)
			repalloc(owner->snapshots, newcap * sizeof(Snapshot));
	}
	owner->maxsnapshots = newcap;
}
/*
 * finalizeForecastModel
 *
 * Called once there are no more input tuples to read: lets the forecast
 * algorithm finalize its internal model state, then sizes the model's
 * error-tracking bounds and error array to roughly 10% of the number of
 * training tuples.
 */
void finalizeForecastModel(ModelInfo *model)
{
	int			length = 0;

	/* let the algorithm-specific finalizer run on the model */
	FunctionCall1(&(model->algInfo->algFinalizeForecastModel), PointerGetDatum(model->model));

	length = model->model->trainingTupleCount;

	//MEASURE-CHANGE
	//	length=20;

	/* Only (re)initialize the bounds if they were never set */
	if (model->upperBound == 0)
	{
		/* lowerBound = 10% of training length, but at least 1 */
		if (((int) (length * 0.1)) < 1)
			model->lowerBound = 1;
		else
			model->lowerBound = (((int) (length * 0.1)));

		//XXX: CHANGE FOR MESURING
		/* allocate (or grow) the error array: 10% of length, min 2 slots */
		if (!model->errorArray)
		{
			if (((int) (length * 0.1)) < 2)
				model->errorArray = palloc0(2 * sizeof(double));
			else
				model->errorArray = palloc0(((int) (length * 0.1)) * sizeof(double));
		}
		else if (model->upperBound < ((int) (length * 0.1)))
		{
			/*
			 * NOTE(review): upperBound is known to be 0 in this branch, so
			 * this condition reduces to length >= 10.  Also, repalloc does
			 * not zero the newly added slots; only the last one is cleared
			 * here — confirm intermediate slots are initialized elsewhere.
			 */
			model->errorArray = repalloc(model->errorArray, ((int) (length * 0.1)) * sizeof(double));
			model->errorArray[((int) (length * 0.1)) - 1] = 0.0;
		}

		/* upperBound = 10% of training length, but at least 2 */
		if (((int) (length * 0.1)) < 2)
			model->upperBound = 2;
		else
			model->upperBound = ((int) (length * 0.1));
	}
}
/*
 * Append a key/value pair to an IStorePairs collection, growing the
 * backing array as needed and tracking the serialized-output length.
 */
void
istore_pairs_insert(IStorePairs *pairs, int32 key, int32 val)
{
	IStorePair *slot;

	/* Grow the pair array if it is full. */
	if (pairs->size == pairs->used)
	{
		if (pairs->used == PAIRS_MAX(IStorePair))
			elog(ERROR, "istore can't have more than %lu keys", PAIRS_MAX(IStorePair));

		pairs->size *= 2;

		/*
		 * Clamp to PAIRS_MAX: the doubling above may have wrapped around or
		 * grown past the hard limit, so fall back to the maximum allowed.
		 */
		if (pairs->size < pairs->used || pairs->size > PAIRS_MAX(IStorePair))
			pairs->size = PAIRS_MAX(IStorePair);

		pairs->pairs = repalloc(pairs->pairs, pairs->size * sizeof(IStorePair));
	}

	slot = pairs->pairs + pairs->used;
	slot->key = key;
	slot->val = val;

	/* Track the text-output length; trap accumulated overflow. */
	pairs->buflen += digits32(key) + digits32(val) + BUFLEN_OFFSET;
	if (pairs->buflen < 0)
		elog(ERROR, "istore buffer overflow");

	pairs->used++;
}
/*
 * Append one heap item pointer to an entry's accumulated posting list.
 * For robustness, it checks that item pointers arrive in order; if an
 * out-of-order pointer is seen, the list is flagged for later sorting.
 */
static void
ginInsertData(BuildAccumulator *accum, EntryAccumulator *entry, ItemPointer heapptr)
{
	/* Double the list if full; account the old size as extra memory used */
	if (entry->number >= entry->length)
	{
		accum->allocatedMemory += sizeof(ItemPointerData) * entry->length;
		entry->length *= 2;
		entry->list = (ItemPointerData *) repalloc(entry->list,
												   sizeof(ItemPointerData) * entry->length);
	}

	if (entry->shouldSort == FALSE)
	{
		/*
		 * Compare against the current last element.  NOTE(review): this
		 * reads entry->list[entry->number - 1]; callers presumably never
		 * reach here with number == 0 (the first pointer is stored at
		 * entry creation) — confirm against the call sites.
		 */
		int			res = compareItemPointers(entry->list + entry->number - 1, heapptr);

		Assert(res != 0);

		if (res > 0)
			entry->shouldSort = TRUE;
	}

	entry->list[entry->number] = *heapptr;
	entry->number++;
}
/*
 * Append a Datum to a GinEntries buffer, growing the buffer as needed,
 * and return the index at which the entry was stored.
 */
static int
add_gin_entry(GinEntries *entries, Datum entry)
{
	int			pos = entries->count;

	/* Out of space?  Allocate initially, or double the buffer. */
	if (entries->count >= entries->allocated)
	{
		if (entries->allocated == 0)
		{
			entries->allocated = 8;
			entries->buf = palloc(sizeof(Datum) * entries->allocated);
		}
		else
		{
			entries->allocated *= 2;
			entries->buf = repalloc(entries->buf,
									sizeof(Datum) * entries->allocated);
		}
	}

	entries->buf[entries->count++] = entry;

	return pos;
}
/* * Collect invalidation messages into SharedInvalidMessagesArray array. */ static void MakeSharedInvalidMessagesArray(const SharedInvalidationMessage *msgs, int n) { /* * Initialise array first time through in each commit */ if (SharedInvalidMessagesArray == NULL) { maxSharedInvalidMessagesArray = FIRSTCHUNKSIZE; numSharedInvalidMessagesArray = 0; /* * Although this is being palloc'd we don't actually free it directly. * We're so close to EOXact that we now we're going to lose it anyhow. */ SharedInvalidMessagesArray = palloc(maxSharedInvalidMessagesArray * sizeof(SharedInvalidationMessage)); } if ((numSharedInvalidMessagesArray + n) > maxSharedInvalidMessagesArray) { while ((numSharedInvalidMessagesArray + n) > maxSharedInvalidMessagesArray) maxSharedInvalidMessagesArray *= 2; SharedInvalidMessagesArray = repalloc(SharedInvalidMessagesArray, maxSharedInvalidMessagesArray * sizeof(SharedInvalidationMessage)); } /* * Append the next chunk onto the array */ memcpy(SharedInvalidMessagesArray + numSharedInvalidMessagesArray, msgs, n * sizeof(SharedInvalidationMessage)); numSharedInvalidMessagesArray += n; }
/*
 * Ensure owner->dsms has room for at least one more dynamic shared
 * memory segment reference.
 *
 * Kept separate from the actual insertion so that an out-of-memory
 * failure happens *before* the resource is acquired.
 */
void
ResourceOwnerEnlargeDSMs(ResourceOwner owner)
{
	int			newcap;

	if (owner->ndsms < owner->maxdsms)
		return;					/* still room; nothing to do */

	if (owner->dsms == NULL)
	{
		/* first use: start with a small fixed capacity */
		newcap = 16;
		owner->dsms = (dsm_segment **)
			MemoryContextAlloc(TopMemoryContext, newcap * sizeof(dsm_segment *));
	}
	else
	{
		/* grow geometrically */
		newcap = owner->maxdsms * 2;
		owner->dsms = (dsm_segment **)
			repalloc(owner->dsms, newcap * sizeof(dsm_segment *));
	}
	owner->maxdsms = newcap;
}
/*
 * Ensure owner->buffers has room for at least one more buffer reference.
 *
 * Kept separate from the actual insertion so that an out-of-memory
 * failure happens *before* the resource is acquired.
 *
 * owner == NULL is allowed because the bufmgr is sometimes invoked
 * outside any transaction (for example, during WAL recovery).
 */
void
ResourceOwnerEnlargeBuffers(ResourceOwner owner)
{
	int			newcap;

	if (owner == NULL || owner->nbuffers < owner->maxbuffers)
		return;					/* no owner, or still room; nothing to do */

	if (owner->buffers == NULL)
	{
		/* first use: start with a small fixed capacity */
		newcap = 16;
		owner->buffers = (Buffer *)
			MemoryContextAlloc(TopMemoryContext, newcap * sizeof(Buffer));
	}
	else
	{
		/* grow geometrically */
		newcap = owner->maxbuffers * 2;
		owner->buffers = (Buffer *)
			repalloc(owner->buffers, newcap * sizeof(Buffer));
	}
	owner->maxbuffers = newcap;
}
/*
 * shmem_startup hook: allocate or attach to shared memory,
 * then load any pre-existing statistics from file.
 *
 * Runs in the postmaster (or standalone backend) at shared-memory
 * initialization time, and again in each backend attaching to the
 * segment under EXEC_BACKEND-style restarts.
 */
static void
pgss_shmem_startup(void)
{
	bool		found;
	HASHCTL		info;
	FILE	   *file;
	uint32		header;
	int32		num;
	int32		i;
	int			query_size;
	int			buffer_size;
	char	   *buffer = NULL;

	/* chain to any previously-installed startup hook */
	if (prev_shmem_startup_hook)
		prev_shmem_startup_hook();

	/* reset in case this is a restart within the postmaster */
	pgss = NULL;
	pgss_hash = NULL;

	/*
	 * Create or attach to the shared memory state, including hash table
	 */
	LWLockAcquire(AddinShmemInitLock, LW_EXCLUSIVE);

	pgss = ShmemInitStruct("pg_stat_statements", sizeof(pgssSharedState), &found);

	if (!found)
	{
		/* First time through ... */
		pgss->lock = LWLockAssign();
		pgss->query_size = pgstat_track_activity_query_size;
	}

	/* Be sure everyone agrees on the hash table entry size */
	query_size = pgss->query_size;

	memset(&info, 0, sizeof(info));
	info.keysize = sizeof(pgssHashKey);
	/* entry holds a variable-length query text after the fixed fields */
	info.entrysize = offsetof(pgssEntry, query) +query_size;
	info.hash = pgss_hash_fn;
	info.match = pgss_match_fn;
	pgss_hash = ShmemInitHash("pg_stat_statements hash",
							  pgss_max, pgss_max,
							  &info,
							  HASH_ELEM | HASH_FUNCTION | HASH_COMPARE);

	LWLockRelease(AddinShmemInitLock);

	/*
	 * If we're in the postmaster (or a standalone backend...), set up a shmem
	 * exit hook to dump the statistics to disk.
	 */
	if (!IsUnderPostmaster)
		on_shmem_exit(pgss_shmem_shutdown, (Datum) 0);

	/*
	 * Attempt to load old statistics from the dump file, if this is the first
	 * time through and we weren't told not to.
	 */
	if (found || !pgss_save)
		return;

	/*
	 * Note: we don't bother with locks here, because there should be no other
	 * processes running when this code is reached.
	 */
	file = AllocateFile(PGSS_DUMP_FILE, PG_BINARY_R);
	if (file == NULL)
	{
		if (errno == ENOENT)
			return;				/* ignore not-found error */
		goto error;
	}

	buffer_size = query_size;
	buffer = (char *) palloc(buffer_size);

	/* validate file header and read the entry count */
	if (fread(&header, sizeof(uint32), 1, file) != 1 ||
		header != PGSS_FILE_HEADER ||
		fread(&num, sizeof(int32), 1, file) != 1)
		goto error;

	for (i = 0; i < num; i++)
	{
		pgssEntry	temp;
		pgssEntry  *entry;

		/* read the fixed-size part of the entry (everything before mutex) */
		if (fread(&temp, offsetof(pgssEntry, mutex), 1, file) != 1)
			goto error;

		/* Encoding is the only field we can easily sanity-check */
		if (!PG_VALID_BE_ENCODING(temp.key.encoding))
			goto error;

		/* Previous incarnation might have had a larger query_size */
		if (temp.key.query_len >= buffer_size)
		{
			buffer = (char *) repalloc(buffer, temp.key.query_len + 1);
			buffer_size = temp.key.query_len + 1;
		}

		if (fread(buffer, 1, temp.key.query_len, file) != temp.key.query_len)
			goto error;
		buffer[temp.key.query_len] = '\0';

		/* Clip to available length if needed */
		if (temp.key.query_len >= query_size)
			temp.key.query_len = pg_encoding_mbcliplen(temp.key.encoding,
													   buffer,
													   temp.key.query_len,
													   query_size - 1);
		temp.key.query_ptr = buffer;

		/* make the hashtable entry (discards old entries if too many) */
		entry = entry_alloc(&temp.key);

		/* copy in the actual stats */
		entry->counters = temp.counters;
	}

	pfree(buffer);
	FreeFile(file);
	return;

error:
	ereport(LOG,
			(errcode_for_file_access(),
			 errmsg("could not read pg_stat_statement file \"%s\": %m",
					PGSS_DUMP_FILE)));
	if (buffer)
		pfree(buffer);
	if (file)
		FreeFile(file);
	/* If possible, throw away the bogus file; ignore any error */
	unlink(PGSS_DUMP_FILE);
}
/*
 * Form a tuple for entry tree.
 *
 * If the tuple would be too big to be stored, function throws a suitable
 * error if errorTooBig is TRUE, or returns NULL if errorTooBig is FALSE.
 *
 * See src/backend/access/gin/README for a description of the index tuple
 * format that is being built here.  We build on the assumption that we
 * are making a leaf-level key entry containing a posting list of nipd items.
 * If the caller is actually trying to make a posting-tree entry, non-leaf
 * entry, or pending-list entry, it should pass dataSize = 0 and then overwrite
 * the t_tid fields as necessary.  In any case, 'data' can be NULL to skip
 * filling in the posting list; the caller is responsible for filling it
 * afterwards if data = NULL and nipd > 0.
 */
IndexTuple
GinFormTuple(GinState *ginstate,
			 OffsetNumber attnum, Datum key, GinNullCategory category,
			 Pointer data, Size dataSize, int nipd,
			 bool errorTooBig)
{
	Datum		datums[2];
	bool		isnull[2];
	IndexTuple	itup;
	uint32		newsize;

	/* Build the basic tuple: optional column number, plus key datum */
	if (ginstate->oneCol)
	{
		datums[0] = key;
		isnull[0] = (category != GIN_CAT_NORM_KEY);
	}
	else
	{
		datums[0] = UInt16GetDatum(attnum);
		isnull[0] = false;
		datums[1] = key;
		isnull[1] = (category != GIN_CAT_NORM_KEY);
	}

	itup = index_form_tuple(ginstate->tupdesc[attnum - 1], datums, isnull);

	/*
	 * Determine and store offset to the posting list, making sure there is
	 * room for the category byte if needed.
	 *
	 * Note: because index_form_tuple MAXALIGNs the tuple size, there may well
	 * be some wasted pad space.  Is it worth recomputing the data length to
	 * prevent that?  That would also allow us to Assert that the real data
	 * doesn't overlap the GinNullCategory byte, which this code currently
	 * takes on faith.
	 */
	newsize = IndexTupleSize(itup);

	if (IndexTupleHasNulls(itup))
	{
		uint32		minsize;

		Assert(category != GIN_CAT_NORM_KEY);
		/* make sure the category byte fits past the null bitmap */
		minsize = GinCategoryOffset(itup, ginstate) + sizeof(GinNullCategory);
		newsize = Max(newsize, minsize);
	}

	newsize = SHORTALIGN(newsize);

	GinSetPostingOffset(itup, newsize);
	GinSetNPosting(itup, nipd);

	/*
	 * Add space needed for posting list, if any.  Then check that the tuple
	 * won't be too big to store.
	 */
	newsize += dataSize;
	newsize = MAXALIGN(newsize);

	if (newsize > GinMaxItemSize)
	{
		if (errorTooBig)
			ereport(ERROR,
					(errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED),
					 errmsg("index row size %zu exceeds maximum %zu for index \"%s\"",
							(Size) newsize, (Size) GinMaxItemSize,
							RelationGetRelationName(ginstate->index))));
		pfree(itup);
		return NULL;
	}

	/*
	 * Resize tuple if needed
	 */
	if (newsize != IndexTupleSize(itup))
	{
		itup = repalloc(itup, newsize);

		/*
		 * PostgreSQL 9.3 and earlier did not clear this new space, so we
		 * might find uninitialized padding when reading tuples from disk.
		 * (IndexTupleSize still reports the old size here, since t_info is
		 * updated only below.)
		 */
		memset((char *) itup + IndexTupleSize(itup),
			   0, newsize - IndexTupleSize(itup));

		/* set new size in tuple header */
		itup->t_info &= ~INDEX_SIZE_MASK;
		itup->t_info |= newsize;
	}

	/*
	 * Copy in the posting list, if provided
	 */
	if (data)
	{
		char	   *ptr = GinGetPosting(itup);

		memcpy(ptr, data, dataSize);
	}

	/*
	 * Insert category byte, if needed
	 */
	if (category != GIN_CAT_NORM_KEY)
	{
		Assert(IndexTupleHasNulls(itup));
		GinSetNullCategory(itup, ginstate, category);
	}
	return itup;
}
/*
 * read_dictionary
 *
 * Load a synonym dictionary file ("<filename>.rules") into d->syn.
 * Each line is lowercased and split into words; every word (the first
 * only if d->matchorig, the rest only if d->matchsynonyms) becomes a
 * key mapping to the full lowercased line.  The resulting array is
 * sorted so later lookups can binary-search it.
 */
static void
read_dictionary(DictSyn *d, char *filename)
{
	char	   *real_filename = get_tsearch_config_filename(filename, "rules");
	tsearch_readline_state trst;
	char	   *line;
	int			cur = 0;

	if (!tsearch_readline_begin(&trst, real_filename))
		ereport(ERROR,
				(errcode(ERRCODE_CONFIG_FILE_ERROR),
				 errmsg("could not open synonym file \"%s\": %m",
						real_filename)));

	while ((line = tsearch_readline(&trst)) != NULL)
	{
		char	   *value;
		char	   *key;
		char	   *pos;
		char	   *end;

		if (*line == '\0')
		{
			/* skip empty lines; free them too (previously leaked) */
			pfree(line);
			continue;
		}

		value = lowerstr(line);
		pfree(line);

		pos = value;
		while ((key = find_word(pos, &end)) != NULL)
		{
			/* Enlarge syn structure if full */
			if (cur == d->len)
			{
				d->len = (d->len > 0) ? 2 * d->len : 16;
				if (d->syn)
					d->syn = (Syn *) repalloc(d->syn, sizeof(Syn) * d->len);
				else
					d->syn = (Syn *) palloc(sizeof(Syn) * d->len);
			}

			/* Save first word only if we will match it */
			if (pos != value || d->matchorig)
			{
				d->syn[cur].key = pnstrdup(key, end - key);
				d->syn[cur].value = pstrdup(value);
				cur++;
			}

			pos = end;

			/* Don't bother scanning synonyms if we will not match them */
			if (!d->matchsynonyms)
				break;
		}

		pfree(value);
	}

	tsearch_readline_end(&trst);

	d->len = cur;
	if (cur > 1)
		qsort(d->syn, d->len, sizeof(Syn), compare_syn);

	pfree(real_filename);
}
/*
 * compute_sql_asm_tsp
 *
 * Fetch edge rows via an SPI cursor over the user-supplied SQL, then hand
 * them to processATSPData() to compute a TSP tour starting at
 * sourceVertexId.  On success *path / *pathCount describe the tour
 * (allocated in the caller's memory context).  Returns finish()'s result;
 * most error paths raise via elog(ERROR) instead of returning.
 */
static int
compute_sql_asm_tsp(char* sql, int sourceVertexId, bool reverseCost,
					tspPathElementType **path, int *pathCount)
{
	int			SPIcode;
	void	   *SPIplan;
	Portal		SPIportal;
	bool		moredata = TRUE;
	int			ntuples;
	tspEdgeType *edges = NULL;
	int			totalTuples = 0;

	DBG("Sql %s source %d reverse %s", sql, sourceVertexId, reverseCost == true ? "true" : "false");

	/* column indexes start at -1 meaning "not yet resolved" */
	tspEdgeType edgeColumns = {.id = -1, .source = -1, .target = -1, .cost = -1};
	char	   *errMesg;
	int			ret = -1;

	/* buffer for an error message from processATSPData */
	errMesg = palloc(sizeof(char) * 300);

	DBG("start compute_sql_asm_tsp %i", *pathCount);

	SPIcode = SPI_connect();
	if (SPIcode != SPI_OK_CONNECT)
	{
		elog(ERROR, "compute_sql_asm_tsp: couldn't open a connection to SPI");
		return -1;
	}

	SPIplan = SPI_prepare(sql, 0, NULL);
	if (SPIplan == NULL)
	{
		elog(ERROR, "compute_sql_asm_tsp: couldn't create query plan via SPI");
		return -1;
	}

	if ((SPIportal = SPI_cursor_open(NULL, SPIplan, NULL, NULL, true)) == NULL)
	{
		elog(ERROR, "compute_sql_asm_tsp: SPI_cursor_open('%s') returns NULL", sql);
		return -1;
	}

	/* fetch edge rows in TUPLIMIT-sized batches until the cursor is empty */
	while (moredata == TRUE)
	{
		SPI_cursor_fetch(SPIportal, TRUE, TUPLIMIT);

		/* resolve column numbers from the first batch's tuple descriptor */
		if (edgeColumns.id == -1)
		{
			if (!fetchEdgeTspColumns(SPI_tuptable, &edgeColumns, reverseCost))
				return finish(SPIcode, ret);
		}

		ntuples = SPI_processed;
		totalTuples += ntuples;

		/*
		 * NOTE(review): palloc/repalloc never return NULL (they elog on
		 * failure), so the NULL check below is effectively dead code.
		 */
		if (!edges)
		{
			edges = palloc(totalTuples * sizeof(tspEdgeType));
		}
		else
		{
			edges = repalloc(edges, totalTuples * sizeof(tspEdgeType));
		}

		if (edges == NULL)
		{
			elog(ERROR, "Out of memory");
			return finish(SPIcode, ret);
		}

		if (ntuples > 0)
		{
			int			t;
			SPITupleTable *tuptable = SPI_tuptable;
			TupleDesc	tupdesc = SPI_tuptable->tupdesc;

			for (t = 0; t < ntuples; t++)
			{
				HeapTuple	tuple = tuptable->vals[t];

				fetchEdgeTsp(&tuple, &tupdesc, &edgeColumns,
							 &edges[totalTuples - ntuples + t], reverseCost);
			}
			SPI_freetuptable(tuptable);
		}
		else
		{
			moredata = FALSE;
		}
	}

	DBG("Total %i tuples", totalTuples);
	DBG("Calling tsp functions total tuples <%i> initial path count <%i>",
		totalTuples, *pathCount);

	/* run the actual TSP solver over the collected edges */
	ret = processATSPData(edges, totalTuples, sourceVertexId, reverseCost, path,
						  pathCount, errMesg);

	DBG("SIZE %i elements to process", *pathCount);

	if (!ret)
	{
		elog(ERROR, "Error computing path: %s", errMesg);
	}
	return finish(SPIcode, ret);
}

PG_FUNCTION_INFO_V1(sql_asm_tsp);

/*
 * sql_asm_tsp
 *
 * Set-returning SQL entry point: computes a TSP tour via
 * compute_sql_asm_tsp() on the first call, then streams one
 * (seq, vertexId, 0, cost) row of type pgr_costResult per call.
 */
Datum
sql_asm_tsp(PG_FUNCTION_ARGS)
{
	FuncCallContext *funcctx;
	int			callCntr;
	int			maxCalls;
	TupleDesc	tupleDesc;
	tspPathElementType *path;

	/* stuff done only on the first call of the function */
	if (SRF_IS_FIRSTCALL())
	{
		MemoryContext oldcontext;
		int			pathCount = 0;
		int			ret;

		/* create a function context for cross-call persistence */
		funcctx = SRF_FIRSTCALL_INIT();

		/* switch to memory context appropriate for multiple function calls */
		oldcontext = MemoryContextSwitchTo(funcctx->multi_call_memory_ctx);

		ret = compute_sql_asm_tsp(text2char(PG_GETARG_TEXT_P(0)),
								  PG_GETARG_INT32(1),
								  PG_GETARG_BOOL(2),
								  &path, &pathCount);

#ifdef DEBUG
		if (ret >= 0)
		{
			int			i;

			for (i = 0; i < pathCount; i++)
			{
				DBG("Step # %i vertexId  %i  cost %.4f", i,
					path[i].vertexId, path[i].cost);
			}
		}
#endif

		/* total number of tuples to be returned */
		funcctx->max_calls = pathCount;
		funcctx->user_fctx = path;

		DBG("Path count %i", pathCount);

		funcctx->tuple_desc = BlessTupleDesc(RelationNameGetTupleDesc("pgr_costResult"));

		MemoryContextSwitchTo(oldcontext);
	}

	funcctx = SRF_PERCALL_SETUP();

	callCntr = funcctx->call_cntr;
	maxCalls = funcctx->max_calls;
	tupleDesc = funcctx->tuple_desc;
	path = (tspPathElementType*) funcctx->user_fctx;

	if (callCntr < maxCalls)
	{
		/* do when there is more left to send */
		HeapTuple	tuple;
		Datum		result;
		Datum	   *values;
		char	   *nulls;

		values = palloc(4 * sizeof(Datum));
		nulls = palloc(4 * sizeof(char));

		values[0] = Int32GetDatum(callCntr);
		nulls[0] = ' ';
		values[1] = Int32GetDatum(path[callCntr].vertexId);
		nulls[1] = ' ';
		values[2] = Float8GetDatum(0); // edge id not supplied by this method
		nulls[2] = ' ';
		values[3] = Float8GetDatum(path[callCntr].cost);
		nulls[3] = ' ';

		/*
		 * NOTE(review): heap_formtuple (char-array null flags) is the
		 * pre-9.0 API, removed in later PostgreSQL releases in favor of
		 * heap_form_tuple — confirm the target server version.
		 */
		tuple = heap_formtuple(tupleDesc, values, nulls);

		/* make the tuple into a datum */
		result = HeapTupleGetDatum(tuple);

		/* clean up (this is not really necessary) */
		pfree(values);
		pfree(nulls);

		SRF_RETURN_NEXT(funcctx, result);
	}
	else
	{
		/* do when there is no more left */
		SRF_RETURN_DONE(funcctx);
	}
}
/*
 * setup_regexp_matches --- do the initial matching for regexp_matches()
 * or regexp_split()
 *
 * To avoid having to re-find the compiled pattern on each call, we do
 * all the matching in one swoop.  The returned regexp_matches_ctx contains
 * the locations of all the substrings matching the pattern.
 *
 * The three bool parameters have only two patterns (one for each caller)
 * but it seems clearer to distinguish the functionality this way than to
 * key it all off one "is_split" flag.
 */
static regexp_matches_ctx *
setup_regexp_matches(text *orig_str, text *pattern, text *flags,
					 Oid collation,
					 bool force_glob, bool use_subpatterns,
					 bool ignore_degenerate)
{
	regexp_matches_ctx *matchctx = palloc0(sizeof(regexp_matches_ctx));
	int			orig_len;
	pg_wchar   *wide_str;
	int			wide_len;
	pg_re_flags re_flags;
	regex_t    *cpattern;
	regmatch_t *pmatch;
	int			pmatch_len;
	int			array_len;
	int			array_idx;
	int			prev_match_end;
	int			start_search;

	/* save original string --- we'll extract result substrings from it */
	matchctx->orig_str = orig_str;

	/* convert string to pg_wchar form for matching */
	orig_len = VARSIZE_ANY_EXHDR(orig_str);
	wide_str = (pg_wchar *) palloc(sizeof(pg_wchar) * (orig_len + 1));
	wide_len = pg_mb2wchar_with_len(VARDATA_ANY(orig_str), wide_str, orig_len);

	/* determine options */
	parse_re_flags(&re_flags, flags);
	if (force_glob)
	{
		/* user mustn't specify 'g' for regexp_split */
		if (re_flags.glob)
			ereport(ERROR,
					(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
					 errmsg("regexp_split does not support the global option")));
		/* but we find all the matches anyway */
		re_flags.glob = true;
	}

	/* set up the compiled pattern */
	cpattern = RE_compile_and_cache(pattern, re_flags.cflags, collation);

	/* do we want to remember subpatterns? */
	if (use_subpatterns && cpattern->re_nsub > 0)
	{
		matchctx->npatterns = cpattern->re_nsub;
		pmatch_len = cpattern->re_nsub + 1;
	}
	else
	{
		use_subpatterns = false;
		matchctx->npatterns = 1;
		pmatch_len = 1;
	}

	/* temporary output space for RE package */
	pmatch = palloc(sizeof(regmatch_t) * pmatch_len);

	/* the real output space (grown dynamically if needed) */
	array_len = re_flags.glob ? 256 : 32;
	matchctx->match_locs = (int *) palloc(sizeof(int) * array_len);
	array_idx = 0;

	/* search for the pattern, perhaps repeatedly */
	prev_match_end = 0;
	start_search = 0;
	while (RE_wchar_execute(cpattern, wide_str, wide_len, start_search,
							pmatch_len, pmatch))
	{
		/*
		 * If requested, ignore degenerate matches, which are zero-length
		 * matches occurring at the start or end of a string or just after a
		 * previous match.
		 */
		if (!ignore_degenerate ||
			(pmatch[0].rm_so < wide_len &&
			 pmatch[0].rm_eo > prev_match_end))
		{
			/* enlarge output space if needed */
			while (array_idx + matchctx->npatterns * 2 > array_len)
			{
				array_len *= 2;
				matchctx->match_locs = (int *) repalloc(matchctx->match_locs,
														sizeof(int) * array_len);
			}

			/* save this match's locations (start/end pairs in wchar units) */
			if (use_subpatterns)
			{
				int			i;

				for (i = 1; i <= matchctx->npatterns; i++)
				{
					matchctx->match_locs[array_idx++] = pmatch[i].rm_so;
					matchctx->match_locs[array_idx++] = pmatch[i].rm_eo;
				}
			}
			else
			{
				matchctx->match_locs[array_idx++] = pmatch[0].rm_so;
				matchctx->match_locs[array_idx++] = pmatch[0].rm_eo;
			}
			matchctx->nmatches++;
		}
		prev_match_end = pmatch[0].rm_eo;

		/* if not glob, stop after one match */
		if (!re_flags.glob)
			break;

		/*
		 * Advance search position.  Normally we start the next search at the
		 * end of the previous match; but if the match was of zero length, we
		 * have to advance by one character, or we'd just find the same match
		 * again.
		 */
		start_search = prev_match_end;
		if (pmatch[0].rm_so == pmatch[0].rm_eo)
			start_search++;
		if (start_search > wide_len)
			break;
	}

	/* Clean up temp storage */
	pfree(wide_str);
	pfree(pmatch);

	return matchctx;
}
/*
 * parse_hstore --- parse the text representation of an hstore value.
 *
 * Runs a small state machine over state->begin, filling state->pairs /
 * state->pcur with the key/value pairs found.  States:
 *   WKEY - expecting a key word
 *   WEQ  - expecting '=' (first half of "=>")
 *   WGT  - expecting '>' (second half of "=>")
 *   WVAL - expecting a value word
 *   WDEL - expecting ',' delimiter or end of string
 * An unquoted value spelled exactly "null" (any case) becomes SQL NULL.
 */
static void
parse_hstore(HSParser *state)
{
	int			st = WKEY;
	bool		escaped = false;

	state->plen = 16;
	state->pairs = (Pairs *) palloc(sizeof(Pairs) * state->plen);
	state->pcur = 0;
	state->ptr = state->begin;
	state->word = NULL;

	while (1)
	{
		if (st == WKEY)
		{
			/* end of input before a key simply terminates the parse */
			if (!get_val(state, false, &escaped))
				return;
			/* enlarge pairs array if full */
			if (state->pcur >= state->plen)
			{
				state->plen *= 2;
				state->pairs = (Pairs *) repalloc(state->pairs, sizeof(Pairs) * state->plen);
			}
			state->pairs[state->pcur].key = state->word;
			state->pairs[state->pcur].keylen = hstoreCheckKeyLen(state->cur - state->word);
			state->pairs[state->pcur].val = NULL;
			state->word = NULL;
			st = WEQ;
		}
		else if (st == WEQ)
		{
			if (*(state->ptr) == '=')
			{
				st = WGT;
			}
			else if (*(state->ptr) == '\0')
			{
				elog(ERROR, "Unexpected end of string");
			}
			else if (!isspace((unsigned char) *(state->ptr)))
			{
				elog(ERROR, "Syntax error near '%c' at position %d", *(state->ptr), (int4) (state->ptr - state->begin));
			}
		}
		else if (st == WGT)
		{
			if (*(state->ptr) == '>')
			{
				st = WVAL;
			}
			else if (*(state->ptr) == '\0')
			{
				elog(ERROR, "Unexpected end of string");
			}
			else
			{
				elog(ERROR, "Syntax error near '%c' at position %d", *(state->ptr), (int4) (state->ptr - state->begin));
			}
		}
		else if (st == WVAL)
		{
			/* a value is mandatory after "=>" */
			if (!get_val(state, true, &escaped))
				elog(ERROR, "Unexpected end of string");
			state->pairs[state->pcur].val = state->word;
			state->pairs[state->pcur].vallen = hstoreCheckValLen(state->cur - state->word);
			state->pairs[state->pcur].isnull = false;
			state->pairs[state->pcur].needfree = true;
			/* unquoted, exactly 4 chars long, spelled "null" => SQL NULL */
			if (state->cur - state->word == 4 && !escaped)
			{
				state->word[4] = '\0';
				if (0 == pg_strcasecmp(state->word, "null"))
					state->pairs[state->pcur].isnull = true;
			}
			state->word = NULL;
			state->pcur++;
			st = WDEL;
		}
		else if (st == WDEL)
		{
			if (*(state->ptr) == ',')
			{
				st = WKEY;
			}
			else if (*(state->ptr) == '\0')
			{
				return;
			}
			else if (!isspace((unsigned char) *(state->ptr)))
			{
				elog(ERROR, "Syntax error near '%c' at position %d", *(state->ptr), (int4) (state->ptr - state->begin));
			}
		}
		else
			elog(ERROR, "Unknown state %d at line %d in file '%s'", st, __LINE__, __FILE__);

		state->ptr++;
	}
}
/*
 * Add a regular expression pattern to a RegArray.
 *
 * The pattern is compiled case-insensitively as an extended regex and is
 * anchored at both ends (a leading '^' / trailing '$' is added unless the
 * caller already supplied one).  Returns 0 on success, -1 on failure
 * (NULL arguments or an uncompilable pattern; a WARNING is logged).
 */
int
add_regex_array(RegArray *ar, char *pattern)
{
	int			regex_flags;
	regex_t    *regex;
	char	   *pat;
	int			len;

	if (ar == NULL)
	{
		ereport(WARNING,
				(errmsg("failed to add regex pattern, regex array is NULL")));
		return -1;
	}

	if (pattern == NULL)
	{
		ereport(WARNING,
				(errmsg("failed to add regex pattern, regex pattern is NULL")));
		return -1;
	}

	len = strlen(pattern);

	/* Force case insensitive pattern matching */
	regex_flags = REG_NOSUB;
	regex_flags |= REG_ICASE;

	/* Add extended regex search */
	regex_flags |= REG_EXTENDED;

	/*
	 * Build the anchored pattern in one step.  snprintf always
	 * NUL-terminates, unlike the strncpy/strncat sequence this replaces.
	 * Worst case is '^' + pattern + '$' + NUL = len + 3 bytes.
	 */
	pat = palloc(sizeof(char) * (len + 3));
	snprintf(pat, len + 3, "%s%s%s",
			 (pattern[0] == '^') ? "" : "^",
			 pattern,
			 (len > 0 && pattern[len - 1] == '$') ? "" : "$");

	/* Compile our regex */
	regex = palloc(sizeof(regex_t));
	if (regcomp(regex, pat, regex_flags) != 0)
	{
		ereport(WARNING,
				(errmsg("failed to add regex pattern, invalid regex pattern: \"%s\" (%s)",
						pattern, pat)));
		pfree(regex);
		pfree(pat);
		return -1;
	}
	pfree(pat);

	/* Grow the compiled-regex pointer array if full */
	if (ar->pos == ar->size)
	{
		ar->size += AR_ALLOC_UNIT;
		ar->regex = repalloc(ar->regex, sizeof(regex_t *) * ar->size);
	}
	ar->regex[ar->pos] = regex;
	ar->pos++;

	return 0;
}
/*
 * SPI_repalloc --- grow an allocation made via the SPI allocation API.
 *
 * Nowadays this is just a thin wrapper around repalloc, which itself
 * keeps the chunk in whatever memory context it was allocated in.
 */
void *
SPI_repalloc(void *pointer, Size size)
{
	/* No longer need to worry which context chunk was in... */
	return repalloc(pointer, size);
}
/*
 * OutputValue --- escape the NUL-terminated string 'key' into 'buf'
 * (of the given size), replacing backslash, space, newline and single
 * quote with backslash escape sequences.
 *
 * If the escaped output outgrows 'buf', a palloc'd buffer is used (and
 * grown with repalloc) instead; callers can detect that by comparing
 * the returned pointer with 'buf'.
 */
static char *
OutputValue(char *key, char *buf, int size)
{
	int			i = 0;
	char	   *out = buf;
	char	   *subst = NULL;
	int			slen = 0;

	size--;						/* reserve room for the terminating NUL */
	for (;;)
	{
		switch (*key)
		{
			case '\\':
				subst = "\\\\";
				slen = 2;
				break;
			case ' ':
				/*
				 * NOTE(review): space is emitted as "\011", which is the
				 * octal escape for TAB, not space ("\040").  Presumably the
				 * consumer treats the escape as "whitespace" generically —
				 * confirm this is intentional before relying on it.
				 */
				subst = "\\011";
				slen = 4;
				break;
			case '\n':
				subst = "\\012";
				slen = 4;
				break;
			case '\'':
				subst = "\\047";
				slen = 4;
				break;
			case '\0':
				/* end of input: terminate and return the buffer in use */
				out[i] = 0;
				return (out);
			default:
				slen = 1;
				break;
		}
		/* Grow the output buffer if this substitution would overflow it. */
		if (i + slen >= size)
		{
			if (out == buf)
			{
				/* switch from the caller's buffer to a palloc'd copy */
				out = (char *) palloc(size + ExtendBy);
				strncpy(out, buf, i);
				size += ExtendBy;
			}
			else
			{
				out = (char *) repalloc(out, size + ExtendBy);
				size += ExtendBy;
			}
		}
		if (slen == 1)
			out[i++] = *key;
		else
		{
			memcpy(out + i, subst, slen);
			i += slen;
		}
		key++;
	}
	/* not reached: the loop only exits via the '\0' case above */
	return (out);
}
/*
 * gin_extract_jsonb_path --- GIN extractValue support for jsonb_path_ops.
 *
 * Walks the jsonb document with an iterator, maintaining a stack of
 * running hashes (one per nesting level) so each emitted entry hashes
 * the full key path leading to a scalar value.  Returns a palloc'd
 * Datum array of uint32 hashes and sets *nentries.
 */
Datum
gin_extract_jsonb_path(PG_FUNCTION_ARGS)
{
	Jsonb	   *jb = PG_GETARG_JSONB_P(0);
	int32	   *nentries = (int32 *) PG_GETARG_POINTER(1);
	int			total = 2 * JB_ROOT_COUNT(jb);
	JsonbIterator *it;
	JsonbValue	v;
	JsonbIteratorToken r;
	PathHashStack tail;
	PathHashStack *stack;
	int			i = 0;
	Datum	   *entries;

	/* If the root level is empty, we certainly have no keys */
	if (total == 0)
	{
		*nentries = 0;
		PG_RETURN_POINTER(NULL);
	}

	/* Otherwise, use 2 * root count as initial estimate of result size */
	entries = (Datum *) palloc(sizeof(Datum) * total);

	/* We keep a stack of partial hashes corresponding to parent key levels */
	tail.parent = NULL;
	tail.hash = 0;
	stack = &tail;

	it = JsonbIteratorInit(&jb->root);

	while ((r = JsonbIteratorNext(&it, &v, false)) != WJB_DONE)
	{
		PathHashStack *parent;

		/* Since we recurse into the object, we might need more space */
		if (i >= total)
		{
			total *= 2;
			entries = (Datum *) repalloc(entries, sizeof(Datum) * total);
		}

		switch (r)
		{
			case WJB_BEGIN_ARRAY:
			case WJB_BEGIN_OBJECT:
				/* Push a stack level for this object */
				parent = stack;
				stack = (PathHashStack *) palloc(sizeof(PathHashStack));

				/*
				 * We pass forward hashes from outer nesting levels so that
				 * the hashes for nested values will include outer keys as
				 * well as their own keys.
				 *
				 * Nesting an array within another array will not alter
				 * innermost scalar element hash values, but that seems
				 * inconsequential.
				 */
				stack->hash = parent->hash;
				stack->parent = parent;
				break;
			case WJB_KEY:
				/* mix this key into the current outer hash */
				JsonbHashScalarValue(&v, &stack->hash);
				/* hash is now ready to incorporate the value */
				break;
			case WJB_ELEM:
			case WJB_VALUE:
				/* mix the element or value's hash into the prepared hash */
				JsonbHashScalarValue(&v, &stack->hash);
				/* and emit an index entry */
				entries[i++] = UInt32GetDatum(stack->hash);
				/* reset hash for next key, value, or sub-object */
				stack->hash = stack->parent->hash;
				break;
			case WJB_END_ARRAY:
			case WJB_END_OBJECT:
				/* Pop the stack */
				parent = stack->parent;
				pfree(stack);
				stack = parent;
				/* reset hash for next key, value, or sub-object */
				if (stack->parent)
					stack->hash = stack->parent->hash;
				else
					stack->hash = 0;
				break;
			default:
				elog(ERROR, "invalid JsonbIteratorNext rc: %d", (int) r);
		}
	}

	*nentries = i;

	PG_RETURN_POINTER(entries);
}
/*
 * SPI_connect
 *		Open a new SPI procedure-call level.
 *
 * Grows the global _SPI_stack (doubling) as needed, pushes a fresh
 * _SPI_connection onto it, creates per-procedure and per-executor memory
 * contexts, and switches into the procedure context.  Returns
 * SPI_OK_CONNECT on success or SPI_ERROR_CONNECT if called at the wrong
 * nesting level.
 */
int
SPI_connect(void)
{
	int			newdepth;

	/*
	 * When procedure called by Executor _SPI_curid expected to be equal to
	 * _SPI_connected
	 */
	if (_SPI_curid != _SPI_connected)
		return SPI_ERROR_CONNECT;

	if (_SPI_stack == NULL)
	{
		/* First connection ever: both counters must be in their reset state */
		if (_SPI_connected != -1 || _SPI_stack_depth != 0)
			elog(ERROR, "SPI stack corrupted");
		newdepth = 16;
		/* stack lives in TopTransactionContext; freed at transaction end */
		_SPI_stack = (_SPI_connection *)
			MemoryContextAlloc(TopTransactionContext,
							   newdepth * sizeof(_SPI_connection));
		_SPI_stack_depth = newdepth;
	}
	else
	{
		if (_SPI_stack_depth <= 0 || _SPI_stack_depth <= _SPI_connected)
			elog(ERROR, "SPI stack corrupted");
		if (_SPI_stack_depth == _SPI_connected + 1)
		{
			/* stack is full: double its capacity before pushing */
			newdepth = _SPI_stack_depth * 2;
			_SPI_stack = (_SPI_connection *)
				repalloc(_SPI_stack,
						 newdepth * sizeof(_SPI_connection));
			_SPI_stack_depth = newdepth;
		}
	}

	/*
	 * We're entering procedure where _SPI_curid == _SPI_connected - 1
	 */
	_SPI_connected++;
	Assert(_SPI_connected >= 0 && _SPI_connected < _SPI_stack_depth);

	_SPI_current = &(_SPI_stack[_SPI_connected]);
	_SPI_current->processed = 0;
	_SPI_current->lastoid = InvalidOid;
	_SPI_current->tuptable = NULL;
	_SPI_current->procCxt = NULL;	/* in case we fail to create 'em */
	_SPI_current->execCxt = NULL;
	_SPI_current->connectSubid = GetCurrentSubTransactionId();

	/*
	 * Create memory contexts for this procedure
	 *
	 * XXX it would be better to use PortalContext as the parent context, but
	 * we may not be inside a portal (consider deferred-trigger execution).
	 * Perhaps CurTransactionContext would do?	For now it doesn't matter
	 * because we clean up explicitly in AtEOSubXact_SPI().
	 */
	_SPI_current->procCxt = AllocSetContextCreate(TopTransactionContext,
												  "SPI Proc",
												  ALLOCSET_DEFAULT_MINSIZE,
												  ALLOCSET_DEFAULT_INITSIZE,
												  ALLOCSET_DEFAULT_MAXSIZE);
	_SPI_current->execCxt = AllocSetContextCreate(TopTransactionContext,
												  "SPI Exec",
												  ALLOCSET_DEFAULT_MINSIZE,
												  ALLOCSET_DEFAULT_INITSIZE,
												  ALLOCSET_DEFAULT_MAXSIZE);
	/* ... and switch to procedure's context */
	_SPI_current->savedcxt = MemoryContextSwitchTo(_SPI_current->procCxt);

	return SPI_OK_CONNECT;
}
/*
 * getCdbComponentDatabases
 *
 *
 * Storage for the SegmentInstances block and all subsidiary
 * structures are allocated from the caller's context.
 *
 * NOTE(review): the entire implementation below is compiled out with
 * "#if 0", so this function currently always returns NULL.  If the dead
 * code is ever re-enabled it will NOT compile as-is: it references a
 * variable "dbid" whose declaration is commented out inside the loop.
 * Callers must tolerate a NULL result — verify before re-enabling.
 */
CdbComponentDatabases *
getCdbComponentInfo(bool DNSLookupAsError)
{
#if 0
	CdbComponentDatabaseInfo *pOld = NULL;
	CdbComponentDatabases *component_databases = NULL;

	Relation	gp_seg_config_rel;
	HeapTuple	gp_seg_config_tuple = NULL;
	HeapScanDesc gp_seg_config_scan;

	/*
	 * Initial size for info arrays.
	 */
	int			segment_array_size = 500;
	int			entry_array_size = 4;	/* we currently support a max of 2 */

	/*
	 * isNull and attr are used when getting the data for a specific column from a HeapTuple
	 */
	bool		isNull;
	Datum		attr;

	/*
	 * Local variables for fields from the rows of the tables that we are reading.
	 */
	char		role;
	char		status = 0;

	int			i;

	/*
	 * Allocate component_databases return structure, the
	 * component_databases->segment_db_info array (initial size
	 * segment_array_size = 500) and component_databases->entry_db_info
	 * (initial size entry_array_size = 4).  If necessary during row
	 * fetching, we grow these by doubling each time we run out.
	 */
	component_databases = palloc0(sizeof(CdbComponentDatabases));

	component_databases->segment_db_info =
		(CdbComponentDatabaseInfo *) palloc0(sizeof(CdbComponentDatabaseInfo) * segment_array_size);

	component_databases->entry_db_info =
		(CdbComponentDatabaseInfo *) palloc0(sizeof(CdbComponentDatabaseInfo) * entry_array_size);

	gp_seg_config_rel = heap_open(GpSegmentConfigRelationId, AccessShareLock);

	gp_seg_config_scan = heap_beginscan(gp_seg_config_rel, SnapshotNow, 0, NULL);

	while (HeapTupleIsValid(gp_seg_config_tuple = heap_getnext(gp_seg_config_scan, ForwardScanDirection)))
	{
		/*
		 * Grab the fields that we need from gp_configuration.  We do
		 * this first, because until we read them, we don't know
		 * whether this is an entry database row or a segment database
		 * row.
		 */
		CdbComponentDatabaseInfo *pRow;

		/*
		 * dbid
		 */
		//attr = heap_getattr(gp_seg_config_tuple, Anum_gp_segment_configuration_dbid, RelationGetDescr(gp_seg_config_rel), &isNull);
		//Assert(!isNull);
		//dbid = DatumGetInt16(attr);

		/*
		 * content
		 */
		//attr = heap_getattr(gp_seg_config_tuple, Anum_gp_segment_configuration_content, RelationGetDescr(gp_seg_config_rel), &isNull);
		//Assert(!isNull);
		//content = DatumGetInt16(attr);

		/*
		 * role
		 */
		attr = heap_getattr(gp_seg_config_tuple, Anum_gp_segment_configuration_role, RelationGetDescr(gp_seg_config_rel), &isNull);
		Assert(!isNull);
		role = DatumGetChar(attr);

		/*
		 * preferred-role
		 */
		//attr = heap_getattr(gp_seg_config_tuple, Anum_gp_segment_configuration_preferred_role, RelationGetDescr(gp_seg_config_rel), &isNull);
		//Assert(!isNull);
		//preferred_role = DatumGetChar(attr);

		/*
		 * mode
		 */
		//attr = heap_getattr(gp_seg_config_tuple, Anum_gp_segment_configuration_mode, RelationGetDescr(gp_seg_config_rel), &isNull);
		//Assert(!isNull);
		//mode = DatumGetChar(attr);

		/*
		 * status
		 */
		attr = heap_getattr(gp_seg_config_tuple, Anum_gp_segment_configuration_status, RelationGetDescr(gp_seg_config_rel), &isNull);
		Assert(!isNull);
		status = DatumGetChar(attr);

		/*
		 * Determine which array to place this rows data in: entry or
		 * segment, based on the content field.
		 */
		if (role == SEGMENT_ROLE_PRIMARY)
		{
			/* if we have a dbid bigger than our array we'll have to grow the array. (MPP-2104) */
			/* NOTE(review): "dbid" here is undeclared — its declaration and
			 * assignment are commented out above; would not compile if this
			 * #if 0 region were re-enabled. */
			if (dbid >= segment_array_size || component_databases->total_segment_dbs >= segment_array_size)
			{
				/*
				 * Expand CdbComponentDatabaseInfo array if we've used up currently allocated space
				 */
				segment_array_size = Max((segment_array_size * 2), dbid * 2);
				pOld = component_databases->segment_db_info;
				component_databases->segment_db_info = (CdbComponentDatabaseInfo *)
					repalloc(pOld, sizeof(CdbComponentDatabaseInfo) * segment_array_size);
			}

			pRow = &component_databases->segment_db_info[component_databases->total_segment_dbs];
			component_databases->total_segment_dbs++;
		}
		else
		{
			if (component_databases->total_entry_dbs >= entry_array_size)
			{
				/*
				 * Expand CdbComponentDatabaseInfo array if we've used up currently allocated space
				 */
				entry_array_size *= 2;
				pOld = component_databases->entry_db_info;
				component_databases->entry_db_info = (CdbComponentDatabaseInfo *)
					repalloc(pOld, sizeof(CdbComponentDatabaseInfo) * entry_array_size);
			}

			pRow = &component_databases->entry_db_info[component_databases->total_entry_dbs];
			component_databases->total_entry_dbs++;
		}

		pRow->role = role;
		pRow->status = status;

		/*
		 * hostname
		 */
		attr = heap_getattr(gp_seg_config_tuple, Anum_gp_segment_configuration_hostname, RelationGetDescr(gp_seg_config_rel), &isNull);
		Assert(!isNull);
		pRow->hostname = TextDatumGetCString(attr);

		/*
		 * address
		 */
		attr = heap_getattr(gp_seg_config_tuple, Anum_gp_segment_configuration_address, RelationGetDescr(gp_seg_config_rel), &isNull);
		Assert(!isNull);
		pRow->address = TextDatumGetCString(attr);

		/*
		 * port
		 */
		attr = heap_getattr(gp_seg_config_tuple, Anum_gp_segment_configuration_port, RelationGetDescr(gp_seg_config_rel), &isNull);
		Assert(!isNull);
		pRow->port = DatumGetInt32(attr);

		/* resolve hostaddrs; failures are errors or just logged per caller */
		getAddressesForDBid(pRow, DNSLookupAsError ? ERROR : LOG);
		pRow->hostip = pRow->hostaddrs[0];
	}

	/*
	 * We're done with the catalog entries, cleanup them up, closing
	 * all the relations we opened.
	 */
	heap_endscan(gp_seg_config_scan);
	heap_close(gp_seg_config_rel, AccessShareLock);

	/*
	 * Validate that there exists at least one entry and one segment
	 * database in the configuration
	 */
	if (component_databases->total_segment_dbs == 0)
	{
		ereport(ERROR,
				(errcode(ERRCODE_CARDINALITY_VIOLATION),
				 errmsg("Greenplum Database number of segment databases cannot be 0")));
	}
	if (component_databases->total_entry_dbs == 0)
	{
		ereport(ERROR,
				(errcode(ERRCODE_CARDINALITY_VIOLATION),
				 errmsg("Greenplum Database number of entry databases cannot be 0")));
	}

	/*
	 * Now sort the data by segindex, isprimary desc
	 */
	qsort(component_databases->segment_db_info,
		  component_databases->total_segment_dbs, sizeof(CdbComponentDatabaseInfo),
		  CdbComponentDatabaseInfoCompare);

	qsort(component_databases->entry_db_info,
		  component_databases->total_entry_dbs, sizeof(CdbComponentDatabaseInfo),
		  CdbComponentDatabaseInfoCompare);

	/*
	 * Now count the number of distinct segindexes.
	 * Since it's sorted, this is easy.
	 */
	for (i = 0; i < component_databases->total_segment_dbs; i++)
	{
		if (i == 0 ||
			(component_databases->segment_db_info[i].segindex != component_databases->segment_db_info[i - 1].segindex))
		{
			component_databases->total_segments++;
		}
	}

	return component_databases;
#endif
	return NULL;
}
/*
 * Get a combo command id that maps to cmin and cmax.
 *
 * We try to reuse old combo command ids when possible: if the same
 * (cmin, cmax) pair was already assigned a combo cid in this transaction,
 * the hash lookup returns it directly.
 */
static CommandId
GetComboCommandId(CommandId cmin, CommandId cmax)
{
	ComboCidKeyData search_key;
	ComboCidEntry hentry;
	bool		existed;
	CommandId	result;

	/*
	 * First combo cid of this transaction: build the lookup hash table and
	 * the reverse-mapping array, both in TopTransactionContext so they go
	 * away automatically at transaction end.
	 */
	if (comboHash == NULL)
	{
		HASHCTL		ctl;

		memset(&ctl, 0, sizeof(ctl));
		ctl.keysize = sizeof(ComboCidKeyData);
		ctl.entrysize = sizeof(ComboCidEntryData);
		ctl.hash = tag_hash;
		ctl.hcxt = TopTransactionContext;
		comboHash = hash_create("Combo CIDs",
								CCID_HASH_SIZE,
								&ctl,
								HASH_ELEM | HASH_FUNCTION | HASH_CONTEXT);

		comboCids = (ComboCidKeyData *)
			MemoryContextAlloc(TopTransactionContext,
							   sizeof(ComboCidKeyData) * CCID_ARRAY_SIZE);
		sizeComboCids = CCID_ARRAY_SIZE;
		usedComboCids = 0;
	}

	/*
	 * Probe (and if absent, insert) the hash entry for this cmin/cmax pair.
	 * We assume there is no struct padding in ComboCidKeyData!
	 */
	search_key.cmin = cmin;
	search_key.cmax = cmax;
	hentry = (ComboCidEntry) hash_search(comboHash,
										 (void *) &search_key,
										 HASH_ENTER,
										 &existed);

	/* A pre-existing entry lets us hand back the same combo cid as before. */
	if (existed)
		return hentry->combocid;

	/*
	 * Need to issue a new combo cid.  Grow the array first if it is full,
	 * so the new slot is guaranteed to exist.
	 */
	if (usedComboCids >= sizeComboCids)
	{
		int			grown = sizeComboCids * 2;

		comboCids = (ComboCidKeyData *)
			repalloc(comboCids, sizeof(ComboCidKeyData) * grown);
		sizeComboCids = grown;
	}

	result = usedComboCids;
	comboCids[result].cmin = cmin;
	comboCids[result].cmax = cmax;
	usedComboCids++;

	hentry->combocid = result;

	return result;
}
/*
 * getAddressesForDBid
 *		Fill in c->hostaddrs with resolved addresses for segment 'c'.
 *
 * Scans gp_db_interfaces for the rows belonging to this dbid, sorts the
 * interfaces by priority, and resolves the address of each "alive"
 * interface (status == 1) found in gp_interfaces.  If no interface rows
 * exist or none resolve, falls back to resolving c->address and then
 * c->hostname, leaving the single result (or NULL) in c->hostaddrs[0].
 *
 * 'elevel' is the elog level used for DNS-resolution failures (ERROR or
 * LOG, per the caller's DNSLookupAsError choice).
 */
static void
getAddressesForDBid(CdbComponentDatabaseInfo *c, int elevel)
{
	Relation	gp_db_interface_rel;
	Relation	gp_interface_rel;
	HeapTuple	tuple;
	ScanKeyData key;
	SysScanDesc dbscan,
				ifacescan;

	int			j,
				i = 0;
	struct priority_iface *ifaces = NULL;
	int			iface_count,
				iface_max = 0;

	Datum		attr;
	bool		isNull;

	int			dbid;
	int			iface_id;
	int			priority;

	char	   *name;

	Assert(c != NULL);

	gp_db_interface_rel = heap_open(GpDbInterfacesRelationId, AccessShareLock);

	/* CaQL UNDONE: no test coverage */
	/*
	 * BUG FIX: previously this scanned for dbid 0 (ObjectIdGetDatum(0)),
	 * which matches no gp_db_interfaces row, so the interface list was
	 * always empty and we always fell through to the hostname fallback.
	 * Scan for this segment's own dbid, as the (commented-out) sanity
	 * check "Assert(dbid == c->dbid)" below clearly intends.
	 */
	ScanKeyInit(&key, Anum_gp_db_interfaces_dbid,
				BTEqualStrategyNumber, F_INT2EQ,
				ObjectIdGetDatum(c->dbid));

	dbscan = systable_beginscan(gp_db_interface_rel, GpDbInterfacesDbidIndexId,
								true, SnapshotNow, 1, &key);

	while (HeapTupleIsValid(tuple = systable_getnext(dbscan)))
	{
		i++;
		if (i > iface_max)
		{
			/* allocate 8-more slots */
			if (ifaces == NULL)
				ifaces = palloc((iface_max + 8) * sizeof(struct priority_iface));
			else
				ifaces = repalloc(ifaces, (iface_max + 8) * sizeof(struct priority_iface));

			/* zero only the newly added slots */
			memset(ifaces + iface_max, 0, 8 * sizeof(struct priority_iface));
			iface_max += 8;
		}

		/* dbid is for sanity-check on scan condition only */
		attr = heap_getattr(tuple, Anum_gp_db_interfaces_dbid, gp_db_interface_rel->rd_att, &isNull);
		Assert(!isNull);
		dbid = DatumGetInt16(attr);
		//Assert(dbid == c->dbid);

		attr = heap_getattr(tuple, Anum_gp_db_interfaces_interfaceid, gp_db_interface_rel->rd_att, &isNull);
		Assert(!isNull);
		iface_id = DatumGetInt16(attr);

		attr = heap_getattr(tuple, Anum_gp_db_interfaces_priority, gp_db_interface_rel->rd_att, &isNull);
		Assert(!isNull);
		priority = DatumGetInt16(attr);

		ifaces[i - 1].priority = priority;
		ifaces[i - 1].interface_id = iface_id;
	}
	iface_count = i;

	/* Finish up scan and close appendonly catalog. */
	systable_endscan(dbscan);

	heap_close(gp_db_interface_rel, AccessShareLock);

	/* we now have the unsorted list, or an empty list. */
	do
	{
		/* fallback to using hostname if our list is empty */
		if (iface_count == 0)
			break;

		qsort(ifaces, iface_count, sizeof(struct priority_iface), iface_priority_compare);

		/* we now have interfaces, sorted by priority. */

		gp_interface_rel = heap_open(GpInterfacesRelationId, AccessShareLock);

		j = 0;
		for (i = 0; i < iface_count; i++)
		{
			int			status = 0;

			/* CaQL UNDONE: no test coverage */
			/* Start a new scan. */
			ScanKeyInit(&key, Anum_gp_interfaces_interfaceid,
						BTEqualStrategyNumber, F_INT2EQ,
						ObjectIdGetDatum(ifaces[i].interface_id));

			ifacescan = systable_beginscan(gp_interface_rel, GpInterfacesInterfaceidIndexId,
										   true, SnapshotNow, 1, &key);

			tuple = systable_getnext(ifacescan);

			Assert(HeapTupleIsValid(tuple));

			/* iface_id is for sanity-check on scan condition only */
			attr = heap_getattr(tuple, Anum_gp_interfaces_interfaceid, gp_interface_rel->rd_att, &isNull);
			Assert(!isNull);
			iface_id = DatumGetInt16(attr);
			Assert(iface_id == ifaces[i].interface_id);

			attr = heap_getattr(tuple, Anum_gp_interfaces_status, gp_interface_rel->rd_att, &isNull);
			Assert(!isNull);
			status = DatumGetInt16(attr);

			/* if the status is "alive" use the interface. */
			if (status == 1)
			{
				attr = heap_getattr(tuple, Anum_gp_interfaces_address, gp_interface_rel->rd_att, &isNull);
				Assert(!isNull);
				/* NOTE(review): no bound check against COMPONENT_DBS_MAX_ADDRS
				 * here — presumably iface_count cannot exceed it; verify. */
				name = getDnsCachedAddress(DatumGetCString(attr), c->port, elevel);
				if (name)
					c->hostaddrs[j++] = pstrdup(name);
			}

			systable_endscan(ifacescan);
		}

		heap_close(gp_interface_rel, AccessShareLock);

		/* fallback to using hostname if our list is empty */
		if (j == 0)
			break;

		/* successfully retrieved at least one entry. */
		return;
	}
	while (0);

	/* fallback to using hostname */
	memset(c->hostaddrs, 0, COMPONENT_DBS_MAX_ADDRS * sizeof(char *));

	/*
	 * add an entry, using the first the "address" and then the
	 * "hostname" as fallback.
	 */
	name = getDnsCachedAddress(c->address, c->port, elevel);

	if (name)
	{
		c->hostaddrs[0] = pstrdup(name);
		return;
	}

	/* now the hostname. */
	name = getDnsCachedAddress(c->hostname, c->port, elevel);

	if (name)
	{
		c->hostaddrs[0] = pstrdup(name);
	}
	else
	{
		c->hostaddrs[0] = NULL;
	}

	return;
}
/* * ExecIndexBuildScanKeys * Build the index scan keys from the index qualification expressions * * The index quals are passed to the index AM in the form of a ScanKey array. * This routine sets up the ScanKeys, fills in all constant fields of the * ScanKeys, and prepares information about the keys that have non-constant * comparison values. We divide index qual expressions into five types: * * 1. Simple operator with constant comparison value ("indexkey op constant"). * For these, we just fill in a ScanKey containing the constant value. * * 2. Simple operator with non-constant value ("indexkey op expression"). * For these, we create a ScanKey with everything filled in except the * expression value, and set up an IndexRuntimeKeyInfo struct to drive * evaluation of the expression at the right times. * * 3. RowCompareExpr ("(indexkey, indexkey, ...) op (expr, expr, ...)"). * For these, we create a header ScanKey plus a subsidiary ScanKey array, * as specified in access/skey.h. The elements of the row comparison * can have either constant or non-constant comparison values. * * 4. ScalarArrayOpExpr ("indexkey op ANY (array-expression)"). For these, * we create a ScanKey with everything filled in except the comparison value, * and set up an IndexArrayKeyInfo struct to drive processing of the qual. * (Note that we treat all array-expressions as requiring runtime evaluation, * even if they happen to be constants.) * * 5. NullTest ("indexkey IS NULL/IS NOT NULL"). We just fill in the * ScanKey properly. * * This code is also used to prepare ORDER BY expressions for amcanorderbyop * indexes. The behavior is exactly the same, except that we have to look up * the operator differently. Note that only cases 1 and 2 are currently * possible for ORDER BY. 
* * Input params are: * * planstate: executor state node we are working for * index: the index we are building scan keys for * scanrelid: varno of the index's relation within current query * quals: indexquals (or indexorderbys) expressions * isorderby: true if processing ORDER BY exprs, false if processing quals * *runtimeKeys: ptr to pre-existing IndexRuntimeKeyInfos, or NULL if none * *numRuntimeKeys: number of pre-existing runtime keys * * Output params are: * * *scanKeys: receives ptr to array of ScanKeys * *numScanKeys: receives number of scankeys * *runtimeKeys: receives ptr to array of IndexRuntimeKeyInfos, or NULL if none * *numRuntimeKeys: receives number of runtime keys * *arrayKeys: receives ptr to array of IndexArrayKeyInfos, or NULL if none * *numArrayKeys: receives number of array keys * * Caller may pass NULL for arrayKeys and numArrayKeys to indicate that * ScalarArrayOpExpr quals are not supported. */ void ExecIndexBuildScanKeys(PlanState *planstate, Relation index, Index scanrelid, List *quals, bool isorderby, ScanKey *scanKeys, int *numScanKeys, IndexRuntimeKeyInfo **runtimeKeys, int *numRuntimeKeys, IndexArrayKeyInfo **arrayKeys, int *numArrayKeys) { ListCell *qual_cell; ScanKey scan_keys; IndexRuntimeKeyInfo *runtime_keys; IndexArrayKeyInfo *array_keys; int n_scan_keys; int n_runtime_keys; int max_runtime_keys; int n_array_keys; int j; /* Allocate array for ScanKey structs: one per qual */ n_scan_keys = list_length(quals); scan_keys = (ScanKey) palloc(n_scan_keys * sizeof(ScanKeyData)); /* * runtime_keys array is dynamically resized as needed. We handle it this * way so that the same runtime keys array can be shared between * indexquals and indexorderbys, which will be processed in separate calls * of this function. Caller must be sure to pass in NULL/0 for first * call. 
*/ runtime_keys = *runtimeKeys; n_runtime_keys = max_runtime_keys = *numRuntimeKeys; /* Allocate array_keys as large as it could possibly need to be */ array_keys = (IndexArrayKeyInfo *) palloc0(n_scan_keys * sizeof(IndexArrayKeyInfo)); n_array_keys = 0; /* * for each opclause in the given qual, convert the opclause into a single * scan key */ j = 0; foreach(qual_cell, quals) { Expr *clause = (Expr *) lfirst(qual_cell); ScanKey this_scan_key = &scan_keys[j++]; Oid opno; /* operator's OID */ RegProcedure opfuncid; /* operator proc id used in scan */ Oid opfamily; /* opfamily of index column */ int op_strategy; /* operator's strategy number */ Oid op_lefttype; /* operator's declared input types */ Oid op_righttype; Expr *leftop; /* expr on lhs of operator */ Expr *rightop; /* expr on rhs ... */ AttrNumber varattno; /* att number used in scan */ if (IsA(clause, OpExpr)) { /* indexkey op const or indexkey op expression */ int flags = 0; Datum scanvalue; opno = ((OpExpr *) clause)->opno; opfuncid = ((OpExpr *) clause)->opfuncid; /* * leftop should be the index key Var, possibly relabeled */ leftop = (Expr *) get_leftop(clause); if (leftop && IsA(leftop, RelabelType)) leftop = ((RelabelType *) leftop)->arg; Assert(leftop != NULL); if (!(IsA(leftop, Var) && ((Var *) leftop)->varno == scanrelid)) elog(ERROR, "indexqual doesn't have key on left side"); varattno = ((Var *) leftop)->varattno; if (varattno < 1 || varattno > index->rd_index->indnatts) elog(ERROR, "bogus index qualification"); /* * We have to look up the operator's strategy number. This * provides a cross-check that the operator does match the index. 
*/ opfamily = index->rd_opfamily[varattno - 1]; get_op_opfamily_properties(opno, opfamily, isorderby, &op_strategy, &op_lefttype, &op_righttype); if (isorderby) flags |= SK_ORDER_BY; /* * rightop is the constant or variable comparison value */ rightop = (Expr *) get_rightop(clause); if (rightop && IsA(rightop, RelabelType)) rightop = ((RelabelType *) rightop)->arg; Assert(rightop != NULL); if (IsA(rightop, Const)) { /* OK, simple constant comparison value */ scanvalue = ((Const *) rightop)->constvalue; if (((Const *) rightop)->constisnull) flags |= SK_ISNULL; } else { /* Need to treat this one as a runtime key */ if (n_runtime_keys >= max_runtime_keys) { if (max_runtime_keys == 0) { max_runtime_keys = 8; runtime_keys = (IndexRuntimeKeyInfo *) palloc(max_runtime_keys * sizeof(IndexRuntimeKeyInfo)); } else { max_runtime_keys *= 2; runtime_keys = (IndexRuntimeKeyInfo *) repalloc(runtime_keys, max_runtime_keys * sizeof(IndexRuntimeKeyInfo)); } } runtime_keys[n_runtime_keys].scan_key = this_scan_key; runtime_keys[n_runtime_keys].key_expr = ExecInitExpr(rightop, planstate); runtime_keys[n_runtime_keys].key_toastable = TypeIsToastable(op_righttype); n_runtime_keys++; scanvalue = (Datum) 0; } /* * initialize the scan key's fields appropriately */ ScanKeyEntryInitialize(this_scan_key, flags, varattno, /* attribute number to scan */ op_strategy, /* op's strategy */ op_righttype, /* strategy subtype */ ((OpExpr *) clause)->inputcollid, /* collation */ opfuncid, /* reg proc to use */ scanvalue); /* constant */ } else if (IsA(clause, RowCompareExpr)) { /* (indexkey, indexkey, ...) op (expression, expression, ...) 
*/ RowCompareExpr *rc = (RowCompareExpr *) clause; ListCell *largs_cell = list_head(rc->largs); ListCell *rargs_cell = list_head(rc->rargs); ListCell *opnos_cell = list_head(rc->opnos); ListCell *collids_cell = list_head(rc->inputcollids); ScanKey first_sub_key; int n_sub_key; Assert(!isorderby); first_sub_key = (ScanKey) palloc(list_length(rc->opnos) * sizeof(ScanKeyData)); n_sub_key = 0; /* Scan RowCompare columns and generate subsidiary ScanKey items */ while (opnos_cell != NULL) { ScanKey this_sub_key = &first_sub_key[n_sub_key]; int flags = SK_ROW_MEMBER; Datum scanvalue; Oid inputcollation; /* * leftop should be the index key Var, possibly relabeled */ leftop = (Expr *) lfirst(largs_cell); largs_cell = lnext(largs_cell); if (leftop && IsA(leftop, RelabelType)) leftop = ((RelabelType *) leftop)->arg; Assert(leftop != NULL); if (!(IsA(leftop, Var) && ((Var *) leftop)->varno == scanrelid)) elog(ERROR, "indexqual doesn't have key on left side"); varattno = ((Var *) leftop)->varattno; /* * We have to look up the operator's associated btree support * function */ opno = lfirst_oid(opnos_cell); opnos_cell = lnext(opnos_cell); if (index->rd_rel->relam != BTREE_AM_OID || varattno < 1 || varattno > index->rd_index->indnatts) elog(ERROR, "bogus RowCompare index qualification"); opfamily = index->rd_opfamily[varattno - 1]; get_op_opfamily_properties(opno, opfamily, isorderby, &op_strategy, &op_lefttype, &op_righttype); if (op_strategy != rc->rctype) elog(ERROR, "RowCompare index qualification contains wrong operator"); opfuncid = get_opfamily_proc(opfamily, op_lefttype, op_righttype, BTORDER_PROC); inputcollation = lfirst_oid(collids_cell); collids_cell = lnext(collids_cell); /* * rightop is the constant or variable comparison value */ rightop = (Expr *) lfirst(rargs_cell); rargs_cell = lnext(rargs_cell); if (rightop && IsA(rightop, RelabelType)) rightop = ((RelabelType *) rightop)->arg; Assert(rightop != NULL); if (IsA(rightop, Const)) { /* OK, simple constant comparison 
value */ scanvalue = ((Const *) rightop)->constvalue; if (((Const *) rightop)->constisnull) flags |= SK_ISNULL; } else { /* Need to treat this one as a runtime key */ if (n_runtime_keys >= max_runtime_keys) { if (max_runtime_keys == 0) { max_runtime_keys = 8; runtime_keys = (IndexRuntimeKeyInfo *) palloc(max_runtime_keys * sizeof(IndexRuntimeKeyInfo)); } else { max_runtime_keys *= 2; runtime_keys = (IndexRuntimeKeyInfo *) repalloc(runtime_keys, max_runtime_keys * sizeof(IndexRuntimeKeyInfo)); } } runtime_keys[n_runtime_keys].scan_key = this_sub_key; runtime_keys[n_runtime_keys].key_expr = ExecInitExpr(rightop, planstate); runtime_keys[n_runtime_keys].key_toastable = TypeIsToastable(op_righttype); n_runtime_keys++; scanvalue = (Datum) 0; } /* * initialize the subsidiary scan key's fields appropriately */ ScanKeyEntryInitialize(this_sub_key, flags, varattno, /* attribute number */ op_strategy, /* op's strategy */ op_righttype, /* strategy subtype */ inputcollation, /* collation */ opfuncid, /* reg proc to use */ scanvalue); /* constant */ n_sub_key++; } /* Mark the last subsidiary scankey correctly */ first_sub_key[n_sub_key - 1].sk_flags |= SK_ROW_END; /* * We don't use ScanKeyEntryInitialize for the header because it * isn't going to contain a valid sk_func pointer. 
*/ MemSet(this_scan_key, 0, sizeof(ScanKeyData)); this_scan_key->sk_flags = SK_ROW_HEADER; this_scan_key->sk_attno = first_sub_key->sk_attno; this_scan_key->sk_strategy = rc->rctype; /* sk_subtype, sk_collation, sk_func not used in a header */ this_scan_key->sk_argument = PointerGetDatum(first_sub_key); } else if (IsA(clause, ScalarArrayOpExpr)) { /* indexkey op ANY (array-expression) */ ScalarArrayOpExpr *saop = (ScalarArrayOpExpr *) clause; Assert(!isorderby); Assert(saop->useOr); opno = saop->opno; opfuncid = saop->opfuncid; /* * leftop should be the index key Var, possibly relabeled */ leftop = (Expr *) linitial(saop->args); if (leftop && IsA(leftop, RelabelType)) leftop = ((RelabelType *) leftop)->arg; Assert(leftop != NULL); if (!(IsA(leftop, Var) && ((Var *) leftop)->varno == scanrelid)) elog(ERROR, "indexqual doesn't have key on left side"); varattno = ((Var *) leftop)->varattno; if (varattno < 1 || varattno > index->rd_index->indnatts) elog(ERROR, "bogus index qualification"); /* * We have to look up the operator's strategy number. This * provides a cross-check that the operator does match the index. 
*/ opfamily = index->rd_opfamily[varattno - 1]; get_op_opfamily_properties(opno, opfamily, isorderby, &op_strategy, &op_lefttype, &op_righttype); /* * rightop is the constant or variable array value */ rightop = (Expr *) lsecond(saop->args); if (rightop && IsA(rightop, RelabelType)) rightop = ((RelabelType *) rightop)->arg; Assert(rightop != NULL); array_keys[n_array_keys].scan_key = this_scan_key; array_keys[n_array_keys].array_expr = ExecInitExpr(rightop, planstate); /* the remaining fields were zeroed by palloc0 */ n_array_keys++; /* * initialize the scan key's fields appropriately */ ScanKeyEntryInitialize(this_scan_key, 0, /* flags */ varattno, /* attribute number to scan */ op_strategy, /* op's strategy */ op_righttype, /* strategy subtype */ saop->inputcollid, /* collation */ opfuncid, /* reg proc to use */ (Datum) 0); /* constant */ } else if (IsA(clause, NullTest)) { /* indexkey IS NULL or indexkey IS NOT NULL */ NullTest *ntest = (NullTest *) clause; int flags; Assert(!isorderby); /* * argument should be the index key Var, possibly relabeled */ leftop = ntest->arg; if (leftop && IsA(leftop, RelabelType)) leftop = ((RelabelType *) leftop)->arg; Assert(leftop != NULL); if (!(IsA(leftop, Var) && ((Var *) leftop)->varno == scanrelid)) elog(ERROR, "NullTest indexqual has wrong key"); varattno = ((Var *) leftop)->varattno; /* * initialize the scan key's fields appropriately */ switch (ntest->nulltesttype) { case IS_NULL: flags = SK_ISNULL | SK_SEARCHNULL; break; case IS_NOT_NULL: flags = SK_ISNULL | SK_SEARCHNOTNULL; break; default: elog(ERROR, "unrecognized nulltesttype: %d", (int) ntest->nulltesttype); flags = 0; /* keep compiler quiet */ break; } ScanKeyEntryInitialize(this_scan_key, flags, varattno, /* attribute number to scan */ InvalidStrategy, /* no strategy */ InvalidOid, /* no strategy subtype */ InvalidOid, /* no collation */ InvalidOid, /* no reg proc for this */ (Datum) 0); /* constant */ } else elog(ERROR, "unsupported indexqual type: %d", (int) 
nodeTag(clause)); }
/*
 * find_inheritance_children
 *
 * Return a list of the OIDs of every relation that inherits *directly*
 * from the relation with OID 'parentrelId'.
 *
 * Each child relation is locked with 'lockmode' (the parent itself is not
 * locked; the caller should already hold a lock on it).  With NoLock no
 * locks are taken, and the caller must beware of race conditions against
 * possible DROPs of child relations.
 */
List *
find_inheritance_children(Oid parentrelId, LOCKMODE lockmode)
{
	List	   *result = NIL;
	Relation	inhrel;
	SysScanDesc inhscan;
	ScanKeyData skey[1];
	HeapTuple	tup;
	Oid			childoid;
	Oid		   *children;
	int			capacity;
	int			nchildren;
	int			idx;

	/*
	 * Can skip the scan if pg_class shows the relation has never had a
	 * subclass.
	 */
	if (!has_subclass(parentrelId))
		return NIL;

	/*
	 * Scan pg_inherits, collecting the child OIDs into a growable array.
	 */
	capacity = 32;
	children = (Oid *) palloc(capacity * sizeof(Oid));
	nchildren = 0;

	inhrel = heap_open(InheritsRelationId, AccessShareLock);

	ScanKeyInit(&skey[0],
				Anum_pg_inherits_inhparent,
				BTEqualStrategyNumber, F_OIDEQ,
				ObjectIdGetDatum(parentrelId));

	inhscan = systable_beginscan(inhrel, InheritsParentIndexId, true,
								 NULL, 1, skey);

	while ((tup = systable_getnext(inhscan)) != NULL)
	{
		/* double the array whenever it fills up */
		if (nchildren >= capacity)
		{
			capacity *= 2;
			children = (Oid *) repalloc(children, capacity * sizeof(Oid));
		}
		children[nchildren++] =
			((Form_pg_inherits) GETSTRUCT(tup))->inhrelid;
	}

	systable_endscan(inhscan);

	heap_close(inhrel, AccessShareLock);

	/*
	 * If we found more than one child, sort them by OID.  This ensures
	 * reasonably consistent behavior regardless of the vagaries of an
	 * indexscan, and is important because all backends must lock children
	 * in the same order to avoid needless deadlocks.
	 */
	if (nchildren > 1)
		qsort(children, nchildren, sizeof(Oid), oid_cmp);

	/*
	 * Acquire locks and build the result list.
	 */
	for (idx = 0; idx < nchildren; idx++)
	{
		childoid = children[idx];

		if (lockmode != NoLock)
		{
			/* Get the lock to synchronize against concurrent drop */
			LockRelationOid(childoid, lockmode);

			/*
			 * With the lock held, recheck that the relation still exists.
			 * If it was dropped while we waited for the lock, release the
			 * now-useless lock and skip the relation.
			 */
			if (!SearchSysCacheExists1(RELOID, ObjectIdGetDatum(childoid)))
			{
				UnlockRelationOid(childoid, lockmode);
				continue;
			}
		}

		result = lappend_oid(result, childoid);
	}

	pfree(children);

	return result;
}