/*
 * Remove a request from the pending-request list and free it, then kick off
 * the next queued request.
 *
 * The original code cast rq to each concrete request subtype before calling
 * free(); since free() takes a void pointer, those casts had no effect and
 * every case body was identical.  The switch is kept only to catch corrupted
 * request types: an unknown rt is logged and deliberately NOT freed, matching
 * the original behavior.
 */
static void
remove_request(NFS_BaseRequest *rq)
{
	/* remove from lists */
	list_delete_first(NfsRequests, search_requests, &(rq->token));

	/* free memory (all request variants are freed identically) */
	switch (rq->rt) {
		case RT_LOOKUP:
		case RT_READ:
		case RT_WRITE:
		case RT_STAT:
		case RT_DIR:
		case RT_REMOVE:
			free(rq);
			break;
		default:
			/* unknown type: leak rather than free through a bogus pointer */
			dprintf(0, "!!! nfsfs.c: remove_request: invalid request type %d\n", rq->rt);
	}

	/* run next request in queue */
	run_head_request();
}
/*
 * Abort lingering subtransactions that have been explicitly started
 * by plpy.subtransaction().start() and not properly closed.
 *
 * save_subxact_level is the depth of the explicit-subtransaction stack at
 * function entry; everything above it is rolled back and popped.
 */
static void
PLy_abort_open_subtransactions(int save_subxact_level)
{
	Assert(save_subxact_level >= 0);

	while (list_length(explicit_subtransactions) > save_subxact_level)
	{
		PLySubtransactionData *subtransactiondata;

		Assert(explicit_subtransactions != NIL);

		ereport(WARNING,
				(errmsg("forcibly aborting a subtransaction that has not been exited")));

		/* Roll the subxact back before touching our own bookkeeping. */
		RollbackAndReleaseCurrentSubTransaction();

		/* Make sure SPI remains connected after the subxact teardown. */
		SPI_restore_connection();

		/* Pop the stack entry recorded by start(). */
		subtransactiondata = (PLySubtransactionData *) linitial(explicit_subtransactions);
		explicit_subtransactions = list_delete_first(explicit_subtransactions);

		/* Restore the memory context and resource owner saved at start(). */
		MemoryContextSwitchTo(subtransactiondata->oldcontext);
		CurrentResourceOwner = subtransactiondata->oldowner;
		pfree(subtransactiondata);
	}
}
/*
 * Drop every GRM container remaining on *ctnl, logging each one, adjusting
 * pending-decrease accounting where required, and freeing the container.
 * The list is emptied in place.
 */
void RB_clearResource(List **ctnl)
{
	while ((*ctnl) != NULL)
	{
		GRMContainer container = (GRMContainer)lfirst(list_head(*ctnl));

		/* Unlink the head node inside the resource manager context. */
		MEMORY_CONTEXT_SWITCH_TO(PCONTEXT)
		(*ctnl) = list_delete_first(*ctnl);
		MEMORY_CONTEXT_SWITCH_BACK

		elog(LOG, "Resource broker dropped GRM container "INT64_FORMAT
				  "(%d MB, %d CORE) on host %s",
			 container->ID,
			 container->MemoryMB,
			 container->Core,
			 container->HostName == NULL ? "NULL" : container->HostName);

		/* Back out this container's contribution to the pending decrease. */
		if (container->CalcDecPending)
		{
			minusResourceBundleData(&(container->Resource->DecPending),
									container->MemoryMB,
									container->Core);
			Assert(container->Resource->DecPending.Core >= 0);
			Assert(container->Resource->DecPending.MemoryMB >= 0);
		}

		/* Destroy resource container. */
		freeGRMContainer(container);
		PRESPOOL->RetPendingContainerCount--;
	}
}
/*
 * subxact.__exit__(exc_type, exc, tb) or subxact.exit(exc_type, exc, tb)
 *
 * Exit an explicit subtransaction. exc_type is an exception type, exc
 * is the exception object, tb is the traceback. If exc_type is None,
 * commit the subtransaction, if not abort it.
 *
 * The method signature is chosen to allow subtransaction objects to
 * be used as context managers as described in
 * <http://www.python.org/dev/peps/pep-0343/>.
 */
static PyObject *
PLy_subtransaction_exit(PyObject *self, PyObject *args)
{
	PyObject   *type;
	PyObject   *value;
	PyObject   *traceback;
	PLySubtransactionData *subxactdata;
	PLySubtransactionObject *subxact = (PLySubtransactionObject *) self;

	if (!PyArg_ParseTuple(args, "OOO", &type, &value, &traceback))
		return NULL;

	/* Reject exits that don't pair with a successful __enter__. */
	if (!subxact->started)
	{
		PLy_exception_set(PyExc_ValueError,
						  "this subtransaction has not been entered");
		return NULL;
	}

	if (subxact->exited)
	{
		PLy_exception_set(PyExc_ValueError,
						  "this subtransaction has already been exited");
		return NULL;
	}

	if (explicit_subtransactions == NIL)
	{
		PLy_exception_set(PyExc_ValueError,
						  "there is no subtransaction to exit from");
		return NULL;
	}

	subxact->exited = true;

	/* A non-None exception type means the with-block raised: abort. */
	if (type != Py_None)
	{
		/* Abort the inner transaction */
		RollbackAndReleaseCurrentSubTransaction();
	}
	else
	{
		ReleaseCurrentSubTransaction();
	}

	/* Pop the state saved by __enter__ and restore context/owner. */
	subxactdata = (PLySubtransactionData *) linitial(explicit_subtransactions);
	explicit_subtransactions = list_delete_first(explicit_subtransactions);

	MemoryContextSwitchTo(subxactdata->oldcontext);
	CurrentResourceOwner = subxactdata->oldowner;
	pfree(subxactdata);

	/*
	 * AtEOSubXact_SPI() should not have popped any SPI context, but just in
	 * case it did, make sure we remain connected.
	 */
	SPI_restore_connection();

	/* Context-manager protocol: returning None means "don't suppress". */
	Py_INCREF(Py_None);
	return Py_None;
}
/*
 * Release every IPv4-address string held in *addresses along with the list
 * cells themselves.  All memory was allocated in the given context; the list
 * pointer is left NULL/NIL on return.
 */
void freeHostIPV4AddressesAsString(MCTYPE context, List **addresses)
{
	Assert( addresses != NULL );

	MEMORY_CONTEXT_SWITCH_TO(context)
	while( *addresses != NULL )
	{
		/* Free the payload first, then drop its list cell. */
		void *payload = lfirst(list_head(*addresses));
		rm_pfree(context, payload);
		*addresses = list_delete_first(*addresses);
	}
	MEMORY_CONTEXT_SWITCH_BACK
}
/* * codegen_exit_loop: Fix up loop stack when a loop is exited. Break statements * jump to the current file position. */ void codegen_exit_loop(void) { list_type p; /* Backpatch break statements to jump to end of loop */ for (p = current_loop->break_list; p != NULL; p = p->next) BackpatchGoto(outfile, (int) p->data, FileCurPos(outfile)); /* Remove current list from loop "stack" */ loop_stack = list_delete_first(loop_stack); /* Restore current_loop to correct state */ current_loop = (loop_type) list_first_item(loop_stack); }
/*
 * Use connection id.
 *
 * Hands out the connection-track ID at the head of the free list.  On
 * success *connid receives the ID and FUNC_RETURN_OK is returned; when no
 * IDs remain, *connid is set to INVALID_CONNID and CONNTRACK_CONNID_FULL
 * is returned.
 */
int useConnectionID(int32_t *connid)
{
	/* Fail fast when every connection ID is already in use. */
	if ( PCONTRACK->FreeConnIDs == NULL )
	{
		*connid = INVALID_CONNID;
		return CONNTRACK_CONNID_FULL;
	}

	/* Take the ID at the head of the free list. */
	int head = lfirst_int(list_head(PCONTRACK->FreeConnIDs));
	*connid = head;

	/* Unlink the head cell inside the resource manager context. */
	MEMORY_CONTEXT_SWITCH_TO(PCONTEXT)
	PCONTRACK->FreeConnIDs = list_delete_first(PCONTRACK->FreeConnIDs);
	MEMORY_CONTEXT_SWITCH_BACK

	elog(DEBUG3, "Resource manager uses connection track ID %d", *connid);
	return FUNC_RETURN_OK;
}
/* * AtSubCommit_Notify() --- Take care of subtransaction commit. * * Reassign all items in the pending notifies list to the parent transaction. */ void AtSubCommit_Notify(void) { List *parentPendingNotifies; parentPendingNotifies = (List *) linitial(upperPendingNotifies); upperPendingNotifies = list_delete_first(upperPendingNotifies); Assert(list_length(upperPendingNotifies) == GetCurrentTransactionNestLevel() - 2); /* * We could try to eliminate duplicates here, but it seems not worthwhile. */ pendingNotifies = list_concat(parentPendingNotifies, pendingNotifies); }
/*
 * Free a cluster report segment list, delegating to the registered resource
 * broker implementation when one provides its own cleanup hook.
 */
void RB_freeClusterReport(List **segments)
{
	/* Broker-specific cleanup takes precedence over the default. */
	if (CurrentRBImp.freeClusterReport != NULL)
	{
		CurrentRBImp.freeClusterReport(segments);
		return;
	}

	MEMORY_CONTEXT_SWITCH_TO(PCONTEXT)
	/* default implementation: pop each SegInfo and free it. */
	while( list_length(*segments) > 0 )
	{
		SegInfo info = (SegInfo)lfirst(list_head(*segments));
		*segments = list_delete_first(*segments);
		rm_pfree(PCONTEXT, info);
	}
	MEMORY_CONTEXT_SWITCH_BACK
}
/*
 * AtSubAbort_Notify() --- Take care of subtransaction abort.
 */
void
AtSubAbort_Notify(void)
{
	int			nestLevel = GetCurrentTransactionNestLevel();

	/*
	 * All we have to do is pop the stack --- the notifies made in this
	 * subxact are no longer interesting, and the space will be freed when
	 * CurTransactionContext is recycled.
	 *
	 * This routine could be called more than once at a given nesting level if
	 * there is trouble during subxact abort. Avoid dumping core by using
	 * GetCurrentTransactionNestLevel as the indicator of how far we need to
	 * prune the list.
	 */
	while (list_length(upperPendingNotifies) > nestLevel - 2)
	{
		pendingNotifies = (List *) linitial(upperPendingNotifies);
		upperPendingNotifies = list_delete_first(upperPendingNotifies);
	}
}
/*
 * Traverse the tree to find path from root page to specified "child" block.
 *
 * returns a new insertion stack, starting from the parent of "child", up
 * to the root. *downlinkoffnum is set to the offset of the downlink in the
 * direct parent of child.
 *
 * To prevent deadlocks, this should lock only one page at a time.
 */
static GISTInsertStack *
gistFindPath(Relation r, BlockNumber child, OffsetNumber *downlinkoffnum)
{
	Page		page;
	Buffer		buffer;
	OffsetNumber i,
				maxoff;
	ItemId		iid;
	IndexTuple	idxtuple;
	List	   *fifo;
	GISTInsertStack *top,
			   *ptr;
	BlockNumber blkno;

	/* Seed the breadth-first search queue with the root page. */
	top = (GISTInsertStack *) palloc0(sizeof(GISTInsertStack));
	top->blkno = GIST_ROOT_BLKNO;
	top->downlinkoffnum = InvalidOffsetNumber;

	fifo = list_make1(top);
	while (fifo != NIL)
	{
		/* Get next page to visit */
		top = linitial(fifo);
		fifo = list_delete_first(fifo);

		/* Hold at most this one page's lock while we scan it. */
		buffer = ReadBuffer(r, top->blkno);
		LockBuffer(buffer, GIST_SHARE);
		gistcheckpage(r, buffer);
		page = (Page) BufferGetPage(buffer);

		if (GistPageIsLeaf(page))
		{
			/*
			 * Because we scan the index top-down, all the rest of the pages
			 * in the queue must be leaf pages as well.
			 */
			UnlockReleaseBuffer(buffer);
			break;
		}

		top->lsn = PageGetLSN(page);

		/*
		 * If F_FOLLOW_RIGHT is set, the page to the right doesn't have a
		 * downlink. This should not normally happen..
		 */
		if (GistFollowRight(page))
			elog(ERROR, "concurrent GiST page split was incomplete");

		if (top->parent && top->parent->lsn < GistPageGetNSN(page) &&
			GistPageGetOpaque(page)->rightlink != InvalidBlockNumber /* sanity check */ )
		{
			/*
			 * Page was split while we looked elsewhere. We didn't see the
			 * downlink to the right page when we scanned the parent, so add
			 * it to the queue now.
			 *
			 * Put the right page ahead of the queue, so that we visit it
			 * next. That's important, because if this is the lowest internal
			 * level, just above leaves, we might already have queued up some
			 * leaf pages, and we assume that there can't be any non-leaf
			 * pages behind leaf pages.
			 */
			ptr = (GISTInsertStack *) palloc0(sizeof(GISTInsertStack));
			ptr->blkno = GistPageGetOpaque(page)->rightlink;
			ptr->downlinkoffnum = InvalidOffsetNumber;
			ptr->parent = top->parent;

			fifo = lcons(ptr, fifo);
		}

		/* Scan all downlinks on this internal page. */
		maxoff = PageGetMaxOffsetNumber(page);
		for (i = FirstOffsetNumber; i <= maxoff; i = OffsetNumberNext(i))
		{
			iid = PageGetItemId(page, i);
			idxtuple = (IndexTuple) PageGetItem(page, iid);
			blkno = ItemPointerGetBlockNumber(&(idxtuple->t_tid));
			if (blkno == child)
			{
				/* Found it! */
				UnlockReleaseBuffer(buffer);
				*downlinkoffnum = i;
				return top;
			}
			else
			{
				/* Append this child to the list of pages to visit later */
				ptr = (GISTInsertStack *) palloc0(sizeof(GISTInsertStack));
				ptr->blkno = blkno;
				ptr->downlinkoffnum = i;
				ptr->parent = top;

				fifo = lappend(fifo, ptr);
			}
		}

		UnlockReleaseBuffer(buffer);
	}

	elog(ERROR, "failed to re-find parent of a page in index \"%s\", block %u",
		 RelationGetRelationName(r), child);
	return NULL;				/* keep compiler quiet */
}
/* AddUpdResqueueCapabilityEntryInternal:
 *
 * Internal function to add a new entry to pg_resqueuecapability, or
 * update an existing one. Key cols are queueid, restypint. If
 * old_tuple is set (ie not InvalidOid), the update the ressetting column,
 * else insert a new row.
 *
 * Returns the (possibly modified) stmtOptIdList, which is used to keep
 * row OIDs in sync between dispatcher and segments (see MPP-11858 below).
 */
static List *
AddUpdResqueueCapabilityEntryInternal(
		cqContext *pcqCtx,
		List *stmtOptIdList,
		Oid queueid,
		int resTypeInt,
		char *pResSetting,
		Relation rel,
		HeapTuple old_tuple)
{
	HeapTuple	new_tuple;
	Datum		values[Natts_pg_resqueuecapability];
	bool		isnull[Natts_pg_resqueuecapability];
	bool		new_record_repl[Natts_pg_resqueuecapability];

	/* Start with no NULL columns and no columns marked for replacement. */
	MemSet(isnull, 0, sizeof(bool) * Natts_pg_resqueuecapability);
	MemSet(new_record_repl, 0, sizeof(bool) * Natts_pg_resqueuecapability);

	values[Anum_pg_resqueuecapability_resqueueid - 1] = ObjectIdGetDatum(queueid);
	values[Anum_pg_resqueuecapability_restypid - 1] = resTypeInt;

	Assert(pResSetting);

	values[Anum_pg_resqueuecapability_ressetting - 1] =
			CStringGetTextDatum(pResSetting);
	/* set this column to update */
	new_record_repl[Anum_pg_resqueuecapability_ressetting - 1] = true;

	/* Reject malformed settings before touching the catalog. */
	ValidateResqueueCapabilityEntry(resTypeInt, pResSetting);

	if (HeapTupleIsValid(old_tuple))
	{
		/* Update path: replace only the ressetting column. */
		new_tuple = caql_modify_current(pcqCtx,
										values,
										isnull,
										new_record_repl);

		caql_update_current(pcqCtx, new_tuple);
		/* and Update indexes (implicit) */
	}
	else
	{
		Oid			s1;

		/* Insert path: form a brand-new row. */
		new_tuple = caql_form_tuple(pcqCtx, values, isnull);

		/* MPP-11858: synchronize the oids for CREATE/ALTER options... */
		if ((Gp_role != GP_ROLE_DISPATCH) && list_length(stmtOptIdList))
		{
			/* Segments consume the OID the dispatcher recorded for this row. */
			Oid			s2 = list_nth_oid(stmtOptIdList, 0);

			stmtOptIdList = list_delete_first(stmtOptIdList);

			if (OidIsValid(s2))
				HeapTupleSetOid(new_tuple, s2);
		}

		s1 = caql_insert(pcqCtx, new_tuple);
		/* and Update indexes (implicit) */

		if (Gp_role == GP_ROLE_DISPATCH)
		{
			/* Dispatcher records the new OID for the segments to reuse. */
			stmtOptIdList = lappend_oid(stmtOptIdList, s1);
		}
	}

	if (HeapTupleIsValid(old_tuple))
		heap_freetuple(new_tuple);

	return stmtOptIdList;
} /* end AddUpdResqueueCapabilityEntryInternal */
/*
 * Look up (or create and cache) the swizzled D3D texture for a render packet.
 *
 * Returns the cached texture when an entry matches the DIB identity, frame,
 * palette translations and effect; otherwise evicts old entries until the
 * cache is under budget, builds a new texture, caches it and returns it.
 * Returns NULL only if texture creation fails.
 *
 * Fixes vs. the original:
 *  - the second palette comparison tested xLat0 twice, so entries differing
 *    only in xLat1 were wrongly returned as hits; it now tests xLat1
 *    (matching the fields stored below and passed to texture creation);
 *  - the eviction loop now breaks if the list is empty, instead of spinning
 *    forever should size stay above max;
 *  - removed the unused local curTex.
 */
LPDIRECT3DTEXTURE9 D3DCacheTextureLookupSwizzled(d3d_texture_cache *pTextureCache, d3d_render_packet_new *pPacket, int effect)
{
	LPDIRECT3DTEXTURE9 pTexture = NULL;
	D3DSURFACE_DESC surfDesc;
	d3d_texture_cache_entry *pTexEntry;
	list_type list;

	/* Cache lookup: match on DIB identity, frame, palette xlats and effect. */
	for (list = pTextureCache->textureList; list != NULL; list = list->next)
	{
		pTexEntry = (d3d_texture_cache_entry *)list->data;

		if ((pPacket->pDib->uniqueID == pTexEntry->pDibID) &&
			(pPacket->pDib->uniqueID2 == pTexEntry->pDibID2) &&
			(pPacket->pDib->frame == pTexEntry->frame))
		{
			/* BUG FIX: was (xLat0 == xLat0) twice; compare xLat1 too. */
			if ((pPacket->xLat0 == pTexEntry->xLat0) &&
				(pPacket->xLat1 == pTexEntry->xLat1) &&
				(effect == pTexEntry->effects))
			{
				return pTexEntry->pTexture;
			}
		}
	}

	/* Miss: evict oldest entries until the cache is back under budget. */
	while (pTextureCache->size > pTextureCache->max)
	{
		pTexEntry = (d3d_texture_cache_entry *)list_first_item(pTextureCache->textureList);

		if (pTexEntry == NULL)
			break;				/* empty list but size > max: avoid infinite loop */

		if (pTexEntry->pTexture)
			IDirect3DTexture9_Release(pTexEntry->pTexture);

		pTexEntry->pTexture = NULL;
		pTextureCache->size -= pTexEntry->size;
		free(pTextureCache->textureList->data);
		pTextureCache->textureList = list_delete_first(pTextureCache->textureList);
	}

	/* Build the new texture and insert a cache entry for it. */
	pTexture = D3DRenderTextureCreateFromBGFSwizzled(pPacket->pDib, pPacket->xLat0,
													 pPacket->xLat1, effect);
	if (NULL == pTexture)
		return NULL;

	pTexEntry = (d3d_texture_cache_entry *)D3DRenderMalloc(sizeof(d3d_texture_cache_entry));
	assert(pTexEntry);

	IDirect3DTexture9_GetLevelDesc(pTexture, 0, &surfDesc);

	pTexEntry->effects = effect;
	pTexEntry->pDibID = pPacket->pDib->uniqueID;
	pTexEntry->pDibID2 = pPacket->pDib->uniqueID2;
	pTexEntry->frame = pPacket->pDib->frame;
	pTexEntry->pTexture = pTexture;
	pTexEntry->xLat0 = pPacket->xLat0;
	pTexEntry->xLat1 = pPacket->xLat1;
	pTexEntry->size = getTextureSizeBytes(surfDesc);

	pTextureCache->textureList = list_add_item(pTextureCache->textureList, pTexEntry);
	pTextureCache->size += pTexEntry->size;

	return pTexEntry->pTexture;
}
/* * PopRestrictionContext removes the most recently added restriction context from * context list. The function assumes the list is not empty. */ void PopRestrictionContext(void) { relationRestrictionContextList = list_delete_first(relationRestrictionContextList); }
/*
 * fmgr hook: builds a call graph of PL function execution.  Maintains a
 * per-graph call stack and an edge hash table keyed by (caller, callee),
 * accumulating call counts, total time and self time for each edge.  When
 * the top-level function exits cleanly the gathered data is flushed to the
 * buffer table.
 *
 * Fix vs. the original: the line that seeded current_self_time_start read
 * "memcpy(¤t_self_time_start, ...)" — an HTML-entity mangling of
 * "&current_self_time_start" ("&curren" -> U+00A4).  The address-of form is
 * restored; the same variable is read by name elsewhere in this function.
 */
static void
call_graph_fmgr_hook(FmgrHookEventType event, FmgrInfo *flinfo, Datum *args)
{
	bool		aborted = false;
	EdgeHashKey key;
	EdgeHashElem *elem;
	instr_time	current_time;

	/* Chain to any previously installed hook first. */
	if (next_fmgr_hook)
		(*next_fmgr_hook) (event, flinfo, args);

	INSTR_TIME_SET_CURRENT(current_time);

	switch (event)
	{
		case FHET_START:
			{
				bool		found;

				if (call_stack == NIL)
				{
					top_level_function_oid = flinfo->fn_oid;

					/* We're about to enter the top level function; check whether we've been disabled */
					if (!enable_call_graph)
					{
						tracking_current_graph = false;
						recursion_depth = 1;

						return;
					}

					/* Start tracking the call graph; we need to create the hash table */
					create_edge_hash_table();
					tracking_current_graph = true;

					/* If we're tracking table usage, take a stat snapshot now */
					if (track_table_usage)
						table_stat_snapshot = get_table_stat_snapshot();

					/* Use InvalidOid for the imaginary edge into the top level function */
					key.caller = InvalidOid;
				}
				else
				{
					if (!tracking_current_graph)
					{
						/*
						 * Not tracking this graph, just see whether we've recursed into the top level function
						 * (see the comments near the beginning of the file)
						 */
						if (flinfo->fn_oid == top_level_function_oid)
							recursion_depth++;

						return;
					}

					elem = linitial(call_stack);

					/* Calculate the self time we spent in the previous function (elem->key.callee in this case). */
					INSTR_TIME_ACCUM_DIFF(elem->self_time, current_time, current_self_time_start);

					key.caller = elem->key.callee;
				}

				key.callee = flinfo->fn_oid;

				elem = hash_search(edge_hash_table, (void *) &key, HASH_ENTER, &found);
				if (found)
					elem->num_calls++;
				else
				{
					elem->key = key;
					elem->num_calls = 1;
					INSTR_TIME_SET_ZERO(elem->total_time);
					INSTR_TIME_SET_ZERO(elem->self_time);
				}

				call_stack = lcons(elem, call_stack);

				INSTR_TIME_SET_CURRENT(elem->total_time_start);
				/* Self-time accounting for the callee starts now (mojibake fixed here). */
				memcpy(&current_self_time_start, &elem->total_time_start, sizeof(instr_time));
			}
			break;

			/*
			 * In both ABORT and END cases we pop off the last element from the call stack, and if the stack
			 * is empty, we process the data we gathered.
			 *
			 * XXX for some reason if the top level function aborted SPI won't work correctly.
			 */
		case FHET_ABORT:
			aborted = true;
			/* fallthrough */

		case FHET_END:

			/*
			 * If we're not tracking this particular graph, we only need to see whether we're done
			 * with the graph or not.
			 */
			if (!tracking_current_graph)
			{
				if (top_level_function_oid == flinfo->fn_oid)
				{
					recursion_depth--;

					if (recursion_depth == 0)
						top_level_function_oid = InvalidOid;
				}

				Assert(table_stat_snapshot == NULL);
				return;
			}

			Assert(((EdgeHashElem *) linitial(call_stack))->key.callee == flinfo->fn_oid);

			/* Close out timing for the function we're leaving. */
			elem = linitial(call_stack);
			INSTR_TIME_ACCUM_DIFF(elem->self_time, current_time, current_self_time_start);
			INSTR_TIME_ACCUM_DIFF(elem->total_time, current_time, elem->total_time_start);

			call_stack = list_delete_first(call_stack);

			if (call_stack != NIL)
			{
				/* we're going back to the previous node, start recording its self_time */
				INSTR_TIME_SET_CURRENT(current_self_time_start);
				break;
			}

			/*
			 * At this point we're done with the graph. If the top level function exited cleanly, we can
			 * process the data we've gathered in the hash table and add that data into the buffer table.
			 */
			if (!aborted)
			{
				/*
				 * It is in some cases possible that process_edge_data() throws an exception. We really need to
				 * clean up our state in case that happens.
				 */
				PG_TRY();
				{
					Datum		buffer_id = assign_callgraph_buffer_id();

					/* Better check both conditions here */
					if (table_stat_snapshot && track_table_usage)
						insert_snapshot_delta(buffer_id, table_stat_snapshot);

					process_edge_data(buffer_id);
				}
				PG_CATCH();
				{
					if (table_stat_snapshot)
					{
						release_table_stat_snapshot(table_stat_snapshot);
						table_stat_snapshot = NULL;
					}

					destroy_edge_hash_table();
					top_level_function_oid = InvalidOid;

					PG_RE_THROW();
				}
				PG_END_TRY();
			}

			if (table_stat_snapshot)
			{
				release_table_stat_snapshot(table_stat_snapshot);
				table_stat_snapshot = NULL;
			}

			destroy_edge_hash_table();
			top_level_function_oid = InvalidOid;
			break;

		default:
			elog(ERROR, "Unknown FmgrHookEventType %d", event);
			return;
	}
}
/*
 * Acquire a chunk from the pool and mmap the window [index, index+len)
 * (in units of cpool->pg_size) of the pool's file into it.
 *
 * Returns the initialized chunk with ref_counter == 1, or NULL on failure.
 *
 * Fixes vs. the original:
 *  - "return errno;" / "return error;" returned ints from a function whose
 *    return type is chunk_t* — both paths now return NULL;
 *  - the realloc result no longer overwrites cpool->loafs directly, so the
 *    original array is not lost when realloc fails;
 *  - the mmap'd region is unmapped if the hash-table insert fails.
 */
chunk_t* chunk_init(off_t index, off_t len, chunk_pool_t* cpool)
{
	chunk_t* chunk;

	if (!cpool)
	{
		/* log_write(ERROR, "chunk_init: invalid args (cpool)"); */
		return NULL;
	}

	if (cpool->free_chunks->size == 0)
	{
		if (cpool->zero_chunks->size == 0)
		{
			/* Both lists exhausted: grow the pool by one loaf of chunks. */
			chunk_t** loafs = (chunk_t**)realloc(cpool->loafs,
					(cpool->loafs_count + 1) * sizeof(chunk_t*));
			if (!loafs)
			{
				/* cpool->loafs is still valid; just report failure. */
				return NULL;
			}
			cpool->loafs = loafs;

			cpool->loafs[cpool->loafs_count] =
					(chunk_t*)calloc(DEFAULT_ARRAY_SIZE, sizeof(chunk_t));
			if (!cpool->loafs[cpool->loafs_count])
				return NULL;

			cpool->loafs_count += 1;

			/* Put every chunk of the new loaf on the free list. */
			for (int i = 0; i < DEFAULT_ARRAY_SIZE; i++)
				list_add_last(cpool->free_chunks,
							  (value_t*)&(cpool->loafs[cpool->loafs_count - 1])[i]);

			chunk = cpool->free_chunks->head->value;
			list_delete_first(cpool->free_chunks);
		}
		else
		{
			/* Recycle a zero-referenced chunk: unmap its old window first. */
			chunk = cpool->zero_chunks->head->value;

			/*
			 * NOTE(review): the unmap length is chunk->len while the mmap
			 * below uses pg_size*len — confirm whether len is stored in
			 * bytes or pages; preserved as-is.
			 */
			if (munmap(chunk->data, chunk->len) == -1)
				return NULL;	/* was "return errno;" — wrong type */

			list_delete_first(cpool->zero_chunks);
		}
	}
	else
	{
		chunk = cpool->free_chunks->head->value;
		list_delete_first(cpool->free_chunks);
	}

	if (!chunk)
	{
		/* log_write(ERROR, "chunk_init: cannot allocate memory"); */
		return NULL;
	}

	int size = cpool->pg_size;
	chunk->data = mmap(NULL, size * len, cpool->protection, MAP_SHARED,
					   cpool->fd, size * index);
	if (chunk->data == MAP_FAILED)
		return NULL;

	chunk->index = index;
	chunk->len = len;
	chunk->ref_counter = 1;
	chunk->cpool = cpool;

	if (ht_add_item(cpool->hash, chunk->index, chunk))
	{
		/* was "return error;" — wrong type; also release the mapping */
		munmap(chunk->data, size * len);
		return NULL;
	}

	return chunk;
}
static void FunctionParserInit(FunctionParser *self, Checker *checker, const char *infile, TupleDesc desc, bool multi_process, Oid collation) { int i; ParsedFunction function; int nargs; Oid funcid; HeapTuple ftup; Form_pg_proc pp; bool tupledesc_matched = false; if (pg_strcasecmp(infile, "stdin") == 0) ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), errmsg("cannot load from STDIN in the case of \"TYPE = FUNCTION\""))); if (checker->encoding != -1) ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), errmsg("does not support parameter \"ENCODING\" in \"TYPE = FUNCTION\""))); function = ParseFunction(infile, false); funcid = function.oid; fmgr_info(funcid, &self->flinfo); if (!self->flinfo.fn_retset) ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("function must return set"))); ftup = SearchSysCache(PROCOID, ObjectIdGetDatum(funcid), 0, 0, 0); pp = (Form_pg_proc) GETSTRUCT(ftup); /* Check data type of the function result value */ if (pp->prorettype == desc->tdtypeid && desc->tdtypeid != RECORDOID) tupledesc_matched = true; else if (pp->prorettype == RECORDOID) { TupleDesc resultDesc = NULL; /* Check for OUT parameters defining a RECORD result */ resultDesc = build_function_result_tupdesc_t(ftup); if (resultDesc) { tupledesc_match(desc, resultDesc); tupledesc_matched = true; FreeTupleDesc(resultDesc); } } else if (get_typtype(pp->prorettype) != TYPTYPE_COMPOSITE) ereport(ERROR, (errcode(ERRCODE_DATATYPE_MISMATCH), errmsg("function return data type and target table data type do not match"))); if (tupledesc_matched && checker->tchecker) checker->tchecker->status = NO_COERCION; /* * assign arguments */ nargs = function.nargs; for (i = 0; #if PG_VERSION_NUM >= 80400 i < nargs - function.nvargs; #else i < nargs; #endif ++i) { if (function.args[i] == NULL) { if (self->flinfo.fn_strict) ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), errmsg("function is strict, but argument %d is NULL", i))); self->fcinfo.argnull[i] = true; } else { 
Oid typinput; Oid typioparam; getTypeInputInfo(pp->proargtypes.values[i], &typinput, &typioparam); self->fcinfo.arg[i] = OidInputFunctionCall(typinput, (char *) function.args[i], typioparam, -1); self->fcinfo.argnull[i] = false; pfree(function.args[i]); } } /* * assign variadic arguments */ #if PG_VERSION_NUM >= 80400 if (function.nvargs > 0) { int nfixedarg; Oid func; Oid element_type; int16 elmlen; bool elmbyval; char elmalign; char elmdelim; Oid elmioparam; Datum *elems; bool *nulls; int dims[1]; int lbs[1]; ArrayType *arry; nfixedarg = i; element_type = pp->provariadic; /* * Get info about element type, including its input conversion proc */ get_type_io_data(element_type, IOFunc_input, &elmlen, &elmbyval, &elmalign, &elmdelim, &elmioparam, &func); elems = (Datum *) palloc(function.nvargs * sizeof(Datum)); nulls = (bool *) palloc0(function.nvargs * sizeof(bool)); for (i = 0; i < function.nvargs; i++) { if (function.args[nfixedarg + i] == NULL) nulls[i] = true; else { elems[i] = OidInputFunctionCall(func, (char *) function.args[nfixedarg + i], elmioparam, -1); pfree(function.args[nfixedarg + i]); } } dims[0] = function.nvargs; lbs[0] = 1; arry = construct_md_array(elems, nulls, 1, dims, lbs, element_type, elmlen, elmbyval, elmalign); self->fcinfo.arg[nfixedarg] = PointerGetDatum(arry); } /* * assign default arguments */ if (function.ndargs > 0) { Datum proargdefaults; bool isnull; char *str; List *defaults; int ndelete; ListCell *l; /* shouldn't happen, FuncnameGetCandidates messed up */ if (function.ndargs > pp->pronargdefaults) elog(ERROR, "not enough default arguments"); proargdefaults = SysCacheGetAttr(PROCOID, ftup, Anum_pg_proc_proargdefaults, &isnull); Assert(!isnull); str = TextDatumGetCString(proargdefaults); defaults = (List *) stringToNode(str); Assert(IsA(defaults, List)); pfree(str); /* Delete any unused defaults from the returned list */ ndelete = list_length(defaults) - function.ndargs; while (ndelete-- > 0) defaults = list_delete_first(defaults); 
self->arg_econtext = CreateStandaloneExprContext(); foreach(l, defaults) { Expr *expr = (Expr *) lfirst(l); ExprState *argstate; ExprDoneCond thisArgIsDone; /* probably shouldn't happen ... */ if (nargs >= FUNC_MAX_ARGS) ereport(ERROR, (errcode(ERRCODE_TOO_MANY_ARGUMENTS), errmsg("cannot pass more than %d arguments to a function", FUNC_MAX_ARGS))); argstate = ExecInitExpr(expr, NULL); self->fcinfo.arg[nargs] = ExecEvalExpr(argstate, self->arg_econtext, &self->fcinfo.argnull[nargs], &thisArgIsDone); if (thisArgIsDone != ExprSingleResult) ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("functions and operators can take at most one set argument"))); nargs++; }
/*
 * recv_writer_queue - return the number of queued items
 *
 * Detaches the shared writer queue, flushes each item to the repository,
 * and puts any items that must be retried back at the head of the shared
 * queue.  Returns the number of items still queued afterwards.
 */
static int
recv_writer_queue(void)
{
	PGconn	   *conn;
	List	   *queue;
	int			ret;
	char	   *instid = NULL;
	bool		connection_used = false;

	/* Take ownership of the whole queue under the lock, leaving it empty
	 * so producers can keep appending while we work. */
	pthread_mutex_lock(&writer_queue_lock);
	queue = writer_queue;
	writer_queue = NIL;
	pthread_mutex_unlock(&writer_queue_lock);

	if (list_length(queue) == 0)
		return 0;

	/* install writer schema */
	if ((conn = writer_connect(superuser_connect)) == NULL)
	{
		elog(ERROR, "could not connect to repository");

		/* discard current queue if can't connect to repository */
		if (list_length(queue) > 0)
		{
			elog(WARNING, "writer discards %d items", list_length(queue));
			list_destroy(queue, destroy_writer_queue);
		}
		return 0;
	}

	/* do the writer queue process */
	if ((instid = get_instid(conn)) != NULL)
	{
		connection_used = true;

		while (list_length(queue) > 0)
		{
			QueueItem  *item = (QueueItem *) linitial(queue);

			if (!item->exec(item, conn, instid))
			{
				/* stop processing and leave the item queued for retry */
				if (++item->retry < DB_MAX_RETRY)
					break;		/* retry the item */

				/*
				 * discard if the retry count is exceeded to avoid infinite
				 * loops at one bad item.
				 */
				elog(WARNING, "writer discard an item");
			}
			/* item either succeeded or is being discarded: free and unlink */
			item->free(item);
			queue = list_delete_first(queue);
		}
	}
	/* free(NULL) is a no-op when get_instid failed */
	free(instid);

	/* delay on error */
	if (list_length(queue) > 0)
		delay();

	/*
	 * When we have failed items, we concatenate to the head of writer queue
	 * and retry them in the next cycle.
	 */
	pthread_mutex_lock(&writer_queue_lock);
	writer_queue = list_concat(queue, writer_queue);
	ret = list_length(writer_queue);
	pthread_mutex_unlock(&writer_queue_lock);

	/*
	 * update last used time of the connection.
	 * NOTE(review): conn is not closed here — presumably writer_connect
	 * caches it (cf. writer_conn_last_used); confirm ownership.
	 */
	if (connection_used)
		writer_conn_last_used = time(NULL);

	return ret;
}