/*
 * ProcessCatchupEvent
 *
 * Respond to a catchup event (PROCSIG_CATCHUP_INTERRUPT) from another
 * backend.
 *
 * This is called either directly from the PROCSIG_CATCHUP_INTERRUPT
 * signal handler, or the next time control reaches the outer idle loop
 * (assuming there's still anything to do by then).
 */
static void
ProcessCatchupEvent(void)
{
	bool		notify_enabled;
	bool		client_wait_timeout_enabled;
	DtxContext	saveDistributedTransactionContext;

	/*
	 * Funny indentation to keep the code inside identical to upstream
	 * while at the same time supporting CMockery which has problems with
	 * multiple bracing on column 1.
	 */
	PG_TRY();
	{

	/* Mark that we are inside catchup processing; cleared again on exit
	 * and in the PG_CATCH block below. */
	in_process_catchup_event = 1;

	/* Must prevent SIGUSR2 and SIGALRM(for IdleSessionGangTimeout) interrupt while I am running */
	notify_enabled = DisableNotifyInterrupt();
	client_wait_timeout_enabled = DisableClientWaitTimeoutInterrupt();

	/*
	 * What we need to do here is cause ReceiveSharedInvalidMessages() to run,
	 * which will do the necessary work and also reset the
	 * catchupInterruptOccurred flag.  If we are inside a transaction we can
	 * just call AcceptInvalidationMessages() to do this.  If we aren't, we
	 * start and immediately end a transaction; the call to
	 * AcceptInvalidationMessages() happens down inside transaction start.
	 *
	 * It is awfully tempting to just call AcceptInvalidationMessages()
	 * without the rest of the xact start/stop overhead, and I think that
	 * would actually work in the normal case; but I am not sure that things
	 * would clean up nicely if we got an error partway through.
	 */
	if (IsTransactionOrTransactionBlock())
	{
		elog(DEBUG1, "ProcessCatchupEvent inside transaction");
		AcceptInvalidationMessages();
	}
	else
	{
		elog(DEBUG1, "ProcessCatchupEvent outside transaction");

		/*
		 * Save distributed transaction context first.  The dummy local
		 * transaction below must not run with a distributed context.
		 */
		saveDistributedTransactionContext = DistributedTransactionContext;
		DistributedTransactionContext = DTX_CONTEXT_LOCAL_ONLY;

		StartTransactionCommand();
		CommitTransactionCommand();

		DistributedTransactionContext = saveDistributedTransactionContext;
	}

	/* Re-enable only the interrupts that were enabled when we started. */
	if (notify_enabled)
		EnableNotifyInterrupt();

	if (client_wait_timeout_enabled)
		EnableClientWaitTimeoutInterrupt();

	in_process_catchup_event = 0;

	}
	PG_CATCH();
	{
	/* NOTE(review): on error the notify/timeout interrupts are NOT
	 * re-enabled here; presumably error recovery takes care of that —
	 * confirm against DisableNotifyInterrupt() callers. */
	in_process_catchup_event = 0;
	PG_RE_THROW();
	}
	PG_END_TRY();
}
/*
 * PLyMapping_ToComposite
 *
 * Build a composite Datum of the row type "desc" from a Python mapping.
 * Each attribute value is fetched from the mapping by column name; a
 * missing key is an error (store None under the key to get SQL NULL).
 * Dropped columns are emitted as NULLs.
 */
static Datum
PLyMapping_ToComposite(PLyObToDatum *arg, TupleDesc desc, PyObject *mapping)
{
	HeapTuple	htup;
	Datum		rv;
	Datum	   *attvals;
	bool	   *attnulls;
	volatile int attno;

	Assert(PyMapping_Check(mapping));

	/* Scratch arrays handed to heap_form_tuple below */
	attvals = palloc(sizeof(Datum) * desc->natts);
	attnulls = palloc(sizeof(bool) * desc->natts);

	for (attno = 0; attno < desc->natts; ++attno)
	{
		Form_pg_attribute attr = TupleDescAttr(desc, attno);
		PLyObToDatum *conv;
		char	   *attname;
		PyObject *volatile item;

		if (attr->attisdropped)
		{
			/* dropped column: always NULL */
			attvals[attno] = (Datum) 0;
			attnulls[attno] = true;
			continue;
		}

		attname = NameStr(attr->attname);
		item = NULL;
		conv = &arg->u.tuple.atts[attno];

		/*
		 * "item" is volatile so the PG_CATCH block sees its current value
		 * after a longjmp; the reference is released on both paths.
		 */
		PG_TRY();
		{
			item = PyMapping_GetItemString(mapping, attname);
			if (!item)
				ereport(ERROR,
						(errcode(ERRCODE_UNDEFINED_COLUMN),
						 errmsg("key \"%s\" not found in mapping", attname),
						 errhint("To return null in a column, "
								 "add the value None to the mapping with the key named after the column.")));
			attvals[attno] = conv->func(conv, item, &attnulls[attno], false);
			Py_XDECREF(item);
			item = NULL;
		}
		PG_CATCH();
		{
			/* don't leak the Python reference on error */
			Py_XDECREF(item);
			PG_RE_THROW();
		}
		PG_END_TRY();
	}

	htup = heap_form_tuple(desc, attvals, attnulls);
	rv = heap_copy_tuple_as_datum(htup, desc);
	heap_freetuple(htup);

	pfree(attvals);
	pfree(attnulls);

	return rv;
}
/*
 * PLyGenericObject_ToComposite
 *
 * Build a composite Datum of the row type "desc" from a generic Python
 * object, reading one attribute per column by name.  Dropped columns are
 * emitted as NULLs.  "inarray" only affects the hint text of the error
 * raised when an attribute is missing.
 */
static Datum
PLyGenericObject_ToComposite(PLyObToDatum *arg, TupleDesc desc, PyObject *object, bool inarray)
{
	Datum		result;
	HeapTuple	tuple;
	Datum	   *values;
	bool	   *nulls;
	volatile int i;			/* volatile: survives longjmp out of PG_TRY */

	/* Build tuple */
	values = palloc(sizeof(Datum) * desc->natts);
	nulls = palloc(sizeof(bool) * desc->natts);
	for (i = 0; i < desc->natts; ++i)
	{
		char	   *key;
		PyObject *volatile value;	/* volatile: released in PG_CATCH */
		PLyObToDatum *att;
		Form_pg_attribute attr = TupleDescAttr(desc, i);

		if (attr->attisdropped)
		{
			/* dropped column: always NULL */
			values[i] = (Datum) 0;
			nulls[i] = true;
			continue;
		}

		key = NameStr(attr->attname);
		value = NULL;
		att = &arg->u.tuple.atts[i];
		PG_TRY();
		{
			value = PyObject_GetAttrString(object, key);
			if (!value)
			{
				/*
				 * No attribute for this column in the object.
				 *
				 * If we are parsing a composite type in an array, a likely
				 * cause is that the function contained something like "[[123,
				 * 'foo']]". Before PostgreSQL 10, that was interpreted as an
				 * array, with a composite type (123, 'foo') in it. But now
				 * it's interpreted as a two-dimensional array, and we try to
				 * interpret "123" as the composite type. See also similar
				 * heuristic in PLyObject_ToScalar().
				 */
				ereport(ERROR,
						(errcode(ERRCODE_UNDEFINED_COLUMN),
						 errmsg("attribute \"%s\" does not exist in Python object", key),
						 inarray ?
						 errhint("To return a composite type in an array, return the composite type as a Python tuple, e.g., \"[('foo',)]\".") :
						 errhint("To return null in a column, let the returned object have an attribute named after column with value None.")));
			}

			values[i] = att->func(att, value, &nulls[i], false);
			Py_XDECREF(value);
			value = NULL;
		}
		PG_CATCH();
		{
			/* don't leak the Python reference on error */
			Py_XDECREF(value);
			PG_RE_THROW();
		}
		PG_END_TRY();
	}

	tuple = heap_form_tuple(desc, values, nulls);
	result = heap_copy_tuple_as_datum(tuple, desc);
	heap_freetuple(tuple);
	pfree(values);
	pfree(nulls);

	return result;
}
/*
 * PersistHoldablePortal
 *
 * Prepare the specified Portal for access outside of the current
 * transaction. When this function returns, all future accesses to the
 * portal must be done via the Tuplestore (not by invoking the
 * executor).
 */
void
PersistHoldablePortal(Portal portal)
{
	QueryDesc  *queryDesc = PortalGetQueryDesc(portal);
	Portal		saveActivePortal;
	ResourceOwner saveResourceOwner;
	MemoryContext savePortalContext;
	MemoryContext oldcxt;

	/*
	 * If we're preserving a holdable portal, we had better be inside the
	 * transaction that originally created it.
	 */
	Assert(portal->createSubid != InvalidSubTransactionId);
	Assert(queryDesc != NULL);

	/*
	 * Caller must have created the tuplestore already.
	 */
	Assert(portal->holdContext != NULL);
	Assert(portal->holdStore != NULL);

	/*
	 * Before closing down the executor, we must copy the tupdesc into
	 * long-term memory, since it was created in executor memory.
	 */
	oldcxt = MemoryContextSwitchTo(portal->holdContext);

	portal->tupDesc = CreateTupleDescCopy(portal->tupDesc);

	MemoryContextSwitchTo(oldcxt);

	/*
	 * Check for improper portal use, and mark portal active.
	 */
	if (portal->status != PORTAL_READY)
		ereport(ERROR,
				(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
				 errmsg("portal \"%s\" cannot be run", portal->name)));
	portal->status = PORTAL_ACTIVE;

	/*
	 * Set up global portal context pointers.  Saved values are restored
	 * on both the success and the error path.
	 */
	saveActivePortal = ActivePortal;
	saveResourceOwner = CurrentResourceOwner;
	savePortalContext = PortalContext;
	PG_TRY();
	{
		ActivePortal = portal;
		if (portal->resowner)
			CurrentResourceOwner = portal->resowner;
		PortalContext = PortalGetHeapMemory(portal);

		MemoryContextSwitchTo(PortalContext);

		PushActiveSnapshot(queryDesc->snapshot);

		/*
		 * Rewind the executor: we need to store the entire result set in the
		 * tuplestore, so that subsequent backward FETCHs can be processed.
		 */
		ExecutorRewind(queryDesc);

		/*
		 * Change the destination to output to the tuplestore.  Note we tell
		 * the tuplestore receiver to detoast all data passed through it.
		 */
		queryDesc->dest = CreateDestReceiver(DestTuplestore);
		SetTuplestoreDestReceiverParams(queryDesc->dest,
										portal->holdStore,
										portal->holdContext,
										true);

		/* Fetch the result set into the tuplestore */
		ExecutorRun(queryDesc, ForwardScanDirection, 0L);

		(*queryDesc->dest->rDestroy) (queryDesc->dest);
		queryDesc->dest = NULL;

		/*
		 * Now shut down the inner executor.
		 */
		portal->queryDesc = NULL;	/* prevent double shutdown */
		ExecutorFinish(queryDesc);
		ExecutorEnd(queryDesc);
		FreeQueryDesc(queryDesc);

		/*
		 * Set the position in the result set.
		 */
		MemoryContextSwitchTo(portal->holdContext);

		if (portal->atEnd)
		{
			/*
			 * We can handle this case even if posOverflow: just force the
			 * tuplestore forward to its end.  The size of the skip request
			 * here is arbitrary.
			 */
			while (tuplestore_skiptuples(portal->holdStore, 1000000, true))
				 /* continue */ ;
		}
		else
		{
			if (portal->posOverflow)	/* oops, cannot trust portalPos */
				ereport(ERROR,
						(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
						 errmsg("could not reposition held cursor")));

			tuplestore_rescan(portal->holdStore);

			if (!tuplestore_skiptuples(portal->holdStore, portal->portalPos, true))
				elog(ERROR, "unexpected end of tuple stream");
		}
	}
	PG_CATCH();
	{
		/* Uncaught error while executing portal: mark it dead */
		MarkPortalFailed(portal);

		/* Restore global vars and propagate error */
		ActivePortal = saveActivePortal;
		CurrentResourceOwner = saveResourceOwner;
		PortalContext = savePortalContext;

		PG_RE_THROW();
	}
	PG_END_TRY();

	MemoryContextSwitchTo(oldcxt);

	/* Mark portal not active */
	portal->status = PORTAL_READY;

	ActivePortal = saveActivePortal;
	CurrentResourceOwner = saveResourceOwner;
	PortalContext = savePortalContext;

	PopActiveSnapshot();

	/*
	 * We can now release any subsidiary memory of the portal's heap context;
	 * we'll never use it again.  The executor already dropped its context,
	 * but this will clean up anything that glommed onto the portal's heap via
	 * PortalContext.
	 */
	MemoryContextDeleteChildren(PortalGetHeapMemory(portal));
}
/*
 * Cache_FreeDeletedEntry
 *		Reset a deleted entry and return it to the cache freelist.
 *
 * Takes the entry lock so no other backend sees the entry in a
 * half-reset state.  Factored out because this teardown sequence was
 * previously duplicated in the PG_CATCH block and the success path of
 * Cache_ReleaseCached.
 */
static void
Cache_FreeDeletedEntry(Cache *cache, CacheEntry *entry)
{
	/* Grab entry lock to ensure exclusive access to it while we're touching it */
	Cache_LockEntry(cache, entry);

	entry->state = CACHE_ENTRY_FREE;

#ifdef USE_ASSERT_CHECKING
	Cache_MemsetPayload(cache, entry);
#endif

	Cache_UnlockEntry(cache, entry);

	/* Link entry back in the freelist */
	Cache_AddToFreelist(cache, entry);
}

/*
 * Internal version of the CacheRelease function
 *
 * Drops one pin on the entry; when the pin count hits zero and the entry
 * is marked CACHE_ENTRY_DELETED, the entry is unlinked, its payload is
 * cleaned up via the client callback, and it goes back on the freelist.
 *
 * Unregisters the entry from the cleanup list if requested.
 */
static void
Cache_ReleaseCached(Cache *cache, CacheEntry *entry, bool unregisterCleanup)
{
	Assert(NULL != cache);
	Assert(NULL != entry);

	Cache_ComputeEntryHashcode(cache, entry);

	volatile CacheAnchor *anchor = SyncHTLookup(cache->syncHashtable, &entry->hashvalue);
	Assert(anchor != NULL);

	/* Acquire anchor lock to touch the entry */
	SpinLockAcquire(&anchor->spinlock);

	uint32 pinCount = Cache_EntryDecRef(cache, entry);

	bool deleteEntry = false;

	if (pinCount == 0 && entry->state == CACHE_ENTRY_DELETED)
	{
		/* Delete the cache entry if pin-count = 0 and it is marked for deletion */
		Cache_UnlinkEntry(cache, (CacheAnchor *) anchor, entry);
		deleteEntry = true;

		Cache_DecPerfCounter(&cache->cacheHdr->cacheStats.noDeletedEntries, 1 /* delta */);
	}

	SpinLockRelease(&anchor->spinlock);

	/*
	 * Releasing anchor to hashtable.
	 * Ignoring 'removed' return value, both values are valid
	 */
	SyncHTRelease(cache->syncHashtable, (void *) anchor);

	/* If requested, unregister entry from the cleanup list */
	if (unregisterCleanup)
	{
		Cache_UnregisterCleanup(cache, entry);
	}

	if (deleteEntry)
	{
		PG_TRY();
		{
			/* Call client-specific cleanup function before removing entry from cache */
			cache->cleanupEntry(CACHE_ENTRY_PAYLOAD(entry));
		}
		PG_CATCH();
		{
			/* Put the entry back on the freelist even if cleanup errored out */
			Cache_FreeDeletedEntry(cache, entry);

			PG_RE_THROW();
		}
		PG_END_TRY();

		Cache_FreeDeletedEntry(cache, entry);
	}
}
/*
 * Helper function for the various SQL callable logical decoding functions.
 *
 * Materializes the changes streamed from the named replication slot into a
 * tuplestore.  "confirm" advances the slot's confirmed position afterwards;
 * "binary" selects whether binary output plugins are acceptable.
 *
 * Arguments (from fcinfo): 0 = slot name, 1 = upto_lsn (or NULL for no
 * limit), 2 = upto_nchanges (or NULL for no limit), 3 = text[] of
 * option name/value pairs passed to the output plugin.
 */
static Datum
pg_logical_slot_get_changes_guts(FunctionCallInfo fcinfo, bool confirm, bool binary)
{
	Name		name = PG_GETARG_NAME(0);
	XLogRecPtr	upto_lsn;
	int32		upto_nchanges;
	ReturnSetInfo *rsinfo = (ReturnSetInfo *) fcinfo->resultinfo;
	MemoryContext per_query_ctx;
	MemoryContext oldcontext;
	XLogRecPtr	end_of_wal;
	XLogRecPtr	startptr;
	LogicalDecodingContext *ctx;
	ResourceOwner old_resowner = CurrentResourceOwner;
	ArrayType  *arr;
	Size		ndim;
	List	   *options = NIL;
	DecodingOutputState *p;

	if (PG_ARGISNULL(1))
		upto_lsn = InvalidXLogRecPtr;
	else
		upto_lsn = PG_GETARG_LSN(1);

	if (PG_ARGISNULL(2))
		upto_nchanges = 0;		/* 0 means "no limit"; was wrongly assigned
								 * InvalidXLogRecPtr (an XLogRecPtr) before */
	else
		upto_nchanges = PG_GETARG_INT32(2);

	/* check to see if caller supports us returning a tuplestore */
	if (rsinfo == NULL || !IsA(rsinfo, ReturnSetInfo))
		ereport(ERROR,
				(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
				 errmsg("set-valued function called in context that cannot accept a set")));
	if (!(rsinfo->allowedModes & SFRM_Materialize))
		ereport(ERROR,
				(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
				 errmsg("materialize mode required, but it is not allowed in this context")));

	/* state to write output to */
	p = palloc0(sizeof(DecodingOutputState));

	p->binary_output = binary;

	/* Build a tuple descriptor for our result type */
	if (get_call_result_type(fcinfo, NULL, &p->tupdesc) != TYPEFUNC_COMPOSITE)
		elog(ERROR, "return type must be a row type");

	check_permissions();

	CheckLogicalDecodingRequirements();

	arr = PG_GETARG_ARRAYTYPE_P(3);
	ndim = ARR_NDIM(arr);

	per_query_ctx = rsinfo->econtext->ecxt_per_query_memory;
	oldcontext = MemoryContextSwitchTo(per_query_ctx);

	if (ndim > 1)
	{
		ereport(ERROR,
				(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
				 errmsg("array must be one-dimensional")));
	}
	else if (array_contains_nulls(arr))
	{
		ereport(ERROR,
				(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
				 errmsg("array must not contain nulls")));
	}
	else if (ndim == 1)
	{
		int			nelems;
		Datum	   *datum_opts;
		int			i;

		Assert(ARR_ELEMTYPE(arr) == TEXTOID);

		deconstruct_array(arr, TEXTOID, -1, false, 'i',
						  &datum_opts, NULL, &nelems);

		if (nelems % 2 != 0)
			ereport(ERROR,
					(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
					 errmsg("array must have even number of elements")));

		for (i = 0; i < nelems; i += 2)
		{
			/* renamed from "name": it shadowed the Name parameter above */
			char	   *optname = TextDatumGetCString(datum_opts[i]);
			char	   *opt = TextDatumGetCString(datum_opts[i + 1]);

			options = lappend(options, makeDefElem(optname, (Node *) makeString(opt)));
		}
	}

	p->tupstore = tuplestore_begin_heap(true, false, work_mem);
	rsinfo->returnMode = SFRM_Materialize;
	rsinfo->setResult = p->tupstore;
	rsinfo->setDesc = p->tupdesc;

	/* compute the current end-of-wal */
	if (!RecoveryInProgress())
		end_of_wal = GetFlushRecPtr();
	else
		end_of_wal = GetXLogReplayRecPtr(NULL);

	CheckLogicalDecodingRequirements();
	ReplicationSlotAcquire(NameStr(*name));

	PG_TRY();
	{
		ctx = CreateDecodingContext(InvalidXLogRecPtr,
									options,
									logical_read_local_xlog_page,
									LogicalOutputPrepareWrite,
									LogicalOutputWrite);

		MemoryContextSwitchTo(oldcontext);

		/*
		 * Check whether the output plugin writes textual output if that's
		 * what we need.
		 */
		if (!binary &&
			ctx->options.output_type != OUTPUT_PLUGIN_TEXTUAL_OUTPUT)
			ereport(ERROR,
					(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
					 errmsg("output plugin cannot produce binary output")));

		ctx->output_writer_private = p;

		startptr = MyReplicationSlot->data.restart_lsn;

		CurrentResourceOwner = ResourceOwnerCreate(CurrentResourceOwner, "logical decoding");

		/* invalidate non-timetravel entries */
		InvalidateSystemCaches();

		while ((startptr != InvalidXLogRecPtr && startptr < end_of_wal) ||
			   (ctx->reader->EndRecPtr && ctx->reader->EndRecPtr < end_of_wal))
		{
			XLogRecord *record;
			char	   *errm = NULL;

			record = XLogReadRecord(ctx->reader, startptr, &errm);
			if (errm)
				elog(ERROR, "%s", errm);

			/* only pass startptr on the first read; then follow the reader */
			startptr = InvalidXLogRecPtr;

			/*
			 * The {begin_txn,change,commit_txn}_wrapper callbacks above will
			 * store the description into our tuplestore.
			 */
			if (record != NULL)
				LogicalDecodingProcessRecord(ctx, record);

			/* check limits */
			if (upto_lsn != InvalidXLogRecPtr &&
				upto_lsn <= ctx->reader->EndRecPtr)
				break;
			if (upto_nchanges != 0 &&
				upto_nchanges <= p->returned_rows)
				break;
		}
	}
	PG_CATCH();
	{
		/* clear all timetravel entries */
		InvalidateSystemCaches();

		PG_RE_THROW();
	}
	PG_END_TRY();

	/* was "tuplestore_donestoring(tupstore)": no such variable exists; it
	 * only compiled because the macro ignores its argument */
	tuplestore_donestoring(p->tupstore);

	CurrentResourceOwner = old_resowner;

	/*
	 * Next time, start where we left off. (Hunting things, the family
	 * business..)
	 */
	if (ctx->reader->EndRecPtr != InvalidXLogRecPtr && confirm)
		LogicalConfirmReceivedLocation(ctx->reader->EndRecPtr);

	/* free context, call shutdown callback */
	FreeDecodingContext(ctx);

	ReplicationSlotRelease();
	InvalidateSystemCaches();

	return (Datum) 0;
}
/*
 * call_graph_fmgr_hook
 *
 * FmgrHook entry point: maintains a per-backend call graph of function
 * invocations.  On FHET_START we push an edge (caller, callee) onto the
 * call stack and start its timers; on FHET_END/FHET_ABORT we pop it,
 * accumulate total/self time, and — once the stack empties — flush the
 * gathered edge data into the buffer table.
 */
static void
call_graph_fmgr_hook(FmgrHookEventType event, FmgrInfo *flinfo, Datum *args)
{
	bool		aborted = false;
	EdgeHashKey key;
	EdgeHashElem *elem;
	instr_time	current_time;

	/* chain to any other fmgr hook first */
	if (next_fmgr_hook)
		(*next_fmgr_hook) (event, flinfo, args);

	INSTR_TIME_SET_CURRENT(current_time);

	switch (event)
	{
		case FHET_START:
			{
				bool		found;

				if (call_stack == NIL)
				{
					top_level_function_oid = flinfo->fn_oid;

					/* We're about to enter the top level function; check whether we've been disabled */
					if (!enable_call_graph)
					{
						tracking_current_graph = false;
						recursion_depth = 1;

						return;
					}

					/* Start tracking the call graph; we need to create the hash table */
					create_edge_hash_table();
					tracking_current_graph = true;

					/* If we're tracking table usage, take a stat snapshot now */
					if (track_table_usage)
						table_stat_snapshot = get_table_stat_snapshot();

					/* Use InvalidOid for the imaginary edge into the top level function */
					key.caller = InvalidOid;
				}
				else
				{
					if (!tracking_current_graph)
					{
						/*
						 * Not tracking this graph, just see whether we've recursed into the top level function
						 * (see the comments near the beginning of the file)
						 */
						if (flinfo->fn_oid == top_level_function_oid)
							recursion_depth++;

						return;
					}

					elem = linitial(call_stack);

					/* Calculate the self time we spent in the previous function (elem->key.callee in this case). */
					INSTR_TIME_ACCUM_DIFF(elem->self_time, current_time, current_self_time_start);

					key.caller = elem->key.callee;
				}

				key.callee = flinfo->fn_oid;

				elem = hash_search(edge_hash_table, (void *) &key, HASH_ENTER, &found);

				if (found)
					elem->num_calls++;
				else
				{
					elem->key = key;
					elem->num_calls = 1;
					INSTR_TIME_SET_ZERO(elem->total_time);
					INSTR_TIME_SET_ZERO(elem->self_time);
				}

				call_stack = lcons(elem, call_stack);

				INSTR_TIME_SET_CURRENT(elem->total_time_start);
				/*
				 * Fixed: the destination address had been mangled by an HTML
				 * entity ("&curren;" rendered as a currency sign), which made
				 * this line reference a nonexistent identifier.
				 */
				memcpy(&current_self_time_start, &elem->total_time_start, sizeof(instr_time));
			}
			break;

			/*
			 * In both ABORT and END cases we pop off the last element from the call stack, and if the stack
			 * is empty, we process the data we gathered.
			 *
			 * XXX for some reason if the top level function aborted SPI won't work correctly.
			 */
		case FHET_ABORT:
			aborted = true;
			/* fall through */

		case FHET_END:

			/*
			 * If we're not tracking this particular graph, we only need to see whether we're done
			 * with the graph or not.
			 */
			if (!tracking_current_graph)
			{
				if (top_level_function_oid == flinfo->fn_oid)
				{
					recursion_depth--;
					if (recursion_depth == 0)
						top_level_function_oid = InvalidOid;
				}

				Assert(table_stat_snapshot == NULL);
				return;
			}

			Assert(((EdgeHashElem *) linitial(call_stack))->key.callee == flinfo->fn_oid);

			elem = linitial(call_stack);
			INSTR_TIME_ACCUM_DIFF(elem->self_time, current_time, current_self_time_start);
			INSTR_TIME_ACCUM_DIFF(elem->total_time, current_time, elem->total_time_start);

			call_stack = list_delete_first(call_stack);

			if (call_stack != NIL)
			{
				/* we're going back to the previous node, start recording its self_time */
				INSTR_TIME_SET_CURRENT(current_self_time_start);
				break;
			}

			/*
			 * At this point we're done with the graph.  If the top level function exited cleanly, we can
			 * process the data we've gathered in the hash table and add that data into the buffer table.
			 */
			if (!aborted)
			{
				/*
				 * It is in some cases possible that process_edge_data() throws an exception.  We really need to
				 * clean up our state in case that happens.
				 */
				PG_TRY();
				{
					Datum		buffer_id = assign_callgraph_buffer_id();

					/* Better check both conditions here */
					if (table_stat_snapshot && track_table_usage)
						insert_snapshot_delta(buffer_id, table_stat_snapshot);

					process_edge_data(buffer_id);
				}
				PG_CATCH();
				{
					if (table_stat_snapshot)
					{
						release_table_stat_snapshot(table_stat_snapshot);
						table_stat_snapshot = NULL;
					}

					destroy_edge_hash_table();
					top_level_function_oid = InvalidOid;

					PG_RE_THROW();
				}
				PG_END_TRY();
			}

			if (table_stat_snapshot)
			{
				release_table_stat_snapshot(table_stat_snapshot);
				table_stat_snapshot = NULL;
			}

			destroy_edge_hash_table();
			top_level_function_oid = InvalidOid;
			break;

		default:
			elog(ERROR, "Unknown FmgrHookEventType %d", event);
			return;
	}
}
int send_alert_from_chunks(const PipeProtoChunk *chunk, const PipeProtoChunk * saved_chunks_in) { int ret = -1; GpErrorData errorData; CSVChunkStr chunkstr = { chunk, chunk->data + sizeof(GpErrorDataFixFields) }; memset(&errorData, 0, sizeof(errorData)); memcpy(&errorData.fix_fields, chunk->data, sizeof(errorData.fix_fields)); if (chunk == NULL) return -1; if (chunk->hdr.len == 0) return -1; if (chunk->hdr.zero != 0) return -1; if (chunk->hdr.log_format != 'c') elog(ERROR,"send_alert_from_chunks only works when CSV logging is enabled"); errorData.username = get_str_from_chunk(&chunkstr,saved_chunks_in); errorData.databasename = get_str_from_chunk(&chunkstr,saved_chunks_in); errorData.remote_host = get_str_from_chunk(&chunkstr,saved_chunks_in); errorData.remote_port = get_str_from_chunk(&chunkstr,saved_chunks_in); errorData.error_severity = get_str_from_chunk(&chunkstr,saved_chunks_in); errorData.sql_state = get_str_from_chunk(&chunkstr,saved_chunks_in); errorData.error_message = get_str_from_chunk(&chunkstr,saved_chunks_in); errorData.error_detail = get_str_from_chunk(&chunkstr,saved_chunks_in); errorData.error_hint = get_str_from_chunk(&chunkstr,saved_chunks_in); errorData.internal_query = get_str_from_chunk(&chunkstr,saved_chunks_in); errorData.error_context = get_str_from_chunk(&chunkstr,saved_chunks_in); errorData.debug_query_string = get_str_from_chunk(&chunkstr,saved_chunks_in); errorData.error_func_name = get_str_from_chunk(&chunkstr,saved_chunks_in); errorData.error_filename = get_str_from_chunk(&chunkstr,saved_chunks_in); errorData.stacktrace = get_str_from_chunk(&chunkstr,saved_chunks_in); PG_TRY(); { ret = send_alert(&errorData); } PG_CATCH(); { elog(LOG,"send_alert failed. 
Not sending the alert"); free(errorData.stacktrace ); errorData.stacktrace = NULL; free((char *)errorData.error_filename ); errorData.error_filename = NULL; free((char *)errorData.error_func_name ); errorData.error_func_name = NULL; free(errorData.debug_query_string ); errorData.debug_query_string = NULL; free(errorData.error_context); errorData.error_context = NULL; free(errorData.internal_query ); errorData.internal_query = NULL; free(errorData.error_hint ); errorData.error_hint = NULL; free(errorData.error_detail ); errorData.error_detail = NULL; free(errorData.error_message ); errorData.error_message = NULL; free(errorData.sql_state ); errorData.sql_state = NULL; free((char *)errorData.error_severity ); errorData.error_severity = NULL; free(errorData.remote_port ); errorData.remote_port = NULL; free(errorData.remote_host ); errorData.remote_host = NULL; free(errorData.databasename ); errorData.databasename = NULL; free(errorData.username ); errorData.username = NULL; /* Carry on with error handling. */ PG_RE_THROW(); } PG_END_TRY(); // Don't forget to free them! Best in reverse order of the mallocs. 
free(errorData.stacktrace ); errorData.stacktrace = NULL; free((char *)errorData.error_filename ); errorData.error_filename = NULL; free((char *)errorData.error_func_name ); errorData.error_func_name = NULL; free(errorData.debug_query_string ); errorData.debug_query_string = NULL; free(errorData.error_context); errorData.error_context = NULL; free(errorData.internal_query ); errorData.internal_query = NULL; free(errorData.error_hint ); errorData.error_hint = NULL; free(errorData.error_detail ); errorData.error_detail = NULL; free(errorData.error_message ); errorData.error_message = NULL; free(errorData.sql_state ); errorData.sql_state = NULL; free((char *)errorData.error_severity ); errorData.error_severity = NULL; free(errorData.remote_port ); errorData.remote_port = NULL; free(errorData.remote_host ); errorData.remote_host = NULL; free(errorData.databasename ); errorData.databasename = NULL; free(errorData.username ); errorData.username = NULL; return ret; }
/*
 * Test-support helper: used as a mock side effect to simulate a function
 * throwing an elog/ereport exception.
 *
 * NOTE(review): PG_RE_THROW() assumes an active PG_TRY/PG_CATCH error
 * context; the test harness must arrange for one before triggering this
 * side effect — confirm against the mock framework setup.
 */
void
throw_exception_side_effect()
{
	PG_RE_THROW();
}
/* * Create a new file set * type is the WorkFileType for the files: BUFFILE or BFZ * can_be_reused: if set to false, then we don't insert this set into the cache, * since the caller is telling us there is no point. This can happen for * example when spilling during index creation. * ps is the PlanState for the subtree rooted at the operator * snapshot contains snapshot information for the current transaction * */ workfile_set * workfile_mgr_create_set(enum ExecWorkFileType type, bool can_be_reused, PlanState *ps, workfile_set_snapshot snapshot) { Assert(NULL != workfile_mgr_cache); Plan *plan = NULL; if (ps != NULL) { plan = ps->plan; } AssertImply(can_be_reused, plan != NULL); NodeTag node_type = T_Invalid; if (ps != NULL) { node_type = ps->type; } char *dir_path = create_workset_directory(node_type, currentSliceId); /* Create parameter info for the populate function */ workset_info set_info; set_info.file_type = type; set_info.snapshot = snapshot; set_info.nodeType = node_type; set_info.can_be_reused = can_be_reused && workfile_mgr_is_reusable(ps); set_info.dir_path = dir_path; set_info.session_start_time = GetCurrentTimestamp(); set_info.operator_work_mem = get_operator_work_mem(ps); set_info.on_disk = true; CacheEntry *newEntry = NULL; PG_TRY(); { newEntry = acquire_entry_retry(workfile_mgr_cache, &set_info); } PG_CATCH(); { /* Failed to acquire new entry, cache full. Clean up the directory we created. */ workfile_mgr_delete_set_directory(dir_path); PG_RE_THROW(); } PG_END_TRY(); /* Path has now been copied to the workfile_set. 
We can free it */ pfree(dir_path); /* Complete initialization of the entry with post-acquire actions */ Assert(NULL != newEntry); workfile_set *work_set = CACHE_ENTRY_PAYLOAD(newEntry); Assert(work_set != NULL); if (work_set->can_be_reused) { Assert(plan != NULL); Assert(nodeTag(plan) >= T_Plan && nodeTag(plan) < T_PlanInvalItem); workfile_set_plan *s_plan = workfile_mgr_serialize_plan(ps); work_set->key = workfile_mgr_hash_key(s_plan); workfile_mgr_save_plan(work_set, s_plan); workfile_mgr_free_plan(s_plan); } elog(gp_workfile_caching_loglevel, "new spill file set. key=0x%x can_be_reused=%d prefix=%s opMemKB=" INT64_FORMAT, work_set->key, work_set->can_be_reused, work_set->path, work_set->metadata.operator_work_mem); return work_set; }
/*
 * ExecRefreshMatView -- execute a REFRESH MATERIALIZED VIEW command
 *
 * This refreshes the materialized view by creating a new table and swapping
 * the relfilenodes of the new table and the old materialized view, so the OID
 * of the original materialized view is preserved. Thus we do not lose GRANT
 * nor references to this materialized view.
 *
 * If WITH NO DATA was specified, this is effectively like a TRUNCATE;
 * otherwise it is like a TRUNCATE followed by an INSERT using the SELECT
 * statement associated with the materialized view.  The statement node's
 * skipData field shows whether the clause was used.
 *
 * Indexes are rebuilt too, via REINDEX. Since we are effectively bulk-loading
 * the new heap, it's better to create the indexes afterwards than to fill them
 * incrementally while we load.
 *
 * The matview's "populated" state is changed based on whether the contents
 * reflect the result set of the materialized view's query.
 */
ObjectAddress
ExecRefreshMatView(RefreshMatViewStmt *stmt, const char *queryString,
				   ParamListInfo params, char *completionTag)
{
	Oid			matviewOid;
	Relation	matviewRel;
	RewriteRule *rule;
	List	   *actions;
	Query	   *dataQuery;
	Oid			tableSpace;
	Oid			relowner;
	Oid			OIDNewHeap;
	DestReceiver *dest;
	bool		concurrent;
	LOCKMODE	lockmode;
	char		relpersistence;
	Oid			save_userid;
	int			save_sec_context;
	int			save_nestlevel;
	ObjectAddress address;

	/* Determine strength of lock needed. */
	concurrent = stmt->concurrent;
	lockmode = concurrent ? ExclusiveLock : AccessExclusiveLock;

	/*
	 * Get a lock until end of transaction.
	 */
	matviewOid = RangeVarGetRelidExtended(stmt->relation,
										  lockmode, false, false,
										  RangeVarCallbackOwnsTable, NULL);
	matviewRel = heap_open(matviewOid, NoLock);

	/* Make sure it is a materialized view. */
	if (matviewRel->rd_rel->relkind != RELKIND_MATVIEW)
		ereport(ERROR,
				(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
				 errmsg("\"%s\" is not a materialized view",
						RelationGetRelationName(matviewRel))));

	/* Check that CONCURRENTLY is not specified if not populated. */
	if (concurrent && !RelationIsPopulated(matviewRel))
		ereport(ERROR,
				(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
				 errmsg("CONCURRENTLY cannot be used when the materialized view is not populated")));

	/* Check that conflicting options have not been specified. */
	if (concurrent && stmt->skipData)
		ereport(ERROR,
				(errcode(ERRCODE_SYNTAX_ERROR),
				 errmsg("CONCURRENTLY and WITH NO DATA options cannot be used together")));

	/* We don't allow an oid column for a materialized view. */
	Assert(!matviewRel->rd_rel->relhasoids);

	/*
	 * Check that everything is correct for a refresh. Problems at this point
	 * are internal errors, so elog is sufficient.
	 */
	if (matviewRel->rd_rel->relhasrules == false ||
		matviewRel->rd_rules->numLocks < 1)
		elog(ERROR,
			 "materialized view \"%s\" is missing rewrite information",
			 RelationGetRelationName(matviewRel));

	if (matviewRel->rd_rules->numLocks > 1)
		elog(ERROR,
			 "materialized view \"%s\" has too many rules",
			 RelationGetRelationName(matviewRel));

	rule = matviewRel->rd_rules->rules[0];
	if (rule->event != CMD_SELECT || !(rule->isInstead))
		elog(ERROR,
			 "the rule for materialized view \"%s\" is not a SELECT INSTEAD OF rule",
			 RelationGetRelationName(matviewRel));

	actions = rule->actions;
	if (list_length(actions) != 1)
		elog(ERROR,
			 "the rule for materialized view \"%s\" is not a single action",
			 RelationGetRelationName(matviewRel));

	/*
	 * The stored query was rewritten at the time of the MV definition, but
	 * has not been scribbled on by the planner.
	 */
	dataQuery = (Query *) linitial(actions);
	Assert(IsA(dataQuery, Query));

	/*
	 * Check for active uses of the relation in the current transaction, such
	 * as open scans.
	 *
	 * NB: We count on this to protect us against problems with refreshing the
	 * data using HEAP_INSERT_FROZEN.
	 */
	CheckTableNotInUse(matviewRel, "REFRESH MATERIALIZED VIEW");

	/*
	 * Tentatively mark the matview as populated or not (this will roll back
	 * if we fail later).
	 */
	SetMatViewPopulatedState(matviewRel, !stmt->skipData);

	relowner = matviewRel->rd_rel->relowner;

	/*
	 * Switch to the owner's userid, so that any functions are run as that
	 * user.  Also arrange to make GUC variable changes local to this command.
	 * Don't lock it down too tight to create a temporary table just yet.  We
	 * will switch modes when we are about to execute user code.
	 */
	GetUserIdAndSecContext(&save_userid, &save_sec_context);
	SetUserIdAndSecContext(relowner,
						   save_sec_context | SECURITY_LOCAL_USERID_CHANGE);
	save_nestlevel = NewGUCNestLevel();

	/* Concurrent refresh builds new data in temp tablespace, and does diff. */
	if (concurrent)
	{
		tableSpace = GetDefaultTablespace(RELPERSISTENCE_TEMP);
		relpersistence = RELPERSISTENCE_TEMP;
	}
	else
	{
		tableSpace = matviewRel->rd_rel->reltablespace;
		relpersistence = matviewRel->rd_rel->relpersistence;
	}

	/*
	 * Create the transient table that will receive the regenerated data. Lock
	 * it against access by any other process until commit (by which time it
	 * will be gone).
	 */
	OIDNewHeap = make_new_heap(matviewOid, tableSpace, relpersistence,
							   ExclusiveLock);
	LockRelationOid(OIDNewHeap, AccessExclusiveLock);
	dest = CreateTransientRelDestReceiver(OIDNewHeap);

	/*
	 * Now lock down security-restricted operations.
	 */
	SetUserIdAndSecContext(relowner,
						   save_sec_context | SECURITY_RESTRICTED_OPERATION);

	/* Generate the data, if wanted. */
	if (!stmt->skipData)
		refresh_matview_datafill(dest, dataQuery, queryString);

	heap_close(matviewRel, NoLock);

	/* Make the matview match the newly generated data. */
	if (concurrent)
	{
		int			old_depth = matview_maintenance_depth;

		PG_TRY();
		{
			refresh_by_match_merge(matviewOid, OIDNewHeap, relowner,
								   save_sec_context);
		}
		PG_CATCH();
		{
			/* reset the nesting depth the merge may have left inflated */
			matview_maintenance_depth = old_depth;
			PG_RE_THROW();
		}
		PG_END_TRY();
		Assert(matview_maintenance_depth == old_depth);
	}
	else
		refresh_by_heap_swap(matviewOid, OIDNewHeap, relpersistence);

	/* Roll back any GUC changes */
	AtEOXact_GUC(false, save_nestlevel);

	/* Restore userid and security context */
	SetUserIdAndSecContext(save_userid, save_sec_context);

	ObjectAddressSet(address, RelationRelationId, matviewOid);

	return address;
}
/*
 * ExecWorkFile_Write
 *		write the given data from the end of the last write position.
 *
 * This function returns true if the write succeeds. Otherwise, return false.
 * Note that the disk-quota failure paths do not return false: they go
 * through workfile_mgr_report_error(), which raises an error instead.
 */
bool
ExecWorkFile_Write(ExecWorkFile *workfile,
				   void *data,
				   uint64 size)
{
	Assert(workfile != NULL);
	uint64 bytes;

	/* Nothing to write; report failure rather than treating it as a no-op. */
	if (data == NULL || size == 0)
	{
		return false;
	}

	/* Test the per-query and per-segment limit */
	if ((workfile->flags & EXEC_WORKFILE_LIMIT_SIZE) &&
		!WorkfileDiskspace_Reserve(size))
	{
		/* Failed to reserve additional disk space, notify caller */
		workfile_mgr_report_error();
	}

	switch(workfile->fileType)
	{
		case BUFFILE:
			/*
			 * The empty statement-block below exists so the declarations
			 * that follow do not sit directly after the case label
			 * (presumably a workaround for an older compiler or the
			 * CMockery tooling mentioned elsewhere in this file — TODO
			 * confirm before "cleaning" it up).
			 */
			{}
			BufFile *buffile = (BufFile *)workfile->file;

			int64 current_size = BufFileGetSize(buffile);
			int64 new_size = 0;

			PG_TRY();
			{
				bytes = BufFileWrite(buffile, data, size);
			}
			PG_CATCH();
			{
				/*
				 * On error, still account for whatever BufFileWrite managed
				 * to append before failing, then try to evict other
				 * workfiles to free disk space before re-throwing.
				 */
				new_size = BufFileGetSize(buffile);
				workfile->size = new_size;
				WorkfileDiskspace_Commit( (new_size - current_size), size, true /* update_query_size */);

				int64 size_evicted = workfile_mgr_evict(MIN_EVICT_SIZE);
				elog(gp_workfile_caching_loglevel, "Hit out of disk space, evicted " INT64_FORMAT " bytes", size_evicted);

				PG_RE_THROW();
			}
			PG_END_TRY();

			/*
			 * Success path: commit the delta between the reserved size and
			 * what was actually written.
			 */
			new_size = BufFileGetSize(buffile);
			workfile->size = new_size;
			WorkfileDiskspace_Commit( (new_size - current_size), size, true /* update_query_size */);
			workfile_update_in_progress_size(workfile, new_size - current_size);

			/* Short write without an error raised: treat as out-of-space. */
			if (bytes != size)
			{
				workfile_mgr_report_error();
			}

			break;
		case BFZ:

			PG_TRY();
			{
				bfz_append((bfz_t *)workfile->file, data, size);
			}
			PG_CATCH();
			{
				/*
				 * bfz_append only throws on disk-space exhaustion here;
				 * release the reservation and evict before re-throwing.
				 */
				Assert(WorkfileDiskspace_IsFull());
				WorkfileDiskspace_Commit(0, size, true /* update_query_size */);

				int64 size_evicted = workfile_mgr_evict(MIN_EVICT_SIZE);
				elog(gp_workfile_caching_loglevel, "Hit out of disk space, evicted " INT64_FORMAT " bytes", size_evicted);

				PG_RE_THROW();
			}
			PG_END_TRY();

			/* bfz_append always adds to the file size */
			workfile->size += size;
			if ((workfile->flags & EXEC_WORKFILE_LIMIT_SIZE))
			{
				WorkfileDiskspace_Commit(size, size, true /* update_query_size */);
			}
			workfile_update_in_progress_size(workfile, size);

			break;
		default:
			insist_log(false, "invalid work file type: %d", workfile->fileType);
	}

	return true;
}
/*
 * Emit a PG error or notice, together with any available info about
 * the current Python error, previously set by PLy_exception_set().
 * This should be used to propagate Python errors into PG. If fmt is
 * NULL, the Python error becomes the primary error message, otherwise
 * it becomes the detail. If there is a Python traceback, it is put
 * in the context.
 */
void
PLy_elog(int elevel, const char *fmt,...)
{
	char	   *xmsg;
	char	   *tbmsg;
	int			tb_depth;
	StringInfoData emsg;
	PyObject   *exc,
			   *val,
			   *tb;
	const char *primary = NULL;
	int			sqlerrcode = 0;
	char	   *detail = NULL;
	char	   *hint = NULL;
	char	   *query = NULL;
	int			position = 0;

	/* Take ownership of the pending Python error, clearing the indicator. */
	PyErr_Fetch(&exc, &val, &tb);
	if (exc != NULL)
	{
		PyErr_NormalizeException(&exc, &val, &tb);

		/* SPIError carries structured error fields; extract them. */
		if (PyErr_GivenExceptionMatches(val, PLy_exc_spi_error))
			PLy_get_spi_error_data(val, &sqlerrcode, &detail, &hint, &query, &position);
		else if (PyErr_GivenExceptionMatches(val, PLy_exc_fatal))
			elevel = FATAL;
	}

	/* this releases our refcount on tb! */
	PLy_traceback(exc, val, tb, &xmsg, &tbmsg, &tb_depth);

	if (fmt)
	{
		/*
		 * Format the caller's message, growing the buffer until
		 * appendStringInfoVA reports it fit (the usual *VA retry loop).
		 */
		initStringInfo(&emsg);
		for (;;)
		{
			va_list		ap;
			int			needed;

			va_start(ap, fmt);
			needed = appendStringInfoVA(&emsg, dgettext(TEXTDOMAIN, fmt), ap);
			va_end(ap);
			if (needed == 0)
				break;
			enlargeStringInfo(&emsg, needed);
		}
		primary = emsg.data;

		/* Since we have a format string, we cannot have a SPI detail. */
		Assert(detail == NULL);

		/* If there's an exception message, it goes in the detail. */
		if (xmsg)
			detail = xmsg;
	}
	else
	{
		/* No caller message: the Python exception text is the primary. */
		if (xmsg)
			primary = xmsg;
	}

	/*
	 * The cleanup below is duplicated in the catch block and after
	 * PG_END_TRY because ereport() longjmps for elevel >= ERROR but returns
	 * normally for NOTICE/WARNING; both paths must free the buffers and
	 * drop the Python references.
	 */
	PG_TRY();
	{
		ereport(elevel,
				(errcode(sqlerrcode ? sqlerrcode : ERRCODE_EXTERNAL_ROUTINE_EXCEPTION),
				 errmsg_internal("%s", primary ? primary : "no exception data"),
				 (detail) ? errdetail_internal("%s", detail) : 0,
				 (tb_depth > 0 && tbmsg) ? errcontext("%s", tbmsg) : 0,
				 (hint) ? errhint("%s", hint) : 0,
				 (query) ? internalerrquery(query) : 0,
				 (position) ? internalerrposition(position) : 0));
	}
	PG_CATCH();
	{
		if (fmt)
			pfree(emsg.data);
		if (xmsg)
			pfree(xmsg);
		if (tbmsg)
			pfree(tbmsg);
		Py_XDECREF(exc);
		Py_XDECREF(val);

		PG_RE_THROW();
	}
	PG_END_TRY();

	if (fmt)
		pfree(emsg.data);
	if (xmsg)
		pfree(xmsg);
	if (tbmsg)
		pfree(tbmsg);
	Py_XDECREF(exc);
	Py_XDECREF(val);
}
/*
 * Extract a Python traceback from the given exception data.
 *
 * The exception error message is returned in xmsg, the traceback in
 * tbmsg (both as palloc'd strings) and the traceback depth in
 * tb_depth.
 *
 * We release refcounts on all the Python objects in the traceback stack,
 * but not on e or v.
 */
static void
PLy_traceback(PyObject *e, PyObject *v, PyObject *tb,
			  char **xmsg, char **tbmsg, int *tb_depth)
{
	PyObject   *e_type_o;
	PyObject   *e_module_o;
	char	   *e_type_s = NULL;
	char	   *e_module_s = NULL;
	PyObject   *vob = NULL;
	char	   *vstr;
	StringInfoData xstr;
	StringInfoData tbstr;

	/*
	 * if no exception, return nulls
	 */
	if (e == NULL)
	{
		*xmsg = NULL;
		*tbmsg = NULL;
		*tb_depth = 0;

		return;
	}

	/*
	 * Format the exception and its value and put it in xmsg.
	 */
	e_type_o = PyObject_GetAttrString(e, "__name__");
	e_module_o = PyObject_GetAttrString(e, "__module__");
	if (e_type_o)
		e_type_s = PyString_AsString(e_type_o);
	/*
	 * NOTE(review): the guard below tests e_type_s rather than e_module_o;
	 * this matches upstream PL/Python, but looks fragile if __module__ is
	 * missing — verify against the upstream plpy_elog.c before changing.
	 */
	if (e_type_s)
		e_module_s = PyString_AsString(e_module_o);

	if (v && ((vob = PyObject_Str(v)) != NULL))
		vstr = PyString_AsString(vob);
	else
		vstr = "unknown";

	initStringInfo(&xstr);
	if (!e_type_s || !e_module_s)
	{
		if (PyString_Check(e))
			/* deprecated string exceptions */
			appendStringInfoString(&xstr, PyString_AsString(e));
		else
			/* shouldn't happen */
			appendStringInfoString(&xstr, "unrecognized exception");
	}
	/* mimics behavior of traceback.format_exception_only */
	else if (strcmp(e_module_s, "builtins") == 0
			 || strcmp(e_module_s, "__main__") == 0
			 || strcmp(e_module_s, "exceptions") == 0)
		appendStringInfo(&xstr, "%s", e_type_s);
	else
		appendStringInfo(&xstr, "%s.%s", e_module_s, e_type_s);
	appendStringInfo(&xstr, ": %s", vstr);

	*xmsg = xstr.data;

	/*
	 * Now format the traceback and put it in tbmsg.
	 */
	*tb_depth = 0;
	initStringInfo(&tbstr);
	/* Mimick Python traceback reporting as close as possible. */
	appendStringInfoString(&tbstr, "Traceback (most recent call last):");
	while (tb != NULL && tb != Py_None)
	{
		PyObject   *volatile tb_prev = NULL;
		PyObject   *volatile frame = NULL;
		PyObject   *volatile code = NULL;
		PyObject   *volatile name = NULL;
		PyObject   *volatile lineno = NULL;
		PyObject   *volatile filename = NULL;

		PG_TRY();
		{
			/*
			 * Ancient versions of Python (circa 2.3) contain a bug whereby
			 * the fetches below can fail if the error indicator is set.
			 */
			PyErr_Clear();

			lineno = PyObject_GetAttrString(tb, "tb_lineno");
			if (lineno == NULL)
				elog(ERROR, "could not get line number from Python traceback");

			frame = PyObject_GetAttrString(tb, "tb_frame");
			if (frame == NULL)
				elog(ERROR, "could not get frame from Python traceback");

			code = PyObject_GetAttrString(frame, "f_code");
			if (code == NULL)
				elog(ERROR, "could not get code object from Python frame");

			name = PyObject_GetAttrString(code, "co_name");
			if (name == NULL)
				elog(ERROR, "could not get function name from Python code object");

			filename = PyObject_GetAttrString(code, "co_filename");
			if (filename == NULL)
				elog(ERROR, "could not get file name from Python code object");
		}
		PG_CATCH();
		{
			/* Drop whatever refs were acquired before the error was thrown. */
			Py_XDECREF(frame);
			Py_XDECREF(code);
			Py_XDECREF(name);
			Py_XDECREF(lineno);
			Py_XDECREF(filename);
			PG_RE_THROW();
		}
		PG_END_TRY();

		/* The first frame always points at <module>, skip it. */
		if (*tb_depth > 0)
		{
			PLyExecutionContext *exec_ctx = PLy_current_execution_context();
			char	   *proname;
			char	   *fname;
			char	   *line;
			char	   *plain_filename;
			long		plain_lineno;

			/*
			 * The second frame points at the internal function, but to
			 * mimick Python error reporting we want to say <module>.
			 */
			if (*tb_depth == 1)
				fname = "<module>";
			else
				fname = PyString_AsString(name);

			proname = PLy_procedure_name(exec_ctx->curr_proc);
			plain_filename = PyString_AsString(filename);
			plain_lineno = PyInt_AsLong(lineno);

			/*
			 * NOTE(review): the "- 1" offset on the reported line number
			 * differs from stock PL/Python — presumably compensating for a
			 * wrapper line prepended to the compiled source; confirm.
			 */
			if (proname == NULL)
				appendStringInfo( &tbstr, "\n PL/Python anonymous code block, line %ld, in %s", plain_lineno - 1, fname);
			else
				appendStringInfo( &tbstr, "\n PL/Python function \"%s\", line %ld, in %s", proname, plain_lineno - 1, fname);

			/*
			 * function code object was compiled with "<string>" as the
			 * filename
			 */
			if (exec_ctx->curr_proc && plain_filename != NULL &&
				strcmp(plain_filename, "<string>") == 0)
			{
				/*
				 * If we know the current procedure, append the exact line
				 * from the source, again mimicking Python's traceback.py
				 * module behavior. We could store the already line-split
				 * source to avoid splitting it every time, but producing a
				 * traceback is not the most important scenario to optimize
				 * for. But we do not go as far as traceback.py in reading
				 * the source of imported modules.
				 */
				line = get_source_line(exec_ctx->curr_proc->src, plain_lineno);
				if (line)
				{
					appendStringInfo(&tbstr, "\n %s", line);
					pfree(line);
				}
			}
		}

		Py_DECREF(frame);
		Py_DECREF(code);
		Py_DECREF(name);
		Py_DECREF(lineno);
		Py_DECREF(filename);

		/* Release the current frame and go to the next one. */
		tb_prev = tb;
		tb = PyObject_GetAttrString(tb, "tb_next");
		Assert(tb_prev != Py_None);
		Py_DECREF(tb_prev);
		if (tb == NULL)
			elog(ERROR, "could not traverse Python traceback");
		(*tb_depth)++;
	}

	/* Return the traceback. */
	*tbmsg = tbstr.data;

	Py_XDECREF(e_type_o);
	Py_XDECREF(e_module_o);
	Py_XDECREF(vob);
}
/* * This is used when a process is waiting for its counterpart to attach to the * queue. We exit when the other process attaches as expected, or, if * handle != NULL, when the referenced background process or the postmaster * dies. Note that if handle == NULL, and the process fails to attach, we'll * potentially get stuck here forever waiting for a process that may never * start. We do check for interrupts, though. * * ptr is a pointer to the memory address that we're expecting to become * non-NULL when our counterpart attaches to the queue. */ static bool shm_mq_wait_internal(volatile shm_mq *mq, PGPROC *volatile * ptr, BackgroundWorkerHandle *handle) { bool save_set_latch_on_sigusr1; bool result = false; save_set_latch_on_sigusr1 = set_latch_on_sigusr1; if (handle != NULL) set_latch_on_sigusr1 = true; PG_TRY(); { for (;;) { BgwHandleStatus status; pid_t pid; bool detached; /* Acquire the lock just long enough to check the pointer. */ SpinLockAcquire(&mq->mq_mutex); detached = mq->mq_detached; result = (*ptr != NULL); SpinLockRelease(&mq->mq_mutex); /* Fail if detached; else succeed if initialized. */ if (detached) { result = false; break; } if (result) break; if (handle != NULL) { /* Check for unexpected worker death. */ status = GetBackgroundWorkerPid(handle, &pid); if (status != BGWH_STARTED && status != BGWH_NOT_YET_STARTED) { result = false; break; } } /* Wait to be signalled. */ WaitLatch(&MyProc->procLatch, WL_LATCH_SET, 0); /* Reset the latch so we don't spin. */ ResetLatch(&MyProc->procLatch); /* An interrupt may have occurred while we were waiting. */ CHECK_FOR_INTERRUPTS(); } } PG_CATCH(); { set_latch_on_sigusr1 = save_set_latch_on_sigusr1; PG_RE_THROW(); } PG_END_TRY(); return result; }
/* -------------------------------- * pq_getmessage - get a message with length word from connection * * The return value is placed in an expansible StringInfo, which has * already been initialized by the caller. * Only the message body is placed in the StringInfo; the length word * is removed. Also, s->cursor is initialized to zero for convenience * in scanning the message contents. * * If maxlen is not zero, it is an upper limit on the length of the * message we are willing to accept. We abort the connection (by * returning EOF) if client tries to send more than that. * * returns 0 if OK, EOF if trouble * -------------------------------- */ int pq_getmessage(StringInfo s, int maxlen) { int32 len; resetStringInfo(s); /* Read message length word */ if (pq_getbytes((char *) &len, 4) == EOF) { ereport(COMMERROR, (errcode(ERRCODE_PROTOCOL_VIOLATION), errmsg("unexpected EOF within message length word"))); return EOF; } len = ntohl(len); if (len < 4 || (maxlen > 0 && len > maxlen)) { ereport(COMMERROR, (errcode(ERRCODE_PROTOCOL_VIOLATION), errmsg("invalid message length"))); return EOF; } len -= 4; /* discount length itself */ if (len > 0) { /* * Allocate space for message. If we run out of room (ridiculously * large message), we will elog(ERROR), but we want to discard the * message body so as not to lose communication sync. */ PG_TRY(); { enlargeStringInfo(s, len); } PG_CATCH(); { if (pq_discardbytes(len) == EOF) ereport(COMMERROR, (errcode(ERRCODE_PROTOCOL_VIOLATION), errmsg("incomplete message from client"))); PG_RE_THROW(); } PG_END_TRY(); /* And grab the message */ if (pq_getbytes(s->data, len) == EOF) { ereport(COMMERROR, (errcode(ERRCODE_PROTOCOL_VIOLATION), errmsg("incomplete message from client"))); return EOF; } s->len = len; /* Place a trailing null per StringInfo convention */ s->data[len] = '\0'; } return 0; }
/* * This method will emulate the real ExceptionalCondition * function by re-throwing the exception, essentially falling * back to the next available PG_CATCH(); */ void _ExceptionalCondition() { PG_RE_THROW(); }
/**
 * @brief Entry point of the user-defined function for pg_bulkload.
 * @return Returns number of loaded tuples. If the case of errors, -1 will be
 * returned.
 *
 * Overall flow: parse options into a Reader and a Writer, load rows from
 * the reader into the writer inside its private memory context, then close
 * writer and reader (in that order) and return a statistics tuple.
 */
Datum
pg_bulkload(PG_FUNCTION_ARGS)
{
	Reader	   *rd = NULL;
	Writer	   *wt = NULL;
	Datum		options;
	MemoryContext ctx;
	MemoryContext ccxt;
	PGRUsage	ru0;
	PGRUsage	ru1;
	int64		count;
	int64		parse_errors;
	int64		skip;
	WriterResult ret;
	char	   *start;
	char	   *end;
	float8		system;
	float8		user;
	float8		duration;
	TupleDesc	tupdesc;
	Datum		values[PG_BULKLOAD_COLS];
	bool		nulls[PG_BULKLOAD_COLS];
	HeapTuple	result;

	/* Build a tuple descriptor for our result type */
	if (get_call_result_type(fcinfo, NULL, &tupdesc) != TYPEFUNC_COMPOSITE)
		elog(ERROR, "return type must be a row type");

	BULKLOAD_PROFILE_PUSH();

	pg_rusage_init(&ru0);

	/* must be the super user */
	if (!superuser())
		ereport(ERROR,
				(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
				 errmsg("must be superuser to use pg_bulkload")));

	options = PG_GETARG_DATUM(0);

	/* Remember the caller's context for error reporting in PG_CATCH. */
	ccxt = CurrentMemoryContext;

	/*
	 * STEP 1: Initialization
	 */

	/* parse options and create reader and writer */
	ParseOptions(options, &rd, &wt, ru0.tv.tv_sec);

	/* initialize reader */
	ReaderInit(rd);

	/*
	 * We need to split PG_TRY block because gcc optimizes if-branches with
	 * longjmp codes too much. Local variables initialized in either branch
	 * cannot be handled another branch.
	 */
	PG_TRY();
	{
		/* truncate heap */
		if (wt->truncate)
			TruncateTable(wt->relid);

		/* initialize writer */
		WriterInit(wt);

		/* initialize checker */
		CheckerInit(&rd->checker, wt->rel, wt->tchecker);

		/* initialize parser */
		ParserInit(rd->parser, &rd->checker, rd->infile, wt->desc,
				   wt->multi_process, PG_GET_COLLATION());
	}
	PG_CATCH();
	{
		/* Tear down whatever was built; "true" flags an error-path close. */
		if (rd)
			ReaderClose(rd, true);
		if (wt)
			WriterClose(wt, true);
		PG_RE_THROW();
	}
	PG_END_TRY();

	/* No throwable codes here! */

	PG_TRY();
	{
		/* create logger */
		CreateLogger(rd->logfile, wt->verbose, rd->infile[0] == ':');

		start = timeval_to_cstring(ru0.tv);
		LoggerLog(INFO, "\npg_bulkload %s on %s\n\n", PG_BULKLOAD_VERSION, start);

		ReaderDumpParams(rd);
		WriterDumpParams(wt);
		LoggerLog(INFO, "\n");

		BULKLOAD_PROFILE(&prof_init);

		/*
		 * STEP 2: Build heap
		 */

		/* Switch into its memory context */
		Assert(wt->context);
		ctx = MemoryContextSwitchTo(wt->context);

		/* Loop for each input file record. */
		while (wt->count < rd->limit)
		{
			HeapTuple	tuple;

			CHECK_FOR_INTERRUPTS();

			/* read tuple */
			BULKLOAD_PROFILE_PUSH();
			tuple = ReaderNext(rd);
			BULKLOAD_PROFILE_POP();
			BULKLOAD_PROFILE(&prof_reader);
			if (tuple == NULL)
				break;

			/* write tuple */
			BULKLOAD_PROFILE_PUSH();
			WriterInsert(wt, tuple);
			wt->count += 1;
			BULKLOAD_PROFILE_POP();
			BULKLOAD_PROFILE(&prof_writer);

			/* Per-row context: release per-tuple allocations each iteration. */
			MemoryContextReset(wt->context);
			BULKLOAD_PROFILE(&prof_reset);
		}

		MemoryContextSwitchTo(ctx);

		/*
		 * STEP 3: Finalize heap and merge indexes
		 */

		/* Capture stats before the close calls free rd/wt. */
		count = wt->count;
		parse_errors = rd->parse_errors;

		/*
		 * close writer first and reader second because shmem_exit callback
		 * is managed by a simple stack.
		 */
		ret = WriterClose(wt, false);
		wt = NULL;
		skip = ReaderClose(rd, false);
		rd = NULL;
	}
	PG_CATCH();
	{
		ErrorData  *errdata;
		MemoryContext ecxt;

		/* Copy the error out in the caller's context so we can log it. */
		ecxt = MemoryContextSwitchTo(ccxt);
		errdata = CopyErrorData();
		LoggerLog(INFO, "%s\n", errdata->message);
		FreeErrorData(errdata);

		/* close writer first, and reader second */
		if (wt)
			WriterClose(wt, true);
		if (rd)
			ReaderClose(rd, true);

		MemoryContextSwitchTo(ecxt);
		PG_RE_THROW();
	}
	PG_END_TRY();

	/* Rows replaced by duplicates are not counted as loaded. */
	count -= ret.num_dup_new;

	LoggerLog(INFO, "\n"
			  " " int64_FMT " Rows skipped.\n"
			  " " int64_FMT " Rows successfully loaded.\n"
			  " " int64_FMT " Rows not loaded due to parse errors.\n"
			  " " int64_FMT " Rows not loaded due to duplicate errors.\n"
			  " " int64_FMT " Rows replaced with new rows.\n\n",
			  skip, count, parse_errors, ret.num_dup_new, ret.num_dup_old);

	pg_rusage_init(&ru1);
	system = diffTime(ru1.ru.ru_stime, ru0.ru.ru_stime);
	user = diffTime(ru1.ru.ru_utime, ru0.ru.ru_utime);
	duration = diffTime(ru1.tv, ru0.tv);
	end = timeval_to_cstring(ru1.tv);

	/* Assemble the 8-column statistics tuple returned to the caller. */
	memset(nulls, 0, sizeof(nulls));
	values[0] = Int64GetDatum(skip);
	values[1] = Int64GetDatum(count);
	values[2] = Int64GetDatum(parse_errors);
	values[3] = Int64GetDatum(ret.num_dup_new);
	values[4] = Int64GetDatum(ret.num_dup_old);
	values[5] = Float8GetDatumFast(system);
	values[6] = Float8GetDatumFast(user);
	values[7] = Float8GetDatumFast(duration);

	LoggerLog(INFO,
			  "Run began on %s\n"
			  "Run ended on %s\n\n"
			  "CPU %.2fs/%.2fu sec elapsed %.2f sec\n",
			  start, end, system, user, duration);

	LoggerClose();

	result = heap_form_tuple(tupdesc, values, nulls);

	BULKLOAD_PROFILE(&prof_fini);
	BULKLOAD_PROFILE_POP();
	BULKLOAD_PROFILE_PRINT();

	PG_RETURN_DATUM(HeapTupleGetDatum(result));
}
/*
 * initTrie - create trie from file.
 *
 * Function converts UTF8-encoded file into current encoding.
 *
 * Lines whose characters cannot be converted to the current encoding are
 * skipped (the conversion error is caught and flushed), and reading then
 * resumes from the next line via the outer do/while loop.
 */
static TrieChar *
initTrie(const char *filename)
{
	TrieChar   *volatile rootTrie = NULL;
	MemoryContext ccxt = CurrentMemoryContext;
	tsearch_readline_state trst;
	volatile bool skip;

	filename = get_tsearch_config_filename(filename, "rules");
	if (!tsearch_readline_begin(&trst, filename))
		ereport(ERROR,
				(errcode(ERRCODE_CONFIG_FILE_ERROR),
				 errmsg("could not open unaccent file \"%s\": %m",
						filename)));

	do
	{
		/*
		 * pg_do_encoding_conversion() (called by tsearch_readline()) will
		 * emit exception if it finds untranslatable characters in current
		 * locale. We just skip such lines, continuing with the next.
		 */
		skip = true;

		PG_TRY();
		{
			char	   *line;

			while ((line = tsearch_readline(&trst)) != NULL)
			{
				/*----------
				 * The format of each line must be "src" or "src trg", where
				 * src and trg are sequences of one or more non-whitespace
				 * characters, separated by whitespace. Whitespace at start
				 * or end of line is ignored. If trg is omitted, an empty
				 * string is used as the replacement.
				 *
				 * We use a simple state machine, with states
				 * 0	initial (before src)
				 * 1	in src
				 * 2	in whitespace after src
				 * 3	in trg
				 * 4	in whitespace after trg
				 * -1	syntax error detected
				 *----------
				 */
				int			state;
				char	   *ptr;
				char	   *src = NULL;
				char	   *trg = NULL;
				int			ptrlen;
				int			srclen = 0;
				int			trglen = 0;

				state = 0;
				/* Walk the line one multibyte character at a time. */
				for (ptr = line; *ptr; ptr += ptrlen)
				{
					ptrlen = pg_mblen(ptr);
					/* ignore whitespace, but end src or trg */
					if (t_isspace(ptr))
					{
						if (state == 1)
							state = 2;
						else if (state == 3)
							state = 4;
						continue;
					}
					switch (state)
					{
						case 0:
							/* start of src */
							src = ptr;
							srclen = ptrlen;
							state = 1;
							break;
						case 1:
							/* continue src */
							srclen += ptrlen;
							break;
						case 2:
							/* start of trg */
							trg = ptr;
							trglen = ptrlen;
							state = 3;
							break;
						case 3:
							/* continue trg */
							trglen += ptrlen;
							break;
						default:
							/* bogus line format */
							state = -1;
							break;
					}
				}

				if (state == 1 || state == 2)
				{
					/* trg was omitted, so use "" */
					trg = "";
					trglen = 0;
				}

				if (state > 0)
					rootTrie = placeChar(rootTrie,
										 (unsigned char *) src, srclen,
										 trg, trglen);
				else if (state < 0)
					ereport(WARNING,
							(errcode(ERRCODE_CONFIG_FILE_ERROR),
							 errmsg("invalid syntax: more than two strings in unaccent rule")));

				pfree(line);
			}
			/* Reached EOF cleanly: no retry needed. */
			skip = false;
		}
		PG_CATCH();
		{
			ErrorData  *errdata;
			MemoryContext ecxt;

			/* Inspect the error; only swallow untranslatable-character ones. */
			ecxt = MemoryContextSwitchTo(ccxt);
			errdata = CopyErrorData();
			if (errdata->sqlerrcode == ERRCODE_UNTRANSLATABLE_CHARACTER)
			{
				/* Clear the error and loop again (skip is still true). */
				FlushErrorState();
			}
			else
			{
				MemoryContextSwitchTo(ecxt);
				PG_RE_THROW();
			}
		}
		PG_END_TRY();
	}
	while (skip);

	tsearch_readline_end(&trst);

	return rootTrie;
}
/*
 * ExecRefreshMatView -- execute a REFRESH MATERIALIZED VIEW command
 *
 * This refreshes the materialized view by creating a new table and swapping
 * the relfilenodes of the new table and the old materialized view, so the OID
 * of the original materialized view is preserved. Thus we do not lose GRANT
 * nor references to this materialized view.
 *
 * If WITH NO DATA was specified, this is effectively like a TRUNCATE;
 * otherwise it is like a TRUNCATE followed by an INSERT using the SELECT
 * statement associated with the materialized view. The statement node's
 * skipData field shows whether the clause was used.
 *
 * Indexes are rebuilt too, via REINDEX. Since we are effectively bulk-loading
 * the new heap, it's better to create the indexes afterwards than to fill them
 * incrementally while we load.
 *
 * The matview's "populated" state is changed based on whether the contents
 * reflect the result set of the materialized view's query.
 */
void
ExecRefreshMatView(RefreshMatViewStmt *stmt, const char *queryString,
				   ParamListInfo params, char *completionTag)
{
	Oid			matviewOid;
	Relation	matviewRel;
	RewriteRule *rule;
	List	   *actions;
	Query	   *dataQuery;
	Oid			tableSpace;
	Oid			owner;
	Oid			OIDNewHeap;
	DestReceiver *dest;
	bool		concurrent;
	LOCKMODE	lockmode;

	/* Determine strength of lock needed. */
	concurrent = stmt->concurrent;
	lockmode = concurrent ? ExclusiveLock : AccessExclusiveLock;

	/*
	 * Get a lock until end of transaction.
	 */
	matviewOid = RangeVarGetRelidExtended(stmt->relation,
										  lockmode, false, false,
										  RangeVarCallbackOwnsTable, NULL);
	matviewRel = heap_open(matviewOid, NoLock);

	/* Make sure it is a materialized view. */
	if (matviewRel->rd_rel->relkind != RELKIND_MATVIEW)
		ereport(ERROR,
				(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
				 errmsg("\"%s\" is not a materialized view",
						RelationGetRelationName(matviewRel))));

	/* Check that CONCURRENTLY is not specified if not populated. */
	if (concurrent && !RelationIsPopulated(matviewRel))
		ereport(ERROR,
				(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
				 errmsg("CONCURRENTLY cannot be used when the materialized view is not populated")));

	/* Check that conflicting options have not been specified. */
	if (concurrent && stmt->skipData)
		ereport(ERROR,
				(errcode(ERRCODE_SYNTAX_ERROR),
				 errmsg("CONCURRENTLY and WITH NO DATA options cannot be used together")));

	/* We're not using materialized views in the system catalogs. */
	Assert(!IsSystemRelation(matviewRel));

	/* We don't allow an oid column for a materialized view. */
	Assert(!matviewRel->rd_rel->relhasoids);

	/*
	 * Check that everything is correct for a refresh. Problems at this point
	 * are internal errors, so elog is sufficient.
	 */
	if (matviewRel->rd_rel->relhasrules == false ||
		matviewRel->rd_rules->numLocks < 1)
		elog(ERROR,
			 "materialized view \"%s\" is missing rewrite information",
			 RelationGetRelationName(matviewRel));

	if (matviewRel->rd_rules->numLocks > 1)
		elog(ERROR,
			 "materialized view \"%s\" has too many rules",
			 RelationGetRelationName(matviewRel));

	rule = matviewRel->rd_rules->rules[0];
	if (rule->event != CMD_SELECT || !(rule->isInstead))
		elog(ERROR,
			 "the rule for materialized view \"%s\" is not a SELECT INSTEAD OF rule",
			 RelationGetRelationName(matviewRel));

	actions = rule->actions;
	if (list_length(actions) != 1)
		elog(ERROR,
			 "the rule for materialized view \"%s\" is not a single action",
			 RelationGetRelationName(matviewRel));

	/*
	 * The stored query was rewritten at the time of the MV definition, but
	 * has not been scribbled on by the planner.
	 */
	dataQuery = (Query *) linitial(actions);
	Assert(IsA(dataQuery, Query));

	/*
	 * Check for active uses of the relation in the current transaction, such
	 * as open scans.
	 *
	 * NB: We count on this to protect us against problems with refreshing the
	 * data using HEAP_INSERT_FROZEN.
	 */
	CheckTableNotInUse(matviewRel, "REFRESH MATERIALIZED VIEW");

	/*
	 * Tentatively mark the matview as populated or not (this will roll back
	 * if we fail later).
	 */
	SetMatViewPopulatedState(matviewRel, !stmt->skipData);

	/* Concurrent refresh builds new data in temp tablespace, and does diff. */
	if (concurrent)
		tableSpace = GetDefaultTablespace(RELPERSISTENCE_TEMP);
	else
		tableSpace = matviewRel->rd_rel->reltablespace;

	owner = matviewRel->rd_rel->relowner;

	heap_close(matviewRel, NoLock);

	/*
	 * Create the transient table that will receive the regenerated data.
	 *
	 * NOTE(review): the third argument passed here is the bool 'concurrent';
	 * presumably this build's make_new_heap() takes a force-temp flag in that
	 * position (later versions take a relpersistence char) — verify against
	 * the local cluster.c declaration.
	 */
	OIDNewHeap = make_new_heap(matviewOid, tableSpace, concurrent,
							   ExclusiveLock);
	dest = CreateTransientRelDestReceiver(OIDNewHeap);

	/* Generate the data, if wanted. */
	if (!stmt->skipData)
		refresh_matview_datafill(dest, dataQuery, queryString, owner);

	/* Make the matview match the newly generated data. */
	if (concurrent)
	{
		int			old_depth = matview_maintenance_depth;

		/* Restore the maintenance depth if the merge errors out part-way. */
		PG_TRY();
		{
			refresh_by_match_merge(matviewOid, OIDNewHeap);
		}
		PG_CATCH();
		{
			matview_maintenance_depth = old_depth;
			PG_RE_THROW();
		}
		PG_END_TRY();
		Assert(matview_maintenance_depth == old_depth);
	}
	else
		refresh_by_heap_swap(matviewOid, OIDNewHeap);
}
/* Function passed to testing framework * in order to force SetupInterconnect to fail */ void _RETHROW( ) { PG_RE_THROW(); }
/*
 * Execute a previously prepared SPI plan with the given argument sequence
 * and row limit, inside a SPI subtransaction.
 *
 * 'list' supplies the plan's parameter values (may be NULL for none); its
 * length must match plan->nargs. On Python-level argument errors a Python
 * exception is set and NULL is returned; backend errors inside the
 * subtransaction are likewise converted and NULL returned.
 */
static PyObject *
PLy_spi_execute_plan(PyObject *ob, PyObject *list, long limit)
{
	volatile int nargs;
	int			i,
				rv;
	PLyPlanObject *plan;
	volatile MemoryContext oldcontext;
	volatile ResourceOwner oldowner;
	PyObject   *ret;

	if (list != NULL)
	{
		/* Strings are sequences too; reject them explicitly. */
		if (!PySequence_Check(list) || PyString_Check(list) || PyUnicode_Check(list))
		{
			PLy_exception_set(PyExc_TypeError,
							  "plpy.execute takes a sequence as its second argument");
			return NULL;
		}
		nargs = PySequence_Length(list);
	}
	else
		nargs = 0;

	plan = (PLyPlanObject *) ob;

	if (nargs != plan->nargs)
	{
		char	   *sv;
		PyObject   *so = PyObject_Str(list);

		if (!so)
			PLy_elog(ERROR, "could not execute plan");
		sv = PyString_AsString(so);
		PLy_exception_set_plural(PyExc_TypeError,
								 "Expected sequence of %d argument, got %d: %s",
								 "Expected sequence of %d arguments, got %d: %s",
								 plan->nargs,
								 plan->nargs, nargs, sv);
		Py_DECREF(so);

		return NULL;
	}

	oldcontext = CurrentMemoryContext;
	oldowner = CurrentResourceOwner;

	/* Run the query in a subtransaction so errors can be trapped cleanly. */
	PLy_spi_subtransaction_begin(oldcontext, oldowner);

	PG_TRY();
	{
		PLyExecutionContext *exec_ctx = PLy_current_execution_context();
		char	   *volatile nulls;
		volatile int j;

		if (nargs > 0)
			nulls = palloc(nargs * sizeof(char));
		else
			nulls = NULL;

		/* Convert each Python argument to a Datum plus null flag. */
		for (j = 0; j < nargs; j++)
		{
			PyObject   *elem;

			elem = PySequence_GetItem(list, j);
			if (elem != Py_None)
			{
				/* Inner PG_TRY so 'elem' is released even if conversion throws. */
				PG_TRY();
				{
					plan->values[j] =
						plan->args[j].out.d.func(&(plan->args[j].out.d),
												 -1,
												 elem);
				}
				PG_CATCH();
				{
					Py_DECREF(elem);
					PG_RE_THROW();
				}
				PG_END_TRY();

				Py_DECREF(elem);
				nulls[j] = ' ';
			}
			else
			{
				Py_DECREF(elem);
				plan->values[j] =
					InputFunctionCall(&(plan->args[j].out.d.typfunc),
									  NULL,
									  plan->args[j].out.d.typioparam,
									  -1);
				nulls[j] = 'n';
			}
		}

		rv = SPI_execute_plan(plan->plan, plan->values, nulls,
							  exec_ctx->curr_proc->fn_readonly, limit);
		ret = PLy_spi_execute_fetch_result(SPI_tuptable, SPI_processed, rv);

		if (nargs > 0)
			pfree(nulls);

		PLy_spi_subtransaction_commit(oldcontext, oldowner);
	}
	PG_CATCH();
	{
		int			k;

		/*
		 * cleanup plan->values array
		 */
		for (k = 0; k < nargs; k++)
		{
			if (!plan->args[k].out.d.typbyval &&
				(plan->values[k] != PointerGetDatum(NULL)))
			{
				pfree(DatumGetPointer(plan->values[k]));
				plan->values[k] = PointerGetDatum(NULL);
			}
		}

		/*
		 * Abort the subtransaction (this converts the error into a Python
		 * exception) and bail out. Returning from inside PG_CATCH is safe
		 * because the catch macro restores the exception stack on entry.
		 */
		PLy_spi_subtransaction_abort(oldcontext, oldowner);

		return NULL;
	}
	PG_END_TRY();

	/* Free pass-by-reference argument Datums on the success path, too. */
	for (i = 0; i < nargs; i++)
	{
		if (!plan->args[i].out.d.typbyval &&
			(plan->values[i] != PointerGetDatum(NULL)))
		{
			pfree(DatumGetPointer(plan->values[i]));
			plan->values[i] = PointerGetDatum(NULL);
		}
	}

	if (rv < 0)
	{
		PLy_exception_set(PLy_exc_spi_error,
						  "SPI_execute_plan failed: %s",
						  SPI_result_code_string(rv));
		return NULL;
	}

	return ret;
}
/* * ProcessUtility hook */ static void pgss_ProcessUtility(Node *parsetree, const char *queryString, ParamListInfo params, bool isTopLevel, DestReceiver *dest, char *completionTag) { if (pgss_track_utility && pgss_enabled()) { instr_time start; instr_time duration; uint64 rows = 0; BufferUsage bufusage; bufusage = pgBufferUsage; INSTR_TIME_SET_CURRENT(start); nested_level++; PG_TRY(); { if (prev_ProcessUtility) prev_ProcessUtility(parsetree, queryString, params, isTopLevel, dest, completionTag); else standard_ProcessUtility(parsetree, queryString, params, isTopLevel, dest, completionTag); nested_level--; } PG_CATCH(); { nested_level--; PG_RE_THROW(); } PG_END_TRY(); INSTR_TIME_SET_CURRENT(duration); INSTR_TIME_SUBTRACT(duration, start); /* parse command tag to retrieve the number of affected rows. */ if (completionTag && sscanf(completionTag, "COPY " UINT64_FORMAT, &rows) != 1) rows = 0; /* calc differences of buffer counters. */ bufusage.shared_blks_hit = pgBufferUsage.shared_blks_hit - bufusage.shared_blks_hit; bufusage.shared_blks_read = pgBufferUsage.shared_blks_read - bufusage.shared_blks_read; bufusage.shared_blks_written = pgBufferUsage.shared_blks_written - bufusage.shared_blks_written; bufusage.local_blks_hit = pgBufferUsage.local_blks_hit - bufusage.local_blks_hit; bufusage.local_blks_read = pgBufferUsage.local_blks_read - bufusage.local_blks_read; bufusage.local_blks_written = pgBufferUsage.local_blks_written - bufusage.local_blks_written; bufusage.temp_blks_read = pgBufferUsage.temp_blks_read - bufusage.temp_blks_read; bufusage.temp_blks_written = pgBufferUsage.temp_blks_written - bufusage.temp_blks_written; pgss_store(queryString, INSTR_TIME_GET_DOUBLE(duration), rows, &bufusage); } else { if (prev_ProcessUtility) prev_ProcessUtility(parsetree, queryString, params, isTopLevel, dest, completionTag); else standard_ProcessUtility(parsetree, queryString, params, isTopLevel, dest, completionTag); } }
/*
 * PLy_spi_execute_fetch_result
 *
 * Convert an SPI result (tuptable/rows/status) into a PL/Python result
 * object.  On success the tuples are turned into Python dicts and the
 * tuple descriptor is copied into TopMemoryContext for later metadata
 * use.  The tuptable is always freed before returning (or on error).
 */
static PyObject *
PLy_spi_execute_fetch_result(SPITupleTable *tuptable, int rows, int status)
{
	PLyResultObject *result;
	/* volatile: assigned before PG_TRY, dereferenced in PG_CATCH */
	volatile MemoryContext oldcontext;

	result = (PLyResultObject *) PLy_result_new();
	Py_DECREF(result->status);
	result->status = PyInt_FromLong(status);

	if (status > 0 && tuptable == NULL)
	{
		/* Command returned a count but no tuples (e.g. UPDATE). */
		Py_DECREF(result->nrows);
		result->nrows = PyInt_FromLong(rows);
	}
	else if (status > 0 && tuptable != NULL)
	{
		PLyTypeInfo args;
		int			i;

		Py_DECREF(result->nrows);
		result->nrows = PyInt_FromLong(rows);
		PLy_typeinfo_init(&args);

		oldcontext = CurrentMemoryContext;
		PG_TRY();
		{
			MemoryContext oldcontext2;

			if (rows)
			{
				/* Replace the default rows list with one of the right size. */
				Py_DECREF(result->rows);
				result->rows = PyList_New(rows);

				PLy_input_tuple_funcs(&args, tuptable->tupdesc);
				for (i = 0; i < rows; i++)
				{
					PyObject   *row = PLyDict_FromTuple(&args,
														tuptable->vals[i],
														tuptable->tupdesc);

					/* PyList_SetItem steals the reference to row */
					PyList_SetItem(result->rows, i, row);
				}
			}

			/*
			 * Save tuple descriptor for later use by result set metadata
			 * functions.  Save it in TopMemoryContext so that it survives
			 * outside of an SPI context.  We trust that PLy_result_dealloc()
			 * will clean it up when the time is right.  (Do this as late as
			 * possible, to minimize the number of ways the tupdesc could get
			 * leaked due to errors.)
			 */
			oldcontext2 = MemoryContextSwitchTo(TopMemoryContext);
			result->tupdesc = CreateTupleDescCopy(tuptable->tupdesc);
			MemoryContextSwitchTo(oldcontext2);
		}
		PG_CATCH();
		{
			/* Restore context and release everything before re-throwing. */
			MemoryContextSwitchTo(oldcontext);
			PLy_typeinfo_dealloc(&args);
			SPI_freetuptable(tuptable);
			Py_DECREF(result);
			PG_RE_THROW();
		}
		PG_END_TRY();

		PLy_typeinfo_dealloc(&args);
		SPI_freetuptable(tuptable);
	}

	return (PyObject *) result;
}
/*
 * Helper function for the various SQL callable logical decoding functions.
 *
 * Streams decoded changes from the named replication slot into a
 * materialized tuplestore.  Decoding stops once end-of-WAL is reached, or
 * earlier when the caller-supplied LSN limit (upto_lsn) or row-count limit
 * (upto_nchanges) is hit.  When 'confirm' is true the slot's confirmed
 * position is advanced past the last decoded record; 'binary' selects
 * whether a binary-output plugin is acceptable.
 */
static Datum
pg_logical_slot_get_changes_guts(FunctionCallInfo fcinfo, bool confirm, bool binary)
{
	Name		name;
	XLogRecPtr	upto_lsn;
	int32		upto_nchanges;
	ReturnSetInfo *rsinfo = (ReturnSetInfo *) fcinfo->resultinfo;
	MemoryContext per_query_ctx;
	MemoryContext oldcontext;
	XLogRecPtr	end_of_wal;
	XLogRecPtr	startptr;
	LogicalDecodingContext *ctx;
	ResourceOwner old_resowner = CurrentResourceOwner;
	ArrayType  *arr;
	Size		ndim;
	List	   *options = NIL;
	DecodingOutputState *p;

	check_permissions();

	CheckLogicalDecodingRequirements();

	if (PG_ARGISNULL(0))
		ereport(ERROR,
				(errcode(ERRCODE_NULL_VALUE_NOT_ALLOWED),
				 errmsg("slot name must not be null")));
	name = PG_GETARG_NAME(0);

	if (PG_ARGISNULL(1))
		upto_lsn = InvalidXLogRecPtr;
	else
		upto_lsn = PG_GETARG_LSN(1);

	if (PG_ARGISNULL(2))
		upto_nchanges = 0;		/* 0 = no limit; was wrongly InvalidXLogRecPtr
								 * (an XLogRecPtr constant assigned to int32) */
	else
		upto_nchanges = PG_GETARG_INT32(2);

	if (PG_ARGISNULL(3))
		ereport(ERROR,
				(errcode(ERRCODE_NULL_VALUE_NOT_ALLOWED),
				 errmsg("options array must not be null")));
	arr = PG_GETARG_ARRAYTYPE_P(3);

	/* check to see if caller supports us returning a tuplestore */
	if (rsinfo == NULL || !IsA(rsinfo, ReturnSetInfo))
		ereport(ERROR,
				(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
				 errmsg("set-valued function called in context that cannot accept a set")));
	if (!(rsinfo->allowedModes & SFRM_Materialize))
		ereport(ERROR,
				(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
				 errmsg("materialize mode required, but it is not allowed in this context")));

	/* state to write output to */
	p = palloc0(sizeof(DecodingOutputState));

	p->binary_output = binary;

	/* Build a tuple descriptor for our result type */
	if (get_call_result_type(fcinfo, NULL, &p->tupdesc) != TYPEFUNC_COMPOSITE)
		elog(ERROR, "return type must be a row type");

	per_query_ctx = rsinfo->econtext->ecxt_per_query_memory;
	oldcontext = MemoryContextSwitchTo(per_query_ctx);

	/*
	 * Deconstruct options array: a flat text[] of alternating option
	 * name/value pairs, passed through to the output plugin as DefElems.
	 */
	ndim = ARR_NDIM(arr);
	if (ndim > 1)
	{
		ereport(ERROR,
				(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
				 errmsg("array must be one-dimensional")));
	}
	else if (array_contains_nulls(arr))
	{
		ereport(ERROR,
				(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
				 errmsg("array must not contain nulls")));
	}
	else if (ndim == 1)
	{
		int			nelems;
		Datum	   *datum_opts;
		int			i;

		Assert(ARR_ELEMTYPE(arr) == TEXTOID);

		deconstruct_array(arr, TEXTOID, -1, false, 'i',
						  &datum_opts, NULL, &nelems);

		if (nelems % 2 != 0)
			ereport(ERROR,
					(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
					 errmsg("array must have even number of elements")));

		for (i = 0; i < nelems; i += 2)
		{
			char	   *name = TextDatumGetCString(datum_opts[i]);
			char	   *opt = TextDatumGetCString(datum_opts[i + 1]);

			options = lappend(options,
							  makeDefElem(name, (Node *) makeString(opt), -1));
		}
	}

	p->tupstore = tuplestore_begin_heap(true, false, work_mem);
	rsinfo->returnMode = SFRM_Materialize;
	rsinfo->setResult = p->tupstore;
	rsinfo->setDesc = p->tupdesc;

	/* compute the current end-of-wal */
	if (!RecoveryInProgress())
		end_of_wal = GetFlushRecPtr();
	else
		end_of_wal = GetXLogReplayRecPtr(NULL);

	ReplicationSlotAcquire(NameStr(*name));

	PG_TRY();
	{
		/* restart at slot's confirmed_flush */
		ctx = CreateDecodingContext(InvalidXLogRecPtr,
									options,
									logical_read_local_xlog_page,
									LogicalOutputPrepareWrite,
									LogicalOutputWrite);

		MemoryContextSwitchTo(oldcontext);

		/*
		 * Check whether the output plugin writes textual output if that's
		 * what we need.
		 */
		if (!binary &&
			ctx->options.output_type != OUTPUT_PLUGIN_TEXTUAL_OUTPUT)
			ereport(ERROR,
					(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
					 errmsg("logical decoding output plugin \"%s\" produces binary output, but function \"%s\" expects textual data",
							NameStr(MyReplicationSlot->data.plugin),
							format_procedure(fcinfo->flinfo->fn_oid))));

		ctx->output_writer_private = p;

		/*
		 * Decoding of WAL must start at restart_lsn so that the entirety of
		 * xacts that committed after the slot's confirmed_flush can be
		 * accumulated into reorder buffers.
		 */
		startptr = MyReplicationSlot->data.restart_lsn;

		CurrentResourceOwner = ResourceOwnerCreate(CurrentResourceOwner, "logical decoding");

		/* invalidate non-timetravel entries */
		InvalidateSystemCaches();

		while ((startptr != InvalidXLogRecPtr && startptr < end_of_wal) ||
			   (ctx->reader->EndRecPtr != InvalidXLogRecPtr && ctx->reader->EndRecPtr < end_of_wal))
		{
			XLogRecord *record;
			char	   *errm = NULL;

			record = XLogReadRecord(ctx->reader, startptr, &errm);
			if (errm)
				elog(ERROR, "%s", errm);

			/*
			 * Now that we've set up the xlog reader state, subsequent calls
			 * pass InvalidXLogRecPtr to say "continue from last record"
			 */
			startptr = InvalidXLogRecPtr;

			/*
			 * The {begin_txn,change,commit_txn}_wrapper callbacks above will
			 * store the description into our tuplestore.
			 */
			if (record != NULL)
				LogicalDecodingProcessRecord(ctx, ctx->reader);

			/* check limits */
			if (upto_lsn != InvalidXLogRecPtr &&
				upto_lsn <= ctx->reader->EndRecPtr)
				break;
			if (upto_nchanges != 0 &&
				upto_nchanges <= p->returned_rows)
				break;
			CHECK_FOR_INTERRUPTS();
		}

		/*
		 * Was "tuplestore_donestoring(tupstore)" with undeclared identifier
		 * 'tupstore'; it only compiled because the macro ignores its
		 * argument.  Reference the real tuplestore.
		 */
		tuplestore_donestoring(p->tupstore);

		CurrentResourceOwner = old_resowner;

		/*
		 * Next time, start where we left off. (Hunting things, the family
		 * business..)
		 */
		if (ctx->reader->EndRecPtr != InvalidXLogRecPtr && confirm)
		{
			LogicalConfirmReceivedLocation(ctx->reader->EndRecPtr);

			/*
			 * If only the confirmed_flush_lsn has changed the slot won't get
			 * marked as dirty by the above. Callers on the walsender
			 * interface are expected to keep track of their own progress and
			 * don't need it written out. But SQL-interface users cannot
			 * specify their own start positions and it's harder for them to
			 * keep track of their progress, so we should make more of an
			 * effort to save it for them.
			 *
			 * Dirty the slot so it's written out at the next checkpoint.
			 * We'll still lose its position on crash, as documented, but it's
			 * better than always losing the position even on clean restart.
			 */
			ReplicationSlotMarkDirty();
		}

		/* free context, call shutdown callback */
		FreeDecodingContext(ctx);

		ReplicationSlotRelease();

		InvalidateSystemCaches();
	}
	PG_CATCH();
	{
		/* clear all timetravel entries */
		InvalidateSystemCaches();

		PG_RE_THROW();
	}
	PG_END_TRY();

	return (Datum) 0;
}
/*
 * Connect to remote server using specified server and user mapping properties.
 *
 * Connection parameters are built from the generic options of the
 * ForeignServer and UserMapping; a libpq connection is established and
 * validated, and the remote session is configured before the PGconn is
 * returned.  On any error the partially-created connection is closed.
 */
static PGconn *
connect_pg_server(ForeignServer *server, UserMapping *user)
{
	/* volatile pointer so the PG_CATCH cleanup sees the current value */
	PGconn	   *volatile conn = NULL;

	/*
	 * Use PG_TRY block to ensure closing connection on error.
	 */
	PG_TRY();
	{
		const char **keywords;
		const char **values;
		int			n;

		/*
		 * Construct connection params from generic options of ForeignServer
		 * and UserMapping.  (Some of them might not be libpq options, in
		 * which case we'll just waste a few array slots.)  Add 3 extra slots
		 * for fallback_application_name, client_encoding, end marker.
		 */
		n = list_length(server->options) + list_length(user->options) + 3;
		keywords = (const char **) palloc(n * sizeof(char *));
		values = (const char **) palloc(n * sizeof(char *));

		n = 0;
		n += ExtractConnectionOptions(server->options,
									  keywords + n, values + n);
		n += ExtractConnectionOptions(user->options,
									  keywords + n, values + n);

		/* Use "postgres_fdw" as fallback_application_name. */
		keywords[n] = "fallback_application_name";
		values[n] = "postgres_fdw";
		n++;

		/* Set client_encoding so that libpq can convert encoding properly. */
		keywords[n] = "client_encoding";
		values[n] = GetDatabaseEncodingName();
		n++;

		keywords[n] = values[n] = NULL;

		/* verify connection parameters and make connection */
		check_conn_params(keywords, values);

		conn = PQconnectdbParams(keywords, values, false);
		if (!conn || PQstatus(conn) != CONNECTION_OK)
		{
			char	   *connmessage;
			int			msglen;

			/* libpq typically appends a newline, strip that */
			connmessage = pstrdup(PQerrorMessage(conn));
			msglen = strlen(connmessage);
			if (msglen > 0 && connmessage[msglen - 1] == '\n')
				connmessage[msglen - 1] = '\0';
			ereport(ERROR,
					(errcode(ERRCODE_SQLCLIENT_UNABLE_TO_ESTABLISH_SQLCONNECTION),
					 errmsg("could not connect to server \"%s\"",
							server->servername),
					 errdetail_internal("%s", connmessage)));
		}

		/*
		 * Check that non-superuser has used password to establish connection;
		 * otherwise, he's piggybacking on the postgres server's user
		 * identity. See also dblink_security_check() in contrib/dblink.
		 */
		if (!superuser() && !PQconnectionUsedPassword(conn))
			ereport(ERROR,
					(errcode(ERRCODE_S_R_E_PROHIBITED_SQL_STATEMENT_ATTEMPTED),
					 errmsg("password is required"),
					 errdetail("Non-superuser cannot connect if the server does not request a password."),
					 errhint("Target server's authentication method must be changed.")));

		/* Prepare new session for use */
		configure_remote_session(conn);

		pfree(keywords);
		pfree(values);
	}
	PG_CATCH();
	{
		/* Release PGconn data structure if we managed to create one */
		if (conn)
			PQfinish(conn);
		PG_RE_THROW();
	}
	PG_END_TRY();

	return conn;
}
/*
 * PLySequence_ToComposite
 *
 * Convert a Python sequence into a composite Datum described by 'desc'.
 * The sequence must have exactly as many items as the tuple descriptor has
 * non-dropped attributes; dropped attributes become NULL.  Each item is
 * converted with the per-attribute conversion function from arg->u.tuple.
 */
static Datum
PLySequence_ToComposite(PLyObToDatum *arg, TupleDesc desc, PyObject *sequence)
{
	Datum		result;
	HeapTuple	tuple;
	Datum	   *values;
	bool	   *nulls;
	volatile int idx;
	volatile int i;

	Assert(PySequence_Check(sequence));

	/*
	 * Check that sequence length is exactly same as PG tuple's. We actually
	 * can ignore exceeding items or assume missing ones as null but to avoid
	 * plpython developer's errors we are strict here
	 */
	idx = 0;
	for (i = 0; i < desc->natts; i++)
	{
		if (!TupleDescAttr(desc, i)->attisdropped)
			idx++;
	}
	if (PySequence_Length(sequence) != idx)
		ereport(ERROR,
				(errcode(ERRCODE_DATATYPE_MISMATCH),
				 errmsg("length of returned sequence did not match number of columns in row")));

	/* Build tuple */
	values = palloc(sizeof(Datum) * desc->natts);
	nulls = palloc(sizeof(bool) * desc->natts);
	/* idx tracks position in the sequence; i tracks tupdesc attributes */
	idx = 0;
	for (i = 0; i < desc->natts; ++i)
	{
		/* volatile: assigned inside PG_TRY, released in PG_CATCH */
		PyObject   *volatile value;
		PLyObToDatum *att;

		if (TupleDescAttr(desc, i)->attisdropped)
		{
			values[i] = (Datum) 0;
			nulls[i] = true;
			continue;
		}

		value = NULL;
		att = &arg->u.tuple.atts[i];
		PG_TRY();
		{
			value = PySequence_GetItem(sequence, idx);
			Assert(value);

			/* Convert the Python item to a Datum (may set nulls[i]) */
			values[i] = att->func(att, value, &nulls[i], false);

			Py_XDECREF(value);
			value = NULL;
		}
		PG_CATCH();
		{
			/* Drop the borrowed item reference before re-throwing */
			Py_XDECREF(value);
			PG_RE_THROW();
		}
		PG_END_TRY();

		idx++;
	}

	tuple = heap_form_tuple(desc, values, nulls);
	result = heap_copy_tuple_as_datum(tuple, desc);
	heap_freetuple(tuple);

	pfree(values);
	pfree(nulls);

	return result;
}
/*
 * Assign hook routine for the "gp_role" option.  This variable has context
 * PGC_SUSET so it can only be set by a superuser via the SET command.  (It
 * can also be set using an option on postmaster start, but this isn't
 * interesting because the derived global CdbRole is always set (along with
 * CdbSessionRole) on backend startup for a new connection.)
 *
 * Returns NULL if newval does not name a known role; otherwise returns
 * newval.  When 'doit' is set, switching roles tears down and rebuilds the
 * CDB connections appropriate to the new role.
 *
 * See src/backend/util/misc/guc.c for option definition.
 */
const char *
assign_gp_role(const char *newval, bool doit, GucSource source)
{
#if FALSE
	elog(DEBUG1, "assign_gp_role: gp_role=%s, newval=%s, doit=%s", show_gp_role(), newval, (doit ? "true" : "false"));
#endif

	GpRoleValue requested_role = string_to_role(newval);
	GpRoleValue prior_role = Gp_role;

	/* Reject unrecognized role names. */
	if (requested_role == GP_ROLE_UNDEFINED)
		return NULL;

	/* Validation-only call: accept the value without acting on it. */
	if (!doit)
		return newval;

	/*
	 * When changing between roles we must call cdb_cleanup and then
	 * cdb_setup to obtain connections appropriate to the new role.  A
	 * teardown is needed only when leaving a non-utility role, and a setup
	 * only when entering one; neither applies unless we are a live backend
	 * actually switching roles.
	 */
	bool		switching = (Gp_role != requested_role) && IsUnderPostmaster;
	bool		need_teardown = switching && (Gp_role != GP_ROLE_UTILITY);
	bool		need_setup = switching && (requested_role != GP_ROLE_UTILITY);

	if (need_teardown)
		cdb_cleanup(0, 0);

	Gp_role = requested_role;

	if (source != PGC_S_DEFAULT && need_setup)
	{
		/*
		 * In case there are problems with the Greenplum Database tables or
		 * data, we catch any error coming out of cdblink_setup so we can set
		 * the gp_role back to what it was.  Otherwise we may be left with
		 * inappropriate connections for the new role.
		 */
		PG_TRY();
		{
			cdb_setup();
		}
		PG_CATCH();
		{
			cdb_cleanup(0, 0);
			Gp_role = prior_role;
			if (Gp_role != GP_ROLE_UTILITY)
				cdb_setup();
			PG_RE_THROW();
		}
		PG_END_TRY();
	}

	return newval;
}
/*
 * SearchCatCacheList
 *
 *	Generate a list of all tuples matching a partial key (that is,
 *	a key specifying just the first K of the cache's N key columns).
 *
 *	The caller must not modify the list object or the pointed-to tuples,
 *	and must call ReleaseCatCacheList() when done with the list.
 */
CatCList *
SearchCatCacheList(CatCache *cache,
				   int nkeys,
				   Datum v1,
				   Datum v2,
				   Datum v3,
				   Datum v4)
{
	ScanKeyData cur_skey[CATCACHE_MAXKEYS];
	uint32		lHashValue;
	Dlelem	   *elt;
	CatCList   *cl;
	CatCTup    *ct;
	/* volatile: built inside PG_TRY, walked in PG_CATCH to undo refcounts */
	List	   *volatile ctlist;
	ListCell   *ctlist_item;
	int			nmembers;
	bool		ordered;
	HeapTuple	ntp;
	MemoryContext oldcxt;
	int			i;

	/*
	 * one-time startup overhead for each cache
	 */
	if (cache->cc_tupdesc == NULL)
		CatalogCacheInitializeCache(cache);

	Assert(nkeys > 0 && nkeys < cache->cc_nkeys);

#ifdef CATCACHE_STATS
	cache->cc_lsearches++;
#endif

	/*
	 * initialize the search key information
	 */
	memcpy(cur_skey, cache->cc_skey, sizeof(cur_skey));
	cur_skey[0].sk_argument = v1;
	cur_skey[1].sk_argument = v2;
	cur_skey[2].sk_argument = v3;
	cur_skey[3].sk_argument = v4;

	/*
	 * compute a hash value of the given keys for faster search.  We don't
	 * presently divide the CatCList items into buckets, but this still lets
	 * us skip non-matching items quickly most of the time.
	 */
	lHashValue = CatalogCacheComputeHashValue(cache, nkeys, cur_skey);

	/*
	 * scan the items until we find a match or exhaust our list
	 */
	for (elt = DLGetHead(&cache->cc_lists);
		 elt;
		 elt = DLGetSucc(elt))
	{
		bool		res;

		cl = (CatCList *) DLE_VAL(elt);

		if (cl->dead)
			continue;			/* ignore dead entries */

		if (cl->hash_value != lHashValue)
			continue;			/* quickly skip entry if wrong hash val */

		/*
		 * see if the cached list matches our key.
		 */
		if (cl->nkeys != nkeys)
			continue;
		HeapKeyTest(&cl->tuple,
					cache->cc_tupdesc,
					nkeys,
					cur_skey,
					res);
		if (!res)
			continue;

		/*
		 * We found a matching list.  Move the list to the front of the
		 * cache's list-of-lists, to speed subsequent searches.  (We do not
		 * move the members to the fronts of their hashbucket lists, however,
		 * since there's no point in that unless they are searched for
		 * individually.)
		 */
		DLMoveToFront(&cl->cache_elem);

		/* Bump the list's refcount and return it */
		ResourceOwnerEnlargeCatCacheListRefs(CurrentResourceOwner);
		cl->refcount++;
		ResourceOwnerRememberCatCacheListRef(CurrentResourceOwner, cl);

		CACHE2_elog(DEBUG2, "SearchCatCacheList(%s): found list",
					cache->cc_relname);

#ifdef CATCACHE_STATS
		cache->cc_lhits++;
#endif

		return cl;
	}

	/*
	 * List was not found in cache, so we have to build it by reading the
	 * relation.  For each matching tuple found in the relation, use an
	 * existing cache entry if possible, else build a new one.
	 *
	 * We have to bump the member refcounts temporarily to ensure they won't
	 * get dropped from the cache while loading other members. We use a PG_TRY
	 * block to ensure we can undo those refcounts if we get an error before
	 * we finish constructing the CatCList.
	 */
	ResourceOwnerEnlargeCatCacheListRefs(CurrentResourceOwner);

	ctlist = NIL;

	PG_TRY();
	{
		Relation	relation;
		SysScanDesc scandesc;

		relation = heap_open(cache->cc_reloid, AccessShareLock);

		scandesc = systable_beginscan(relation,
									  cache->cc_indexoid,
									  IndexScanOK(cache, cur_skey),
									  SnapshotNow,
									  nkeys,
									  cur_skey);

		/* The list will be ordered iff we are doing an index scan */
		ordered = (scandesc->irel != NULL);

		while (HeapTupleIsValid(ntp = systable_getnext(scandesc)))
		{
			uint32		hashValue;
			Index		hashIndex;

			/*
			 * See if there's an entry for this tuple already.
			 */
			ct = NULL;
			hashValue = CatalogCacheComputeTupleHashValue(cache, ntp);
			hashIndex = HASH_INDEX(hashValue, cache->cc_nbuckets);

			for (elt = DLGetHead(&cache->cc_bucket[hashIndex]);
				 elt;
				 elt = DLGetSucc(elt))
			{
				ct = (CatCTup *) DLE_VAL(elt);

				if (ct->dead || ct->negative)
					continue;	/* ignore dead and negative entries */

				if (ct->hash_value != hashValue)
					continue;	/* quickly skip entry if wrong hash val */

				if (!ItemPointerEquals(&(ct->tuple.t_self), &(ntp->t_self)))
					continue;	/* not same tuple */

				/*
				 * Found a match, but can't use it if it belongs to another
				 * list already
				 */
				if (ct->c_list)
					continue;

				break;			/* A-OK */
			}

			if (elt == NULL)
			{
				/* We didn't find a usable entry, so make a new one */
				ct = CatalogCacheCreateEntry(cache, ntp,
											 hashValue, hashIndex, false);
			}

			/* Careful here: add entry to ctlist, then bump its refcount */
			/* This way leaves state correct if lappend runs out of memory */
			ctlist = lappend(ctlist, ct);
			ct->refcount++;
		}

		systable_endscan(scandesc);

		heap_close(relation, AccessShareLock);

		/*
		 * Now we can build the CatCList entry.  First we need a dummy tuple
		 * containing the key values...
		 */
		ntp = build_dummy_tuple(cache, nkeys, cur_skey);
		oldcxt = MemoryContextSwitchTo(CacheMemoryContext);
		nmembers = list_length(ctlist);
		cl = (CatCList *)
			palloc(sizeof(CatCList) + nmembers * sizeof(CatCTup *));
		heap_copytuple_with_tuple(ntp, &cl->tuple);
		MemoryContextSwitchTo(oldcxt);
		heap_freetuple(ntp);

		/*
		 * We are now past the last thing that could trigger an elog before we
		 * have finished building the CatCList and remembering it in the
		 * resource owner.  So it's OK to fall out of the PG_TRY, and indeed
		 * we'd better do so before we start marking the members as belonging
		 * to the list.
		 */
	}
	PG_CATCH();
	{
		/* Undo the temporary refcounts taken on each member above. */
		foreach(ctlist_item, ctlist)
		{
			ct = (CatCTup *) lfirst(ctlist_item);
			Assert(ct->c_list == NULL);
			Assert(ct->refcount > 0);
			ct->refcount--;
			if (
#ifndef CATCACHE_FORCE_RELEASE
				ct->dead &&
#endif
				ct->refcount == 0 &&
				(ct->c_list == NULL || ct->c_list->refcount == 0))
				CatCacheRemoveCTup(cache, ct);
		}

		PG_RE_THROW();
	}
/*
 * NOTE(review): this fragment ends here; the matching PG_END_TRY() and the
 * remainder of the function (linking members into the new CatCList and
 * returning it) are not visible in this chunk of the file.
 */
/*
 * xpath_table
 *
 * Table function: runs "SELECT pkeyfield, xmlfield FROM relname WHERE
 * condition", evaluates a '|'-separated set of XPath expressions against
 * each XML document, and materializes one output row per nodeset entry
 * (first output column is the primary key, remaining columns are the XPath
 * results as text).
 *
 * NOTE(review): the SQL statement is built by pasting the caller-supplied
 * strings directly into the query text, so callers must pass trusted /
 * pre-quoted identifiers and conditions (classic contrib/xml2 behavior;
 * effectively an injection vector for untrusted input).
 */
Datum
xpath_table(PG_FUNCTION_ARGS)
{
	/* Function parameters */
	char	   *pkeyfield = text_to_cstring(PG_GETARG_TEXT_PP(0));
	char	   *xmlfield = text_to_cstring(PG_GETARG_TEXT_PP(1));
	char	   *relname = text_to_cstring(PG_GETARG_TEXT_PP(2));
	char	   *xpathset = text_to_cstring(PG_GETARG_TEXT_PP(3));
	char	   *condition = text_to_cstring(PG_GETARG_TEXT_PP(4));

	/* SPI (input tuple) support */
	SPITupleTable *tuptable;
	HeapTuple	spi_tuple;
	TupleDesc	spi_tupdesc;

	/* Output tuple (tuplestore) support */
	Tuplestorestate *tupstore = NULL;
	TupleDesc	ret_tupdesc;
	HeapTuple	ret_tuple;

	ReturnSetInfo *rsinfo = (ReturnSetInfo *) fcinfo->resultinfo;
	AttInMetadata *attinmeta;
	MemoryContext per_query_ctx;
	MemoryContext oldcontext;

	char	  **values;
	xmlChar   **xpaths;
	char	   *pos;
	const char *pathsep = "|";

	int			numpaths;
	int			ret;
	int			proc;
	int			i;
	int			j;
	int			rownr;			/* For issuing multiple rows from one original
								 * document */
	bool		had_values;		/* To determine end of nodeset results */
	StringInfoData query_buf;
	PgXmlErrorContext *xmlerrcxt;
	/* volatile: freed in PG_CATCH after being assigned inside PG_TRY */
	volatile xmlDocPtr doctree = NULL;

	/* We only have a valid tuple description in table function mode */
	if (rsinfo == NULL || !IsA(rsinfo, ReturnSetInfo))
		ereport(ERROR,
				(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
				 errmsg("set-valued function called in context that cannot accept a set")));
	if (rsinfo->expectedDesc == NULL)
		ereport(ERROR,
				(errcode(ERRCODE_SYNTAX_ERROR),
				 errmsg("xpath_table must be called as a table function")));

	/*
	 * We want to materialise because it means that we don't have to carry
	 * libxml2 parser state between invocations of this function
	 */
	if (!(rsinfo->allowedModes & SFRM_Materialize))
		ereport(ERROR,
				(errcode(ERRCODE_SYNTAX_ERROR),
				 errmsg("xpath_table requires Materialize mode, but it is not "
						"allowed in this context")));

	/*
	 * The tuplestore must exist in a higher context than this function call
	 * (per_query_ctx is used)
	 */
	per_query_ctx = rsinfo->econtext->ecxt_per_query_memory;
	oldcontext = MemoryContextSwitchTo(per_query_ctx);

	/*
	 * Create the tuplestore - work_mem is the max in-memory size before a
	 * file is created on disk to hold it.
	 */
	tupstore =
		tuplestore_begin_heap(rsinfo->allowedModes & SFRM_Materialize_Random,
							  false, work_mem);

	MemoryContextSwitchTo(oldcontext);

	/* get the requested return tuple description */
	ret_tupdesc = CreateTupleDescCopy(rsinfo->expectedDesc);

	/* must have at least one output column (for the pkey) */
	if (ret_tupdesc->natts < 1)
		ereport(ERROR,
				(errcode(ERRCODE_SYNTAX_ERROR),
				 errmsg("xpath_table must have at least one output column")));

	/*
	 * At the moment we assume that the returned attributes make sense for the
	 * XPath specififed (i.e. we trust the caller). It's not fatal if they get
	 * it wrong - the input function for the column type will raise an error
	 * if the path result can't be converted into the correct binary
	 * representation.
	 */
	attinmeta = TupleDescGetAttInMetadata(ret_tupdesc);

	/* Set return mode and allocate value space. */
	rsinfo->returnMode = SFRM_Materialize;
	rsinfo->setDesc = ret_tupdesc;

	values = (char **) palloc(ret_tupdesc->natts * sizeof(char *));
	xpaths = (xmlChar **) palloc(ret_tupdesc->natts * sizeof(xmlChar *));

	/*
	 * Split XPaths. xpathset is a writable CString.
	 *
	 * Note that we stop splitting once we've done all needed for tupdesc
	 */
	numpaths = 0;
	pos = xpathset;
	while (numpaths < (ret_tupdesc->natts - 1))
	{
		xpaths[numpaths++] = (xmlChar *) pos;
		pos = strstr(pos, pathsep);
		if (pos != NULL)
		{
			*pos = '\0';		/* terminate this path in place */
			pos++;
		}
		else
			break;
	}

	/* Now build query */
	initStringInfo(&query_buf);

	/* Build initial sql statement */
	appendStringInfo(&query_buf, "SELECT %s, %s FROM %s WHERE %s",
					 pkeyfield,
					 xmlfield,
					 relname,
					 condition);

	if ((ret = SPI_connect()) < 0)
		elog(ERROR, "xpath_table: SPI_connect returned %d", ret);

	if ((ret = SPI_exec(query_buf.data, 0)) != SPI_OK_SELECT)
		elog(ERROR, "xpath_table: SPI execution failed for query %s",
			 query_buf.data);

	proc = SPI_processed;
	/* elog(DEBUG1,"xpath_table: SPI returned %d rows",proc); */
	tuptable = SPI_tuptable;
	spi_tupdesc = tuptable->tupdesc;

	/* Switch out of SPI context */
	MemoryContextSwitchTo(oldcontext);

	/*
	 * Check that SPI returned correct result. If you put a comma into one of
	 * the function parameters, this will catch it when the SPI query returns
	 * e.g. 3 columns.
	 */
	if (spi_tupdesc->natts != 2)
	{
		ereport(ERROR,
				(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
				 errmsg("expression returning multiple columns is not valid in parameter list"),
				 errdetail("Expected two columns in SPI result, got %d.",
						   spi_tupdesc->natts)));
	}

	/*
	 * Setup the parser.  This should happen after we are done evaluating the
	 * query, in case it calls functions that set up libxml differently.
	 */
	xmlerrcxt = pgxml_parser_init(PG_XML_STRICTNESS_LEGACY);

	PG_TRY();
	{
		/* For each row i.e. document returned from SPI */
		for (i = 0; i < proc; i++)
		{
			char	   *pkey;
			char	   *xmldoc;
			xmlXPathContextPtr ctxt;
			xmlXPathObjectPtr res;
			xmlChar    *resstr;
			xmlXPathCompExprPtr comppath;

			/* Extract the row data as C Strings */
			spi_tuple = tuptable->vals[i];
			pkey = SPI_getvalue(spi_tuple, spi_tupdesc, 1);
			xmldoc = SPI_getvalue(spi_tuple, spi_tupdesc, 2);

			/*
			 * Clear the values array, so that not-well-formed documents
			 * return NULL in all columns.  Note that this also means that
			 * spare columns will be NULL.
			 */
			for (j = 0; j < ret_tupdesc->natts; j++)
				values[j] = NULL;

			/* Insert primary key */
			values[0] = pkey;

			/* Parse the document */
			if (xmldoc)
				doctree = xmlParseMemory(xmldoc, strlen(xmldoc));
			else				/* treat NULL as not well-formed */
				doctree = NULL;

			if (doctree == NULL)
			{
				/* not well-formed, so output all-NULL tuple */
				ret_tuple = BuildTupleFromCStrings(attinmeta, values);
				tuplestore_puttuple(tupstore, ret_tuple);
				heap_freetuple(ret_tuple);
			}
			else
			{
				/* New loop here - we have to deal with nodeset results */
				rownr = 0;

				do
				{
					/* Now evaluate the set of xpaths. */
					had_values = false;
					for (j = 0; j < numpaths; j++)
					{
						ctxt = xmlXPathNewContext(doctree);
						ctxt->node = xmlDocGetRootElement(doctree);

						/* compile the path */
						comppath = xmlXPathCompile(xpaths[j]);
						if (comppath == NULL)
							xml_ereport(xmlerrcxt, ERROR,
										ERRCODE_EXTERNAL_ROUTINE_EXCEPTION,
										"XPath Syntax Error");

						/* Now evaluate the path expression. */
						res = xmlXPathCompiledEval(comppath, ctxt);
						xmlXPathFreeCompExpr(comppath);

						if (res != NULL)
						{
							switch (res->type)
							{
								case XPATH_NODESET:
									/* We see if this nodeset has enough nodes */
									if (res->nodesetval != NULL &&
										rownr < res->nodesetval->nodeNr)
									{
										resstr = xmlXPathCastNodeToString(res->nodesetval->nodeTab[rownr]);
										had_values = true;
									}
									else
										resstr = NULL;

									break;

								case XPATH_STRING:
									resstr = xmlStrdup(res->stringval);
									break;

								default:
									elog(NOTICE, "unsupported XQuery result: %d", res->type);
									resstr = xmlStrdup((const xmlChar *) "<unsupported/>");
							}

							/*
							 * Insert this into the appropriate column in the
							 * result tuple.
							 *
							 * NOTE(review): 'res' itself is never released
							 * with xmlXPathFreeObject() here, so each
							 * evaluation leaks the xmlXPathObject until the
							 * memory context/parser teardown — verify
							 * against upstream before changing.
							 */
							values[j + 1] = (char *) resstr;
						}
						xmlXPathFreeContext(ctxt);
					}

					/* Now add the tuple to the output, if there is one. */
					if (had_values)
					{
						ret_tuple = BuildTupleFromCStrings(attinmeta, values);
						tuplestore_puttuple(tupstore, ret_tuple);
						heap_freetuple(ret_tuple);
					}

					rownr++;
				} while (had_values);
			}

			if (doctree != NULL)
				xmlFreeDoc(doctree);
			doctree = NULL;

			if (pkey)
				pfree(pkey);
			if (xmldoc)
				pfree(xmldoc);
		}
	}
	PG_CATCH();
	{
		/* Free the current document (if any) before propagating the error. */
		if (doctree != NULL)
			xmlFreeDoc(doctree);

		pg_xml_done(xmlerrcxt, true);

		PG_RE_THROW();
	}
	PG_END_TRY();

	if (doctree != NULL)
		xmlFreeDoc(doctree);

	pg_xml_done(xmlerrcxt, false);

	tuplestore_donestoring(tupstore);

	SPI_finish();

	rsinfo->setResult = tupstore;

	/*
	 * SFRM_Materialize mode expects us to return a NULL Datum. The actual
	 * tuples are in our tuplestore and passed back through rsinfo->setResult.
	 * rsinfo->setDesc is set to the tuple description that we actually used
	 * to build our tuples with, so the caller can verify we did what it was
	 * expecting.
	 */
	return (Datum) 0;
}