char *
lookup_analysis_thing(MemoryContext cxt, char *thing)
{
	char	   *definition = "";
	StringInfo	query;

	SPI_connect();

	query = makeStringInfo();
	appendStringInfo(query, "select (to_json(name) || ':' || definition) from %s;",
					 TextDatumGetCString(DirectFunctionCall1(quote_ident, CStringGetTextDatum(thing))));

	if (SPI_execute(query->data, true, 0) != SPI_OK_SELECT)
		elog(ERROR, "Problem looking up analysis thing with query: %s", query->data);

	if (SPI_processed > 0)
	{
		StringInfo	json = makeStringInfo();
		int			i;

		for (i = 0; i < SPI_processed; i++)
		{
			if (i > 0)
				appendStringInfoCharMacro(json, ',');
			appendStringInfo(json, "%s", SPI_getvalue(SPI_tuptable->vals[i], SPI_tuptable->tupdesc, 1));
		}

		definition = (char *) MemoryContextAllocZero(cxt, (Size) json->len + 1);
		memcpy(definition, json->data, json->len);
	}

	SPI_finish();

	return definition;
}

PxfFilterDesc *
build_filter(char lopcode, int lattnum, char *lconststr,
			 char ropcode, int rattnum, char *rconststr,
			 PxfOperatorCode op)
{
	PxfFilterDesc *filter = (PxfFilterDesc *) palloc0(sizeof(PxfFilterDesc));

	filter->l.opcode = lopcode;
	filter->l.attnum = lattnum;
	if (lconststr)
	{
		filter->l.conststr = makeStringInfo();
		appendStringInfoString(filter->l.conststr, lconststr);
	}

	filter->r.opcode = ropcode;
	filter->r.attnum = rattnum;
	if (rconststr)
	{
		filter->r.conststr = makeStringInfo();
		appendStringInfoString(filter->r.conststr, rconststr);
	}

	filter->op = op;

	return filter;
}

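/*
 * Hedged usage sketch for build_filter(). The operand opcodes ('a' for an
 * attribute, 'c' for a constant) and the operator code PXFOP_GT are
 * illustrative assumptions, not values taken from pxffilters.h; the guard
 * keeps the sketch out of any real build. It only shows the shape of a
 * "column > constant" filter: a left operand referring to attribute 1, a
 * right operand carrying the constant text "42", and an operator joining them.
 */
#ifdef PXF_FILTER_USAGE_SKETCH
static PxfFilterDesc *
example_gt_filter(void)
{
	return build_filter('a', 1, NULL,	/* left: attribute #1, no constant */
						'c', 0, "42",	/* right: constant "42" */
						PXFOP_GT);		/* hypothetical operator code */
}
#endif
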
/*
 * pg_start_backup: set up for taking an on-line backup dump
 *
 * Essentially what this does is to create a backup label file in $PGDATA,
 * where it will be archived as part of the backup dump. The label file
 * contains the user-supplied label string (typically this would be used
 * to tell where the backup dump will be stored) and the starting time and
 * starting WAL location for the dump.
 *
 * Permission checking for this function is managed through the normal
 * GRANT system.
 */
Datum
pg_start_backup(PG_FUNCTION_ARGS)
{
	text	   *backupid = PG_GETARG_TEXT_P(0);
	bool		fast = PG_GETARG_BOOL(1);
	bool		exclusive = PG_GETARG_BOOL(2);
	char	   *backupidstr;
	XLogRecPtr	startpoint;
	DIR		   *dir;

	backupidstr = text_to_cstring(backupid);

	if (exclusive_backup_running || nonexclusive_backup_running)
		ereport(ERROR,
				(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
				 errmsg("a backup is already in progress in this session")));

	/* Make sure we can open the directory with tablespaces in it */
	dir = AllocateDir("pg_tblspc");
	if (!dir)
		ereport(ERROR,
				(errmsg("could not open directory \"%s\": %m", "pg_tblspc")));

	if (exclusive)
	{
		startpoint = do_pg_start_backup(backupidstr, fast, NULL, NULL,
										dir, NULL, NULL, false, true);
		exclusive_backup_running = true;
	}
	else
	{
		MemoryContext oldcontext;

		/*
		 * Label file and tablespace map file need to be long-lived, since
		 * they are read in pg_stop_backup.
		 */
		oldcontext = MemoryContextSwitchTo(TopMemoryContext);
		label_file = makeStringInfo();
		tblspc_map_file = makeStringInfo();
		MemoryContextSwitchTo(oldcontext);

		startpoint = do_pg_start_backup(backupidstr, fast, NULL, label_file,
										dir, NULL, tblspc_map_file, false, true);
		nonexclusive_backup_running = true;

		before_shmem_exit(nonexclusive_base_backup_cleanup, (Datum) 0);
	}

	FreeDir(dir);

	PG_RETURN_LSN(startpoint);
}

/*
 * Returns the estimated normalized frequency of the item.
 */
Datum
cmsketch_norm_frequency(PG_FUNCTION_ARGS)
{
	CountMinSketch *cms;
	Datum		elem = PG_GETARG_DATUM(1);
	float8		freq = 0;
	Oid			val_type = get_fn_expr_argtype(fcinfo->flinfo, 1);
	TypeCacheEntry *typ;
	StringInfo	buf;

	if (val_type == InvalidOid)
		ereport(ERROR,
				(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
				 errmsg("could not determine input data type")));

	if (PG_ARGISNULL(0))
		PG_RETURN_FLOAT8(freq);

	cms = (CountMinSketch *) PG_GETARG_VARLENA_P(0);
	typ = lookup_type_cache(val_type, 0);

	buf = makeStringInfo();
	DatumToBytes(elem, typ, buf);
	freq = CountMinSketchEstimateNormFrequency(cms, buf->data, buf->len);

	pfree(buf->data);
	pfree(buf);

	PG_RETURN_FLOAT8(freq);
}

/*
 * Build a string containing the stack traces at which all currently-held
 * predefined LWLocks were acquired.
 */
const char *
LWLocksHeldStackTraces()
{
	if (num_held_lwlocks == 0)
		return NULL;

	StringInfo	append = makeStringInfo();	/* palloc'd */
	uint32		i = 0,
				cnt = 1;

	/* append stack trace for each held lock */
	for (i = 0; i < num_held_lwlocks; i++)
	{
		if (!LWLOCK_IS_PREDEFINED(held_lwlocks[i]))
			continue;

		appendStringInfo(append, "%d: LWLock %d:\n", cnt++, held_lwlocks[i]);

		char	   *stackTrace = gp_stacktrace(held_lwlocks_addresses[i],
											   held_lwlocks_depth[i]);

		Assert(stackTrace != NULL);
		appendStringInfoString(append, stackTrace);
		pfree(stackTrace);
	}

	Assert(append->len > 0);

	return append->data;
}

static Source *
CreateRemoteSource(const char *path, TupleDesc desc)
{
	RemoteSource *self = (RemoteSource *) palloc0(sizeof(RemoteSource));

	self->base.close = (SourceCloseProc) RemoteSourceClose;

	if (PG_PROTOCOL_MAJOR(FrontendProtocol) >= 3)
	{
		/* new way */
		StringInfoData buf;
		int16		format;
		int			nattrs;
		int			i;

		self->base.read = (SourceReadProc) RemoteSourceRead;

		/* count valid fields */
		for (nattrs = 0, i = 0; i < desc->natts; i++)
		{
			if (desc->attrs[i]->attisdropped)
				continue;
			nattrs++;
		}

		format = (IsBinaryCopy() ? 1 : 0);
		pq_beginmessage(&buf, 'G');
		pq_sendbyte(&buf, format);		/* overall format */
		pq_sendint(&buf, nattrs, 2);
		for (i = 0; i < nattrs; i++)
			pq_sendint(&buf, format, 2);	/* per-column formats */
		pq_endmessage(&buf);

		self->buffer = makeStringInfo();
	}
	else if (PG_PROTOCOL_MAJOR(FrontendProtocol) >= 2)
	{
		self->base.read = (SourceReadProc) RemoteSourceReadOld;

		/* old way */
		if (IsBinaryCopy())
			ereport(ERROR,
					(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
					 errmsg("COPY BINARY is not supported to stdout or from stdin")));
		pq_putemptymessage('G');
	}
	else
	{
		self->base.read = (SourceReadProc) RemoteSourceReadOld;

		/* very old way */
		if (IsBinaryCopy())
			ereport(ERROR,
					(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
					 errmsg("COPY BINARY is not supported to stdout or from stdin")));
		pq_putemptymessage('D');
	}

	/* We *must* flush here to ensure the FE knows it can send. */
	pq_flush();

	return (Source *) self;
}

Datum
orafce_to_char_numeric(PG_FUNCTION_ARGS)
{
	Numeric		arg0 = PG_GETARG_NUMERIC(0);
	StringInfo	buf = makeStringInfo();
	struct lconv *lconv = PGLC_localeconv();
	char	   *p;
	char	   *decimal = NULL;

	appendStringInfoString(buf, DatumGetCString(DirectFunctionCall1(numeric_out, NumericGetDatum(arg0))));

	for (p = buf->data; *p; p++)
	{
		if (*p == '.')
		{
			*p = lconv->decimal_point[0];
			decimal = p;	/* save decimal point position for the next loop */
		}
	}

	/*
	 * Simulate the default Oracle to_char template (TM9 - Text Minimum) by
	 * removing unneeded digits after the decimal point; if no digits are
	 * left, then remove the decimal point too.
	 */
	for (p = buf->data + buf->len - 1; decimal && p >= decimal; p--)
	{
		if (*p == '0' || *p == lconv->decimal_point[0])
			*p = 0;
		else
			break;		/* non-zero digit found, exit the loop */
	}

	PG_RETURN_TEXT_P(cstring_to_text(buf->data));
}

char *
lookup_field_mapping(MemoryContext cxt, Oid tableRelId, char *fieldname)
{
	char	   *definition = NULL;
	StringInfo	query;

	SPI_connect();

	query = makeStringInfo();
	/* Oid is unsigned, so format it with %u rather than %d */
	appendStringInfo(query, "select definition from zdb_mappings where table_name = %u::regclass and field_name = %s;",
					 tableRelId,
					 TextDatumGetCString(DirectFunctionCall1(quote_literal, CStringGetTextDatum(fieldname))));

	if (SPI_execute(query->data, true, 2) != SPI_OK_SELECT)
		elog(ERROR, "Problem looking up field mapping with query: %s", query->data);

	if (SPI_processed > 1)
	{
		elog(ERROR, "Too many mappings found");
	}
	else if (SPI_processed == 1)
	{
		char	   *json = SPI_getvalue(SPI_tuptable->vals[0], SPI_tuptable->tupdesc, 1);
		Size		len = strlen(json);

		definition = (char *) MemoryContextAllocZero(cxt, (Size) len + 1);
		memcpy(definition, json, len);
	}

	SPI_finish();

	return definition;
}

char *
lookup_primary_key(char *schemaName, char *tableName, bool failOnMissing)
{
	StringInfo	sql = makeStringInfo();
	char	   *keyname;

	SPI_connect();
	appendStringInfo(sql, "SELECT column_name FROM information_schema.key_column_usage WHERE table_schema = '%s' AND table_name = '%s'",
					 schemaName, tableName);
	SPI_execute(sql->data, true, 1);

	if (SPI_processed == 0)
	{
		if (failOnMissing)
			elog(ERROR, "Cannot find primary key column for: %s.%s", schemaName, tableName);
		else
		{
			SPI_finish();
			return NULL;
		}
	}

	keyname = SPI_getvalue(SPI_tuptable->vals[0], SPI_tuptable->tupdesc, 1);
	if (keyname == NULL)
		elog(ERROR, "Primary Key field is null for: %s.%s", schemaName, tableName);

	keyname = MemoryContextStrdup(TopTransactionContext, keyname);

	SPI_finish();

	return keyname;
}

bool
type_is_domain(char *type_name, Oid *base_type)
{
	bool		rc;
	StringInfo	query;

	SPI_connect();
	query = makeStringInfo();
	appendStringInfo(query, "SELECT typtype = 'd', typbasetype FROM pg_type WHERE typname = %s",
					 TextDatumGetCString(DirectFunctionCall1(quote_literal, CStringGetTextDatum(type_name))));

	if (SPI_execute(query->data, true, 1) != SPI_OK_SELECT)
		elog(ERROR, "Problem determining if %s is a domain with query: %s", type_name, query->data);

	if (SPI_processed == 0)
	{
		rc = false;
	}
	else
	{
		bool		isnull;
		Datum		d;

		d = SPI_getbinval(SPI_tuptable->vals[0], SPI_tuptable->tupdesc, 1, &isnull);
		rc = isnull || DatumGetBool(d);

		d = SPI_getbinval(SPI_tuptable->vals[0], SPI_tuptable->tupdesc, 2, &isnull);
		*base_type = isnull ? InvalidOid : DatumGetObjectId(d);
	}

	SPI_finish();

	return rc;
}

/*
 * GenerateAlterTableAttachPartitionCommand returns the necessary command to
 * attach the given partition to its parent.
 */
char *
GenerateAlterTableAttachPartitionCommand(Oid partitionTableId)
{
	StringInfo	createPartitionCommand = makeStringInfo();

#if (PG_VERSION_NUM >= 100000)
	char	   *partitionBoundCString = NULL;
	Oid			parentId = InvalidOid;
	char	   *tableQualifiedName = NULL;
	char	   *parentTableQualifiedName = NULL;

	if (!PartitionTable(partitionTableId))
	{
		char	   *relationName = get_rel_name(partitionTableId);

		ereport(ERROR, (errmsg("\"%s\" is not a partition", relationName)));
	}

	parentId = get_partition_parent(partitionTableId);
	tableQualifiedName = generate_qualified_relation_name(partitionTableId);
	parentTableQualifiedName = generate_qualified_relation_name(parentId);

	partitionBoundCString = PartitionBound(partitionTableId);

	appendStringInfo(createPartitionCommand, "ALTER TABLE %s ATTACH PARTITION %s %s;",
					 parentTableQualifiedName, tableQualifiedName,
					 partitionBoundCString);
#endif

	return createPartitionCommand->data;
}

/*
 * PrepareRemoteTransactions prepares all transactions on connections in
 * connectionList for commit if the 2PC commit protocol is enabled.
 * On failure, it reports an error and stops.
 */
void
PrepareRemoteTransactions(List *connectionList)
{
	ListCell   *connectionCell = NULL;

	foreach(connectionCell, connectionList)
	{
		TransactionConnection *transactionConnection =
			(TransactionConnection *) lfirst(connectionCell);
		PGconn	   *connection = transactionConnection->connection;
		int64		connectionId = transactionConnection->connectionId;

		PGresult   *result = NULL;
		StringInfo	command = makeStringInfo();
		StringInfo	transactionName = BuildTransactionName(connectionId);

		appendStringInfo(command, "PREPARE TRANSACTION '%s'", transactionName->data);

		result = PQexec(connection, command->data);
		if (PQresultStatus(result) != PGRES_COMMAND_OK)
		{
			/* a failure to prepare is an implicit rollback */
			transactionConnection->transactionState = TRANSACTION_STATE_CLOSED;

			WarnRemoteError(connection, result);
			PQclear(result);

			ereport(ERROR, (errcode(ERRCODE_IO_ERROR),
							errmsg("failed to prepare transaction")));
		}

		PQclear(result);

		transactionConnection->transactionState = TRANSACTION_STATE_PREPARED;
	}
}

StringInfo
pgq_init_varbuf(void)
{
	StringInfo	buf;

	buf = makeStringInfo();
	/* reserve 4 bytes at the front as a placeholder for the varlena length header */
	appendStringInfoString(buf, "XXXX");
	return buf;
}

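/*
 * Hedged sketch of the matching finish step (the real pgq helper may differ
 * in detail, so this is an assumption about the pattern, not the library's
 * API): once the payload has been appended after the 4-byte "XXXX"
 * placeholder, the placeholder is overwritten with the actual varlena length
 * and the buffer can be returned directly as a text/bytea Datum.
 */
#ifdef PGQ_VARBUF_SKETCH
static Datum
finish_varbuf_sketch(StringInfo buf)
{
	SET_VARSIZE(buf->data, buf->len);	/* stamp the total length over "XXXX" */
	return PointerGetDatum(buf->data);
}
#endif
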
static StringInfo
get_dymanic_record_fields(PlxFn *plx_fn, FunctionCallInfo fcinfo)
{
	StringInfo	buf;
	Oid			oid;
	TupleDesc	tuple_desc;
	int			i;

	get_call_result_type(fcinfo, &oid, &tuple_desc);
	buf = makeStringInfo();
	for (i = 0; i < tuple_desc->natts; i++)
	{
		Form_pg_attribute a;
		HeapTuple	type_tuple;
		Form_pg_type type_struct;

		a = tuple_desc->attrs[i];
		type_tuple = SearchSysCache(TYPEOID, ObjectIdGetDatum(a->atttypid), 0, 0, 0);
		if (!HeapTupleIsValid(type_tuple))
			plx_error(plx_fn, "cache lookup failed for type %u", a->atttypid);
		type_struct = (Form_pg_type) GETSTRUCT(type_tuple);

		appendStringInfo(buf, "%s%s %s",
						 (i > 0) ? ", " : "",
						 quote_identifier(NameStr(a->attname)),
						 quote_identifier(NameStr(type_struct->typname)));

		ReleaseSysCache(type_tuple);
	}
	return buf;
}

/*
 * worker_cleanup_job_schema_cache walks over all schemas in the database, and
 * removes schemas whose names start with the job schema prefix. Note that this
 * function does not perform any locking; we expect it to be called at process
 * start-up time before any merge tasks are run. Further note that this function
 * runs within the scope of a particular database (template1, postgres) and can
 * only delete schemas within that database.
 */
Datum
worker_cleanup_job_schema_cache(PG_FUNCTION_ARGS)
{
	Relation	pgNamespace = NULL;
	HeapScanDesc scanDescriptor = NULL;
	ScanKey		scanKey = NULL;
	int			scanKeyCount = 0;
	HeapTuple	heapTuple = NULL;

	pgNamespace = heap_open(NamespaceRelationId, AccessExclusiveLock);
	scanDescriptor = heap_beginscan_catalog(pgNamespace, scanKeyCount, scanKey);

	heapTuple = heap_getnext(scanDescriptor, ForwardScanDirection);
	while (HeapTupleIsValid(heapTuple))
	{
		Form_pg_namespace schemaForm = (Form_pg_namespace) GETSTRUCT(heapTuple);
		char	   *schemaName = NameStr(schemaForm->nspname);

		char	   *jobSchemaFound = strstr(schemaName, JOB_SCHEMA_PREFIX);
		if (jobSchemaFound != NULL)
		{
			StringInfo	jobSchemaName = makeStringInfo();

			appendStringInfoString(jobSchemaName, schemaName);
			RemoveJobSchema(jobSchemaName);
		}

		heapTuple = heap_getnext(scanDescriptor, ForwardScanDirection);
	}

	heap_endscan(scanDescriptor);
	heap_close(pgNamespace, AccessExclusiveLock);

	PG_RETURN_VOID();
}

/*
 * Retrieves the operator name.
 * Result is palloc-ed in the current memory context.
 */
static StringInfo
get_name_from_nodeType(const NodeTag node_type)
{
	StringInfo	operator_name = makeStringInfo();

	switch (node_type)
	{
		case T_AggState:
			appendStringInfoString(operator_name, "Agg");
			break;
		case T_HashJoinState:
			appendStringInfoString(operator_name, "HashJoin");
			break;
		case T_MaterialState:
			appendStringInfoString(operator_name, "Material");
			break;
		case T_SortState:
			appendStringInfoString(operator_name, "Sort");
			break;
		case T_Invalid:
			/* When spilling from a builtin function, we don't have a valid node type */
			appendStringInfoString(operator_name, "BuiltinFunction");
			break;
		default:
			Assert(false && "Operator not supported by the workfile manager");
	}

	return operator_name;
}

void
run__const_to_str__negative(Const *input, StringInfo result, char *value)
{
	StringInfo	err_msg = makeStringInfo();

	appendStringInfo(err_msg,
					 "internal error in pxffilters.c:const_to_str. "
					 "Using unsupported data type (%d) (value %s)",
					 input->consttype, value);

	/* Setting the test -- code omitted -- */

	PG_TRY();
	{
		/* This will throw an ereport(ERROR). */
		const_to_str(input, result);
	}
	PG_CATCH();
	{
		/*
		 * Test-only hack: point CurrentMemoryContext at a dummy non-NULL
		 * value so that CopyErrorData(), which refuses to run inside
		 * ErrorContext, works in this mocked environment.
		 */
		CurrentMemoryContext = (MemoryContext) 1;
		ErrorData  *edata = CopyErrorData();

		/* Validate the type of expected error */
		assert_true(edata->sqlerrcode == ERRCODE_INTERNAL_ERROR);
		assert_true(edata->elevel == ERROR);
		assert_string_equal(edata->message, err_msg->data);

		pfree(err_msg->data);
		pfree(err_msg);
		return;
	}
	PG_END_TRY();

	assert_true(false);
}

static rc
testStringInfo(void)
{
	StringInfo	str = makeStringInfo();

	appendStringInfoChar(str, 'a');
	ASSERT_EQUALS_STRING("a", str->data, "data is a");

	appendStringInfoString(str, "hello");
	ASSERT_EQUALS_STRING("ahello", str->data, "data is ahello");
	ASSERT_EQUALS_INT(6, str->len, "length is 6");

	for (int i = 0; i < 256; i++)
		appendStringInfoChar(str, 'b');
	ASSERT_EQUALS_INT(6 + 256, str->len, "length is 6 + 256");
	for (int i = 255; i < 256 + 6; i++)
		ASSERT_EQUALS_INT('b', str->data[i], "chars are all b");

	resetStringInfo(str);
	ASSERT_EQUALS_INT(0, str->len, "after reset length is 0");
	appendStringInfo(str, "%s", "test");
	ASSERT_EQUALS_STRING("test", str->data, "data is test");

	return PASS;
}

/*
 * CmsTopnEstimateItemFrequency calculates estimated frequency for the given
 * item and returns it.
 */
static Frequency
CmsTopnEstimateItemFrequency(CmsTopn *cmsTopn, Datum item,
							 TypeCacheEntry *itemTypeCacheEntry)
{
	uint64		hashValueArray[2] = {0, 0};
	StringInfo	itemString = makeStringInfo();
	Frequency	frequency = 0;

	/* if datum is toasted, detoast it */
	if (itemTypeCacheEntry->typlen == -1)
	{
		Datum		detoastedItem = PointerGetDatum(PG_DETOAST_DATUM(item));

		ConvertDatumToBytes(detoastedItem, itemTypeCacheEntry, itemString);
	}
	else
	{
		ConvertDatumToBytes(item, itemTypeCacheEntry, itemString);
	}

	/*
	 * Calculate hash values for the given item and then get frequency
	 * estimate with these hashed values.
	 */
	MurmurHash3_x64_128(itemString->data, itemString->len, MURMUR_SEED,
						&hashValueArray);
	frequency = CmsTopnEstimateHashedItemFrequency(cmsTopn, hashValueArray);

	return frequency;
}

/*
 * Returns whether the Bloom filter contains the item or not
 */
Datum
bloom_contains(PG_FUNCTION_ARGS)
{
	BloomFilter *bloom;
	Datum		elem = PG_GETARG_DATUM(1);
	bool		contains = false;
	Oid			val_type = get_fn_expr_argtype(fcinfo->flinfo, 1);
	TypeCacheEntry *typ;
	StringInfo	buf;

	if (val_type == InvalidOid)
		ereport(ERROR,
				(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
				 errmsg("could not determine input data type")));

	if (PG_ARGISNULL(0))
		PG_RETURN_BOOL(contains);

	bloom = (BloomFilter *) PG_GETARG_VARLENA_P(0);
	typ = lookup_type_cache(val_type, 0);

	buf = makeStringInfo();
	DatumToBytes(elem, typ, buf);
	contains = BloomFilterContains(bloom, buf->data, buf->len);

	pfree(buf->data);
	pfree(buf);

	PG_RETURN_BOOL(contains);
}

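/*
 * Hedged usage sketch at the SQL level: bloom_contains is the function
 * above, while bloom_agg is an assumed companion aggregate that builds the
 * filter from a column; the exact aggregate name and signature may differ
 * in the actual extension.
 *
 *   SELECT bloom_contains(bloom_agg(x), 42) FROM values_table;
 */
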
void
verify__const_to_str(bool is_null, char *const_value, Oid const_type, char *expected)
{
	StringInfo	result = makeStringInfo();
	char	   *value = NULL;
	Const	   *input = (Const *) palloc0(sizeof(Const));

	input->constisnull = is_null;
	input->consttype = const_type;

	/* need to prepare inner functions */
	if (!is_null)
	{
		value = strdup(const_value);	/* will be free'd by const_to_str */
		mock__const_to_str(const_type, value);
	}

	/* no expected value means it's a negative test */
	if (expected)
	{
		run__const_to_str(input, result, expected);
	}
	else
	{
		run__const_to_str__negative(input, result, value);
		pfree(value);	/* value was not freed by const_to_str because of the failure */
	}

	pfree(result->data);
	pfree(result);
	pfree(input);
}

/*
 * GenerateDetachPartitionCommand gets a partition table and returns
 * "ALTER TABLE parent_table DETACH PARTITION partitionName" command.
 */
char *
GenerateDetachPartitionCommand(Oid partitionTableId)
{
	StringInfo	detachPartitionCommand = makeStringInfo();

#if (PG_VERSION_NUM >= 100000)
	Oid			parentId = InvalidOid;
	char	   *tableQualifiedName = NULL;
	char	   *parentTableQualifiedName = NULL;

	if (!PartitionTable(partitionTableId))
	{
		char	   *relationName = get_rel_name(partitionTableId);

		ereport(ERROR, (errmsg("\"%s\" is not a partition", relationName)));
	}

	parentId = get_partition_parent(partitionTableId);
	tableQualifiedName = generate_qualified_relation_name(partitionTableId);
	parentTableQualifiedName = generate_qualified_relation_name(parentId);

	appendStringInfo(detachPartitionCommand,
					 "ALTER TABLE IF EXISTS %s DETACH PARTITION %s;",
					 parentTableQualifiedName, tableQualifiedName);
#endif

	return detachPartitionCommand->data;
}

static orafce_lexnode *
compose(orafce_lexnode *a, orafce_lexnode *b)
{
	orafce_lexnode *result;
	StringInfo	sinfo;

	sinfo = makeStringInfo();
	result = NEWNODE(IDENT);
	result->lloc = a->lloc;

	if (strcmp(SF(mod(a)), "dq") == 0)
		appendStringInfo(sinfo, "\"%s\".", a->str);
	else
	{
		appendStringInfoString(sinfo, a->str);
		appendStringInfoChar(sinfo, '.');
	}

	if (strcmp(SF(mod(b)), "dq") == 0)
		appendStringInfo(sinfo, "\"%s\"", b->str);
	else
		appendStringInfoString(sinfo, b->str);

	result->str = sinfo->data;

	return result;
}

void
init_json_lex_context(JsonLexContext *lex, char *json)
{
	lex->input = lex->token_terminator = lex->line_start = json;
	lex->line_number = 1;
	lex->input_length = strlen(json);
	lex->strval = makeStringInfo();
}

/**
 * Init nodeDML, which initializes the insert TupleTableSlot.
 */
DMLState *
ExecInitDML(DML *node, EState *estate, int eflags)
{
	/* check for unsupported flags */
	Assert(!(eflags & (EXEC_FLAG_BACKWARD | EXEC_FLAG_MARK | EXEC_FLAG_REWIND)));

	DMLState   *dmlstate = makeNode(DMLState);

	dmlstate->ps.plan = (Plan *) node;
	dmlstate->ps.state = estate;

	ExecInitResultTupleSlot(estate, &dmlstate->ps);

	dmlstate->ps.targetlist = (List *)
		ExecInitExpr((Expr *) node->plan.targetlist, (PlanState *) dmlstate);

	Plan	   *outerPlan = outerPlan(node);

	outerPlanState(dmlstate) = ExecInitNode(outerPlan, estate, eflags);

	ExecAssignResultTypeFromTL(&dmlstate->ps);

	/* Create expression evaluation context. This will be used for projections. */
	ExecAssignExprContext(estate, &dmlstate->ps);

	/*
	 * Create projection info from the child tuple descriptor and our target
	 * list. The projection will be placed in the ResultSlot.
	 */
	TupleTableSlot *childResultSlot = outerPlanState(dmlstate)->ps_ResultTupleSlot;

	ExecAssignProjectionInfo(&dmlstate->ps, childResultSlot->tts_tupleDescriptor);

	/*
	 * Initialize slot to insert/delete using output relation descriptor.
	 */
	dmlstate->cleanedUpSlot = ExecInitExtraTupleSlot(estate);

	/*
	 * Both input and output of the junk filter include dropped attributes,
	 * so the junk filter doesn't need to do anything special about them.
	 */
	TupleDesc	cleanTupType = CreateTupleDescCopy(
		dmlstate->ps.state->es_result_relation_info->ri_RelationDesc->rd_att);

	dmlstate->junkfilter = ExecInitJunkFilter(node->plan.targetlist,
											  cleanTupType,
											  dmlstate->cleanedUpSlot);

	if (estate->es_instrument)
	{
		dmlstate->ps.cdbexplainbuf = makeStringInfo();

		/* Request a callback at end of query. */
		dmlstate->ps.cdbexplainfun = ExecDMLExplainEnd;
	}

	initGpmonPktForDML((Plan *) node, &dmlstate->ps.gpmon_pkt, estate);

	return dmlstate;
}

// create SQL code from query block model
char *
qbToSQL(Node *node)
{
	StringInfo	str = makeStringInfo();

	nodeToSQL(str, node);

	return str->data;
}

/* Constructs a standardized task table name for the given task id. */
StringInfo
TaskTableName(uint32 taskId)
{
	StringInfo	taskTableName = makeStringInfo();

	appendStringInfo(taskTableName, "%s%0*u",
					 TASK_TABLE_PREFIX, MIN_TASK_FILENAME_WIDTH, taskId);

	return taskTableName;
}

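/*
 * Hedged illustration of the %0*u conversion above. The concrete values of
 * TASK_TABLE_PREFIX and MIN_TASK_FILENAME_WIDTH are assumptions, not taken
 * from this source: with a prefix of "task_" and a minimum width of 6,
 * TaskTableName(42)->data would be "task_000042" -- the task id zero-padded
 * to the minimum width.
 */
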
/*
 * GetConnection returns a PGconn which can be used to execute queries on a
 * remote PostgreSQL server. If no suitable connection to the specified node on
 * the specified port yet exists, the function establishes a new connection and
 * returns that.
 *
 * Returned connections are guaranteed to be in the CONNECTION_OK state. If the
 * requested connection cannot be established, or if it was previously created
 * but is now in an unrecoverable bad state, this function returns NULL.
 *
 * This function throws an error if a hostname over 255 characters is provided.
 */
PGconn *
GetConnection(char *nodeName, int32 nodePort)
{
	PGconn	   *connection = NULL;
	NodeConnectionKey nodeConnectionKey;
	NodeConnectionEntry *nodeConnectionEntry = NULL;
	bool		entryFound = false;
	bool		needNewConnection = true;

	/* check input */
	if (strnlen(nodeName, MAX_NODE_LENGTH + 1) > MAX_NODE_LENGTH)
	{
		ereport(ERROR, (errmsg("hostnames may not exceed 255 characters")));
	}

	/* if first call, initialize the connection hash */
	if (NodeConnectionHash == NULL)
	{
		NodeConnectionHash = CreateNodeConnectionHash();
	}

	memset(&nodeConnectionKey, 0, sizeof(nodeConnectionKey));
	strncpy(nodeConnectionKey.nodeName, nodeName, MAX_NODE_LENGTH);
	nodeConnectionKey.nodePort = nodePort;

	nodeConnectionEntry = hash_search(NodeConnectionHash, &nodeConnectionKey,
									  HASH_FIND, &entryFound);
	if (entryFound)
	{
		connection = nodeConnectionEntry->connection;
		if (PQstatus(connection) == CONNECTION_OK)
		{
			needNewConnection = false;
		}
		else
		{
			PurgeConnection(connection);
		}
	}

	if (needNewConnection)
	{
		StringInfo	nodePortString = makeStringInfo();

		appendStringInfo(nodePortString, "%d", nodePort);

		connection = ConnectToNode(nodeName, nodePortString->data);
		if (connection != NULL)
		{
			nodeConnectionEntry = hash_search(NodeConnectionHash,
											  &nodeConnectionKey,
											  HASH_ENTER, &entryFound);
			nodeConnectionEntry->connection = connection;
		}
	}

	return connection;
}

/*
 * Decode an INSERT entry
 */
static void
decoder_raw_insert(StringInfo s, Relation relation, HeapTuple tuple)
{
	TupleDesc	tupdesc = RelationGetDescr(relation);
	int			natt;
	bool		first_column = true;

	/* makeStringInfo() already initializes the buffer; no extra initStringInfo() needed */
	StringInfo	values = makeStringInfo();

	/* Query header */
	appendStringInfo(s, "INSERT INTO ");
	print_relname(s, relation);
	appendStringInfo(s, " (");

	/* Build column names and values */
	for (natt = 0; natt < tupdesc->natts; natt++)
	{
		Form_pg_attribute attr;
		Datum		origval;
		bool		isnull;

		attr = tupdesc->attrs[natt];

		/* Skip dropped columns and system columns */
		if (attr->attisdropped || attr->attnum < 0)
			continue;

		/* Don't prepend a comma before the first column */
		if (!first_column)
		{
			appendStringInfoString(s, ", ");
			appendStringInfoString(values, ", ");
		}
		else
			first_column = false;

		/* Print attribute name */
		appendStringInfo(s, "%s", quote_identifier(NameStr(attr->attname)));

		/* Get Datum from tuple */
		origval = heap_getattr(tuple, natt + 1, tupdesc, &isnull);

		/* Get output function */
		print_value(values, origval, attr->atttypid, isnull);
	}

	/* Append values */
	appendStringInfo(s, ") VALUES (%s);", values->data);

	/* Clean up */
	resetStringInfo(values);
}

Datum
xmlnode_debug_print(PG_FUNCTION_ARGS)
{
	xmlnode		nodeRaw = (xmlnode) PG_GETARG_VARLENA_P(0);
	char	   *data = (char *) VARDATA(nodeRaw);
	StringInfo	output = makeStringInfo();

	dumpXMLNodeDebug(output, data, XNODE_ROOT_OFFSET(nodeRaw));
	PG_RETURN_TEXT_P(cstring_to_text(output->data));
}