static void
add_indent(StringInfo out, bool indent, int level)
{
	if (indent)
	{
		int			i;

		appendStringInfoCharMacro(out, '\n');
		for (i = 0; i < level; i++)
			appendBinaryStringInfo(out, "    ", 4);
	}
}
Example #2
static JsQuery*
joinJsQuery(JsQueryItemType type, JsQuery *jq1, JsQuery *jq2)
{
	JsQuery			*out;
	StringInfoData	buf;
	int32			left, right, chld;
	JsQueryItem	v;	

	initStringInfo(&buf);
	enlargeStringInfo(&buf, VARSIZE_ANY(jq1) + VARSIZE_ANY(jq2) + 4 * sizeof(int32) + VARHDRSZ);

	appendStringInfoSpaces(&buf, VARHDRSZ);

	/* form jqiAnd/jqiOr header */
	appendStringInfoChar(&buf, (char)type);
	alignStringInfoInt(&buf);

	/* nextPos field of header */
	chld = 0; /* actual value, not a fake */
	appendBinaryStringInfo(&buf, (char*)&chld, sizeof(chld));

	left = buf.len;
	appendBinaryStringInfo(&buf, (char*)&left /* fake value */, sizeof(left));
	right = buf.len;
	appendBinaryStringInfo(&buf, (char*)&right /* fake value */, sizeof(right));

	/* dump left and right subtree */
	jsqInit(&v, jq1);
	chld = copyJsQuery(&buf, &v);
	*(int32*)(buf.data + left) = chld;
	jsqInit(&v, jq2);
	chld = copyJsQuery(&buf, &v);
	*(int32*)(buf.data + right) = chld;

	out = (JsQuery*)buf.data;
	SET_VARSIZE(out, buf.len);

	return out;
}
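For reference, a minimal sketch (not taken from the jsquery sources; the function name is hypothetical) of a SQL-callable wrapper that exposes joinJsQuery as a logical AND over two queries, mirroring the shape of jsquery_not shown further below:

Datum
jsquery_join_and(PG_FUNCTION_ARGS)
{
	JsQuery	   *jq1 = PG_GETARG_JSQUERY(0);
	JsQuery	   *jq2 = PG_GETARG_JSQUERY(1);
	JsQuery	   *out;

	/* build a new jsquery tree whose root is a jqiAnd node over both inputs */
	out = joinJsQuery(jqiAnd, jq1, jq2);

	PG_FREE_IF_COPY(jq1, 0);
	PG_FREE_IF_COPY(jq2, 1);

	PG_RETURN_JSQUERY(out);
}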
Example #3
int write_frame(LogicalDecodingContext *ctx, plugin_state *state) {
    int err = 0;
    bytea *output = NULL;

    check(err, try_writing(&output, &write_avro_binary, &state->frame_value));

    OutputPluginPrepareWrite(ctx, true);
    appendBinaryStringInfo(ctx->out, VARDATA(output), VARSIZE(output) - VARHDRSZ);
    OutputPluginWrite(ctx, true);

    pfree(output);
    return err;
}
Example #4
static StringInfo
buf_init(txid xmin, txid xmax)
{
	TxidSnapshot snap;
	StringInfo	buf;

	snap.xmin = xmin;
	snap.xmax = xmax;
	snap.nxip = 0;

	buf = makeStringInfo();
	appendBinaryStringInfo(buf, (char *) &snap, TXID_SNAPSHOT_SIZE(0));
	return buf;
}
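A related sketch, modeled on the companion helper in PostgreSQL's txid.c: after buf_init() has appended the empty snapshot header, entries are added by bumping nxip in that header and appending each xid to the same buffer.

static void
buf_add_txid(StringInfo buf, txid xid)
{
	TxidSnapshot *snap = (TxidSnapshot *) buf->data;

	/* bump the count in the already-appended header */
	snap->nxip++;

	appendBinaryStringInfo(buf, (char *) &xid, sizeof(xid));
}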
Example #5
/* --------------------------------
 *		pq_getstring	- get a null terminated string from connection
 *
 *		The return value is placed in an expansible StringInfo, which has
 *		already been initialized by the caller.
 *
 *		This is used only for dealing with old-protocol clients.  The idea
 *		is to produce a StringInfo that looks the same as we would get from
 *		pq_getmessage() with a newer client; we will then process it with
 *		pq_getmsgstring.  Therefore, no character set conversion is done here,
 *		even though this is presumably useful only for text.
 *
 *		returns 0 if OK, EOF if trouble
 * --------------------------------
 */
int
pq_getstring(StringInfo s)
{
	int			i;

	/* Reset string to empty */
	s->len = 0;
	s->data[0] = '\0';
	s->cursor = 0;

	/* Read until we get the terminating '\0' */
	for (;;)
	{
		while (PqRecvPointer >= PqRecvLength)
		{
			if (pq_recvbuf())	/* If nothing in buffer, then recv some */
				return EOF;		/* Failed to recv data */
		}

		for (i = PqRecvPointer; i < PqRecvLength; i++)
		{
			if (PqRecvBuffer[i] == '\0')
			{
				/* include the '\0' in the copy */
				appendBinaryStringInfo(s, PqRecvBuffer + PqRecvPointer,
									   i - PqRecvPointer + 1);
				PqRecvPointer = i + 1;	/* advance past \0 */
				return 0;
			}
		}

		/* If we're here we haven't got the \0 in the buffer yet. */
		appendBinaryStringInfo(s, PqRecvBuffer + PqRecvPointer,
							   PqRecvLength - PqRecvPointer);
		PqRecvPointer = PqRecvLength;
	}
}
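A minimal caller-side sketch (backend-only; handle_old_protocol_message is a hypothetical handler): the caller owns the expansible StringInfo, and pq_getstring refills it with one NUL-terminated message per call until the connection drops.

static void
read_old_protocol_messages(void)
{
	StringInfoData inBuf;

	initStringInfo(&inBuf);
	for (;;)
	{
		if (pq_getstring(&inBuf) == EOF)
			break;				/* client closed the connection */

		/* inBuf.data holds the message, including its trailing '\0' */
		handle_old_protocol_message(&inBuf);	/* hypothetical handler */
	}
	pfree(inBuf.data);
}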
Example #6
Datum
jsquery_not(PG_FUNCTION_ARGS)
{
	JsQuery			*jq = PG_GETARG_JSQUERY(0);
	JsQuery			*out;
	StringInfoData	buf;
	int32			arg, chld;
	JsQueryItem		v;

	initStringInfo(&buf);
	enlargeStringInfo(&buf, VARSIZE_ANY(jq) + 4 * sizeof(int32) + VARHDRSZ);

	appendStringInfoSpaces(&buf, VARHDRSZ);

	/* form jsquery header */
	appendStringInfoChar(&buf, (char)jqiNot);
	alignStringInfoInt(&buf);

	/* nextPos field of header */
	chld = 0; /* actual value, not a fake */
	appendBinaryStringInfo(&buf, (char*)&chld, sizeof(chld));

	arg = buf.len;
	appendBinaryStringInfo(&buf, (char*)&arg /* fake value */, sizeof(arg));

	jsqInit(&v, jq);
	chld = copyJsQuery(&buf, &v);
	*(int32*)(buf.data + arg) = chld;

	out = (JsQuery*)buf.data;
	SET_VARSIZE(out, buf.len);

	PG_FREE_IF_COPY(jq, 0);

	PG_RETURN_JSQUERY(out);
}
Example #7
/*
 * nodeToBinaryStringFast -
 *	   returns a binary representation of the Node as a palloc'd string
 */
char *
nodeToBinaryStringFast(void *obj, int * length)
{
	StringInfoData str;
	int16 tg = (int16) 0xDEAD;

	/* see stringinfo.h for an explanation of this maneuver */
	initStringInfoOfSize(&str, 4096);

	_outNode(&str, obj);

	/* Add something special at the end that we can check in readfast.c */
	appendBinaryStringInfo(&str, (const char *)&tg, sizeof(int16));

	*length = str.len;
	return str.data;
}
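A short caller-side sketch (the wrapper name is illustrative): the length comes back through the out parameter, and the trailing 0xDEAD sentinel can be verified before the bytes are shipped to the reader.

static char *
serialize_node_with_check(void *node, int *len)
{
	char   *buf = nodeToBinaryStringFast(node, len);
	int16	tag;

	/* the last two bytes are the 0xDEAD sentinel appended above */
	memcpy(&tag, buf + *len - sizeof(int16), sizeof(int16));
	Assert(tag == (int16) 0xDEAD);

	return buf;		/* the reader in readfast.c strips and re-checks the tag */
}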
Example #8
Datum UDT_send(UDT udt, PG_FUNCTION_ARGS)
{
	StringInfoData buf;
	int32 dataLen = Type_getLength((Type)udt);

	if(!UDT_isScalar(udt))
		ereport(ERROR, (
			errcode(ERRCODE_CANNOT_COERCE),
			errmsg("UDT with Oid %d is not scalar", Type_getOid((Type)udt))));

	if(dataLen == -1)
		return byteasend(fcinfo);

	if(dataLen == -2)
		return unknownsend(fcinfo);

	pq_begintypsend(&buf);
	appendBinaryStringInfo(&buf, PG_GETARG_POINTER(0), dataLen);
	PG_RETURN_BYTEA_P(pq_endtypsend(&buf));
}
Example #9
static void
_outTupleDescNode(StringInfo str, TupleDescNode *node)
{
	int			i;

	Assert(node->tuple->tdtypeid == RECORDOID);

	WRITE_NODE_TYPE("TUPLEDESCNODE");
	WRITE_INT_FIELD(natts);
	WRITE_INT_FIELD(tuple->natts);

	for (i = 0; i < node->tuple->natts; i++)
		appendBinaryStringInfo(str, node->tuple->attrs[i], ATTRIBUTE_FIXED_PART_SIZE);

	Assert(node->tuple->constr == NULL);

	WRITE_OID_FIELD(tuple->tdtypeid);
	WRITE_INT_FIELD(tuple->tdtypmod);
	WRITE_INT_FIELD(tuple->tdqdtypmod);
	WRITE_BOOL_FIELD(tuple->tdhasoid);
	WRITE_INT_FIELD(tuple->tdrefcount);
}
Example #10
/**
 * SerializeMemoryAccount:
 * 		A visitor function that serializes a particular memory account. Called
 * 		from the walker to serialize the whole tree.
 *
 * memoryAccount: The memory account which will be represented as CSV
 * context: context information to pass between successive function calls
 * depth: The depth in the tree for the current node. Used to generate indentation.
 * parentWalkSerial: parent node's "walk serial"
 * curWalkSerial: current node's "walk serial"
 */
static CdbVisitOpt
SerializeMemoryAccount(MemoryAccount *memoryAccount, void *context, uint32 depth,
		uint32 parentWalkSerial, uint32 curWalkSerial)
{
	if (memoryAccount == NULL) return CdbVisit_Walk;

	Assert(MemoryAccountIsValid(memoryAccount));

	MemoryAccountSerializerCxt *memAccountCxt = (MemoryAccountSerializerCxt*) context;

	SerializedMemoryAccount serializedMemoryAccount;
	serializedMemoryAccount.type = T_SerializedMemoryAccount;
	serializedMemoryAccount.memoryAccount = *memoryAccount;
	serializedMemoryAccount.memoryAccountSerial = curWalkSerial;
	serializedMemoryAccount.parentMemoryAccountSerial = parentWalkSerial;

	appendBinaryStringInfo(memAccountCxt->buffer, (const char *) &serializedMemoryAccount,
						   sizeof(SerializedMemoryAccount));

	memAccountCxt->memoryAccountCount++;

	return CdbVisit_Walk;
}
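A minimal sketch of driving this visitor directly for a single account (the real code lets a tree walker call it for every node; the wrapper below is illustrative, not part of the GPDB API):

static int
serialize_one_account(MemoryAccount *account, StringInfo buf)
{
	MemoryAccountSerializerCxt cxt;

	memset(&cxt, 0, sizeof(cxt));
	cxt.buffer = buf;
	cxt.memoryAccountCount = 0;

	/* depth 0, parent serial 0, own serial 1; a real tree walk supplies these */
	(void) SerializeMemoryAccount(account, &cxt, 0, 0, 1);

	/* buf->data now holds cxt.memoryAccountCount fixed-size records */
	return cxt.memoryAccountCount;
}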
Example #11
/*
 * Wrapper for libchurl
 */
static void process_request(ClientContext* client_context, char *uri)
{
	size_t n = 0;
	char buffer[RAW_BUF_SIZE];

	print_http_headers(client_context->http_headers);
	client_context->handle = churl_init_download(uri, client_context->http_headers);
	memset(buffer, 0, RAW_BUF_SIZE);
	resetStringInfo(&(client_context->the_rest_buf));

	/*
	 * This try-catch ensures that in case of an exception during the "communication with PXF and the accumulation of
	 * PXF data in client_context->the_rest_buf", we still get to terminate the libcurl connection nicely and avoid
	 * leaving the PXF server connection hung.
	 */
	PG_TRY();
	{
		/* read some bytes to make sure the connection is established */
		churl_read_check_connectivity(client_context->handle);
		while ((n = churl_read(client_context->handle, buffer, sizeof(buffer))) != 0)
		{
			appendBinaryStringInfo(&(client_context->the_rest_buf), buffer, n);
			memset(buffer, 0, RAW_BUF_SIZE);
		}
		churl_cleanup(client_context->handle, false);
	}
	PG_CATCH();
	{
		if (client_context->handle)
			churl_cleanup(client_context->handle, true);
		PG_RE_THROW();
	}
	PG_END_TRY();


}
Example #12
static size_t curl_write_func(char *ptr, size_t size, size_t nmemb, void *userdata) {
    StringInfo response = (StringInfo) userdata;
    appendBinaryStringInfo(response, ptr, size * nmemb);
    return size * nmemb;
}
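The callback above follows libcurl's CURLOPT_WRITEFUNCTION contract: it must return the number of bytes it consumed. A minimal sketch of registering it so a response body accumulates into a StringInfo (the wrapper name is illustrative and error handling is trimmed):

#include <curl/curl.h>

static StringInfo http_get_to_stringinfo(const char *url) {
    StringInfo response = makeStringInfo();
    CURL *curl = curl_easy_init();

    if (curl == NULL)
        elog(ERROR, "curl_easy_init failed");

    curl_easy_setopt(curl, CURLOPT_URL, url);
    curl_easy_setopt(curl, CURLOPT_WRITEFUNCTION, curl_write_func);
    curl_easy_setopt(curl, CURLOPT_WRITEDATA, (void *) response);

    if (curl_easy_perform(curl) != CURLE_OK)
        elog(ERROR, "request to %s failed", url);

    curl_easy_cleanup(curl);
    return response;        /* response->data / response->len hold the body */
}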
Example #13
/*
 * kafka_consume_main
 *
 * Main function for Kafka consumers running as background workers
 */
void
kafka_consume_main(Datum arg)
{
	char err_msg[512];
	rd_kafka_topic_conf_t *topic_conf;
	rd_kafka_t *kafka;
	rd_kafka_topic_t *topic;
	rd_kafka_message_t **messages;
	const struct rd_kafka_metadata *meta;
	struct rd_kafka_metadata_topic topic_meta;
	rd_kafka_resp_err_t err;
	bool found;
	Oid id = (Oid) arg;
	ListCell *lc;
	KafkaConsumerProc *proc = hash_search(consumer_procs, &id, HASH_FIND, &found);
	KafkaConsumer consumer;
	CopyStmt *copy;
	int valid_brokers = 0;
	int i;
	int my_partitions = 0;

	if (!found)
		elog(ERROR, "kafka consumer %d not found", id);

	pqsignal(SIGTERM, kafka_consume_main_sigterm);
#define BACKTRACE_SEGFAULTS
#ifdef BACKTRACE_SEGFAULTS
	pqsignal(SIGSEGV, debug_segfault);
#endif

	/* we're now ready to receive signals */
	BackgroundWorkerUnblockSignals();

	/* give this proc access to the database */
	BackgroundWorkerInitializeConnection(NameStr(proc->dbname), NULL);

	/* load saved consumer state */
	StartTransactionCommand();
	load_consumer_state(proc->consumer_id, &consumer);
	copy = get_copy_statement(&consumer);

	topic_conf = rd_kafka_topic_conf_new();
	kafka = rd_kafka_new(RD_KAFKA_CONSUMER, NULL, err_msg, sizeof(err_msg));
	rd_kafka_set_logger(kafka, logger);

	/*
	 * Add all brokers currently in pipeline_kafka_brokers
	 */
	if (consumer.brokers == NIL)
		elog(ERROR, "no valid brokers were found");

	foreach(lc, consumer.brokers)
		valid_brokers += rd_kafka_brokers_add(kafka, lfirst(lc));

	if (!valid_brokers)
		elog(ERROR, "no valid brokers were found");

	/*
	 * Set up our topic to read from
	 */
	topic = rd_kafka_topic_new(kafka, consumer.topic, topic_conf);
	err = rd_kafka_metadata(kafka, false, topic, &meta, CONSUMER_TIMEOUT);

	if (err != RD_KAFKA_RESP_ERR_NO_ERROR)
		elog(ERROR, "failed to acquire metadata: %s", rd_kafka_err2str(err));

	Assert(meta->topic_cnt == 1);
	topic_meta = meta->topics[0];

	load_consumer_offsets(&consumer, &topic_meta, proc->offset);
	CommitTransactionCommand();

	/*
	* Begin consuming all partitions that this process is responsible for
	*/
	for (i = 0; i < topic_meta.partition_cnt; i++)
	{
		int partition = topic_meta.partitions[i].id;

		Assert(partition <= consumer.num_partitions);
		if (partition % consumer.parallelism != proc->partition_group)
			continue;

		elog(LOG, "[kafka consumer] %s <- %s consuming partition %d from offset %ld",
				consumer.rel->relname, consumer.topic, partition, consumer.offsets[partition]);

		if (rd_kafka_consume_start(topic, partition, consumer.offsets[partition]) == -1)
			elog(ERROR, "failed to start consuming: %s", rd_kafka_err2str(rd_kafka_errno2err(errno)));

		my_partitions++;
	}

	/*
	* No point doing anything if we don't have any partitions assigned to us
	*/
	if (my_partitions == 0)
	{
		elog(LOG, "[kafka consumer] %s <- %s consumer %d doesn't have any partitions to read from",
				consumer.rel->relname, consumer.topic, MyProcPid);
		goto done;
	}

	messages = palloc0(sizeof(rd_kafka_message_t *) * consumer.batch_size);

	/*
	 * Consume messages until we are terminated
	 */
	while (!got_sigterm)
	{
		ssize_t num_consumed;
		int i;
		int messages_buffered = 0;
		int partition;
		StringInfoData buf;
		bool xact = false;

		for (partition = 0; partition < consumer.num_partitions; partition++)
		{
			if (partition % consumer.parallelism != proc->partition_group)
				continue;

			num_consumed = rd_kafka_consume_batch(topic, partition,
					CONSUMER_TIMEOUT, messages, consumer.batch_size);

			if (num_consumed <= 0)
				continue;

			if (!xact)
			{
				StartTransactionCommand();
				xact = true;
			}

			initStringInfo(&buf);
			for (i = 0; i < num_consumed; i++)
			{
				if (messages[i]->payload != NULL)
				{
					appendBinaryStringInfo(&buf, messages[i]->payload, messages[i]->len);
					if (buf.len > 0 && buf.data[buf.len - 1] != '\n')
						appendStringInfoChar(&buf, '\n');
					messages_buffered++;
				}
				consumer.offsets[partition] = messages[i]->offset;
				rd_kafka_message_destroy(messages[i]);
			}
		}

		if (!xact)
		{
			pg_usleep(1 * 1000);
			continue;
		}

		/* we don't want to die in the event of any errors */
		PG_TRY();
		{
			if (messages_buffered)
				execute_copy(copy, &buf);
		}
		PG_CATCH();
		{
			elog(LOG, "[kafka consumer] %s <- %s failed to process batch, dropped %d message%s:",
					consumer.rel->relname, consumer.topic, (int) num_consumed, (num_consumed == 1 ? "" : "s"));
			EmitErrorReport();
			FlushErrorState();

			AbortCurrentTransaction();
			xact = false;
		}
		PG_END_TRY();

		if (!xact)
			StartTransactionCommand();

		if (messages_buffered)
			save_consumer_state(&consumer, proc->partition_group);

		CommitTransactionCommand();
	}

done:

	hash_search(consumer_procs, &id, HASH_REMOVE, NULL);

	rd_kafka_topic_destroy(topic);
	rd_kafka_destroy(kafka);
	rd_kafka_wait_destroyed(CONSUMER_TIMEOUT);
}
Example #14
Datum
hstore_to_json_loose(PG_FUNCTION_ARGS)
{
	HStore	   *in = PG_GETARG_HS(0);
	int			i;
	int			count = HS_COUNT(in);
	char	   *base = STRPTR(in);
	HEntry	   *entries = ARRPTR(in);
	bool		is_number;
	StringInfoData tmp,
				dst;

	if (count == 0)
		PG_RETURN_TEXT_P(cstring_to_text_with_len("{}",2));

	initStringInfo(&tmp);
	initStringInfo(&dst);

	appendStringInfoChar(&dst, '{');

	for (i = 0; i < count; i++)
	{
		resetStringInfo(&tmp);
		appendBinaryStringInfo(&tmp, HS_KEY(entries, base, i), HS_KEYLEN(entries, i));
		escape_json(&dst, tmp.data);
		appendStringInfoString(&dst, ": ");
		if (HS_VALISNULL(entries, i))
			appendStringInfoString(&dst, "null");
		/* guess that values of 't' or 'f' are booleans */
		else if (HS_VALLEN(entries, i) == 1 && *(HS_VAL(entries, base, i)) == 't')
			appendStringInfoString(&dst, "true");
		else if (HS_VALLEN(entries, i) == 1 && *(HS_VAL(entries, base, i)) == 'f')
			appendStringInfoString(&dst, "false");
		else
		{
			is_number = false;
			resetStringInfo(&tmp);
			appendBinaryStringInfo(&tmp, HS_VAL(entries, base, i), HS_VALLEN(entries, i));

			/*
			 * don't treat something with a leading zero followed by another
			 * digit as numeric - could be a zip code or similar
			 */
			if (tmp.len > 0 &&
				!(tmp.data[0] == '0' &&
				  isdigit((unsigned char) tmp.data[1])) &&
				strspn(tmp.data, "+-0123456789Ee.") == tmp.len)
			{
				/*
				 * might be a number. See if we can input it as a numeric
				 * value. Ignore any actual parsed value.
				 */
				char	   *endptr = "junk";
				long		lval;

				lval = strtol(tmp.data, &endptr, 10);
				(void) lval;
				if (*endptr == '\0')
				{
					/*
					 * strtol man page says this means the whole string is
					 * valid
					 */
					is_number = true;
				}
				else
				{
					/* not an int - try a double */
					double		dval;

					dval = strtod(tmp.data, &endptr);
					(void) dval;
					if (*endptr == '\0')
						is_number = true;
				}
			}
			if (is_number)
				appendBinaryStringInfo(&dst, tmp.data, tmp.len);
			else
				escape_json(&dst, tmp.data);
		}

		if (i + 1 != count)
			appendStringInfoString(&dst, ", ");
	}
	appendStringInfoChar(&dst, '}');

	PG_RETURN_TEXT_P(cstring_to_text(dst.data));
}
Example #15
/*
 * Append those parts of path that have not yet been appended. The HashMap
 * unique keeps track of what has been appended already. The first appended
 * part will be prefixed with prefix.
 */
static void appendPathParts(const char* path, StringInfoData* bld, HashMap unique, const char* prefix)
{
	StringInfoData buf;
	if(path == 0 || strlen(path) == 0)
		return;

	for (;;)
	{
		char* pathPart;
		size_t len;
		if(*path == 0)
			break;

		len = strcspn(path, ";:");

		if(len == 1 && *(path+1) == ':' && isalnum(*path))
			/*
			 * Windows drive designator, leave it "as is".
			 */
			len = strcspn(path+2, ";:") + 2;
		else
		if(len == 0)
			{
			/* Ignore zero length components.
			 */
			++path;
			continue;
			}

		initStringInfo(&buf);
		if(*path == '$')
		{
			if(len == 7 || (strcspn(path, "/\\") == 7 && strncmp(path, "$libdir", 7) == 0))
			{
				char pathbuf[MAXPGPATH];
				get_pkglib_path(my_exec_path, pathbuf);
				len -= 7;
				path += 7;
				appendStringInfoString(&buf, pathbuf);
			}
			else
				ereport(ERROR, (
					errcode(ERRCODE_INVALID_NAME),
					errmsg("invalid macro name '%*s' in PL/Java classpath", (int)len, path)));
		}

		if(len > 0)
		{
			appendBinaryStringInfo(&buf, path, (int)len);
			path += len;
		}

		pathPart = buf.data;
		if(HashMap_getByString(unique, pathPart) == 0)
		{
			if(HashMap_size(unique) == 0)
				appendStringInfo(bld, "%s", prefix);
			else
#if defined(WIN32)
				appendStringInfoChar(bld, ';');
#else
				appendStringInfoChar(bld, ':');
#endif
			appendStringInfo(bld, "%s", pathPart);
			HashMap_putByString(unique, pathPart, (void*)1);
		}
		pfree(pathPart);
		if(*path == 0)
			break;
		++path; /* Skip ':' */
	}
}
Example #16
/*
 * Deserialize a HeapTuple's data from a byte-array.
 *
 * This code is based on the binary input handling functions in copy.c.
 */
HeapTuple
DeserializeTuple(SerTupInfo * pSerInfo, StringInfo serialTup)
{
	MemoryContext oldCtxt;
	TupleDesc	tupdesc;
	HeapTuple	htup;
	int			natts;
	SerAttrInfo *attrInfo;
	uint32		attr_size;

	int			i;
	StringInfoData attr_data;
	bool		fHandled;

	AssertArg(pSerInfo != NULL);
	AssertArg(serialTup != NULL);

	tupdesc = pSerInfo->tupdesc;
	natts = tupdesc->natts;

	/*
	 * Flip to our tuple-serialization memory-context, to speed up memory
	 * reclamation operations.
	 */
	AssertState(s_tupSerMemCtxt != NULL);
	oldCtxt = MemoryContextSwitchTo(s_tupSerMemCtxt);

	/* Receive nulls character-array. */
	pq_copymsgbytes(serialTup, pSerInfo->nulls, natts);
	skipPadding(serialTup);

	/* Deserialize the non-NULL attributes of this tuple */
	initStringInfo(&attr_data);
	for (i = 0; i < natts; ++i)
	{
		attrInfo = pSerInfo->myinfo + i;

		if (pSerInfo->nulls[i])	/* NULL field. */
		{
			pSerInfo->values[i] = (Datum) 0;
			continue;
		}

		/*
		 * Assume that the data's output will be handled by the special IO
		 * code, and if not then we can handle it the slow way.
		 */
		fHandled = true;
		switch (attrInfo->atttypid)
		{
			case INT4OID:
				pSerInfo->values[i] = Int32GetDatum(stringInfoGetInt32(serialTup));
				break;

			case CHAROID:
				pSerInfo->values[i] = CharGetDatum(pq_getmsgbyte(serialTup));
				skipPadding(serialTup);
				break;

			case BPCHAROID:
			case VARCHAROID:
			case INT2VECTOROID: /* postgres serialization logic broken, use our own */
			case OIDVECTOROID: /* postgres serialization logic broken, use our own */
			case ANYARRAYOID:
			{
				text	   *pText;
				int			textSize;

				textSize = stringInfoGetInt32(serialTup);

#ifdef TUPSER_SCRATCH_SPACE
				if (textSize + VARHDRSZ <= attrInfo->varlen_scratch_size)
					pText = (text *) attrInfo->pv_varlen_scratch;
				else
					pText = (text *) palloc(textSize + VARHDRSZ);
#else
				pText = (text *) palloc(textSize + VARHDRSZ);
#endif

				SET_VARSIZE(pText, textSize + VARHDRSZ);
				pq_copymsgbytes(serialTup, VARDATA(pText), textSize);
				skipPadding(serialTup);
				pSerInfo->values[i] = PointerGetDatum(pText);
				break;
			}

			case DATEOID:
			{
				/*
				 * TODO:  I would LIKE to do something more efficient, but
				 * DateADT is not strictly limited to 4 bytes by its
				 * definition.
				 */
				DateADT date;

				pq_copymsgbytes(serialTup, (char *) &date, sizeof(DateADT));
				skipPadding(serialTup);
				pSerInfo->values[i] = DateADTGetDatum(date);
				break;
			}

			case NUMERICOID:
			{
				/*
				 * Treat the numeric as a varlena variable, and just push
				 * the whole shebang to the output-buffer.	We don't care
				 * about the guts of the numeric.
				 */
				Numeric		num;
				int			numSize;

				numSize = stringInfoGetInt32(serialTup);

#ifdef TUPSER_SCRATCH_SPACE
				if (numSize + VARHDRSZ <= attrInfo->varlen_scratch_size)
					num = (Numeric) attrInfo->pv_varlen_scratch;
				else
					num = (Numeric) palloc(numSize + VARHDRSZ);
#else
				num = (Numeric) palloc(numSize + VARHDRSZ);
#endif

				SET_VARSIZE(num, numSize + VARHDRSZ);
				pq_copymsgbytes(serialTup, VARDATA(num), numSize);
				skipPadding(serialTup);
				pSerInfo->values[i] = NumericGetDatum(num);
				break;
			}

			case ACLITEMOID:
			{
				int		aclSize, k, cnt;
				char		*inputstring, *starsfree;

				aclSize = stringInfoGetInt32(serialTup);
				inputstring = (char*) palloc(aclSize  + 1);
				starsfree = (char*) palloc(aclSize  + 1);
				cnt = 0;
	

				pq_copymsgbytes(serialTup, inputstring, aclSize);
				skipPadding(serialTup);
				inputstring[aclSize] = '\0';
				for(k=0; k<aclSize; k++)
				{					
					if( inputstring[k] != '*')
					{
						starsfree[cnt] = inputstring[k];
						cnt++;
					}
				}
				starsfree[cnt] = '\0';

				pSerInfo->values[i] = DirectFunctionCall1(aclitemin, CStringGetDatum(starsfree));
				pfree(inputstring);
				break;
			}

			case 210:
			{
				int 		strsize;
				char		*smgrstr;

				strsize = stringInfoGetInt32(serialTup);
				smgrstr = (char*) palloc(strsize + 1);
				pq_copymsgbytes(serialTup, smgrstr, strsize);
				skipPadding(serialTup);
				smgrstr[strsize] = '\0';

				pSerInfo->values[i] = DirectFunctionCall1(smgrin, CStringGetDatum(smgrstr));
				break;
			}
			default:
				fHandled = false;
		}

		if (fHandled)
			continue;

		attr_size = stringInfoGetInt32(serialTup);

		/* reset attr_data to empty, and load raw data into it */

		attr_data.len = 0;
		attr_data.data[0] = '\0';
		attr_data.cursor = 0;

		appendBinaryStringInfo(&attr_data,
							   pq_getmsgbytes(serialTup, attr_size), attr_size);
		skipPadding(serialTup);

		/* Call the attribute type's binary input converter. */
		if (attrInfo->recv_finfo.fn_nargs == 1)
			pSerInfo->values[i] = FunctionCall1(&attrInfo->recv_finfo,
												PointerGetDatum(&attr_data));
		else if (attrInfo->recv_finfo.fn_nargs == 2)
			pSerInfo->values[i] = FunctionCall2(&attrInfo->recv_finfo,
												PointerGetDatum(&attr_data),
												ObjectIdGetDatum(attrInfo->recv_typio_param));
		else if (attrInfo->recv_finfo.fn_nargs == 3)
			pSerInfo->values[i] = FunctionCall3(&attrInfo->recv_finfo,
												PointerGetDatum(&attr_data),
												ObjectIdGetDatum(attrInfo->recv_typio_param),
												Int32GetDatum(tupdesc->attrs[i]->atttypmod) );  
		else
		{
			ereport(ERROR,
					(errcode(ERRCODE_INVALID_BINARY_REPRESENTATION),
					 errmsg("Conversion function takes %d args",attrInfo->recv_finfo.fn_nargs)));
		}

		/* Trouble if it didn't eat the whole buffer */
		if (attr_data.cursor != attr_data.len)
		{
			ereport(ERROR,
					(errcode(ERRCODE_INVALID_BINARY_REPRESENTATION),
					 errmsg("incorrect binary data format")));
		}
	}

	/*
	 * Construct the tuple from the Datums and nulls values.  NOTE:  Switch
	 * out of our temporary context before we form the tuple!
	 */
	MemoryContextSwitchTo(oldCtxt);

	htup = heap_form_tuple(tupdesc, pSerInfo->values, pSerInfo->nulls);

	MemoryContextReset(s_tupSerMemCtxt);

	/* All done.  Return the result. */
	return htup;
}
Example #17
/*
 * The next token in the input stream is known to be a string; lex it.
 */
static inline void
json_lex_string(JsonLexContext *lex)
{
	char	   *s;
	int			len;
	int			hi_surrogate = -1;

	if (lex->strval != NULL)
		resetStringInfo(lex->strval);

	Assert(lex->input_length > 0);
	s = lex->token_start;
	len = lex->token_start - lex->input;
	for (;;)
	{
		s++;
		len++;
		/* Premature end of the string. */
		if (len >= lex->input_length)
		{
			lex->token_terminator = s;
			report_invalid_token(lex);
		}
		else if (*s == '"')
			break;
		else if ((unsigned char) *s < 32)
		{
			/* Per RFC4627, these characters MUST be escaped. */
			/* Since *s isn't printable, exclude it from the context string */
			lex->token_terminator = s;
			ereport(ERROR,
					(errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
					 errmsg("invalid input syntax for type json"),
					 errdetail("Character with value 0x%02x must be escaped.",
							   (unsigned char) *s),
					 report_json_context(lex)));
		}
		else if (*s == '\\')
		{
			/* OK, we have an escape character. */
			s++;
			len++;
			if (len >= lex->input_length)
			{
				lex->token_terminator = s;
				report_invalid_token(lex);
			}
			else if (*s == 'u')
			{
				int			i;
				int			ch = 0;

				for (i = 1; i <= 4; i++)
				{
					s++;
					len++;
					if (len >= lex->input_length)
					{
						lex->token_terminator = s;
						report_invalid_token(lex);
					}
					else if (*s >= '0' && *s <= '9')
						ch = (ch * 16) + (*s - '0');
					else if (*s >= 'a' && *s <= 'f')
						ch = (ch * 16) + (*s - 'a') + 10;
					else if (*s >= 'A' && *s <= 'F')
						ch = (ch * 16) + (*s - 'A') + 10;
					else
					{
						lex->token_terminator = s + pg_mblen(s);
						ereport(ERROR,
								(errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
								 errmsg("invalid input syntax for type json"),
								 errdetail("\"\\u\" must be followed by four hexadecimal digits."),
								 report_json_context(lex)));
					}
				}
				if (lex->strval != NULL)
				{
					char		utf8str[5];
					int			utf8len;

					if (ch >= 0xd800 && ch <= 0xdbff)
					{
						if (hi_surrogate != -1)
							ereport(ERROR,
							   (errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
								errmsg("invalid input syntax for type json"),
								errdetail("Unicode high surrogate must not follow a high surrogate."),
								report_json_context(lex)));
						hi_surrogate = (ch & 0x3ff) << 10;
						continue;
					}
					else if (ch >= 0xdc00 && ch <= 0xdfff)
					{
						if (hi_surrogate == -1)
							ereport(ERROR,
							   (errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
								errmsg("invalid input syntax for type json"),
								errdetail("Unicode low surrogate must follow a high surrogate."),
								report_json_context(lex)));
						ch = 0x10000 + hi_surrogate + (ch & 0x3ff);
						hi_surrogate = -1;
					}

					if (hi_surrogate != -1)
						ereport(ERROR,
								(errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
								 errmsg("invalid input syntax for type json"),
								 errdetail("Unicode low surrogate must follow a high surrogate."),
								 report_json_context(lex)));

					/*
					 * For UTF8, replace the escape sequence by the actual utf8
					 * character in lex->strval. Do this also for other encodings
					 * if the escape designates an ASCII character, otherwise
					 * raise an error. We don't ever unescape a \u0000, since that
					 * would result in an impermissible nul byte.
					 */

					if (ch == 0)
					{
						appendStringInfoString(lex->strval, "\\u0000");
					}
					else if (GetDatabaseEncoding() == PG_UTF8)
					{
						unicode_to_utf8(ch, (unsigned char *) utf8str);
						utf8len = pg_utf_mblen((unsigned char *) utf8str);
						appendBinaryStringInfo(lex->strval, utf8str, utf8len);
					}
					else if (ch <= 0x007f)
					{
						/*
						 * This is the only way to designate things like a form feed
						 * character in JSON, so it's useful in all encodings.
						 */
						appendStringInfoChar(lex->strval, (char) ch);
					}
					else
					{
						ereport(ERROR,
								(errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
								 errmsg("invalid input syntax for type json"),
								 errdetail("Unicode escape values cannot be used for code point values above 007F when the server encoding is not UTF8."),
								 report_json_context(lex)));
					}

				}
			}
			else if (lex->strval != NULL)
			{
				if (hi_surrogate != -1)
					ereport(ERROR,
							(errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
							 errmsg("invalid input syntax for type json"),
							 errdetail("Unicode low surrogate must follow a high surrogate."),
							 report_json_context(lex)));

				switch (*s)
				{
					case '"':
					case '\\':
					case '/':
						appendStringInfoChar(lex->strval, *s);
						break;
					case 'b':
						appendStringInfoChar(lex->strval, '\b');
						break;
					case 'f':
						appendStringInfoChar(lex->strval, '\f');
						break;
					case 'n':
						appendStringInfoChar(lex->strval, '\n');
						break;
					case 'r':
						appendStringInfoChar(lex->strval, '\r');
						break;
					case 't':
						appendStringInfoChar(lex->strval, '\t');
						break;
					default:
						/* Not a valid string escape, so error out. */
						lex->token_terminator = s + pg_mblen(s);
						ereport(ERROR,
								(errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
								 errmsg("invalid input syntax for type json"),
							errdetail("Escape sequence \"\\%s\" is invalid.",
									  extract_mb_char(s)),
								 report_json_context(lex)));
				}
			}
			else if (strchr("\"\\/bfnrt", *s) == NULL)
			{
				/*
				 * Simpler processing if we're not bothered about de-escaping
				 *
				 * It's very tempting to remove the strchr() call here and
				 * replace it with a switch statement, but testing so far has
				 * shown it's not a performance win.
				 */
				lex->token_terminator = s + pg_mblen(s);
				ereport(ERROR,
						(errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
						 errmsg("invalid input syntax for type json"),
						 errdetail("Escape sequence \"\\%s\" is invalid.",
								   extract_mb_char(s)),
						 report_json_context(lex)));
			}

		}
		else if (lex->strval != NULL)
		{
			if (hi_surrogate != -1)
				ereport(ERROR,
						(errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
						 errmsg("invalid input syntax for type json"),
						 errdetail("Unicode low surrogate must follow a high surrogate."),
						 report_json_context(lex)));

			appendStringInfoChar(lex->strval, *s);
		}

	}

	if (hi_surrogate != -1)
		ereport(ERROR,
				(errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
				 errmsg("invalid input syntax for type json"),
		errdetail("Unicode low surrogate must follow a high surrogate."),
				 report_json_context(lex)));

	/* Hooray, we found the end of the string! */
	lex->prev_token_terminator = lex->token_terminator;
	lex->token_terminator = s + 1;
}
Example #18
/*
 * Write an int1 to the buffer
 */
static void appendInt1ToBuffer(StringInfo buf, uint8 val)
{
	unsigned char n8;
	n8 = (unsigned char)val;
	appendBinaryStringInfo(buf, (char *) &n8, 1);
}
Example #19
/*
 * JsonbToCString
 *	   Converts jsonb value to a C-string.
 *
 * If the 'out' argument is non-null, the resulting C-string is stored inside that
 * StringInfo.  The resulting string is always returned.
 *
 * A typical case for passing the StringInfo in rather than NULL is where the
 * caller wants access to the len attribute without having to call strlen, e.g.
 * if they are converting it to a text* object.
 */
char *
JsonbToCString(StringInfo out, JsonbSuperHeader in, int estimated_len)
{
	bool		first = true;
	JsonbIterator *it;
	int			type = 0;
	JsonbValue	v;
	int			level = 0;
	bool		redo_switch = false;

	if (out == NULL)
		out = makeStringInfo();

	enlargeStringInfo(out, (estimated_len >= 0) ? estimated_len : 64);

	it = JsonbIteratorInit(in);

	while (redo_switch ||
		   ((type = JsonbIteratorNext(&it, &v, false)) != WJB_DONE))
	{
		redo_switch = false;
		switch (type)
		{
			case WJB_BEGIN_ARRAY:
				if (!first)
					appendBinaryStringInfo(out, ", ", 2);
				first = true;

				if (!v.val.array.rawScalar)
					appendStringInfoChar(out, '[');
				level++;
				break;
			case WJB_BEGIN_OBJECT:
				if (!first)
					appendBinaryStringInfo(out, ", ", 2);
				first = true;
				appendStringInfoCharMacro(out, '{');

				level++;
				break;
			case WJB_KEY:
				if (!first)
					appendBinaryStringInfo(out, ", ", 2);
				first = true;

				/* json rules guarantee this is a string */
				jsonb_put_escaped_value(out, &v);
				appendBinaryStringInfo(out, ": ", 2);

				type = JsonbIteratorNext(&it, &v, false);
				if (type == WJB_VALUE)
				{
					first = false;
					jsonb_put_escaped_value(out, &v);
				}
				else
				{
					Assert(type == WJB_BEGIN_OBJECT || type == WJB_BEGIN_ARRAY);

					/*
					 * We need to rerun the current switch() since we need to
					 * output the object which we just got from the iterator
					 * before calling the iterator again.
					 */
					redo_switch = true;
				}
				break;
			case WJB_ELEM:
				if (!first)
					appendBinaryStringInfo(out, ", ", 2);
				else
					first = false;

				jsonb_put_escaped_value(out, &v);
				break;
			case WJB_END_ARRAY:
				level--;
				if (!v.val.array.rawScalar)
					appendStringInfoChar(out, ']');
				first = false;
				break;
			case WJB_END_OBJECT:
				level--;
				appendStringInfoCharMacro(out, '}');
				first = false;
				break;
			default:
				elog(ERROR, "unknown flag of jsonb iterator");
		}
	}

	Assert(level == 0);

	return out->data;
}
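The pattern described in the comment, passing in your own StringInfo to avoid a later strlen(), looks roughly like this sketch (assuming this API variant, where the superheader argument is the varlena payload of a Jsonb datum):

static text *
jsonb_to_text_value(Jsonb *jb)
{
	StringInfoData str;

	initStringInfo(&str);
	(void) JsonbToCString(&str, VARDATA(jb), VARSIZE(jb));

	/* str.len is already known, so no strlen() call is needed */
	return cstring_to_text_with_len(str.data, str.len);
}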
Example #20
/*
 * Export data out of GPDB.
 */
Datum
gphdfsprotocol_export(PG_FUNCTION_ARGS)
{
	URL_FILE *myData;
	char     *data;
	int       datlen;
	size_t    wrote = 0;
	static char	ebuf[512] = {0};
	int	    	ebuflen = 512;

	/* Must be called via the external table format manager */
	if (!CALLED_AS_EXTPROTOCOL(fcinfo))
		elog(ERROR, "cannot execute gphdfsprotocol_export outside protocol manager");

	/* Get our internal description of the protocol */
	myData = (URL_FILE *) EXTPROTOCOL_GET_USER_CTX(fcinfo);

	/* =======================================================================
	 *                            DO CLOSE
	 * ======================================================================= */
	if (EXTPROTOCOL_IS_LAST_CALL(fcinfo))
	{
		if (myData)
			url_fclose(myData, true, "gphdfs protocol");
		PG_RETURN_INT32(0);
	}

	/* =======================================================================
	 *                            DO OPEN
	 * ======================================================================= */
	if (myData == NULL)
	{
		myData = gphdfs_fopen(fcinfo, true);
		EXTPROTOCOL_SET_USER_CTX(fcinfo, myData);

		/* add schema info to pipe */
		StringInfo schema_data = makeStringInfo();

		Relation relation = FORMATTER_GET_RELATION(fcinfo);
		ExtTableEntry *exttbl = GetExtTableEntry(relation->rd_id);
		if (fmttype_is_avro(exttbl->fmtcode) || fmttype_is_parquet(exttbl->fmtcode) )
		{
			int relNameLen = strlen(relation->rd_rel->relname.data);
			appendIntToBuffer(schema_data, relNameLen);
			appendBinaryStringInfo(schema_data, relation->rd_rel->relname.data, relNameLen);

			int ncolumns = relation->rd_att->natts;
			appendIntToBuffer(schema_data, ncolumns);
			int i = 0;
			for (; i< ncolumns; i++)
			{
				Oid type = relation->rd_att->attrs[i]->atttypid;

				/* add attname, atttypid, attnotnull, attndims to the schema_data field */
				int attNameLen = strlen(relation->rd_att->attrs[i]->attname.data);
				appendIntToBuffer(schema_data, attNameLen);
				appendBinaryStringInfo(schema_data, relation->rd_att->attrs[i]->attname.data, attNameLen);

				appendIntToBuffer(schema_data, type);

				bool notNull = relation->rd_att->attrs[i]->attnotnull;
				appendInt1ToBuffer(schema_data, notNull?1:0);

				appendIntToBuffer(schema_data, relation->rd_att->attrs[i]->attndims);

				/* add type delimiter; for a UDT it can be any char */
				char delim = 0;
				int16 typlen;
				bool typbyval;
				char typalign;
				Oid typioparam;
				Oid func;
				get_type_io_data(type, IOFunc_input, &typlen, &typbyval, &typalign, &delim, &typioparam, &func);
				appendInt1ToBuffer(schema_data, delim);
			}

			StringInfo schema_head = makeStringInfo();
			appendIntToBuffer(schema_head, schema_data->len + 2);
			appendInt2ToBuffer(schema_head, 2);

			url_execute_fwrite(schema_head->data, schema_head->len, myData, NULL);
			url_execute_fwrite(schema_data->data, schema_data->len, myData, NULL);

			pfree(schema_head->data);
			pfree(schema_data->data);
		}
	}


	/* =======================================================================
	 *                            DO THE EXPORT
	 * ======================================================================= */
	data   = EXTPROTOCOL_GET_DATABUF(fcinfo);
	datlen = EXTPROTOCOL_GET_DATALEN(fcinfo);

	if (datlen > 0)
		wrote = url_execute_fwrite(data, datlen, myData, NULL);

	if (url_ferror(myData, wrote, ebuf, ebuflen))
	{
		ereport(ERROR,
				(errcode_for_file_access(),
				 strlen(ebuf) > 0 ? errmsg("could not write to external resource:\n%s",ebuf) :
				 errmsg("could not write to external resource: %m")));
	}

	PG_RETURN_INT32((int)wrote);
}
Example #21
/*
 * Write a tuple to the output stream, in the most efficient format possible.
 */
static void
pglogical_write_tuple(StringInfo out, PGLogicalOutputData *data,
					   Relation rel, HeapTuple tuple)
{
	TupleDesc	desc;
	Datum		values[MaxTupleAttributeNumber];
	bool		isnull[MaxTupleAttributeNumber];
	int			i;
	uint16		nliveatts = 0;

	desc = RelationGetDescr(rel);

	pq_sendbyte(out, 'T');			/* sending TUPLE */

	for (i = 0; i < desc->natts; i++)
	{
		if (desc->attrs[i]->attisdropped)
			continue;
		nliveatts++;
	}
	pq_sendint(out, nliveatts, 2);

	/* try to allocate enough memory from the get go */
	enlargeStringInfo(out, tuple->t_len +
					  nliveatts * (1 + 4));

	/*
	 * XXX: should this prove to be a relevant bottleneck, it might be
	 * interesting to inline heap_deform_tuple() here, we don't actually need
	 * the information in the form we get from it.
	 */
	heap_deform_tuple(tuple, desc, values, isnull);

	for (i = 0; i < desc->natts; i++)
	{
		HeapTuple	typtup;
		Form_pg_type typclass;
		Form_pg_attribute att = desc->attrs[i];
		char		transfer_type;

		/* skip dropped columns */
		if (att->attisdropped)
			continue;

		if (isnull[i])
		{
			pq_sendbyte(out, 'n');	/* null column */
			continue;
		}
		else if (att->attlen == -1 && VARATT_IS_EXTERNAL_ONDISK(values[i]))
		{
			pq_sendbyte(out, 'u');	/* unchanged toast column */
			continue;
		}

		typtup = SearchSysCache1(TYPEOID, ObjectIdGetDatum(att->atttypid));
		if (!HeapTupleIsValid(typtup))
			elog(ERROR, "cache lookup failed for type %u", att->atttypid);
		typclass = (Form_pg_type) GETSTRUCT(typtup);

		transfer_type = decide_datum_transfer(att, typclass,
											  data->allow_internal_basetypes,
											  data->allow_binary_basetypes);
		pq_sendbyte(out, transfer_type);
		switch (transfer_type)
		{
			case 'b':	/* internal-format binary data follows */

				/* pass by value */
				if (att->attbyval)
				{
					pq_sendint(out, att->attlen, 4); /* length */

					enlargeStringInfo(out, att->attlen);
					store_att_byval(out->data + out->len, values[i],
									att->attlen);
					out->len += att->attlen;
					out->data[out->len] = '\0';
				}
				/* fixed length non-varlena pass-by-reference type */
				else if (att->attlen > 0)
				{
					pq_sendint(out, att->attlen, 4); /* length */

					appendBinaryStringInfo(out, DatumGetPointer(values[i]),
										   att->attlen);
				}
				/* varlena type */
				else if (att->attlen == -1)
				{
					char *data = DatumGetPointer(values[i]);

					/* send indirect datums inline */
					if (VARATT_IS_EXTERNAL_INDIRECT(values[i]))
					{
						struct varatt_indirect redirect;
						VARATT_EXTERNAL_GET_POINTER(redirect, data);
						data = (char *) redirect.pointer;
					}

					Assert(!VARATT_IS_EXTERNAL(data));

					pq_sendint(out, VARSIZE_ANY(data), 4); /* length */

					appendBinaryStringInfo(out, data, VARSIZE_ANY(data));
				}
				else
					elog(ERROR, "unsupported tuple type");

				break;

			case 's': /* binary send/recv data follows */
				{
					bytea	   *outputbytes;
					int			len;

					outputbytes = OidSendFunctionCall(typclass->typsend,
													  values[i]);

					len = VARSIZE(outputbytes) - VARHDRSZ;
					pq_sendint(out, len, 4); /* length */
					pq_sendbytes(out, VARDATA(outputbytes), len); /* data */
					pfree(outputbytes);
				}
				break;

			default:
				{
					char   	   *outputstr;
					int			len;

					outputstr =	OidOutputFunctionCall(typclass->typoutput,
													  values[i]);
					len = strlen(outputstr) + 1;
					pq_sendint(out, len, 4); /* length */
					appendBinaryStringInfo(out, outputstr, len); /* data */
					pfree(outputstr);
				}
		}

		ReleaseSysCache(typtup);
	}
}
Example #22
/**
 * @brief Performs data loading.
 *
 * Invokes the pg_bulkload() user-defined function with the given parameters
 * in a single transaction.
 *
 * @return exitcode (0 on success, E_PG_USER if any rows were rejected).
 */
static int
LoaderLoadMain(List *options)
{
	PGresult	   *res;
	const char	   *params[1];
	StringInfoData	buf;
	int				encoding;
	int				errors;
	ListCell	   *cell;

	if (options == NIL)
		ereport(ERROR,
			(errcode(EXIT_FAILURE),
			 errmsg("requires control file or command line options")));

	initStringInfo(&buf);
	reconnect(ERROR);
	encoding = PQclientEncoding(connection);

	elog(NOTICE, "BULK LOAD START");

	/* form options as text[] */
	appendStringInfoString(&buf, "{\"");
	foreach (cell, options)
	{
		const char *item = lfirst(cell);

		if (buf.len > 2)
			appendStringInfoString(&buf, "\",\"");

		/* escape " and \ */
		while (*item)
		{
			if (*item == '"' || *item == '\\')
			{
				appendStringInfoChar(&buf, '\\');
				appendStringInfoChar(&buf, *item);
				item++;
			}
			else if (!IS_HIGHBIT_SET(*item))
			{
				appendStringInfoChar(&buf, *item);
				item++;
			}
			else
			{
				int	n = PQmblen(item, encoding);
				appendBinaryStringInfo(&buf, item, n);
				item += n;
			}
		}
	}
	appendStringInfoString(&buf, "\"}");

	command("BEGIN", 0, NULL);
	params[0] = buf.data;
	res = execute("SELECT * FROM pg_bulkload($1)", 1, params);
	if (PQresultStatus(res) == PGRES_COPY_IN)
	{
		PQclear(res);
		res = RemoteLoad(connection, stdin, type_binary);
		if (PQresultStatus(res) != PGRES_TUPLES_OK)
			elog(ERROR, "copy failed: %s", PQerrorMessage(connection));
	}
	command("COMMIT", 0, NULL);

	errors = atoi(PQgetvalue(res, 0, 2)) +	/* parse errors */
			 atoi(PQgetvalue(res, 0, 3));	/* duplicate errors */

	elog(NOTICE, "BULK LOAD END\n"
				 "\t%s Rows skipped.\n"
				 "\t%s Rows successfully loaded.\n"
				 "\t%s Rows not loaded due to parse errors.\n"
				 "\t%s Rows not loaded due to duplicate errors.\n"
				 "\t%s Rows replaced with new rows.",
				 PQgetvalue(res, 0, 0), PQgetvalue(res, 0, 1),
				 PQgetvalue(res, 0, 2), PQgetvalue(res, 0, 3),
				 PQgetvalue(res, 0, 4));
	PQclear(res);

	disconnect();
	termStringInfo(&buf);

	if (errors > 0)
	{
		elog(WARNING, "some rows were not loaded due to errors.");
		return E_PG_USER;
	}
	else
		return 0;	/* succeeded without errors */
}
Example #23
/*
 * Write an int4 to the buffer
 */
static void appendIntToBuffer(StringInfo buf, int val)
{
	uint32 n32 = htonl((uint32) val);
	appendBinaryStringInfo(buf, (char *) &n32, 4);
}
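Together with the appendInt1ToBuffer and appendInt2ToBuffer helpers shown in the other examples, this composes into simple length-prefixed, big-endian records, much like the schema header written by gphdfsprotocol_export. A rough sketch (assumed to live in the same file, since the helpers are static):

StringInfo payload = makeStringInfo();
StringInfo head = makeStringInfo();

appendIntToBuffer(payload, 42);        /* an int4 in network byte order */
appendInt1ToBuffer(payload, 1);        /* a single flag byte */

/* length-prefix the record; both buffers then go to the output stream */
appendIntToBuffer(head, payload->len);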
Example #24
/*
 * _outNode -
 *	  converts a Node into a binary string and appends it to 'str'
 */
static void
_outNode(StringInfo str, void *obj)
{
	if (obj == NULL)
	{
		int16 tg = 0;
		appendBinaryStringInfo(str, (const char *)&tg, sizeof(int16));
		return;
	}
	else if (IsA(obj, List) ||IsA(obj, IntList) || IsA(obj, OidList))
		_outList(str, obj);
	else if (IsA(obj, Integer) ||
			 IsA(obj, Float) ||
			 IsA(obj, String) ||
			 IsA(obj, Null) ||
			 IsA(obj, BitString))
	{
		_outValue(str, obj);
	}
	else
	{

		switch (nodeTag(obj))
		{
			case T_PlannedStmt:
				_outPlannedStmt(str,obj);
				break;
			case T_Plan:
				_outPlan(str, obj);
				break;
			case T_Result:
				_outResult(str, obj);
				break;
			case T_Repeat:
				_outRepeat(str, obj);
				break;
			case T_Append:
				_outAppend(str, obj);
				break;
			case T_Sequence:
				_outSequence(str, obj);
				break;
			case T_BitmapAnd:
				_outBitmapAnd(str, obj);
				break;
			case T_BitmapOr:
				_outBitmapOr(str, obj);
				break;
			case T_Scan:
				_outScan(str, obj);
				break;
			case T_SeqScan:
				_outSeqScan(str, obj);
				break;
			case T_AppendOnlyScan:
				_outAppendOnlyScan(str, obj);
				break;
			case T_AOCSScan:
				_outAOCSScan(str, obj);
				break;
			case T_TableScan:
				_outTableScan(str, obj);
				break;
			case T_DynamicTableScan:
				_outDynamicTableScan(str, obj);
				break;
			case T_ExternalScan:
				_outExternalScan(str, obj);
				break;
			case T_IndexScan:
				_outIndexScan(str, obj);
				break;
			case T_DynamicIndexScan:
				_outDynamicIndexScan(str, obj);
				break;
			case T_BitmapIndexScan:
				_outBitmapIndexScan(str, obj);
				break;
			case T_BitmapHeapScan:
				_outBitmapHeapScan(str, obj);
				break;
			case T_BitmapAppendOnlyScan:
				_outBitmapAppendOnlyScan(str, obj);
				break;
			case T_BitmapTableScan:
				_outBitmapTableScan(str, obj);
				break;
			case T_TidScan:
				_outTidScan(str, obj);
				break;
			case T_SubqueryScan:
				_outSubqueryScan(str, obj);
				break;
			case T_FunctionScan:
				_outFunctionScan(str, obj);
				break;
			case T_ValuesScan:
				_outValuesScan(str, obj);
				break;
			case T_Join:
				_outJoin(str, obj);
				break;
			case T_NestLoop:
				_outNestLoop(str, obj);
				break;
			case T_MergeJoin:
				_outMergeJoin(str, obj);
				break;
			case T_HashJoin:
				_outHashJoin(str, obj);
				break;
			case T_Agg:
				_outAgg(str, obj);
				break;
			case T_WindowKey:
				_outWindowKey(str, obj);
				break;
			case T_Window:
				_outWindow(str, obj);
				break;
			case T_TableFunctionScan:
				_outTableFunctionScan(str, obj);
				break;
			case T_Material:
				_outMaterial(str, obj);
				break;
			case T_ShareInputScan:
				_outShareInputScan(str, obj);
				break;
			case T_Sort:
				_outSort(str, obj);
				break;
			case T_Unique:
				_outUnique(str, obj);
				break;
			case T_SetOp:
				_outSetOp(str, obj);
				break;
			case T_Limit:
				_outLimit(str, obj);
				break;
			case T_Hash:
				_outHash(str, obj);
				break;
			case T_Motion:
				_outMotion(str, obj);
				break;
			case T_DML:
				_outDML(str, obj);
				break;
			case T_SplitUpdate:
				_outSplitUpdate(str, obj);
				break;
			case T_RowTrigger:
				_outRowTrigger(str, obj);
				break;
			case T_AssertOp:
				_outAssertOp(str, obj);
				break;
			case T_PartitionSelector:
				_outPartitionSelector(str, obj);
				break;
			case T_Alias:
				_outAlias(str, obj);
				break;
			case T_RangeVar:
				_outRangeVar(str, obj);
				break;
			case T_IntoClause:
				_outIntoClause(str, obj);
				break;
			case T_Var:
				_outVar(str, obj);
				break;
			case T_Const:
				_outConst(str, obj);
				break;
			case T_Param:
				_outParam(str, obj);
				break;
			case T_Aggref:
				_outAggref(str, obj);
				break;
			case T_AggOrder:
				_outAggOrder(str, obj);
				break;
			case T_WindowRef:
				_outWindowRef(str, obj);
				break;
			case T_ArrayRef:
				_outArrayRef(str, obj);
				break;
			case T_FuncExpr:
				_outFuncExpr(str, obj);
				break;
			case T_OpExpr:
				_outOpExpr(str, obj);
				break;
			case T_DistinctExpr:
				_outDistinctExpr(str, obj);
				break;
			case T_ScalarArrayOpExpr:
				_outScalarArrayOpExpr(str, obj);
				break;
			case T_BoolExpr:
				_outBoolExpr(str, obj);
				break;
			case T_SubLink:
				_outSubLink(str, obj);
				break;
			case T_SubPlan:
				_outSubPlan(str, obj);
				break;
			case T_FieldSelect:
				_outFieldSelect(str, obj);
				break;
			case T_FieldStore:
				_outFieldStore(str, obj);
				break;
			case T_RelabelType:
				_outRelabelType(str, obj);
				break;
			case T_ConvertRowtypeExpr:
				_outConvertRowtypeExpr(str, obj);
				break;
			case T_CaseExpr:
				_outCaseExpr(str, obj);
				break;
			case T_CaseWhen:
				_outCaseWhen(str, obj);
				break;
			case T_CaseTestExpr:
				_outCaseTestExpr(str, obj);
				break;
			case T_ArrayExpr:
				_outArrayExpr(str, obj);
				break;
			case T_RowExpr:
				_outRowExpr(str, obj);
				break;
			case T_RowCompareExpr:
				_outRowCompareExpr(str, obj);
				break;
			case T_CoalesceExpr:
				_outCoalesceExpr(str, obj);
				break;
			case T_MinMaxExpr:
				_outMinMaxExpr(str, obj);
				break;
			case T_NullIfExpr:
				_outNullIfExpr(str, obj);
				break;
			case T_NullTest:
				_outNullTest(str, obj);
				break;
			case T_BooleanTest:
				_outBooleanTest(str, obj);
				break;
			case T_XmlExpr:
				_outXmlExpr(str, obj);
				break;
			case T_CoerceToDomain:
				_outCoerceToDomain(str, obj);
				break;
			case T_CoerceToDomainValue:
				_outCoerceToDomainValue(str, obj);
				break;
			case T_SetToDefault:
				_outSetToDefault(str, obj);
				break;
			case T_CurrentOfExpr:
				_outCurrentOfExpr(str, obj);
				break;
			case T_TargetEntry:
				_outTargetEntry(str, obj);
				break;
			case T_RangeTblRef:
				_outRangeTblRef(str, obj);
				break;
			case T_JoinExpr:
				_outJoinExpr(str, obj);
				break;
			case T_FromExpr:
				_outFromExpr(str, obj);
				break;
			case T_Flow:
				_outFlow(str, obj);
				break;

			case T_Path:
				_outPath(str, obj);
				break;
			case T_IndexPath:
				_outIndexPath(str, obj);
				break;
			case T_BitmapHeapPath:
				_outBitmapHeapPath(str, obj);
				break;
			case T_BitmapAppendOnlyPath:
				_outBitmapAppendOnlyPath(str, obj);
				break;
			case T_BitmapAndPath:
				_outBitmapAndPath(str, obj);
				break;
			case T_BitmapOrPath:
				_outBitmapOrPath(str, obj);
				break;
			case T_TidPath:
				_outTidPath(str, obj);
				break;
			case T_AppendPath:
				_outAppendPath(str, obj);
				break;
			case T_AppendOnlyPath:
				_outAppendOnlyPath(str, obj);
				break;
			case T_AOCSPath:
				_outAOCSPath(str, obj);
				break;
			case T_ResultPath:
				_outResultPath(str, obj);
				break;
			case T_MaterialPath:
				_outMaterialPath(str, obj);
				break;
			case T_UniquePath:
				_outUniquePath(str, obj);
				break;
			case T_NestPath:
				_outNestPath(str, obj);
				break;
			case T_MergePath:
				_outMergePath(str, obj);
				break;
			case T_HashPath:
				_outHashPath(str, obj);
				break;
            case T_CdbMotionPath:
                _outCdbMotionPath(str, obj);
                break;
			case T_PlannerInfo:
				_outPlannerInfo(str, obj);
				break;
			case T_RelOptInfo:
				_outRelOptInfo(str, obj);
				break;
			case T_IndexOptInfo:
				_outIndexOptInfo(str, obj);
				break;
			case T_CdbRelDedupInfo:
				_outCdbRelDedupInfo(str, obj);
				break;
			case T_PathKeyItem:
				_outPathKeyItem(str, obj);
				break;
			case T_RestrictInfo:
				_outRestrictInfo(str, obj);
				break;
			case T_InnerIndexscanInfo:
				_outInnerIndexscanInfo(str, obj);
				break;
			case T_OuterJoinInfo:
				_outOuterJoinInfo(str, obj);
				break;
			case T_InClauseInfo:
				_outInClauseInfo(str, obj);
				break;
			case T_AppendRelInfo:
				_outAppendRelInfo(str, obj);
				break;


			case T_GrantStmt:
				_outGrantStmt(str, obj);
				break;
			case T_PrivGrantee:
				_outPrivGrantee(str, obj);
				break;
			case T_FuncWithArgs:
				_outFuncWithArgs(str, obj);
				break;
			case T_GrantRoleStmt:
				_outGrantRoleStmt(str, obj);
				break;
			case T_LockStmt:
				_outLockStmt(str, obj);
				break;

			case T_CreateStmt:
				_outCreateStmt(str, obj);
				break;
			case T_ColumnReferenceStorageDirective:
				_outColumnReferenceStorageDirective(str, obj);
				break;
			case T_PartitionBy:
				_outPartitionBy(str, obj);
				break;
			case T_PartitionElem:
				_outPartitionElem(str, obj);
				break;
			case T_PartitionRangeItem:
				_outPartitionRangeItem(str, obj);
				break;
			case T_PartitionBoundSpec:
				_outPartitionBoundSpec(str, obj);
				break;
			case T_PartitionSpec:
				_outPartitionSpec(str, obj);
				break;
			case T_PartitionValuesSpec:
				_outPartitionValuesSpec(str, obj);
				break;
			case T_Partition:
				_outPartition(str, obj);
				break;
			case T_PartitionRule:
				_outPartitionRule(str, obj);
				break;
			case T_PartitionNode:
				_outPartitionNode(str, obj);
				break;
			case T_PgPartRule:
				_outPgPartRule(str, obj);
				break;

			case T_SegfileMapNode:
				_outSegfileMapNode(str, obj);
				break;

			case T_ExtTableTypeDesc:
				_outExtTableTypeDesc(str, obj);
				break;
            case T_CreateExternalStmt:
				_outCreateExternalStmt(str, obj);
				break;

			case T_IndexStmt:
				_outIndexStmt(str, obj);
				break;
			case T_ReindexStmt:
				_outReindexStmt(str, obj);
				break;

			case T_ConstraintsSetStmt:
				_outConstraintsSetStmt(str, obj);
				break;

			case T_CreateFunctionStmt:
				_outCreateFunctionStmt(str, obj);
				break;
			case T_FunctionParameter:
				_outFunctionParameter(str, obj);
				break;
			case T_RemoveFuncStmt:
				_outRemoveFuncStmt(str, obj);
				break;
			case T_AlterFunctionStmt:
				_outAlterFunctionStmt(str, obj);
				break;

			case T_DefineStmt:
				_outDefineStmt(str,obj);
				break;

			case T_CompositeTypeStmt:
				_outCompositeTypeStmt(str,obj);
				break;
			case T_CreateCastStmt:
				_outCreateCastStmt(str,obj);
				break;
			case T_DropCastStmt:
				_outDropCastStmt(str,obj);
				break;
			case T_CreateOpClassStmt:
				_outCreateOpClassStmt(str,obj);
				break;
			case T_CreateOpClassItem:
				_outCreateOpClassItem(str,obj);
				break;
			case T_RemoveOpClassStmt:
				_outRemoveOpClassStmt(str,obj);
				break;
			case T_CreateConversionStmt:
				_outCreateConversionStmt(str,obj);
				break;


			case T_ViewStmt:
				_outViewStmt(str, obj);
				break;
			case T_RuleStmt:
				_outRuleStmt(str, obj);
				break;
			case T_DropStmt:
				_outDropStmt(str, obj);
				break;
			case T_DropPropertyStmt:
				_outDropPropertyStmt(str, obj);
				break;
			case T_DropOwnedStmt:
				_outDropOwnedStmt(str, obj);
				break;
			case T_ReassignOwnedStmt:
				_outReassignOwnedStmt(str, obj);
				break;
			case T_TruncateStmt:
				_outTruncateStmt(str, obj);
				break;
			case T_AlterTableStmt:
				_outAlterTableStmt(str, obj);
				break;
			case T_AlterTableCmd:
				_outAlterTableCmd(str, obj);
				break;
			case T_SetDistributionCmd:
				_outSetDistributionCmd(str, obj);
				break;
			case T_InheritPartitionCmd:
				_outInheritPartitionCmd(str, obj);
				break;

			case T_AlterPartitionCmd:
				_outAlterPartitionCmd(str, obj);
				break;
			case T_AlterPartitionId:
				_outAlterPartitionId(str, obj);
				break;

			case T_CreateRoleStmt:
				_outCreateRoleStmt(str, obj);
				break;
			case T_DropRoleStmt:
				_outDropRoleStmt(str, obj);
				break;
			case T_AlterRoleStmt:
				_outAlterRoleStmt(str, obj);
				break;
			case T_AlterRoleSetStmt:
				_outAlterRoleSetStmt(str, obj);
				break;

			case T_AlterObjectSchemaStmt:
				_outAlterObjectSchemaStmt(str, obj);
				break;

			case T_AlterOwnerStmt:
				_outAlterOwnerStmt(str, obj);
				break;

			case T_RenameStmt:
				_outRenameStmt(str, obj);
				break;

			case T_CreateSeqStmt:
				_outCreateSeqStmt(str, obj);
				break;
			case T_AlterSeqStmt:
				_outAlterSeqStmt(str, obj);
				break;
			case T_ClusterStmt:
				_outClusterStmt(str, obj);
				break;
			case T_CreatedbStmt:
				_outCreatedbStmt(str, obj);
				break;
			case T_DropdbStmt:
				_outDropdbStmt(str, obj);
				break;
			case T_CreateDomainStmt:
				_outCreateDomainStmt(str, obj);
				break;
			case T_AlterDomainStmt:
				_outAlterDomainStmt(str, obj);
				break;

			case T_TransactionStmt:
				_outTransactionStmt(str, obj);
				break;

			case T_NotifyStmt:
				_outNotifyStmt(str, obj);
				break;
			case T_DeclareCursorStmt:
				_outDeclareCursorStmt(str, obj);
				break;
			case T_SingleRowErrorDesc:
				_outSingleRowErrorDesc(str, obj);
				break;
			case T_CopyStmt:
				_outCopyStmt(str, obj);
				break;
			case T_ColumnDef:
				_outColumnDef(str, obj);
				break;
			case T_TypeName:
				_outTypeName(str, obj);
				break;
			case T_TypeCast:
				_outTypeCast(str, obj);
				break;
			case T_IndexElem:
				_outIndexElem(str, obj);
				break;
			case T_Query:
				_outQuery(str, obj);
				break;
			case T_SortClause:
				_outSortClause(str, obj);
				break;
			case T_GroupClause:
				_outGroupClause(str, obj);
				break;
			case T_GroupingClause:
				_outGroupingClause(str, obj);
				break;
			case T_GroupingFunc:
				_outGroupingFunc(str, obj);
				break;
			case T_Grouping:
				_outGrouping(str, obj);
				break;
			case T_GroupId:
				_outGroupId(str, obj);
				break;
			case T_WindowSpecParse:
				_outWindowSpecParse(str, obj);
				break;
			case T_WindowSpec:
				_outWindowSpec(str, obj);
				break;
			case T_WindowFrame:
				_outWindowFrame(str, obj);
				break;
			case T_WindowFrameEdge:
				_outWindowFrameEdge(str, obj);
				break;
			case T_PercentileExpr:
				_outPercentileExpr(str, obj);
				break;
			case T_RowMarkClause:
				_outRowMarkClause(str, obj);
				break;
			case T_WithClause:
				_outWithClause(str, obj);
				break;
			case T_CommonTableExpr:
				_outCommonTableExpr(str, obj);
				break;
			case T_SetOperationStmt:
				_outSetOperationStmt(str, obj);
				break;
			case T_RangeTblEntry:
				_outRangeTblEntry(str, obj);
				break;
			case T_A_Expr:
				_outAExpr(str, obj);
				break;
			case T_ColumnRef:
				_outColumnRef(str, obj);
				break;
			case T_ParamRef:
				_outParamRef(str, obj);
				break;
			case T_A_Const:
				_outAConst(str, obj);
				break;
			case T_A_Indices:
				_outA_Indices(str, obj);
				break;
			case T_A_Indirection:
				_outA_Indirection(str, obj);
				break;
			case T_ResTarget:
				_outResTarget(str, obj);
				break;
			case T_Constraint:
				_outConstraint(str, obj);
				break;
			case T_FkConstraint:
				_outFkConstraint(str, obj);
				break;
			case T_FuncCall:
				_outFuncCall(str, obj);
				break;
			case T_DefElem:
				_outDefElem(str, obj);
				break;
			case T_LockingClause:
				_outLockingClause(str, obj);
				break;
			case T_XmlSerialize:
				_outXmlSerialize(str, obj);
				break;

			case T_CreateSchemaStmt:
				_outCreateSchemaStmt(str, obj);
				break;
			case T_CreatePLangStmt:
				_outCreatePLangStmt(str, obj);
				break;
			case T_DropPLangStmt:
				_outDropPLangStmt(str, obj);
				break;
			case T_VacuumStmt:
				_outVacuumStmt(str, obj);
				break;
			case T_CdbProcess:
				_outCdbProcess(str, obj);
				break;
			case T_Slice:
				_outSlice(str, obj);
				break;
			case T_SliceTable:
				_outSliceTable(str, obj);
				break;
			case T_VariableResetStmt:
				_outVariableResetStmt(str, obj);
				break;

			case T_DMLActionExpr:
				_outDMLActionExpr(str, obj);
				break;

			case T_PartOidExpr:
				_outPartOidExpr(str, obj);
				break;

			case T_PartDefaultExpr:
				_outPartDefaultExpr(str, obj);
				break;

			case T_PartBoundExpr:
				_outPartBoundExpr(str, obj);
				break;

			case T_PartBoundInclusionExpr:
				_outPartBoundInclusionExpr(str, obj);
				break;

			case T_PartBoundOpenExpr:
				_outPartBoundOpenExpr(str, obj);
				break;

			case T_CreateTrigStmt:
				_outCreateTrigStmt(str, obj);
				break;

			case T_CreateFileSpaceStmt:
				_outCreateFileSpaceStmt(str, obj);
				break;

			case T_FileSpaceEntry:
				_outFileSpaceEntry(str, obj);
				break;

			case T_CreateTableSpaceStmt:
				_outCreateTableSpaceStmt(str, obj);
				break;

			case T_CreateQueueStmt:
				_outCreateQueueStmt(str, obj);
				break;
			case T_AlterQueueStmt:
				_outAlterQueueStmt(str, obj);
				break;
			case T_DropQueueStmt:
				_outDropQueueStmt(str, obj);
				break;

			case T_CommentStmt:
				_outCommentStmt(str, obj);
				break;
			case T_TableValueExpr:
				_outTableValueExpr(str, obj);
				break;
			case T_DenyLoginInterval:
				_outDenyLoginInterval(str, obj);
				break;
			case T_DenyLoginPoint:
				_outDenyLoginPoint(str, obj);
				break;

			case T_AlterTypeStmt:
				_outAlterTypeStmt(str, obj);
				break;
			case T_TupleDescNode:
				_outTupleDescNode(str, obj);
				break;

			default:
				elog(ERROR, "could not serialize unrecognized node type: %d",
						 (int) nodeTag(obj));
				break;
		}

	}
}
/*
 * Write an int2 to the buffer
 */
static void
appendInt2ToBuffer(StringInfo buf, uint16 val)
{
	uint16		n16 = htons((uint16) val);

	appendBinaryStringInfo(buf, (char *) &n16, 2);
}
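
/*
 * Companion sketch (not part of the original file): a 4-byte variant that
 * mirrors appendInt2ToBuffer above, again using appendBinaryStringInfo for
 * the raw copy.  htonl is assumed to be available, just as htons is.
 */
static void
appendInt4ToBuffer(StringInfo buf, uint32 val)
{
	uint32		n32 = htonl(val);

	appendBinaryStringInfo(buf, (char *) &n32, 4);
}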
Example #26
/*
 * CopyOps_BuildOneRowTo
 * Build one row message to be sent to remote nodes through COPY protocol
 */
char *
CopyOps_BuildOneRowTo(TupleDesc tupdesc, Datum *values, bool *nulls, int *len)
{
	bool		need_delim = false;
	char	   *res;
	int			i;
	FmgrInfo   *out_functions;
	Form_pg_attribute *attr = tupdesc->attrs;
	StringInfo	buf;

	/* Get info about the columns we need to process. */
	out_functions = (FmgrInfo *) palloc(tupdesc->natts * sizeof(FmgrInfo));
	for (i = 0; i < tupdesc->natts; i++)
	{
		Oid			out_func_oid;
		bool		isvarlena;

		/* Do not need any information for dropped attributes */
		if (attr[i]->attisdropped)
			continue;

		getTypeOutputInfo(attr[i]->atttypid,
						  &out_func_oid,
						  &isvarlena);
		fmgr_info(out_func_oid, &out_functions[i]);
	}

	/* Initialize output buffer */
	buf = makeStringInfo();

	for (i = 0; i < tupdesc->natts; i++)
	{
		Datum		value = values[i];
		bool		isnull = nulls[i];

		/* Do not need any information for dropped attributes */
		if (attr[i]->attisdropped)
			continue;

		if (need_delim)
			appendStringInfoCharMacro(buf, COPYOPS_DELIMITER);
		need_delim = true;

		if (isnull)
		{
			/* Null print value to client */
			appendBinaryStringInfo(buf, "\\N", strlen("\\N"));
		}
		else
		{
			char *string;
			string = OutputFunctionCall(&out_functions[i],
										value);
			attribute_out_text(buf, string);
			pfree(string);
		}
	}

	/* Record length of message */
	*len = buf->len;
	res = pstrdup(buf->data);
	pfree(out_functions);
	pfree(buf->data);
	pfree(buf);
	return res;
}
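
/*
 * Usage sketch (hedged, not from the original file): build the text-format
 * row for one tuple and hand it to whatever transport carries COPY data to
 * the remote node.  "send_copy_data" is a hypothetical placeholder for that
 * transport; tupdesc, values and nulls are assumed to come from the caller's
 * tuple slot.
 */
static void
copy_one_row_sketch(TupleDesc tupdesc, Datum *values, bool *nulls)
{
	int			len;
	char	   *row = CopyOps_BuildOneRowTo(tupdesc, values, nulls, &len);

	/* send_copy_data(conn, row, len);	-- hypothetical transport call */
	pfree(row);
}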
Example #27
/*
 * appendStringInfoString
 *
 * Append a null-terminated string to str.
 * Like appendStringInfo(str, "%s", s) but faster.
 */
void
appendStringInfoString(StringInfo str, const char *s)
{
	appendBinaryStringInfo(str, s, strlen(s));
}
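
/*
 * Equivalence sketch (not part of the original file): both appends below
 * leave the same bytes in the buffer; the first one skips the printf-style
 * formatting path mentioned in the comment above.  Assumes a backend memory
 * context for the palloc done by initStringInfo.
 */
static void
append_string_demo(void)
{
	StringInfoData buf;

	initStringInfo(&buf);
	appendStringInfoString(&buf, "SELECT ");	/* fast path */
	appendStringInfo(&buf, "%d", 1);			/* formatted path */
	pfree(buf.data);
}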
Example #28
/*
 * checkSharedDependencies
 *
 * Check whether there are shared dependency entries for a given shared
 * object.	Returns a string containing a newline-separated list of object
 * descriptions that depend on the shared object, or NULL if none is found.
 * The size of the returned string is limited to about MAX_REPORTED_DEPS lines;
 * if there are more objects than that, the output is returned truncated at
 * that point while the full message is logged to the postmaster log.
 *
 * We can find three different kinds of dependencies: dependencies on objects
 * of the current database; dependencies on shared objects; and dependencies
 * on objects local to other databases.  We can (and do) provide descriptions
 * of the first two kinds of objects, but we can't do that for "remote"
 * objects, so we just provide a count of them.
 *
 * If we find a SHARED_DEPENDENCY_PIN entry, we can error out early.
 */
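/*
 * Caller-side sketch (hedged, not from this file): a DROP-style command
 * could consume the returned string roughly like this.  AuthIdRelationId,
 * the role OID and the prototype of checkSharedDependencies are assumed to
 * be in scope via the usual headers; the message wording is illustrative.
 */
static void
report_shared_deps_sketch(Oid roleid)
{
	char	   *detail = checkSharedDependencies(AuthIdRelationId, roleid);

	if (detail != NULL)
		ereport(ERROR,
				(errcode(ERRCODE_DEPENDENT_OBJECTS_STILL_EXIST),
				 errmsg("role %u cannot be dropped because some objects depend on it",
						roleid),
				 errdetail("%s", detail)));
}
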
char *
checkSharedDependencies(Oid classId, Oid objectId)
{
	Relation	sdepRel;
	ScanKeyData key[2];
	SysScanDesc scan;
	HeapTuple	tup;
	int			numReportedDeps = 0;
	int			numNotReportedDeps = 0;
	int			numNotReportedDbs = 0;
	List	   *remDeps = NIL;
	ListCell   *cell;
	ObjectAddress object;
	StringInfoData descs;
	StringInfoData alldescs;

	/*
	 * We limit the number of dependencies reported to the client to
	 * MAX_REPORTED_DEPS, since client software may not deal well with
	 * enormous error strings.	The server log always gets a full report,
	 * which is collected in a separate StringInfo if and only if we detect
	 * that the client report is going to be truncated.
	 */
#define MAX_REPORTED_DEPS 100

	initStringInfo(&descs);
	initStringInfo(&alldescs);

	sdepRel = heap_open(SharedDependRelationId, AccessShareLock);

	ScanKeyInit(&key[0],
				Anum_pg_shdepend_refclassid,
				BTEqualStrategyNumber, F_OIDEQ,
				ObjectIdGetDatum(classId));
	ScanKeyInit(&key[1],
				Anum_pg_shdepend_refobjid,
				BTEqualStrategyNumber, F_OIDEQ,
				ObjectIdGetDatum(objectId));

	scan = systable_beginscan(sdepRel, SharedDependReferenceIndexId, true,
							  SnapshotNow, 2, key);

	while (HeapTupleIsValid(tup = systable_getnext(scan)))
	{
		Form_pg_shdepend sdepForm = (Form_pg_shdepend) GETSTRUCT(tup);

		/* This case can be dispatched quickly */
		if (sdepForm->deptype == SHARED_DEPENDENCY_PIN)
		{
			object.classId = classId;
			object.objectId = objectId;
			object.objectSubId = 0;
			ereport(ERROR,
					(errcode(ERRCODE_DEPENDENT_OBJECTS_STILL_EXIST),
					 errmsg("cannot drop %s because it is required by the database system",
							getObjectDescription(&object))));
		}

		object.classId = sdepForm->classid;
		object.objectId = sdepForm->objid;
		object.objectSubId = 0;

		/*
		 * If it's a dependency local to this database or it's a shared
		 * object, describe it.
		 *
		 * If it's a remote dependency, keep track of it so we can report the
		 * number of them later.
		 */
		if (sdepForm->dbid == MyDatabaseId)
		{
			if (numReportedDeps < MAX_REPORTED_DEPS)
			{
				numReportedDeps++;
				storeObjectDescription(&descs, LOCAL_OBJECT, &object,
									   sdepForm->deptype, 0);
			}
			else
			{
				numNotReportedDeps++;
				/* initialize the server-only log line */
				if (alldescs.len == 0)
					appendBinaryStringInfo(&alldescs, descs.data, descs.len);

				storeObjectDescription(&alldescs, LOCAL_OBJECT, &object,
									   sdepForm->deptype, 0);
			}
		}
		else if (sdepForm->dbid == InvalidOid)
		{
			if (numReportedDeps < MAX_REPORTED_DEPS)
			{
				numReportedDeps++;
				storeObjectDescription(&descs, SHARED_OBJECT, &object,
									   sdepForm->deptype, 0);
			}
			else
			{
				numNotReportedDeps++;
				/* initialize the server-only log line */
				if (alldescs.len == 0)
					appendBinaryStringInfo(&alldescs, descs.data, descs.len);

				storeObjectDescription(&alldescs, SHARED_OBJECT, &object,
									   sdepForm->deptype, 0);
			}
		}
		else
		{
			/* It's not local nor shared, so it must be remote. */
			remoteDep  *dep;
			bool		stored = false;

			/*
			 * XXX this info is kept on a simple List.	Maybe it's not good
			 * for performance, but using a hash table seems needlessly
			 * complex.  The expected number of databases is not high anyway,
			 * I suppose.
			 */
			foreach(cell, remDeps)
			{
				dep = lfirst(cell);
				if (dep->dbOid == sdepForm->dbid)
				{
					dep->count++;
					stored = true;
					break;
				}
			}
			if (!stored)
			{
				dep = (remoteDep *) palloc(sizeof(remoteDep));
				dep->dbOid = sdepForm->dbid;
				dep->count = 1;
				remDeps = lappend(remDeps, dep);
			}
		}
	}
Example #29
/*
 * Process data received through the syslogger pipe.
 *
 * This routine interprets the log pipe protocol which sends log messages as
 * (hopefully atomic) chunks - such chunks are detected and reassembled here.
 *
 * The protocol has a header that starts with two nul bytes, then has a 16 bit
 * length, the pid of the sending process, and a flag to indicate if it is
 * the last chunk in a message. Incomplete chunks are saved until we read some
 * more, and non-final chunks are accumulated until we get the final chunk.
 *
 * All of this is to avoid 2 problems:
 * . partial messages being written to logfiles (messes rotation), and
 * . messages from different backends being interleaved (messages garbled).
 *
 * Any non-protocol messages are written out directly. These should only come
 * from non-PostgreSQL sources, however (e.g. third party libraries writing to
 * stderr).
 *
 * logbuffer is the data input buffer, and *bytes_in_logbuffer is the number
 * of bytes present.  On exit, any not-yet-eaten data is left-justified in
 * logbuffer, and *bytes_in_logbuffer is updated.
 */
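/*
 * For reference, a sketch of the chunk header this routine expects (field
 * order and widths as described in the comment above; the real definition
 * lives in the syslogger headers, so this copy is illustrative only):
 */
typedef struct PipeProtoHeaderSketch
{
	char		nuls[2];		/* always \0\0, marks a protocol chunk */
	uint16		len;			/* payload length, excluding the header */
	int32		pid;			/* PID of the sending process */
	char		is_last;		/* 'f'/'t' for stderr, 'F'/'T' for CSV log */
} PipeProtoHeaderSketch;
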
static void
process_pipe_input(char *logbuffer, int *bytes_in_logbuffer)
{
	char	   *cursor = logbuffer;
	int			count = *bytes_in_logbuffer;
	int			dest = LOG_DESTINATION_STDERR;

	/* While we have enough for a header, process data... */
	while (count >= (int) sizeof(PipeProtoHeader))
	{
		PipeProtoHeader p;
		int			chunklen;

		/* Do we have a valid header? */
		memcpy(&p, cursor, sizeof(PipeProtoHeader));
		if (p.nuls[0] == '\0' && p.nuls[1] == '\0' &&
			p.len > 0 && p.len <= PIPE_MAX_PAYLOAD &&
			p.pid != 0 &&
			(p.is_last == 't' || p.is_last == 'f' ||
			 p.is_last == 'T' || p.is_last == 'F'))
		{
			List	   *buffer_list;
			ListCell   *cell;
			save_buffer *existing_slot = NULL,
					   *free_slot = NULL;
			StringInfo	str;

			chunklen = PIPE_HEADER_SIZE + p.len;

			/* Fall out of loop if we don't have the whole chunk yet */
			if (count < chunklen)
				break;

			dest = (p.is_last == 'T' || p.is_last == 'F') ?
				LOG_DESTINATION_CSVLOG : LOG_DESTINATION_STDERR;

			/* Locate any existing buffer for this source pid */
			buffer_list = buffer_lists[p.pid % NBUFFER_LISTS];
			foreach(cell, buffer_list)
			{
				save_buffer *buf = (save_buffer *) lfirst(cell);

				if (buf->pid == p.pid)
				{
					existing_slot = buf;
					break;
				}
				if (buf->pid == 0 && free_slot == NULL)
					free_slot = buf;
			}

			if (p.is_last == 'f' || p.is_last == 'F')
			{
				/*
				 * Save a complete non-final chunk in a per-pid buffer
				 */
				if (existing_slot != NULL)
				{
					/* Add chunk to data from preceding chunks */
					str = &(existing_slot->data);
					appendBinaryStringInfo(str,
										   cursor + PIPE_HEADER_SIZE,
										   p.len);
				}
				else
				{
					/* First chunk of message, save in a new buffer */
					if (free_slot == NULL)
					{
						/*
						 * Need a free slot, but there isn't one in the list,
						 * so create a new one and extend the list with it.
						 */
						free_slot = palloc(sizeof(save_buffer));
						buffer_list = lappend(buffer_list, free_slot);
						buffer_lists[p.pid % NBUFFER_LISTS] = buffer_list;
					}
					free_slot->pid = p.pid;
					str = &(free_slot->data);
					initStringInfo(str);
					appendBinaryStringInfo(str,
										   cursor + PIPE_HEADER_SIZE,
										   p.len);
				}
			}
			else
			{
				/*
				 * Final chunk --- add it to anything saved for that pid, and
				 * either way write the whole thing out.
				 */
				if (existing_slot != NULL)
				{
					str = &(existing_slot->data);
					appendBinaryStringInfo(str,
										   cursor + PIPE_HEADER_SIZE,
										   p.len);
					write_syslogger_file(str->data, str->len, dest);
					/* Mark the buffer unused, and reclaim string storage */
					existing_slot->pid = 0;
					pfree(str->data);
				}
				else
				{
					/* The whole message was one chunk, evidently. */
					write_syslogger_file(cursor + PIPE_HEADER_SIZE, p.len,
										 dest);
				}
			}

			/* Finished processing this chunk */
			cursor += chunklen;
			count -= chunklen;
		}
Example #30
HeapTuple
CvtChunksToHeapTup(TupleChunkList tcList, SerTupInfo * pSerInfo)
{
	StringInfoData serData;
	TupleChunkListItem tcItem;
	int			i;
	HeapTuple	htup;
	TupleChunkType tcType;

	AssertArg(tcList != NULL);
	AssertArg(tcList->p_first != NULL);
	AssertArg(pSerInfo != NULL);

	tcItem = tcList->p_first;

	if (tcList->num_chunks == 1)
	{
		GetChunkType(tcItem, &tcType);

		if (tcType == TC_EMPTY)
		{
			/*
			 * the sender is indicating that there was a row with no attributes:
			 * return a NULL tuple
			 */
			clearTCList(NULL, tcList);

			htup = heap_form_tuple(pSerInfo->tupdesc, pSerInfo->values, pSerInfo->nulls);

			return htup;
		}
	}

	/*
	 * Dump all of the data in the tuple chunk list into a single StringInfo,
	 * so that we can convert it into a HeapTuple.	Check chunk types based on
	 * whether there is only one chunk, or multiple chunks.
	 *
	 * We know roughly how much space we'll need, so allocate it all in one go.
	 */
	initStringInfoOfSize(&serData, tcList->num_chunks * tcList->max_chunk_length);

	i = 0;
	do
	{
		/* Make sure that the type of this tuple chunk is correct! */

		GetChunkType(tcItem, &tcType);
		if (i == 0)
		{
			if (tcItem->p_next == NULL)
			{
				if (tcType != TC_WHOLE)
				{
					ereport(ERROR, (errcode(ERRCODE_PROTOCOL_VIOLATION),
									errmsg("Single chunk's type must be TC_WHOLE.")));
				}
			}
			else
				/* tcItem->p_next != NULL */
			{
				if (tcType != TC_PARTIAL_START)
				{
					ereport(ERROR, (errcode(ERRCODE_PROTOCOL_VIOLATION),
									errmsg("First chunk of collection must have type"
										   " TC_PARTIAL_START.")));
				}
			}
		}
		else
			/* i > 0 */
		{
			if (tcItem->p_next == NULL)
			{
				if (tcType != TC_PARTIAL_END)
				{
					ereport(ERROR, (errcode(ERRCODE_PROTOCOL_VIOLATION),
									errmsg("Last chunk of collection must have type"
										   " TC_PARTIAL_END.")));
				}
			}
			else
				/* tcItem->p_next != NULL */
			{
				if (tcType != TC_PARTIAL_MID)
				{
					ereport(ERROR, (errcode(ERRCODE_PROTOCOL_VIOLATION),
									errmsg("Last chunk of collection must have type"
										   " TC_PARTIAL_MID.")));
				}
			}
		}

		/* Copy this chunk into the tuple data.  Don't include the header! */
		appendBinaryStringInfo(&serData,
							   (const char *) GetChunkDataPtr(tcItem) + TUPLE_CHUNK_HEADER_SIZE,
							   tcItem->chunk_length - TUPLE_CHUNK_HEADER_SIZE);

		/* Go to the next chunk. */
		tcItem = tcItem->p_next;
		i++;
	}
	while (tcItem != NULL);

	/* we've finished with the TCList, free it now. */
	clearTCList(NULL, tcList);

	{
		TupSerHeader *tshp;
		unsigned int	datalen;
		unsigned int	nullslen;
		unsigned int	hoff;
		HeapTupleHeader t_data;
		char *pos = (char *)serData.data;

		tshp = (TupSerHeader *)pos;

		if ((tshp->tuplen & MEMTUP_LEAD_BIT) != 0)
		{
			uint32 tuplen = memtuple_size_from_uint32(tshp->tuplen);
			htup = (HeapTuple) palloc(tuplen);
			memcpy(htup, pos, tuplen);

			pos += TYPEALIGN(TUPLE_CHUNK_ALIGN,tuplen);
		}
		else
		{
			pos += sizeof(TupSerHeader);	
			/* if the tuple had toasted elements we have to deserialize
			 * the old slow way. */
			if ((tshp->infomask & HEAP_HASEXTERNAL) != 0)
			{
				serData.cursor += sizeof(TupSerHeader);

				htup = DeserializeTuple(pSerInfo, &serData);

				/* Free up memory we used. */
				pfree(serData.data);
				return htup;
			}

			/* reconstruct lengths of null bitmap and data part */
			if (tshp->infomask & HEAP_HASNULL)
				nullslen = BITMAPLEN(tshp->natts);
			else
				nullslen = 0;

			if (tshp->tuplen < sizeof(TupSerHeader) + nullslen)
				ereport(ERROR, (errcode(ERRCODE_GP_INTERCONNECTION_ERROR),
								errmsg("Interconnect error: cannot convert chunks to a  heap tuple."),
								errdetail("tuple len %d < nullslen %d + headersize (%d)",
										  tshp->tuplen, nullslen, (int)sizeof(TupSerHeader))));

			datalen = tshp->tuplen - sizeof(TupSerHeader) - TYPEALIGN(TUPLE_CHUNK_ALIGN, nullslen);

			/* determine overhead size of tuple (should match heap_form_tuple) */
			hoff = offsetof(HeapTupleHeaderData, t_bits) + TYPEALIGN(TUPLE_CHUNK_ALIGN, nullslen);
			if (tshp->infomask & HEAP_HASOID)
				hoff += sizeof(Oid);
			hoff = MAXALIGN(hoff);

			/* Allocate the space in one chunk, like heap_form_tuple */
			htup = (HeapTuple)palloc(HEAPTUPLESIZE + hoff + datalen);

			t_data = (HeapTupleHeader) ((char *)htup + HEAPTUPLESIZE);

			/* make sure unused header fields are zeroed */
			MemSetAligned(t_data, 0, hoff);

			/* reconstruct the HeapTupleData fields */
			htup->t_len = hoff + datalen;
			ItemPointerSetInvalid(&(htup->t_self));
			htup->t_data = t_data;

			/* reconstruct the HeapTupleHeaderData fields */
			ItemPointerSetInvalid(&(t_data->t_ctid));
			HeapTupleHeaderSetNatts(t_data, tshp->natts);
			t_data->t_infomask = tshp->infomask & ~HEAP_XACT_MASK;
			t_data->t_infomask |= HEAP_XMIN_INVALID | HEAP_XMAX_INVALID;
			t_data->t_hoff = hoff;

			if (nullslen)
			{
				memcpy((void *)t_data->t_bits, pos, nullslen);
				pos += TYPEALIGN(TUPLE_CHUNK_ALIGN,nullslen);
			}

			/* Does the tuple descriptor expect an OID?  Note: we don't
			 * have to set the OID itself, just the flag! (see heap_form_tuple()) */
			if (pSerInfo->tupdesc->tdhasoid)		/* else leave infomask = 0 */
			{
				t_data->t_infomask |= HEAP_HASOID;
			}

			/* and now the data proper (it would be nice if we could just
			 * point our caller into our existing buffer in-place, but
			 * we'll leave that for another day) */
			memcpy((char *)t_data + hoff, pos, datalen);
		}
	}

	/* Free up memory we used. */
	pfree(serData.data);

	return htup;
}
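
/*
 * The chunk-type checks above, restated as a standalone predicate (sketch
 * only; TupleChunkType and the TC_* values are assumed from the surrounding
 * module): a single-chunk list must be TC_WHOLE, a multi-chunk list must
 * start with TC_PARTIAL_START, end with TC_PARTIAL_END, and use
 * TC_PARTIAL_MID for everything in between.
 */
static bool
chunk_type_is_valid(TupleChunkType tcType, bool is_first, bool is_last)
{
	if (is_first && is_last)
		return tcType == TC_WHOLE;
	if (is_first)
		return tcType == TC_PARTIAL_START;
	if (is_last)
		return tcType == TC_PARTIAL_END;
	return tcType == TC_PARTIAL_MID;
}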