/*
 * appendStringInfo
 *
 * Format text data under the control of fmt (an sprintf-style format string)
 * and append it to whatever is already in str.  More space is allocated to
 * str if necessary.  This is sort of like a combination of sprintf and
 * strcat.
 */
void
appendStringInfo(StringInfo str, const char *fmt,...)
{
	bool		done = false;

	while (!done)
	{
		va_list		args;

		/* Attempt the format; returns false if the buffer was too small */
		va_start(args, fmt);
		done = appendStringInfoVA(str, fmt, args);
		va_end(args);

		/* Double the buffer size and try again. */
		if (!done)
			enlargeStringInfo(str, str->maxlen);
	}
}
/*
 * Error handler for libxml error messages
 */
static void
xml_error_handler(void *ctxt, const char *msg,...)
{
	bool		formatted = false;

	/* Append the formatted text to xml_error_buf */
	while (!formatted)
	{
		va_list		args;

		/* Attempt the format; returns false if the buffer was too small */
		va_start(args, msg);
		formatted = appendStringInfoVA(xml_error_buf, msg, args);
		va_end(args);

		/* Double the buffer size and try again. */
		if (!formatted)
			enlargeStringInfo(xml_error_buf, xml_error_buf->maxlen);
	}
}
/*
 * Append 'datalen' bytes of 'data' to 'str', replacing every carriage
 * return and newline character with a single space.
 */
void
appendBinaryStringInfoAndStripLineBreaks(StringInfo str, const char *data, int datalen)
{
	int			pos;

	Assert(str != NULL);

	/* Make more room if needed (data plus trailing NUL) */
	enlargeStringInfo(str, datalen + 1);

	/* Copy byte by byte so line breaks can be rewritten in flight */
	for (pos = 0; pos < datalen; pos++)
	{
		char		c = data[pos];

		if (c == '\r' || c == '\n')
			appendStringInfoCharMacro(str, ' ');
		else
			appendStringInfoCharMacro(str, c);
	}
}
/*
 * joinJsQuery
 *		Build a new JsQuery tree whose root node of the given type (expected
 *		to be jqiAnd or jqiOr, given the header comment below) has jq1 and
 *		jq2 as its left and right children.
 *
 * The result is assembled in the on-disk binary format: varlena header,
 * node type byte, int32-aligned nextPos field, then left/right child
 * offsets.  The child offsets are first appended as placeholders and
 * patched in place once copyJsQuery() has reported where each subtree
 * actually landed.
 */
static JsQuery*
joinJsQuery(JsQueryItemType type, JsQuery *jq1, JsQuery *jq2)
{
	JsQuery			*out;
	StringInfoData	buf;
	int32			left, right, chld;
	JsQueryItem		v;

	initStringInfo(&buf);
	/* Reserve an upper bound for the whole result up front */
	enlargeStringInfo(&buf, VARSIZE_ANY(jq1) + VARSIZE_ANY(jq2) +
							4 * sizeof(int32) + VARHDRSZ);

	/* Leave room for the varlena length word, set by SET_VARSIZE below */
	appendStringInfoSpaces(&buf, VARHDRSZ);

	/* form jqiAnd/jqiOr header */
	appendStringInfoChar(&buf, (char)type);
	alignStringInfoInt(&buf);

	/* nextPos field of header*/
	chld = 0; /* actual value, not a fake */
	appendBinaryStringInfo(&buf, (char*)&chld, sizeof(chld));

	/* remember placeholder positions for the two child offsets */
	left = buf.len;
	appendBinaryStringInfo(&buf, (char*)&left /* fake value */, sizeof(left));
	right = buf.len;
	appendBinaryStringInfo(&buf, (char*)&right /* fake value */, sizeof(right));

	/* dump left and right subtree */
	jsqInit(&v, jq1);
	chld = copyJsQuery(&buf, &v);
	*(int32*)(buf.data + left) = chld;	/* patch placeholder with real offset */

	jsqInit(&v, jq2);
	chld = copyJsQuery(&buf, &v);
	*(int32*)(buf.data + right) = chld;	/* patch placeholder with real offset */

	out = (JsQuery*)buf.data;
	SET_VARSIZE(out, buf.len);

	return out;
}
/*
 * appendStringInfo
 *
 * Format text data under the control of fmt (an sprintf-style format string)
 * and append it to whatever is already in str.  More space is allocated to
 * str if necessary.  This is sort of like a combination of sprintf and
 * strcat.
 */
void
appendStringInfo(StringInfo str, const char *fmt,...)
{
	int			save_errno = errno;
	int			needed;

	do
	{
		va_list		args;

		/*
		 * Restore the caller's errno before each attempt, since a failed
		 * attempt may have clobbered it and the format may reference it.
		 */
		errno = save_errno;

		/* Try to format the data; zero means it fit */
		va_start(args, fmt);
		needed = appendStringInfoVA(str, fmt, args);
		va_end(args);

		/* Increase the buffer size and try again. */
		if (needed != 0)
			enlargeStringInfo(str, needed);
	} while (needed != 0);
}
/*
 * SerializeBoolArray serializes the given boolean array and returns the result
 * as a StringInfo. This function packs every 8 boolean values into one byte.
 */
static StringInfo
SerializeBoolArray(bool *boolArray, uint32 boolArrayLength)
{
	uint32		index;
	uint32		packedByteCount = (boolArrayLength + 7) / 8;
	StringInfo	packedBuffer = makeStringInfo();

	/* size the buffer for the packed representation and zero it */
	enlargeStringInfo(packedBuffer, packedByteCount);
	packedBuffer->len = packedByteCount;
	memset(packedBuffer->data, 0, packedByteCount);

	/* set one bit per true entry; false entries stay zero from memset */
	for (index = 0; index < boolArrayLength; index++)
	{
		if (!boolArray[index])
			continue;

		packedBuffer->data[index / 8] |= (1 << (index % 8));
	}

	return packedBuffer;
}
/*
 * jsquery_not
 *		SQL-callable negation: wrap the input jsquery tree in a jqiNot node.
 *
 * The result is assembled in the on-disk binary format: varlena header,
 * node type byte, int32-aligned nextPos field, then a child offset that is
 * first appended as a placeholder and patched once copyJsQuery() reports
 * where the child subtree landed.
 */
Datum
jsquery_not(PG_FUNCTION_ARGS)
{
	JsQuery			*jq = PG_GETARG_JSQUERY(0);
	JsQuery			*out;
	StringInfoData	buf;
	int32			arg, chld;
	JsQueryItem		v;

	initStringInfo(&buf);
	/* Reserve an upper bound for the whole result up front */
	enlargeStringInfo(&buf, VARSIZE_ANY(jq) + 4 * sizeof(int32) + VARHDRSZ);

	/* Leave room for the varlena length word, set by SET_VARSIZE below */
	appendStringInfoSpaces(&buf, VARHDRSZ);

	/* form jsquery header */
	appendStringInfoChar(&buf, (char)jqiNot);
	alignStringInfoInt(&buf);

	/* nextPos field of header*/
	chld = 0; /* actual value, not a fake */
	appendBinaryStringInfo(&buf, (char*)&chld, sizeof(chld));

	/* remember placeholder position for the child offset */
	arg = buf.len;
	appendBinaryStringInfo(&buf, (char*)&arg /* fake value */, sizeof(arg));

	/* dump the negated subtree and patch the placeholder */
	jsqInit(&v, jq);
	chld = copyJsQuery(&buf, &v);
	*(int32*)(buf.data + arg) = chld;

	out = (JsQuery*)buf.data;
	SET_VARSIZE(out, buf.len);

	PG_FREE_IF_COPY(jq, 0);
	PG_RETURN_JSQUERY(out);
}
/* --------------------------------
 * pq_getmessage	- get a message with length word from connection
 *
 *		The return value is placed in an expansible StringInfo, which has
 *		already been initialized by the caller.
 *		Only the message body is placed in the StringInfo; the length word
 *		is removed.  Also, s->cursor is initialized to zero for convenience
 *		in scanning the message contents.
 *
 *		If maxlen is not zero, it is an upper limit on the length of the
 *		message we are willing to accept.  We abort the connection (by
 *		returning EOF) if client tries to send more than that.
 *
 *		returns 0 if OK, EOF if trouble
 * --------------------------------
 */
int
pq_getmessage(StringInfo s, int maxlen)
{
	int32		len;

	resetStringInfo(s);

	/* Read message length word */
	if (pq_getbytes((char *) &len, 4) == EOF)
	{
		ereport(COMMERROR,
				(errcode(ERRCODE_PROTOCOL_VIOLATION),
				 errmsg("unexpected EOF within message length word")));
		return EOF;
	}

	len = ntohl(len);

	/* Length must cover at least itself, and respect the caller's cap */
	if (len < 4 ||
		(maxlen > 0 && len > maxlen))
	{
		ereport(COMMERROR,
				(errcode(ERRCODE_PROTOCOL_VIOLATION),
				 errmsg("invalid message length")));
		return EOF;
	}

	len -= 4;					/* discount length itself */

	if (len > 0)
	{
		/*
		 * Allocate space for message.  If we run out of room (ridiculously
		 * large message), we will elog(ERROR), but we want to discard the
		 * message body so as not to lose communication sync.
		 */
		PG_TRY();
		{
			enlargeStringInfo(s, len);
		}
		PG_CATCH();
		{
			if (pq_discardbytes(len) == EOF)
				ereport(COMMERROR,
						(errcode(ERRCODE_PROTOCOL_VIOLATION),
						 errmsg("incomplete message from client")));
			/* Re-throw the original out-of-memory error */
			PG_RE_THROW();
		}
		PG_END_TRY();

		/* And grab the message */
		if (pq_getbytes(s->data, len) == EOF)
		{
			ereport(COMMERROR,
					(errcode(ERRCODE_PROTOCOL_VIOLATION),
					 errmsg("incomplete message from client")));
			return EOF;
		}
		s->len = len;
		/* Place a trailing null per StringInfo convention */
		s->data[len] = '\0';
	}

	return 0;
}
/*
 * Ensure 'buf' has at least 'alloc_len' free bytes and return a pointer to
 * the current end of its data, i.e. the position where the caller may
 * write the appended bytes.
 */
static char *
start_append(StringInfo buf, int alloc_len)
{
	char	   *writePos;

	enlargeStringInfo(buf, alloc_len);
	writePos = buf->data + buf->len;

	return writePos;
}
/*
 * Send description for each column when using v3+ protocol
 *
 * Emits one RowDescription entry per attribute of 'typeinfo': name, source
 * table/column (taken from the non-resjunk targetlist entries when
 * available, zeroes otherwise), type OID/len/typmod, and the wire format
 * code from 'formats' (0 = text when formats is NULL).
 */
static void
SendRowDescriptionCols_3(StringInfo buf, TupleDesc typeinfo, List *targetlist, int16 *formats)
{
	int			natts = typeinfo->natts;
	int			i;
	ListCell   *tlist_item = list_head(targetlist);

	/*
	 * Preallocate memory for the entire message to be sent. That allows to
	 * use the significantly faster inline pqformat.h functions and to avoid
	 * reallocations.
	 *
	 * Have to overestimate the size of the column-names, to account for
	 * character set overhead.
	 */
	enlargeStringInfo(buf, (NAMEDATALEN * MAX_CONVERSION_GROWTH /* attname */
							+ sizeof(Oid)	/* resorigtbl */
							+ sizeof(AttrNumber)	/* resorigcol */
							+ sizeof(Oid)	/* atttypid */
							+ sizeof(int16) /* attlen */
							+ sizeof(int32) /* attypmod */
							+ sizeof(int16) /* format */
							) * natts);

	for (i = 0; i < natts; ++i)
	{
		Form_pg_attribute att = TupleDescAttr(typeinfo, i);
		Oid			atttypid = att->atttypid;
		int32		atttypmod = att->atttypmod;
		Oid			resorigtbl;
		AttrNumber	resorigcol;
		int16		format;

		/*
		 * If column is a domain, send the base type and typmod instead.
		 * Lookup before sending any ints, for efficiency.
		 */
		atttypid = getBaseTypeAndTypmod(atttypid, &atttypmod);

		/* Do we have a non-resjunk tlist item? */
		while (tlist_item &&
			   ((TargetEntry *) lfirst(tlist_item))->resjunk)
			tlist_item = lnext(tlist_item);
		if (tlist_item)
		{
			TargetEntry *tle = (TargetEntry *) lfirst(tlist_item);

			resorigtbl = tle->resorigtbl;
			resorigcol = tle->resorigcol;
			tlist_item = lnext(tlist_item);
		}
		else
		{
			/* No info available, so send zeroes */
			resorigtbl = 0;
			resorigcol = 0;
		}

		if (formats)
			format = formats[i];
		else
			format = 0;

		/* Field order is fixed by the v3 RowDescription wire format */
		pq_writestring(buf, NameStr(att->attname));
		pq_writeint32(buf, resorigtbl);
		pq_writeint16(buf, resorigcol);
		pq_writeint32(buf, atttypid);
		pq_writeint16(buf, att->attlen);
		pq_writeint32(buf, atttypmod);
		pq_writeint16(buf, format);
	}
}
/*
 * JsonbToCString
 *	   Converts jsonb value to a C-string.
 *
 * If 'out' argument is non-null, the resulting C-string is stored inside the
 * StringBuffer.  The resulting string is always returned.
 *
 * A typical case for passing the StringInfo in rather than NULL is where the
 * caller wants access to the len attribute without having to call strlen, e.g.
 * if they are converting it to a text* object.
 */
char *
JsonbToCString(StringInfo out, JsonbSuperHeader in, int estimated_len)
{
	bool		first = true;	/* true = no comma needed before next item */
	JsonbIterator *it;
	int			type = 0;
	JsonbValue	v;
	int			level = 0;		/* nesting depth, must return to 0 */
	bool		redo_switch = false;

	if (out == NULL)
		out = makeStringInfo();

	/* Grow once up front; fall back to 64 bytes if caller gave no estimate */
	enlargeStringInfo(out, (estimated_len >= 0) ? estimated_len : 64);

	it = JsonbIteratorInit(in);

	while (redo_switch ||
		   ((type = JsonbIteratorNext(&it, &v, false)) != WJB_DONE))
	{
		redo_switch = false;
		switch (type)
		{
			case WJB_BEGIN_ARRAY:
				if (!first)
					appendBinaryStringInfo(out, ", ", 2);
				first = true;

				/* a raw scalar is wrapped in an array internally but prints bare */
				if (!v.val.array.rawScalar)
					appendStringInfoChar(out, '[');
				level++;
				break;
			case WJB_BEGIN_OBJECT:
				if (!first)
					appendBinaryStringInfo(out, ", ", 2);
				first = true;
				appendStringInfoCharMacro(out, '{');
				level++;
				break;
			case WJB_KEY:
				if (!first)
					appendBinaryStringInfo(out, ", ", 2);
				first = true;

				/* json rules guarantee this is a string */
				jsonb_put_escaped_value(out, &v);
				appendBinaryStringInfo(out, ": ", 2);

				/* fetch the value paired with this key */
				type = JsonbIteratorNext(&it, &v, false);
				if (type == WJB_VALUE)
				{
					first = false;
					jsonb_put_escaped_value(out, &v);
				}
				else
				{
					Assert(type == WJB_BEGIN_OBJECT || type == WJB_BEGIN_ARRAY);

					/*
					 * We need to rerun the current switch() since we need to
					 * output the object which we just got from the iterator
					 * before calling the iterator again.
					 */
					redo_switch = true;
				}
				break;
			case WJB_ELEM:
				if (!first)
					appendBinaryStringInfo(out, ", ", 2);
				else
					first = false;

				jsonb_put_escaped_value(out, &v);
				break;
			case WJB_END_ARRAY:
				level--;
				if (!v.val.array.rawScalar)
					appendStringInfoChar(out, ']');
				first = false;
				break;
			case WJB_END_OBJECT:
				level--;
				appendStringInfoCharMacro(out, '}');
				first = false;
				break;
			default:
				elog(ERROR, "unknown flag of jsonb iterator");
		}
	}

	Assert(level == 0);

	return out->data;
}
/*
 * coerceScalarObject
 *		Convert a Java object representing a scalar UDT value into a
 *		PostgreSQL Datum.
 *
 * For a cstring-typed UDT (dataLen == -2) the Java toString() image is used
 * directly.  Otherwise the value serializes itself through an SQLOutput
 * stream into a StringInfo allocated in the upper memory context; varlena
 * types (dataLen < 0) get their length word patched afterwards, while
 * fixed-length types are verified to have produced exactly dataLen bytes.
 * Pass-by-value results are copied into the Datum itself; pass-by-reference
 * results return a pointer to the buffer.
 */
static Datum
coerceScalarObject(UDT self, jobject value)
{
	Datum result;
	int32 dataLen = Type_getLength((Type)self);
	bool isJavaBasedScalar = 0 != self->toString;

	if(dataLen == -2)
	{
		/* Data is a zero terminated string: use the Java toString() image */
		jstring jstr = (jstring)JNI_callObjectMethod(value, self->toString);
		char* tmp = String_createNTS(jstr);
		result = CStringGetDatum(tmp);
		JNI_deleteLocalRef(jstr);
	}
	else
	{
		jobject outputStream;
		StringInfoData buffer;
		bool passByValue = Type_isByValue((Type)self);
		MemoryContext currCtx = Invocation_switchToUpperContext();

		initStringInfo(&buffer);
		MemoryContextSwitchTo(currCtx); /* buffer remembers its context */

		if(dataLen < 0)
			/*
			 * Reserve space for an int32 at the beginning. We are building
			 * a varlena
			 */
			appendBinaryStringInfo(&buffer, (char*)&dataLen, sizeof(int32));
		else
			enlargeStringInfo(&buffer, dataLen);

		/* Let the Java object write its binary image into the buffer */
		outputStream = SQLOutputToChunk_create(&buffer, isJavaBasedScalar);
		JNI_callVoidMethod(value, self->writeSQL, outputStream);
		SQLOutputToChunk_close(outputStream);

		if(dataLen < 0)
		{
			/* Assign the correct length. */
			SET_VARSIZE(buffer.data, buffer.len);
		}
		else if(dataLen != buffer.len)
		{
			ereport(ERROR, (
				errcode(ERRCODE_CANNOT_COERCE),
				errmsg("UDT for Oid %d produced image with incorrect size. Expected %d, was %d",
					Type_getOid((Type)self), dataLen, buffer.len)));
		}
		if (passByValue)
		{
			memset(&result, 0, SIZEOF_DATUM);

			/*
			 * Pass-by-value data is stored in the least significant bits of
			 * a Datum, which on big-endian hardware is at the *end* of the
			 * Datum, so the destination must be offset by bytes.
			 *
			 * BUGFIX: the previous code wrote "&result + SIZEOF_DATUM -
			 * dataLen", which performs pointer arithmetic on a Datum* and
			 * therefore steps in units of sizeof(Datum), landing far outside
			 * 'result' (undefined behavior / stack smash).  Cast to char* so
			 * the offset is in bytes.
			 */
#ifdef WORDS_BIGENDIAN
			memcpy((char *) &result + SIZEOF_DATUM - dataLen, buffer.data, dataLen);
#else
			memcpy(&result, buffer.data, dataLen);
#endif
		}
		else
		{
			result = PointerGetDatum(buffer.data);
		}
	}
	return result;
}
/*
 * Write a tuple to the outputstream, in the most efficient format possible.
 *
 * Wire format: a 'T' byte, a 2-byte live-attribute count, then per live
 * attribute a transfer-type byte followed by type-specific payload:
 *   'n' = null, 'u' = unchanged toast (no payload),
 *   'b' = internal binary image, 's' = typsend binary image,
 *   anything else = text output including the terminating NUL.
 */
static void
pglogical_write_tuple(StringInfo out, PGLogicalOutputData *data, Relation rel, HeapTuple tuple)
{
	TupleDesc	desc;
	Datum		values[MaxTupleAttributeNumber];
	bool		isnull[MaxTupleAttributeNumber];
	int			i;
	uint16		nliveatts = 0;

	desc = RelationGetDescr(rel);

	pq_sendbyte(out, 'T');		/* sending TUPLE */

	/* count attributes that were not dropped */
	for (i = 0; i < desc->natts; i++)
	{
		if (desc->attrs[i]->attisdropped)
			continue;
		nliveatts++;
	}
	pq_sendint(out, nliveatts, 2);

	/* try to allocate enough memory from the get go */
	enlargeStringInfo(out, tuple->t_len + nliveatts * (1 + 4));

	/*
	 * XXX: should this prove to be a relevant bottleneck, it might be
	 * interesting to inline heap_deform_tuple() here, we don't actually need
	 * the information in the form we get from it.
	 */
	heap_deform_tuple(tuple, desc, values, isnull);

	for (i = 0; i < desc->natts; i++)
	{
		HeapTuple	typtup;
		Form_pg_type typclass;
		Form_pg_attribute att = desc->attrs[i];
		char		transfer_type;

		/* skip dropped columns */
		if (att->attisdropped)
			continue;

		if (isnull[i])
		{
			pq_sendbyte(out, 'n');		/* null column */
			continue;
		}
		else if (att->attlen == -1 && VARATT_IS_EXTERNAL_ONDISK(values[i]))
		{
			pq_sendbyte(out, 'u');		/* unchanged toast column */
			continue;
		}

		typtup = SearchSysCache1(TYPEOID, ObjectIdGetDatum(att->atttypid));
		if (!HeapTupleIsValid(typtup))
			elog(ERROR, "cache lookup failed for type %u", att->atttypid);
		typclass = (Form_pg_type) GETSTRUCT(typtup);

		/* pick the cheapest transfer format the peer can accept */
		transfer_type = decide_datum_transfer(att, typclass,
											  data->allow_internal_basetypes,
											  data->allow_binary_basetypes);
		pq_sendbyte(out, transfer_type);
		switch (transfer_type)
		{
			case 'b':			/* internal-format binary data follows */

				/* pass by value */
				if (att->attbyval)
				{
					pq_sendint(out, att->attlen, 4);	/* length */

					/* write the Datum's bytes directly into the buffer */
					enlargeStringInfo(out, att->attlen);
					store_att_byval(out->data + out->len, values[i],
									att->attlen);
					out->len += att->attlen;
					out->data[out->len] = '\0';
				}
				/* fixed length non-varlena pass-by-reference type */
				else if (att->attlen > 0)
				{
					pq_sendint(out, att->attlen, 4);	/* length */
					appendBinaryStringInfo(out, DatumGetPointer(values[i]),
										   att->attlen);
				}
				/* varlena type */
				else if (att->attlen == -1)
				{
					char	   *data = DatumGetPointer(values[i]);

					/* send indirect datums inline */
					if (VARATT_IS_EXTERNAL_INDIRECT(values[i]))
					{
						struct varatt_indirect redirect;

						VARATT_EXTERNAL_GET_POINTER(redirect, data);
						data = (char *) redirect.pointer;
					}

					Assert(!VARATT_IS_EXTERNAL(data));

					pq_sendint(out, VARSIZE_ANY(data), 4);	/* length */
					appendBinaryStringInfo(out, data, VARSIZE_ANY(data));
				}
				else
					elog(ERROR, "unsupported tuple type");
				break;

			case 's':			/* binary send/recv data follows */
				{
					bytea	   *outputbytes;
					int			len;

					outputbytes = OidSendFunctionCall(typclass->typsend,
													  values[i]);
					len = VARSIZE(outputbytes) - VARHDRSZ;
					pq_sendint(out, len, 4);	/* length */
					pq_sendbytes(out, VARDATA(outputbytes), len);	/* data */
					pfree(outputbytes);
				}
				break;

			default:
				{
					char	   *outputstr;
					int			len;

					outputstr = OidOutputFunctionCall(typclass->typoutput,
													  values[i]);
					/* length includes the terminating NUL */
					len = strlen(outputstr) + 1;
					pq_sendint(out, len, 4);	/* length */
					appendBinaryStringInfo(out, outputstr, len);	/* data */
					pfree(outputstr);
				}
		}

		ReleaseSysCache(typtup);
	}
}
/*
 * Emit a PG error or notice, together with any available info about
 * the current Python error, previously set by PLy_exception_set().
 * This should be used to propagate Python errors into PG.  If fmt is
 * NULL, the Python error becomes the primary error message, otherwise
 * it becomes the detail.  If there is a Python traceback, it is put
 * in the context.
 */
void
PLy_elog(int elevel, const char *fmt,...)
{
	char	   *xmsg;
	char	   *tbmsg;
	int			tb_depth;
	StringInfoData emsg;
	PyObject   *exc,
			   *val,
			   *tb;
	const char *primary = NULL;
	int			sqlerrcode = 0;
	char	   *detail = NULL;
	char	   *hint = NULL;
	char	   *query = NULL;
	int			position = 0;
	char	   *schema_name = NULL;
	char	   *table_name = NULL;
	char	   *column_name = NULL;
	char	   *datatype_name = NULL;
	char	   *constraint_name = NULL;

	/* Take ownership of any pending Python exception */
	PyErr_Fetch(&exc, &val, &tb);
	if (exc != NULL)
	{
		PyErr_NormalizeException(&exc, &val, &tb);

		/* Pull PG error fields out of recognized exception classes */
		if (PyErr_GivenExceptionMatches(val, PLy_exc_spi_error))
			PLy_get_spi_error_data(val, &sqlerrcode,
								   &detail, &hint, &query, &position,
								   &schema_name, &table_name, &column_name,
								   &datatype_name, &constraint_name);
		else if (PyErr_GivenExceptionMatches(val, PLy_exc_error))
			PLy_get_error_data(val, &sqlerrcode, &detail, &hint,
							   &schema_name, &table_name, &column_name,
							   &datatype_name, &constraint_name);
		else if (PyErr_GivenExceptionMatches(val, PLy_exc_fatal))
			elevel = FATAL;
	}

	/* this releases our refcount on tb! */
	PLy_traceback(exc, val, tb,
				  &xmsg, &tbmsg, &tb_depth);

	if (fmt)
	{
		/* format the caller-supplied primary message, growing as needed */
		initStringInfo(&emsg);
		for (;;)
		{
			va_list		ap;
			int			needed;

			va_start(ap, fmt);
			needed = appendStringInfoVA(&emsg, dgettext(TEXTDOMAIN, fmt), ap);
			va_end(ap);
			if (needed == 0)
				break;
			enlargeStringInfo(&emsg, needed);
		}
		primary = emsg.data;

		/* Since we have a format string, we cannot have a SPI detail. */
		Assert(detail == NULL);

		/* If there's an exception message, it goes in the detail. */
		if (xmsg)
			detail = xmsg;
	}
	else
	{
		if (xmsg)
			primary = xmsg;
	}

	PG_TRY();
	{
		ereport(elevel,
				(errcode(sqlerrcode ? sqlerrcode : ERRCODE_EXTERNAL_ROUTINE_EXCEPTION),
				 errmsg_internal("%s", primary ? primary : "no exception data"),
				 (detail) ? errdetail_internal("%s", detail) : 0,
				 (tb_depth > 0 && tbmsg) ? errcontext("%s", tbmsg) : 0,
				 (hint) ? errhint("%s", hint) : 0,
				 (query) ? internalerrquery(query) : 0,
				 (position) ? internalerrposition(position) : 0,
				 (schema_name) ? err_generic_string(PG_DIAG_SCHEMA_NAME,
													schema_name) : 0,
				 (table_name) ? err_generic_string(PG_DIAG_TABLE_NAME,
												   table_name) : 0,
				 (column_name) ? err_generic_string(PG_DIAG_COLUMN_NAME,
													column_name) : 0,
				 (datatype_name) ? err_generic_string(PG_DIAG_DATATYPE_NAME,
													  datatype_name) : 0,
				 (constraint_name) ? err_generic_string(PG_DIAG_CONSTRAINT_NAME,
														constraint_name) : 0));
	}
	PG_CATCH();
	{
		/* Release resources before re-throwing (elevel < ERROR can return) */
		if (fmt)
			pfree(emsg.data);
		if (xmsg)
			pfree(xmsg);
		if (tbmsg)
			pfree(tbmsg);
		Py_XDECREF(exc);
		Py_XDECREF(val);

		PG_RE_THROW();
	}
	PG_END_TRY();

	/* Normal-exit cleanup mirrors the PG_CATCH path */
	if (fmt)
		pfree(emsg.data);
	if (xmsg)
		pfree(xmsg);
	if (tbmsg)
		pfree(tbmsg);
	Py_XDECREF(exc);
	Py_XDECREF(val);
}
/*
 * Emit a PG error or notice, together with any available info about
 * the current Python error, previously set by PLy_exception_set().
 * This should be used to propagate Python errors into PG.  If fmt is
 * NULL, the Python error becomes the primary error message, otherwise
 * it becomes the detail.  If there is a Python traceback, it is put
 * in the context.
 */
void
PLy_elog(int elevel, const char *fmt,...)
{
	char	   *xmsg;
	char	   *tbmsg;
	int			tb_depth;
	StringInfoData emsg;
	PyObject   *exc,
			   *val,
			   *tb;
	const char *primary = NULL;
	int			sqlerrcode = 0;
	char	   *detail = NULL;
	char	   *hint = NULL;
	char	   *query = NULL;
	int			position = 0;

	/* Peek at the pending Python exception for PG error fields */
	PyErr_Fetch(&exc, &val, &tb);
	if (exc != NULL)
	{
		if (PyErr_GivenExceptionMatches(val, PLy_exc_spi_error))
			PLy_get_spi_error_data(val, &sqlerrcode, &detail, &hint, &query, &position);
		else if (PyErr_GivenExceptionMatches(val, PLy_exc_fatal))
			elevel = FATAL;
	}
	/* Put the exception back so PLy_traceback can consume it */
	PyErr_Restore(exc, val, tb);

	PLy_traceback(&xmsg, &tbmsg, &tb_depth);

	if (fmt)
	{
		/* format the caller-supplied primary message, growing as needed */
		initStringInfo(&emsg);
		for (;;)
		{
			va_list		ap;
			bool		success;

			va_start(ap, fmt);
			success = appendStringInfoVA(&emsg, dgettext(TEXTDOMAIN, fmt), ap);
			va_end(ap);
			if (success)
				break;
			enlargeStringInfo(&emsg, emsg.maxlen);
		}
		primary = emsg.data;

		/* Since we have a format string, we cannot have a SPI detail. */
		Assert(detail == NULL);

		/* If there's an exception message, it goes in the detail. */
		if (xmsg)
			detail = xmsg;
	}
	else
	{
		if (xmsg)
			primary = xmsg;
	}

	PG_TRY();
	{
		ereport(elevel,
				(errcode(sqlerrcode ? sqlerrcode : ERRCODE_INTERNAL_ERROR),
				 errmsg_internal("%s", primary ? primary : "no exception data"),
				 (detail) ? errdetail_internal("%s", detail) : 0,
				 (tb_depth > 0 && tbmsg) ? errcontext("%s", tbmsg) : 0,
				 (hint) ? errhint("%s", hint) : 0,
				 (query) ? internalerrquery(query) : 0,
				 (position) ? internalerrposition(position) : 0));
	}
	PG_CATCH();
	{
		/* Release resources before re-throwing (elevel < ERROR can return) */
		if (fmt)
			pfree(emsg.data);
		if (xmsg)
			pfree(xmsg);
		if (tbmsg)
			pfree(tbmsg);
		PG_RE_THROW();
	}
	PG_END_TRY();

	/* Normal-exit cleanup mirrors the PG_CATCH path */
	if (fmt)
		pfree(emsg.data);
	if (xmsg)
		pfree(xmsg);
	if (tbmsg)
		pfree(tbmsg);
}
/*
 * SerializeBlockData serializes and compresses block data at given block index with given
 * compression type for every column.
 *
 * For each column the exists bitmap is packed via SerializeBoolArray(), and
 * the (pre-serialized) value buffer is optionally pg_lz-compressed through
 * the shared writeState->compressionBuffer.  The per-block result is copied
 * into blockBuffers, and the column's valueBuffer is reset for reuse.
 */
static void
SerializeBlockData(TableWriteState *writeState, uint32 blockIndex, uint32 rowCount)
{
	uint32 columnIndex = 0;
	StripeBuffers *stripeBuffers = writeState->stripeBuffers;
	ColumnBlockData **blockDataArray = writeState->blockDataArray;
	CompressionType requestedCompressionType = writeState->compressionType;
	const uint32 columnCount = stripeBuffers->columnCount;
	StringInfo compressionBuffer = writeState->compressionBuffer;

	/* serialize exist values, data values are already serialized */
	for (columnIndex = 0; columnIndex < columnCount; columnIndex++)
	{
		ColumnBuffers *columnBuffers = stripeBuffers->columnBuffersArray[columnIndex];
		ColumnBlockBuffers *blockBuffers = columnBuffers->blockBuffersArray[blockIndex];
		ColumnBlockData *blockData = blockDataArray[columnIndex];

		blockBuffers->existsBuffer = SerializeBoolArray(blockData->existsArray, rowCount);
	}

	/*
	 * check and compress value buffers, if a value buffer is not compressable
	 * then keep it as uncompressed, store compression information.
	 */
	for (columnIndex = 0; columnIndex < columnCount; columnIndex++)
	{
		uint64 maximumLength = 0;
		bool compressable = false;
		ColumnBuffers *columnBuffers = stripeBuffers->columnBuffersArray[columnIndex];
		ColumnBlockBuffers *blockBuffers = columnBuffers->blockBuffersArray[blockIndex];
		ColumnBlockData *blockData = blockDataArray[columnIndex];
		StringInfo serializedValueBuffer = NULL;
		CompressionType actualCompressionType = COMPRESSION_NONE;

		serializedValueBuffer = blockData->valueBuffer;

		/* the only other supported compression type is pg_lz for now */
		Assert(requestedCompressionType == COMPRESSION_NONE ||
			   requestedCompressionType == COMPRESSION_PG_LZ);

		/*
		 * If serializedValueBuffer can be compressed, point
		 * serializedValueBuffer at the compressed data and record the
		 * compression type; otherwise leave it uncompressed.
		 */
		if (requestedCompressionType == COMPRESSION_PG_LZ)
		{
			/* worst-case pg_lz output size for this input length */
			maximumLength = PGLZ_MAX_OUTPUT(serializedValueBuffer->len);

			resetStringInfo(compressionBuffer);
			enlargeStringInfo(compressionBuffer, maximumLength);

			compressable = cstore_pglz_compress((const char *) serializedValueBuffer->data,
												serializedValueBuffer->len,
												(PGLZ_Header*)compressionBuffer->data,
												PGLZ_strategy_always);
			if (compressable)
			{
				serializedValueBuffer = compressionBuffer;
				/* len was written past by the compressor; take the real size */
				serializedValueBuffer->len = VARSIZE(compressionBuffer->data);
				actualCompressionType = COMPRESSION_PG_LZ;
			}
		}

		/* store (compressed) value buffer */
		blockBuffers->valueCompressionType = actualCompressionType;
		blockBuffers->valueBuffer = CopyStringInfo(serializedValueBuffer);

		/* valueBuffer needs to be reset for next block's data */
		resetStringInfo(blockData->valueBuffer);
	}
}
/* * pgstrom_create_param_buffer * * It construct a param-buffer on the shared memory segment, according to * the supplied Const/Param list. Its initial reference counter is 1, so * this buffer can be released using pgstrom_put_param_buffer(). */ kern_parambuf * pgstrom_create_kern_parambuf(List *used_params, ExprContext *econtext) { StringInfoData str; kern_parambuf *kpbuf; char padding[STROMALIGN_LEN]; ListCell *cell; Size offset; int index = 0; int nparams = list_length(used_params); /* seek to the head of variable length field */ offset = STROMALIGN(offsetof(kern_parambuf, poffset[nparams])); initStringInfo(&str); enlargeStringInfo(&str, offset); memset(str.data, 0, offset); str.len = offset; /* walks on the Para/Const list */ foreach (cell, used_params) { Node *node = lfirst(cell); if (IsA(node, Const)) { Const *con = (Const *) node; kpbuf = (kern_parambuf *)str.data; if (con->constisnull) kpbuf->poffset[index] = 0; /* null */ else { kpbuf->poffset[index] = str.len; if (con->constlen > 0) appendBinaryStringInfo(&str, (char *)&con->constvalue, con->constlen); else appendBinaryStringInfo(&str, DatumGetPointer(con->constvalue), VARSIZE(con->constvalue)); } } else if (IsA(node, Param)) { ParamListInfo param_info = econtext->ecxt_param_list_info; Param *param = (Param *) node; if (param_info && param->paramid > 0 && param->paramid <= param_info->numParams) { ParamExternData *prm = ¶m_info->params[param->paramid - 1]; /* give hook a chance in case parameter is dynamic */ if (!OidIsValid(prm->ptype) && param_info->paramFetch != NULL) (*param_info->paramFetch) (param_info, param->paramid); kpbuf = (kern_parambuf *)str.data; if (!OidIsValid(prm->ptype)) { elog(INFO, "debug: Param has no particular data type"); kpbuf->poffset[index++] = 0; /* null */ continue; } /* safety check in case hook did something unexpected */ if (prm->ptype != param->paramtype) ereport(ERROR, (errcode(ERRCODE_DATATYPE_MISMATCH), errmsg("type of parameter %d (%s) does not match that when 
preparing the plan (%s)", param->paramid, format_type_be(prm->ptype), format_type_be(param->paramtype)))); if (prm->isnull) kpbuf->poffset[index] = 0; /* null */ else { int typlen = get_typlen(prm->ptype); if (typlen == 0) elog(ERROR, "cache lookup failed for type %u", prm->ptype); if (typlen > 0) appendBinaryStringInfo(&str, (char *)&prm->value, typlen); else appendBinaryStringInfo(&str, DatumGetPointer(prm->value), VARSIZE(prm->value)); } } } else elog(ERROR, "unexpected node: %s", nodeToString(node)); /* alignment */ if (STROMALIGN(str.len) != str.len) appendBinaryStringInfo(&str, padding, STROMALIGN(str.len) - str.len); index++; }
/*
 * read_csv
 *		Read one CSV record (possibly spanning multiple physical lines when
 *		quoted fields contain line breaks) from 'fp' into 'buf', and record
 *		the starting offset of each of the 'ncolumns' fields in 'columns'.
 *
 * On short/partial reads (e.g. a log file still being written) the read is
 * retried up to CSV_READ_RETRY_MAX times with a 100ms sleep.  Field
 * separators are rewritten to NUL bytes in place, so each column can be
 * read as a C string at buf->data + columns[i].  Returns false at EOF;
 * malformed input is reported via badcsv().
 */
bool
read_csv(FILE *fp, StringInfo buf, int ncolumns, size_t *columns)
{
	int			column;
	int			retry_count;
	size_t		read_len;
	size_t		restart;

	/* lazily initialize, otherwise recycle the caller's buffer */
	if (buf->data == NULL)
		initStringInfo(buf);
	else
		resetStringInfo(buf);
	memset(columns, 0, ncolumns * sizeof(size_t));

	column = 0;
	retry_count = 0;
	restart = 0;

retry:
	/* read a physical line (or the continuation of a quoted field) */
	read_len = 0;
	do
	{
		char	   *buffer;
		size_t		len;

		enlargeStringInfo(buf, 1000);
		buffer = buf->data + buf->len;
		if (fgets(buffer, buf->maxlen - buf->len, fp) == NULL)
			break;
		len = strlen(buffer);
		if (len == 0)
			break;
		read_len += len;
		buf->len += len;
	} while (buf->data[buf->len - 1] != '\n');

	if (buf->len == 0)
		return false;			/* EOF, nothing read */

	if (read_len == 0 || buf->data[buf->len - 1] != '\n')
	{
		/* partial line: writer may still be appending; wait and retry */
		if (retry_count < CSV_READ_RETRY_MAX)
		{
			usleep(100 * 1000); /* 100ms */
			retry_count++;
			goto retry;
		}
		return badcsv(column + 1, buf->data);
	}

	/* split log with comma */
	while (column < ncolumns)
	{
		char	   *buffer;
		char	   *next;

		buffer = buf->data + columns[column];

		if (buffer[0] == '\0')
			break;
		else if (buffer[0] == '"')
		{
			/* quoted field */
			if (restart)
			{
				/* resuming after a line break inside the quotes */
				buffer = buf->data + restart;
				restart = 0;
			}
			else
				buffer++;		/* skip the opening quote */

			for (;;)
			{
				next = strchr(buffer, '"');
				if (next == NULL)
				{
					/* save parse restart point */
					restart = buf->len;
					goto retry;	/* line-break in quote, needs more buffers */
				}
				else if (next[1] == ',' || next[1] == '\n')
				{
					/* closing quote: terminate field, note next field start */
					next[0] = '\0';
					columns[column]++;
					columns[++column] = next - buf->data + 2;
					break;
				}
				else if (next[1] == '"')
				{
					/* combine "" to " */
					memmove(next, next + 1, strlen(next + 1) + 1);
					buf->len--;
					buffer = next + 1;
				}
				else
				{
					return badcsv(column + 1, buf->data);
				}
			}
		}
		else
		{
			/* unquoted field */
			next = strpbrk(buffer, ",\n");
			if (next == NULL)
			{
				return badcsv(column + 1, buf->data);
			}
			else
			{
				next[0] = '\0';
				columns[++column] = next - buf->data + 1;
			}
		}
	}

	/* throw an error if column number does not reach a necessary number. */
	if (column < ncolumns)
		return badcsv(column + 1, buf->data);

	return true;
}
/*
 * common worker for above two functions
 *
 * Walks the jsonb container with a JsonbIterator and emits JSON text into
 * 'out' (allocated here if NULL).  When 'indent' is true, pretty-printed
 * output is produced via add_indent(); otherwise items are separated by
 * ", ".  Returns out->data.
 */
static char *
JsonbToCStringWorker(StringInfo out, JsonbContainer *in, int estimated_len, bool indent)
{
	bool		first = true;	/* true = no separator needed before next item */
	JsonbIterator *it;
	JsonbValue	v;
	JsonbIteratorToken type = WJB_DONE;
	int			level = 0;		/* nesting depth, must return to 0 */
	bool		redo_switch = false;

	/* If we are indenting, don't add a space after a comma */
	int			ispaces = indent ? 1 : 2;

	/*
	 * Don't indent the very first item. This gets set to the indent flag at
	 * the bottom of the loop.
	 */
	bool		use_indent = false;
	bool		raw_scalar = false;
	bool		last_was_key = false;

	if (out == NULL)
		out = makeStringInfo();

	/* Grow once up front; fall back to 64 bytes if caller gave no estimate */
	enlargeStringInfo(out, (estimated_len >= 0) ? estimated_len : 64);

	it = JsonbIteratorInit(in);

	while (redo_switch ||
		   ((type = JsonbIteratorNext(&it, &v, false)) != WJB_DONE))
	{
		redo_switch = false;
		switch (type)
		{
			case WJB_BEGIN_ARRAY:
				if (!first)
					appendBinaryStringInfo(out, ", ", ispaces);

				/* a raw scalar is wrapped in an array internally but prints bare */
				if (!v.val.array.rawScalar)
				{
					add_indent(out, use_indent && !last_was_key, level);
					appendStringInfoCharMacro(out, '[');
				}
				else
					raw_scalar = true;

				first = true;
				level++;
				break;
			case WJB_BEGIN_OBJECT:
				if (!first)
					appendBinaryStringInfo(out, ", ", ispaces);

				add_indent(out, use_indent && !last_was_key, level);
				appendStringInfoCharMacro(out, '{');

				first = true;
				level++;
				break;
			case WJB_KEY:
				if (!first)
					appendBinaryStringInfo(out, ", ", ispaces);
				first = true;

				add_indent(out, use_indent, level);

				/* json rules guarantee this is a string */
				jsonb_put_escaped_value(out, &v);
				appendBinaryStringInfo(out, ": ", 2);

				/* fetch the value paired with this key */
				type = JsonbIteratorNext(&it, &v, false);
				if (type == WJB_VALUE)
				{
					first = false;
					jsonb_put_escaped_value(out, &v);
				}
				else
				{
					Assert(type == WJB_BEGIN_OBJECT || type == WJB_BEGIN_ARRAY);

					/*
					 * We need to rerun the current switch() since we need to
					 * output the object which we just got from the iterator
					 * before calling the iterator again.
					 */
					redo_switch = true;
				}
				break;
			case WJB_ELEM:
				if (!first)
					appendBinaryStringInfo(out, ", ", ispaces);
				first = false;

				if (!raw_scalar)
					add_indent(out, use_indent, level);
				jsonb_put_escaped_value(out, &v);
				break;
			case WJB_END_ARRAY:
				level--;
				if (!raw_scalar)
				{
					add_indent(out, use_indent, level);
					appendStringInfoCharMacro(out, ']');
				}
				first = false;
				break;
			case WJB_END_OBJECT:
				level--;
				add_indent(out, use_indent, level);
				appendStringInfoCharMacro(out, '}');
				first = false;
				break;
			default:
				elog(ERROR, "unknown jsonb iterator token type");
		}
		use_indent = indent;
		last_was_key = redo_switch;
	}

	Assert(level == 0);

	return out->data;
}
/*
 * Write a tuple to the outputstream, in the most efficient format possible.
 *
 * Wire format: a 2-byte live-attribute count, then per live attribute
 * either 'n' (null), 'u' (unchanged toast, no payload), or 't' followed by
 * the counted text representation produced by the type's output function.
 */
static void
logicalrep_write_tuple(StringInfo out, Relation rel, HeapTuple tuple)
{
	TupleDesc	desc;
	Datum		values[MaxTupleAttributeNumber];
	bool		isnull[MaxTupleAttributeNumber];
	int			i;
	uint16		nliveatts = 0;

	desc = RelationGetDescr(rel);

	/* count attributes that were not dropped */
	for (i = 0; i < desc->natts; i++)
	{
		if (TupleDescAttr(desc, i)->attisdropped)
			continue;
		nliveatts++;
	}
	pq_sendint(out, nliveatts, 2);

	/* try to allocate enough memory from the get-go */
	enlargeStringInfo(out, tuple->t_len + nliveatts * (1 + 4));

	heap_deform_tuple(tuple, desc, values, isnull);

	/* Write the values */
	for (i = 0; i < desc->natts; i++)
	{
		HeapTuple	typtup;
		Form_pg_type typclass;
		Form_pg_attribute att = TupleDescAttr(desc, i);
		char	   *outputstr;

		/* skip dropped columns */
		if (att->attisdropped)
			continue;

		if (isnull[i])
		{
			pq_sendbyte(out, 'n');		/* null column */
			continue;
		}
		else if (att->attlen == -1 && VARATT_IS_EXTERNAL_ONDISK(values[i]))
		{
			pq_sendbyte(out, 'u');		/* unchanged toast column */
			continue;
		}

		typtup = SearchSysCache1(TYPEOID, ObjectIdGetDatum(att->atttypid));
		if (!HeapTupleIsValid(typtup))
			elog(ERROR, "cache lookup failed for type %u", att->atttypid);
		typclass = (Form_pg_type) GETSTRUCT(typtup);

		pq_sendbyte(out, 't');	/* 'text' data follows */

		outputstr = OidOutputFunctionCall(typclass->typoutput, values[i]);
		pq_sendcountedtext(out, outputstr, strlen(outputstr), false);
		pfree(outputstr);

		ReleaseSysCache(typtup);
	}
}