/** * Strong quoting for command line argument * * some input arguements, such as URI, are not validated. * we need to strong quote it. * * Input: * value - null terminated string * * Return: * a strong quoted null terminated string */ static char* quoteArgument(char* value) { StringInfoData quotedVal; char *valptr; /* Guess the size of the quoted one * I don't think it's common to have quote inside the URI. * So, let's guess we only need to account for the begin/end quote and 3 more. * That means 5 more char than the input value. */ initStringInfoOfSize("edVal, strlen(value)+5); /* It starts with a quote. */ appendStringInfoChar("edVal, '\''); /* Copy each char and append to quotedVal * if the char is a quote or a slash, escape it. */ for(valptr=value; *value != 0; value++) { char chr = *value; if (chr == '\'' || chr == '\\') appendStringInfoChar("edVal, '\\'); appendStringInfoChar("edVal, chr); } /* It ends with a quote. */ appendStringInfoChar("edVal, '\''); return quotedVal.data; }
/* * Using the Hadoop connector requires proper setup of GUCs * This procedure does sanity check on gp_hadoop_connector_jardir, * gp_hadoop_target_version and gp_hadoop_home. * It also update GUC gp_hadoop_connector_version for the current gp_hadoop_target_version. * * It checks the following: * 1. $GPHOME/<gp_hadoop_jardir>/$GP_HADOOP_CONN_VERSION.jar must exists. * 2. if gp_hadoop_home is set, then gp_hadoop_home must exists. */ static void checkHadoopGUCs() { char gphome[MAXPGPATH]; StringInfoData path; int jarFD; /* Check the existence of $GPHOME/<gp_hadoop_jardir>/$GP_HADOOP_CONN_VERSION.jar * * To get $GPHOME, we go from my_exec_path, which is $GPHOME/bin/postgres, and * go up 2 levels. * * Currently, gp_hadoop_connector_jardir is fixed. We look up $GP_HADOOP_CONN_VERSION * using gp_hadoop_target_version. */ snprintf(gphome, sizeof(gphome), "%s", my_exec_path); get_parent_directory(gphome); get_parent_directory(gphome); initStringInfoOfSize(&path, MAXPGPATH); gp_hadoop_connector_version = (char*)getConnectorVersion(); appendStringInfo(&path, "%s/%s/%s.jar", gphome, gp_hadoop_connector_jardir, gp_hadoop_connector_version); jarFD = BasicOpenFile(path.data, O_RDONLY | PG_BINARY, 0); if (jarFD == -1) { ereport(ERROR, (errcode(ERRCODE_EXTERNAL_ROUTINE_EXCEPTION), errmsg("cannot open Hadoop Cross Connect in %s: %m", path.data))); } close(jarFD); /* Check the existence of gp_hadoop_home, if specified. * * If user has already specified $HADOOP_HOME in the env, then * there's no need to setup this GUC. */ if (strlen(gp_hadoop_home)> 0) { int hdHomeFD = BasicOpenFile(gp_hadoop_home, O_RDONLY, 0); if (hdHomeFD == -1) { ereport(ERROR, (errcode(ERRCODE_EXTERNAL_ROUTINE_EXCEPTION), errmsg("cannot open gp_hadoop_home in %s: %m", gp_hadoop_home))); } close(hdHomeFD); } }
/*
 * nodeToBinaryStringFast
 *	  Serialize a Node tree into a palloc'd binary string.
 *
 * The serialized length is returned through *length.  A 0xDEAD int16
 * sentinel is appended at the end so readfast.c can verify it received
 * the complete stream.
 */
char *
nodeToBinaryStringFast(void *obj, int *length)
{
	StringInfoData buf;
	int16		sentinel = (int16) 0xDEAD;

	/* see stringinfo.h for an explanation of this maneuver */
	initStringInfoOfSize(&buf, 4096);

	_outNode(&buf, obj);

	/* Add something special at the end that we can check in readfast.c */
	appendBinaryStringInfo(&buf, (const char *) &sentinel, sizeof(int16));

	*length = buf.len;
	return buf.data;
}
/*
 * CvtChunksToHeapTup
 *		Reassemble a list of tuple chunks into a HeapTuple.
 *
 * The chunk list must be well-formed: a single TC_WHOLE chunk, a single
 * TC_EMPTY chunk (yielding an all-default tuple), or a TC_PARTIAL_START /
 * TC_PARTIAL_MID* / TC_PARTIAL_END sequence.  Chunk-type violations raise
 * a protocol-violation ERROR.
 *
 * tcList is consumed (cleared) by this call.  The returned tuple is
 * palloc'd; caller owns it.
 *
 * FIX(review): the middle-chunk type-check previously reported
 * "Last chunk ... TC_PARTIAL_MID", a copy-paste from the TC_PARTIAL_END
 * branch; corrected to "Middle chunk".
 */
HeapTuple
CvtChunksToHeapTup(TupleChunkList tcList, SerTupInfo *pSerInfo)
{
	StringInfoData serData;
	TupleChunkListItem tcItem;
	int			i;
	HeapTuple	htup;
	TupleChunkType tcType;

	AssertArg(tcList != NULL);
	AssertArg(tcList->p_first != NULL);
	AssertArg(pSerInfo != NULL);

	tcItem = tcList->p_first;

	if (tcList->num_chunks == 1)
	{
		GetChunkType(tcItem, &tcType);

		if (tcType == TC_EMPTY)
		{
			/*
			 * the sender is indicating that there was a row with no
			 * attributes: return a NULL tuple
			 */
			clearTCList(NULL, tcList);

			htup = heap_form_tuple(pSerInfo->tupdesc, pSerInfo->values,
								   pSerInfo->nulls);

			return htup;
		}
	}

	/*
	 * Dump all of the data in the tuple chunk list into a single StringInfo,
	 * so that we can convert it into a HeapTuple.  Check chunk types based
	 * on whether there is only one chunk, or multiple chunks.
	 *
	 * We know roughly how much space we'll need, allocate all in one go.
	 */
	initStringInfoOfSize(&serData, tcList->num_chunks * tcList->max_chunk_length);

	i = 0;
	do
	{
		/* Make sure that the type of this tuple chunk is correct! */
		GetChunkType(tcItem, &tcType);

		if (i == 0)
		{
			if (tcItem->p_next == NULL)
			{
				if (tcType != TC_WHOLE)
				{
					ereport(ERROR, (errcode(ERRCODE_PROTOCOL_VIOLATION),
									errmsg("Single chunk's type must be TC_WHOLE.")));
				}
			}
			else
				/* tcItem->p_next != NULL */
			{
				if (tcType != TC_PARTIAL_START)
				{
					ereport(ERROR, (errcode(ERRCODE_PROTOCOL_VIOLATION),
									errmsg("First chunk of collection must have type"
										   " TC_PARTIAL_START.")));
				}
			}
		}
		else
			/* i > 0 */
		{
			if (tcItem->p_next == NULL)
			{
				if (tcType != TC_PARTIAL_END)
				{
					ereport(ERROR, (errcode(ERRCODE_PROTOCOL_VIOLATION),
									errmsg("Last chunk of collection must have type"
										   " TC_PARTIAL_END.")));
				}
			}
			else
				/* tcItem->p_next != NULL */
			{
				if (tcType != TC_PARTIAL_MID)
				{
					/* was mislabeled "Last chunk" — this branch checks middle chunks */
					ereport(ERROR, (errcode(ERRCODE_PROTOCOL_VIOLATION),
									errmsg("Middle chunk of collection must have type"
										   " TC_PARTIAL_MID.")));
				}
			}
		}

		/* Copy this chunk into the tuple data.  Don't include the header! */
		appendBinaryStringInfo(&serData,
							   (const char *) GetChunkDataPtr(tcItem) + TUPLE_CHUNK_HEADER_SIZE,
							   tcItem->chunk_length - TUPLE_CHUNK_HEADER_SIZE);

		/* Go to the next chunk. */
		tcItem = tcItem->p_next;
		i++;
	}
	while (tcItem != NULL);

	/* we've finished with the TCList, free it now. */
	clearTCList(NULL, tcList);

	{
		TupSerHeader *tshp;
		unsigned int datalen;
		unsigned int nullslen;
		unsigned int hoff;
		HeapTupleHeader t_data;
		char	   *pos = (char *) serData.data;

		tshp = (TupSerHeader *) pos;

		if ((tshp->tuplen & MEMTUP_LEAD_BIT) != 0)
		{
			/* Serialized form is a memtuple; copy it out whole. */
			uint32		tuplen = memtuple_size_from_uint32(tshp->tuplen);

			htup = (HeapTuple) palloc(tuplen);
			memcpy(htup, pos, tuplen);

			pos += TYPEALIGN(TUPLE_CHUNK_ALIGN, tuplen);
		}
		else
		{
			pos += sizeof(TupSerHeader);

			/*
			 * if the tuple had toasted elements we have to deserialize the
			 * old slow way.
			 */
			if ((tshp->infomask & HEAP_HASEXTERNAL) != 0)
			{
				serData.cursor += sizeof(TupSerHeader);

				htup = DeserializeTuple(pSerInfo, &serData);

				/* Free up memory we used. */
				pfree(serData.data);

				return htup;
			}

			/* reconstruct lengths of null bitmap and data part */
			if (tshp->infomask & HEAP_HASNULL)
				nullslen = BITMAPLEN(tshp->natts);
			else
				nullslen = 0;

			if (tshp->tuplen < sizeof(TupSerHeader) + nullslen)
				ereport(ERROR,
						(errcode(ERRCODE_GP_INTERCONNECTION_ERROR),
						 errmsg("Interconnect error: cannot convert chunks to a heap tuple."),
						 errdetail("tuple len %d < nullslen %d + headersize (%d)",
								   tshp->tuplen, nullslen, (int) sizeof(TupSerHeader))));

			datalen = tshp->tuplen - sizeof(TupSerHeader) -
				TYPEALIGN(TUPLE_CHUNK_ALIGN, nullslen);

			/* determine overhead size of tuple (should match heap_form_tuple) */
			hoff = offsetof(HeapTupleHeaderData, t_bits) +
				TYPEALIGN(TUPLE_CHUNK_ALIGN, nullslen);
			if (tshp->infomask & HEAP_HASOID)
				hoff += sizeof(Oid);
			hoff = MAXALIGN(hoff);

			/* Allocate the space in one chunk, like heap_form_tuple */
			htup = (HeapTuple) palloc(HEAPTUPLESIZE + hoff + datalen);

			t_data = (HeapTupleHeader) ((char *) htup + HEAPTUPLESIZE);

			/* make sure unused header fields are zeroed */
			MemSetAligned(t_data, 0, hoff);

			/* reconstruct the HeapTupleData fields */
			htup->t_len = hoff + datalen;
			ItemPointerSetInvalid(&(htup->t_self));
			htup->t_data = t_data;

			/* reconstruct the HeapTupleHeaderData fields */
			ItemPointerSetInvalid(&(t_data->t_ctid));
			HeapTupleHeaderSetNatts(t_data, tshp->natts);
			t_data->t_infomask = tshp->infomask & ~HEAP_XACT_MASK;
			t_data->t_infomask |= HEAP_XMIN_INVALID | HEAP_XMAX_INVALID;
			t_data->t_hoff = hoff;

			if (nullslen)
			{
				memcpy((void *) t_data->t_bits, pos, nullslen);
				pos += TYPEALIGN(TUPLE_CHUNK_ALIGN, nullslen);
			}

			/*
			 * does the tuple descriptor expect an OID?  Note: we don't have
			 * to set the oid itself, just the flag! (see heap_formtuple())
			 */
			if (pSerInfo->tupdesc->tdhasoid)	/* else leave infomask = 0 */
			{
				t_data->t_infomask |= HEAP_HASOID;
			}

			/*
			 * and now the data proper (it would be nice if we could just
			 * point our caller into our existing buffer in-place, but we'll
			 * leave that for another day)
			 */
			memcpy((char *) t_data + hoff, pos, datalen);
		}
	}

	/* Free up memory we used. */
	pfree(serData.data);

	return htup;
}