/*
 * MemoryContextInit
 *		Start up the memory-context subsystem.
 *
 * This must be called before creating contexts or allocating memory in
 * contexts.  TopMemoryContext and ErrorContext are initialized here;
 * other contexts must be created afterwards.
 *
 * In normal multi-backend operation, this is called once during
 * postmaster startup, and not at all by individual backend startup
 * (since the backends inherit an already-initialized context subsystem
 * by virtue of being forked off the postmaster).
 *
 * In a standalone backend this must be called during backend startup.
 */
void
MemoryContextInit(void)
{
	/* Must not be called twice: none of the global contexts may exist yet. */
	AssertState(TopMemoryContext == NULL);
	AssertState(CurrentMemoryContext == NULL);
	AssertState(MemoryAccountMemoryContext == NULL);

	/*
	 * Initialize TopMemoryContext as an AllocSetContext with slow growth rate
	 * --- we don't really expect much to be allocated in it.
	 *
	 * (There is special-case code in MemoryContextCreate() for this call.)
	 */
	TopMemoryContext = AllocSetContextCreate((MemoryContext) NULL,
											 "TopMemoryContext",
											 0,
											 8 * 1024,
											 8 * 1024);

	/*
	 * Not having any other place to point CurrentMemoryContext, make it point
	 * to TopMemoryContext.  Caller should change this soon!
	 */
	CurrentMemoryContext = TopMemoryContext;

	/*
	 * Initialize ErrorContext as an AllocSetContext with slow growth rate ---
	 * we don't really expect much to be allocated in it.  More to the point,
	 * require it to contain at least 8K at all times.  This is the only case
	 * where retained memory in a context is *essential* --- we want to be
	 * sure ErrorContext still has some memory even if we've run out
	 * elsewhere!
	 */
	ErrorContext = AllocSetContextCreate(TopMemoryContext,
										 "ErrorContext",
										 8 * 1024,
										 8 * 1024,
										 8 * 1024);

	/* Reset the memory-accounting subsystem now that its contexts can exist. */
	MemoryAccounting_Reset();
}
//! Verifies WPMStatHandler construction: a handler built with pool size 100
//! must report exactly that value in its state, with no request activity yet.
void WPMStatHandlerTest::ConstructorTest()
{
	StartTrace(WPMStatHandlerTest.ConstructorTest);

	// Expected internal state right after construction: only fPoolSize is set.
	// Each slot pairs the expected value with the failure message AssertState
	// should report on mismatch.
	Anything expectedState;
	expectedState[0L]["Value"] = 100;
	expectedState[0L]["Message"] = "fPoolSize wrong value";

	WPMStatHandler wpm(100);
	t_assertm(AssertState(wpm, expectedState), "wrong state");
}
//! Exercises WPMStatHandler event accounting: two eEnter events must raise
//! the parallel-request count to 2, and each subsequent eLeave must
//! decrement it while incrementing the total-request counter.
void WPMStatHandlerTest::StatEvtTests()
{
	StartTrace(WPMStatHandlerTest.StatEvtTests);

	// Phase 1: two requests enter -> 2 in flight, none completed yet.
	Anything expectedState;
	expectedState[0L]["Value"] = 100;
	expectedState[0L]["Message"] = "fPoolSize wrong value";
	expectedState[1L]["Value"] = 2;
	expectedState[1L]["Message"] = "fCurrentParallelRequests wrong value";
	expectedState[2L]["Value"] = 0;
	expectedState[2L]["Message"] = "fTotalRequests wrong value";
	expectedState[3L]["Value"] = 0;
	expectedState[3L]["Message"] = "fTotalTime wrong value";

	WPMStatHandler wpm(100);
	wpm.HandleStatEvt(WPMStatHandler::eEnter);
	wpm.HandleStatEvt(WPMStatHandler::eEnter);
	t_assertm(AssertState(wpm, expectedState), "wrong state");

	// Phase 2: first request leaves -> 1 in flight, 1 completed.
	wpm.HandleStatEvt(WPMStatHandler::eLeave);
	expectedState[1L]["Value"] = 1;
	expectedState[1L]["Message"] = "fCurrentParallelRequests wrong value";
	expectedState[2L]["Value"] = 1;
	expectedState[2L]["Message"] = "fTotalRequests wrong value";
	expectedState[3L]["Value"] = 0;
	expectedState[3L]["Message"] = "fTotalTime wrong value";
	t_assertm(AssertState(wpm, expectedState), "wrong state");

	// Phase 3: second request leaves -> none in flight, 2 completed.
	wpm.HandleStatEvt(WPMStatHandler::eLeave);
	expectedState[1L]["Value"] = 0;
	expectedState[1L]["Message"] = "fCurrentParallelRequests wrong value";
	expectedState[2L]["Value"] = 2;
	expectedState[2L]["Message"] = "fTotalRequests wrong value";
	expectedState[3L]["Value"] = 0;
	expectedState[3L]["Message"] = "fTotalTime wrong value";
	t_assertm(AssertState(wpm, expectedState), "wrong state");
}
/*
 * getOidListDiff
 *		Helper for updateAclDependencies.
 *
 * Returns (via *diff) the elements of list1 that do not appear in list2,
 * and the count of such elements as the function result.  Both inputs must
 * be sorted and de-duplicated, and list2 must not contain any value that is
 * absent from list1 (i.e. list2 is a subset of list1).
 *
 * NOTE: Both input arrays are pfreed before returning.
 */
static int
getOidListDiff(Oid *list1, int nlist1, Oid *list2, int nlist2, Oid **diff)
{
	Oid		   *out;
	int			pos1 = 0;
	int			pos2 = 0;
	int			nout = 0;

	AssertArg(nlist1 >= nlist2 && nlist2 >= 0);

	/* Given the subset precondition, exactly nlist1 - nlist2 items survive. */
	out = palloc(sizeof(Oid) * (nlist1 - nlist2));
	*diff = out;

	/* Walk both sorted lists in lockstep, keeping list1-only entries. */
	while (pos1 < nlist1 && pos2 < nlist2)
	{
		if (list1[pos1] == list2[pos2])
		{
			/* present in both lists: drop it */
			pos1++;
			pos2++;
		}
		else if (list1[pos1] < list2[pos2])
		{
			/* present only in list1: keep it */
			out[nout++] = list1[pos1++];
		}
		else
		{
			/* list2 holds a value list1 lacks --- violates precondition */
			elog(WARNING, "invalid element %u in shorter list", list2[pos2]);
			pos2++;
		}
	}

	/* Whatever remains of list1 is unmatched by construction: keep it all. */
	while (pos1 < nlist1)
		out[nout++] = list1[pos1++];

	/* We should have copied the exact number of elements */
	AssertState(nout == (nlist1 - nlist2));

	if (list1)
		pfree(list1);
	if (list2)
		pfree(list2);

	return nout;
}
/*
 * MemoryContextInit
 *		Start up the memory-context subsystem.
 *
 * This must be called before creating contexts or allocating memory in
 * contexts.  TopMemoryContext and ErrorContext are initialized here;
 * other contexts must be created afterwards.
 *
 * In normal multi-backend operation, this is called once during
 * postmaster startup, and not at all by individual backend startup
 * (since the backends inherit an already-initialized context subsystem
 * by virtue of being forked off the postmaster).  But in an EXEC_BACKEND
 * build, each process must do this for itself.
 *
 * In a standalone backend this must be called during backend startup.
 */
void
MemoryContextInit(void)
{
	/* Guard against being called twice in one process. */
	AssertState(TopMemoryContext == NULL);

	/*
	 * Initialize TopMemoryContext as an AllocSetContext with slow growth rate
	 * --- we don't really expect much to be allocated in it.
	 *
	 * (There is special-case code in MemoryContextCreate() for this call.)
	 */
	TopMemoryContext = AllocSetContextCreate((MemoryContext) NULL,
											 "TopMemoryContext",
											 0,
											 8 * 1024,
											 8 * 1024);

	/*
	 * Not having any other place to point CurrentMemoryContext, make it point
	 * to TopMemoryContext.  Caller should change this soon!
	 */
	CurrentMemoryContext = TopMemoryContext;

	/*
	 * Initialize ErrorContext as an AllocSetContext with slow growth rate ---
	 * we don't really expect much to be allocated in it.  More to the point,
	 * require it to contain at least 8K at all times.  This is the only case
	 * where retained memory in a context is *essential* --- we want to be
	 * sure ErrorContext still has some memory even if we've run out
	 * elsewhere!  Also, allow allocations in ErrorContext within a critical
	 * section.  Otherwise a PANIC will cause an assertion failure in the error
	 * reporting code, before printing out the real cause of the failure.
	 *
	 * This should be the last step in this function, as elog.c assumes memory
	 * management works once ErrorContext is non-null.
	 */
	ErrorContext = AllocSetContextCreate(TopMemoryContext,
										 "ErrorContext",
										 8 * 1024,
										 8 * 1024,
										 8 * 1024);
	MemoryContextAllowInCriticalSection(ErrorContext, true);
}
/*
 * PortalDefineQuery
 *		A simple subroutine to establish a portal's query.
 *
 * Notes: commandTag shall be NULL if and only if the original query string
 * (before rewriting) was an empty string.  Also, the passed commandTag must
 * be a pointer to a constant string, since it is not copied.  The caller is
 * responsible for ensuring that the passed sourceText (if any), parse and
 * plan trees have adequate lifetime.  Also, queryContext must accurately
 * describe the location of the parse and plan trees.
 */
void
PortalDefineQuery(Portal portal,
				  const char *sourceText,
				  const char *commandTag,
				  List *parseTrees,
				  List *planTrees,
				  MemoryContext queryContext)
{
	AssertArg(PortalIsValid(portal));
	AssertState(portal->queryContext == NULL);	/* else defined already */

	/* Parse trees and plan trees must pair up one-to-one. */
	Assert(list_length(parseTrees) == list_length(planTrees));

	/* A tag-less (empty) query must carry no parse trees. */
	Assert(commandTag != NULL || parseTrees == NIL);

	/* Simply stash the pointers; nothing is copied (see notes above). */
	portal->sourceText = sourceText;
	portal->commandTag = commandTag;
	portal->parseTrees = parseTrees;
	portal->planTrees = planTrees;
	portal->queryContext = queryContext;
}
/*
 * Read tuples in correct sort order from tuplesort, and load them into
 * btree leaves.
 */
static void
_bt_load(BTWriteState *wstate, BTSpool *btspool, BTSpool *btspool2)
{
	BTPageState *state = NULL;
	bool		merge = (btspool2 != NULL);
	IndexTuple	itup,
				itup2 = NULL;
	bool		load1;
	TupleDesc	tupdes = RelationGetDescr(wstate->index);
	int			i,
				keysz = RelationGetNumberOfAttributes(wstate->index);
	ScanKey		indexScanKey = NULL;
	SortSupport sortKeys;

	if (merge)
	{
		/*
		 * Another BTSpool for dead tuples exists.  Now we have to merge
		 * btspool and btspool2.
		 */

		/* the preparation of merge: prime one tuple from each spool */
		itup = tuplesort_getindextuple(btspool->sortstate, true);
		itup2 = tuplesort_getindextuple(btspool2->sortstate, true);
		indexScanKey = _bt_mkscankey_nodata(wstate->index);

		/* Prepare SortSupport data for each column */
		sortKeys = (SortSupport) palloc0(keysz * sizeof(SortSupportData));

		for (i = 0; i < keysz; i++)
		{
			SortSupport sortKey = sortKeys + i;
			ScanKey		scanKey = indexScanKey + i;
			int16		strategy;

			sortKey->ssup_cxt = CurrentMemoryContext;
			sortKey->ssup_collation = scanKey->sk_collation;
			sortKey->ssup_nulls_first =
				(scanKey->sk_flags & SK_BT_NULLS_FIRST) != 0;
			sortKey->ssup_attno = scanKey->sk_attno;
			/* Abbreviation is not supported here */
			sortKey->abbreviate = false;

			AssertState(sortKey->ssup_attno != 0);

			/* DESC columns use the "greater" strategy for the comparator */
			strategy = (scanKey->sk_flags & SK_BT_DESC) != 0 ?
				BTGreaterStrategyNumber : BTLessStrategyNumber;

			PrepareSortSupportFromIndexRel(wstate->index, strategy, sortKey);
		}

		_bt_freeskey(indexScanKey);

		for (;;)
		{
			load1 = true;		/* load BTSpool next ? */
			if (itup2 == NULL)
			{
				/* spool2 exhausted: take spool1's tuple, or stop if both done */
				if (itup == NULL)
					break;
			}
			else if (itup != NULL)
			{
				/* Both spools have a tuple: compare column by column. */
				for (i = 1; i <= keysz; i++)
				{
					SortSupport entry;
					Datum		attrDatum1,
								attrDatum2;
					bool		isNull1,
								isNull2;
					int32		compare;

					entry = sortKeys + i - 1;
					attrDatum1 = index_getattr(itup, i, tupdes, &isNull1);
					attrDatum2 = index_getattr(itup2, i, tupdes, &isNull2);

					compare = ApplySortComparator(attrDatum1, isNull1,
												  attrDatum2, isNull2,
												  entry);
					if (compare > 0)
					{
						/* itup sorts after itup2: emit from spool2 */
						load1 = false;
						break;
					}
					else if (compare < 0)
						break;
					/* equal on this column: fall through to the next one */
				}
			}
			else
				load1 = false;	/* spool1 exhausted: must take from spool2 */

			/* When we see first tuple, create first index page */
			if (state == NULL)
				state = _bt_pagestate(wstate, 0);

			if (load1)
			{
				_bt_buildadd(wstate, state, itup);
				itup = tuplesort_getindextuple(btspool->sortstate, true);
			}
			else
			{
				_bt_buildadd(wstate, state, itup2);
				itup2 = tuplesort_getindextuple(btspool2->sortstate, true);
			}
		}
		pfree(sortKeys);
	}
	else
	{
		/* merge is unnecessary: drain the single spool in sorted order */
		while ((itup = tuplesort_getindextuple(btspool->sortstate,
											   true)) != NULL)
		{
			/* When we see first tuple, create first index page */
			if (state == NULL)
				state = _bt_pagestate(wstate, 0);

			_bt_buildadd(wstate, state, itup);
		}
	}

	/* Close down final pages and write the metapage */
	_bt_uppershutdown(wstate, state);

	/*
	 * If the index is WAL-logged, we must fsync it down to disk before it's
	 * safe to commit the transaction.  (For a non-WAL-logged index we don't
	 * care since the index will be uninteresting after a crash anyway.)
	 *
	 * It's obvious that we must do this when not WAL-logging the build.  It's
	 * less obvious that we have to do it even if we did WAL-log the index
	 * pages.  The reason is that since we're building outside shared buffers,
	 * a CHECKPOINT occurring during the build has no way to flush the
	 * previously written data to disk (indeed it won't know the index even
	 * exists).  A crash later on would replay WAL from the checkpoint,
	 * therefore it wouldn't replay our earlier WAL entries.  If we do not
	 * fsync those pages here, they might still not be on disk when the crash
	 * occurs.
	 */
	if (RelationNeedsWAL(wstate->index))
	{
		RelationOpenSmgr(wstate->index);
		smgrimmedsync(wstate->index->rd_smgr, MAIN_FORKNUM);
	}
}
/*
 * Deserialize a HeapTuple's data from a byte-array.
 *
 * This code is based on the binary input handling functions in copy.c.
 *
 * The wire format (produced by the matching serializer) is: a nulls
 * character-array, then for each non-null attribute either a type-specific
 * fast-path encoding or a length-prefixed blob fed to the type's binary
 * receive function.
 */
HeapTuple
DeserializeTuple(SerTupInfo * pSerInfo, StringInfo serialTup)
{
	MemoryContext oldCtxt;
	TupleDesc	tupdesc;
	HeapTuple	htup;
	int			natts;
	SerAttrInfo *attrInfo;
	uint32		attr_size;
	int			i;
	StringInfoData attr_data;
	bool		fHandled;

	AssertArg(pSerInfo != NULL);
	AssertArg(serialTup != NULL);

	tupdesc = pSerInfo->tupdesc;
	natts = tupdesc->natts;

	/*
	 * Flip to our tuple-serialization memory-context, to speed up memory
	 * reclamation operations.
	 */
	AssertState(s_tupSerMemCtxt != NULL);
	oldCtxt = MemoryContextSwitchTo(s_tupSerMemCtxt);

	/* Receive nulls character-array. */
	pq_copymsgbytes(serialTup, pSerInfo->nulls, natts);
	skipPadding(serialTup);

	/* Deserialize the non-NULL attributes of this tuple */
	initStringInfo(&attr_data);
	for (i = 0; i < natts; ++i)
	{
		attrInfo = pSerInfo->myinfo + i;

		if (pSerInfo->nulls[i])	/* NULL field. */
		{
			pSerInfo->values[i] = (Datum) 0;
			continue;
		}

		/*
		 * Assume that the data's output will be handled by the special IO
		 * code, and if not then we can handle it the slow way.
		 */
		fHandled = true;
		switch (attrInfo->atttypid)
		{
			case INT4OID:
				pSerInfo->values[i] = Int32GetDatum(stringInfoGetInt32(serialTup));
				break;

			case CHAROID:
				pSerInfo->values[i] = CharGetDatum(pq_getmsgbyte(serialTup));
				skipPadding(serialTup);
				break;

			case BPCHAROID:
			case VARCHAROID:
			case INT2VECTOROID:	/* postgres serialization logic broken, use our own */
			case OIDVECTOROID:	/* postgres serialization logic broken, use our own */
			case ANYARRAYOID:
				{
					text	   *pText;
					int			textSize;

					/* varlena payload: 4-byte length, then the raw bytes */
					textSize = stringInfoGetInt32(serialTup);

#ifdef TUPSER_SCRATCH_SPACE
					/* Reuse the per-attribute scratch buffer when it fits. */
					if (textSize + VARHDRSZ <= attrInfo->varlen_scratch_size)
						pText = (text *) attrInfo->pv_varlen_scratch;
					else
						pText = (text *) palloc(textSize + VARHDRSZ);
#else
					pText = (text *) palloc(textSize + VARHDRSZ);
#endif

					SET_VARSIZE(pText, textSize + VARHDRSZ);
					pq_copymsgbytes(serialTup, VARDATA(pText), textSize);
					skipPadding(serialTup);
					pSerInfo->values[i] = PointerGetDatum(pText);
					break;
				}

			case DATEOID:
				{
					/*
					 * TODO: I would LIKE to do something more efficient, but
					 * DateADT is not strictly limited to 4 bytes by its
					 * definition.
					 */
					DateADT		date;

					pq_copymsgbytes(serialTup, (char *) &date, sizeof(DateADT));
					skipPadding(serialTup);
					pSerInfo->values[i] = DateADTGetDatum(date);
					break;
				}

			case NUMERICOID:
				{
					/*
					 * Treat the numeric as a varlena variable, and just push
					 * the whole shebang to the output-buffer.  We don't care
					 * about the guts of the numeric.
					 */
					Numeric		num;
					int			numSize;

					numSize = stringInfoGetInt32(serialTup);

#ifdef TUPSER_SCRATCH_SPACE
					if (numSize + VARHDRSZ <= attrInfo->varlen_scratch_size)
						num = (Numeric) attrInfo->pv_varlen_scratch;
					else
						num = (Numeric) palloc(numSize + VARHDRSZ);
#else
					num = (Numeric) palloc(numSize + VARHDRSZ);
#endif

					SET_VARSIZE(num, numSize + VARHDRSZ);
					pq_copymsgbytes(serialTup, VARDATA(num), numSize);
					skipPadding(serialTup);
					pSerInfo->values[i] = NumericGetDatum(num);
					break;
				}

			case ACLITEMOID:
				{
					int			aclSize,
								k,
								cnt;
					char	   *inputstring,
							   *starsfree;

					aclSize = stringInfoGetInt32(serialTup);
					inputstring = (char *) palloc(aclSize + 1);
					starsfree = (char *) palloc(aclSize + 1);
					cnt = 0;

					pq_copymsgbytes(serialTup, inputstring, aclSize);
					skipPadding(serialTup);
					inputstring[aclSize] = '\0';

					/*
					 * Strip '*' characters before handing the string to
					 * aclitemin -- presumably they appear in the sender's
					 * aclitemout text but are rejected by aclitemin; verify
					 * against the serializer.
					 */
					for (k = 0; k < aclSize; k++)
					{
						if (inputstring[k] != '*')
						{
							starsfree[cnt] = inputstring[k];
							cnt++;
						}
					}
					starsfree[cnt] = '\0';

					pSerInfo->values[i] = DirectFunctionCall1(aclitemin, CStringGetDatum(starsfree));
					pfree(inputstring);
					break;
				}

			case 210:			/* storage-manager OID, matches serializer's case */
				{
					int			strsize;
					char	   *smgrstr;

					strsize = stringInfoGetInt32(serialTup);
					smgrstr = (char *) palloc(strsize + 1);
					pq_copymsgbytes(serialTup, smgrstr, strsize);
					skipPadding(serialTup);
					smgrstr[strsize] = '\0';

					pSerInfo->values[i] = DirectFunctionCall1(smgrin, CStringGetDatum(smgrstr));
					break;
				}

			default:
				fHandled = false;
		}

		if (fHandled)
			continue;

		/* Slow path: length-prefixed payload for the type's receive function */
		attr_size = stringInfoGetInt32(serialTup);

		/* reset attr_data to empty, and load raw data into it */
		attr_data.len = 0;
		attr_data.data[0] = '\0';
		attr_data.cursor = 0;

		appendBinaryStringInfo(&attr_data,
							   pq_getmsgbytes(serialTup, attr_size), attr_size);
		skipPadding(serialTup);

		/* Call the attribute type's binary input converter. */
		if (attrInfo->recv_finfo.fn_nargs == 1)
			pSerInfo->values[i] = FunctionCall1(&attrInfo->recv_finfo,
												PointerGetDatum(&attr_data));
		else if (attrInfo->recv_finfo.fn_nargs == 2)
			pSerInfo->values[i] = FunctionCall2(&attrInfo->recv_finfo,
												PointerGetDatum(&attr_data),
												ObjectIdGetDatum(attrInfo->recv_typio_param));
		else if (attrInfo->recv_finfo.fn_nargs == 3)
			pSerInfo->values[i] = FunctionCall3(&attrInfo->recv_finfo,
												PointerGetDatum(&attr_data),
												ObjectIdGetDatum(attrInfo->recv_typio_param),
												Int32GetDatum(tupdesc->attrs[i]->atttypmod));
		else
		{
			ereport(ERROR,
					(errcode(ERRCODE_INVALID_BINARY_REPRESENTATION),
					 errmsg("Conversion function takes %d args", attrInfo->recv_finfo.fn_nargs)));
		}

		/* Trouble if it didn't eat the whole buffer */
		if (attr_data.cursor != attr_data.len)
		{
			ereport(ERROR,
					(errcode(ERRCODE_INVALID_BINARY_REPRESENTATION),
					 errmsg("incorrect binary data format")));
		}
	}

	/*
	 * Construct the tuple from the Datums and nulls values.  NOTE: Switch
	 * out of our temporary context before we form the tuple!
	 */
	MemoryContextSwitchTo(oldCtxt);
	htup = heap_form_tuple(tupdesc, pSerInfo->values, pSerInfo->nulls);

	MemoryContextReset(s_tupSerMemCtxt);

	/* All done.  Return the result. */
	return htup;
}
/*
 * Convert a HeapTuple into a byte-sequence, and store it directly
 * into a chunklist for transmission.
 *
 * This code is based on the printtup_internal_20() function in printtup.c.
 *
 * On return, tcList holds one TC_WHOLE chunk (or TC_EMPTY for a zero-column
 * tuple), or a TC_PARTIAL_START ... TC_PARTIAL_END sequence when the tuple
 * spans multiple chunks.
 *
 * Fix: the error report in the send-function fallback previously printed
 * recv_finfo.fn_nargs (a copy-paste from the deserialize path) although the
 * branch conditions test send_finfo.fn_nargs; it now reports the send
 * function's argument count.
 */
void
SerializeTupleIntoChunks(HeapTuple tuple, SerTupInfo * pSerInfo, TupleChunkList tcList)
{
	TupleChunkListItem tcItem = NULL;
	MemoryContext oldCtxt;
	TupleDesc	tupdesc;
	int			i,
				natts;
	bool		fHandled;

	AssertArg(tcList != NULL);
	AssertArg(tuple != NULL);
	AssertArg(pSerInfo != NULL);

	tupdesc = pSerInfo->tupdesc;
	natts = tupdesc->natts;

	/* get ready to go: reset the output list */
	tcList->p_first = NULL;
	tcList->p_last = NULL;
	tcList->num_chunks = 0;
	tcList->serialized_data_length = 0;
	tcList->max_chunk_length = Gp_max_tuple_chunk_size;

	if (natts == 0)
	{
		tcItem = getChunkFromCache(&pSerInfo->chunkCache);
		if (tcItem == NULL)
		{
			ereport(FATAL,
					(errcode(ERRCODE_OUT_OF_MEMORY),
					 errmsg("Could not allocate space for first chunk item in new chunk list.")));
		}

		/* TC_EMPTY is just one chunk */
		SetChunkType(tcItem->chunk_data, TC_EMPTY);
		tcItem->chunk_length = TUPLE_CHUNK_HEADER_SIZE;
		appendChunkToTCList(tcList, tcItem);
		return;
	}

	tcItem = getChunkFromCache(&pSerInfo->chunkCache);
	if (tcItem == NULL)
	{
		ereport(FATAL,
				(errcode(ERRCODE_OUT_OF_MEMORY),
				 errmsg("Could not allocate space for first chunk item in new chunk list.")));
	}

	/* assume that we'll take a single chunk */
	SetChunkType(tcItem->chunk_data, TC_WHOLE);
	tcItem->chunk_length = TUPLE_CHUNK_HEADER_SIZE;
	appendChunkToTCList(tcList, tcItem);

	AssertState(s_tupSerMemCtxt != NULL);

	if (is_heaptuple_memtuple(tuple))
	{
		/* MemTuples are already in wire-friendly form: send verbatim. */
		addByteStringToChunkList(tcList, (char *) tuple, memtuple_get_size((MemTuple) tuple, NULL), &pSerInfo->chunkCache);
		addPadding(tcList, &pSerInfo->chunkCache, memtuple_get_size((MemTuple) tuple, NULL));
	}
	else
	{
		TupSerHeader tsh;
		unsigned int datalen;
		unsigned int nullslen;
		HeapTupleHeader t_data = tuple->t_data;

		datalen = tuple->t_len - t_data->t_hoff;
		if (HeapTupleHasNulls(tuple))
			nullslen = BITMAPLEN(HeapTupleHeaderGetNatts(t_data));
		else
			nullslen = 0;

		/* Header records total serialized size, attr count and infomask. */
		tsh.tuplen = sizeof(TupSerHeader) + TYPEALIGN(TUPLE_CHUNK_ALIGN, nullslen) + datalen;
		tsh.natts = HeapTupleHeaderGetNatts(t_data);
		tsh.infomask = t_data->t_infomask;

		addByteStringToChunkList(tcList, (char *) &tsh, sizeof(TupSerHeader), &pSerInfo->chunkCache);

		/* If we don't have any attributes which have been toasted, we
		 * can be very very simple: just send the raw data. */
		if ((tsh.infomask & HEAP_HASEXTERNAL) == 0)
		{
			if (nullslen)
			{
				addByteStringToChunkList(tcList, (char *) t_data->t_bits, nullslen, &pSerInfo->chunkCache);
				addPadding(tcList, &pSerInfo->chunkCache, nullslen);
			}

			addByteStringToChunkList(tcList, (char *) t_data + t_data->t_hoff, datalen, &pSerInfo->chunkCache);
			addPadding(tcList, &pSerInfo->chunkCache, datalen);
		}
		else
		{
			/* We have to be more careful when we have tuples that
			 * have been toasted.  Ideally we'd like to send the
			 * untoasted attributes in as "raw" a format as possible
			 * but that makes rebuilding the tuple harder. */
			oldCtxt = MemoryContextSwitchTo(s_tupSerMemCtxt);

			/* deconstruct the tuple (faster than a heap_getattr loop) */
			heap_deform_tuple(tuple, tupdesc, pSerInfo->values, pSerInfo->nulls);

			MemoryContextSwitchTo(oldCtxt);

			/* Send the nulls character-array. */
			addByteStringToChunkList(tcList, pSerInfo->nulls, natts, &pSerInfo->chunkCache);
			addPadding(tcList, &pSerInfo->chunkCache, natts);

			/*
			 * send the attributes of this tuple: NOTE anything which allocates
			 * temporary space (e.g. could result in a PG_DETOAST_DATUM) should be
			 * executed with the memory context set to s_tupSerMemCtxt
			 */
			for (i = 0; i < natts; ++i)
			{
				SerAttrInfo *attrInfo = pSerInfo->myinfo + i;
				Datum		origattr = pSerInfo->values[i],
							attr;
				bytea	   *outputbytes = 0;

				/* skip null attributes (already taken care of above) */
				if (pSerInfo->nulls[i])
					continue;

				/*
				 * If we have a toasted datum, forcibly detoast it here to avoid
				 * memory leakage: we want to force the detoast allocation(s) to
				 * happen in our reset-able serialization context.
				 */
				if (attrInfo->typisvarlena)
				{
					oldCtxt = MemoryContextSwitchTo(s_tupSerMemCtxt);
					/* we want to detoast but leave compressed, if
					 * possible, but we have to handle varlena
					 * attributes (and others ?) differently than we
					 * currently do (first step is to use
					 * heap_tuple_fetch_attr() instead of
					 * PG_DETOAST_DATUM()). */
					attr = PointerGetDatum(PG_DETOAST_DATUM(origattr));
					MemoryContextSwitchTo(oldCtxt);
				}
				else
					attr = origattr;

				/*
				 * Assume that the data's output will be handled by the special IO
				 * code, and if not then we can handle it the slow way.
				 */
				fHandled = true;
				switch (attrInfo->atttypid)
				{
					case INT4OID:
						addInt32ToChunkList(tcList, DatumGetInt32(attr), &pSerInfo->chunkCache);
						break;

					case CHAROID:
						addCharToChunkList(tcList, DatumGetChar(attr), &pSerInfo->chunkCache);
						addPadding(tcList, &pSerInfo->chunkCache, 1);
						break;

					case BPCHAROID:
					case VARCHAROID:
					case INT2VECTOROID:	/* postgres serialization logic broken, use our own */
					case OIDVECTOROID:	/* postgres serialization logic broken, use our own */
					case ANYARRAYOID:
						{
							text	   *pText = DatumGetTextP(attr);
							int32		textSize = VARSIZE(pText) - VARHDRSZ;

							addInt32ToChunkList(tcList, textSize, &pSerInfo->chunkCache);
							addByteStringToChunkList(tcList, (char *) VARDATA(pText), textSize, &pSerInfo->chunkCache);
							addPadding(tcList, &pSerInfo->chunkCache, textSize);
							break;
						}

					case DATEOID:
						{
							DateADT		date = DatumGetDateADT(attr);

							addByteStringToChunkList(tcList, (char *) &date, sizeof(DateADT), &pSerInfo->chunkCache);
							break;
						}

					case NUMERICOID:
						{
							/*
							 * Treat the numeric as a varlena variable, and just push
							 * the whole shebang to the output-buffer.  We don't care
							 * about the guts of the numeric.
							 */
							Numeric		num = DatumGetNumeric(attr);
							int32		numSize = VARSIZE(num) - VARHDRSZ;

							addInt32ToChunkList(tcList, numSize, &pSerInfo->chunkCache);
							addByteStringToChunkList(tcList, (char *) VARDATA(num), numSize, &pSerInfo->chunkCache);
							addPadding(tcList, &pSerInfo->chunkCache, numSize);
							break;
						}

					case ACLITEMOID:
						{
							AclItem    *aip = DatumGetAclItemP(attr);
							char	   *outputstring;
							int32		aclSize;

							outputstring = DatumGetCString(DirectFunctionCall1(aclitemout,
																			   PointerGetDatum(aip)));

							aclSize = strlen(outputstring);
							addInt32ToChunkList(tcList, aclSize, &pSerInfo->chunkCache);
							addByteStringToChunkList(tcList, outputstring, aclSize, &pSerInfo->chunkCache);
							addPadding(tcList, &pSerInfo->chunkCache, aclSize);
							break;
						}

					case 210:	/* storage manager */
						{
							char	   *smgrstr;
							int32		strsize;

							smgrstr = DatumGetCString(DirectFunctionCall1(smgrout, 0));
							strsize = strlen(smgrstr);

							addInt32ToChunkList(tcList, strsize, &pSerInfo->chunkCache);
							addByteStringToChunkList(tcList, smgrstr, strsize, &pSerInfo->chunkCache);
							addPadding(tcList, &pSerInfo->chunkCache, strsize);
							break;
						}

					default:
						fHandled = false;
				}

				if (fHandled)
					continue;

				/*
				 * the FunctionCall2 call into the send function may result in some
				 * allocations which we'd like to have contained by our reset-able
				 * context
				 */
				oldCtxt = MemoryContextSwitchTo(s_tupSerMemCtxt);

				/* Call the attribute type's binary send converter. */
				if (attrInfo->send_finfo.fn_nargs == 1)
					outputbytes = DatumGetByteaP(FunctionCall1(&attrInfo->send_finfo,
															   attr));
				else if (attrInfo->send_finfo.fn_nargs == 2)
					outputbytes = DatumGetByteaP(FunctionCall2(&attrInfo->send_finfo,
															   attr,
															   ObjectIdGetDatum(attrInfo->send_typio_param)));
				else if (attrInfo->send_finfo.fn_nargs == 3)
					outputbytes = DatumGetByteaP(FunctionCall3(&attrInfo->send_finfo,
															   attr,
															   ObjectIdGetDatum(attrInfo->send_typio_param),
															   Int32GetDatum(tupdesc->attrs[i]->atttypmod)));
				else
				{
					/* report the send function's arg count (was recv_finfo) */
					ereport(ERROR,
							(errcode(ERRCODE_INVALID_BINARY_REPRESENTATION),
							 errmsg("Conversion function takes %d args", attrInfo->send_finfo.fn_nargs)));
				}

				MemoryContextSwitchTo(oldCtxt);

				/* We assume the result will not have been toasted */
				addInt32ToChunkList(tcList, VARSIZE(outputbytes) - VARHDRSZ, &pSerInfo->chunkCache);
				addByteStringToChunkList(tcList, VARDATA(outputbytes),
										 VARSIZE(outputbytes) - VARHDRSZ, &pSerInfo->chunkCache);
				addPadding(tcList, &pSerInfo->chunkCache, VARSIZE(outputbytes) - VARHDRSZ);

				/*
				 * this was allocated in our reset-able context, but we *are* done
				 * with it; and for tuples with several large columns it'd be nice to
				 * free the memory back to the context
				 */
				pfree(outputbytes);
			}

			MemoryContextReset(s_tupSerMemCtxt);
		}
	}

	/*
	 * if we have more than 1 chunk we have to set the chunk types on our
	 * first chunk and last chunk
	 */
	if (tcList->num_chunks > 1)
	{
		TupleChunkListItem first,
					last;

		first = tcList->p_first;
		last = tcList->p_last;

		Assert(first != NULL);
		Assert(first != last);
		Assert(last != NULL);

		SetChunkType(first->chunk_data, TC_PARTIAL_START);
		SetChunkType(last->chunk_data, TC_PARTIAL_END);

		/*
		 * any intervening chunks are already set to TC_PARTIAL_MID when
		 * allocated
		 */
	}

	return;
}
/* --------------------------------
 *		InitPostgres
 *
 * Initialize one backend process: exception handling, the memory and
 * portal managers, shared memory and semaphores, the buffer manager, the
 * database directory, the transaction system, caches, and the user id.
 * Must run exactly once per process before any query processing; the
 * ordering of the steps below is load-bearing (see inline notes).
 * --------------------------------
 */
void
InitPostgres(char *name)		/* database name */
{
	bool		bootstrap;		/* true if BootstrapProcessing */

	/* ----------------
	 *	see if we're running in BootstrapProcessing mode
	 * ----------------
	 */
	bootstrap = IsBootstrapProcessingMode();

	/* ----------------
	 *	turn on the exception handler.  Note: we cannot use elog, Assert,
	 *	AssertState, etc. until after exception handling is on.
	 * ----------------
	 */
	EnableExceptionHandling(true);

	/* ----------------
	 *	A stupid check to make sure we don't call this more than once.
	 *	But things like ReinitPostgres() get around this by just diddling
	 *	the PostgresIsInitialized flag.
	 * ----------------
	 */
	AssertState(!PostgresIsInitialized);

	/* ----------------
	 *	Memory system initialization.
	 *	(we may call palloc after EnableMemoryContext())
	 *
	 *	Note EnableMemoryContext() must happen before EnablePortalManager().
	 * ----------------
	 */
	EnableMemoryContext(true);	/* initializes the "top context" */
	EnablePortalManager(true);	/* memory for portal/transaction stuff */

	/* ----------------
	 *	initialize the backend local portal stack used by
	 *	internal PQ function calls.  see src/lib/libpq/be-dumpdata.c
	 *	This is different from the "portal manager" so this goes here.
	 *	-cim 2/12/91
	 * ----------------
	 */
	be_portalinit();

	/* ----------------
	 *	 attach to shared memory and semaphores, and initialize our
	 *	 input/output/debugging file descriptors.
	 * ----------------
	 */
	InitCommunication();
	InitStdio();

	/*
	 * initialize the local buffer manager
	 */
	InitLocalBuffer();

	/* Register a flush at exit unless transactions flush eagerly anyway. */
	if (!TransactionFlushEnabled())
		on_exitpg(FlushBufferPool, (caddr_t) NULL);

	/* ----------------
	 *	check for valid "meta gunk" (??? -cim 10/5/90) and change to
	 *	database directory.
	 *
	 *	Note:  DatabaseName, MyDatabaseName, and DatabasePath are all
	 *	initialized with DatabaseMetaGunkIsConsistent(), strncpy() and
	 *	DoChdirAndInitDatabase() below!  XXX clean this crap up!
	 *	-cim 10/5/90
	 * ----------------
	 */
	{
		char		myPath[MAXPGPATH] = ".";	/* DatabasePath points here! */

		/* ----------------
		 *	DatabaseMetaGunkIsConsistent fills in myPath, but what about
		 *	when bootstrap or Noversion is true??  -cim 10/5/90
		 * ----------------
		 */
		if (!bootstrap &&
			!DatabaseMetaGunkIsConsistent(name, myPath) &&
			!Noversion)
		{
			elog(NOTICE, "InitPostgres: could not locate valid PG_VERSION\n");
			elog(NOTICE, "files for %s and %s.", DataDir, name);
			elog(FATAL, "Have you run initdb/createdb and set PGDATA properly?");
		}

		/* ----------------
		 *	ok, we've figured out myName and myPath, now save these
		 *	and chdir to myPath.
		 * ----------------
		 */
		DoChdirAndInitDatabaseNameAndPath(name, myPath);
	}

	/* ********************************
	 *	code after this point assumes we are in the proper directory!
	 * ********************************
	 */

	/* ----------------
	 *	initialize the database id used for system caches and lock tables
	 * ----------------
	 */
	InitMyDatabaseId();

	smgrinit();

	/* ----------------
	 *	initialize the transaction system and the relation descriptor
	 *	cache.	Note we have to make certain the lock manager is off while
	 *	we do this.
	 * ----------------
	 */
	AmiTransactionOverride(IsBootstrapProcessingMode());
	LockDisable(true);

	/*
	 * Part of the initialization processing done here sets a read
	 * lock on pg_log.  Since locking is disabled the set doesn't have
	 * intended effect of locking out writers, but this is ok, since
	 * we only lock it to examine AMI transaction status, and this is
	 * never written after initdb is done. -mer 15 June 1992
	 */
	RelationInitialize();		/* pre-allocated reldescs created here */
	InitializeTransactionSystem();	/* pg_log,etc init/crash recovery here */

	LockDisable(false);

	/* ----------------
	 *	anyone knows what this does?  something having to do with
	 *	system catalog cache invalidation in the case of multiple
	 *	backends, I think -cim 10/3/90
	 *	Sets up MyBackendId a unique backend identifier.
	 * ----------------
	 */
	InitSharedInvalidationState();

	/* ----------------
	 *	Set up a per backend process in shared memory.	Must be done after
	 *	InitSharedInvalidationState() as it relies on MyBackendId being
	 *	initialized already.  XXX -mer 11 Aug 1991
	 * ----------------
	 */
	InitProcess(PostgresIpcKey);

	/* Sanity-check the backend id handed out above. */
	if (MyBackendId > MaxBackendId || MyBackendId <= 0)
	{
		elog(FATAL, "cinit2: bad backend id %d (%d)",
			 MyBackendTag,
			 MyBackendId);
	}

	/* ----------------
	 *	initialize the access methods.
	 * ----------------
	 */
	initam();

	/* ----------------
	 *	initialize all the system catalog caches.
	 * ----------------
	 */
	zerocaches();
	InitCatalogCache();

	/* ----------------
	 *	set ourselves to the proper user id and figure out our postgres
	 *	user id.  If we ever add security so that we check for valid
	 *	postgres users, we might do it here.
	 * ----------------
	 */
	InitUserid();

	/* ----------------
	 *	ok, all done, now let's make sure we don't do it again.
	 * ----------------
	 */
	PostgresIsInitialized = true;
/*	  on_exitpg(DestroyLocalRelList, (caddr_t) NULL); */

	/* ----------------
	 *	Done with "InitPostgres", now change to NormalProcessing unless
	 *	we're in BootstrapProcessing mode.
	 * ----------------
	 */
	if (!bootstrap)
		SetProcessingMode(NormalProcessing);
/*	  if (testFlag || lockingOff) */
	if (lockingOff)
		LockDisable(true);
}
/*
 * execMotionUnsortedReceiver
 *		RECEIVER LOGIC: pull one tuple for this motion node from the
 *		interconnect (any route) and return it in the node's result slot.
 *
 * Returns NULL on end-of-stream or when a stop has been requested (in which
 * case a stop message is sent to the motion layer instead).
 *
 * Fix: the CDB_MOTION_DEBUG trace used elog(DEBUG3, buf.data), passing
 * tuple-derived text as the format string -- a '%' in the data would be
 * misinterpreted as a conversion specifier.  Per PostgreSQL elog
 * conventions it now uses a constant "%s" format.
 */
static TupleTableSlot *
execMotionUnsortedReceiver(MotionState * node)
{
	/* RECEIVER LOGIC */
	TupleTableSlot *slot;
	HeapTuple	tuple;
	Motion	   *motion = (Motion *) node->ps.plan;
	ReceiveReturnCode recvRC;

	/* Only the motion types this routine knows how to receive. */
	AssertState(motion->motionType == MOTIONTYPE_HASH ||
				(motion->motionType == MOTIONTYPE_EXPLICIT && motion->segidColIdx > 0) ||
				(motion->motionType == MOTIONTYPE_FIXED && motion->numOutputSegs <= 1));

	Assert(node->ps.state->motionlayer_context);
	Assert(node->ps.state->interconnect_context);

	/* Caller asked us to stop: tell the senders, don't fetch anything. */
	if (node->stopRequested)
	{
		SendStopMessage(node->ps.state->motionlayer_context,
						node->ps.state->interconnect_context,
						motion->motionID);
		return NULL;
	}

	recvRC = RecvTupleFrom(node->ps.state->motionlayer_context,
						   node->ps.state->interconnect_context,
						   motion->motionID, &tuple, ANY_ROUTE);

	if (recvRC == END_OF_STREAM)
	{
#ifdef CDB_MOTION_DEBUG
		if (gp_log_interconnect >= GPVARS_VERBOSITY_DEBUG)
			elog(DEBUG4, "motionID=%d saw end of stream", motion->motionID);
#endif
		/* At EOS every received tuple must have been handed to the parent. */
		Assert(node->numTuplesFromAMS == node->numTuplesToParent);
		Assert(node->numTuplesFromChild == 0);
		Assert(node->numTuplesToAMS == 0);
		return NULL;
	}

	node->numTuplesFromAMS++;
	node->numTuplesToParent++;

	/* store it in our result slot and return this. */
	slot = node->ps.ps_ResultTupleSlot;
	slot = ExecStoreGenericTuple(tuple, slot, true /* shouldFree */ );

#ifdef CDB_MOTION_DEBUG
	/* Trace the first few tuples delivered to the parent. */
	if (node->numTuplesToParent <= 20)
	{
		StringInfoData buf;

		initStringInfo(&buf);
		appendStringInfo(&buf, " motion%-3d rcv %5d.",
						 motion->motionID,
						 node->numTuplesToParent);
		formatTuple(&buf, tuple, ExecGetResultType(&node->ps),
					node->outputFunArray);
		/* constant format string; buf.data may contain '%' */
		elog(DEBUG3, "%s", buf.data);
		pfree(buf.data);
	}
#endif

	return slot;
}
/*
 * execMotionSender
 *		SENDER side of a Motion node: repeatedly pull tuples from the child
 *		plan and push them onto the interconnect until the child is
 *		exhausted (then send end-of-stream) or a stop is requested.
 *
 * Always returns NULL -- a sending Motion produces no tuples for its parent.
 */
static TupleTableSlot *
execMotionSender(MotionState * node)
{
	/* SENDER LOGIC */
	TupleTableSlot *outerTupleSlot;
	PlanState  *outerNode;
	Motion	   *motion = (Motion *) node->ps.plan;
	bool		done = false;

#ifdef MEASURE_MOTION_TIME
	struct timeval time1;
	struct timeval time2;

	gettimeofday(&time1, NULL);
#endif

	AssertState(motion->motionType == MOTIONTYPE_HASH ||
				(motion->motionType == MOTIONTYPE_EXPLICIT && motion->segidColIdx > 0) ||
				(motion->motionType == MOTIONTYPE_FIXED && motion->numOutputSegs <= 1));
	Assert(node->ps.state->interconnect_context);

	while (!done)
	{
		/* grab TupleTableSlot from our child. */
		outerNode = outerPlanState(node);
		outerTupleSlot = ExecProcNode(outerNode);

#ifdef MEASURE_MOTION_TIME
		/*
		 * Charge the interval since the last mark to "other" time (time
		 * spent executing the child subtree), then normalize tv_usec back
		 * into [0, 1000000).
		 */
		gettimeofday(&time2, NULL);

		node->otherTime.tv_sec += time2.tv_sec - time1.tv_sec;
		node->otherTime.tv_usec += time2.tv_usec - time1.tv_usec;

		while (node->otherTime.tv_usec < 0)
		{
			node->otherTime.tv_usec += 1000000;
			node->otherTime.tv_sec--;
		}

		while (node->otherTime.tv_usec >= 1000000)
		{
			node->otherTime.tv_usec -= 1000000;
			node->otherTime.tv_sec++;
		}
#endif

		/*
		 * NOTE(review): 'done' is always false here (it is the loop guard
		 * and nothing above sets it), so only TupIsNull() actually decides
		 * this branch; the 'done ||' appears to be historical.
		 */
		if (done || TupIsNull(outerTupleSlot))
		{
			/* Child exhausted: notify receivers that the stream is over. */
			doSendEndOfStream(motion, node);
			done = true;
		}
		else
		{
			doSendTuple(motion, node, outerTupleSlot);
			/* doSendTuple() may have set node->stopRequested as a side-effect */

			Gpmon_M_Incr_Rows_Out(GpmonPktFromMotionState(node));
			setMotionStatsForGpmon(node);
			CheckSendPlanStateGpmonPkt(&node->ps);

			if (node->stopRequested)
			{
				elog(gp_workfile_caching_loglevel, "Motion initiating Squelch walker");
				/* propagate stop notification to our children */
				ExecSquelchNode(outerNode);
				done = true;
			}
		}

#ifdef MEASURE_MOTION_TIME
		/*
		 * Charge the send work done above to motion time, again keeping
		 * tv_usec normalized into [0, 1000000).
		 */
		gettimeofday(&time1, NULL);

		node->motionTime.tv_sec += time1.tv_sec - time2.tv_sec;
		node->motionTime.tv_usec += time1.tv_usec - time2.tv_usec;

		while (node->motionTime.tv_usec < 0)
		{
			node->motionTime.tv_usec += 1000000;
			node->motionTime.tv_sec--;
		}

		while (node->motionTime.tv_usec >= 1000000)
		{
			node->motionTime.tv_usec -= 1000000;
			node->motionTime.tv_sec++;
		}
#endif
	}

	/* Unless we were squelched, everything the child produced must have been sent. */
	Assert(node->stopRequested || node->numTuplesFromChild == node->numTuplesToAMS);

	/* nothing else to send out, so we return NULL up the tree. */
	return NULL;
}
/*
 * Deserialize a HeapTuple's data from a byte-array.
 *
 * This code is based on the binary input handling functions in copy.c.
 *
 * pSerInfo describes the tuple layout (typlen/typbyval per attribute) and
 * supplies scratch values[]/nulls[] arrays; serialTup is the wire buffer
 * positioned at the start of the tuple payload.  Raises ERROR on malformed
 * input, including trailing bytes left in the buffer.
 */
HeapTuple
DeserializeTuple(SerTupInfo * pSerInfo, StringInfo serialTup)
{
	MemoryContext oldCtxt;
	TupleDesc	tupdesc;
	HeapTuple	htup;
	int			natts;
	SerAttrInfo *attrInfo;
	int			i;

	AssertArg(pSerInfo != NULL);
	AssertArg(serialTup != NULL);

	tupdesc = pSerInfo->tupdesc;
	natts = tupdesc->natts;

	/*
	 * Flip to our tuple-serialization memory-context, to speed up memory
	 * reclamation operations.
	 */
	AssertState(s_tupSerMemCtxt != NULL);
	oldCtxt = MemoryContextSwitchTo(s_tupSerMemCtxt);

	/* Receive nulls character-array. */
	pq_copymsgbytes(serialTup, pSerInfo->nulls, natts);
	skipPadding(serialTup);

	/* Deserialize the non-NULL attributes of this tuple */
	for (i = 0; i < natts; ++i)
	{
		attrInfo = pSerInfo->myinfo + i;

		if (pSerInfo->nulls[i])	/* NULL field. */
		{
			pSerInfo->values[i] = (Datum) 0;
			continue;
		}

		if (attrInfo->typlen == -1)
		{
			/* Varlena: the sender transmitted payload length, then payload. */
			int32		sz;
			struct varlena *p;

			/* Read length first */
			pq_copymsgbytes(serialTup, (char *) &sz, sizeof(int32));
			if (sz < 0)
				elog(ERROR, "invalid length received for a varlen Datum");

			p = palloc(sz + VARHDRSZ);

			pq_copymsgbytes(serialTup, VARDATA(p), sz);
			SET_VARSIZE(p, sz + VARHDRSZ);

			pSerInfo->values[i] = PointerGetDatum(p);
		}
		else if (attrInfo->typlen == -2)
		{
			int32		sz;
			char	   *p;

			/* CString, with terminating '\0' included */

			/* Read length first */
			pq_copymsgbytes(serialTup, (char *) &sz, sizeof(int32));
			if (sz < 0)
				elog(ERROR, "invalid length received for a CString");

			/*
			 * sz already counts the terminating '\0', and a CString carries
			 * no varlena header, so sz bytes is exactly enough.  (The
			 * previous code allocated VARHDRSZ extra bytes here, which were
			 * simply wasted.)
			 */
			p = palloc(sz);

			/* Then data */
			pq_copymsgbytes(serialTup, p, sz);

			pSerInfo->values[i] = CStringGetDatum(p);
		}
		else if (attrInfo->typbyval)
		{
			/* Read a whole Datum */
			pq_copymsgbytes(serialTup, (char *) &(pSerInfo->values[i]), sizeof(Datum));
		}
		else
		{
			/* fixed width, pass-by-ref */
			char	   *p = palloc(attrInfo->typlen);

			pq_copymsgbytes(serialTup, p, attrInfo->typlen);
			pSerInfo->values[i] = PointerGetDatum(p);
		}
	}

	/*
	 * Construct the tuple from the Datums and nulls values.  NOTE: Switch
	 * out of our temporary context before we form the tuple!  heap_form_tuple
	 * copies the attribute data, so resetting s_tupSerMemCtxt afterwards is
	 * safe.
	 */
	MemoryContextSwitchTo(oldCtxt);
	htup = heap_form_tuple(tupdesc, pSerInfo->values, pSerInfo->nulls);
	MemoryContextReset(s_tupSerMemCtxt);

	/* Trouble if it didn't eat the whole buffer */
	if (serialTup->cursor != serialTup->len)
		ereport(ERROR, (errcode(ERRCODE_INVALID_BINARY_REPRESENTATION),
						errmsg("incorrect binary data format")));

	/* All done.  Return the result. */
	return htup;
}
/*
 * Convert a HeapTuple into a byte-sequence, and store it directly
 * into a chunklist for transmission.
 *
 * This code is based on the printtup_internal_20() function in printtup.c.
 *
 * The chunk list in tcList is reset and rebuilt here.  MemTuples are sent
 * verbatim; HeapTuples without toasted attributes are sent as header +
 * null-bitmap + raw data; HeapTuples with toasted attributes are deformed
 * and each attribute is detoasted and serialized individually.
 */
void
SerializeTupleIntoChunks(GenericTuple gtuple, SerTupInfo *pSerInfo, TupleChunkList tcList)
{
	TupleChunkListItem tcItem = NULL;
	MemoryContext oldCtxt;
	TupleDesc	tupdesc;
	int			i,
				natts;

	AssertArg(tcList != NULL);
	AssertArg(gtuple != NULL);
	AssertArg(pSerInfo != NULL);

	tupdesc = pSerInfo->tupdesc;
	natts = tupdesc->natts;

	/* get ready to go */
	tcList->p_first = NULL;
	tcList->p_last = NULL;
	tcList->num_chunks = 0;
	tcList->serialized_data_length = 0;
	tcList->max_chunk_length = Gp_max_tuple_chunk_size;

	if (natts == 0)
	{
		tcItem = getChunkFromCache(&pSerInfo->chunkCache);
		if (tcItem == NULL)
		{
			ereport(FATAL, (errcode(ERRCODE_OUT_OF_MEMORY),
							errmsg("Could not allocate space for first chunk item in new chunk list.")));
		}

		/* TC_EMPTY is just one chunk */
		SetChunkType(tcItem->chunk_data, TC_EMPTY);
		tcItem->chunk_length = TUPLE_CHUNK_HEADER_SIZE;
		appendChunkToTCList(tcList, tcItem);

		return;
	}

	tcItem = getChunkFromCache(&pSerInfo->chunkCache);
	if (tcItem == NULL)
	{
		/*
		 * NOTE(review): this message still says "first chunk item" -- it was
		 * copy-pasted from the natts == 0 path above.  Left byte-identical
		 * to preserve existing log output.
		 */
		ereport(FATAL, (errcode(ERRCODE_OUT_OF_MEMORY),
						errmsg("Could not allocate space for first chunk item in new chunk list.")));
	}

	/* assume that we'll take a single chunk */
	SetChunkType(tcItem->chunk_data, TC_WHOLE);
	tcItem->chunk_length = TUPLE_CHUNK_HEADER_SIZE;
	appendChunkToTCList(tcList, tcItem);

	AssertState(s_tupSerMemCtxt != NULL);

	if (is_memtuple(gtuple))
	{
		/* MemTuples are self-contained: ship the raw bytes plus padding. */
		MemTuple	mtuple = (MemTuple) gtuple;

		addByteStringToChunkList(tcList, (char *) mtuple,
								 memtuple_get_size(mtuple),
								 &pSerInfo->chunkCache);
		addPadding(tcList, &pSerInfo->chunkCache, memtuple_get_size(mtuple));
	}
	else
	{
		HeapTuple	tuple = (HeapTuple) gtuple;
		HeapTupleHeader t_data = tuple->t_data;
		TupSerHeader tsh;

		unsigned int datalen;
		unsigned int nullslen;

		datalen = tuple->t_len - t_data->t_hoff;
		if (HeapTupleHasNulls(tuple))
			nullslen = BITMAPLEN(HeapTupleHeaderGetNatts(t_data));
		else
			nullslen = 0;

		/* Wire header: total length, attribute count, and infomask flags. */
		tsh.tuplen = sizeof(TupSerHeader) +
			TYPEALIGN(TUPLE_CHUNK_ALIGN, nullslen) + datalen;
		tsh.natts = HeapTupleHeaderGetNatts(t_data);
		tsh.infomask = t_data->t_infomask;

		addByteStringToChunkList(tcList, (char *) &tsh, sizeof(TupSerHeader),
								 &pSerInfo->chunkCache);

		/*
		 * If we don't have any attributes which have been toasted, we can be
		 * very very simple: just send the raw data.
		 */
		if ((tsh.infomask & HEAP_HASEXTERNAL) == 0)
		{
			if (nullslen)
			{
				addByteStringToChunkList(tcList, (char *) t_data->t_bits, nullslen,
										 &pSerInfo->chunkCache);
				addPadding(tcList, &pSerInfo->chunkCache, nullslen);
			}

			addByteStringToChunkList(tcList, (char *) t_data + t_data->t_hoff, datalen,
									 &pSerInfo->chunkCache);
			addPadding(tcList, &pSerInfo->chunkCache, datalen);
		}
		else
		{
			/*
			 * We have to be more careful when we have tuples that have been
			 * toasted.  Ideally we'd like to send the untoasted attributes
			 * in as "raw" a format as possible but that makes rebuilding the
			 * tuple harder.
			 */
			oldCtxt = MemoryContextSwitchTo(s_tupSerMemCtxt);

			/* deconstruct the tuple (faster than a heap_getattr loop) */
			heap_deform_tuple(tuple, tupdesc, pSerInfo->values, pSerInfo->nulls);
			MemoryContextSwitchTo(oldCtxt);

			/* Send the nulls character-array. */
			addByteStringToChunkList(tcList, pSerInfo->nulls, natts,
									 &pSerInfo->chunkCache);
			addPadding(tcList, &pSerInfo->chunkCache, natts);

			/*
			 * send the attributes of this tuple: NOTE anything which
			 * allocates temporary space (e.g. could result in a
			 * PG_DETOAST_DATUM) should be executed with the memory context
			 * set to s_tupSerMemCtxt
			 */
			for (i = 0; i < natts; ++i)
			{
				SerAttrInfo *attrInfo = pSerInfo->myinfo + i;
				Datum		origattr = pSerInfo->values[i];

				/* skip null attributes (already taken care of above) */
				if (pSerInfo->nulls[i])
					continue;

				if (attrInfo->typlen == -1)
				{
					Datum		attr;
					int32		sz;
					char	   *data;

					/*
					 * If we have a toasted datum, forcibly detoast it here
					 * to avoid memory leakage: we want to force the detoast
					 * allocation(s) to happen in our reset-able
					 * serialization context.
					 */
					oldCtxt = MemoryContextSwitchTo(s_tupSerMemCtxt);
					attr = PointerGetDatum(PG_DETOAST_DATUM_PACKED(origattr));
					MemoryContextSwitchTo(oldCtxt);

					sz = VARSIZE_ANY_EXHDR(attr);
					data = VARDATA_ANY(attr);

					/* Send length first, then data */
					addInt32ToChunkList(tcList, sz, &pSerInfo->chunkCache);
					addByteStringToChunkList(tcList, data, sz, &pSerInfo->chunkCache);
					addPadding(tcList, &pSerInfo->chunkCache, sz);
				}
				else if (attrInfo->typlen == -2)
				{
					int32		sz;
					char	   *data;

					/* CString, we would send the string with the terminating '\0' */
					data = DatumGetCString(origattr);
					sz = strlen(data) + 1;

					/* Send length first, then data */
					addInt32ToChunkList(tcList, sz, &pSerInfo->chunkCache);
					addByteStringToChunkList(tcList, data, sz, &pSerInfo->chunkCache);
					addPadding(tcList, &pSerInfo->chunkCache, sz);
				}
				else if (attrInfo->typbyval)
				{
					/*
					 * We send a full-width Datum for all pass-by-value
					 * types, regardless of the actual size.
					 */
					addByteStringToChunkList(tcList, (char *) &origattr, sizeof(Datum),
											 &pSerInfo->chunkCache);
					addPadding(tcList, &pSerInfo->chunkCache, sizeof(Datum));
				}
				else
				{
					/*
					 * Fixed-width, pass-by-reference: send the raw bytes.
					 * (A dead "attr = origattr;" store was removed here;
					 * attr was never read after it.)
					 */
					addByteStringToChunkList(tcList, DatumGetPointer(origattr),
											 attrInfo->typlen, &pSerInfo->chunkCache);
					addPadding(tcList, &pSerInfo->chunkCache, attrInfo->typlen);
				}
			}

			/* Release all detoast/deform scratch allocations at once. */
			MemoryContextReset(s_tupSerMemCtxt);
		}
	}

	/*
	 * if we have more than 1 chunk we have to set the chunk types on our
	 * first chunk and last chunk
	 */
	if (tcList->num_chunks > 1)
	{
		TupleChunkListItem first,
					last;

		first = tcList->p_first;
		last = tcList->p_last;

		Assert(first != NULL);
		Assert(first != last);
		Assert(last != NULL);

		SetChunkType(first->chunk_data, TC_PARTIAL_START);
		SetChunkType(last->chunk_data, TC_PARTIAL_END);

		/*
		 * any intervening chunks are already set to TC_PARTIAL_MID when
		 * allocated
		 */
	}

	return;
}
/*
 * Convert RecordCache into a byte-sequence, and store it directly
 * into a chunklist for transmission.
 *
 * This code is based on the printtup_internal_20() function in printtup.c.
 *
 * The payload is tagged with magic natts/infomask values so the receiver
 * can distinguish it from an ordinary serialized tuple.
 */
void
SerializeRecordCacheIntoChunks(SerTupInfo *pSerInfo,
							   TupleChunkList tcList,
							   MotionConn *conn)
{
	TupleChunkListItem chunkItem = NULL;
	MemoryContext savedCtxt;
	TupSerHeader header;
	List	   *recordTypes = NULL;
	int			serializedLen = -1;
	char	   *serializedBuf = NULL;

	AssertArg(tcList != NULL);
	AssertArg(pSerInfo != NULL);

	/* Reset the output chunk list to a pristine, empty state. */
	tcList->p_first = NULL;
	tcList->p_last = NULL;
	tcList->num_chunks = 0;
	tcList->serialized_data_length = 0;
	tcList->max_chunk_length = Gp_max_tuple_chunk_size;

	chunkItem = getChunkFromCache(&pSerInfo->chunkCache);
	if (chunkItem == NULL)
	{
		ereport(FATAL, (errcode(ERRCODE_OUT_OF_MEMORY),
						errmsg("Could not allocate space for first chunk item in new chunk list.")));
	}

	/* Optimistically tag the chunk as carrying the whole payload. */
	SetChunkType(chunkItem->chunk_data, TC_WHOLE);
	chunkItem->chunk_length = TUPLE_CHUNK_HEADER_SIZE;
	appendChunkToTCList(tcList, chunkItem);

	AssertState(s_tupSerMemCtxt != NULL);

	/*
	 * To avoid inconsistency of record cache between sender and receiver in
	 * the same motion, send the serialized record cache to receiver before
	 * the first tuple is sent; the receiver is responsible for registering
	 * the records to its own local cache and remapping the typmod of tuples
	 * sent by sender.
	 */
	savedCtxt = MemoryContextSwitchTo(s_tupSerMemCtxt);
	recordTypes = build_tuple_node_list(conn->sent_record_typmod);
	serializedBuf = serializeNode((Node *) recordTypes, &serializedLen, NULL);
	MemoryContextSwitchTo(savedCtxt);

	header.tuplen = sizeof(TupSerHeader) + serializedLen;

	/*
	 * we use natts==0xffff and infomask==0xffff to identify this special
	 * tuple which actually carry the serialized record cache table.
	 */
	header.natts = RECORD_CACHE_MAGIC_NATTS;
	header.infomask = RECORD_CACHE_MAGIC_INFOMASK;

	addByteStringToChunkList(tcList, (char *) &header, sizeof(TupSerHeader),
							 &pSerInfo->chunkCache);
	addByteStringToChunkList(tcList, serializedBuf, serializedLen,
							 &pSerInfo->chunkCache);
	addPadding(tcList, &pSerInfo->chunkCache, serializedLen);

	/*
	 * When the payload spilled into multiple chunks, retag the endpoints:
	 * the first becomes TC_PARTIAL_START and the last TC_PARTIAL_END.  Any
	 * chunks in between were already created as TC_PARTIAL_MID.
	 */
	if (tcList->num_chunks > 1)
	{
		TupleChunkListItem head = tcList->p_first;
		TupleChunkListItem tail = tcList->p_last;

		Assert(head != NULL);
		Assert(tail != NULL);
		Assert(head != tail);

		SetChunkType(head->chunk_data, TC_PARTIAL_START);
		SetChunkType(tail->chunk_data, TC_PARTIAL_END);
	}
}