/*
 * tuplestore_gettupleslot - exported function to fetch a MinimalTuple
 *
 * If successful, put tuple in slot and return TRUE; else, clear the slot
 * and return FALSE.
 *
 * If copy is TRUE, the slot receives a copied tuple (allocated in current
 * memory context) that will stay valid regardless of future manipulations of
 * the tuplestore's state.  If copy is FALSE, the slot may just receive a
 * pointer to a tuple held within the tuplestore.  The latter is more
 * efficient but the slot contents may be corrupted if additional writes to
 * the tuplestore occur.  (If using tuplestore_trim, see comments therein.)
 */
bool
tuplestore_gettupleslot(Tuplestorestate *state, bool forward,
						bool copy, TupleTableSlot *slot)
{
	MinimalTuple tuple;
	bool		should_free;

	tuple = (MinimalTuple) tuplestore_gettuple(state, forward, &should_free);

	if (tuple)
	{
		if (copy && !should_free)
		{
			tuple = heap_copy_minimal_tuple(tuple);
			should_free = true;
		}
		ExecStoreMinimalTuple(tuple, slot, should_free);
		return true;
	}
	else
	{
		ExecClearTuple(slot);
		return false;
	}
}
/*
 * tuplestore_gettupleslot - exported function to fetch a tuple into a slot
 *
 * If successful, put tuple in slot and return TRUE; else, clear the slot
 * and return FALSE.
 *
 * If copy is TRUE, the slot receives a copied tuple (allocated in current
 * memory context) that will stay valid regardless of future manipulations of
 * the tuplestore's state.  If copy is FALSE, the slot may just receive a
 * pointer to a tuple held within the tuplestore.  The latter is more
 * efficient but the slot contents may be corrupted if additional writes to
 * the tuplestore occur.  (If using tuplestore_trim, see comments therein.)
 */
bool
tuplestore_gettupleslot(Tuplestorestate *state, bool forward,
						bool copy, TupleTableSlot *slot)
{
	GenericTuple tuple;
	bool		should_free;

	tuple = tuplestore_gettuple(state, forward, &should_free);

	if (tuple)
	{
		if (copy && !should_free)
		{
			if (is_memtuple(tuple))
				tuple = (GenericTuple) memtuple_copy_to((MemTuple) tuple,
														NULL, NULL);
			else
				tuple = (GenericTuple) heap_copytuple((HeapTuple) tuple);
			should_free = true;
		}
		ExecStoreGenericTuple(tuple, slot, should_free);
		return true;
	}
	else
	{
		ExecClearTuple(slot);
		return false;
	}
}
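/*
 * Usage sketch (not part of the API above): a hypothetical caller-side loop
 * that drains a tuplestore through a slot, relying only on
 * tuplestore_gettupleslot() as defined above.  The helper name and the
 * assumption that the caller already owns a slot compatible with the store's
 * tuple descriptor are illustrative.  Passing copy = false is safe here only
 * because nothing writes to the store while the loop runs; a caller that
 * holds onto tuples across further writes should pass copy = true instead.
 */
static void
drain_tuplestore_sketch(Tuplestorestate *store, TupleTableSlot *slot)
{
	/* forward scan; copy = false, so the slot may point into the store */
	while (tuplestore_gettupleslot(store, true, false, slot))
	{
		/* process the current tuple via the slot here */
	}
	/* on exhaustion, tuplestore_gettupleslot has already cleared the slot */
}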
/*
 * tuplestore_advance - exported function to adjust position without fetching
 *
 * We could optimize this case to avoid palloc/pfree overhead, but for the
 * moment it doesn't seem worthwhile.
 */
bool
tuplestore_advance(Tuplestorestate *state, bool forward)
{
	void	   *tuple;
	bool		should_free;

	tuple = tuplestore_gettuple(state, forward, &should_free);

	if (tuple)
	{
		if (should_free)
			pfree(tuple);
		return true;
	}
	else
	{
		return false;
	}
}
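/*
 * Usage sketch: counting the tuples remaining ahead of the active read
 * pointer without materializing any of them, built only on
 * tuplestore_advance() above.  Hypothetical helper; note that the read
 * pointer is left at EOF when it returns.
 */
static int64
count_remaining_sketch(Tuplestorestate *state)
{
	int64		n = 0;

	while (tuplestore_advance(state, true))
		n++;
	return n;
}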
/*
 * tuplestore_gettupleslot_pos - exported function to fetch a MemTuple
 * at the given read position
 *
 * If successful, put tuple in slot and return TRUE; else, clear the slot
 * and return FALSE.
 */
bool
tuplestore_gettupleslot_pos(Tuplestorestate *state, TuplestorePos *pos,
							bool forward, TupleTableSlot *slot)
{
	MemTuple	tuple;
	bool		should_free = false;

	tuple = (MemTuple) tuplestore_gettuple(state, pos, forward, &should_free);

	if (tuple)
	{
		ExecStoreMinimalTuple(tuple, slot, should_free);
		return true;
	}
	else
	{
		ExecClearTuple(slot);
		return false;
	}
}
/*
 * tuplestore_skiptuples - advance over N tuples in either forward or back
 * direction, without returning any data.  N <= 0 is a no-op.
 *
 * Returns TRUE if successful, FALSE if ran out of tuples.
 */
bool
tuplestore_skiptuples(Tuplestorestate *state, int64 ntuples, bool forward)
{
	TSReadPointer *readptr = &state->readptrs[state->activeptr];

	Assert(forward || (readptr->eflags & EXEC_FLAG_BACKWARD));

	if (ntuples <= 0)
		return true;

	switch (state->status)
	{
		case TSS_INMEM:
			if (forward)
			{
				if (readptr->eof_reached)
					return false;
				if (state->memtupcount - readptr->current >= ntuples)
				{
					readptr->current += ntuples;
					return true;
				}
				readptr->current = state->memtupcount;
				readptr->eof_reached = true;
				return false;
			}
			else
			{
				if (readptr->eof_reached)
				{
					readptr->current = state->memtupcount;
					readptr->eof_reached = false;
					ntuples--;
				}
				if (readptr->current - state->memtupdeleted > ntuples)
				{
					readptr->current -= ntuples;
					return true;
				}
				Assert(!state->truncated);
				readptr->current = state->memtupdeleted;
				return false;
			}
			break;
		default:
			/* We don't currently try hard to optimize other cases */
			while (ntuples-- > 0)
			{
				void	   *tuple;
				bool		should_free;

				tuple = tuplestore_gettuple(state, forward, &should_free);

				if (tuple == NULL)
					return false;
				if (should_free)
					pfree(tuple);
				CHECK_FOR_INTERRUPTS();
			}
			return true;
	}
}
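/*
 * Usage sketch: absolute repositioning composed from primitives in this
 * file.  Hypothetical helper, assuming the store was created with rescan
 * capability: rewind the active read pointer, then skip forward to the
 * target offset.  Returns false if the store holds fewer than 'target'
 * tuples.
 */
static bool
seek_absolute_sketch(Tuplestorestate *state, int64 target)
{
	tuplestore_rescan(state);	/* back to the start of the result set */
	return tuplestore_skiptuples(state, target, true);
}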
/*
 * PersistHoldablePortal
 *
 * Prepare the specified Portal for access outside of the current
 * transaction.  When this function returns, all future accesses to the
 * portal must be done via the Tuplestore (not by invoking the executor).
 */
void
PersistHoldablePortal(Portal portal)
{
	QueryDesc  *queryDesc = PortalGetQueryDesc(portal);
	MemoryContext savePortalContext;
	MemoryContext saveQueryContext;
	MemoryContext oldcxt;

	/*
	 * If we're preserving a holdable portal, we had better be inside the
	 * transaction that originally created it.
	 */
	Assert(portal->createXact == GetCurrentTransactionId());
	Assert(queryDesc != NULL);
	Assert(portal->portalReady);
	Assert(!portal->portalDone);

	/*
	 * Caller must have created the tuplestore already.
	 */
	Assert(portal->holdContext != NULL);
	Assert(portal->holdStore != NULL);

	/*
	 * Before closing down the executor, we must copy the tupdesc into
	 * long-term memory, since it was created in executor memory.
	 */
	oldcxt = MemoryContextSwitchTo(portal->holdContext);

	portal->tupDesc = CreateTupleDescCopy(portal->tupDesc);

	MemoryContextSwitchTo(oldcxt);

	/*
	 * Check for improper portal use, and mark portal active.
	 */
	if (portal->portalActive)
		ereport(ERROR,
				(errcode(ERRCODE_OBJECT_IN_USE),
				 errmsg("portal \"%s\" already active", portal->name)));
	portal->portalActive = true;

	/*
	 * Set global portal context pointers.
	 */
	savePortalContext = PortalContext;
	PortalContext = PortalGetHeapMemory(portal);
	saveQueryContext = QueryContext;
	QueryContext = portal->queryContext;

	MemoryContextSwitchTo(PortalContext);

	/*
	 * Rewind the executor: we need to store the entire result set in the
	 * tuplestore, so that subsequent backward FETCHes can be processed.
	 */
	ExecutorRewind(queryDesc);

	/* Change the destination to output to the tuplestore */
	queryDesc->dest = CreateDestReceiver(Tuplestore, portal);

	/* Fetch the result set into the tuplestore */
	ExecutorRun(queryDesc, ForwardScanDirection, 0L);

	(*queryDesc->dest->rDestroy) (queryDesc->dest);
	queryDesc->dest = NULL;

	/*
	 * Now shut down the inner executor.
	 */
	portal->queryDesc = NULL;	/* prevent double shutdown */
	ExecutorEnd(queryDesc);

	/* Mark portal not active */
	portal->portalActive = false;

	PortalContext = savePortalContext;
	QueryContext = saveQueryContext;

	/*
	 * Reset the position in the result set: ideally, this could be
	 * implemented by just skipping straight to the tuple # that we need
	 * to be at, but the tuplestore API doesn't support that.  So we start
	 * at the beginning of the tuplestore and iterate through it until we
	 * reach where we need to be.  FIXME someday?
	 */
	MemoryContextSwitchTo(portal->holdContext);

	if (!portal->atEnd)
	{
		long		store_pos;

		if (portal->posOverflow)	/* oops, cannot trust portalPos */
			ereport(ERROR,
					(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
					 errmsg("could not reposition held cursor")));

		tuplestore_rescan(portal->holdStore);

		for (store_pos = 0; store_pos < portal->portalPos; store_pos++)
		{
			HeapTuple	tup;
			bool		should_free;

			tup = tuplestore_gettuple(portal->holdStore, true,
									  &should_free);

			if (tup == NULL)
				elog(ERROR, "unexpected end of tuple stream");

			if (should_free)
				pfree(tup);
		}
	}

	MemoryContextSwitchTo(oldcxt);

	/*
	 * We can now release any subsidiary memory of the portal's heap
	 * context; we'll never use it again.  The executor already dropped
	 * its context, but this will clean up anything that glommed onto the
	 * portal's heap via PortalContext.
	 */
	MemoryContextDeleteChildren(PortalGetHeapMemory(portal));
}
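/*
 * Note on the FIXME above: the fetch-and-discard loop is exactly the
 * operation that tuplestore_skiptuples(), shown earlier in this section,
 * provides.  A hedged sketch of the equivalent repositioning, assuming the
 * two APIs coexist in the same tree (they come from different source eras):
 */
static void
reposition_held_cursor_sketch(Portal portal)
{
	tuplestore_rescan(portal->holdStore);
	if (!tuplestore_skiptuples(portal->holdStore,
							   (int64) portal->portalPos, true))
		elog(ERROR, "unexpected end of tuple stream");
}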