/*
 * spi_printtup
 *		store tuple retrieved by Executor into SPITupleTable
 *		of current SPI procedure
 */
void
spi_printtup(HeapTuple tuple, TupleDesc tupdesc, DestReceiver *self)
{
	SPITupleTable *tuptable;
	MemoryContext oldcxt;

	/*
	 * When called by the Executor, _SPI_curid is expected to be equal to
	 * _SPI_connected
	 */
	if (_SPI_curid != _SPI_connected || _SPI_connected < 0)
		elog(ERROR, "improper call to spi_printtup");
	if (_SPI_current != &(_SPI_stack[_SPI_curid]))
		elog(ERROR, "SPI stack corrupted");

	tuptable = _SPI_current->tuptable;
	if (tuptable == NULL)
		elog(ERROR, "improper call to spi_printtup");

	oldcxt = MemoryContextSwitchTo(tuptable->tuptabcxt);

	if (tuptable->free == 0)
	{
		/* grow the array by another 256 entries */
		tuptable->free = 256;
		tuptable->alloced += tuptable->free;
		tuptable->vals = (HeapTuple *) repalloc(tuptable->vals,
												tuptable->alloced * sizeof(HeapTuple));
	}

	tuptable->vals[tuptable->alloced - tuptable->free] = heap_copytuple(tuple);
	(tuptable->free)--;

	MemoryContextSwitchTo(oldcxt);
}
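/*
 * Illustrative sketch (not from the original source): spi_printtup is the
 * DestReceiver hook that fills SPI_tuptable, so a typical SPI caller
 * consumes its work roughly as below.  The query text is hypothetical, and
 * the type of SPI_processed differs across server versions.
 */
static void
spi_consumer_example(void)
{
	int		ret;
	uint64	i;					/* SPI_processed is narrower in old releases */

	if (SPI_connect() != SPI_OK_CONNECT)
		elog(ERROR, "SPI_connect failed");

	ret = SPI_execute("SELECT relname FROM pg_class LIMIT 10", true, 0);
	if (ret != SPI_OK_SELECT)
		elog(ERROR, "SPI_execute failed: %s", SPI_result_code_string(ret));

	/* each vals[] entry was stored by spi_printtup via heap_copytuple */
	for (i = 0; i < SPI_processed; i++)
	{
		char	   *val = SPI_getvalue(SPI_tuptable->vals[i],
									   SPI_tuptable->tupdesc, 1);

		elog(NOTICE, "row %lu: %s", (unsigned long) i, val ? val : "NULL");
	}

	SPI_finish();
}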
/*
 * ConstraintSetParentConstraint
 *		Set a partition's constraint as child of its parent table's
 *
 * This updates the constraint's pg_constraint row to show it as inherited,
 * and adds a dependency to the parent so that it cannot be removed on its
 * own.
 */
void
ConstraintSetParentConstraint(Oid childConstrId, Oid parentConstrId)
{
	Relation	constrRel;
	Form_pg_constraint constrForm;
	HeapTuple	tuple,
				newtup;
	ObjectAddress depender;
	ObjectAddress referenced;

	constrRel = heap_open(ConstraintRelationId, RowExclusiveLock);
	tuple = SearchSysCache1(CONSTROID, ObjectIdGetDatum(childConstrId));
	if (!HeapTupleIsValid(tuple))
		elog(ERROR, "cache lookup failed for constraint %u", childConstrId);
	newtup = heap_copytuple(tuple);
	constrForm = (Form_pg_constraint) GETSTRUCT(newtup);
	constrForm->conislocal = false;
	constrForm->coninhcount++;
	constrForm->conparentid = parentConstrId;
	CatalogTupleUpdate(constrRel, &tuple->t_self, newtup);
	ReleaseSysCache(tuple);

	ObjectAddressSet(referenced, ConstraintRelationId, parentConstrId);
	ObjectAddressSet(depender, ConstraintRelationId, childConstrId);

	recordDependencyOn(&depender, &referenced, DEPENDENCY_INTERNAL_AUTO);

	heap_close(constrRel, RowExclusiveLock);
}
HeapTuple
SPI_copytuple(HeapTuple tuple)
{
	MemoryContext oldcxt = NULL;
	HeapTuple	ctuple;

	if (tuple == NULL)
	{
		SPI_result = SPI_ERROR_ARGUMENT;
		return NULL;
	}

	if (_SPI_curid + 1 == _SPI_connected)	/* connected */
	{
		if (_SPI_current != &(_SPI_stack[_SPI_curid + 1]))
			elog(ERROR, "SPI stack corrupted");
		oldcxt = MemoryContextSwitchTo(_SPI_current->savedcxt);
	}

	ctuple = heap_copytuple(tuple);

	if (oldcxt)
		MemoryContextSwitchTo(oldcxt);

	return ctuple;
}
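/*
 * Illustrative sketch (not from the original source): because SPI_copytuple
 * switches into the caller's saved context when connected, it is the usual
 * way to keep one result row alive past SPI_finish().  The query text is
 * hypothetical.
 */
static HeapTuple
fetch_one_row_example(void)
{
	HeapTuple	result = NULL;

	if (SPI_connect() != SPI_OK_CONNECT)
		elog(ERROR, "SPI_connect failed");

	if (SPI_execute("SELECT * FROM pg_database LIMIT 1", true, 1) == SPI_OK_SELECT &&
		SPI_processed > 0)
		result = SPI_copytuple(SPI_tuptable->vals[0]);	/* survives SPI_finish */

	SPI_finish();
	return result;
}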
/*
 * tuplestore_gettupleslot - exported function to fetch a tuple into a slot
 *
 * If successful, put tuple in slot and return TRUE; else, clear the slot
 * and return FALSE.
 *
 * If copy is TRUE, the slot receives a copied tuple (allocated in current
 * memory context) that will stay valid regardless of future manipulations of
 * the tuplestore's state.  If copy is FALSE, the slot may just receive a
 * pointer to a tuple held within the tuplestore.  The latter is more
 * efficient but the slot contents may be corrupted if additional writes to
 * the tuplestore occur.  (If using tuplestore_trim, see comments therein.)
 */
bool
tuplestore_gettupleslot(Tuplestorestate *state, bool forward,
						bool copy, TupleTableSlot *slot)
{
	GenericTuple tuple;
	bool		should_free;

	tuple = tuplestore_gettuple(state, forward, &should_free);

	if (tuple)
	{
		if (copy && !should_free)
		{
			if (is_memtuple(tuple))
				tuple = (GenericTuple)
					memtuple_copy_to((MemTuple) tuple, NULL, NULL);
			else
				tuple = (GenericTuple) heap_copytuple((HeapTuple) tuple);
			should_free = true;
		}
		ExecStoreGenericTuple(tuple, slot, should_free);
		return true;
	}
	else
	{
		ExecClearTuple(slot);
		return false;
	}
}
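/*
 * Illustrative sketch (not from the original source): draining a tuplestore
 * through tuplestore_gettupleslot.  Passing copy=true is the safe choice
 * when more writes to the store may follow; copy=false avoids the
 * heap_copytuple overhead when the store is read-only from here on.
 */
static void
drain_tuplestore_example(Tuplestorestate *store, TupleDesc tupdesc)
{
	TupleTableSlot *slot = MakeSingleTupleTableSlot(tupdesc);

	while (tuplestore_gettupleslot(store, true /* forward */ ,
								   false /* copy */ , slot))
	{
		/* process slot ... */
	}
	ExecDropSingleTupleTableSlot(slot);
}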
jobject
TupleTable_createFromSlot(TupleTableSlot *tts)
{
	HeapTuple	tuple;
	jobject		tupdesc;
	jobjectArray tuples;
	MemoryContext curr;

	if (tts == 0)
		return 0;

	curr = MemoryContextSwitchTo(JavaMemoryContext);
#if (PGSQL_MAJOR_VER == 8 && PGSQL_MINOR_VER == 0)
	tupdesc = TupleDesc_internalCreate(tts->ttc_tupleDescriptor);
	tuple = heap_copytuple(tts->val);
#else
	tupdesc = TupleDesc_internalCreate(tts->tts_tupleDescriptor);
	tuple = ExecCopySlotTuple(tts);
#endif
	tuples = Tuple_createArray(&tuple, 1, false);
	MemoryContextSwitchTo(curr);
	return JNI_newObject(s_TupleTable_class, s_TupleTable_init, tupdesc, tuples);
}
/*
 * Read tuples for the given reader in nowait mode, filling its tuple array.
 */
static void
form_tuple_array(GatherMergeState *gm_state, int reader)
{
	GMReaderTupleBuffer *tuple_buffer = &gm_state->gm_tuple_buffers[reader];
	int			i;

	/* The last slot is for the leader, and we don't build a tuple array for it */
	if (reader == gm_state->nreaders)
		return;

	/*
	 * If we have already consumed every tuple in the array, reset both
	 * counters to zero so the array can be refilled.
	 */
	if (tuple_buffer->nTuples == tuple_buffer->readCounter)
		tuple_buffer->nTuples = tuple_buffer->readCounter = 0;

	/* Tuple array already full? */
	if (tuple_buffer->nTuples == MAX_TUPLE_STORE)
		return;

	for (i = tuple_buffer->nTuples; i < MAX_TUPLE_STORE; i++)
	{
		tuple_buffer->tuple[i] =
			heap_copytuple(gm_readnext_tuple(gm_state, reader, false,
											 &tuple_buffer->done));
		if (!HeapTupleIsValid(tuple_buffer->tuple[i]))
			break;
		tuple_buffer->nTuples++;
	}
}
Datum
variant_cast_out(PG_FUNCTION_ARGS)
{
	Oid			targettypid = get_fn_expr_rettype(fcinfo->flinfo);
	VariantInt	vi;
	Datum		out;

	if (PG_ARGISNULL(0))
		PG_RETURN_NULL();

	/* No reason to format the type name, so use IOFunc_input instead of IOFunc_output */
	vi = make_variant_int(PG_GETARG_VARIANT(0), fcinfo, IOFunc_input);

	/* If the original was NULL then we MUST return NULL */
	if (vi->isnull)
		PG_RETURN_NULL();

	/* If our types match exactly we don't need to cast */
	if (vi->typid == targettypid)
		PG_RETURN_DATUM(vi->data);

	/* Keep cruft localized to just here */
	{
		bool		do_pop;
		int			ret;
		bool		isnull;
		MemoryContext cctx = CurrentMemoryContext;
		HeapTuple	tup;
		StringInfoData cmdd;
		StringInfo	cmd = &cmdd;
		char	   *nulls = " ";

		do_pop = _SPI_conn();

		initStringInfo(cmd);
		appendStringInfo(cmd, "SELECT $1::%s", format_type_be(targettypid));

		/* command, nargs, Oid *argument_types, *values, *nulls, read_only, count */
		if ((ret = SPI_execute_with_args(cmd->data, 1, &vi->typid, &vi->data,
										 nulls, true, 0)) != SPI_OK_SELECT)
			elog(ERROR, "SPI_execute_with_args returned %s",
				 SPI_result_code_string(ret));

		/*
		 * Make a copy of the result tuple in the previous memory context.
		 * Copying the entire tuple is wasteful; it would be better to copy
		 * only the actual attribute, but here the difference isn't large.
		 */
		MemoryContextSwitchTo(cctx);
		tup = heap_copytuple(SPI_tuptable->vals[0]);
		out = heap_getattr(tup, 1, SPI_tuptable->tupdesc, &isnull);
		/* getTypeOutputInfo(typoid, &foutoid, &typisvarlena); */

		/* Remember this frees everything palloc'd since our connect/push call */
		_SPI_disc(do_pop);
	}							/* End cruft */

	PG_RETURN_DATUM(out);
}
/*
 * AlterConstraintNamespaces
 *		Find any constraints belonging to the specified object,
 *		and move them to the specified new namespace.
 *
 * isType indicates whether the owning object is a type or a relation.
 */
void
AlterConstraintNamespaces(Oid ownerId, Oid oldNspId,
						  Oid newNspId, bool isType)
{
	Relation	conRel;
	ScanKeyData key[1];
	SysScanDesc scan;
	HeapTuple	tup;

	conRel = heap_open(ConstraintRelationId, RowExclusiveLock);

	if (isType)
	{
		ScanKeyInit(&key[0],
					Anum_pg_constraint_contypid,
					BTEqualStrategyNumber, F_OIDEQ,
					ObjectIdGetDatum(ownerId));

		scan = systable_beginscan(conRel, ConstraintTypidIndexId, true,
								  SnapshotNow, 1, key);
	}
	else
	{
		ScanKeyInit(&key[0],
					Anum_pg_constraint_conrelid,
					BTEqualStrategyNumber, F_OIDEQ,
					ObjectIdGetDatum(ownerId));

		scan = systable_beginscan(conRel, ConstraintRelidIndexId, true,
								  SnapshotNow, 1, key);
	}

	while (HeapTupleIsValid((tup = systable_getnext(scan))))
	{
		Form_pg_constraint conform = (Form_pg_constraint) GETSTRUCT(tup);

		if (conform->connamespace == oldNspId)
		{
			tup = heap_copytuple(tup);
			conform = (Form_pg_constraint) GETSTRUCT(tup);

			conform->connamespace = newNspId;

			simple_heap_update(conRel, &tup->t_self, tup);
			CatalogUpdateIndexes(conRel, tup);

			/*
			 * Note: currently, the constraint will not have its own
			 * dependency on the namespace, so we don't need to do
			 * changeDependencyFor().
			 */
		}
	}

	systable_endscan(scan);

	heap_close(conRel, RowExclusiveLock);
}
HeapTuple
TriggerData_getTriggerReturnTuple(jobject jtd, bool *wasNull)
{
	Ptr2Long	p2l;
	HeapTuple	ret = 0;

	p2l.longVal = JNI_callLongMethod(jtd, s_TriggerData_getTriggerReturnTuple);
	if (p2l.longVal != 0)
		ret = heap_copytuple((HeapTuple) p2l.ptrVal);
	else
		*wasNull = true;
	return ret;
}
/*
 * Reset a sequence to its initial value.
 *
 * The change is made transactionally, so that on failure of the current
 * transaction, the sequence will be restored to its previous state.
 * We do that by creating a whole new relfilenode for the sequence; so this
 * works much like the rewriting forms of ALTER TABLE.
 *
 * Caller is assumed to have acquired AccessExclusiveLock on the sequence,
 * which must not be released until end of transaction.  Caller is also
 * responsible for permissions checking.
 */
void
ResetSequence(Oid seq_relid)
{
	Relation	seq_rel;
	SeqTable	elm;
	Form_pg_sequence seq;
	Buffer		buf;
	HeapTupleData seqtuple;
	HeapTuple	tuple;

	/*
	 * Read the old sequence.  This does a bit more work than really
	 * necessary, but it's simple, and we do want to double-check that it's
	 * indeed a sequence.
	 */
	init_sequence(seq_relid, &elm, &seq_rel);
	(void) read_seq_tuple(elm, seq_rel, &buf, &seqtuple);

	/*
	 * Copy the existing sequence tuple.
	 */
	tuple = heap_copytuple(&seqtuple);

	/* Now we're done with the old page */
	UnlockReleaseBuffer(buf);

	/*
	 * Modify the copied tuple to execute the restart (compare the RESTART
	 * action in AlterSequence)
	 */
	seq = (Form_pg_sequence) GETSTRUCT(tuple);
	seq->last_value = seq->start_value;
	seq->is_called = false;
	seq->log_cnt = 0;

	/*
	 * Create a new storage file for the sequence.  We want to keep the
	 * sequence's relfrozenxid at 0, since it won't contain any unfrozen
	 * XIDs.  Same with relminmxid, since a sequence will never contain
	 * multixacts.
	 */
	RelationSetNewRelfilenode(seq_rel, seq_rel->rd_rel->relpersistence,
							  InvalidTransactionId, InvalidMultiXactId);

	/*
	 * Insert the modified tuple into the new storage file.
	 */
	fill_seq_with_data(seq_rel, tuple);

	/* Clear local cache so that we don't think we have cached numbers */
	/* Note that we do not change the currval() state */
	elm->cached = elm->last;

	relation_close(seq_rel, NoLock);
}
jobject
Tuple_internalCreate(HeapTuple ht, bool mustCopy)
{
	jobject		jht;
	Ptr2Long	htH;

	if (mustCopy)
		ht = heap_copytuple(ht);

	htH.longVal = 0L;			/* ensure that the rest is zeroed out */
	htH.ptrVal = ht;
	jht = JNI_newObject(s_Tuple_class, s_Tuple_init, htH.longVal);
	return jht;
}
/*
 * SearchSysCacheCopyAttName
 *
 * As above, an attisdropped-aware version of SearchSysCacheCopy.
 */
HeapTuple
SearchSysCacheCopyAttName(Oid relid, const char *attname)
{
	HeapTuple	tuple,
				newtuple;

	tuple = SearchSysCacheAttName(relid, attname);
	if (!HeapTupleIsValid(tuple))
		return tuple;
	newtuple = heap_copytuple(tuple);
	ReleaseSysCache(tuple);
	return newtuple;
}
/*
 * SearchSysCacheCopyAttNum
 *
 * As above, an attisdropped-aware version of SearchSysCacheCopy.
 */
HeapTuple
SearchSysCacheCopyAttNum(Oid relid, int16 attnum)
{
	HeapTuple	tuple,
				newtuple;

	tuple = SearchSysCacheAttNum(relid, attnum);
	if (!HeapTupleIsValid(tuple))
		return NULL;
	newtuple = heap_copytuple(tuple);
	ReleaseSysCache(tuple);
	return newtuple;
}
/*
 * SearchSysCacheCopy
 *
 * A convenience routine that does SearchSysCache and (if successful)
 * returns a modifiable copy of the syscache entry.  The original
 * syscache entry is released before returning.  The caller should
 * heap_freetuple() the result when done with it.
 */
HeapTuple
SearchSysCacheCopy(int cacheId,
				   Datum key1,
				   Datum key2,
				   Datum key3,
				   Datum key4)
{
	HeapTuple	tuple,
				newtuple;

	tuple = SearchSysCache(cacheId, key1, key2, key3, key4);
	if (!HeapTupleIsValid(tuple))
		return tuple;
	newtuple = heap_copytuple(tuple);
	ReleaseSysCache(tuple);
	return newtuple;
}
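/*
 * Illustrative sketch (not from the original source): the standard
 * copy-modify-update pattern that SearchSysCacheCopy enables.  Details such
 * as having the catalog relation open, and whether the update is done with
 * simple_heap_update + CatalogUpdateIndexes or CatalogTupleUpdate, vary by
 * server version.
 */
static void
rename_relation_example(Relation pg_class_rel, Oid relid, const char *newname)
{
	HeapTuple	tup;
	Form_pg_class relform;

	tup = SearchSysCacheCopy1(RELOID, ObjectIdGetDatum(relid));
	if (!HeapTupleIsValid(tup))
		elog(ERROR, "cache lookup failed for relation %u", relid);

	/* the copy is private, so we may scribble on it */
	relform = (Form_pg_class) GETSTRUCT(tup);
	namestrcpy(&relform->relname, newname);

	CatalogTupleUpdate(pg_class_rel, &tup->t_self, tup);
	heap_freetuple(tup);
}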
/* --------------------------------
 *		ExecCopySlotHeapTuple
 *			Obtain a copy of a slot's regular physical tuple.  The copy is
 *			palloc'd in the current memory context.
 *
 *			This works even if the slot contains a virtual or minimal tuple;
 *			however the "system columns" of the result will not be meaningful.
 * --------------------------------
 */
HeapTuple
ExecCopySlotHeapTuple(TupleTableSlot *slot)
{
	/*
	 * sanity checks
	 */
	Assert(!TupIsNull(slot));

	if (slot->PRIVATE_tts_heaptuple)
		return heap_copytuple(slot->PRIVATE_tts_heaptuple);

	slot_getallattrs(slot);

	/*
	 * Otherwise we need to build a tuple from the Datum array.
	 */
	return heap_form_tuple(slot->tts_tupleDescriptor,
						   slot_get_values(slot),
						   slot_get_isnull(slot));
}
/*
 * GetRoleTupleByName -- as above, but search by role name
 */
static HeapTuple
GetRoleTupleByName(const char *rolename)
{
	HeapTuple	tuple;
	Relation	relation;
	SysScanDesc scan;
	ScanKeyData key[1];

	/*
	 * form a scan key
	 */
	ScanKeyInit(&key[0],
				Anum_pg_authid_rolname,
				BTEqualStrategyNumber, F_NAMEEQ,
				CStringGetDatum(rolename));

	/*
	 * Open pg_authid and fetch a tuple.  Force heap scan if we haven't yet
	 * built the critical shared relcache entries (i.e., we're starting up
	 * without a shared relcache cache file).
	 */
	relation = heap_open(AuthIdRelationId, AccessShareLock);
	scan = systable_beginscan(relation, AuthIdRolnameIndexId,
							  criticalSharedRelcachesBuilt,
							  SNAPSHOT, 1, key);

	tuple = systable_getnext(scan);

	/* Must copy tuple before releasing buffer */
	if (HeapTupleIsValid(tuple))
		tuple = heap_copytuple(tuple);

	/* all done */
	systable_endscan(scan);
	heap_close(relation, AccessShareLock);

	return tuple;
}
/* --------------------------------
 *		ExecCopySlotTuple
 *			Obtain a copy of a slot's physical tuple.  The copy is
 *			palloc'd in the current memory context.
 *
 *			This works even if the slot contains a virtual tuple;
 *			however the "system columns" of the result will not be meaningful.
 * --------------------------------
 */
HeapTuple
ExecCopySlotTuple(TupleTableSlot *slot)
{
	/*
	 * sanity checks
	 */
	Assert(slot != NULL);
	Assert(!slot->tts_isempty);

	/*
	 * If we have a physical tuple then just copy it.
	 */
	if (slot->tts_tuple)
		return heap_copytuple(slot->tts_tuple);

	/*
	 * Otherwise we need to build a tuple from the Datum array.
	 */
	return heap_form_tuple(slot->tts_tupleDescriptor,
						   slot->tts_values,
						   slot->tts_isnull);
}
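/*
 * Illustrative sketch (not from the original source): ExecCopySlotTuple is
 * the usual way to detach a row from executor-managed per-tuple memory, for
 * example to remember the first tuple of a group (compare ExecGroup later
 * in this section).  The context argument is hypothetical.
 */
static HeapTuple
remember_tuple_example(TupleTableSlot *slot, MemoryContext longlived)
{
	MemoryContext oldcxt = MemoryContextSwitchTo(longlived);
	HeapTuple	copy = ExecCopySlotTuple(slot); /* palloc'd in longlived */

	MemoryContextSwitchTo(oldcxt);
	return copy;
}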
/*
 * GetDatabaseTupleByOid -- as above, but search by database OID
 */
static HeapTuple
GetDatabaseTupleByOid(Oid dboid)
{
	HeapTuple	tuple;
	Relation	relation;
	SysScanDesc scan;
	ScanKeyData key[1];

	/*
	 * form a scan key
	 */
	ScanKeyInit(&key[0],
				ObjectIdAttributeNumber,
				BTEqualStrategyNumber, F_OIDEQ,
				ObjectIdGetDatum(dboid));

	/*
	 * Open pg_database and fetch a tuple.  Force heap scan if we haven't yet
	 * built the critical shared relcache entries (i.e., we're starting up
	 * without a shared relcache cache file).
	 */
	relation = heap_open(DatabaseRelationId, AccessShareLock);
	scan = systable_beginscan(relation, DatabaseOidIndexId,
							  criticalSharedRelcachesBuilt,
							  NULL, 1, key);

	tuple = systable_getnext(scan);

	/* Must copy tuple before releasing buffer */
	if (HeapTupleIsValid(tuple))
		tuple = heap_copytuple(tuple);

	/* all done */
	systable_endscan(scan);
	heap_close(relation, AccessShareLock);

	return tuple;
}
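/*
 * Illustrative sketch (not from the original source): the scan pattern the
 * two lookups above share.  A tuple returned by systable_getnext points into
 * a buffer that systable_endscan releases, so it must be heap_copytuple'd
 * before the scan ends.  Relation, index, and key are assumed to be set up
 * by the caller.
 */
static HeapTuple
copy_before_endscan_example(Relation rel, Oid indexId, ScanKey key)
{
	SysScanDesc scan;
	HeapTuple	tuple;

	scan = systable_beginscan(rel, indexId, true, NULL, 1, key);
	tuple = systable_getnext(scan);
	if (HeapTupleIsValid(tuple))
		tuple = heap_copytuple(tuple);	/* must copy before releasing buffer */
	systable_endscan(scan);
	return tuple;
}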
/*
 * Fetch a tuple from a tuple queue reader.
 *
 * The return value is NULL if there are no remaining tuples or if
 * nowait = true and no tuple is ready to return.  *done, if not NULL,
 * is set to true when there are no remaining tuples and otherwise to false.
 *
 * The returned tuple, if any, is allocated in CurrentMemoryContext.
 * Note that this routine must not leak memory!  (We used to allow that,
 * but not any more.)
 *
 * Even when shm_mq_receive() returns SHM_MQ_WOULD_BLOCK, this can still
 * accumulate bytes from a partially-read message, so it's useful to call
 * this with nowait = true even if nothing is returned.
 */
HeapTuple
TupleQueueReaderNext(TupleQueueReader *reader, bool nowait, bool *done)
{
	HeapTupleData htup;
	shm_mq_result result;
	Size		nbytes;
	void	   *data;

	if (done != NULL)
		*done = false;

	/* Attempt to read a message. */
	result = shm_mq_receive(reader->queue, &nbytes, &data, nowait);

	/* If queue is detached, set *done and return NULL. */
	if (result == SHM_MQ_DETACHED)
	{
		if (done != NULL)
			*done = true;
		return NULL;
	}

	/* In non-blocking mode, bail out if no message ready yet. */
	if (result == SHM_MQ_WOULD_BLOCK)
		return NULL;

	Assert(result == SHM_MQ_SUCCESS);

	/*
	 * Set up a dummy HeapTupleData pointing to the data from the shm_mq
	 * (which had better be sufficiently aligned).
	 */
	ItemPointerSetInvalid(&htup.t_self);
	htup.t_tableOid = InvalidOid;
	htup.t_len = nbytes;
	htup.t_data = data;

	return heap_copytuple(&htup);
}
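/*
 * Illustrative sketch (not from the original source): a Gather-style read
 * loop over a TupleQueueReader.  In blocking mode, NULL together with
 * *done == true means the worker has finished; in nowait mode, NULL with
 * *done == false just means no tuple is ready yet.
 */
static void
drain_tuple_queue_example(TupleQueueReader *reader)
{
	for (;;)
	{
		HeapTuple	tup;
		bool		done;

		tup = TupleQueueReaderNext(reader, false /* nowait */ , &done);
		if (done)
			break;				/* queue detached, no more tuples */
		if (tup == NULL)
			continue;			/* defensive; shouldn't happen when blocking */

		/* process tup, which is palloc'd in CurrentMemoryContext ... */
		heap_freetuple(tup);
	}
}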
/*
 * Rename a tablespace
 */
ObjectAddress
RenameTableSpace(const char *oldname, const char *newname)
{
	Oid			tspId;
	Relation	rel;
	ScanKeyData entry[1];
	HeapScanDesc scan;
	HeapTuple	tup;
	HeapTuple	newtuple;
	Form_pg_tablespace newform;
	ObjectAddress address;

	/* Search pg_tablespace */
	rel = heap_open(TableSpaceRelationId, RowExclusiveLock);

	ScanKeyInit(&entry[0],
				Anum_pg_tablespace_spcname,
				BTEqualStrategyNumber, F_NAMEEQ,
				CStringGetDatum(oldname));
	scan = heap_beginscan_catalog(rel, 1, entry);
	tup = heap_getnext(scan, ForwardScanDirection);
	if (!HeapTupleIsValid(tup))
		ereport(ERROR,
				(errcode(ERRCODE_UNDEFINED_OBJECT),
				 errmsg("tablespace \"%s\" does not exist", oldname)));

	tspId = HeapTupleGetOid(tup);
	newtuple = heap_copytuple(tup);
	newform = (Form_pg_tablespace) GETSTRUCT(newtuple);

	heap_endscan(scan);

	/* Must be owner */
	if (!pg_tablespace_ownercheck(HeapTupleGetOid(newtuple), GetUserId()))
		aclcheck_error(ACLCHECK_NO_PRIV, ACL_KIND_TABLESPACE, oldname);

	/* Validate new name */
	if (!allowSystemTableMods && IsReservedName(newname))
		ereport(ERROR,
				(errcode(ERRCODE_RESERVED_NAME),
				 errmsg("unacceptable tablespace name \"%s\"", newname),
				 errdetail("The prefix \"pg_\" is reserved for system tablespaces.")));

	/* Make sure the new name doesn't exist */
	ScanKeyInit(&entry[0],
				Anum_pg_tablespace_spcname,
				BTEqualStrategyNumber, F_NAMEEQ,
				CStringGetDatum(newname));
	scan = heap_beginscan_catalog(rel, 1, entry);
	tup = heap_getnext(scan, ForwardScanDirection);
	if (HeapTupleIsValid(tup))
		ereport(ERROR,
				(errcode(ERRCODE_DUPLICATE_OBJECT),
				 errmsg("tablespace \"%s\" already exists", newname)));

	heap_endscan(scan);

	/* OK, update the entry */
	namestrcpy(&(newform->spcname), newname);
	simple_heap_update(rel, &newtuple->t_self, newtuple);
	CatalogUpdateIndexes(rel, newtuple);

	InvokeObjectPostAlterHook(TableSpaceRelationId, tspId, 0);

	ObjectAddressSet(address, TableSpaceRelationId, tspId);

	heap_close(rel, NoLock);

	return address;
}
/*
 * GetParquetFileSegInfo
 *
 * Get the catalog entry for a parquet relation from the pg_parquetseg_*
 * relation that belongs to the currently used Parquet table.
 *
 * If a caller intends to append to this file segment entry they must
 * already hold a relation Append-Only segment file (transaction-scope) lock
 * (tag LOCKTAG_RELATION_APPENDONLY_SEGMENT_FILE) in order to guarantee
 * stability of the pg_aoseg information on this segment file and exclusive
 * right to append data to the segment file.
 */
ParquetFileSegInfo *
GetParquetFileSegInfo(Relation parentrel, AppendOnlyEntry *aoEntry,
					  Snapshot parquetMetaDataSnapshot, int segno)
{
	Relation	pg_parquetseg_rel;
	TupleDesc	pg_parquetseg_dsc;
	HeapTuple	tuple;
	ScanKeyData key[1];
	SysScanDesc parquetscan;
	Datum		eof,
				eof_uncompressed,
				tupcount;
	bool		isNull;
	bool		indexOK;
	Oid			indexid;
	ParquetFileSegInfo *fsinfo;

	/*
	 * Check the pg_parquetseg relation to be certain the parquet table
	 * segment file is there.
	 */
	pg_parquetseg_rel = heap_open(aoEntry->segrelid, AccessShareLock);
	pg_parquetseg_dsc = RelationGetDescr(pg_parquetseg_rel);

	if (Gp_role == GP_ROLE_EXECUTE)
	{
		indexOK = FALSE;
		indexid = InvalidOid;
	}
	else
	{
		indexOK = TRUE;
		indexid = aoEntry->segidxid;
	}

	/*
	 * Setup a scan key to fetch from the index by segno.
	 */
	ScanKeyInit(&key[0],
				(AttrNumber) Anum_pg_parquetseg_segno,
				BTEqualStrategyNumber, F_INT4EQ,
				Int32GetDatum(segno));

	parquetscan = systable_beginscan(pg_parquetseg_rel, indexid, indexOK,
									 SnapshotNow, 1, &key[0]);

	tuple = systable_getnext(parquetscan);

	if (!HeapTupleIsValid(tuple))
	{
		/* This segment file does not have an entry. */
		systable_endscan(parquetscan);
		heap_close(pg_parquetseg_rel, AccessShareLock);
		return NULL;
	}

	tuple = heap_copytuple(tuple);

	systable_endscan(parquetscan);

	Assert(HeapTupleIsValid(tuple));

	fsinfo = (ParquetFileSegInfo *) palloc0(sizeof(ParquetFileSegInfo));

	/* get the eof */
	eof = fastgetattr(tuple, Anum_pg_parquetseg_eof,
					  pg_parquetseg_dsc, &isNull);

	if (isNull)
		ereport(ERROR,
				(errcode(ERRCODE_UNDEFINED_OBJECT),
				 errmsg("got invalid eof value: NULL")));

	/* get the tupcount */
	tupcount = fastgetattr(tuple, Anum_pg_parquetseg_tupcount,
						   pg_parquetseg_dsc, &isNull);

	if (isNull)
		ereport(ERROR,
				(errcode(ERRCODE_UNDEFINED_OBJECT),
				 errmsg("got invalid tupcount value: NULL")));

	/* get the uncompressed eof */
	eof_uncompressed = fastgetattr(tuple, Anum_pg_parquetseg_eofuncompressed,
								   pg_parquetseg_dsc, &isNull);

	/*
	 * Note: the eof_uncompressed Datum itself is never used; the fastgetattr
	 * call exists only to obtain isNull.  This local variable is not the
	 * same thing as fsinfo->eof_uncompressed.
	 */
	if (isNull)
	{
		/*
		 * NULL is allowed.  Tables that were created before the release of
		 * the eof_uncompressed catalog column will have a NULL instead of a
		 * value.
		 */
		fsinfo->eof_uncompressed = InvalidUncompressedEof;
	}
	else
	{
		fsinfo->eof_uncompressed = (int64) DatumGetFloat8(eof_uncompressed);
	}

	fsinfo->segno = segno;
	fsinfo->eof = (int64) DatumGetFloat8(eof);
	fsinfo->tupcount = (int64) DatumGetFloat8(tupcount);

	ItemPointerSetInvalid(&fsinfo->sequence_tid);

	if (fsinfo->eof < 0)
		ereport(ERROR,
				(errcode(ERRCODE_GP_INTERNAL_ERROR),
				 errmsg("invalid eof " INT64_FORMAT " for relation %s",
						fsinfo->eof, RelationGetRelationName(parentrel))));

	/* Finish up scan and close appendonly catalog. */
	heap_close(pg_parquetseg_rel, AccessShareLock);

	return fsinfo;
}
/*
 * Adjust dependency record(s) to point to a different object of the same type
 *
 * classId/objectId specify the referencing object.
 * refClassId/oldRefObjectId specify the old referenced object.
 * newRefObjectId is the new referenced object (must be of class refClassId).
 *
 * Note the lack of objsubid parameters.  If there are subobject references
 * they will all be readjusted.
 *
 * Returns the number of records updated.
 */
long
changeDependencyFor(Oid classId, Oid objectId,
					Oid refClassId, Oid oldRefObjectId,
					Oid newRefObjectId)
{
	long		count = 0;
	Relation	depRel;
	ScanKeyData key[2];
	SysScanDesc scan;
	HeapTuple	tup;
	ObjectAddress objAddr;
	bool		newIsPinned;

	depRel = heap_open(DependRelationId, RowExclusiveLock);

	/*
	 * If oldRefObjectId is pinned, there won't be any dependency entries on
	 * it --- we can't cope in that case.  (This isn't really worth expending
	 * code to fix, in current usage; it just means you can't rename stuff
	 * out of pg_catalog, which would likely be a bad move anyway.)
	 */
	objAddr.classId = refClassId;
	objAddr.objectId = oldRefObjectId;
	objAddr.objectSubId = 0;

	if (isObjectPinned(&objAddr, depRel))
		ereport(ERROR,
				(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
				 errmsg("cannot remove dependency on %s because it is a system object",
						getObjectDescription(&objAddr))));

	/*
	 * We can handle adding a dependency on something pinned, though, since
	 * that just means deleting the dependency entry.
	 */
	objAddr.objectId = newRefObjectId;

	newIsPinned = isObjectPinned(&objAddr, depRel);

	/* Now search for dependency records */
	ScanKeyInit(&key[0],
				Anum_pg_depend_classid,
				BTEqualStrategyNumber, F_OIDEQ,
				ObjectIdGetDatum(classId));
	ScanKeyInit(&key[1],
				Anum_pg_depend_objid,
				BTEqualStrategyNumber, F_OIDEQ,
				ObjectIdGetDatum(objectId));

	scan = systable_beginscan(depRel, DependDependerIndexId, true,
							  NULL, 2, key);

	while (HeapTupleIsValid((tup = systable_getnext(scan))))
	{
		Form_pg_depend depform = (Form_pg_depend) GETSTRUCT(tup);

		if (depform->refclassid == refClassId &&
			depform->refobjid == oldRefObjectId)
		{
			if (newIsPinned)
				simple_heap_delete(depRel, &tup->t_self);
			else
			{
				/* make a modifiable copy */
				tup = heap_copytuple(tup);
				depform = (Form_pg_depend) GETSTRUCT(tup);

				depform->refobjid = newRefObjectId;

				simple_heap_update(depRel, &tup->t_self, tup);
				CatalogUpdateIndexes(depRel, tup);

				heap_freetuple(tup);
			}

			count++;
		}
	}

	systable_endscan(scan);

	heap_close(depRel, RowExclusiveLock);

	return count;
}
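/*
 * Illustrative sketch (not from the original source): how ALTER ... SET
 * SCHEMA style code typically uses changeDependencyFor, here for a function
 * being moved between namespaces.  Exactly one record is expected, so
 * anything else is treated as an internal error.
 */
static void
move_function_dependency_example(Oid funcOid, Oid oldNspOid, Oid newNspOid)
{
	if (changeDependencyFor(ProcedureRelationId, funcOid,
							NamespaceRelationId, oldNspOid, newNspOid) != 1)
		elog(ERROR, "failed to change schema dependency for function %u",
			 funcOid);
}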
/*
 * Add a tuple to the new heap.
 *
 * Visibility information is copied from the original tuple, except that
 * we "freeze" very-old tuples.  Note that since we scribble on new_tuple,
 * it had better be temp storage not a pointer to the original tuple.
 *
 * state		opaque state as returned by begin_heap_rewrite
 * old_tuple	original tuple in the old heap
 * new_tuple	new, rewritten tuple to be inserted to new heap
 */
void
rewrite_heap_tuple(RewriteState state,
				   HeapTuple old_tuple, HeapTuple new_tuple)
{
	MemoryContext old_cxt;
	ItemPointerData old_tid;
	TidHashKey	hashkey;
	bool		found;
	bool		free_new;

	old_cxt = MemoryContextSwitchTo(state->rs_cxt);

	/*
	 * Copy the original tuple's visibility information into new_tuple.
	 *
	 * XXX we might later need to copy some t_infomask2 bits, too?  Right
	 * now, we intentionally clear the HOT status bits.
	 */
	memcpy(&new_tuple->t_data->t_choice.t_heap,
		   &old_tuple->t_data->t_choice.t_heap,
		   sizeof(HeapTupleFields));

	new_tuple->t_data->t_infomask &= ~HEAP_XACT_MASK;
	new_tuple->t_data->t_infomask2 &= ~HEAP2_XACT_MASK;
	new_tuple->t_data->t_infomask |=
		old_tuple->t_data->t_infomask & HEAP_XACT_MASK;

	/*
	 * While we have our hands on the tuple, we may as well freeze any
	 * eligible xmin or xmax, so that future VACUUM effort can be saved.
	 */
	heap_freeze_tuple(new_tuple->t_data, state->rs_freeze_xid,
					  state->rs_cutoff_multi);

	/*
	 * Invalid ctid means that ctid should point to the tuple itself.  We'll
	 * override it later if the tuple is part of an update chain.
	 */
	ItemPointerSetInvalid(&new_tuple->t_data->t_ctid);

	/*
	 * If the tuple has been updated, check the old-to-new mapping hash table.
	 */
	if (!((old_tuple->t_data->t_infomask & HEAP_XMAX_INVALID) ||
		  HeapTupleHeaderIsOnlyLocked(old_tuple->t_data)) &&
		!(ItemPointerEquals(&(old_tuple->t_self),
							&(old_tuple->t_data->t_ctid))))
	{
		OldToNewMapping mapping;

		memset(&hashkey, 0, sizeof(hashkey));
		hashkey.xmin = HeapTupleHeaderGetUpdateXid(old_tuple->t_data);
		hashkey.tid = old_tuple->t_data->t_ctid;

		mapping = (OldToNewMapping)
			hash_search(state->rs_old_new_tid_map, &hashkey,
						HASH_FIND, NULL);

		if (mapping != NULL)
		{
			/*
			 * We've already copied the tuple that t_ctid points to, so we
			 * can set the ctid of this tuple to point to the new location,
			 * and insert it right away.
			 */
			new_tuple->t_data->t_ctid = mapping->new_tid;

			/* We don't need the mapping entry anymore */
			hash_search(state->rs_old_new_tid_map, &hashkey,
						HASH_REMOVE, &found);
			Assert(found);
		}
		else
		{
			/*
			 * We haven't seen the tuple t_ctid points to yet.  Stash this
			 * tuple into unresolved_tups to be written later.
			 */
			UnresolvedTup unresolved;

			unresolved = hash_search(state->rs_unresolved_tups, &hashkey,
									 HASH_ENTER, &found);
			Assert(!found);

			unresolved->old_tid = old_tuple->t_self;
			unresolved->tuple = heap_copytuple(new_tuple);

			/*
			 * We can't do anything more now, since we don't know where the
			 * tuple will be written.
			 */
			MemoryContextSwitchTo(old_cxt);
			return;
		}
	}

	/*
	 * Now we will write the tuple, and then check to see if it is the B
	 * tuple in any new or known pair.  When we resolve a known pair, we will
	 * be able to write that pair's A tuple, and then we have to check if it
	 * resolves some other pair.  Hence, we need a loop here.
	 */
	old_tid = old_tuple->t_self;
	free_new = false;

	for (;;)
	{
		ItemPointerData new_tid;

		/* Insert the tuple and find out where it's put in new_heap */
		raw_heap_insert(state, new_tuple);
		new_tid = new_tuple->t_self;

		/*
		 * If the tuple is the updated version of a row, and the prior
		 * version wouldn't be DEAD yet, then we need to either resolve the
		 * prior version (if it's waiting in rs_unresolved_tups), or make an
		 * entry in rs_old_new_tid_map (so we can resolve it when we do see
		 * it).  The previous tuple's xmax would equal this one's xmin, so
		 * it's RECENTLY_DEAD if and only if the xmin is not before
		 * OldestXmin.
		 */
		if ((new_tuple->t_data->t_infomask & HEAP_UPDATED) &&
			!TransactionIdPrecedes(HeapTupleHeaderGetXmin(new_tuple->t_data),
								   state->rs_oldest_xmin))
		{
			/*
			 * Okay, this is B in an update pair.  See if we've seen A.
			 */
			UnresolvedTup unresolved;

			memset(&hashkey, 0, sizeof(hashkey));
			hashkey.xmin = HeapTupleHeaderGetXmin(new_tuple->t_data);
			hashkey.tid = old_tid;

			unresolved = hash_search(state->rs_unresolved_tups, &hashkey,
									 HASH_FIND, NULL);

			if (unresolved != NULL)
			{
				/*
				 * We have seen and memorized the previous tuple already.
				 * Now that we know where we inserted the tuple its t_ctid
				 * points to, fix its t_ctid and insert it to the new heap.
				 */
				if (free_new)
					heap_freetuple(new_tuple);
				new_tuple = unresolved->tuple;
				free_new = true;
				old_tid = unresolved->old_tid;
				new_tuple->t_data->t_ctid = new_tid;

				/*
				 * We don't need the hash entry anymore, but don't free its
				 * tuple just yet.
				 */
				hash_search(state->rs_unresolved_tups, &hashkey,
							HASH_REMOVE, &found);
				Assert(found);

				/* loop back to insert the previous tuple in the chain */
				continue;
			}
			else
			{
				/*
				 * Remember the new tid of this tuple.  We'll use it to set
				 * the ctid when we find the previous tuple in the chain.
				 */
				OldToNewMapping mapping;

				mapping = hash_search(state->rs_old_new_tid_map, &hashkey,
									  HASH_ENTER, &found);
				Assert(!found);

				mapping->new_tid = new_tid;
			}
		}

		/* Done with this (chain of) tuples, for now */
		if (free_new)
			heap_freetuple(new_tuple);
		break;
	}

	MemoryContextSwitchTo(old_cxt);
}
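/*
 * Illustrative sketch (not from the original source): the surrounding
 * rewrite API as CLUSTER/VACUUM FULL use it.  The begin_heap_rewrite
 * signature shown here is an assumption matching the era of the code above;
 * it has changed across server versions.
 */
static void
rewrite_one_tuple_example(Relation new_heap, HeapTuple old_tuple,
						  TransactionId oldest_xmin, TransactionId freeze_xid,
						  MultiXactId cutoff_multi)
{
	RewriteState state;
	HeapTuple	new_tuple;

	state = begin_heap_rewrite(new_heap, oldest_xmin, freeze_xid,
							   cutoff_multi, true /* use_wal */ );

	/*
	 * new_tuple must be scribble-able temp storage, per the comment above;
	 * rewrite_heap_tuple copies what it needs to keep, so we can free it.
	 */
	new_tuple = heap_copytuple(old_tuple);
	rewrite_heap_tuple(state, old_tuple, new_tuple);
	heap_freetuple(new_tuple);

	end_heap_rewrite(state);
}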
/*
 * shdepChangeDep
 *
 * Update shared dependency records to account for an updated referenced
 * object.  This is an internal workhorse for operations such as changing
 * an object's owner.
 *
 * There must be no more than one existing entry for the given dependent
 * object and dependency type!  So in practice this can only be used for
 * updating SHARED_DEPENDENCY_OWNER entries, which should have that property.
 *
 * If there is no previous entry, we assume it was referencing a PINned
 * object, so we create a new entry.  If the new referenced object is
 * PINned, we don't create an entry (and drop the old one, if any).
 *
 * sdepRel must be the pg_shdepend relation, already opened and suitably
 * locked.
 */
static void
shdepChangeDep(Relation sdepRel,
			   Oid classid, Oid objid, int32 objsubid,
			   Oid refclassid, Oid refobjid,
			   SharedDependencyType deptype)
{
	Oid			dbid = classIdGetDbId(classid);
	HeapTuple	oldtup = NULL;
	HeapTuple	scantup;
	ScanKeyData key[4];
	SysScanDesc scan;

	/*
	 * Make sure the new referenced object doesn't go away while we record
	 * the dependency.
	 */
	shdepLockAndCheckObject(refclassid, refobjid);

	/*
	 * Look for a previous entry
	 */
	ScanKeyInit(&key[0],
				Anum_pg_shdepend_dbid,
				BTEqualStrategyNumber, F_OIDEQ,
				ObjectIdGetDatum(dbid));
	ScanKeyInit(&key[1],
				Anum_pg_shdepend_classid,
				BTEqualStrategyNumber, F_OIDEQ,
				ObjectIdGetDatum(classid));
	ScanKeyInit(&key[2],
				Anum_pg_shdepend_objid,
				BTEqualStrategyNumber, F_OIDEQ,
				ObjectIdGetDatum(objid));
	ScanKeyInit(&key[3],
				Anum_pg_shdepend_objsubid,
				BTEqualStrategyNumber, F_INT4EQ,
				Int32GetDatum(objsubid));

	scan = systable_beginscan(sdepRel, SharedDependDependerIndexId, true,
							  NULL, 4, key);

	while ((scantup = systable_getnext(scan)) != NULL)
	{
		/* Ignore if not of the target dependency type */
		if (((Form_pg_shdepend) GETSTRUCT(scantup))->deptype != deptype)
			continue;
		/* Caller screwed up if multiple matches */
		if (oldtup)
			elog(ERROR,
				 "multiple pg_shdepend entries for object %u/%u/%d deptype %c",
				 classid, objid, objsubid, deptype);
		oldtup = heap_copytuple(scantup);
	}

	systable_endscan(scan);

	if (isSharedObjectPinned(refclassid, refobjid, sdepRel))
	{
		/* No new entry needed, so just delete existing entry if any */
		if (oldtup)
			CatalogTupleDelete(sdepRel, &oldtup->t_self);
	}
	else if (oldtup)
	{
		/* Need to update existing entry */
		Form_pg_shdepend shForm = (Form_pg_shdepend) GETSTRUCT(oldtup);

		/* Since oldtup is a copy, we can just modify it in-memory */
		shForm->refclassid = refclassid;
		shForm->refobjid = refobjid;

		CatalogTupleUpdate(sdepRel, &oldtup->t_self, oldtup);
	}
	else
	{
		/* Need to insert new entry */
		Datum		values[Natts_pg_shdepend];
		bool		nulls[Natts_pg_shdepend];

		memset(nulls, false, sizeof(nulls));

		values[Anum_pg_shdepend_dbid - 1] = ObjectIdGetDatum(dbid);
		values[Anum_pg_shdepend_classid - 1] = ObjectIdGetDatum(classid);
		values[Anum_pg_shdepend_objid - 1] = ObjectIdGetDatum(objid);
		values[Anum_pg_shdepend_objsubid - 1] = Int32GetDatum(objsubid);

		values[Anum_pg_shdepend_refclassid - 1] = ObjectIdGetDatum(refclassid);
		values[Anum_pg_shdepend_refobjid - 1] = ObjectIdGetDatum(refobjid);
		values[Anum_pg_shdepend_deptype - 1] = CharGetDatum(deptype);

		/*
		 * we are reusing oldtup just to avoid declaring a new variable, but
		 * it's certainly a new tuple
		 */
		oldtup = heap_form_tuple(RelationGetDescr(sdepRel), values, nulls);
		CatalogTupleInsert(sdepRel, oldtup);
	}

	if (oldtup)
		heap_freetuple(oldtup);
}
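/*
 * Illustrative sketch (not from the original source): shdepChangeDep is the
 * workhorse behind owner changes; a wrapper in the style of
 * changeDependencyOnOwner might look roughly like this (the wrapper name
 * and locking details are assumptions).
 */
static void
change_owner_dep_example(Oid classId, Oid objectId, Oid newOwnerId)
{
	Relation	sdepRel = heap_open(SharedDependRelationId, RowExclusiveLock);

	shdepChangeDep(sdepRel, classId, objectId, 0,
				   AuthIdRelationId, newOwnerId,
				   SHARED_DEPENDENCY_OWNER);

	heap_close(sdepRel, RowExclusiveLock);
}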
/* ----------------------------------------------------------------
 *		ExecUpdate
 *
 *		note: we can't run UPDATE queries with transactions
 *		off because UPDATEs are actually INSERTs and our
 *		scan will mistakenly loop forever, updating the tuple
 *		it just inserted..  This should be fixed but until it
 *		is, we don't want to get stuck in an infinite loop
 *		which corrupts your database..
 * ----------------------------------------------------------------
 */
void
ExecUpdate(TupleTableSlot *slot,
		   ItemPointer tupleid,
		   TupleTableSlot *planSlot,
		   DestReceiver *dest,
		   EState *estate)
{
	HeapTuple	tuple;
	ResultRelInfo *resultRelInfo;
	Relation	resultRelationDesc;
	HTSU_Result result;
	ItemPointerData update_ctid;
	TransactionId update_xmax;

	/*
	 * abort the operation if not running transactions
	 */
	if (IsBootstrapProcessingMode())
		elog(ERROR, "cannot UPDATE during bootstrap");

	/*
	 * get the heap tuple out of the tuple table slot, making sure we have a
	 * writable copy
	 */
	tuple = ExecFetchSlotHeapTuple(slot);

	/*
	 * get information on the (current) result relation
	 */
	resultRelInfo = estate->es_result_relation_info;
	resultRelationDesc = resultRelInfo->ri_RelationDesc;

	/* see if this update would move the tuple to a different partition */
	if (estate->es_result_partitions)
	{
		AttrNumber	max_attr;
		Datum	   *values;
		bool	   *nulls;
		Oid			targetid;

		Assert(estate->es_partition_state != NULL &&
			   estate->es_partition_state->accessMethods != NULL);
		if (!estate->es_partition_state->accessMethods->part_cxt)
			estate->es_partition_state->accessMethods->part_cxt =
				GetPerTupleExprContext(estate)->ecxt_per_tuple_memory;

		Assert(PointerIsValid(estate->es_result_partitions));

		max_attr = estate->es_partition_state->max_partition_attr;

		slot_getsomeattrs(slot, max_attr);
		values = slot_get_values(slot);
		nulls = slot_get_isnull(slot);

		targetid = selectPartition(estate->es_result_partitions, values,
								   nulls, slot->tts_tupleDescriptor,
								   estate->es_partition_state->accessMethods);

		if (!OidIsValid(targetid))
			ereport(ERROR,
					(errcode(ERRCODE_NO_PARTITION_FOR_PARTITIONING_KEY),
					 errmsg("no partition for partitioning key")));

		if (RelationGetRelid(resultRelationDesc) != targetid)
		{
			ereport(ERROR,
					(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
					 errmsg("moving tuple from partition \"%s\" to "
							"partition \"%s\" not supported",
							get_rel_name(RelationGetRelid(resultRelationDesc)),
							get_rel_name(targetid)),
					 errOmitLocation(true)));
		}
	}

	/* BEFORE ROW UPDATE Triggers */
	if (resultRelInfo->ri_TrigDesc &&
		resultRelInfo->ri_TrigDesc->n_before_row[TRIGGER_EVENT_UPDATE] > 0)
	{
		HeapTuple	newtuple;

		newtuple = ExecBRUpdateTriggers(estate, resultRelInfo,
										tupleid, tuple,
										estate->es_snapshot->curcid);

		if (newtuple == NULL)	/* "do nothing" */
			return;

		if (newtuple != tuple)	/* modified by Trigger(s) */
		{
			/*
			 * Put the modified tuple into a slot for convenience of routines
			 * below.  We assume the tuple was allocated in per-tuple memory
			 * context, and therefore will go away by itself.  The tuple
			 * table slot should not try to clear it.
			 */
			TupleTableSlot *newslot = estate->es_trig_tuple_slot;

			if (newslot->tts_tupleDescriptor != slot->tts_tupleDescriptor)
				ExecSetSlotDescriptor(newslot, slot->tts_tupleDescriptor);
			ExecStoreGenericTuple(newtuple, newslot, false);
			newslot->tts_tableOid = slot->tts_tableOid;	/* for constraints */
			slot = newslot;
			tuple = newtuple;
		}
	}

	/*
	 * Check the constraints of the tuple
	 *
	 * If we generate a new candidate tuple after EvalPlanQual testing, we
	 * must loop back here and recheck constraints.  (We don't need to redo
	 * triggers, however.  If there are any BEFORE triggers then trigger.c
	 * will have done heap_lock_tuple to lock the correct tuple, so there's
	 * no need to do them again.)
	 */
lreplace:;
	if (resultRelationDesc->rd_att->constr)
		ExecConstraints(resultRelInfo, slot, estate);

	if (!GpPersistent_IsPersistentRelation(resultRelationDesc->rd_id))
	{
		/*
		 * Normal UPDATE path.
		 */

		/*
		 * replace the heap tuple
		 *
		 * Note: if es_crosscheck_snapshot isn't InvalidSnapshot, we check
		 * that the row to be updated is visible to that snapshot, and throw
		 * a can't-serialize error if not.  This is a special-case behavior
		 * needed for referential integrity updates in serializable
		 * transactions.
		 */
		result = heap_update(resultRelationDesc, tupleid, tuple,
							 &update_ctid, &update_xmax,
							 estate->es_snapshot->curcid,
							 estate->es_crosscheck_snapshot,
							 true /* wait for commit */ );

		switch (result)
		{
			case HeapTupleSelfUpdated:
				/* already deleted by self; nothing to do */
				return;

			case HeapTupleMayBeUpdated:
				break;

			case HeapTupleUpdated:
				if (IsXactIsoLevelSerializable)
					ereport(ERROR,
							(errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
							 errmsg("could not serialize access due to concurrent update")));
				else if (!ItemPointerEquals(tupleid, &update_ctid))
				{
					TupleTableSlot *epqslot;

					epqslot = EvalPlanQual(estate,
										   resultRelInfo->ri_RangeTableIndex,
										   &update_ctid, update_xmax,
										   estate->es_snapshot->curcid);
					if (!TupIsNull(epqslot))
					{
						*tupleid = update_ctid;
						slot = ExecFilterJunk(estate->es_junkFilter, epqslot);
						tuple = ExecFetchSlotHeapTuple(slot);
						goto lreplace;
					}
				}
				/* tuple already deleted; nothing to do */
				return;

			default:
				elog(ERROR, "unrecognized heap_update status: %u", result);
				return;
		}
	}
	else
	{
		HeapTuple	persistentTuple;

		/*
		 * Persistent metadata path.
		 */
		persistentTuple = heap_copytuple(tuple);
		persistentTuple->t_self = *tupleid;

		frozen_heap_inplace_update(resultRelationDesc, persistentTuple);

		heap_freetuple(persistentTuple);
	}

	IncrReplaced();
	(estate->es_processed)++;

	/*
	 * Note: instead of having to update the old index tuples associated with
	 * the heap tuple, all we do is form and insert new index tuples.  This
	 * is because UPDATEs are actually DELETEs and INSERTs, and index tuple
	 * deletion is done later by VACUUM (see notes in ExecDelete).  All we do
	 * here is insert new index tuples.  -cim 9/27/89
	 */

	/*
	 * insert index entries for tuple
	 *
	 * Note: heap_update returns the tid (location) of the new tuple in the
	 * t_self field.
	 */
	if (resultRelInfo->ri_NumIndices > 0)
		ExecInsertIndexTuples(slot, &(tuple->t_self), estate, false);

	/* AFTER ROW UPDATE Triggers */
	ExecARUpdateTriggers(estate, resultRelInfo, tupleid, tuple);
}
/* ----------------------------------------------------------------
 * caql_getfirst_only()
 * Return a copy of the first tuple, palloc'd in the current memory context,
 * and end the scan.  Clients should heap_freetuple() as necessary.
 * If pbOnly is not NULL, return TRUE if a second tuple is not found,
 * else return FALSE
 * NOTE: this function will return NULL if no tuples satisfy the
 * caql predicate -- use HeapTupleIsValid() to detect this condition.
 * ----------------------------------------------------------------
 */
HeapTuple
caql_getfirst_only(cqContext *pCtx0, bool *pbOnly, cq_list *pcql)
{
	const char *caql_str = pcql->caqlStr;
	const char *filenam = pcql->filename;
	int			lineno = pcql->lineno;
	struct caql_hash_cookie *pchn = cq_lookup(caql_str, strlen(caql_str), pcql);
	cqContext  *pCtx;
	cqContext	cqc;
	HeapTuple	tuple,
				newTup = NULL;

	if (NULL == pchn)
		elog(ERROR, "invalid caql string: %s\nfile: %s, line %d",
			 caql_str, filenam, lineno);

	Assert(!pchn->bInsert);		/* INSERT not allowed */

	/* use the provided context, or provide a clean local ctx */
	if (pCtx0)
		pCtx = pCtx0;
	else
		pCtx = cqclr(&cqc);

	pCtx = caql_switch(pchn, pCtx, pcql);
	/* NOTE: caql_switch frees the pcql */

	if (pbOnly)
		*pbOnly = true;

	/* use the SysCache */
	if (pCtx->cq_usesyscache)
	{
		tuple = SearchSysCacheKeyArray(pCtx->cq_cacheId,
									   pCtx->cq_NumKeys,
									   pCtx->cq_cacheKeys);

		if (HeapTupleIsValid(tuple))
		{
			newTup = heap_copytuple(tuple);
			ReleaseSysCache(tuple);
			/* only one */
		}
		caql_heapclose(pCtx);

		pCtx->cq_lasttup = newTup;	/* need this for update/delete */
		return newTup;
	}

	if (HeapTupleIsValid(tuple = systable_getnext(pCtx->cq_sysScan)))
	{
		/* always copy the tuple, because the endscan releases tup memory */
		newTup = heap_copytuple(tuple);

		if (pbOnly)
			*pbOnly = !(HeapTupleIsValid(systable_getnext(pCtx->cq_sysScan)));
	}

	systable_endscan(pCtx->cq_sysScan);
	caql_heapclose(pCtx);

	pCtx->cq_lasttup = newTup;	/* need this for update/delete */
	return newTup;
}
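/*
 * Illustrative sketch (not from the original source): a hypothetical caql
 * call site for caql_getfirst_only, checking whether exactly one matching
 * row exists.  The catalog and key used here are placeholders.
 */
static bool
exactly_one_matching_row_example(Oid relid)
{
	bool		only = false;
	HeapTuple	tup;

	tup = caql_getfirst_only(NULL, &only,
							 cql("SELECT * FROM pg_class "
								 " WHERE oid = :1 ",
								 ObjectIdGetDatum(relid)));
	if (!HeapTupleIsValid(tup))
		return false;
	heap_freetuple(tup);
	return only;
}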
/*
 * AlterConstraintNamespaces
 *		Find any constraints belonging to the specified object,
 *		and move them to the specified new namespace.
 *
 * isType indicates whether the owning object is a type or a relation.
 */
void
AlterConstraintNamespaces(Oid ownerId, Oid oldNspId,
						  Oid newNspId, bool isType,
						  ObjectAddresses *objsMoved)
{
	Relation	conRel;
	ScanKeyData key[1];
	SysScanDesc scan;
	HeapTuple	tup;

	conRel = heap_open(ConstraintRelationId, RowExclusiveLock);

	if (isType)
	{
		ScanKeyInit(&key[0],
					Anum_pg_constraint_contypid,
					BTEqualStrategyNumber, F_OIDEQ,
					ObjectIdGetDatum(ownerId));

		scan = systable_beginscan(conRel, ConstraintTypidIndexId, true,
								  NULL, 1, key);
	}
	else
	{
		ScanKeyInit(&key[0],
					Anum_pg_constraint_conrelid,
					BTEqualStrategyNumber, F_OIDEQ,
					ObjectIdGetDatum(ownerId));

		scan = systable_beginscan(conRel, ConstraintRelidIndexId, true,
								  NULL, 1, key);
	}

	while (HeapTupleIsValid((tup = systable_getnext(scan))))
	{
		Form_pg_constraint conform = (Form_pg_constraint) GETSTRUCT(tup);
		ObjectAddress thisobj;

		thisobj.classId = ConstraintRelationId;
		thisobj.objectId = HeapTupleGetOid(tup);
		thisobj.objectSubId = 0;

		if (object_address_present(&thisobj, objsMoved))
			continue;

		if (conform->connamespace == oldNspId)
		{
			tup = heap_copytuple(tup);
			conform = (Form_pg_constraint) GETSTRUCT(tup);

			conform->connamespace = newNspId;

			simple_heap_update(conRel, &tup->t_self, tup);
			CatalogUpdateIndexes(conRel, tup);

			/*
			 * Note: currently, the constraint will not have its own
			 * dependency on the namespace, so we don't need to do
			 * changeDependencyFor().
			 */
		}

		InvokeObjectPostAlterHook(ConstraintRelationId, thisobj.objectId, 0);

		add_exact_object_address(&thisobj, objsMoved);
	}

	systable_endscan(scan);

	heap_close(conRel, RowExclusiveLock);
}
/*
 * shdepChangeDep
 *
 * Update shared dependency records to account for an updated referenced
 * object.  This is an internal workhorse for operations such as changing
 * an object's owner.
 *
 * There must be no more than one existing entry for the given dependent
 * object and dependency type!  So in practice this can only be used for
 * updating SHARED_DEPENDENCY_OWNER entries, which should have that property.
 *
 * If there is no previous entry, we assume it was referencing a PINned
 * object, so we create a new entry.  If the new referenced object is
 * PINned, we don't create an entry (and drop the old one, if any).
 *
 * sdepRel must be the pg_shdepend relation, already opened and suitably
 * locked.
 */
static void
shdepChangeDep(Relation sdepRel, Oid classid, Oid objid,
			   Oid refclassid, Oid refobjid,
			   SharedDependencyType deptype)
{
	Oid			dbid = classIdGetDbId(classid);
	bool		bGotOne = false;
	HeapTuple	oldtup = NULL;
	HeapTuple	scantup;
	cqContext  *pcqCtx;
	cqContext	cqc;

	/*
	 * Make sure the new referenced object doesn't go away while we record
	 * the dependency.
	 */
	shdepLockAndCheckObject(refclassid, refobjid);

	/*
	 * Look for a previous entry
	 */
	pcqCtx = caql_beginscan(
			caql_addrel(cqclr(&cqc), sdepRel),
			cql("SELECT * FROM pg_shdepend "
				" WHERE dbid = :1 "
				" AND classid = :2 "
				" AND objid = :3 "
				" FOR UPDATE ",
				ObjectIdGetDatum(dbid),
				ObjectIdGetDatum(classid),
				ObjectIdGetDatum(objid)));

	while (HeapTupleIsValid(scantup = caql_getnext(pcqCtx)))
	{
		/* Ignore if not of the target dependency type */
		if (((Form_pg_shdepend) GETSTRUCT(scantup))->deptype != deptype)
			continue;
		/* Caller screwed up if multiple matches */
		if (bGotOne)
			elog(ERROR,
				 "multiple pg_shdepend entries for object %u/%u deptype %c",
				 classid, objid, deptype);
		bGotOne = true;
	}
	caql_endscan(pcqCtx);

	/*
	 * Decision logic:
	 *
	 *	 if the referenced object is pinned:
	 *		 if we found an entry, drop it
	 *	 else:
	 *		 if we found an entry, update it
	 *		 else, insert a new one
	 */
	if (!bGotOne)				/* no match */
	{
		/* if no match and pinned, new entry not needed */
		if (isSharedObjectPinned(refclassid, refobjid, sdepRel))
		{
			/*
			 * just return -- don't need to free anything because sdepRel was
			 * passed in, and pcqCtx is freed
			 */
			return;
		}

		pcqCtx = caql_beginscan(
				caql_addrel(cqclr(&cqc), sdepRel),
				cql("INSERT INTO pg_shdepend ", NULL));

		/* Need to insert new entry */
		Datum		values[Natts_pg_shdepend];
		bool		nulls[Natts_pg_shdepend];

		memset(nulls, 0, sizeof(nulls));

		values[Anum_pg_shdepend_dbid - 1] = ObjectIdGetDatum(dbid);
		values[Anum_pg_shdepend_classid - 1] = ObjectIdGetDatum(classid);
		values[Anum_pg_shdepend_objid - 1] = ObjectIdGetDatum(objid);

		values[Anum_pg_shdepend_refclassid - 1] = ObjectIdGetDatum(refclassid);
		values[Anum_pg_shdepend_refobjid - 1] = ObjectIdGetDatum(refobjid);
		values[Anum_pg_shdepend_deptype - 1] = CharGetDatum(deptype);

		/*
		 * we are reusing oldtup just to avoid declaring a new variable, but
		 * it's certainly a new tuple
		 */
		oldtup = caql_form_tuple(pcqCtx, values, nulls);
		caql_insert(pcqCtx, oldtup);
		/* and Update indexes (implicit) */

		heap_freetuple(oldtup);
		caql_endscan(pcqCtx);
	}
	else
	{
		/* Do the scan again, but do the update/delete this time */
		pcqCtx = caql_beginscan(
				caql_addrel(cqclr(&cqc), sdepRel),
				cql("SELECT * FROM pg_shdepend "
					" WHERE dbid = :1 "
					" AND classid = :2 "
					" AND objid = :3 "
					" FOR UPDATE ",
					ObjectIdGetDatum(dbid),
					ObjectIdGetDatum(classid),
					ObjectIdGetDatum(objid)));

		while (HeapTupleIsValid(scantup = caql_getnext(pcqCtx)))
		{
			/* Ignore if not of the target dependency type */
			if (((Form_pg_shdepend) GETSTRUCT(scantup))->deptype != deptype)
				continue;

			/*
			 * NOTE: already tested for multiple matches - just use the first
			 * one
			 */
			if (isSharedObjectPinned(refclassid, refobjid, sdepRel))
			{
				/* No new entry needed, so just delete existing entry if any */
				caql_delete_current(pcqCtx);
			}
			else
			{
				oldtup = heap_copytuple(scantup);

				/* Need to update existing entry */
				Form_pg_shdepend shForm = (Form_pg_shdepend) GETSTRUCT(oldtup);

				/* Since oldtup is a copy, we can just modify it in-memory */
				shForm->refclassid = refclassid;
				shForm->refobjid = refobjid;

				caql_update_current(pcqCtx, oldtup);
				/* and Update indexes (implicit) */

				heap_freetuple(oldtup);
			}
			break;
		}
		caql_endscan(pcqCtx);
	}
}
/*
 * ExecGroup -
 *
 *		Return one tuple for each group of matching input tuples.
 */
TupleTableSlot *
ExecGroup(GroupState *node)
{
	EState	   *estate;
	ExprContext *econtext;
	TupleDesc	tupdesc;
	int			numCols;
	AttrNumber *grpColIdx;
	HeapTuple	outerTuple = NULL;
	HeapTuple	firsttuple;
	TupleTableSlot *outerslot;
	ProjectionInfo *projInfo;
	TupleTableSlot *resultSlot;

	/*
	 * get state info from node
	 */
	if (node->grp_done)
		return NULL;
	estate = node->ss.ps.state;
	econtext = node->ss.ps.ps_ExprContext;
	tupdesc = ExecGetScanType(&node->ss);
	numCols = ((Group *) node->ss.ps.plan)->numCols;
	grpColIdx = ((Group *) node->ss.ps.plan)->grpColIdx;

	/*
	 * We need not call ResetExprContext here because execTuplesMatch will
	 * reset the per-tuple memory context once per input tuple.
	 */

	/* If we don't already have first tuple of group, fetch it */
	/* this should occur on the first call only */
	firsttuple = node->grp_firstTuple;
	if (firsttuple == NULL)
	{
		outerslot = ExecProcNode(outerPlanState(node));
		if (TupIsNull(outerslot))
		{
			node->grp_done = TRUE;
			return NULL;
		}
		node->grp_firstTuple = firsttuple = heap_copytuple(outerslot->val);
	}

	/*
	 * Scan over all tuples that belong to this group
	 */
	for (;;)
	{
		outerslot = ExecProcNode(outerPlanState(node));
		if (TupIsNull(outerslot))
		{
			node->grp_done = TRUE;
			outerTuple = NULL;
			break;
		}
		outerTuple = outerslot->val;

		/*
		 * Compare with first tuple and see if this tuple is of the same
		 * group.
		 */
		if (!execTuplesMatch(firsttuple, outerTuple,
							 tupdesc,
							 numCols, grpColIdx,
							 node->eqfunctions,
							 econtext->ecxt_per_tuple_memory))
			break;
	}

	/*
	 * form a projection tuple based on the (copied) first tuple of the
	 * group, and store it in the result tuple slot.
	 */
	ExecStoreTuple(firsttuple, node->ss.ss_ScanTupleSlot, InvalidBuffer, false);
	econtext->ecxt_scantuple = node->ss.ss_ScanTupleSlot;
	projInfo = node->ss.ps.ps_ProjInfo;
	resultSlot = ExecProject(projInfo, NULL);

	/* save first tuple of next group, if we are not done yet */
	if (!node->grp_done)
	{
		heap_freetuple(firsttuple);
		node->grp_firstTuple = heap_copytuple(outerTuple);
	}

	return resultSlot;
}
/* ----------------------------------------------------------------
 *		ExecUpdate
 *
 *		note: we can't run UPDATE queries with transactions
 *		off because UPDATEs are actually INSERTs and our
 *		scan will mistakenly loop forever, updating the tuple
 *		it just inserted..  This should be fixed but until it
 *		is, we don't want to get stuck in an infinite loop
 *		which corrupts your database..
 * ----------------------------------------------------------------
 */
void
ExecUpdate(TupleTableSlot *slot,
		   ItemPointer tupleid,
		   TupleTableSlot *planSlot,
		   DestReceiver *dest,
		   EState *estate)
{
	void	   *tuple;
	ResultRelInfo *resultRelInfo;
	Relation	resultRelationDesc;
	HTSU_Result result;
	ItemPointerData update_ctid;
	TransactionId update_xmax;
	AOTupleId	aoTupleId = AOTUPLEID_INIT;
	TupleTableSlot *partslot = NULL;

	/*
	 * abort the operation if not running transactions
	 */
	if (IsBootstrapProcessingMode())
		elog(ERROR, "cannot UPDATE during bootstrap");

	/*
	 * get information on the (current) result relation
	 */
	resultRelInfo = estate->es_result_relation_info;
	resultRelationDesc = resultRelInfo->ri_RelationDesc;

	bool		rel_is_heap = RelationIsHeap(resultRelationDesc);
	bool		rel_is_aorows = RelationIsAoRows(resultRelationDesc);
	bool		rel_is_aocols = RelationIsAoCols(resultRelationDesc);
	bool		rel_is_external = RelationIsExternal(resultRelationDesc);

	/*
	 * get the heap tuple out of the tuple table slot, making sure we have a
	 * writable copy
	 */
	if (rel_is_heap)
	{
		partslot = slot;
		tuple = ExecFetchSlotHeapTuple(partslot);
	}
	else if (rel_is_aorows || rel_is_aocols)
	{
		/*
		 * It is necessary to reconstruct a logically compatible tuple as a
		 * physically compatible one.  The slot's tuple descriptor comes from
		 * the projection target list, which doesn't indicate dropped
		 * columns, and MemTuple cannot deal with cases without converting
		 * the target list back into the original relation's tuple desc.
		 */
		partslot = reconstructMatchingTupleSlot(slot, resultRelInfo);

		/*
		 * We directly inline toasted columns here as update with toasted
		 * columns would create two references to the same toasted value.
		 */
		tuple = ExecFetchSlotMemTuple(partslot, true);
	}
	else if (rel_is_external)
	{
		if (estate->es_result_partitions &&
			estate->es_result_partitions->part->parrelid != 0)
		{
			ereport(ERROR,
					(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
					 errmsg("Update external partitions not supported.")));
			return;
		}
		else
		{
			partslot = slot;
			tuple = ExecFetchSlotHeapTuple(partslot);
		}
	}
	else
	{
		Insist(false);
	}

	/* see if this update would move the tuple to a different partition */
	if (estate->es_result_partitions)
		checkPartitionUpdate(estate, partslot, resultRelInfo);

	/* BEFORE ROW UPDATE Triggers */
	if (resultRelInfo->ri_TrigDesc &&
		resultRelInfo->ri_TrigDesc->n_before_row[TRIGGER_EVENT_UPDATE] > 0)
	{
		HeapTuple	newtuple;

		newtuple = ExecBRUpdateTriggers(estate, resultRelInfo,
										tupleid, tuple,
										estate->es_snapshot->curcid);

		if (newtuple == NULL)	/* "do nothing" */
			return;

		if (newtuple != tuple)	/* modified by Trigger(s) */
		{
			/*
			 * Put the modified tuple into a slot for convenience of routines
			 * below.  We assume the tuple was allocated in per-tuple memory
			 * context, and therefore will go away by itself.  The tuple
			 * table slot should not try to clear it.
			 */
			TupleTableSlot *newslot = estate->es_trig_tuple_slot;

			if (newslot->tts_tupleDescriptor != partslot->tts_tupleDescriptor)
				ExecSetSlotDescriptor(newslot, partslot->tts_tupleDescriptor);
			ExecStoreGenericTuple(newtuple, newslot, false);
			newslot->tts_tableOid = partslot->tts_tableOid;	/* for constraints */
			partslot = newslot;
			tuple = newtuple;
		}
	}

	/*
	 * Check the constraints of the tuple
	 *
	 * If we generate a new candidate tuple after EvalPlanQual testing, we
	 * must loop back here and recheck constraints.  (We don't need to redo
	 * triggers, however.  If there are any BEFORE triggers then trigger.c
	 * will have done heap_lock_tuple to lock the correct tuple, so there's
	 * no need to do them again.)
	 */
lreplace:;
	if (resultRelationDesc->rd_att->constr)
		ExecConstraints(resultRelInfo, partslot, estate);

	if (!GpPersistent_IsPersistentRelation(resultRelationDesc->rd_id))
	{
		/*
		 * Normal UPDATE path.
		 */

		/*
		 * replace the heap tuple
		 *
		 * Note: if es_crosscheck_snapshot isn't InvalidSnapshot, we check
		 * that the row to be updated is visible to that snapshot, and throw
		 * a can't-serialize error if not.  This is a special-case behavior
		 * needed for referential integrity updates in serializable
		 * transactions.
		 */
		if (rel_is_heap)
		{
			result = heap_update(resultRelationDesc, tupleid, tuple,
								 &update_ctid, &update_xmax,
								 estate->es_snapshot->curcid,
								 estate->es_crosscheck_snapshot,
								 true /* wait for commit */ );
		}
		else if (rel_is_aorows)
		{
			if (IsXactIsoLevelSerializable)
			{
				ereport(ERROR,
						(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
						 errmsg("Updates on append-only tables are not supported in serializable transactions.")));
			}

			if (resultRelInfo->ri_updateDesc == NULL)
			{
				ResultRelInfoSetSegno(resultRelInfo, estate->es_result_aosegnos);
				resultRelInfo->ri_updateDesc = (AppendOnlyUpdateDesc)
					appendonly_update_init(resultRelationDesc,
										   ActiveSnapshot,
										   resultRelInfo->ri_aosegno);
			}
			result = appendonly_update(resultRelInfo->ri_updateDesc,
									   tuple, (AOTupleId *) tupleid,
									   &aoTupleId);
		}
		else if (rel_is_aocols)
		{
			if (IsXactIsoLevelSerializable)
			{
				ereport(ERROR,
						(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
						 errmsg("Updates on append-only tables are not supported in serializable transactions.")));
			}

			if (resultRelInfo->ri_updateDesc == NULL)
			{
				ResultRelInfoSetSegno(resultRelInfo, estate->es_result_aosegnos);
				resultRelInfo->ri_updateDesc = (AppendOnlyUpdateDesc)
					aocs_update_init(resultRelationDesc,
									 resultRelInfo->ri_aosegno);
			}
			result = aocs_update(resultRelInfo->ri_updateDesc,
								 partslot, (AOTupleId *) tupleid,
								 &aoTupleId);
		}
		else
		{
			Assert(!"We should not be here");
		}

		switch (result)
		{
			case HeapTupleSelfUpdated:
				/* already deleted by self; nothing to do */
				return;

			case HeapTupleMayBeUpdated:
				break;

			case HeapTupleUpdated:
				if (IsXactIsoLevelSerializable)
					ereport(ERROR,
							(errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
							 errmsg("could not serialize access due to concurrent update")));
				else if (!ItemPointerEquals(tupleid, &update_ctid))
				{
					TupleTableSlot *epqslot;

					epqslot = EvalPlanQual(estate,
										   resultRelInfo->ri_RangeTableIndex,
										   &update_ctid, update_xmax,
										   estate->es_snapshot->curcid);
					if (!TupIsNull(epqslot))
					{
						*tupleid = update_ctid;
						partslot = ExecFilterJunk(estate->es_junkFilter, epqslot);
						tuple = ExecFetchSlotHeapTuple(partslot);
						goto lreplace;
					}
				}
				/* tuple already deleted; nothing to do */
				return;

			default:
				elog(ERROR, "unrecognized heap_update status: %u", result);
				return;
		}
	}
	else
	{
		HeapTuple	persistentTuple;

		/*
		 * Persistent metadata path.
		 */
		persistentTuple = heap_copytuple(tuple);
		persistentTuple->t_self = *tupleid;

		frozen_heap_inplace_update(resultRelationDesc, persistentTuple);

		heap_freetuple(persistentTuple);
	}

	IncrReplaced();
	(estate->es_processed)++;
	(resultRelInfo->ri_aoprocessed)++;

	/*
	 * Note: instead of having to update the old index tuples associated with
	 * the heap tuple, all we do is form and insert new index tuples.  This
	 * is because UPDATEs are actually DELETEs and INSERTs, and index tuple
	 * deletion is done later by VACUUM (see notes in ExecDelete).  All we do
	 * here is insert new index tuples.  -cim 9/27/89
	 */

	/*
	 * insert index entries for tuple
	 *
	 * Note: heap_update returns the tid (location) of the new tuple in the
	 * t_self field.
	 */
	if (rel_is_aorows || rel_is_aocols)
	{
		if (resultRelInfo->ri_NumIndices > 0)
			ExecInsertIndexTuples(partslot, (ItemPointer) &aoTupleId,
								  estate, false);
	}
	else
	{
		if (resultRelInfo->ri_NumIndices > 0)
			ExecInsertIndexTuples(partslot, &(((HeapTuple) tuple)->t_self),
								  estate, false);
	}

	/* AFTER ROW UPDATE Triggers */
	ExecARUpdateTriggers(estate, resultRelInfo, tupleid, tuple);
}