/* ----------------------------------------------------------------
 *		ExecDelete
 *
 *		DELETE is like UPDATE, except that we delete the tuple and no
 *		index modifications are needed.
 *		DELETE can be part of an update operation when there is a
 *		preceding SplitUpdate node.
 * ----------------------------------------------------------------
 */
void
ExecDelete(ItemPointer tupleid,
		   TupleTableSlot *planSlot,
		   DestReceiver *dest,
		   EState *estate,
		   PlanGenerator planGen,
		   bool isUpdate)
{
	ResultRelInfo *resultRelInfo;
	Relation	resultRelationDesc;
	HTSU_Result result;
	ItemPointerData update_ctid;
	TransactionId update_xmax;

	/*
	 * Get information on the (current) result relation.
	 */
	if (estate->es_result_partitions && planGen == PLANGEN_OPTIMIZER)
	{
		Assert(estate->es_result_partitions->part->parrelid);

#ifdef USE_ASSERT_CHECKING
		Oid			parent = estate->es_result_partitions->part->parrelid;
#endif

		/* Obtain part for current tuple. */
		resultRelInfo = slot_get_partition(planSlot, estate);
		estate->es_result_relation_info = resultRelInfo;

#ifdef USE_ASSERT_CHECKING
		Oid			part = RelationGetRelid(resultRelInfo->ri_RelationDesc);
#endif

		Assert(parent != part);
	}
	else
	{
		resultRelInfo = estate->es_result_relation_info;
	}
	resultRelationDesc = resultRelInfo->ri_RelationDesc;

	Assert(!resultRelInfo->ri_projectReturning);

	if (planGen == PLANGEN_PLANNER)
	{
		/* BEFORE ROW DELETE Triggers */
		if (resultRelInfo->ri_TrigDesc &&
			resultRelInfo->ri_TrigDesc->n_before_row[TRIGGER_EVENT_DELETE] > 0)
		{
			bool		dodelete;

			dodelete = ExecBRDeleteTriggers(estate, resultRelInfo, tupleid,
											estate->es_snapshot->curcid);

			if (!dodelete)		/* "do nothing" */
				return;
		}
	}

	/*
	 * delete the tuple
	 *
	 * Note: if es_crosscheck_snapshot isn't InvalidSnapshot, we check that
	 * the row to be deleted is visible to that snapshot, and throw a can't-
	 * serialize error if not. This is a special-case behavior needed for
	 * referential integrity updates in serializable transactions.
	 */
ldelete:;
	result = heap_delete(resultRelationDesc, tupleid,
						 &update_ctid, &update_xmax,
						 estate->es_snapshot->curcid,
						 estate->es_crosscheck_snapshot,
						 true /* wait for commit */ );

	switch (result)
	{
		case HeapTupleSelfUpdated:
			/* already deleted by self; nothing to do */

			/*
			 * In a scenario in which R(a,b) and S(a,b) contain
			 *
			 *       R          S
			 *    --------   --------
			 *     (1, 1)     (1, 2)
			 *                (1, 7)
			 *
			 * an update query such as
			 *
			 *     UPDATE R SET a = S.b FROM S WHERE R.b = S.a;
			 *
			 * has non-deterministic output: the tuple in R could be updated
			 * to either (2,1) or (7,1). Since the introduction of
			 * SplitUpdate, such queries issue multiple requests to delete
			 * the same tuple. Therefore, to avoid the non-deterministic
			 * output, we report an error in this scenario instead.
			 */
			if (isUpdate)
			{
				ereport(ERROR,
						(errcode(ERRCODE_IN_FAILED_SQL_TRANSACTION),
						 errmsg("multiple updates to a row by the same query is not allowed")));
			}
			return;

		case HeapTupleMayBeUpdated:
			break;

		case HeapTupleUpdated:
			/*
			 * The tuple was concurrently updated. Under SERIALIZABLE we
			 * must fail; under READ COMMITTED we chase the ctid chain with
			 * EvalPlanQual and, if the newest tuple version still satisfies
			 * the quals, retry the delete against it.
			 */
			if (IsXactIsoLevelSerializable)
				ereport(ERROR,
						(errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
						 errmsg("could not serialize access due to concurrent update")));
			else if (!ItemPointerEquals(tupleid, &update_ctid))
			{
				TupleTableSlot *epqslot;

				epqslot = EvalPlanQual(estate,
									   resultRelInfo->ri_RangeTableIndex,
									   &update_ctid,
									   update_xmax,
									   estate->es_snapshot->curcid);
				if (!TupIsNull(epqslot))
				{
					*tupleid = update_ctid;
					goto ldelete;
				}
			}
			/* tuple already deleted; nothing to do */
			return;

		default:
			elog(ERROR, "unrecognized heap_delete status: %u", result);
			return;
	}

	if (!isUpdate)
	{
		IncrDeleted();
		(estate->es_processed)++;
	}

	/*
	 * Note: Normally one would think that we have to delete index tuples
	 * associated with the heap tuple now...
	 *
	 * ... but in POSTGRES, we have no need to do this because VACUUM will
	 * take care of it later. We can't delete index tuples immediately
	 * anyway, since the tuple is still visible to other transactions.
	 */

	if (planGen == PLANGEN_PLANNER)
	{
		/* AFTER ROW DELETE Triggers */
		ExecARDeleteTriggers(estate, resultRelInfo, tupleid);
	}
}
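/*
 * Illustrative sketch only (guarded out, never called): this isolates the
 * concurrent-update retry protocol that ExecDelete above and ExecUpdate
 * below both implement in their HeapTupleUpdated cases. Under SERIALIZABLE
 * isolation a concurrent update must raise a serialization failure; under
 * READ COMMITTED we follow the ctid chain with EvalPlanQual and retry
 * against the newest tuple version if it still satisfies the quals. The
 * helper name and its return convention (true = caller should retry) are
 * inventions for exposition; only calls that appear elsewhere in this file
 * are used.
 */
#ifdef NOT_USED
static bool
epq_should_retry(EState *estate, ResultRelInfo *resultRelInfo,
				 ItemPointer tupleid,
				 ItemPointer update_ctid, TransactionId update_xmax)
{
	TupleTableSlot *epqslot;

	if (IsXactIsoLevelSerializable)
		ereport(ERROR,
				(errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
				 errmsg("could not serialize access due to concurrent update")));

	/* A self-referencing ctid means the tuple was deleted, not updated. */
	if (ItemPointerEquals(tupleid, update_ctid))
		return false;

	/* Re-evaluate the plan quals against the newest tuple version. */
	epqslot = EvalPlanQual(estate,
						   resultRelInfo->ri_RangeTableIndex,
						   update_ctid,
						   update_xmax,
						   estate->es_snapshot->curcid);
	if (TupIsNull(epqslot))
		return false;			/* quals no longer pass; skip this row */

	*tupleid = *update_ctid;	/* retry against the new version's TID */
	return true;
}
#endif   /* NOT_USED */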
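/*
 * Illustrative sketch only (guarded out, never called): ExecUpdate below
 * calls checkPartitionUpdate() to reject updates that would move a tuple to
 * a different partition. The logic here mirrors the inline form this check
 * takes in older revisions of ExecUpdate; the real implementation lives
 * elsewhere, and the function name below is an invention for exposition.
 */
#ifdef NOT_USED
static void
check_partition_update_sketch(EState *estate, TupleTableSlot *slot,
							  ResultRelInfo *resultRelInfo)
{
	Relation	resultRelationDesc = resultRelInfo->ri_RelationDesc;
	AttrNumber	max_attr;
	Datum	   *values;
	bool	   *nulls;
	Oid			targetid;

	Assert(estate->es_partition_state != NULL &&
		   estate->es_partition_state->accessMethods != NULL);
	if (!estate->es_partition_state->accessMethods->part_cxt)
		estate->es_partition_state->accessMethods->part_cxt =
			GetPerTupleExprContext(estate)->ecxt_per_tuple_memory;

	/* Fetch the partitioning-key columns and route the new tuple. */
	max_attr = estate->es_partition_state->max_partition_attr;
	slot_getsomeattrs(slot, max_attr);
	values = slot_get_values(slot);
	nulls = slot_get_isnull(slot);

	targetid = selectPartition(estate->es_result_partitions, values, nulls,
							   slot->tts_tupleDescriptor,
							   estate->es_partition_state->accessMethods);

	if (!OidIsValid(targetid))
		ereport(ERROR,
				(errcode(ERRCODE_NO_PARTITION_FOR_PARTITIONING_KEY),
				 errmsg("no partition for partitioning key")));

	/* Cross-partition moves are not supported; error out instead. */
	if (RelationGetRelid(resultRelationDesc) != targetid)
		ereport(ERROR,
				(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
				 errmsg("moving tuple from partition \"%s\" to "
						"partition \"%s\" not supported",
						get_rel_name(RelationGetRelid(resultRelationDesc)),
						get_rel_name(targetid))));
}
#endif   /* NOT_USED */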
/* ----------------------------------------------------------------
 *		ExecUpdate
 *
 *		note: we can't run UPDATE queries with transactions
 *		off because UPDATEs are actually INSERTs and our
 *		scan will mistakenly loop forever, updating the tuple
 *		it just inserted..  This should be fixed but until it
 *		is, we don't want to get stuck in an infinite loop
 *		which corrupts your database..
 * ----------------------------------------------------------------
 */
void
ExecUpdate(TupleTableSlot *slot,
		   ItemPointer tupleid,
		   TupleTableSlot *planSlot,
		   DestReceiver *dest,
		   EState *estate)
{
	void	   *tuple;
	ResultRelInfo *resultRelInfo;
	Relation	resultRelationDesc;
	HTSU_Result result;
	ItemPointerData update_ctid;
	TransactionId update_xmax;
	AOTupleId	aoTupleId = AOTUPLEID_INIT;
	TupleTableSlot *partslot = NULL;

	/*
	 * abort the operation if not running transactions
	 */
	if (IsBootstrapProcessingMode())
		elog(ERROR, "cannot UPDATE during bootstrap");

	/*
	 * get information on the (current) result relation
	 */
	resultRelInfo = estate->es_result_relation_info;
	resultRelationDesc = resultRelInfo->ri_RelationDesc;

	bool		rel_is_heap = RelationIsHeap(resultRelationDesc);
	bool		rel_is_aorows = RelationIsAoRows(resultRelationDesc);
	bool		rel_is_aocols = RelationIsAoCols(resultRelationDesc);
	bool		rel_is_external = RelationIsExternal(resultRelationDesc);

	/*
	 * get the tuple out of the tuple table slot, making sure we have a
	 * writable copy
	 */
	if (rel_is_heap)
	{
		partslot = slot;
		tuple = ExecFetchSlotHeapTuple(partslot);
	}
	else if (rel_is_aorows || rel_is_aocols)
	{
		/*
		 * Reconstruct a physically compatible tuple from the logically
		 * compatible one. The slot's tuple descriptor comes from the
		 * projection target list, which doesn't indicate dropped columns,
		 * and MemTuple cannot deal with such cases without converting the
		 * target list back into the original relation's tuple descriptor.
		 */
		partslot = reconstructMatchingTupleSlot(slot, resultRelInfo);

		/*
		 * We directly inline toasted columns here, because an update with
		 * toasted columns would otherwise create two references to the same
		 * toasted value.
		 */
		tuple = ExecFetchSlotMemTuple(partslot, true);
	}
	else if (rel_is_external)
	{
		if (estate->es_result_partitions &&
			estate->es_result_partitions->part->parrelid != 0)
		{
			ereport(ERROR,
					(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
					 errmsg("updating external partitions is not supported")));
			return;
		}
		else
		{
			partslot = slot;
			tuple = ExecFetchSlotHeapTuple(partslot);
		}
	}
	else
	{
		Insist(false);
	}

	/* see if this update would move the tuple to a different partition */
	if (estate->es_result_partitions)
		checkPartitionUpdate(estate, partslot, resultRelInfo);

	/* BEFORE ROW UPDATE Triggers */
	if (resultRelInfo->ri_TrigDesc &&
		resultRelInfo->ri_TrigDesc->n_before_row[TRIGGER_EVENT_UPDATE] > 0)
	{
		HeapTuple	newtuple;

		newtuple = ExecBRUpdateTriggers(estate, resultRelInfo,
										tupleid, tuple,
										estate->es_snapshot->curcid);

		if (newtuple == NULL)	/* "do nothing" */
			return;

		if (newtuple != tuple)	/* modified by Trigger(s) */
		{
			/*
			 * Put the modified tuple into a slot for convenience of routines
			 * below. We assume the tuple was allocated in per-tuple memory
			 * context, and therefore will go away by itself. The tuple table
			 * slot should not try to clear it.
			 */
			TupleTableSlot *newslot = estate->es_trig_tuple_slot;

			if (newslot->tts_tupleDescriptor != partslot->tts_tupleDescriptor)
				ExecSetSlotDescriptor(newslot, partslot->tts_tupleDescriptor);
			ExecStoreGenericTuple(newtuple, newslot, false);
			newslot->tts_tableOid = partslot->tts_tableOid;		/* for constraints */
			partslot = newslot;
			tuple = newtuple;
		}
	}

	/*
	 * Check the constraints of the tuple
	 *
	 * If we generate a new candidate tuple after EvalPlanQual testing, we
	 * must loop back here and recheck constraints. (We don't need to redo
	 * triggers, however. If there are any BEFORE triggers then trigger.c
	 * will have done heap_lock_tuple to lock the correct tuple, so there's
	 * no need to do them again.)
	 */
lreplace:;
	if (resultRelationDesc->rd_att->constr)
		ExecConstraints(resultRelInfo, partslot, estate);

	if (!GpPersistent_IsPersistentRelation(resultRelationDesc->rd_id))
	{
		/*
		 * Normal UPDATE path: replace the tuple.
		 *
		 * Note: if es_crosscheck_snapshot isn't InvalidSnapshot, we check
		 * that the row to be updated is visible to that snapshot, and throw
		 * a can't-serialize error if not. This is a special-case behavior
		 * needed for referential integrity updates in serializable
		 * transactions.
		 */
		if (rel_is_heap)
		{
			result = heap_update(resultRelationDesc, tupleid, tuple,
								 &update_ctid, &update_xmax,
								 estate->es_snapshot->curcid,
								 estate->es_crosscheck_snapshot,
								 true /* wait for commit */ );
		}
		else if (rel_is_aorows)
		{
			if (IsXactIsoLevelSerializable)
			{
				ereport(ERROR,
						(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
						 errmsg("updates on append-only tables are not supported in serializable transactions")));
			}

			if (resultRelInfo->ri_updateDesc == NULL)
			{
				/* Lazily create the AO update descriptor on first use. */
				ResultRelInfoSetSegno(resultRelInfo, estate->es_result_aosegnos);
				resultRelInfo->ri_updateDesc = (AppendOnlyUpdateDesc)
					appendonly_update_init(resultRelationDesc,
										   ActiveSnapshot,
										   resultRelInfo->ri_aosegno);
			}
			result = appendonly_update(resultRelInfo->ri_updateDesc,
									   tuple, (AOTupleId *) tupleid,
									   &aoTupleId);
		}
		else if (rel_is_aocols)
		{
			if (IsXactIsoLevelSerializable)
			{
				ereport(ERROR,
						(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
						 errmsg("updates on append-only tables are not supported in serializable transactions")));
			}

			if (resultRelInfo->ri_updateDesc == NULL)
			{
				/* Lazily create the AOCS update descriptor on first use. */
				ResultRelInfoSetSegno(resultRelInfo, estate->es_result_aosegnos);
				resultRelInfo->ri_updateDesc = (AppendOnlyUpdateDesc)
					aocs_update_init(resultRelationDesc,
									 resultRelInfo->ri_aosegno);
			}
			result = aocs_update(resultRelInfo->ri_updateDesc,
								 partslot, (AOTupleId *) tupleid,
								 &aoTupleId);
		}
		else
		{
			Assert(!"We should not be here");
		}

		switch (result)
		{
			case HeapTupleSelfUpdated:
				/* already deleted by self; nothing to do */
				return;

			case HeapTupleMayBeUpdated:
				break;

			case HeapTupleUpdated:
				if (IsXactIsoLevelSerializable)
					ereport(ERROR,
							(errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
							 errmsg("could not serialize access due to concurrent update")));
				else if (!ItemPointerEquals(tupleid, &update_ctid))
				{
					TupleTableSlot *epqslot;

					epqslot = EvalPlanQual(estate,
										   resultRelInfo->ri_RangeTableIndex,
										   &update_ctid,
										   update_xmax,
										   estate->es_snapshot->curcid);
					if (!TupIsNull(epqslot))
					{
						*tupleid = update_ctid;
						partslot = ExecFilterJunk(estate->es_junkFilter, epqslot);
						tuple = ExecFetchSlotHeapTuple(partslot);
						goto lreplace;
					}
				}
				/* tuple already deleted; nothing to do */
				return;

			default:
				elog(ERROR, "unrecognized heap_update status: %u", result);
				return;
		}
	}
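	/*
	 * The else-branch below handles Greenplum's persistent metadata
	 * relations (the gp_persistent_* catalogs). These are updated in place
	 * with a frozen tuple via frozen_heap_inplace_update() rather than
	 * through MVCC heap_update(); presumably because persistent metadata
	 * must remain readable outside normal snapshot rules (e.g. during
	 * recovery), so creating a new tuple version is not an option. This
	 * rationale is inferred from the surrounding code, not stated by it.
	 */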
	else
	{
		/*
		 * Persistent metadata path.
		 */
		HeapTuple	persistentTuple;

		persistentTuple = heap_copytuple(tuple);
		persistentTuple->t_self = *tupleid;

		frozen_heap_inplace_update(resultRelationDesc, persistentTuple);

		heap_freetuple(persistentTuple);
	}

	IncrReplaced();
	(estate->es_processed)++;
	(resultRelInfo->ri_aoprocessed)++;

	/*
	 * Note: instead of having to update the old index tuples associated with
	 * the heap tuple, all we do is form and insert new index tuples. This is
	 * because UPDATEs are actually DELETEs and INSERTs, and index tuple
	 * deletion is done later by VACUUM (see notes in ExecDelete). All we do
	 * here is insert new index tuples. -cim 9/27/89
	 */

	/*
	 * insert index entries for tuple
	 *
	 * Note: heap_update returns the tid (location) of the new tuple in the
	 * t_self field, while the append-only routines return it in aoTupleId.
	 */
	if (rel_is_aorows || rel_is_aocols)
	{
		if (resultRelInfo->ri_NumIndices > 0)
			ExecInsertIndexTuples(partslot, (ItemPointer) &aoTupleId,
								  estate, false);
	}
	else
	{
		if (resultRelInfo->ri_NumIndices > 0)
			ExecInsertIndexTuples(partslot, &(((HeapTuple) tuple)->t_self),
								  estate, false);
	}

	/* AFTER ROW UPDATE Triggers */
	ExecARUpdateTriggers(estate, resultRelInfo, tupleid, tuple);
}
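/*
 * For quick reference, the storage-specific paths ExecUpdate takes above
 * (summarized from the code itself):
 *
 *   storage            update call                   new-tuple TID for indexes
 *   ----------------   ---------------------------   -------------------------
 *   heap               heap_update()                 tuple->t_self
 *   AO row-oriented    appendonly_update()           aoTupleId (out argument)
 *   AO column-store    aocs_update()                 aoTupleId (out argument)
 *   persistent         frozen_heap_inplace_update()  in place; TID unchanged
 *
 * Note that aocs_update() is passed the slot rather than the materialized
 * tuple, presumably so the column store can write individual column values
 * straight from the slot.
 */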