Example No. 1
void Output(int v)
{
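    // Heuristic vertex-ordering step (looks like a greedy feedback-arc-set
    // pass): drop v from the heap, append it to the front or back of
    // Ordered[] based on its in/out cost balance, then update the DeltaCost,
    // degree and heap bookkeeping of its still-unplaced neighbours.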
    // Kick out of heap
    heap_remove(Heap, v);

    // In cost < Out cost, should go first
    if (DeltaCost[v] < 0)
    {
        Ordered[iFirst++] = v;
    }
    // Out cost < In cost, should go last
    else if (DeltaCost[v] > 0)
    {
        Ordered[iLast--] = v;
    }
    // In cost == Out cost, check degrees:
    // if out-degree is higher, should go first
    else if (InDegree[v] < OutDegree[v])
    {
        Ordered[iFirst++] = v;
    }
    // otherwise, should go last
    else
    {
        Ordered[iLast--] = v;
    }

    // Update data due to removal of vertex
    PARC pA = ArcStart[v];

    for (int i = 0; i < ArcCount[v]; i++)
    {
        if (heap_position(Heap, pA[i].j) <= 0) { continue; }

        DeltaCost[pA[i].j] -= pA[i].c;
        heap_update(Heap, pA[i].j, -abs(DeltaCost[pA[i].j]));

        // out arc
        if (pA[i].c > 0)
        {
            InDegree[pA[i].j]--;

            if (InDegree[pA[i].j] == 0 && OutDegree[pA[i].j] != 0)
            {
                Zero[nZero++] = pA[i].j;
            }

        }
        // in arc
        else
        {
            OutDegree[pA[i].j]--;

            if (OutDegree[pA[i].j] == 0 && InDegree[pA[i].j] != 0)
            {
                Zero[nZero++] = pA[i].j;
            }
        }
    }
}
Example No. 2
void
timer_update(suptimer_t *tmr, int increment)
{
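	/* Presumably recomputes the timer's expiry target and then re-keys its
	 * heap node so the soonest-to-expire timer stays at the top. */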
	timer_fill_target(tmr, increment);

	heap_update(&timers, &tmr->item);
}
Example No. 3
static void
heap_referenced(RemovalPolicy * policy, const StoreEntry * entry,
    RemovalPolicyNode * node)
{
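    /* Removal-policy "referenced" hook: when the entry is touched, re-key its
     * heap node so the eviction ordering reflects the new access. */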
    HeapPolicyData *heap = policy->_data;
    heap_node *hnode = node->data;
    if (!hnode)
	return;
    heap_update(heap->heap, hnode, (StoreEntry *) entry);
}
Example No. 4
/*
 * disk_update_instance - This updates an object that had previously been
 * reserved with the actual contents.
 *    return: NO_ERROR if successful, error code otherwise
 *    classop(in): class object
 *    obj(in): description of object
 *    oid(in): destination oid
 */
int
disk_update_instance (MOP classop, DESC_OBJ * obj, OID * oid)
{
  int error = NO_ERROR;
  HFID *hfid;
  int newsize;
  bool has_indexes = false, oldflag;

  Diskrec->length = 0;
  if (desc_obj_to_disk (obj, Diskrec, &has_indexes))
    {
      /* make the record larger */
      newsize = -Diskrec->length + DB_PAGESIZE;
      free_recdes (Diskrec);
      Diskrec = alloc_recdes (newsize);
      /* try one more time */
      if (Diskrec == NULL
	  || desc_obj_to_disk (obj, Diskrec, &has_indexes) != 0)
	{
	  error = ER_LDR_CANT_TRANSFORM;
	  er_set (ER_ERROR_SEVERITY, ARG_FILE_LINE, error, 0);
	}
    }
  if (!error)
    {
      hfid = get_class_heap (classop, obj->class_);
      if (hfid == NULL)
	{
	  error = er_errid ();
	}
      else if (heap_update (NULL, hfid, oid, Diskrec, &oldflag, NULL) != oid)
	{
	  error = er_errid ();
	}
      else
	{
	  if (oldflag)
	    {
	      fprintf (stdout, msgcat_message (MSGCAT_CATALOG_UTILS,
					       MSGCAT_UTIL_SET_LOADDB,
					       LOADDB_MSG_UPDATE_WARNING));
	    }
	  else if (has_indexes)
	    {
	      error = update_indexes (WS_OID (classop), oid, Diskrec);
	    }
	}
    }

  return (error);
}
Example No. 5
static bool
matrel_heap_update(Relation relation, ItemPointer otid, HeapTuple tup)
{
	HTSU_Result result;
	HeapUpdateFailureData hufd;
	LockTupleMode lockmode;

	result = heap_update(relation, otid, tup,
						 GetCurrentCommandId(true), InvalidSnapshot,
						 true /* wait for commit */ ,
						 &hufd, &lockmode);
	switch (result)
	{
		case HeapTupleSelfUpdated:
			/* Tuple was already updated in current command? */
			elog(ERROR, "tuple already updated by self");
			break;

		case HeapTupleMayBeUpdated:
			/* done successfully */
			break;

		case HeapTupleUpdated:
			/*
			 * Tuple updated by a concurrent transaction? The only legal case is if the tuple was deleted
			 * which can happen if the auto-vacuumer deletes the tuple while we were trying to update it.
			 */
			if (memcmp(&hufd.ctid, otid, sizeof(ItemPointerData)) == 0)
				return false;
			elog(ERROR, "tuple concurrently updated");
			break;

		default:
			elog(ERROR, "unrecognized heap_update status: %u", result);
			break;
	}

	return true;
}
Example No. 6
long long solve() {
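    /* Appears to be a Dijkstra-style shortest path over an (n-1) x (m-1)
     * grid: cost[] holds tentative distances, pos[] remembers each node's
     * heap slot, and heap_update() performs the decrease-key after a
     * successful relaxation. */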
    long i, j, t, tt;
    long long tv;
    tot = 0;
    for (i = 0; i < n * m; i++) cost[i] = INF, pos[i] = -1;
    for (j = 0; j < m - 2; j++) {
        t = j;
        cost[t] = h[0][t];
        heap_insert(t);
    }
    cost[m - 2] = h[0][m - 2] < v[0][m - 1] ? h[0][m - 2] : v[0][m - 1];
    heap_insert(m - 2);
    for (i = 1; i < n - 1; i++) {
        t = i * (m - 1) + m - 2;
        cost[t] = v[i][m - 1];
        heap_insert(t);
    }
    while (tot) {
        t = heap_pop();
        i = t / (m - 1);
        j = t % (m - 1);
        if (i > 0) {
            tt = (i - 1) * (m - 1) + j;
            tv = cost[t] + h[i][j];
            if (tv < cost[tt]) {
                cost[tt] = tv;
                if (pos[tt] == -1) heap_insert(tt);
                else heap_update(pos[tt]);
            }
        }
        if (i < n - 2) {
            tt = (i + 1) * (m - 1) + j;
            tv = cost[t] + h[i + 1][j];
            if (tv < cost[tt]) {
                cost[tt] = tv;
                if (pos[tt] == -1) heap_insert(tt);
                else heap_update(pos[tt]);
            }
        }
        if (j > 0) {
            tt = i * (m - 1) + j - 1;
            tv = cost[t] + v[i][j];
            if (tv < cost[tt]) {
                cost[tt] = tv;
                if (pos[tt] == -1) heap_insert(tt);
                else heap_update(pos[tt]);
            }
        }
        if (j < m - 2) {
            tt = i * (m - 1) + j + 1;
            tv = cost[t] + v[i][j + 1];
            if (tv < cost[tt]) {
                cost[tt] = tv;
                if (pos[tt] == -1) heap_insert(tt);
                else heap_update(pos[tt]);
            }
        }
    }
    long long ret = INF;
    for (i = 0; i < n - 1; i++) {
        tv = cost[i * (m - 1)] + v[i][0];
        if (tv < ret) ret = tv;
    }
    for (j = 0; j < m - 1; j++) {
        tv = cost[(n - 2) * (m - 1) + j] + h[n - 1][j];
        if (tv < ret) ret = tv;
    }
    return ret;
}
Example No. 7
/*
 *--------------------------------------------------------------
 * AtCommit_Notify
 *
 *		This is called at transaction commit.
 *
 *		If there are outbound notify requests in the pendingNotifies list,
 *		scan pg_listener for matching tuples, and either signal the other
 *		backend or send a message to our own frontend.
 *
 *		NOTE: we are still inside the current transaction, therefore can
 *		piggyback on its committing of changes.
 *
 * Results:
 *		XXX
 *
 * Side effects:
 *		Tuples in pg_listener that have matching relnames and other peoples'
 *		listenerPIDs are updated with a nonzero notification field.
 *
 *--------------------------------------------------------------
 */
void
AtCommit_Notify(void)
{
	Relation	lRel;
	TupleDesc	tdesc;
	HeapScanDesc scan;
	HeapTuple	lTuple,
				rTuple;
	Datum		value[Natts_pg_listener];
	char		repl[Natts_pg_listener],
				nulls[Natts_pg_listener];

	if (pendingNotifies == NIL)
		return;					/* no NOTIFY statements in this transaction */

	/*
	 * NOTIFY is disabled if not normal processing mode. This test used to be
	 * in xact.c, but it seems cleaner to do it here.
	 */
	if (!IsNormalProcessingMode())
	{
		ClearPendingNotifies();
		return;
	}

	if (Trace_notify)
		elog(DEBUG1, "AtCommit_Notify");

	/* preset data to update notify column to MyProcPid */
	nulls[0] = nulls[1] = nulls[2] = ' ';
	repl[0] = repl[1] = repl[2] = ' ';
	repl[Anum_pg_listener_notify - 1] = 'r';
	value[0] = value[1] = value[2] = (Datum) 0;
	value[Anum_pg_listener_notify - 1] = Int32GetDatum(MyProcPid);

	lRel = heap_open(ListenerRelationId, ExclusiveLock);
	tdesc = RelationGetDescr(lRel);
	scan = heap_beginscan(lRel, SnapshotNow, 0, NULL);

	while ((lTuple = heap_getnext(scan, ForwardScanDirection)) != NULL)
	{
		Form_pg_listener listener = (Form_pg_listener) GETSTRUCT(lTuple);
		char	   *relname = NameStr(listener->relname);
		int32		listenerPID = listener->listenerpid;

		if (!AsyncExistsPendingNotify(relname))
			continue;

		if (listenerPID == MyProcPid)
		{
			/*
			 * Self-notify: no need to bother with table update. Indeed, we
			 * *must not* clear the notification field in this path, or we
			 * could lose an outside notify, which'd be bad for applications
			 * that ignore self-notify messages.
			 */

			if (Trace_notify)
				elog(DEBUG1, "AtCommit_Notify: notifying self");

			NotifyMyFrontEnd(relname, listenerPID);
		}
		else
		{
			if (Trace_notify)
				elog(DEBUG1, "AtCommit_Notify: notifying pid %d",
					 listenerPID);

			/*
			 * If someone has already notified this listener, we don't bother
			 * modifying the table, but we do still send a SIGUSR2 signal,
			 * just in case that backend missed the earlier signal for some
			 * reason.	It's OK to send the signal first, because the other
			 * guy can't read pg_listener until we unlock it.
			 */
			if (kill(listenerPID, SIGUSR2) < 0)
			{
				/*
				 * Get rid of pg_listener entry if it refers to a PID that no
				 * longer exists.  Presumably, that backend crashed without
				 * deleting its pg_listener entries. This code used to only
				 * delete the entry if errno==ESRCH, but as far as I can see
				 * we should just do it for any failure (certainly at least
				 * for EPERM too...)
				 */
				simple_heap_delete(lRel, &lTuple->t_self);
			}
			else if (listener->notification == 0)
			{
				HTSU_Result result;
				ItemPointerData update_ctid;
				TransactionId update_xmax;

				rTuple = heap_modifytuple(lTuple, tdesc,
										  value, nulls, repl);

				/*
				 * We cannot use simple_heap_update here because the tuple
				 * could have been modified by an uncommitted transaction;
				 * specifically, since UNLISTEN releases exclusive lock on the
				 * table before commit, the other guy could already have tried
				 * to unlisten.  There are no other cases where we should be
				 * able to see an uncommitted update or delete. Therefore, our
				 * response to a HeapTupleBeingUpdated result is just to
				 * ignore it.  We do *not* wait for the other guy to commit
				 * --- that would risk deadlock, and we don't want to block
				 * while holding the table lock anyway for performance
				 * reasons. We also ignore HeapTupleUpdated, which could occur
				 * if the other guy commits between our heap_getnext and
				 * heap_update calls.
				 */
				result = heap_update(lRel, &lTuple->t_self, rTuple,
									 &update_ctid, &update_xmax,
									 GetCurrentCommandId(), InvalidSnapshot,
									 false /* no wait for commit */ );
				switch (result)
				{
					case HeapTupleSelfUpdated:
						/* Tuple was already updated in current command? */
						elog(ERROR, "tuple already updated by self");
						break;

					case HeapTupleMayBeUpdated:
						/* done successfully */
#ifdef NOT_USED					/* currently there are no indexes */
						CatalogUpdateIndexes(lRel, rTuple);
#endif
						break;

					case HeapTupleBeingUpdated:
						/* ignore uncommitted tuples */
						break;

					case HeapTupleUpdated:
						/* ignore just-committed tuples */
						break;

					default:
						elog(ERROR, "unrecognized heap_update status: %u",
							 result);
						break;
				}
			}
		}
	}

	heap_endscan(scan);

	/*
	 * We do NOT release the lock on pg_listener here; we need to hold it
	 * until end of transaction (which is about to happen, anyway) to ensure
	 * that notified backends see our tuple updates when they look. Else they
	 * might disregard the signal, which would make the application programmer
	 * very unhappy.
	 */
	heap_close(lRel, NoLock);

	ClearPendingNotifies();

	if (Trace_notify)
		elog(DEBUG1, "AtCommit_Notify: done");
}
Example No. 8
 void Astar()
 {
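 	/* Looks like A* on the 8-puzzle: board states are permutation strings
 	 * indexed via Cantor expansion, and heap_update() serves as the
 	 * decrease-key when a cheaper path to an already-open state is found. */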
 	int i,v1,v2,j=0;
 	node tmp,now;
 	memset(close,0,sizeof(close));
 	memset(open,0,sizeof(open));
 	heaplen=1;
	for(i=0;i<strlen(s);++i){
		if(s[i]=='x'){
			heap[1].p=j;
			heap[1].s[j++]='0';
		}
		if(s[i]>'0'&&s[i]<='9') heap[1].s[j++]=s[i];
	}
	for(i=0;i<=8;i++)
		if(heap[1].s[i]=='x'){
			heap[1].p=i;
			heap[1].s[i]='0';
		}
	if(inverse(heap[1].s)%2){
		printf("unsolvable\n");
		return;
	}
	heap[1].h=figureh(heap[1]);
	heap[1].g=0;
	figuref(heap[1]);
	dir[cantor_expansion(heap[1])]=-1;
 	while(heaplen){
 		now=heap_min();
 		v1=cantor_expansion(now);
 		open[v1]=0;
 		close[v1]=1;
 		closef[v1]=now.f;
 		//printf("%s**",now.s);print(v1);printf("\n");
 		if(strcmp(now.s,"123456780")==0){
		 	print(v1);
			printf("\n");
			return;
			}
 		tmp.g=now.g+1;
 		for(i=0;i<4;i++){
 			tmp.p=now.p+x[i];
 			if(tmp.p<0||tmp.p>8) continue;
 			if((i==0||i==1)&&tmp.p/3!=now.p/3) continue;
 			strcpy(tmp.s,now.s);
 			tmp.s[now.p]=tmp.s[tmp.p];
 			tmp.s[tmp.p]='0';
 			//printf("%s\n",tmp.s);
 			tmp.h=figureh(tmp);
 			figuref(tmp);
 			v2=cantor_expansion(tmp);
 			if(open[v2]==0&&close[v2]==0){
 				open[v2]=1;
 				dir[v2]=i;
 				heap_insert(tmp);
 				pre[v2]=v1;
 			}
 			else{
 				if(open[v2]&&heap_update(tmp)){
				 	dir[v2]=i;
				 	pre[v2]=v1;
				 }
 				else if(tmp.f<closef[v2]){
 					open[v2]=1;
 					close[v2]=0;
 					dir[v2]=i;
 					pre[v2]=v1;
 					heap_insert(tmp);
 				}	
 			}
 		}
 	}
 }
Example No. 9
/* ----------------------------------------------------------------
 *		ExecUpdate
 *
 *		note: we can't run UPDATE queries with transactions
 *		off because UPDATEs are actually INSERTs and our
 *		scan will mistakenly loop forever, updating the tuple
 *		it just inserted..	This should be fixed but until it
 *		is, we don't want to get stuck in an infinite loop
 *		which corrupts your database..
 * ----------------------------------------------------------------
 */
void
ExecUpdate(TupleTableSlot *slot,
		   ItemPointer tupleid,
		   TupleTableSlot *planSlot,
		   DestReceiver *dest,
		   EState *estate)
{
	HeapTuple	tuple;
	ResultRelInfo *resultRelInfo;
	Relation	resultRelationDesc;
	HTSU_Result result;
	ItemPointerData update_ctid;
	TransactionId update_xmax;

	/*
	 * abort the operation if not running transactions
	 */
	if (IsBootstrapProcessingMode())
		elog(ERROR, "cannot UPDATE during bootstrap");

	/*
	 * get the heap tuple out of the tuple table slot, making sure we have a
	 * writable copy
	 */
	tuple = ExecFetchSlotHeapTuple(slot);

	/*
	 * get information on the (current) result relation
	 */
	resultRelInfo = estate->es_result_relation_info;
	resultRelationDesc = resultRelInfo->ri_RelationDesc;

	/* see if this update would move the tuple to a different partition */
	if (estate->es_result_partitions)
	{
		AttrNumber max_attr;
		Datum *values;
		bool *nulls;
		Oid targetid;

		Assert(estate->es_partition_state != NULL &&
			   estate->es_partition_state->accessMethods != NULL);
		if (!estate->es_partition_state->accessMethods->part_cxt)
			estate->es_partition_state->accessMethods->part_cxt =
				GetPerTupleExprContext(estate)->ecxt_per_tuple_memory;

		Assert(PointerIsValid(estate->es_result_partitions));

		max_attr = estate->es_partition_state->max_partition_attr;

		slot_getsomeattrs(slot, max_attr);
		values = slot_get_values(slot);
		nulls = slot_get_isnull(slot);

		targetid = selectPartition(estate->es_result_partitions, values,
								   nulls, slot->tts_tupleDescriptor,
								   estate->es_partition_state->accessMethods);

		if (!OidIsValid(targetid))
			ereport(ERROR,
					(errcode(ERRCODE_NO_PARTITION_FOR_PARTITIONING_KEY),
					 errmsg("no partition for partitioning key")));

		if (RelationGetRelid(resultRelationDesc) != targetid)
		{
			ereport(ERROR,
					(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
					 errmsg("moving tuple from partition \"%s\" to "
							"partition \"%s\" not supported",
							get_rel_name(RelationGetRelid(resultRelationDesc)),
							get_rel_name(targetid)),
					 errOmitLocation(true)));
		}
	}

	/* BEFORE ROW UPDATE Triggers */
	if (resultRelInfo->ri_TrigDesc &&
		resultRelInfo->ri_TrigDesc->n_before_row[TRIGGER_EVENT_UPDATE] > 0)
	{
		HeapTuple	newtuple;

		newtuple = ExecBRUpdateTriggers(estate, resultRelInfo,
										tupleid, tuple,
										estate->es_snapshot->curcid);

		if (newtuple == NULL)	/* "do nothing" */
			return;

		if (newtuple != tuple)	/* modified by Trigger(s) */
		{
			/*
			 * Put the modified tuple into a slot for convenience of routines
			 * below.  We assume the tuple was allocated in per-tuple memory
			 * context, and therefore will go away by itself. The tuple table
			 * slot should not try to clear it.
			 */
			TupleTableSlot *newslot = estate->es_trig_tuple_slot;

			if (newslot->tts_tupleDescriptor != slot->tts_tupleDescriptor)
				ExecSetSlotDescriptor(newslot, slot->tts_tupleDescriptor);
			ExecStoreGenericTuple(newtuple, newslot, false);
            newslot->tts_tableOid = slot->tts_tableOid; /* for constraints */
			slot = newslot;
			tuple = newtuple;
		}
	}

	/*
	 * Check the constraints of the tuple
	 *
	 * If we generate a new candidate tuple after EvalPlanQual testing, we
	 * must loop back here and recheck constraints.  (We don't need to redo
	 * triggers, however.  If there are any BEFORE triggers then trigger.c
	 * will have done heap_lock_tuple to lock the correct tuple, so there's no
	 * need to do them again.)
	 */
lreplace:;
	if (resultRelationDesc->rd_att->constr)
		ExecConstraints(resultRelInfo, slot, estate);

	if (!GpPersistent_IsPersistentRelation(resultRelationDesc->rd_id))
	{
		/*
		 * Normal UPDATE path.
		 */

		/*
		 * replace the heap tuple
		 *
		 * Note: if es_crosscheck_snapshot isn't InvalidSnapshot, we check that
		 * the row to be updated is visible to that snapshot, and throw a can't-
		 * serialize error if not.	This is a special-case behavior needed for
		 * referential integrity updates in serializable transactions.
		 */
		result = heap_update(resultRelationDesc, tupleid, tuple,
							 &update_ctid, &update_xmax,
							 estate->es_snapshot->curcid,
							 estate->es_crosscheck_snapshot,
							 true /* wait for commit */ );
		switch (result)
		{
			case HeapTupleSelfUpdated:
				/* already deleted by self; nothing to do */
				return;

			case HeapTupleMayBeUpdated:
				break;

			case HeapTupleUpdated:
				if (IsXactIsoLevelSerializable)
					ereport(ERROR,
							(errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
							 errmsg("could not serialize access due to concurrent update")));
				else if (!ItemPointerEquals(tupleid, &update_ctid))
				{
					TupleTableSlot *epqslot;

					epqslot = EvalPlanQual(estate,
										   resultRelInfo->ri_RangeTableIndex,
										   &update_ctid,
										   update_xmax,
										   estate->es_snapshot->curcid);
					if (!TupIsNull(epqslot))
					{
						*tupleid = update_ctid;
						slot = ExecFilterJunk(estate->es_junkFilter, epqslot);
						tuple = ExecFetchSlotHeapTuple(slot);
						goto lreplace;
					}
				}
				/* tuple already deleted; nothing to do */
				return;

			default:
				elog(ERROR, "unrecognized heap_update status: %u", result);
				return;
		}
	}
	else
	{
		HeapTuple persistentTuple;

		/*
		 * Persistent metadata path.
		 */
		persistentTuple = heap_copytuple(tuple);
		persistentTuple->t_self = *tupleid;

		frozen_heap_inplace_update(resultRelationDesc, persistentTuple);

		heap_freetuple(persistentTuple);
	}

	IncrReplaced();
	(estate->es_processed)++;

	/*
	 * Note: instead of having to update the old index tuples associated with
	 * the heap tuple, all we do is form and insert new index tuples. This is
	 * because UPDATEs are actually DELETEs and INSERTs, and index tuple
	 * deletion is done later by VACUUM (see notes in ExecDelete).	All we do
	 * here is insert new index tuples.  -cim 9/27/89
	 */
	/*
	 * insert index entries for tuple
	 *
	 * Note: heap_update returns the tid (location) of the new tuple in the
	 * t_self field.
	 */
	if (resultRelInfo->ri_NumIndices > 0)
		ExecInsertIndexTuples(slot, &(tuple->t_self), estate, false);

	/* AFTER ROW UPDATE Triggers */
	ExecARUpdateTriggers(estate, resultRelInfo, tupleid, tuple);

}
Example No. 10
int main(int argc, char *argv[])
{
  register uint i,j;
  uint ant,n1,n2,n3;
  uint write_count,update,opt_delete,check2,dupp_keys,found_key;
  int error;
  ulong pos;
  unsigned long key_check;
  uchar record[128],record2[128],record3[128],key[10];
  const char *filename,*filename2;
  HP_INFO *file,*file2;
  HP_SHARE *tmp_share;
  HP_KEYDEF keyinfo[MAX_KEYS];
  HA_KEYSEG keyseg[MAX_KEYS*5];
  HEAP_PTR UNINIT_VAR(position);
  HP_CREATE_INFO hp_create_info;
  CHARSET_INFO *cs= &my_charset_latin1;
  my_bool unused;
  MY_INIT(argv[0]);		/* init my_sys library & pthreads */

  filename= "test2";
  filename2= "test2_2";
  file=file2=0;
  get_options(argc,argv);

  bzero(&hp_create_info, sizeof(hp_create_info));
  hp_create_info.max_table_size= 2*1024L*1024L;
  hp_create_info.keys= keys;
  hp_create_info.keydef= keyinfo;
  hp_create_info.reclength= reclength;
  hp_create_info.max_records= (ulong) flag*100000L;
  hp_create_info.min_records= (ulong) recant/2;

  write_count=update=opt_delete=0;
  key_check=0;

  keyinfo[0].seg=keyseg;
  keyinfo[0].keysegs=1;
  keyinfo[0].flag= 0;
  keyinfo[0].algorithm= HA_KEY_ALG_HASH;
  keyinfo[0].seg[0].type=HA_KEYTYPE_BINARY;
  keyinfo[0].seg[0].start=0;
  keyinfo[0].seg[0].length=6;
  keyinfo[0].seg[0].null_bit=0;
  keyinfo[0].seg[0].charset=cs;
  keyinfo[1].seg=keyseg+1;
  keyinfo[1].keysegs=2;
  keyinfo[1].flag=0;
  keyinfo[1].algorithm= HA_KEY_ALG_HASH;
  keyinfo[1].seg[0].type=HA_KEYTYPE_BINARY;
  keyinfo[1].seg[0].start=7;
  keyinfo[1].seg[0].length=6;
  keyinfo[1].seg[0].null_bit=0;
  keyinfo[1].seg[0].charset=cs;
  keyinfo[1].seg[1].type=HA_KEYTYPE_TEXT;
  keyinfo[1].seg[1].start=0;			/* key in two parts */
  keyinfo[1].seg[1].length=6;
  keyinfo[1].seg[1].null_bit=0;
  keyinfo[1].seg[1].charset=cs;
  keyinfo[2].seg=keyseg+3;
  keyinfo[2].keysegs=1;
  keyinfo[2].flag=HA_NOSAME;
  keyinfo[2].algorithm= HA_KEY_ALG_HASH;
  keyinfo[2].seg[0].type=HA_KEYTYPE_BINARY;
  keyinfo[2].seg[0].start=12;
  keyinfo[2].seg[0].length=8;
  keyinfo[2].seg[0].null_bit=0;
  keyinfo[2].seg[0].charset=cs;
  keyinfo[3].seg=keyseg+4;
  keyinfo[3].keysegs=1;
  keyinfo[3].flag=HA_NOSAME;
  keyinfo[3].algorithm= HA_KEY_ALG_HASH;
  keyinfo[3].seg[0].type=HA_KEYTYPE_BINARY;
  keyinfo[3].seg[0].start=37;
  keyinfo[3].seg[0].length=1;
  keyinfo[3].seg[0].null_bit=1;
  keyinfo[3].seg[0].null_pos=38;
  keyinfo[3].seg[0].charset=cs;

  bzero((char*) key1,sizeof(key1));
  bzero((char*) key3,sizeof(key3));

  printf("- Creating heap-file\n");
  if (heap_create(filename, &hp_create_info, &tmp_share, &unused) ||
      !(file= heap_open(filename, 2)))
    goto err;
  signal(SIGINT,endprog);

  printf("- Writing records:s\n");
  strmov((char*) record,"          ..... key");

  for (i=0 ; i < recant ; i++)
  {
    n1=rnd(1000); n2=rnd(100); n3=rnd(MY_MIN(recant*5,MAX_RECORDS));
    make_record(record,n1,n2,n3,"Pos",write_count);

    if (heap_write(file,record))
    {
      if (my_errno != HA_ERR_FOUND_DUPP_KEY || key3[n3] == 0)
      {
	printf("Error: %d in write at record: %d\n",my_errno,i);
	goto err;
      }
      if (verbose) printf("   Double key: %d\n",n3);
    }
    else
    {
      if (key3[n3] == 1)
      {
	printf("Error: Didn't get error when writing second key: '%8d'\n",n3);
	goto err;
      }
      write_count++; key1[n1]++; key3[n3]=1;
      key_check+=n1;
    }
    if (testflag == 1 && heap_check_heap(file,0))
    {
      puts("Heap keys crashed");
      goto err;
    }
  }
  if (testflag == 1)
    goto end;
  if (heap_check_heap(file,0))
  {
    puts("Heap keys crashed");
    goto err;
  }

  printf("- Delete\n");
  for (i=0 ; i < write_count/10 ; i++)
  {
    for (j=rnd(1000)+1 ; j>0 && key1[j] == 0 ; j--) ;
    if (j != 0)
    {
      sprintf((char*) key,"%6d",j);
      if (heap_rkey(file,record,0,key,6, HA_READ_KEY_EXACT))
      {
	printf("can't find key1: \"%s\"\n",(char*) key);
	goto err;
      }
      if (heap_delete(file,record))
      {
	printf("error: %d; can't delete record: \"%s\"\n", my_errno,(char*) record);
	goto err;
      }
      opt_delete++;
      key1[atoi((char*) record+keyinfo[0].seg[0].start)]--;
      key3[atoi((char*) record+keyinfo[2].seg[0].start)]=0;
      key_check-=atoi((char*) record);
      if (testflag == 2 && heap_check_heap(file,0))
      {
	puts("Heap keys crashed");
	goto err;
      }
    }
    else
      puts("Warning: Skipping delete test because no dupplicate keys");
  }
  if (testflag==2) goto end;
  if (heap_check_heap(file,0))
  {
    puts("Heap keys crashed");
    goto err;
  }

  printf("- Update\n");
  for (i=0 ; i < write_count/10 ; i++)
  {
    n1=rnd(1000); n2=rnd(100); n3=rnd(MY_MIN(recant*2,MAX_RECORDS));
    make_record(record2, n1, n2, n3, "XXX", update);
    if (rnd(2) == 1)
    {
      if (heap_scan_init(file))
	goto err;
      j=rnd(write_count-opt_delete);
      while ((error=heap_scan(file,record)) == HA_ERR_RECORD_DELETED ||
	     (!error && j))
      {
	if (!error)
	  j--;
      }
      if (error)
	goto err;
    }
    else
    {
      for (j=rnd(1000)+1 ; j>0 && key1[j] == 0 ; j--) ;
      if (!key1[j])
	continue;
      sprintf((char*) key,"%6d",j);
      if (heap_rkey(file,record,0,key,6, HA_READ_KEY_EXACT))
      {
	printf("can't find key1: \"%s\"\n",(char*) key);
	goto err;
      }
    }
    if (heap_update(file,record,record2))
    {
      if (my_errno != HA_ERR_FOUND_DUPP_KEY || key3[n3] == 0)
      {
	printf("error: %d; can't update:\nFrom: \"%s\"\nTo:   \"%s\"\n",
	       my_errno,(char*) record, (char*) record2);
	goto err;
      }
      if (verbose)
	printf("Double key when tried to update:\nFrom: \"%s\"\nTo:   \"%s\"\n",
               (char*) record, (char*) record2);
    }
    else
    {
      key1[atoi((char*) record+keyinfo[0].seg[0].start)]--;
      key3[atoi((char*) record+keyinfo[2].seg[0].start)]=0;
      key1[n1]++; key3[n3]=1;
      update++;
      key_check=key_check-atoi((char*) record)+n1;
    }
    if (testflag == 3 && heap_check_heap(file,0))
    {
      puts("Heap keys crashed");
      goto err;
    }
  }
  if (testflag == 3) goto end;
  if (heap_check_heap(file,0))
  {
    puts("Heap keys crashed");
    goto err;
  }

  for (i=999, dupp_keys=found_key=0 ; i>0 ; i--)
  {
    if (key1[i] > dupp_keys) { dupp_keys=key1[i]; found_key=i; }
    sprintf((char*) key,"%6d",found_key);
  }

  if (dupp_keys > 3)
  {
    if (!silent)
      printf("- Read first key - next - delete - next -> last\n");
    DBUG_PRINT("progpos",("first - next - delete - next -> last"));

    if (heap_rkey(file,record,0,key,6, HA_READ_KEY_EXACT))
      goto err;
    if (heap_rnext(file,record3)) goto err;
    if (heap_delete(file,record3)) goto err;
    key_check-=atoi((char*) record3);
    key1[atoi((char*) record+keyinfo[0].seg[0].start)]--;
    key3[atoi((char*) record+keyinfo[2].seg[0].start)]=0;
    opt_delete++;
    ant=2;
    while ((error=heap_rnext(file,record3)) == 0 ||
	   error == HA_ERR_RECORD_DELETED)
      if (! error)
	ant++;
    if (ant != dupp_keys)
    {
      printf("next: I can only find: %d records of %d\n",
	     ant,dupp_keys);
      goto end;
    }
    dupp_keys--;
    if (heap_check_heap(file,0))
    {
      puts("Heap keys crashed");
      goto err;
    }

    if (!silent)
      printf("- Read last key - delete - prev - prev - opt_delete - prev -> first\n");

    if (heap_rprev(file,record))
      goto err;
    if (heap_delete(file,record3)) goto err;
    key_check-=atoi((char*) record3);
    key1[atoi((char*) record+keyinfo[0].seg[0].start)]--;
    key3[atoi((char*) record+keyinfo[2].seg[0].start)]=0;
    opt_delete++;
    if (heap_rprev(file,record3) || heap_rprev(file,record3))
      goto err;
    if (heap_delete(file,record3)) goto err;
    key_check-=atoi((char*) record3);
    key1[atoi((char*) record+keyinfo[0].seg[0].start)]--;
    key3[atoi((char*) record+keyinfo[2].seg[0].start)]=0;
    opt_delete++;
    ant=3;
    while ((error=heap_rprev(file,record3)) == 0 ||
	   error == HA_ERR_RECORD_DELETED)
    {
      if (! error)
	ant++;
    }
    if (ant != dupp_keys)
    {
      printf("next: I can only find: %d records of %d\n",
	     ant,dupp_keys);
      goto end;
    }
    dupp_keys-=2;
    if (heap_check_heap(file,0))
    {
      puts("Heap keys crashed");
      goto err;
    }
  }
  else
    puts("Warning: Not enough duplicated keys:  Skipping delete key check");

  if (!silent)
    printf("- Read (first) - next - delete - next -> last\n");
  DBUG_PRINT("progpos",("first - next - delete - next -> last"));

  if (heap_scan_init(file))
    goto err;
  while ((error=heap_scan(file,record3)) == HA_ERR_RECORD_DELETED) ;
  if (error)
    goto err;
  if (heap_delete(file,record3)) goto err;
  key_check-=atoi((char*) record3);
  opt_delete++;
  key1[atoi((char*) record+keyinfo[0].seg[0].start)]--;
  key3[atoi((char*) record+keyinfo[2].seg[0].start)]=0;
  ant=0;
  while ((error=heap_scan(file,record3)) == 0 ||
	 error == HA_ERR_RECORD_DELETED)
    if (! error)
      ant++;
  if (ant != write_count-opt_delete)
  {
    printf("next: Found: %d records of %d\n",ant,write_count-opt_delete);
    goto end;
  }
  if (heap_check_heap(file,0))
  {
    puts("Heap keys crashed");
    goto err;
  }

  puts("- Test if: Read rrnd - same - rkey - same");
  DBUG_PRINT("progpos",("Read rrnd - same"));
  pos=rnd(write_count-opt_delete-5)+5;
  heap_scan_init(file);
  i=5;
  while ((error=heap_scan(file,record)) == HA_ERR_RECORD_DELETED ||
	 (error == 0 && pos))
  {
    if (!error)
      pos--;
    if (i-- == 0)
    {
      bmove(record3,record,reclength);
      position=heap_position(file);
    }
  }
  if (error)
    goto err;
  bmove(record2,record,reclength);
  if (heap_rsame(file,record,-1) || heap_rsame(file,record2,2))
    goto err;
  if (memcmp(record2,record,reclength))
  {
    puts("heap_rsame didn't find right record");
    goto end;
  }

  puts("- Test of read through position");
  if (heap_rrnd(file,record,position))
    goto err;
  if (memcmp(record3,record,reclength))
  {
    puts("heap_frnd didn't find right record");
    goto end;
  }

  printf("- heap_info\n");
  {
    HEAPINFO info;
    heap_info(file,&info,0);
    /* We have to test with opt_delete +1 as this may be the case if the last
       inserted row was a duplicate key */
    if (info.records != write_count-opt_delete ||
	(info.deleted != opt_delete && info.deleted != opt_delete+1))
    {
      puts("Wrong info from heap_info");
      printf("Got: records: %ld(%d)  deleted: %ld(%d)\n",
	     info.records,write_count-opt_delete,info.deleted,opt_delete);
    }
  }

#ifdef OLD_HEAP_VERSION
  {
    uint check;
    printf("- Read through all records with rnd\n");
    if (heap_extra(file,HA_EXTRA_RESET) || heap_extra(file,HA_EXTRA_CACHE))
    {
      puts("got error from heap_extra");
      goto end;
    }
    ant=check=0;
    while ((error=heap_rrnd(file,record,(ulong) -1)) != HA_ERR_END_OF_FILE &&
	   ant < write_count + 10)
    {
      if (!error)
      {
	ant++;
	check+=calc_check(record,reclength);
      }
    }
    if (ant != write_count-opt_delete)
    {
      printf("rrnd: I can only find: %d records of %d\n", ant,
	     write_count-opt_delete);
      goto end;
    }
    if (heap_extra(file,HA_EXTRA_NO_CACHE))
    {
      puts("got error from heap_extra(HA_EXTRA_NO_CACHE)");
      goto end;
    }
  }
#endif

  printf("- Read through all records with scan\n");
  if (heap_reset(file) || heap_extra(file,HA_EXTRA_CACHE))
  {
    puts("got error from heap_extra");
    goto end;
  }
  ant=check2=0;
  heap_scan_init(file);
  while ((error=heap_scan(file,record)) != HA_ERR_END_OF_FILE &&
	 ant < write_count + 10)
  {
    if (!error)
    {
      ant++;
      check2+=calc_check(record,reclength);
    }
  }
  if (ant != write_count-opt_delete)
  {
    printf("scan: I can only find: %d records of %d\n", ant,
	   write_count-opt_delete);
    goto end;
  }
#ifdef OLD_HEAP_VERSION
  if (check != check2)
  {
    puts("scan: Checksum didn't match reading with rrnd");
    goto end;
  }
#endif


  if (heap_extra(file,HA_EXTRA_NO_CACHE))
  {
    puts("got error from heap_extra(HA_EXTRA_NO_CACHE)");
    goto end;
  }

  for (i=999, dupp_keys=found_key=0 ; i>0 ; i--)
  {
    if (key1[i] > dupp_keys) { dupp_keys=key1[i]; found_key=i; }
    sprintf((char*) key,"%6d",found_key);
  }
  printf("- Read through all keys with first-next-last-prev\n");
  ant=0;
  for (error=heap_rkey(file,record,0,key,6, HA_READ_KEY_EXACT);
      ! error ;
       error=heap_rnext(file,record))
    ant++;
  if (ant != dupp_keys)
  {
    printf("first-next: I can only find: %d records of %d\n", ant,
	   dupp_keys);
    goto end;
  }

  ant=0;
  for (error=heap_rprev(file,record) ;
      ! error ;
      error=heap_rprev(file,record))
  {
    ant++;
    check2+=calc_check(record,reclength);
  }
  if (ant != dupp_keys)
  {
    printf("last-prev: I can only find: %d records of %d\n", ant,
	   dupp_keys);
    goto end;
  }

  if (testflag == 4) goto end;

  printf("- Reading through all rows through keys\n");
  if (!(file2=heap_open(filename, 2)))
    goto err;
  if (heap_scan_init(file))
    goto err;
  while ((error=heap_scan(file,record)) != HA_ERR_END_OF_FILE)
  {
    if (error == 0)
    {
      if (heap_rkey(file2,record2,2,record+keyinfo[2].seg[0].start,8,
		    HA_READ_KEY_EXACT))
      {
	printf("can't find key3: \"%.8s\"\n",
	       record+keyinfo[2].seg[0].start);
	goto err;
      }
    }
  }
  heap_close(file2);

  printf("- Creating output heap-file 2\n");
  hp_create_info.keys= 1;
  hp_create_info.max_records= 0;
  hp_create_info.min_records= 0;
  if (heap_create(filename2, &hp_create_info, &tmp_share, &unused) ||
      !(file2= heap_open_from_share_and_register(tmp_share, 2)))
    goto err;

  printf("- Copying and removing records\n");
  if (heap_scan_init(file))
    goto err;
  while ((error=heap_scan(file,record)) != HA_ERR_END_OF_FILE)
  {
    if (error == 0)
    {
      if (heap_write(file2,record))
	goto err;
      key_check-=atoi((char*) record);
      write_count++;
      if (heap_delete(file,record))
	goto err;
      opt_delete++;
    }
    pos++;
  }
  printf("- Checking heap tables\n");
  if (heap_check_heap(file,1) || heap_check_heap(file2,1))
  {
    puts("Heap keys crashed");
    goto err;
  }

  if (my_errno != HA_ERR_END_OF_FILE)
    printf("error: %d from heap_rrnd\n",my_errno);
  if (key_check)
    printf("error: Some read got wrong: check is %ld\n",(long) key_check);

end:
  printf("\nFollowing test have been made:\n");
  printf("Write records: %d\nUpdate records: %d\nDelete records: %d\n", write_count,update,opt_delete);
  heap_clear(file);
  if (heap_close(file) || (file2 && heap_close(file2)))
    goto err;
  heap_delete_table(filename2);
  hp_panic(HA_PANIC_CLOSE);
  my_end(MY_GIVE_INFO);
  return(0);
err:
  printf("Got error: %d when using heap-database\n",my_errno);
  (void) heap_close(file);
  return(1);
} /* main */
Example No. 11
/*
 * disk_update_instance_using_mobj - updates an object that had previously
 * been reserved with the actual contents.
 *    return: NO_ERROR if successful, error code otherwise
 *    classop(in): class object
 *    classobj(in): class memory object
 *    obj(in): object memory.
 *    oid(in): oid of the destination
 */
int
disk_update_instance_using_mobj (MOP classop, MOBJ classobj,
				 MOBJ obj, OID * oid)
{
  int error = NO_ERROR;
  HFID *hfid;
  bool has_indexes = false;
  volatile int newsize = 0;
  bool oldflag;
  TF_STATUS tf_status = TF_SUCCESS;

  Diskrec->length = 0;
  /*
   * tf_mem_to_disk() is used to get an estimate of the disk space requirements
   * for the object. When dealing with collections the estimate returned is
   * not always a good one, hence we need to enclose this block in a loop
   * increasing the space by increments of DB_PAGESIZE until we hit the correct
   * space requirement.
   */
  while ((tf_status =
	  tf_mem_to_disk (classop, classobj, obj, Diskrec,
			  &has_indexes)) == TF_OUT_OF_SPACE)
    {
      /* make the record larger */
      if (newsize)
	{
	  newsize += DB_PAGESIZE;
	}
      else
	{
	  newsize = -Diskrec->length + DB_PAGESIZE;
	}
      free_recdes (Diskrec);
      Diskrec = alloc_recdes (newsize);
      if (Diskrec == NULL)
	{
	  error = er_errid ();
	  break;
	}
    }
  if (tf_status != TF_SUCCESS)
    {
      error = ER_LDR_CANT_TRANSFORM;
      er_set (ER_ERROR_SEVERITY, ARG_FILE_LINE, error, 0);
    }
  if (error == NO_ERROR && Diskrec != NULL)
    {
      hfid = get_class_heap (classop, (SM_CLASS *) classop->object);
      if (hfid == NULL)
	{
	  error = er_errid ();
	}
      else if (heap_update (NULL, hfid, oid, Diskrec, &oldflag, NULL) != oid)
	{
	  error = er_errid ();
	}
      else
	{
	  if (oldflag)
	    {
	      fprintf (stdout, msgcat_message (MSGCAT_CATALOG_UTILS,
					       MSGCAT_UTIL_SET_LOADDB,
					       LOADDB_MSG_UPDATE_WARNING));
	    }
	  else if (has_indexes)
	    {
	      error = update_indexes (WS_OID (classop), oid, Diskrec);
	    }
	}
    }

  return (error);
}
Example No. 12
/* ----------------------------------------------------------------
 *		ExecUpdate
 *
 *		note: we can't run UPDATE queries with transactions
 *		off because UPDATEs are actually INSERTs and our
 *		scan will mistakenly loop forever, updating the tuple
 *		it just inserted..	This should be fixed but until it
 *		is, we don't want to get stuck in an infinite loop
 *		which corrupts your database..
 * ----------------------------------------------------------------
 */
void
ExecUpdate(TupleTableSlot *slot,
		   ItemPointer tupleid,
		   TupleTableSlot *planSlot,
		   DestReceiver *dest,
		   EState *estate)
{
	void*	tuple;
	ResultRelInfo *resultRelInfo;
	Relation	resultRelationDesc;
	HTSU_Result result;
	ItemPointerData update_ctid;
	TransactionId update_xmax;
	AOTupleId	aoTupleId = AOTUPLEID_INIT;
	TupleTableSlot *partslot = NULL;

	/*
	 * abort the operation if not running transactions
	 */
	if (IsBootstrapProcessingMode())
		elog(ERROR, "cannot UPDATE during bootstrap");
	
	/*
	 * get information on the (current) result relation
	 */
	resultRelInfo = estate->es_result_relation_info;
	resultRelationDesc = resultRelInfo->ri_RelationDesc;

	bool		rel_is_heap = RelationIsHeap(resultRelationDesc);
	bool 		rel_is_aorows = RelationIsAoRows(resultRelationDesc);
	bool		rel_is_aocols = RelationIsAoCols(resultRelationDesc);
	bool		rel_is_external = RelationIsExternal(resultRelationDesc);

	/*
	 * get the heap tuple out of the tuple table slot, making sure we have a
	 * writable copy
	 */
	if (rel_is_heap)
	{
		partslot = slot;
		tuple = ExecFetchSlotHeapTuple(partslot);
	}
	else if (rel_is_aorows || rel_is_aocols)
	{
		/*
		 * It is necessary to reconstruct a logically compatible tuple into
		 * a physically compatible tuple.  The slot's tuple descriptor comes
		 * from the projection target list, which doesn't indicate dropped
		 * columns, and MemTuple cannot deal with cases without converting
		 * the target list back into the original relation's tuple desc.
		 */
		partslot = reconstructMatchingTupleSlot(slot, resultRelInfo);

		/*
		 * We directly inline toasted columns here as update with toasted columns
		 * would create two references to the same toasted value.
		 */
		tuple = ExecFetchSlotMemTuple(partslot, true);
	}
	else if (rel_is_external) 
	{
		if (estate->es_result_partitions && 
			estate->es_result_partitions->part->parrelid != 0)
		{
			ereport(ERROR,
				(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
				errmsg("Update external partitions not supported.")));			
			return;
		}
		else
		{
			partslot = slot;
			tuple = ExecFetchSlotHeapTuple(partslot);
		}
	}
	else 
	{
		Insist(false);
	}

	/* see if this update would move the tuple to a different partition */
	if (estate->es_result_partitions)
		checkPartitionUpdate(estate, partslot, resultRelInfo);

	/* BEFORE ROW UPDATE Triggers */
	if (resultRelInfo->ri_TrigDesc &&
		resultRelInfo->ri_TrigDesc->n_before_row[TRIGGER_EVENT_UPDATE] > 0)
	{
		HeapTuple	newtuple;

		newtuple = ExecBRUpdateTriggers(estate, resultRelInfo,
										tupleid, tuple,
										estate->es_snapshot->curcid);

		if (newtuple == NULL)	/* "do nothing" */
			return;

		if (newtuple != tuple)	/* modified by Trigger(s) */
		{
			/*
			 * Put the modified tuple into a slot for convenience of routines
			 * below.  We assume the tuple was allocated in per-tuple memory
			 * context, and therefore will go away by itself. The tuple table
			 * slot should not try to clear it.
			 */
			TupleTableSlot *newslot = estate->es_trig_tuple_slot;

			if (newslot->tts_tupleDescriptor != partslot->tts_tupleDescriptor)
				ExecSetSlotDescriptor(newslot, partslot->tts_tupleDescriptor);
			ExecStoreGenericTuple(newtuple, newslot, false);
            newslot->tts_tableOid = partslot->tts_tableOid; /* for constraints */
			partslot = newslot;
			tuple = newtuple;
		}
	}

	/*
	 * Check the constraints of the tuple
	 *
	 * If we generate a new candidate tuple after EvalPlanQual testing, we
	 * must loop back here and recheck constraints.  (We don't need to redo
	 * triggers, however.  If there are any BEFORE triggers then trigger.c
	 * will have done heap_lock_tuple to lock the correct tuple, so there's no
	 * need to do them again.)
	 */
lreplace:;
	if (resultRelationDesc->rd_att->constr)
		ExecConstraints(resultRelInfo, partslot, estate);

	if (!GpPersistent_IsPersistentRelation(resultRelationDesc->rd_id))
	{
		/*
		 * Normal UPDATE path.
		 */

		/*
		 * replace the heap tuple
		 *
		 * Note: if es_crosscheck_snapshot isn't InvalidSnapshot, we check that
		 * the row to be updated is visible to that snapshot, and throw a can't-
		 * serialize error if not.	This is a special-case behavior needed for
		 * referential integrity updates in serializable transactions.
		 */
		if (rel_is_heap)
		{
			result = heap_update(resultRelationDesc, tupleid, tuple,
							 &update_ctid, &update_xmax,
							 estate->es_snapshot->curcid,
							 estate->es_crosscheck_snapshot,
							 true /* wait for commit */ );
		} 
		else if (rel_is_aorows)
		{
			if (IsXactIsoLevelSerializable)
			{
				ereport(ERROR,
					(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
					errmsg("Updates on append-only tables are not supported in serializable transactions.")));			
			}

			if (resultRelInfo->ri_updateDesc == NULL)
			{
				ResultRelInfoSetSegno(resultRelInfo, estate->es_result_aosegnos);
				resultRelInfo->ri_updateDesc = (AppendOnlyUpdateDesc)
					appendonly_update_init(resultRelationDesc, ActiveSnapshot, resultRelInfo->ri_aosegno);
			}
			result = appendonly_update(resultRelInfo->ri_updateDesc,
								 tuple, (AOTupleId *) tupleid, &aoTupleId);
		}
		else if (rel_is_aocols)
		{
			if (IsXactIsoLevelSerializable)
			{
				ereport(ERROR,
					(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
					errmsg("Updates on append-only tables are not supported in serializable transactions.")));			
			}

			if (resultRelInfo->ri_updateDesc == NULL)
			{
				ResultRelInfoSetSegno(resultRelInfo, estate->es_result_aosegnos);
				resultRelInfo->ri_updateDesc = (AppendOnlyUpdateDesc)
					aocs_update_init(resultRelationDesc, resultRelInfo->ri_aosegno);
			}
			result = aocs_update(resultRelInfo->ri_updateDesc,
								 partslot, (AOTupleId *) tupleid, &aoTupleId);
		}
		else
		{
			Assert(!"We should not be here");
		}
		switch (result)
		{
			case HeapTupleSelfUpdated:
				/* already deleted by self; nothing to do */
				return;

			case HeapTupleMayBeUpdated:
				break;

			case HeapTupleUpdated:
				if (IsXactIsoLevelSerializable)
					ereport(ERROR,
							(errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
							 errmsg("could not serialize access due to concurrent update")));
				else if (!ItemPointerEquals(tupleid, &update_ctid))
				{
					TupleTableSlot *epqslot;

					epqslot = EvalPlanQual(estate,
										   resultRelInfo->ri_RangeTableIndex,
										   &update_ctid,
										   update_xmax,
										   estate->es_snapshot->curcid);
					if (!TupIsNull(epqslot))
					{
						*tupleid = update_ctid;
						partslot = ExecFilterJunk(estate->es_junkFilter, epqslot);
						tuple = ExecFetchSlotHeapTuple(partslot);
						goto lreplace;
					}
				}
				/* tuple already deleted; nothing to do */
				return;

			default:
				elog(ERROR, "unrecognized heap_update status: %u", result);
				return;
		}
	}
	else
	{
		HeapTuple persistentTuple;

		/*
		 * Persistent metadata path.
		 */
		persistentTuple = heap_copytuple(tuple);
		persistentTuple->t_self = *tupleid;

		frozen_heap_inplace_update(resultRelationDesc, persistentTuple);

		heap_freetuple(persistentTuple);
	}

	IncrReplaced();
	(estate->es_processed)++;
	(resultRelInfo->ri_aoprocessed)++;

	/*
	 * Note: instead of having to update the old index tuples associated with
	 * the heap tuple, all we do is form and insert new index tuples. This is
	 * because UPDATEs are actually DELETEs and INSERTs, and index tuple
	 * deletion is done later by VACUUM (see notes in ExecDelete).	All we do
	 * here is insert new index tuples.  -cim 9/27/89
	 */
	/*
	 * insert index entries for tuple
	 *
	 * Note: heap_update returns the tid (location) of the new tuple in the
	 * t_self field.
	 */
	if (rel_is_aorows || rel_is_aocols)
	{
		if (resultRelInfo->ri_NumIndices > 0)
			ExecInsertIndexTuples(partslot, (ItemPointer)&aoTupleId, estate, false);
	}
	else
	{
		if (resultRelInfo->ri_NumIndices > 0)
			ExecInsertIndexTuples(partslot, &(((HeapTuple) tuple)->t_self), estate, false);
	}

	/* AFTER ROW UPDATE Triggers */
	ExecARUpdateTriggers(estate, resultRelInfo, tupleid, tuple);

}