/*
 * Returns the number of hidden tuples in a given segment file
 */ 
int64
AppendOnlyVisimapStore_GetSegmentFileHiddenTupleCount(
	AppendOnlyVisimapStore *visiMapStore,
	AppendOnlyVisimapEntry *visiMapEntry,
	int segmentFileNum)
{
	ScanKeyData scanKey;
	IndexScanDesc indexScan;
	int64 hiddenTupcount = 0;

	Assert(visiMapStore);
	Assert(visiMapEntry);
	Assert(RelationIsValid(visiMapStore->visimapRelation));
	Assert(RelationIsValid(visiMapStore->visimapIndex));
	
	ScanKeyInit(&scanKey,
			Anum_pg_aovisimap_segno, /* segno */
			BTEqualStrategyNumber,
			F_INT4EQ,
			Int32GetDatum(segmentFileNum));

	indexScan = AppendOnlyVisimapStore_BeginScan(
			visiMapStore,
			1,
			&scanKey);
	
	while (AppendOnlyVisimapStore_GetNext(visiMapStore,
		indexScan, ForwardScanDirection,
		visiMapEntry, NULL))
	{
		hiddenTupcount += AppendOnlyVisimapEntry_GetHiddenTupleCount(visiMapEntry);
	}
	AppendOnlyVisimapStore_EndScan(visiMapStore, indexScan);
	return hiddenTupcount;
}
/*
 * Fetches the next entry from a visimap store index scan.
 *
 * Parameter visiMapEntry may be NULL. If it is not NULL and the
 * scan returns an entry, the entry data is copied into it.
 * Parameter tupleTid may be NULL. If it is not NULL and the scan
 * returns an entry, the (heap) tuple id is copied into it.
 */
bool
AppendOnlyVisimapStore_GetNext(
	AppendOnlyVisimapStore *visiMapStore,
	IndexScanDesc indexScan,
	ScanDirection scanDirection,
	AppendOnlyVisimapEntry* visiMapEntry,
	ItemPointerData *tupleTid)
{
	HeapTuple tuple;
	TupleDesc heapTupleDesc;

	Assert(visiMapStore);
	Assert(RelationIsValid(visiMapStore->visimapRelation));
	Assert(RelationIsValid(visiMapStore->visimapIndex));
	Assert(indexScan);

	tuple = AppendOnlyVisimapStore_GetNextTuple(visiMapStore, indexScan, scanDirection);
	if (tuple == NULL)
	{
		return false;
	}
	heapTupleDesc = RelationGetDescr(visiMapStore->visimapRelation);
	if (visiMapEntry)
	{
		AppendOnlyVisimapEntry_Copyout(visiMapEntry, tuple,
			heapTupleDesc);
	}
	if (tupleTid)
	{
		ItemPointerCopy(&tuple->t_self, tupleTid);
	}
	return true;
}
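/*
 * Illustrative sketch (not part of the original listing): the NULL-parameter
 * contract above lets a caller request only what it needs. Here only the heap
 * tuple ids are fetched; the entry payload is never decoded. `store` and
 * `scan` (begun with AppendOnlyVisimapStore_BeginScan) are assumed to be set
 * up by the caller.
 */
static int64
count_visimap_entries(AppendOnlyVisimapStore *store, IndexScanDesc scan)
{
	ItemPointerData tid;
	int64 count = 0;

	/* visiMapEntry == NULL: only the tuple id is copied out */
	while (AppendOnlyVisimapStore_GetNext(store, scan,
										  ForwardScanDirection,
										  NULL, &tid))
		count++;

	return count;
}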
/*
 * Returns the number of hidden tuples in a given relation
 */ 
int64
AppendOnlyVisimapStore_GetRelationHiddenTupleCount(
	AppendOnlyVisimapStore *visiMapStore,
	AppendOnlyVisimapEntry *visiMapEntry)
{
	IndexScanDesc indexScan;
	int64 hiddenTupcount = 0;

	Assert(visiMapStore);
	Assert(visiMapEntry);
	Assert(RelationIsValid(visiMapStore->visimapRelation));
	Assert(RelationIsValid(visiMapStore->visimapIndex));
	
	indexScan = AppendOnlyVisimapStore_BeginScan(
			visiMapStore,
			0,
			NULL);
	
	while (AppendOnlyVisimapStore_GetNext(visiMapStore,
		indexScan, ForwardScanDirection,
		visiMapEntry, NULL))
	{
		hiddenTupcount += AppendOnlyVisimapEntry_GetHiddenTupleCount(visiMapEntry);
	}
	AppendOnlyVisimapStore_EndScan(visiMapStore, indexScan);
	return hiddenTupcount;
}
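/*
 * Illustrative sketch (not from the original code): the relation-wide hidden
 * count is typically subtracted from a total tuple count to obtain the number
 * of visible tuples. `totalTupcount` is a hypothetical caller-supplied value;
 * `store` and `entry` are assumed to be initialized.
 */
static int64
visible_tuple_count(AppendOnlyVisimapStore *store,
					AppendOnlyVisimapEntry *entry,
					int64 totalTupcount)
{
	int64 hidden =
		AppendOnlyVisimapStore_GetRelationHiddenTupleCount(store, entry);

	return totalTupcount - hidden;
}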
/*
 * Returns true iff there are no visibility map entries stored for the
 * given segment file num, i.e. the segment file is fully visible.
 */
bool
AppendOnlyVisimapStore_IsSegmentFileFullyVisible(
	AppendOnlyVisimapStore *visiMapStore,
	int segmentFileNum)
{
	ScanKeyData scanKey;
	IndexScanDesc indexScan;
	bool found_visimap_entry;

	Assert(visiMapStore);
	Assert(RelationIsValid(visiMapStore->visimapRelation));
	Assert(RelationIsValid(visiMapStore->visimapIndex));
	
	ScanKeyInit(&scanKey,
			Anum_pg_aovisimap_segno, /* segno */
			BTEqualStrategyNumber,
			F_INT4EQ,
			Int32GetDatum(segmentFileNum));

	indexScan = AppendOnlyVisimapStore_BeginScan(
			visiMapStore,
			1,
			&scanKey);
	
	found_visimap_entry = AppendOnlyVisimapStore_GetNext(
			visiMapStore,
			indexScan,
			ForwardScanDirection,
			NULL, NULL);
	AppendOnlyVisimapStore_EndScan(visiMapStore, indexScan);
	return !found_visimap_entry;
}
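/*
 * Illustrative sketch (not from the original code): combining the two
 * segment-level helpers above. If no visimap entry exists for the segment
 * file, the file is fully visible and its hidden tuple count is zero by
 * definition. `store` and `entry` are assumed to be initialized.
 */
static int64
segfile_hidden_count(AppendOnlyVisimapStore *store,
					 AppendOnlyVisimapEntry *entry,
					 int segmentFileNum)
{
	if (AppendOnlyVisimapStore_IsSegmentFileFullyVisible(store,
														 segmentFileNum))
		return 0;

	return AppendOnlyVisimapStore_GetSegmentFileHiddenTupleCount(store, entry,
																 segmentFileNum);
}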
Example #5
cqContext	*caql_addrel(cqContext *pCtx, Relation rel)
{
	if (RelationIsValid(rel))
	{
		Assert(!RelationIsValid(pCtx->cq_heap_rel));
		pCtx->cq_heap_rel  = rel;
		pCtx->cq_externrel = true;
	}
	return (pCtx);
}
/*
 * Fetches the next entry from a visimap store index scan.
 *
 * It is the responsibility of the caller to decode the return value
 * correctly.
 *
 */
static HeapTuple
AppendOnlyVisimapStore_GetNextTuple(
	AppendOnlyVisimapStore *visiMapStore,
	IndexScanDesc indexScan,
	ScanDirection scanDirection)
{
	Assert(visiMapStore);
	Assert(RelationIsValid(visiMapStore->visimapRelation));
	Assert(RelationIsValid(visiMapStore->visimapIndex));
	Assert(indexScan);

	return index_getnext(indexScan, scanDirection);
}
/**
 * Finds the visibility map entry tuple for a given
 * segmentFileNum and firstRowNum.
 *
 * Note: firstRowNum must be a valid first row number of a visimap entry.
 * In particular, it is not the tuple id of the append-only tuple being
 * checked, updated, or deleted.
 *
 * Returns true if such an entry exists; it then becomes the current
 * entry. Otherwise, false is returned.
 *
 * Assumes that the store data structure has been initialized, but not finished.
 */
bool
AppendOnlyVisimapStore_Find(
		AppendOnlyVisimapStore* visiMapStore,
		int32 segmentFileNum,
		int64 firstRowNum,
		AppendOnlyVisimapEntry* visiMapEntry)
{
	ScanKey scanKeys;
	IndexScanDesc indexScan;

	Assert(visiMapStore);
	Assert(visiMapEntry);
	Assert(RelationIsValid(visiMapStore->visimapRelation));
	Assert(RelationIsValid(visiMapStore->visimapIndex));
	
	elogif(Debug_appendonly_print_visimap, LOG,
			"Append-only visi map store: Load entry: "
			"(segFileNum, firstRowNum) = (%u, " INT64_FORMAT ")",
			segmentFileNum, firstRowNum);

	scanKeys = visiMapStore->scanKeys;
	scanKeys[0].sk_argument = Int32GetDatum(segmentFileNum);
	scanKeys[1].sk_argument = Int64GetDatum(firstRowNum);

	indexScan = AppendOnlyVisimapStore_BeginScan(
			visiMapStore,
			APPENDONLY_VISIMAP_INDEX_SCAN_KEY_NUM,
			scanKeys);

	if (!AppendOnlyVisimapStore_GetNext(
				visiMapStore,
				indexScan,
				BackwardScanDirection,
				visiMapEntry,
				&visiMapEntry->tupleTid))
	{
		elogif(Debug_appendonly_print_visimap, LOG, 
				"Append-only visi map store: Visimap entry does not exist: "
				"(segFileNum, firstRowNum) = (%u, " INT64_FORMAT ")",
				segmentFileNum, firstRowNum);
		
		/* failed to look up the visimap entry */
		AppendOnlyVisimapStore_EndScan(visiMapStore, indexScan);
		return false;
	}
	AppendOnlyVisimapStore_EndScan(visiMapStore, indexScan);
	return true;
}
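/*
 * Illustrative sketch (not from the original code): probe the visimap for
 * the entry covering (segmentFileNum, firstRowNum) and return its hidden
 * tuple count, or 0 if no such entry exists. `store` and `entry` are assumed
 * to be initialized; firstRowNum must be a valid entry first row number.
 */
static int64
hidden_count_for_entry(AppendOnlyVisimapStore *store,
					   AppendOnlyVisimapEntry *entry,
					   int32 segmentFileNum, int64 firstRowNum)
{
	if (!AppendOnlyVisimapStore_Find(store, segmentFileNum, firstRowNum,
									 entry))
		return 0;		/* no entry: every covered tuple is visible */

	return AppendOnlyVisimapEntry_GetHiddenTupleCount(entry);
}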
Example #8
/* ----------------------------------------------------------------
 * caql_insert_inmem()
 * during beginscan/endscan iteration, insert a tuple to in-memory-only relation
 * ----------------------------------------------------------------
 */
void
caql_insert_inmem(cqContext *pCtx, HeapTuple tup)
{
	InMemHeapRelation inmemrel = NULL;
	Relation rel = pCtx->cq_heap_rel;

	Assert(RelationIsValid(rel));

	disable_catalog_check(pCtx, tup);

	{ /* scope for caql_iud_switch */
		caql_iud_switch(pCtx, 1, NULL, tup, true /* dontWait */);

		inmemrel = OidGetInMemHeapRelation(rel->rd_id, INMEM_ONLY_MAPPING);
		if (NULL == inmemrel)
		{
			inmemrel = InMemHeap_Create(rel->rd_id, rel, FALSE /* ownRel */, 10 /* initSize */, pCtx->cq_lockmode /*AccessShareLock*/,
					RelationGetRelationName(rel), FALSE /* createIndex */, 0 /* keyAttrno */, INMEM_ONLY_MAPPING);
			elog(DEBUG2, "Created new entry for in memory table %s", RelationGetRelationName(rel));
		}
		Assert(NULL != inmemrel);
		InMemHeap_CheckConstraints(inmemrel, tup);
		InMemHeap_Insert(inmemrel, tup, -1 /* valid for all segments */);

		// TODO: is it relevant for in-memory?
		/* keep the catalog indexes up to date (if has any) */
		//caql_UpdateIndexes(pCtx, rel, tup);
	}
}
Example #9
Datum
pg_relation_size_oid(PG_FUNCTION_ARGS)
{
	Oid			relOid = PG_GETARG_OID(0);
	Relation	rel;
	int64		size = 0;
	
	if (GP_ROLE_EXECUTE == Gp_role)
	{
		ereport(ERROR,
				(errcode(ERRCODE_GP_COMMAND_ERROR),
						errmsg("pg_relation_size: cannot be executed in segment")));
	}

	rel = try_relation_open(relOid, AccessShareLock, false);
		
	/*
	 * While we scan pg_class with an MVCC snapshot,
 	 * someone else might drop the table. It's better to return NULL for
	 * already-dropped tables than throw an error and abort the whole query.
	 */
	if (!RelationIsValid(rel))
  		PG_RETURN_NULL();
	
	if (relOid == 0 || rel->rd_node.relNode == 0)
		size = 0;
	else
		size = calculate_relation_size(rel); 
	
	relation_close(rel, AccessShareLock);

	PG_RETURN_INT64(size);
}
Example #10
/*
 * MUST BE CALLED ONLY ON RECOVERY.
 *
 * Check if exists valid (inserted by not aborted xaction) heap tuple
 * for given item pointer
 */
bool
XLogIsValidTuple(RelFileNode hnode, ItemPointer iptr)
{
	Relation	reln;
	Buffer		buffer;
	Page		page;
	ItemId		lp;
	HeapTupleHeader htup;

	reln = XLogOpenRelation(false, RM_HEAP_ID, hnode);
	if (!RelationIsValid(reln))
		return (false);

	buffer = ReadBuffer(reln, ItemPointerGetBlockNumber(iptr));
	if (!BufferIsValid(buffer))
		return (false);

	LockBuffer(buffer, BUFFER_LOCK_SHARE);
	page = (Page) BufferGetPage(buffer);
	if (PageIsNew((PageHeader) page) ||
		ItemPointerGetOffsetNumber(iptr) > PageGetMaxOffsetNumber(page))
	{
		UnlockAndReleaseBuffer(buffer);
		return (false);
	}

	if (PageGetSUI(page) != ThisStartUpID)
	{
		Assert(PageGetSUI(page) < ThisStartUpID);
		UnlockAndReleaseBuffer(buffer);
		return (true);
	}

	lp = PageGetItemId(page, ItemPointerGetOffsetNumber(iptr));
	if (!ItemIdIsUsed(lp) || ItemIdDeleted(lp))
	{
		UnlockAndReleaseBuffer(buffer);
		return (false);
	}

	htup = (HeapTupleHeader) PageGetItem(page, lp);

	/* MUST CHECK WASN'T TUPLE INSERTED IN PREV STARTUP */

	if (!(htup->t_infomask & HEAP_XMIN_COMMITTED))
	{
		if (htup->t_infomask & HEAP_XMIN_INVALID ||
			(htup->t_infomask & HEAP_MOVED_IN &&
			 TransactionIdDidAbort(HeapTupleHeaderGetXvac(htup))) ||
			TransactionIdDidAbort(HeapTupleHeaderGetXmin(htup)))
		{
			UnlockAndReleaseBuffer(buffer);
			return (false);
		}
	}

	UnlockAndReleaseBuffer(buffer);
	return (true);
}
Example #11
/*
 *	Compute the on-disk size of files for the relation according to the
 *	stat function, including heap data, index data, toast data, aoseg data,
 *  aoblkdir data, and aovisimap data.
 */
static int64
calculate_total_relation_size(Oid Relid)
{
	Relation	heapRel;
	Oid			toastOid;
	int64		size;
	ListCell   *cell;
	ForkNumber	forkNum;

	heapRel = try_relation_open(Relid, AccessShareLock, false);

	if (!RelationIsValid(heapRel))
		return 0;

	toastOid = heapRel->rd_rel->reltoastrelid;

	/* Get the heap size */
	if (Relid == 0 || heapRel->rd_node.relNode == 0)
		size = 0;
	else
	{
		size = 0;
		for (forkNum = 0; forkNum <= MAX_FORKNUM; forkNum++)
			size += calculate_relation_size(heapRel, forkNum);
	}

	/* Include any dependent indexes */
	if (heapRel->rd_rel->relhasindex)
	{
		List	   *index_oids = RelationGetIndexList(heapRel);

		foreach(cell, index_oids)
		{
			Oid			idxOid = lfirst_oid(cell);
			Relation	iRel;

			iRel = try_relation_open(idxOid, AccessShareLock, false);

			if (RelationIsValid(iRel))
			{
				for (forkNum = 0; forkNum <= MAX_FORKNUM; forkNum++)
					size += calculate_relation_size(iRel, forkNum);

				relation_close(iRel, AccessShareLock);
			}
		}
Example #12
/*
 *	Compute the on-disk size of files for the relation according to the
 *	stat function, including heap data, index data, toast data, aoseg data,
 *  aoblkdir data, and aovisimap data.
 */
static int64
calculate_total_relation_size(Oid Relid)
{
	Relation	heapRel;
	Oid			toastOid;
	AppendOnlyEntry *aoEntry = NULL;
	int64		size;
	ListCell   *cell;

	heapRel = try_relation_open(Relid, AccessShareLock, false);

	if (!RelationIsValid(heapRel))
		return 0;

	toastOid = heapRel->rd_rel->reltoastrelid;

	if (RelationIsAoRows(heapRel) || RelationIsAoCols(heapRel))
		aoEntry = GetAppendOnlyEntry(Relid, SnapshotNow);
	
	/* Get the heap size */
	if (Relid == 0 || heapRel->rd_node.relNode == 0)
		size = 0;
	else
		size = calculate_relation_size(heapRel); 

	/* Include any dependent indexes */
	if (heapRel->rd_rel->relhasindex)
	{
		List	   *index_oids = RelationGetIndexList(heapRel);

		foreach(cell, index_oids)
		{
			Oid			idxOid = lfirst_oid(cell);
			Relation	iRel;

			iRel = try_relation_open(idxOid, AccessShareLock, false);

			if (RelationIsValid(iRel))
			{
				size += calculate_relation_size(iRel); 

				relation_close(iRel, AccessShareLock);
			}
		}
Example #13
Datum
pg_relation_size(PG_FUNCTION_ARGS)
{
	Oid			relOid = PG_GETARG_OID(0);
	text	   *forkName = PG_GETARG_TEXT_P(1);
	Relation	rel;
	int64		size = 0;

	/**
	 * This function is peculiar in that it does its own dispatching.
	 * It does not work on entry db since we do not support dispatching
	 * from entry-db currently.
	 */
	if (Gp_role == GP_ROLE_EXECUTE && Gp_segment == -1)
		elog(ERROR, "This query is not currently supported by GPDB.");

	rel = try_relation_open(relOid, AccessShareLock, false);

	/*
	 * While we scan pg_class with an MVCC snapshot,
 	 * someone else might drop the table. It's better to return NULL for
	 * already-dropped tables than throw an error and abort the whole query.
	 */
	if (!RelationIsValid(rel))
  		PG_RETURN_NULL();

	if (relOid == 0 || rel->rd_node.relNode == 0)
		size = 0;
	else
		size = calculate_relation_size(rel,
									   forkname_to_number(text_to_cstring(forkName)));

	if (Gp_role == GP_ROLE_DISPATCH)
	{
		StringInfoData buffer;
		char *schemaName;
		char *relName;

		schemaName = get_namespace_name(get_rel_namespace(relOid));
		if (schemaName == NULL)
			elog(ERROR, "Cannot find schema for oid %d", relOid);
		relName = get_rel_name(relOid);
		if (relName == NULL)
			elog(ERROR, "Cannot find relation for oid %d", relOid);

		initStringInfo(&buffer);

		appendStringInfo(&buffer, "select sum(pg_relation_size('%s.%s'))::int8 from gp_dist_random('gp_id');", quote_identifier(schemaName), quote_identifier(relName));

		size += get_size_from_segDBs(buffer.data);
	}

	relation_close(rel, AccessShareLock);

	PG_RETURN_INT64(size);
}
Example #14
/* ----------------------------------------------------------------
 * caql_delete_current()
 * during beginscan/endscan iteration, delete current tuple 
 * ----------------------------------------------------------------
 */
void caql_delete_current(cqContext *pCtx)
{
	Relation				 rel;

	rel  = pCtx->cq_heap_rel;
	Assert(RelationIsValid(rel));

	if (HeapTupleIsValid(pCtx->cq_lasttup))
		simple_heap_delete(rel, &(pCtx->cq_lasttup)->t_self);
}
Example #15
/*
 * RelationInitLockInfo
 *		Initializes the lock information in a relation descriptor.
 *
 *		relcache.c must call this during creation of any reldesc.
 */
void
RelationInitLockInfo(Relation relation)
{
	Assert(RelationIsValid(relation));
	Assert(OidIsValid(RelationGetRelid(relation)));

	relation->rd_lockInfo.lockRelId.relId = RelationGetRelid(relation);

	if (relation->rd_rel->relisshared)
		relation->rd_lockInfo.lockRelId.dbId = InvalidOid;
	else
		relation->rd_lockInfo.lockRelId.dbId = MyDatabaseId;
}
/*
 * Deletes all visibility map information from a given
 * segment file.
 */ 
void
AppendOnlyVisimapStore_DeleteSegmentFile(
	AppendOnlyVisimapStore *visiMapStore,
	int segmentFileNum)
{
	ScanKeyData scanKey;
	IndexScanDesc indexScan;
	ItemPointerData tid;

	Assert(visiMapStore);
	Assert(RelationIsValid(visiMapStore->visimapRelation));
	Assert(RelationIsValid(visiMapStore->visimapIndex));
	
	elogif(Debug_appendonly_print_visimap, LOG, 
			"Append-only visi map store: Delete segment file: "
			"(segFileNum) = (%u)", segmentFileNum);

	ScanKeyInit(&scanKey,
			Anum_pg_aovisimap_segno, /* segno */
			BTEqualStrategyNumber,
			F_INT4EQ,
			Int32GetDatum(segmentFileNum));

	indexScan = AppendOnlyVisimapStore_BeginScan(
			visiMapStore,
			1,
			&scanKey);

	while (AppendOnlyVisimapStore_GetNext(visiMapStore,
				indexScan,
				ForwardScanDirection,
				NULL,
				&tid))
	{
		simple_heap_delete(visiMapStore->visimapRelation,
				&tid);
	}
	AppendOnlyVisimapStore_EndScan(visiMapStore, indexScan);
}
Example #17
/*
 * XactLockTableWaitErrorContextCb
 *		Error context callback for transaction lock waits.
 */
static void
XactLockTableWaitErrorCb(void *arg)
{
	XactLockTableWaitInfo *info = (XactLockTableWaitInfo *) arg;

	/*
	 * We would like to print schema name too, but that would require a
	 * syscache lookup.
	 */
	if (info->oper != XLTW_None &&
		ItemPointerIsValid(info->ctid) && RelationIsValid(info->rel))
	{
		const char *cxt;

		switch (info->oper)
		{
			case XLTW_Update:
				cxt = gettext_noop("while updating tuple (%u,%u) in relation \"%s\"");
				break;
			case XLTW_Delete:
				cxt = gettext_noop("while deleting tuple (%u,%u) in relation \"%s\"");
				break;
			case XLTW_Lock:
				cxt = gettext_noop("while locking tuple (%u,%u) in relation \"%s\"");
				break;
			case XLTW_LockUpdated:
				cxt = gettext_noop("while locking updated version (%u,%u) of tuple in relation \"%s\"");
				break;
			case XLTW_InsertIndex:
				cxt = gettext_noop("while inserting index tuple (%u,%u) in relation \"%s\"");
				break;
			case XLTW_InsertIndexUnique:
				cxt = gettext_noop("while checking uniqueness of tuple (%u,%u) in relation \"%s\"");
				break;
			case XLTW_FetchUpdated:
				cxt = gettext_noop("while rechecking updated tuple (%u,%u) in relation \"%s\"");
				break;
			case XLTW_RecheckExclusionConstr:
				cxt = gettext_noop("while checking exclusion constraint on tuple (%u,%u) in relation \"%s\"");
				break;

			default:
				return;
		}

		errcontext(cxt,
				   ItemPointerGetBlockNumber(info->ctid),
				   ItemPointerGetOffsetNumber(info->ctid),
				   RelationGetRelationName(info->rel));
	}
}
Example #18
static void
bitmap_xlog_insert_bitmap_lastwords(bool redo, XLogRecPtr lsn, XLogRecord* record)
{
	xl_bm_bitmap_lastwords	*xlrec = 
		(xl_bm_bitmap_lastwords*) XLogRecGetData(record);
	Relation reln;

	reln = XLogOpenRelation(xlrec->bm_node);
	if (!RelationIsValid(reln))
		return;

	if (redo)
	{
		Buffer		lovBuffer;
		Page		lovPage;
		BMLOVItem	lovItem;

#ifdef BM_DEBUG
		ereport(LOG, (errcode(LOG), 
			errmsg("call bitmap_xlog_insert_bitmap_lastwords: redo=%d\n", 
					redo)));
#endif

		lovBuffer = XLogReadBuffer(false, reln, xlrec->bm_lov_blkno);
		if (!BufferIsValid(lovBuffer))
			elog(PANIC, "bm_insert_redo: block unfound: %d",
				 xlrec->bm_lov_blkno);

		lovPage = BufferGetPage(lovBuffer);

		if (XLByteLT(PageGetLSN(lovPage), lsn))
		{
			lovItem = (BMLOVItem)
				PageGetItem(lovPage, PageGetItemId(lovPage, xlrec->bm_lov_offset));

			lovItem->bm_last_compword = xlrec->bm_last_compword;
			lovItem->bm_last_word = xlrec->bm_last_word;
			lovItem->bm_last_two_headerbits = xlrec->bm_last_two_headerbits;

			PageSetLSN(lovPage, lsn);
			PageSetTLI(lovPage, ThisTimeLineID);
			_bitmap_wrtbuf(lovBuffer);
		}

		else
			_bitmap_relbuf(lovBuffer);
	}

	else
		elog(PANIC, "bm_insert_undo: not implemented.");
}
/*
 * Returns true iff there are no visibility map entries for the relation,
 * i.e. all tuples of the relation are visible.
 */
bool
AppendOnlyVisimapStore_IsRelationFullyVisible(
	AppendOnlyVisimapStore *visiMapStore)
{
	IndexScanDesc indexScan;
	bool found_visimap_entry;

	Assert(visiMapStore);
	Assert(RelationIsValid(visiMapStore->visimapRelation));
	Assert(RelationIsValid(visiMapStore->visimapIndex));
	
	indexScan = AppendOnlyVisimapStore_BeginScan(
			visiMapStore,
			0,
			NULL);
	found_visimap_entry = AppendOnlyVisimapStore_GetNext(
			visiMapStore,
			indexScan,
			ForwardScanDirection,
			NULL, NULL);
	AppendOnlyVisimapStore_EndScan(visiMapStore, indexScan);
	return !found_visimap_entry;
}
Example #20
/* ----------------------------------------------------------------
 * caql_update_current()
 * during beginscan/endscan iteration, update current tuple,
 * and update catalog indexes if necessary 
 * NOTE: a separate call to CatalogUpdateIndexes after this will 
 * cause an error
 * ----------------------------------------------------------------
 */
void caql_update_current(cqContext *pCtx, HeapTuple tup)
{
	Relation				 rel;

	rel  = pCtx->cq_heap_rel;
	Assert(RelationIsValid(rel));

	Insist(HeapTupleIsValid(pCtx->cq_lasttup));

	simple_heap_update(rel, &(pCtx->cq_lasttup)->t_self, tup);

	/* keep the catalog indexes up to date (if has any) */
	caql_UpdateIndexes(pCtx, rel, tup);
}
Example #21
/* ----------------------------------------------------------------
 * caql_insert()
 * during beginscan/endscan iteration, insert a tuple
 * NOTE: a separate call to CatalogUpdateIndexes after this will 
 * cause an error
 * ----------------------------------------------------------------
 */
Oid caql_insert(cqContext *pCtx, HeapTuple tup)
{
	Relation		 rel;
	Oid				 result;

	rel  = pCtx->cq_heap_rel;
	Assert(RelationIsValid(rel));

	result = simple_heap_insert(rel, tup);

	/* keep the catalog indexes up to date (if has any) */
	caql_UpdateIndexes(pCtx, rel, tup);

	return (result);
}
Example #22
/* ----------------------------------------------------------------
 * caql_delete_current()
 * during beginscan/endscan iteration, delete current tuple
 * ----------------------------------------------------------------
 */
void
caql_delete_current(cqContext *pCtx)
{
	Relation				 rel;

	rel  = pCtx->cq_heap_rel;
	Assert(RelationIsValid(rel));

	disable_catalog_check(pCtx, pCtx->cq_lasttup);
	if (HeapTupleIsValid(pCtx->cq_lasttup))
	{
		caql_iud_switch(pCtx, 0, pCtx->cq_lasttup, NULL, true /* dontWait */);
		simple_heap_delete(rel, &(pCtx->cq_lasttup)->t_self);
	}
}
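/*
 * Illustrative sketch (not from the original code): caql_delete_current() is
 * meant to be called between caql_beginscan() and caql_endscan(). The scan
 * entry points used here (cql, caql_beginscan, caql_getnext, caql_endscan)
 * and the query text are assumptions; they are not part of this listing.
 */
static void
delete_matching_tuple_sketch(Oid relid)
{
	cqContext  *pcqCtx;
	HeapTuple	tup;

	pcqCtx = caql_beginscan(NULL,
							cql("SELECT * FROM pg_class "
								"WHERE oid = :1 ",
								ObjectIdGetDatum(relid)));

	while (HeapTupleIsValid(tup = caql_getnext(pcqCtx)))
		caql_delete_current(pcqCtx);

	caql_endscan(pcqCtx);
}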
/*
 * Starts a scan over the visimap store.
 *
 * Parameter keys may be NULL iff nkeys is zero.
 */ 
IndexScanDesc
AppendOnlyVisimapStore_BeginScan(
	AppendOnlyVisimapStore *visiMapStore,
		int nkeys,
		ScanKey keys)
{
	Assert(visiMapStore);
	Assert(RelationIsValid(visiMapStore->visimapRelation));

	return index_beginscan(
			visiMapStore->visimapRelation,
			visiMapStore->visimapIndex,
			visiMapStore->snapshot,
			nkeys,
			keys);
}
Example #24
/*
 *		XactLockTableWait
 *
 * Wait for the specified transaction to commit or abort.  If an operation
 * is specified, an error context callback is set up.  If 'oper' is passed as
 * None, no error context callback is set up.
 *
 * Note that this does the right thing for subtransactions: if we wait on a
 * subtransaction, we will exit as soon as it aborts or its top parent commits.
 * It takes some extra work to ensure this, because to save on shared memory
 * the XID lock of a subtransaction is released when it ends, whether
 * successfully or unsuccessfully.  So we have to check if it's "still running"
 * and if so wait for its parent.
 */
void
XactLockTableWait(TransactionId xid, Relation rel, ItemPointer ctid,
				  XLTW_Oper oper)
{
	LOCKTAG		tag;
	XactLockTableWaitInfo info;
	ErrorContextCallback callback;

	/*
	 * If an operation is specified, set up our verbose error context
	 * callback.
	 */
	if (oper != XLTW_None)
	{
		Assert(RelationIsValid(rel));
		Assert(ItemPointerIsValid(ctid));

		info.rel = rel;
		info.ctid = ctid;
		info.oper = oper;

		callback.callback = XactLockTableWaitErrorCb;
		callback.arg = &info;
		callback.previous = error_context_stack;
		error_context_stack = &callback;
	}

	for (;;)
	{
		Assert(TransactionIdIsValid(xid));
		Assert(!TransactionIdEquals(xid, GetTopTransactionIdIfAny()));

		SET_LOCKTAG_TRANSACTION(tag, xid);

		(void) LockAcquire(&tag, ShareLock, false, false);

		LockRelease(&tag, ShareLock, false);

		if (!TransactionIdIsInProgress(xid))
			break;
		xid = SubTransGetParent(xid);
	}

	if (oper != XLTW_None)
		error_context_stack = callback.previous;
}
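/*
 * Illustrative sketch (not from the original code): a typical call site,
 * e.g. an update path that must wait for a concurrent updater before
 * retrying. `rel`, `tuple`, and `xwait` (the blocking transaction id, e.g.
 * taken from the tuple's xmax) are assumed to be supplied by the caller.
 */
static void
wait_for_updater(Relation rel, HeapTuple tuple, TransactionId xwait)
{
	if (TransactionIdIsValid(xwait))
		XactLockTableWait(xwait, rel, &tuple->t_self, XLTW_Update);
}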
Example #25
/* ----------------------------------------------------------------
 * caql_form_tuple()
 * during beginscan/endscan iteration, form a tuple
 * ----------------------------------------------------------------
 */
HeapTuple caql_form_tuple(cqContext *pCtx, Datum *replValues,
						  bool *replIsnull)
{
	Relation				 rel;
	HeapTuple				 tuple = NULL;

	rel  = pCtx->cq_heap_rel;
	Assert(RelationIsValid(rel));

	{
		tuple = heap_form_tuple(RelationGetDescr(rel), 
								replValues,
								replIsnull);
	}

	return (tuple);
}
Example #26
/*
 * Check if specified heap tuple was inserted by given
 * xaction/command and return
 *
 * - -1 if not
 * - 0	if there is no tuple at all
 * - 1	if yes
 */
int
XLogIsOwnerOfTuple(RelFileNode hnode, ItemPointer iptr,
				   TransactionId xid, CommandId cid)
{
	Relation	reln;
	Buffer		buffer;
	Page		page;
	ItemId		lp;
	HeapTupleHeader htup;

	reln = XLogOpenRelation(false, RM_HEAP_ID, hnode);
	if (!RelationIsValid(reln))
		return (0);

	buffer = ReadBuffer(reln, ItemPointerGetBlockNumber(iptr));
	if (!BufferIsValid(buffer))
		return (0);

	LockBuffer(buffer, BUFFER_LOCK_SHARE);
	page = (Page) BufferGetPage(buffer);
	if (PageIsNew((PageHeader) page) ||
		ItemPointerGetOffsetNumber(iptr) > PageGetMaxOffsetNumber(page))
	{
		UnlockAndReleaseBuffer(buffer);
		return (0);
	}
	lp = PageGetItemId(page, ItemPointerGetOffsetNumber(iptr));
	if (!ItemIdIsUsed(lp) || ItemIdDeleted(lp))
	{
		UnlockAndReleaseBuffer(buffer);
		return (0);
	}

	htup = (HeapTupleHeader) PageGetItem(page, lp);

	Assert(PageGetSUI(page) == ThisStartUpID);
	if (!TransactionIdEquals(HeapTupleHeaderGetXmin(htup), xid) ||
		HeapTupleHeaderGetCmin(htup) != cid)
	{
		UnlockAndReleaseBuffer(buffer);
		return (-1);
	}

	UnlockAndReleaseBuffer(buffer);
	return (1);
}
Example #27
/*
 *	PrepareToInvalidateCacheTuple()
 *
 *	This is part of a rather subtle chain of events, so pay attention:
 *
 *	When a tuple is inserted or deleted, it cannot be flushed from the
 *	catcaches immediately, for reasons explained at the top of cache/inval.c.
 *	Instead we have to add entry(s) for the tuple to a list of pending tuple
 *	invalidations that will be done at the end of the command or transaction.
 *
 *	The lists of tuples that need to be flushed are kept by inval.c.  This
 *	routine is a helper routine for inval.c.  Given a tuple belonging to
 *	the specified relation, find all catcaches it could be in, compute the
 *	correct hash value for each such catcache, and call the specified function
 *	to record the cache id, hash value, and tuple ItemPointer in inval.c's
 *	lists.	CatalogCacheIdInvalidate will be called later, if appropriate,
 *	using the recorded information.
 *
 *	Note that it is irrelevant whether the given tuple is actually loaded
 *	into the catcache at the moment.  Even if it's not there now, it might
 *	be by the end of the command, or there might be a matching negative entry
 *	to flush --- or other backends' caches might have such entries --- so
 *	we have to make list entries to flush it later.
 *
 *	Also note that it's not an error if there are no catcaches for the
 *	specified relation.  inval.c doesn't know exactly which rels have
 *	catcaches --- it will call this routine for any tuple that's in a
 *	system relation.
 */
void
PrepareToInvalidateCacheTuple(Relation relation,
							  HeapTuple tuple,
						void (*function) (int, uint32, ItemPointer, Oid))
{
	CatCache   *ccp;
	Oid			reloid;

	CACHE1_elog(DEBUG2, "PrepareToInvalidateCacheTuple: called");

	/*
	 * sanity checks
	 */
	Assert(RelationIsValid(relation));
	Assert(HeapTupleIsValid(tuple));
	Assert(PointerIsValid(function));
	Assert(CacheHdr != NULL);

	reloid = RelationGetRelid(relation);

	/* ----------------
	 *	for each cache
	 *	   if the cache contains tuples from the specified relation
	 *		   compute the tuple's hash value in this cache,
	 *		   and call the passed function to register the information.
	 * ----------------
	 */

	for (ccp = CacheHdr->ch_caches; ccp; ccp = ccp->cc_next)
	{
		/* Just in case cache hasn't finished initialization yet... */
		if (ccp->cc_tupdesc == NULL)
			CatalogCacheInitializeCache(ccp);

		if (ccp->cc_reloid != reloid)
			continue;

		(*function) (ccp->id,
					 CatalogCacheComputeTupleHashValue(ccp, tuple),
					 &tuple->t_self,
					 ccp->cc_relisshared ? (Oid) 0 : MyDatabaseId);
	}
}
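/*
 * Illustrative sketch (not from the original code): a callback conforming to
 * the function-pointer contract above. inval.c's real callback records the
 * values for later flushing; this hypothetical one merely logs them.
 */
static void
log_catcache_invalidation(int cacheId, uint32 hashValue,
						  ItemPointer pointer, Oid dbId)
{
	elog(DEBUG2,
		 "would invalidate cache %d, hash %u, tid (%u,%u), db %u",
		 cacheId, hashValue,
		 ItemPointerGetBlockNumber(pointer),
		 ItemPointerGetOffsetNumber(pointer),
		 dbId);
}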
/*
 * Starts a scan over the visimap store.
 *
 * Parameter keys may be NULL iff nkeys is zero.
 */
IndexScanDesc
AppendOnlyVisimapStore_BeginScan(AppendOnlyVisimapStore *visiMapStore,
								 int nkeys,
								 ScanKey keys)
{
	IndexScanDesc scandesc;

	Assert(visiMapStore);
	Assert(RelationIsValid(visiMapStore->visimapRelation));

	scandesc = index_beginscan(visiMapStore->visimapRelation,
							   visiMapStore->visimapIndex,
							   visiMapStore->snapshot,
							   nkeys,
							   0);
	index_rescan(scandesc, keys, nkeys, NULL, 0);

	return scandesc;
}
Example #29
static void
bitmap_xlog_insert_meta(bool redo, XLogRecPtr lsn, XLogRecord* record)
{
	xl_bm_metapage	*xlrec = (xl_bm_metapage*) XLogRecGetData(record);
	Relation		reln;

	reln = XLogOpenRelation(xlrec->bm_node);
	
	if (!RelationIsValid(reln))
		return;

	if (redo)
	{
		Buffer			metabuf;
		BMMetaPage			metapage;

#ifdef BM_DEBUG
		ereport(LOG, (errcode(LOG), 
			errmsg("call bitmap_xlog_insert_meta: redo=%d\n", redo)));
#endif

		metabuf = XLogReadBuffer(false, reln, BM_METAPAGE);
		if (!BufferIsValid(metabuf))
			elog(PANIC, "bm_insert_redo: block unfound: %d", BM_METAPAGE);

		/* restore the page */
		metapage = (BMMetaPage)BufferGetPage(metabuf);

		if (XLByteLT(PageGetLSN(metapage), lsn))
		{
			PageSetLSN(metapage, lsn);
			PageSetTLI(metapage, ThisTimeLineID);
			_bitmap_wrtbuf(metabuf);
		}

		else
			_bitmap_relbuf(metabuf);
	}

	else
		elog(PANIC, "bm_insert_undo: not implemented.");
}
Example #30
/* ----------------------------------------------------------------
 * caql_update_current()
 * during beginscan/endscan iteration, update current tuple,
 * and update catalog indexes if necessary
 * NOTE: a separate call to CatalogUpdateIndexes after this will
 * cause an error
 * ----------------------------------------------------------------
 */
void
caql_update_current(cqContext *pCtx, HeapTuple tup)
{
	Relation				 rel;

	rel  = pCtx->cq_heap_rel;
	Assert(RelationIsValid(rel));

	Insist(HeapTupleIsValid(pCtx->cq_lasttup));

	disable_catalog_check(pCtx, pCtx->cq_lasttup);

	{
		caql_iud_switch(pCtx, 2, pCtx->cq_lasttup, tup, true /* dontWait */);
		simple_heap_update(rel, &(pCtx->cq_lasttup)->t_self, tup);

		/* keep the catalog indexes up to date (if has any) */
		caql_UpdateIndexes(pCtx, rel, tup);
	}
}